prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
---|---|
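Each row below pairs a fill-in-the-middle (FIM) prompt with its completion: the prompt carries the file's prefix and suffix around a `<|fim▁hole|>` marker, and the completion column holds the text that belongs in the hole. A minimal reassembly sketch, assuming the markers follow the usual prefix/hole/suffix FIM convention (the marker strings are copied verbatim from the rows):

```python
def reassemble(prompt: str, completion: str) -> str:
    """Splice a FIM completion back into its prompt to recover the file."""
    # Drop the <|file_name|>...<|end_file_name|> header before the code body.
    body = prompt.split("<|fim▁begin|>", 1)[1]
    prefix, suffix = body.split("<|fim▁hole|>", 1)
    end_marker = "<|fim▁end|>"
    if suffix.endswith(end_marker):
        suffix = suffix[: -len(end_marker)]
    return prefix + completion + suffix
```

In the first row, for example, the hole sits directly before `<|fim▁end|>`, so splicing in the one-line completion `}` simply closes the test class.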
<|file_name|>ArrayLongLongHashMapTest.java<|end_file_name|><|fim▁begin|>package org.kevoree.modeling.memory.struct.map.impl;
import org.kevoree.modeling.memory.struct.map.BaseKLongLongHashMapTest;
import org.kevoree.modeling.memory.struct.map.KLongLongMap;
public class ArrayLongLongHashMapTest extends BaseKLongLongHashMapTest {
@Override
public KLongLongMap createKLongLongHashMap(int p_initalCapacity, float p_loadFactor) {
return new ArrayLongLongMap(p_initalCapacity, p_loadFactor);
}<|fim▁hole|><|fim▁end|>
|
}
|
<|file_name|>deferred.rs<|end_file_name|><|fim▁begin|>use std::ops;
use std::str;
use std::fmt;
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum Deferred<T> {
StaticStr(&'static str),
Actual(T),
}
impl<T> Deferred<T> where T: From<&'static str> {
pub(crate) fn into_actual(self) -> T {
match self {
Deferred::StaticStr(value) => value.into(),
Deferred::Actual(value) => value,
}
}
pub(crate) fn actual(&mut self) -> &mut T {
if let Deferred::StaticStr(value) = *self {
*self = Deferred::Actual(value.into());
}
match *self {
Deferred::StaticStr(_) => panic!("unexpected static storage"),
Deferred::Actual(ref mut value) => value,
}
}
}
impl<T> fmt::Display for Deferred<T> where T: fmt::Display {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
Deferred::StaticStr(value) => fmt::Display::fmt(value, fmt),
Deferred::Actual(ref value) => fmt::Display::fmt(value, fmt),
}
}
}<|fim▁hole|> type Target = str;
fn deref(&self) -> &str {
match *self {
Deferred::StaticStr(value) => value,
Deferred::Actual(ref value) => value,
}
}
}<|fim▁end|>
|
impl<T> ops::Deref for Deferred<T> where T: ops::Deref<Target=str> {
|
<|file_name|>profile.ts<|end_file_name|><|fim▁begin|>import "bootstrap-slider";
import "bootstrap-switch";
export module profile {
function onInfoSubmit() {
var params: { [key: string]: string } = {};
$("#info-container .info-field").each(function() {
let name: string = this.getAttribute("name");
if (name == null) {
return;
}
let value: string = $(this).val();
if ($(this).hasClass("info-slider")) {
let valueTokens: string[] = this.getAttribute("data-value").split(",");
name = name.substring(0, 1).toUpperCase() + name.substring(1);
params["min" + name] = valueTokens[0];
params["max" + name] = valueTokens[1];
return;
} else if (this.getAttribute("type") == "checkbox") {
value = (this.checked ? 1 : 0).toString();
}
params[name] = value;
});
$.post("/office/post/update-profile", params, function(data) {
$("#errors").addClass("hidden");
$("#success").removeClass("hidden");
window.scrollTo(0, 0);
}, "json").fail(function(err) {
$("#success").addClass("hidden");
$("#errors").text(`Error code ${err.status} occurred. Please contact a developer.`);
$("#errors").removeClass("hidden");
window.scrollTo(0, 0);
});
}
$(document).ready(function() {
(<any>$("#hours-slider")).bootstrapSlider({});
$("input.info-field[type='text']").on("keydown", function(evt) {
if (evt.which == 13) {
onInfoSubmit.apply(this);
}
});
$("input.info-field[type='checkbox']").each(function(index: number, element: HTMLElement) {
let $element: JQuery = $(element);
$element.bootstrapSwitch({
"state": $element.prop("checked")
});
});<|fim▁hole|><|fim▁end|>
|
$("button.iu-button[type='submit']").on("click", onInfoSubmit);
});
}
|
<|file_name|>issue-8767.rs<|end_file_name|><|fim▁begin|>impl B { //~ ERROR cannot find type `B` in this scope
}
<|fim▁hole|><|fim▁end|>
|
fn main() {
}
|
<|file_name|>594422d373ee_fip_qos.py<|end_file_name|><|fim▁begin|>#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa
from neutron_lib.db import constants as db_const
from neutron.db import migration
"""fip qos
Revision ID: 594422d373ee
Revises: 7d32f979895f
Create Date: 2016-04-26 17:16:10.323756
"""
# revision identifiers, used by Alembic.
revision = '594422d373ee'
down_revision = '7d32f979895f'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.QUEENS]
<|fim▁hole|> op.create_table(
'qos_fip_policy_bindings',
sa.Column('policy_id',
sa.String(length=db_const.UUID_FIELD_SIZE),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
nullable=False),
sa.Column('fip_id',
sa.String(length=db_const.UUID_FIELD_SIZE),
sa.ForeignKey('floatingips.id', ondelete='CASCADE'),
nullable=False, unique=True))<|fim▁end|>
|
def upgrade():
|
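The `revision`/`down_revision` pair above is what links this migration into Alembic's chain, and `neutron_milestone` tags it for `neutron-db-manage`. A hypothetical follow-up migration would chain onto it like this (the new revision ID is a placeholder, not a real migration):

```python
# 'aaaa00000000' is a made-up ID for illustration only.
revision = 'aaaa00000000'
down_revision = '594422d373ee'  # chains onto the fip-qos migration above

def upgrade():
    pass  # the next schema change would go here
```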
<|file_name|>attr.hpp<|end_file_name|><|fim▁begin|>/*=============================================================================
Copyright (c) 2001-2011 Hartmut Kaiser
Copyright (c) 2001-2014 Joel de Guzman
Copyright (c) 2013 Agustin Berge
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#ifndef BOOST_SPIRIT_X3_ATTR_JUL_23_2008_0956AM
#define BOOST_SPIRIT_X3_ATTR_JUL_23_2008_0956AM
#include <boost/spirit/home/x3/core/parser.hpp>
#include <boost/spirit/home/x3/support/unused.hpp>
#include <boost/spirit/home/x3/support/traits/container_traits.hpp>
#include <boost/spirit/home/x3/support/traits/move_to.hpp>
#include <boost/type_traits/is_same.hpp>
#include <boost/type_traits/remove_cv.hpp>
#include <boost/type_traits/remove_reference.hpp>
#include <algorithm>
#include <cstddef>
#include <string>
#include <utility>
namespace boost { namespace spirit { namespace x3
{
template <typename Value>
struct attr_parser : parser<attr_parser<Value>>
{
typedef Value attribute_type;
static bool const has_attribute =
!is_same<unused_type, attribute_type>::value;
static bool const handles_container =
traits::is_container<attribute_type>::value;
attr_parser(Value const& value)
: value_(value) {}
attr_parser(Value&& value)
: value_(std::move(value)) {}
template <typename Iterator, typename Context
, typename RuleContext, typename Attribute>
bool parse(Iterator& /* first */, Iterator const& /* last */
, Context const& /* context */, RuleContext&, Attribute& attr_) const
{
// $$$ Change to copy_to once we have it $$$
traits::move_to(value_, attr_);
return true;
}
Value value_;
private:
// silence MSVC warning C4512: assignment operator could not be generated<|fim▁hole|>
template <typename Value, std::size_t N>
struct attr_parser<Value[N]> : parser<attr_parser<Value[N]>>
{
typedef Value attribute_type[N];
static bool const has_attribute =
!is_same<unused_type, attribute_type>::value;
static bool const handles_container = true;
attr_parser(Value const (&value)[N])
{
std::copy(value + 0, value + N, value_ + 0);
}
attr_parser(Value (&&value)[N])
{
std::move(value + 0, value + N, value_ + 0);
}
template <typename Iterator, typename Context
, typename RuleContext, typename Attribute>
bool parse(Iterator& /* first */, Iterator const& /* last */
, Context const& /* context */, RuleContext&, Attribute& attr_) const
{
// $$$ Change to copy_to once we have it $$$
traits::move_to(value_ + 0, value_ + N, attr_);
return true;
}
Value value_[N];
private:
// silence MSVC warning C4512: assignment operator could not be generated
attr_parser& operator= (attr_parser const&);
};
template <typename Value>
struct get_info<attr_parser<Value>>
{
typedef std::string result_type;
std::string operator()(attr_parser<Value> const& /*p*/) const
{
return "attr";
}
};
struct attr_gen
{
template <typename Value>
attr_parser<typename remove_cv<
typename remove_reference<Value>::type>::type>
operator()(Value&& value) const
{
return { std::forward<Value>(value) };
}
template <typename Value, std::size_t N>
attr_parser<typename remove_cv<Value>::type[N]>
operator()(Value (&value)[N]) const
{
return { value };
}
template <typename Value, std::size_t N>
attr_parser<typename remove_cv<Value>::type[N]>
operator()(Value (&&value)[N]) const
{
return { value };
}
};
auto const attr = attr_gen{};
}}}
#endif<|fim▁end|>
|
attr_parser& operator= (attr_parser const&);
};
|
<|file_name|>query.py<|end_file_name|><|fim▁begin|>from key import Key
def _object_getattr(obj, field):
"""Attribute getter for the objects to operate on.
This function can be overridden in classes or instances of Query, Filter, and
Order. Thus, a custom function to extract values to attributes can be
specified, and the system can remain agnostic to the client's data model,
without losing query power.
For example, the default implementation works with attributes and items::
def _object_getattr(obj, field):
# check whether this key is an attribute
if hasattr(obj, field):
value = getattr(obj, field)
# if not, perhaps it is an item (raw dicts, etc)
elif field in obj:
value = obj[field]
# return whatever we've got.
return value
Or consider a more complex, application-specific structure::
def _object_getattr(version, field):
if field in ['key', 'committed', 'created', 'hash']:
return getattr(version, field)
else:
return version.attributes[field]['value']
"""
# TODO: consider changing this to raise an exception if no value is found.
value = None
# check whether this key is an attribute
if hasattr(obj, field):
value = getattr(obj, field)
# if not, perhaps it is an item (raw dicts, etc)
elif field in obj:
value = obj[field]
# return whatever we've got.
return value
def limit_gen(limit, iterable):
"""A generator that applies a count `limit`."""
limit = int(limit)
assert limit >= 0, 'negative limit'
for item in iterable:
if limit <= 0:
break
yield item
limit -= 1
def offset_gen(offset, iterable, skip_signal=None):
"""A generator that applies an `offset`, skipping `offset` elements from
`iterable`. If skip_signal is a callable, it will be called with every
skipped element.
"""
offset = int(offset)
assert offset >= 0, 'negative offset'
for item in iterable:
if offset > 0:
offset -= 1
if callable(skip_signal):
skip_signal(item)
else:
yield item
def chain_gen(iterables):
"""A generator that chains `iterables`."""
for iterable in iterables:
for item in iterable:
yield item
class Filter(object):
"""Represents a Filter for a specific field and its value.
Filters are used on queries to narrow down the set of matching objects.
Args:
field: the attribute name (string) on which to apply the filter.
op: the conditional operator to apply (one of
['<', '<=', '=', '!=', '>=', '>']).
value: the attribute value to compare against.
Examples::
Filter('name', '=', 'John Cleese')
Filter('age', '>=', 18)
"""
conditional_operators = ['<', '<=', '=', '!=', '>=', '>']
"""Conditional operators that Filters support."""
_conditional_cmp = {
"<": lambda a, b: a < b,
"<=": lambda a, b: a <= b,
"=": lambda a, b: a == b,
"!=": lambda a, b: a != b,
">=": lambda a, b: a >= b,
">": lambda a, b: a > b
}
object_getattr = staticmethod(_object_getattr)
"""Object attribute getter. Can be overridden to match client data model.
See :py:meth:`datastore.query._object_getattr`.
"""
def __init__(self, field, op, value):
if op not in self.conditional_operators:
raise ValueError(
'"%s" is not a valid filter Conditional Operator' % op)
self.field = field
self.op = op
self.value = value
def __call__(self, obj):
"""Returns whether this object passes this filter.
This method aggressively tries to find the appropriate value.
"""
value = self.object_getattr(obj, self.field)
# TODO: which way should the direction go here? it may make more sense to
# convert the passed-in value instead. Or try both? Or not at all?
if not isinstance(value, self.value.__class__) and self.value is not None and value is not None:
value = self.value.__class__(value)
return self.valuePasses(value)
def valuePasses(self, value):
"""Returns whether this value passes this filter"""
return self._conditional_cmp[self.op](value, self.value)
def __str__(self):
return '%s %s %s' % (self.field, self.op, self.value)
def __repr__(self):
return "Filter('%s', '%s', %s)" % (self.field, self.op, repr(self.value))
def __eq__(self, o):
return self.field == o.field and self.op == o.op and self.value == o.value
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(repr(self))
def generator(self, iterable):
"""Generator function that iteratively filters given `items`."""
for item in iterable:
if self(item):
yield item
@classmethod
def filter(cls, filters, iterable):
"""Returns the elements in `iterable` that pass given `filters`"""
if isinstance(filters, Filter):
filters = [filters]
for filter in filters:
iterable = filter.generator(iterable)
return iterable
class Order(object):
"""Represents an Order upon a specific field, and a direction.
Orders are used on queries to define how they operate on objects
Args:
order: an order in string form. This follows the format: [+-]name
where + is ascending, - is descending, and name is the name
of the field to order by.
Note: if no ordering operator is specified, + is default.
Examples::
Order('+name') # ascending order by name
Order('-age') # descending order by age
Order('score') # ascending order by score
"""
order_operators = ['-', '+']
"""Ordering operators: + is ascending, - is descending."""
object_getattr = staticmethod(_object_getattr)
"""Object attribute getter. Can be overridden to match client data model.
See :py:meth:`datastore.query._object_getattr`.
"""
def __init__(self, order):
self.op = '+'
try:
if order[0] in self.order_operators:
self.op = order[0]
order = order[1:]
except IndexError:
raise ValueError('Order input must not be empty.')
self.field = order
if self.op not in self.order_operators:
raise ValueError('"%s" is not a valid Order Operator.' % op)
def __str__(self):
return '%s%s' % (self.op, self.field)
def __repr__(self):
return "Order('%s%s')" % (self.op, self.field)
def __eq__(self, other):
return self.field == other.field and self.op == other.op
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(repr(self))
def isAscending(self):
return self.op == '+'
def isDescending(self):
return not self.isAscending()
def keyfn(self, obj):
"""A key function to be used in pythonic sort operations."""
return self.object_getattr(obj, self.field)
@classmethod
def multipleOrderComparison(cls, orders):
"""Returns a function that will compare two items according to `orders`"""
comparers = [(o.keyfn, 1 if o.isAscending() else -1) for o in orders]
def cmpfn(a, b):
for keyfn, ascOrDesc in comparers:
comparison = cmp(keyfn(a), keyfn(b)) * ascOrDesc
if comparison != 0:
return comparison
return 0
return cmpfn
@classmethod
def sorted(cls, items, orders):
"""Returns the elements in `items` sorted according to `orders`"""
return sorted(items, cmp=cls.multipleOrderComparison(orders))
class Query(object):
"""A Query describes a set of objects.
Queries are used to retrieve objects and instances matching a set of criteria
from Datastores. Query objects themselves are simply descriptions,
the actual Query implementations are left up to the Datastores.
"""
"""Object attribute getter. Can be overridden to match client data model."""
object_getattr = staticmethod(_object_getattr)
def __init__(self, key=Key('/'), limit=None, offset=0, offset_key=None, object_getattr=None):
""" Initialize a query.
Parameters
key: a key representing the level of this query. For example, a Query with
Key('/MontyPython/Actor:') would query objects in that key path, eg:
Key('/MontyPython/Actor:JohnCleese')
Key('/MontyPython/Actor:EricIdle')
Key('/MontyPython/Actor:GrahamChapman')
It is up to datastores how to implement this namespacing. E.g.,
some datastores may store values in different tables or collections.
limit: an integer representing the maximum number of results to return.
offset: an integer representing a number of results to skip.
object_getattr: a function to extract attribute values from an object. It
is used to satisfy query filters and orders. Defining this function
allows the client to control the data model of the stored values.
The default function attempts to access values as attributes
(__getattr__) or items (__getitem__).
"""
if not isinstance(key, Key):
raise TypeError('key must be of type %s' % Key)
self.key = key
self.limit = int(limit) if limit is not None else None
self.offset = int(offset)
self.offset_key = offset_key
self.filters = []
self.orders = []
if object_getattr:
self.object_getattr = object_getattr
def __str__(self):
"""Returns a string describing this query."""
return repr(self)
def __repr__(self):
"""Returns the representation of this query. Enables eval(repr(.))."""
return 'Query.from_dict(%s)' % self.dict()
def __call__(self, iterable):
"""Naively apply this query on an iterable of objects.
Applying a query applies filters, sorts by appropriate orders, and returns
a limited set.
WARNING: When orders are applied, this function operates on the entire set
of entities directly, not just iterators/generators. That means
the entire result set will be in memory. Datastores with large
objects and large query results should translate the Query and
perform their own optimizations.
"""
cursor = Cursor(self, iterable)
cursor.apply_filter()
cursor.apply_order()
cursor.apply_offset()
cursor.apply_limit()
return cursor
def order(self, order):
"""Adds an Order to this query.
Args:
see :py:class:`Order <datastore.query.Order>` constructor
Returns self for JS-like method chaining::
query.order('+age').order('-home')<|fim▁hole|>
# ensure order gets attr values the same way the rest of the query
# does.
order.object_getattr = self.object_getattr
self.orders.append(order)
return self # for chaining
def filter(self, *args):
"""Adds a Filter to this query.
Args:
see :py:class:`Filter <datastore.query.Filter>` constructor
Returns self for JS-like method chaining::
query.filter('age', '>', 18).filter('sex', '=', 'Female')
"""
if len(args) == 1 and isinstance(args[0], Filter):
filter = args[0]
else:
filter = Filter(*args)
# ensure filter gets attr values the same way the rest of the query
# does.
filter.object_getattr = self.object_getattr
self.filters.append(filter)
return self # for chaining
def __cmp__(self, other):
return cmp(self.dict(), other.dict())
def __hash__(self):
return hash(repr(self))
def copy(self):
"""Returns a copy of this query."""
if self.object_getattr is Query.object_getattr:
other = Query(self.key)
else:
other = Query(self.key, object_getattr=self.object_getattr)
other.limit = self.limit
other.offset = self.offset
other.offset_key = self.offset_key
other.filters = self.filters
other.orders = self.orders
return other
def dict(self):
"""Returns a dictionary representing this query."""
d = dict()
d['key'] = str(self.key)
if self.limit is not None:
d['limit'] = self.limit
if self.offset > 0:
d['offset'] = self.offset
if self.offset_key:
d['offset_key'] = str(self.offset_key)
if len(self.filters) > 0:
d['filter'] = [[f.field, f.op, f.value] for f in self.filters]
if len(self.orders) > 0:
d['order'] = [str(o) for o in self.orders]
return d
@classmethod
def from_dict(cls, dictionary):
"""Constructs a query from a dictionary."""
query = cls(Key(dictionary['key']))
for key, value in dictionary.items():
if key == 'order':
for order in value:
query.order(order)
elif key == 'filter':
for filter in value:
if not isinstance(filter, Filter):
filter = Filter(*filter)
query.filter(filter)
elif key in ['limit', 'offset', 'offset_key']:
setattr(query, key, value)
return query
def is_iterable(obj):
return hasattr(obj, '__iter__') or hasattr(obj, '__getitem__')
class Cursor(object):
"""Represents a query result generator."""
__slots__ = ('query', '_iterable', '_iterator', 'skipped', 'returned', )
def __init__(self, query, iterable):
if not isinstance(query, Query):
raise ValueError('Cursor received invalid query: %s' % query)
if not is_iterable(iterable):
raise ValueError('Cursor received invalid iterable: %s' % iterable)
self.query = query
self._iterable = iterable
self._iterator = None
self.returned = 0
self.skipped = 0
def __iter__(self):
"""The cursor itself is the iterator. Note that it cannot be used twice,
and once iteration starts, the cursor cannot be modified.
"""
if self._iterator:
raise RuntimeError('Attempt to iterate over Cursor twice.')
self._iterator = iter(self._iterable)
return self
def next(self):
"""Iterator next. Build up count of returned elements during iteration."""
# if iteration has not begun, begin it.
if not self._iterator:
self.__iter__()
next = self._iterator.next()
if next is not StopIteration:
self._returned_inc(next)
return next
def _skipped_inc(self, item):
"""A function to increment the skipped count."""
self.skipped += 1
def _returned_inc(self, item):
"""A function to increment the returned count."""
self.returned += 1
def _ensure_modification_is_safe(self):
"""Assertions to ensure modification of this Cursor is safe."""
assert self.query, 'Cursor must have a Query.'
assert is_iterable(
self._iterable), 'Cursor must have a resultset iterable.'
assert not self._iterator, 'Cursor must not be modified after iteration.'
def apply_filter(self):
"""Naively apply query filters."""
self._ensure_modification_is_safe()
if len(self.query.filters) > 0:
self._iterable = Filter.filter(self.query.filters, self._iterable)
def apply_order(self):
"""Naively apply query orders."""
self._ensure_modification_is_safe()
if len(self.query.orders) > 0:
self._iterable = Order.sorted(self._iterable, self.query.orders)
# not a generator :(
def apply_offset(self):
"""Naively apply query offset."""
self._ensure_modification_is_safe()
if self.query.offset != 0:
self._iterable = \
offset_gen(self.query.offset,
self._iterable, self._skipped_inc)
# _skipped_inc helps keep count of skipped elements
def apply_limit(self):
"""Naively apply query limit."""
self._ensure_modification_is_safe()
if self.query.limit is not None:
self._iterable = limit_gen(self.query.limit, self._iterable)<|fim▁end|>
|
"""
order = order if isinstance(order, Order) else Order(order)
|
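The docstrings in the row above describe how `Filter`, `Order`, and `Query` compose over plain dicts or attribute objects. A minimal end-to-end sketch, assuming the `query` module above and its companion `key` module are importable as shown (Python 2, since `Order.sorted` relies on the `cmp` keyword):

```python
from key import Key
from query import Query

actors = [
    {'name': 'John Cleese', 'age': 78},
    {'name': 'Eric Idle', 'age': 75},
    {'name': 'Graham Chapman', 'age': 48},
]

# Adults only, youngest first, at most two results.
q = Query(Key('/MontyPython/Actor:'), limit=2)
q.filter('age', '>=', 50).order('+age')

cursor = q(actors)       # a Cursor over the filtered, ordered, limited set
print(list(cursor))      # Eric Idle (75), then John Cleese (78)
print(cursor.returned)   # 2
```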
<|file_name|>test_with_httpretty.py<|end_file_name|><|fim▁begin|>import concurrent
from concurrent.futures._base import Future
import json
from threading import Barrier
import time
import unittest
import requests_mock
from rpcclient.client import RpcClient
from rpcclient.deserialize import DictDeserializer
from rpcclient.exceptions import RemoteFailedError
from rpcclient.handlers import RequestHandler
from rpcclient.test.testutils import insert_id, create_mock_rpc_client
UNMAPPED_BEHAVIOUR = DictDeserializer.UnmappedBehaviour
__author__ = '[email protected]'
class ClientTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.client = create_mock_rpc_client()
def test_login(self):
self.assertEqual(self.client.token, "yea")
@requests_mock.mock()
def test_get_first_level_method(self, mock):
mock.register_uri('POST', "http://server/api/", status_code=200, json=insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}}),
)
self.client.test(arg1="arg")
request = mock.request_history[-1].json()
self.assertRegex(request['jsonrpc'], '2.0')
self.assertRegex(request['method'], 'test')
self.assertIn('token', request['params'])
self.assertRegex(request['params']['token'], 'yea')
self.assertIn('arg1', request['params'])
self.assertRegex(request['params']['arg1'], 'arg')
@requests_mock.mock()
def test_get_second_level_method(self, mock):
mock.register_uri('POST', "http://server/api/", status_code=200, json=insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}}),
)
self.client.test.level2(arg1="arg")
request = mock.request_history[-1].json()
self.assertRegex(request['jsonrpc'], '2.0')
self.assertRegex(request['method'], 'test.level2')
self.assertIn('token', request['params'])
self.assertRegex(request['params']['token'], 'yea')
self.assertIn('arg1', request['params'])
self.assertRegex(request['params']['arg1'], 'arg')
@requests_mock.mock()
def test_async_request(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
start_time = time.time()
interval_time = 2
response = self.client.test.task(_sleep_interval=interval_time)
self.assertEqual(response, {"report": "success"})
self.assertGreater(time.time() - start_time, interval_time, "Expected request to wait between calls")
last_request = mock.request_history[-1].json()
self.assertIn('method', last_request)
self.assertRegex(last_request['method'], 'report.data.get')
self.assertIn('params', last_request)
self.assertIn('report_token', last_request['params'])
self.assertRegex(last_request['params']['report_token'], "08d7d7bc608848668b3afa6b528a45d8")
@requests_mock.mock()
def test_async_timeout(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
self.assertRaises(TimeoutError, self.client.test.task, _timeout=3, _sleep_interval=2)
@requests_mock.mock()
def test_async_timeout_from_configuration(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
self.client.configuration['timeout'] = 3
self.client.configuration['sleep_interval'] = 2
self.assertRaises(TimeoutError, self.client.test.task)
@requests_mock.mock()
def test_async_handler_ignores_single_failure_for_status(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
interval_time = 1
response = self.client.test.task(_sleep_interval=interval_time)
self.assertEqual(response, {"report": "success"})
def test_override_handlers(self):
called_with_params = {}
class MockHandler(RequestHandler):
def __init__(self, method, url, headers, token, configuration=None, **kwargs):
super().__init__(method, url, headers, token, configuration, **kwargs)
called_with_params['method'] = method
def handle(self, **kwargs):
return 'Mock value'
client = RpcClient(configuration={
'host': 'http://mockhost',
'handlers': [
(lambda *args, **kwargs: True, MockHandler)
],
'login': 'False token',
'username': '',
'password': '',
})
self.assertEqual(client.some.method(arg1='Argument'), 'Mock value')
self.assertEqual(called_with_params['method'], 'some.method')
self.assertEqual(client.token, 'False token')
@requests_mock.mock()
def test_async_can_run_in_different_thread(self, mock):
b = Barrier(2, timeout=5)
def block_response(response_dict):
def callback(request, context):
b.wait()
body = request.body
request_json = json.loads(body)
response_dict['id'] = request_json['id']
context.status_code = 200
return response_dict
return callback
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': block_response(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
response = self.client.test.task(_sleep_interval=0.5, _async=True)
b.wait()
self.assertIsInstance(response, Future)
self.assertTrue(response.running())
done, not_done = concurrent.futures.wait([response], timeout=5)
self.assertGreater(len(done), 0)
self.assertIsInstance(response.result(), dict)
@requests_mock.mock()
def test_return_result(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
response = self.client.test(arg1="arg")
self.assertEqual(response, {"report": "success"})
@requests_mock.mock()
def test_return_list_result(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": [1, 2, 3]})},
])
response = self.client.test(arg1="arg")
self.assertListEqual(response, [1, 2, 3])
@requests_mock.mock()
def test_raises_error_on_none_200(self, mock):
mock.register_uri('POST', "http://server/api/", json=insert_id({
"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}
}, status_code=500))
self.assertRaises(RemoteFailedError, self.client.test, arg1="arg1")
@requests_mock.mock()
def test_raises_error_on_response_error(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id({
"error": 1, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}
})}
])
self.assertRaises(RemoteFailedError, self.client.test, arg1="arg1")
@requests_mock.mock()
def test_raises_error_on_result_error(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id({
"error": None, "jsonrpc": "2.0", "id": {},
"result": {"error": "true"}
})}
])
self.assertRaises(RemoteFailedError, self.client.test, arg1="arg1")
class AutoDeserializationTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.client = create_mock_rpc_client()
@requests_mock.mock()
def test_deserializer_passed_in_method(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
response = self.client.test(_deserializer=result_deserializer)
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")
@requests_mock.mock()
def test_deserializer_given_in_dictionary(self, mock):
class Result(object):<|fim▁hole|> def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': {
'test': result_deserializer,
}
})
response = client.test()
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")
@requests_mock.mock()
def test_deserializer_given_in_dictionary_used_just_for_method(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': {
'test': result_deserializer,
}
})
response = client.test2()
self.assertNotIsInstance(response, Result)
self.assertEqual(response, {"report": "success"})
@requests_mock.mock()
def test_deserializer_from_factory(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': lambda method: result_deserializer if method == 'test' else None
})
response = client.test2()
self.assertNotIsInstance(response, Result)
self.assertEqual(response, {"report": "success"})
response = client.test()
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")
@requests_mock.mock()
def test_deserializer_global_from_conf(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': result_deserializer
})
response = client.test()
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")<|fim▁end|>
| |
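The async tests above all exercise the same wire protocol: the first response hands back a `report_token`, the client polls until `status` is `"ready"` (tolerating one malformed `{}` status along the way), and the final fetch goes through `report.data.get`. A minimal sketch of such a polling loop; `get_status` and `fetch_report` are hypothetical callables standing in for the client's own RPC calls, since only `report.data.get` is named in the tests:

```python
import time

def wait_for_report(get_status, fetch_report, sleep_interval=2, timeout=60):
    """Poll get_status() until it reports 'ready', then fetch the result."""
    deadline = time.time() + timeout
    while True:
        # .get() shrugs off responses with no 'status' key, matching the
        # single-failure tolerance the tests assert.
        if get_status().get('status') == 'ready':
            return fetch_report()
        if time.time() >= deadline:
            raise TimeoutError('report not ready within %ss' % timeout)
        time.sleep(sleep_interval)
```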
<|file_name|>useragent.go<|end_file_name|><|fim▁begin|>package dockerversion // import "github.com/tiborvass/docker/dockerversion"
import (
"context"
"fmt"
"runtime"
"github.com/tiborvass/docker/pkg/parsers/kernel"
"github.com/tiborvass/docker/pkg/useragent"
)
// UAStringKey is used as key type for user-agent string in net/context struct
type UAStringKey struct{}
// DockerUserAgent is the User-Agent the Docker client uses to identify itself.
// In accordance with RFC 7231 (5.5.3) is of the form:
// [docker client's UA] UpstreamClient([upstream client's UA])
func DockerUserAgent(ctx context.Context) string {
httpVersion := make([]useragent.VersionInfo, 0, 6)
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version})
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()})
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit})
if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()})
}
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS})
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH})
dockerUA := useragent.AppendVersions("", httpVersion...)
upstreamUA := getUserAgentFromContext(ctx)
if len(upstreamUA) > 0 {
ret := insertUpstreamUserAgent(upstreamUA, dockerUA)
return ret
}
return dockerUA
}
<|fim▁hole|>func getUserAgentFromContext(ctx context.Context) string {
var upstreamUA string
if ctx != nil {
var ki interface{} = ctx.Value(UAStringKey{})
if ki != nil {
upstreamUA = ctx.Value(UAStringKey{}).(string)
}
}
return upstreamUA
}
// escapeStr returns s with every rune in charsToEscape escaped by a backslash
func escapeStr(s string, charsToEscape string) string {
var ret string
for _, currRune := range s {
appended := false
for _, escapableRune := range charsToEscape {
if currRune == escapableRune {
ret += `\` + string(currRune)
appended = true
break
}
}
if !appended {
ret += string(currRune)
}
}
return ret
}
// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent
// string of the form:
// $dockerUA UpstreamClient($upstreamUA)
func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string {
charsToEscape := `();\`
upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape)
return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped)
}<|fim▁end|>
|
// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists
|
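The comments in the row above pin down the combined user-agent format: `$dockerUA UpstreamClient($upstreamUA)`, with `(`, `)`, `;`, and `\` backslash-escaped in the upstream string. A quick Python re-expression of that rule, purely to illustrate the expected output (the version numbers are made up; this is not part of the Docker codebase):

```python
def escape(s, chars='();\\'):
    # Mirrors escapeStr above: prefix every escapable rune with a backslash.
    return ''.join('\\' + c if c in chars else c for c in s)

docker_ua = 'docker/17.03 go/go1.7 os/linux arch/amd64'
upstream_ua = 'compose/1.11 (linux; amd64)'
print('%s UpstreamClient(%s)' % (docker_ua, escape(upstream_ua)))
# docker/17.03 go/go1.7 os/linux arch/amd64 UpstreamClient(compose/1.11 \(linux\; amd64\))
```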
<|file_name|>ThreadableLoader.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2009 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "core/loader/ThreadableLoader.h"
#include "core/dom/Document.h"
#include "core/dom/ExecutionContext.h"
#include "core/loader/DocumentThreadableLoader.h"
#include "core/loader/ThreadableLoaderClientWrapper.h"<|fim▁hole|>#include "core/loader/WorkerThreadableLoader.h"
#include "core/workers/WorkerGlobalScope.h"
#include "core/workers/WorkerThread.h"
namespace blink {
PassOwnPtr<ThreadableLoader> ThreadableLoader::create(ExecutionContext& context, ThreadableLoaderClient* client, const ThreadableLoaderOptions& options, const ResourceLoaderOptions& resourceLoaderOptions)
{
ASSERT(client);
if (context.isWorkerGlobalScope()) {
return WorkerThreadableLoader::create(toWorkerGlobalScope(context), client, options, resourceLoaderOptions);
}
return DocumentThreadableLoader::create(toDocument(context), client, options, resourceLoaderOptions);
}
void ThreadableLoader::loadResourceSynchronously(ExecutionContext& context, const ResourceRequest& request, ThreadableLoaderClient& client, const ThreadableLoaderOptions& options, const ResourceLoaderOptions& resourceLoaderOptions)
{
if (context.isWorkerGlobalScope()) {
WorkerThreadableLoader::loadResourceSynchronously(toWorkerGlobalScope(context), request, client, options, resourceLoaderOptions);
return;
}
DocumentThreadableLoader::loadResourceSynchronously(toDocument(context), request, client, options, resourceLoaderOptions);
}
} // namespace blink<|fim▁end|>
| |
<|file_name|>api.py<|end_file_name|><|fim▁begin|>from allocine import allocine
from constants import *
from elcinema import elcinema
from imdb import imdby as Imdb
from rotten import rotten
from tmdb import tmdb<|fim▁hole|>
def search(query, site):
if site == "imdb":
provider = Imdb()
elif site == "elcinema":
provider = elcinema()
elif site == "rottentomatoes":
provider = rotten(rotten_key)
elif site == "themoviedatabase":
provider = tmdb(tmdb_key)
elif site == "allocine":
provider = allocine()
results = provider.search(query)
return results
def info(movie_id, site):
if site == "imdb":
provider = Imdb()
elif site == "elcinema":
provider = elcinema()
elif site == "rottentomatoes":
provider = rotten(rotten_key)
elif site == "themoviedatabase":
provider = tmdb(tmdb_key)
elif site == "allocine":
provider = allocine()
result = provider.info(movie_id)
return result<|fim▁end|>
| |
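`search` and `info` above duplicate the same five-way provider dispatch, and an unrecognized `site` would fall through to an unbound `provider`. One way to collapse both problems, sketched under the assumption that the constructors keep the signatures used above (`rotten_key` and `tmdb_key` coming from `constants` as in the original imports):

```python
PROVIDERS = {
    'imdb': Imdb,
    'elcinema': elcinema,
    'rottentomatoes': lambda: rotten(rotten_key),
    'themoviedatabase': lambda: tmdb(tmdb_key),
    'allocine': allocine,
}

def _provider(site):
    try:
        return PROVIDERS[site]()  # instantiate lazily, one provider per call
    except KeyError:
        raise ValueError('unknown site: %s' % site)

def search(query, site):
    return _provider(site).search(query)

def info(movie_id, site):
    return _provider(site).info(movie_id)
```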
<|file_name|>types.py<|end_file_name|><|fim▁begin|>import os
import sys
import stat
from ._compat import open_stream, text_type, filename_to_ui, get_streerror
from .exceptions import BadParameter
from .utils import safecall, LazyFile
class ParamType(object):
"""Helper for converting values through types. The following is
necessary for a valid type:
* it needs a name
* it needs to pass through None unchanged
* it needs to convert from a string
* it needs to convert its result type through unchanged
(eg: needs to be idempotent)
* it needs to be able to deal with param and context being `None`.
This can be the case when the object is used with prompt
inputs.
"""
#: the descriptive name of this type
name = None
#: if a list of this type is expected and the value is pulled from a
#: string environment variable, this is what splits it up. `None`
#: means any whitespace. For all parameters the general rule is that
#: whitespace splits them up. The exception are paths and files which
#: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
#: Windows).
envvar_list_splitter = None
def __call__(self, value, param=None, ctx=None):
if value is not None:
return self.convert(value, param, ctx)
def get_metavar(self, param):
"""Returns the metavar default for this param if it provides one."""
def get_missing_message(self, param):
"""Optionally might return extra information about a missing
parameter.
.. versionadded:: 2.0
"""
def convert(self, value, param, ctx):
"""Converts the value. This is not invoked for values that are
`None` (the missing value).
"""
return value
def split_envvar_value(self, rv):
"""Given a value from an environment variable this splits it up
into small chunks depending on the defined envvar list splitter.
If the splitter is set to `None`, which means that whitespace splits,
then leading and trailing whitespace is ignored. Otherwise, leading
and trailing splitters usually lead to empty items being included.
"""
return (rv or '').split(self.envvar_list_splitter)
def fail(self, message, param=None, ctx=None):
"""Helper method to fail with an invalid value message."""
raise BadParameter(message, ctx=ctx, param=param)
class FuncParamType(ParamType):
def __init__(self, func):
self.name = func.__name__
self.func = func
def convert(self, value, param, ctx):
try:
return self.func(value)
except ValueError:
try:
value = text_type(value)
except UnicodeError:
value = str(value).decode('utf-8', 'replace')
self.fail(value, param, ctx)
class StringParamType(ParamType):
name = 'text'
def convert(self, value, param, ctx):
if isinstance(value, bytes):
try:
enc = getattr(sys.stdin, 'encoding', None)
if enc is not None:
value = value.decode(enc)
except UnicodeError:
try:
value = value.decode(sys.getfilesystemencoding())
except UnicodeError:
value = value.decode('utf-8', 'replace')
return value
return value
def __repr__(self):
return 'STRING'
class Choice(ParamType):
"""The choice type allows a value to checked against a fixed set of
supported values. All of these values have to be strings.
See :ref:`choice-opts` for an example.
"""
name = 'choice'
def __init__(self, choices):
self.choices = choices
def get_metavar(self, param):
return '[%s]' % '|'.join(self.choices)
def get_missing_message(self, param):
return 'Choose from %s.' % ', '.join(self.choices)
def convert(self, value, param, ctx):
# Exact match
if value in self.choices:
return value
# Match through normalization
if ctx is not None and \
ctx.token_normalize_func is not None:
value = ctx.token_normalize_func(value)
for choice in self.choices:
if ctx.token_normalize_func(choice) == value:
return choice
self.fail('invalid choice: %s. (choose from %s)' %
(value, ', '.join(self.choices)), param, ctx)
def __repr__(self):
return 'Choice(%r)' % list(self.choices)
class IntParamType(ParamType):
name = 'integer'
def convert(self, value, param, ctx):
try:
return int(value)
except ValueError:
self.fail('%s is not a valid integer' % value, param, ctx)
def __repr__(self):
return 'INT'
class IntRange(IntParamType):
"""A parameter that works similar to :data:`click.INT` but restricts
the value to fit into a range. The default behavior is to fail if the
value falls outside the range, but it can also be silently clamped
between the two edges.
See :ref:`ranges` for an example.
"""
name = 'integer range'
def __init__(self, min=None, max=None, clamp=False):
self.min = min
self.max = max
self.clamp = clamp
def convert(self, value, param, ctx):
rv = IntParamType.convert(self, value, param, ctx)
if self.clamp:
if self.min is not None and rv < self.min:
return self.min
if self.max is not None and rv > self.max:
return self.max
if self.min is not None and rv < self.min or \
self.max is not None and rv > self.max:
if self.min is None:
self.fail('%s is bigger than the maximum valid value '
'%s.' % (rv, self.max), param, ctx)
elif self.max is None:
self.fail('%s is smaller than the minimum valid value '
'%s.' % (rv, self.min), param, ctx)
else:
self.fail('%s is not in the valid range of %s to %s.'
% (rv, self.min, self.max), param, ctx)
return rv
def __repr__(self):
return 'IntRange(%r, %r)' % (self.min, self.max)
class BoolParamType(ParamType):
name = 'boolean'
def convert(self, value, param, ctx):
if isinstance(value, bool):
return bool(value)
value = value.lower()
if value in ('true', '1', 'yes', 'y'):
return True
elif value in ('false', '0', 'no', 'n'):
return False
self.fail('%s is not a valid boolean' % value, param, ctx)
def __repr__(self):
return 'BOOL'
class FloatParamType(ParamType):
name = 'float'
def convert(self, value, param, ctx):
try:
return float(value)
except ValueError:
self.fail('%s is not a valid floating point value' %
value, param, ctx)
def __repr__(self):
return 'FLOAT'
class UUIDParameterType(ParamType):
name = 'uuid'
def convert(self, value, param, ctx):
import uuid
try:
return uuid.UUID(value)
except ValueError:
self.fail('%s is not a valid UUID value' % value, param, ctx)
def __repr__(self):
return 'UUID'
class File(ParamType):
"""Declares a parameter to be a file for reading or writing. The file
is automatically closed once the context tears down (after the command
finished working).
Files can be opened for reading or writing. The special value ``-``
indicates stdin or stdout depending on the mode.
By default, the file is opened for reading text data, but it can also be
opened in binary mode or for writing. The encoding parameter can be used
to force a specific encoding.
The `lazy` flag controls if the file should be opened immediately or
upon first IO. The default is to be non lazy for standard input and
output streams as well as files opened for reading, lazy otherwise.
Starting with Click 2.0, files can also be opened atomically in which
case all writes go into a separate file in the same folder and upon
completion the file will be moved over to the original location. This
is useful if a file regularly read by other users is modified.
See :ref:`file-args` for more information.
"""
name = 'filename'
envvar_list_splitter = os.path.pathsep
def __init__(self, mode='r', encoding=None, errors='strict', lazy=None,
atomic=False):
self.mode = mode
self.encoding = encoding
self.errors = errors
self.lazy = lazy
self.atomic = atomic
def resolve_lazy_flag(self, value):
if self.lazy is not None:
return self.lazy
if value == '-':
return False
elif 'w' in self.mode:
return True
return False
def convert(self, value, param, ctx):
try:
if hasattr(value, 'read') or hasattr(value, 'write'):
return value
lazy = self.resolve_lazy_flag(value)
if lazy:
f = LazyFile(value, self.mode, self.encoding, self.errors,
atomic=self.atomic)
if ctx is not None:
ctx.call_on_close(f.close_intelligently)
return f
f, should_close = open_stream(value, self.mode,
self.encoding, self.errors,
atomic=self.atomic)
# If a context is provided, we automatically close the file
# at the end of the context execution (or flush out). If a
# context does not exist, it's the caller's responsibility to
# properly close the file. This for instance happens when the
# type is used with prompts.
if ctx is not None:
if should_close:
ctx.call_on_close(safecall(f.close))
else:
ctx.call_on_close(safecall(f.flush))
return f
except (IOError, OSError) as e:
self.fail('Could not open file: %s: %s' % (
filename_to_ui(value),
get_streerror(e),
), param, ctx)
class Path(ParamType):
"""The path type is similar to the :class:`File` type but it performs
different checks. First of all, instead of returning a open file
handle it returns just the filename. Secondly, it can perform various
basic checks about what the file or directory should be.
:param exists: if set to true, the file or directory needs to exist for
this value to be valid. If this is not required and a
file does indeed not exist, then all further checks are
silently skipped.
:param file_okay: controls if a file is a possible value.
:param dir_okay: controls if a directory is a possible value.
:param writable: if true, a writable check is performed.
:param readable: if true, a readable check is performed.
:param resolve_path: if this is true, then the path is fully resolved
before the value is passed onwards. This means
that it's absolute and symlinks are resolved.
"""
envvar_list_splitter = os.path.pathsep
def __init__(self, exists=False, file_okay=True, dir_okay=True,
writable=False, readable=True, resolve_path=False):
self.exists = exists
self.file_okay = file_okay
self.dir_okay = dir_okay
self.writable = writable
self.readable = readable
self.resolve_path = resolve_path
if self.file_okay and not self.dir_okay:
self.name = 'file'
self.path_type = 'File'
elif self.dir_okay and not self.file_okay:
self.name = 'directory'
self.path_type = 'Directory'
else:
self.name = 'path'
self.path_type = 'Path'
def convert(self, value, param, ctx):
rv = value
if self.resolve_path:
rv = os.path.realpath(rv)
try:
st = os.stat(rv)
except OSError:
if not self.exists:
return rv
self.fail('%s "%s" does not exist.' % (
self.path_type,
filename_to_ui(value)
), param, ctx)
if not self.file_okay and stat.S_ISREG(st.st_mode):
self.fail('%s "%s" is a file.' % (
self.path_type,
filename_to_ui(value)
), param, ctx)
if not self.dir_okay and stat.S_ISDIR(st.st_mode):
self.fail('%s "%s" is a directory.' % (
self.path_type,
filename_to_ui(value)
), param, ctx)
if self.writable and not os.access(value, os.W_OK):
self.fail('%s "%s" is not writable.' % (
self.path_type,
filename_to_ui(value)
), param, ctx)
if self.readable and not os.access(value, os.R_OK):
self.fail('%s "%s" is not readable.' % (
self.path_type,
filename_to_ui(value)
), param, ctx)
<|fim▁hole|> return rv
def convert_type(ty, default=None):
"""Converts a callable or python ty into the most appropriate param
ty.
"""
if isinstance(ty, ParamType):
return ty
guessed_type = False
if ty is None and default is not None:
ty = type(default)
guessed_type = True
if ty is text_type or ty is str or ty is None:
return STRING
if ty is int:
return INT
# Booleans are only okay if not guessed. This is done because for
# flags the default value is actually a bit of a lie in that it
# indicates which of the flags is the one we want. See get_default()
# for more information.
if ty is bool and not guessed_type:
return BOOL
if ty is float:
return FLOAT
if guessed_type:
return STRING
# Catch a common mistake
if __debug__:
try:
if issubclass(ty, ParamType):
raise AssertionError('Attempted to use an uninstantiated '
'parameter type (%s).' % ty)
except TypeError:
pass
return FuncParamType(ty)
#: A unicode string parameter type which is the implicit default. This
#: can also be selected by using ``str`` as type.
STRING = StringParamType()
#: An integer parameter. This can also be selected by using ``int`` as
#: type.
INT = IntParamType()
#: A floating point value parameter. This can also be selected by using
#: ``float`` as type.
FLOAT = FloatParamType()
#: A boolean parameter. This is the default for boolean flags. This can
#: also be selected by using ``bool`` as a type.
BOOL = BoolParamType()
#: A UUID parameter.
UUID = UUIDParameterType()<|fim▁end|>
| |
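The `ParamType` docstring in the row above spells out the contract a custom type must honor: carry a `name`, pass `None` through, convert from strings, and stay idempotent on already-converted values. A small sketch of a conforming type next to the built-ins defined in that module, using click's public decorator API (assuming the package is installed as `click`):

```python
import click

class CommaList(click.ParamType):
    """Converts 'a,b,c' into ['a', 'b', 'c']; passes lists through unchanged."""
    name = 'commalist'

    def convert(self, value, param, ctx):
        if isinstance(value, list):  # idempotent: already converted
            return value
        return [part.strip() for part in value.split(',')]

@click.command()
@click.option('--retries', type=click.IntRange(0, 10), default=3)
@click.option('--mode', type=click.Choice(['fast', 'safe']), default='fast')
@click.option('--tags', type=CommaList(), default='a,b')
def run(retries, mode, tags):
    click.echo('%s %s %s' % (retries, mode, tags))
```

Note that `convert` is never invoked for a missing value: `ParamType.__call__` above only dispatches when `value is not None`.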
<|file_name|>RealData.java<|end_file_name|><|fim▁begin|>package com.flying.promotion.javatuning.future.jdk;
import java.util.concurrent.Callable;<|fim▁hole|> * Created by Joseph on 7/25/2016.
*/
public class RealData implements Callable<String>{
private String para;
public RealData(String para){
this.para=para;
}
@Override
public String call() throws Exception {
StringBuffer sb=new StringBuffer();
for (int i = 0; i < 10; i++) {
sb.append(para);
try {
Thread.sleep(100);
} catch (InterruptedException e) {
}
}
return sb.toString();
}
}<|fim▁end|>
|
/**
|
<|file_name|>browserify-zlib.d.ts<|end_file_name|><|fim▁begin|>/// <reference path='../typings/browser.d.ts' />
// TypeScript definitions for browserify-zlib:
// https://github.com/devongovett/browserify-zlib
//
// Note that this is a tiny fraction of available
// methods; for a reference, see the Node.js zlib
// documentation.
declare module 'browserify-zlib' {
function gzipSync(buffer:Buffer) : Buffer;<|fim▁hole|><|fim▁end|>
|
function gunzipSync(buffer:Buffer) : Buffer;
}
|
<|file_name|>test_protocol.py<|end_file_name|><|fim▁begin|># Copyright 2014 CERN.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from openstackclient.identity.v3 import federation_protocol
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
class TestProtocol(identity_fakes.TestFederatedIdentity):
def setUp(self):
super(TestProtocol, self).setUp()
federation_lib = self.app.client_manager.identity.federation
self.protocols_mock = federation_lib.protocols
self.protocols_mock.reset_mock()
class TestProtocolCreate(TestProtocol):
def setUp(self):
super(TestProtocolCreate, self).setUp()
proto = copy.deepcopy(identity_fakes.PROTOCOL_OUTPUT)
resource = fakes.FakeResource(None, proto, loaded=True)
self.protocols_mock.create.return_value = resource
self.cmd = federation_protocol.CreateProtocol(self.app, None)
def test_create_protocol(self):
argslist = [
identity_fakes.protocol_id,
'--identity-provider', identity_fakes.idp_id,
'--mapping', identity_fakes.mapping_id
]
verifylist = [
('federation_protocol', identity_fakes.protocol_id),
('identity_provider', identity_fakes.idp_id),
('mapping', identity_fakes.mapping_id)
]
parsed_args = self.check_parser(self.cmd, argslist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.protocols_mock.create.assert_called_with(
protocol_id=identity_fakes.protocol_id,
identity_provider=identity_fakes.idp_id,
mapping=identity_fakes.mapping_id)
collist = ('id', 'identity_provider', 'mapping')
self.assertEqual(collist, columns)
datalist = (identity_fakes.protocol_id,
identity_fakes.idp_id,
identity_fakes.mapping_id)
self.assertEqual(datalist, data)
class TestProtocolDelete(TestProtocol):
def setUp(self):
super(TestProtocolDelete, self).setUp()
# This is the return value for utils.find_resource()
self.protocols_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROTOCOL_OUTPUT),
loaded=True,
)
self.protocols_mock.delete.return_value = None
self.cmd = federation_protocol.DeleteProtocol(self.app, None)
def test_delete_identity_provider(self):
arglist = [
'--identity-provider', identity_fakes.idp_id,
identity_fakes.protocol_id
]
verifylist = [
('federation_protocol', [identity_fakes.protocol_id]),
('identity_provider', identity_fakes.idp_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.protocols_mock.delete.assert_called_with(
identity_fakes.idp_id, identity_fakes.protocol_id)
self.assertIsNone(result)
class TestProtocolList(TestProtocol):
def setUp(self):
super(TestProtocolList, self).setUp()
self.protocols_mock.get.return_value = fakes.FakeResource(
None, identity_fakes.PROTOCOL_ID_MAPPING, loaded=True)
self.protocols_mock.list.return_value = [fakes.FakeResource(
None, identity_fakes.PROTOCOL_ID_MAPPING, loaded=True)]
self.cmd = federation_protocol.ListProtocols(self.app, None)
def test_list_protocols(self):
arglist = ['--identity-provider', identity_fakes.idp_id]
verifylist = [('identity_provider', identity_fakes.idp_id)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.protocols_mock.list.assert_called_with(identity_fakes.idp_id)
class TestProtocolSet(TestProtocol):
def setUp(self):
super(TestProtocolSet, self).setUp()
self.protocols_mock.get.return_value = fakes.FakeResource(
None, identity_fakes.PROTOCOL_OUTPUT, loaded=True)
self.protocols_mock.update.return_value = fakes.FakeResource(
None, identity_fakes.PROTOCOL_OUTPUT_UPDATED, loaded=True)
self.cmd = federation_protocol.SetProtocol(self.app, None)
def test_set_new_mapping(self):
arglist = [
identity_fakes.protocol_id,
'--identity-provider', identity_fakes.idp_id,
'--mapping', identity_fakes.mapping_id
]
verifylist = [('identity_provider', identity_fakes.idp_id),
('federation_protocol', identity_fakes.protocol_id),
('mapping', identity_fakes.mapping_id)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.protocols_mock.update.assert_called_with(
identity_fakes.idp_id, identity_fakes.protocol_id,
identity_fakes.mapping_id)
collist = ('id', 'identity_provider', 'mapping')
self.assertEqual(collist, columns)
datalist = (identity_fakes.protocol_id, identity_fakes.idp_id,
identity_fakes.mapping_id_updated)
self.assertEqual(datalist, data)
class TestProtocolShow(TestProtocol):
def setUp(self):
super(TestProtocolShow, self).setUp()
self.protocols_mock.get.return_value = fakes.FakeResource(
None, identity_fakes.PROTOCOL_OUTPUT, loaded=False)
self.cmd = federation_protocol.ShowProtocol(self.app, None)
<|fim▁hole|> def test_show_protocol(self):
arglist = [identity_fakes.protocol_id, '--identity-provider',
identity_fakes.idp_id]
verifylist = [('federation_protocol', identity_fakes.protocol_id),
('identity_provider', identity_fakes.idp_id)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.protocols_mock.get.assert_called_with(identity_fakes.idp_id,
identity_fakes.protocol_id)
collist = ('id', 'identity_provider', 'mapping')
self.assertEqual(collist, columns)
datalist = (identity_fakes.protocol_id,
identity_fakes.idp_id,
identity_fakes.mapping_id)
self.assertEqual(datalist, data)<|fim▁end|>
| |
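The tests above all follow one shape: parse argv with check_parser, run take_action, then assert on the mocked federation client. The core of that mock-verification idiom, reduced to plain unittest.mock (a sketch independent of openstackclient; the names are illustrative):

from unittest import mock

protocols = mock.Mock()
protocols.create.return_value = {'id': 'mapped'}

# Code under test would call the client like this:
protocols.create(protocol_id='saml', identity_provider='idp1',
                 mapping='map1')

# The test then verifies the exact call that was made.
protocols.create.assert_called_with(
    protocol_id='saml', identity_provider='idp1', mapping='map1')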
<|file_name|>IRPredictor.java<|end_file_name|><|fim▁begin|><|fim▁hole|> *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.java.jinterval.ir;
import com.cflex.util.lpSolve.LpConstant;
import com.cflex.util.lpSolve.LpModel;
import com.cflex.util.lpSolve.LpSolver;
import java.util.ArrayList;
//import org.apache.commons.math3.exception.MathIllegalArgumentException;
//TODO: Add exceptions and throws
public class IRPredictor {
private double[][] X;
private double[] Y;
private double[] E;
private boolean dataAreGiven;
// Number of observations
private int ObsNumber;
// Number of variables
private int VarNumber;
// Storage for predictions
private double PredictionMin;
private double PredictionMax;
private double[] PredictionMins;
private double[] PredictionMaxs;
// Last error
int ExitCode;
public IRPredictor(){
ObsNumber = 0;
VarNumber = 0;
dataAreGiven = false;
}
public IRPredictor(double[][] X, double[] Y, double[] E){
setData(X,Y,E);
}
public final void setData(double[][] X, double[] Y, double[] E)
// throws IllegalDimensionException
{
// if(X.length != Y.length || X.length != E.length)
// throw IllegalDimensionException;
this.X=X;
this.Y=Y;
this.E=E;
ObsNumber = X.length;
VarNumber = X[0].length;
dataAreGiven = true;
}
public int getExitCode() {
return ExitCode;
}
private boolean solveLpp(double[] Objective)
// throws IllegalDimensionException
{
if (Objective.length != VarNumber){
// throw IllegalDimensionException;
ExitCode = -1;
return false;
}
try {
// Init LP Solver
LpModel Lpp = new LpModel(0, VarNumber);
// Define LPP
double[] zObjective = new double[VarNumber+1];
System.arraycopy(Objective, 0, zObjective, 1, VarNumber);
Lpp.setObjFn(zObjective);
double[] zX=new double[VarNumber+1];
for (int i=0; i<ObsNumber; i++) {
System.arraycopy(X[i], 0, zX, 1, VarNumber);
Lpp.addConstraint(zX, LpConstant.LE, Y[i]+E[i]);
Lpp.addConstraint(zX, LpConstant.GE, Y[i]-E[i]);
// Solver.add_constraint(Lpp, zX, constant.LE, Y[i]+E[i]);
// Solver.add_constraint(Lpp, zX, constant.GE, Y[i]-E[i]);
}
//Solver.set_minim(Lpp);
//Lpp.setMinimum();
LpSolver Solver = new LpSolver(Lpp);
ExitCode = Solver.solve();
// ExitCode = Solver.solve(Lpp);
switch ( ExitCode ) {
case LpConstant.OPTIMAL:
PredictionMin = Lpp.getBestSolution(0);
break;
case LpConstant.INFEASIBLE:
//throw InfeasibleException
case LpConstant.UNBOUNDED:
//throw UnboundedException
}
// Solver.set_maxim(Lpp);
Lpp.setMaximum();
ExitCode = Solver.solve();
switch ( ExitCode ) {
case LpConstant.OPTIMAL:
PredictionMax = Lpp.getBestSolution(0);
break;
case LpConstant.INFEASIBLE:
//throw InfeasibleException
case LpConstant.UNBOUNDED:
//throw UnboundedException
}
} catch (Exception e){
//e.printStackTrace();
}
return ExitCode == LpConstant.OPTIMAL;
}
public boolean isDataConsistent(){
return solveLpp(X[0]);
}
public void compressData(){
}
public boolean predictAt(double[] x){
return solveLpp(x);
}
public boolean predictAtEveryDataPoint(){
PredictionMins = new double[ObsNumber];
PredictionMaxs = new double[ObsNumber];
boolean Solved = true;
for (int i=0; i<ObsNumber; i++){
Solved = Solved && predictAt(X[i]);
if(!Solved) {
break;
}
PredictionMins[i] = getMin();
PredictionMaxs[i] = getMax();
}
return Solved;
}
public double getMin(){
return PredictionMin;
}
public double getMax(){
return PredictionMax;
}
public double getMin(int i) {
return PredictionMins[i];
}
public double getMax(int i) {
return PredictionMaxs[i];
}
public double[] getMins() {
return PredictionMins;
}
public double[] getMaxs() {
return PredictionMaxs;
}
public double[] getResiduals(){
//Residuals=(y-(vmax+vmin)/2)/beta
double v;
double[] residuals = new double[ObsNumber];
for(int i=0; i<ObsNumber; i++) {
v = (PredictionMins[i]+PredictionMaxs[i])/2;
residuals[i] = (Y[i]-v)/E[i];
}
return residuals;
}
public double[] getLeverages(){
//Leverage=((vmax-vmin)/2)/beta
double v;
double[] leverages = new double[ObsNumber];
for(int i=0; i<ObsNumber; i++) {
v = (PredictionMaxs[i]-PredictionMins[i])/2;
leverages[i] = v/E[i];
}
return leverages;
}
public int[] getBoundary(){
final double EPSILON = 1.0e-6;
ArrayList<Integer> boundary = new ArrayList<Integer>();
double yp, ym, vp, vm;
for (int i=0; i<ObsNumber; i++){
yp = Y[i]+E[i];
vp = PredictionMaxs[i];
ym = Y[i]-E[i];
vm = PredictionMins[i];
if ( Math.abs(yp - vp) < EPSILON || Math.abs(ym - vm) < EPSILON ) {
boundary.add(1);
}
else {
boundary.add(0);
}
}
int[] a_boundary = new int[boundary.size()];
for (int i=0; i<a_boundary.length; i++){
a_boundary[i] = boundary.get(i).intValue();
}
return a_boundary;
}
public int[] getBoundaryNumbers(){
int Count = 0;
int[] boundary = getBoundary();
for (int i=0; i<boundary.length; i++){
if(boundary[i] == 1) {
Count++;
}
}
int j = 0;
int[] numbers = new int[Count];
for (int i=0; i<boundary.length; i++){
if(boundary[i] == 1) {
numbers[j++] = i;
}
}
return numbers;
}
//TODO: Implement getOutliers()
// public int[] getOutliers(){
//
// }
//TODO: Implement getOutliersNumbers()
// public int[] getOutliersNumbers(){
//
// }
public double[] getOutliersWeights(){
double[] outliers = new double[ObsNumber];
for(int i=0; i<ObsNumber; i++) {
outliers[i]=0;
}
try {
LpModel Lpp = new LpModel(0, ObsNumber+VarNumber);
// Build and set objective of LPP
double[] zObjective = new double[ObsNumber+VarNumber+1];
for(int i=1;i<=VarNumber; i++) {
zObjective[i] = 0;
}
for(int i=1;i<=ObsNumber; i++) {
zObjective[VarNumber+i] = 1;
}
Lpp.setObjFn(zObjective);
//Solver.set_minim(Lpp);
// Build and set constraints of LPP
double[] Row = new double[ObsNumber+VarNumber+1];
for (int i=0; i<ObsNumber; i++) {
for (int j=1; j<=VarNumber; j++) {
Row[j]=X[i][j-1];
}
for(int j=1; j<=ObsNumber; j++) {
Row[VarNumber+j] = 0;
}
Row[VarNumber+i+1] = -E[i];
// Solver.add_constraint(Lpp, Row, constant.LE, Y[i]);
Lpp.addConstraint(Row, LpConstant.LE, Y[i]);
Row[VarNumber+i+1] = E[i];
// Solver.add_constraint(Lpp, Row, constant.GE, Y[i]);
Lpp.addConstraint(Row, LpConstant.GE, Y[i]);
for (int j=1; j<=ObsNumber+VarNumber; j++) {
Row[j] = 0;
}
Row[VarNumber+i+1] = 1;
// Solver.add_constraint(Lpp, Row, constant.GE, 1);
Lpp.addConstraint(Row, LpConstant.GE, 1);
}
// Solve LPP and get outliers' weights
LpSolver Solver = new LpSolver(Lpp);
ExitCode = Solver.solve();
for(int i = 0; i < ObsNumber; i++) {
outliers[i] = Lpp.getBestSolution(Lpp.getRows()+VarNumber+i+1);
}
} catch(Exception e){
//e.printStackTrace();
}
return outliers;
}
}<|fim▁end|>
|
/*
* Copyright (c) 2012, JInterval Project.
* All rights reserved.
|
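The residual and leverage comments in IRPredictor encode a simple idea: given interval predictions [vmin, vmax] and measurement half-width beta (E[i]), the residual is the distance of y from the interval midpoint in beta units, and the leverage is the interval half-width in beta units. Worked out numerically (an illustrative sketch of the same formulas):

def residuals_and_leverages(y, e, vmin, vmax):
    res, lev = [], []
    for yi, ei, lo, hi in zip(y, e, vmin, vmax):
        mid = (lo + hi) / 2.0        # interval midpoint
        half = (hi - lo) / 2.0       # interval half-width
        res.append((yi - mid) / ei)  # Residuals=(y-(vmax+vmin)/2)/beta
        lev.append(half / ei)        # Leverage=((vmax-vmin)/2)/beta
    return res, lev

res, lev = residuals_and_leverages([1.0], [0.5], [0.4], [1.2])
assert res == [0.4] and lev == [0.8]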
<|file_name|>BinaryMain.cpp<|end_file_name|><|fim▁begin|><|fim▁hole|>
int main(int argc, char* argv[])
{
BinaryLLHeap <int> heap;
heap.insert(5);
return 0;
}<|fim▁end|>
|
#include "BinaryLLHeap.h"
|
<|file_name|>HighCardDictDimensionIndexCodec.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.core.datastore.page.encoding.dimension.legacy;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.carbondata.core.datastore.columnar.BlockIndexerStorage;
import org.apache.carbondata.core.datastore.columnar.BlockIndexerStorageForNoInvertedIndexForShort;
import org.apache.carbondata.core.datastore.columnar.BlockIndexerStorageForShort;
import org.apache.carbondata.core.datastore.compression.Compressor;
import org.apache.carbondata.core.datastore.compression.CompressorFactory;
import org.apache.carbondata.core.datastore.page.ColumnPage;
import org.apache.carbondata.core.datastore.page.encoding.ColumnPageEncoder;
import org.apache.carbondata.core.util.ByteUtil;
import org.apache.carbondata.format.Encoding;
public class HighCardDictDimensionIndexCodec extends IndexStorageCodec {
/**
* whether this column is varchar data type(long string)
*/
private boolean isVarcharType;
public HighCardDictDimensionIndexCodec(boolean isSort, boolean isInvertedIndex,
boolean isVarcharType) {
super(isSort, isInvertedIndex);
this.isVarcharType = isVarcharType;
}
@Override
public String getName() {
return "HighCardDictDimensionIndexCodec";<|fim▁hole|> return new IndexStorageEncoder() {
@Override
protected void encodeIndexStorage(ColumnPage input) {
BlockIndexerStorage<byte[][]> indexStorage;
byte[][] data = input.getByteArrayPage();
boolean isDictionary = input.isLocalDictGeneratedPage();
if (isInvertedIndex) {
indexStorage = new BlockIndexerStorageForShort(data, isDictionary, !isDictionary, isSort);
} else {
indexStorage =
new BlockIndexerStorageForNoInvertedIndexForShort(data, isDictionary);
}
byte[] flattened = ByteUtil.flatten(indexStorage.getDataPage());
Compressor compressor = CompressorFactory.getInstance().getCompressor(
input.getColumnCompressorName());
super.compressedDataPage = compressor.compressByte(flattened);
super.indexStorage = indexStorage;
}
@Override
protected List<Encoding> getEncodingList() {
List<Encoding> encodings = new ArrayList<>();
if (isVarcharType) {
encodings.add(Encoding.DIRECT_COMPRESS_VARCHAR);
} else if (indexStorage.getRowIdPageLengthInBytes() > 0) {
encodings.add(Encoding.INVERTED_INDEX);
}
if (indexStorage.getDataRlePageLengthInBytes() > 0) {
encodings.add(Encoding.RLE);
}
return encodings;
}
};
}
}<|fim▁end|>
|
}
@Override
public ColumnPageEncoder createEncoder(Map<String, String> parameter) {
|
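getEncodingList above advertises the RLE encoding only when the run-length-encoded page is non-empty. What run-length encoding buys on repetitive dictionary codes, in miniature (an illustrative sketch, not CarbonData's on-disk format):

def rle_encode(values):
    # Collapse runs of equal values into [value, run_length] pairs.
    runs = []
    for v in values:
        if runs and runs[-1][0] == v:
            runs[-1][1] += 1
        else:
            runs.append([v, 1])
    return runs

assert rle_encode([7, 7, 7, 3, 3, 9]) == [[7, 3], [3, 2], [9, 1]]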
<|file_name|>control_detail.component.html.ts<|end_file_name|><|fim▁begin|>export const HTML_TEMPLATE = `<|fim▁hole|> </form>
</div>
`;<|fim▁end|>
|
<div class="navbar navbar-default" role="navigation">
<h1 class="navbar-form pull-left" style="display: inline;">Order {{ id }}</h1>
<form class="navbar-form pull-right">
<button class="btn btn-primary" (click)="back()">Back to overview</button>
|
<|file_name|>test_crashstorage.py<|end_file_name|><|fim▁begin|># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import time
import json
from contextlib import nested
import mock
from nose.tools import eq_, ok_, assert_raises
from configman import ConfigurationManager
<|fim▁hole|> CrashIDNotFound,
Redactor,
MemoryDumpsMapping
)
from socorro.external.hbase.crashstorage import HBaseCrashStorage
from socorro.external.hbase.connection_context import \
HBaseConnectionContextPooled
from socorro.lib.util import DotDict
from socorro.unittest.config import commonconfig
from socorro.database.transaction_executor import (
TransactionExecutorWithLimitedBackoff
)
from socorro.unittest.testbase import TestCase
class SomeThriftError(Exception):
pass
_run_integration_tests = os.environ.get('RUN_HBASE_INTEGRATION_TESTS', False)
if _run_integration_tests in ('false', 'False', 'no', '0'):
_run_integration_tests = False
if not _run_integration_tests:
import logging
logging.warning("Skipping HBase integration tests")
else:
class TestIntegrationHBaseCrashStorage(TestCase):
"""
If you ever get this::
Traceback (most recent call last):
...
socorro.external.hbase.hbase_client.FatalException: the connection
is not viable. retries fail:
Then try the following:
/etc/init.d/hadoop-hbase-master restart
/etc/init.d/hadoop-hbase-thrift restart
Also, you can look in /var/log/hbase for clues.
Still not working, try:
hbase shell
> describe 'crash_reports'
and keep an eye on the logs.
"""
def tearDown(self):
super(TestIntegrationHBaseCrashStorage, self).tearDown()
self._truncate_hbase_table()
def _truncate_hbase_table(self):
connection = hbase_client.HBaseConnectionForCrashReports(
commonconfig.hbaseHost.default,
commonconfig.hbasePort.default,
100
)
for row in connection.merge_scan_with_prefix(
'crash_reports', '', ['ids:ooid']):
index_row_key = row['_rowkey']
connection.client.deleteAllRow(
'crash_reports', index_row_key)
# because of HBase's async nature, deleting can take time
list(connection.iterator_for_all_legacy_to_be_processed())
def test_basic_hbase_crashstorage(self):
mock_logging = mock.Mock()
required_config = HBaseCrashStorage.required_config
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
'hbase_timeout': 100,
'hbase_host': commonconfig.hbaseHost.default,
'hbase_port': commonconfig.hbasePort.default,
}],
argv_source=[]
)
with config_manager.context() as config:
crashstorage = HBaseCrashStorage(config)
eq_(list(crashstorage.new_crashes()), [])
crash_id = '86b58ff2-9708-487d-bfc4-9dac32121214'
raw = ('{"name":"Peter", '
'"submitted_timestamp":"%d"}' % time.time())
fake_raw_dump_1 = 'peter is a swede'
fake_raw_dump_2 = 'lars is a norseman'
fake_raw_dump_3 = 'adrian is a frenchman'
fake_dumps = MemoryDumpsMapping({
'upload_file_minidump': fake_raw_dump_1,
'lars': fake_raw_dump_2,
'adrian': fake_raw_dump_3
})
crashstorage.save_raw_crash(json.loads(raw),
fake_dumps,
crash_id)
assert config.logger.info.called
assert config.logger.info.call_count > 1
msg_tmpl, msg_arg = config.logger.info.call_args_list[1][0]
# ie logging.info(<template>, <arg>)
msg = msg_tmpl % msg_arg
ok_('saved' in msg)
ok_(crash_id in msg)
raw_crash = crashstorage.get_raw_crash(crash_id)
assert isinstance(raw_crash, dict)
eq_(raw_crash['name'], 'Peter')
dump = crashstorage.get_raw_dump(crash_id)
assert isinstance(dump, basestring)
ok_('peter is a swede' in dump)
dumps = crashstorage.get_raw_dumps(crash_id)
assert isinstance(dumps, dict)
ok_('upload_file_minidump' in dumps)
ok_('lars' in dumps)
ok_('adrian' in dumps)
eq_(dumps['upload_file_minidump'],
fake_dumps['upload_file_minidump'])
eq_(dumps['lars'],
fake_dumps['lars'])
eq_(dumps['adrian'],
fake_dumps['adrian'])
# hasn't been processed yet
assert_raises(CrashIDNotFound,
crashstorage.get_processed,
crash_id)
pro = ('{"name":"Peter",'
'"uuid":"86b58ff2-9708-487d-bfc4-9dac32121214", '
'"submitted_timestamp":"%d", '
'"completeddatetime": "%d"}' %
(time.time(), time.time()))
crashstorage.save_processed(json.loads(pro))
data = crashstorage.get_processed(crash_id)
eq_(data['name'], u'Peter')
hb_connection = crashstorage.hbaseConnectionPool.connection()
ok_(hb_connection.transport.isOpen())
crashstorage.close()
ok_(not hb_connection.transport.isOpen())
class TestHBaseCrashStorage(TestCase):
def test_hbase_crashstorage_basic_error(self):
mock_logging = mock.Mock()
required_config = HBaseCrashStorage.get_required_config()
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
'hbase_timeout': 100,
'hbase_host': commonconfig.hbaseHost.default,
'hbase_port': commonconfig.hbasePort.default,
}],
argv_source=[]
)
with config_manager.context() as config:
config.executor_identity = lambda: 'dwight' # bogus thread id
hbaseclient_ = 'socorro.external.hbase.crashstorage.hbase_client'
with mock.patch(hbaseclient_) as hclient:
klass = hclient.HBaseConnectionForCrashReports
def retry_raiser(*args, **kwargs):
raise SomeThriftError('try again')
klass.put_json_dump.side_effect = ValueError('crap!')
crashstorage = HBaseCrashStorage(config)
raw = ('{"name":"Peter", '
'"submitted_timestamp":"%d"}' % time.time())
# Note, we're not expect it to raise an error
assert_raises(ValueError,
crashstorage.save_raw_crash,
json.loads(raw),
raw,
"abc123"
)
#eq_(instance.put_json_dump.call_count, 3)
def test_hbase_crashstorage_error_after_retries(self):
cshbaseclient_ = 'socorro.external.hbase.crashstorage.hbase_client'
cchbaseclient_ = \
'socorro.external.hbase.connection_context.hbase_client'
with nested(mock.patch(cshbaseclient_),
mock.patch(cchbaseclient_)) as (cshclient, cchclient):
fake_hbase_client_connection = mock.MagicMock()
cshclient.HBaseConnectionForCrashReports.return_value = \
fake_hbase_client_connection
fake_put_json_method = mock.MagicMock()
cshclient.HBaseConnectionForCrashReports.put_json_dump = \
fake_put_json_method
cchclient.HBaseConnectionForCrashReports.return_value = \
fake_hbase_client_connection
fake_hbase_client_connection.hbaseThriftExceptions = \
(SomeThriftError,)
fake_put_json_method.side_effect = SomeThriftError('try again')
config = DotDict({
'logger': mock.MagicMock(),
'hbase_timeout': 0,
'hbase_host': 'somehost',
'hbase_port': 9090,
'number_of_retries': 2,
'hbase_connection_pool_class':
HBaseConnectionContextPooled,
'transaction_executor_class':
TransactionExecutorWithLimitedBackoff,
'backoff_delays': [0, 0, 0],
'redactor_class': Redactor,
'forbidden_keys':
Redactor.required_config.forbidden_keys.default,
'executor_identity': lambda: 'dwight' # bogus thread id
})
crashstorage = HBaseCrashStorage(config)
raw = ('{"name":"Peter", '
'"submitted_timestamp":"%d"}' % time.time())
assert_raises(SomeThriftError,
crashstorage.save_raw_crash,
json.loads(raw),
raw,
{}
)
eq_(fake_put_json_method.call_count, 3)
def test_hbase_crashstorage_success_after_retries(self):
cshbaseclient_ = 'socorro.external.hbase.crashstorage.hbase_client'
cchbaseclient_ = \
'socorro.external.hbase.connection_context.hbase_client'
with nested(mock.patch(cshbaseclient_),
mock.patch(cchbaseclient_)) as (cshclient, cchclient):
fake_hbase_client_connection = mock.MagicMock()
cshclient.HBaseConnectionForCrashReports.return_value = \
fake_hbase_client_connection
fake_put_json_method = mock.MagicMock()
cshclient.HBaseConnectionForCrashReports.put_json_dump = \
fake_put_json_method
cchclient.HBaseConnectionForCrashReports.return_value = \
fake_hbase_client_connection
fake_hbase_client_connection.hbaseThriftExceptions = \
(SomeThriftError,)
_attempts = [SomeThriftError, SomeThriftError]
def retry_raiser_iterator(*args, **kwargs):
try:
raise _attempts.pop(0)
except IndexError:
return None
fake_put_json_method.side_effect = retry_raiser_iterator
config = DotDict({
'logger': mock.MagicMock(),
'hbase_timeout': 0,
'hbase_host': 'somehost',
'hbase_port': 9090,
'number_of_retries': 2,
'hbase_connection_pool_class':
HBaseConnectionContextPooled,
'transaction_executor_class':
TransactionExecutorWithLimitedBackoff,
'backoff_delays': [0, 0, 0],
'redactor_class': Redactor,
'forbidden_keys':
Redactor.required_config.forbidden_keys.default,
'executor_identity': lambda: 'dwight' # bogus thread id
})
crashstorage = HBaseCrashStorage(config)
raw = ('{"name":"Peter", '
'"submitted_timestamp":"%d"}' % time.time())
crashstorage.save_raw_crash(json.loads(raw), raw, "abc123")
eq_(fake_put_json_method.call_count, 3)
def test_hbase_crashstorage_puts_and_gets(self):
mock_logging = mock.Mock()
required_config = HBaseCrashStorage.get_required_config()
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
'hbase_timeout': 100,
'hbase_host': commonconfig.hbaseHost.default,
'hbase_port': commonconfig.hbasePort.default,
'transaction_executor_class':
TransactionExecutorWithLimitedBackoff,
'backoff_delays': [0, 0, 0],
}],
argv_source=[]
)
with config_manager.context() as config:
config.executor_identity = lambda: 'dwight' # bogus thread id
hbaseclient_ = 'socorro.external.hbase.crashstorage.hbase_client'
with mock.patch(hbaseclient_) as hclient:
# test save_raw_crash
raw_crash = {
"name": "Peter",
"email": "[email protected]",
"url": "http://embarassing.xxx",
"submitted_timestamp": "2012-05-04T15:10:00",
"user_id": "000-00-0000",
}
fake_binary_dump = "this a bogus binary dump"
expected_raw_crash = raw_crash
expected_dump = fake_binary_dump
expected_dump_2 = fake_binary_dump + " number 2"
# saves us from loooong lines
klass = hclient.HBaseConnectionForCrashReports
crashstorage = HBaseCrashStorage(config)
crashstorage.save_raw_crash(raw_crash, fake_binary_dump,
"abc123")
eq_(
klass.put_json_dump.call_count,
1
)
a = klass.put_json_dump.call_args
eq_(len(a[0]), 4)
#eq_(a[0][1], "abc123")
eq_(a[0][2], expected_raw_crash)
eq_(a[0][3], expected_dump)
eq_(a[1], {'number_of_retries': 0})
# test save_processed
processed_crash = {
"name": "Peter",
"uuid": "abc123",
"email": "[email protected]",
"url": "http://embarassing.xxx",
"user_id": "000-00-0000",
}
expected_processed_crash = {
"name": "Peter",
"uuid": "abc123",
}
expected_unredacted_processed_crash = {
"name": "Peter",
"uuid": "abc123",
"email": "[email protected]",
"url": "http://embarassing.xxx",
"user_id": "000-00-0000",
}
crashstorage = HBaseCrashStorage(config)
crashstorage.save_processed(processed_crash)
eq_(klass.put_processed_json.call_count, 1)
a = klass.put_processed_json.call_args
eq_(len(a[0]), 3)
eq_(a[0][1], "abc123")
eq_(a[0][2], expected_unredacted_processed_crash)
eq_(a[1], {'number_of_retries': 0})
# test get_raw_crash
m = mock.Mock(return_value=raw_crash)
klass.get_json = m
r = crashstorage.get_raw_crash("abc123")
ok_(isinstance(r, DotDict))
a = klass.get_json.call_args
eq_(len(a[0]), 2)
eq_(a[0][1], "abc123")
eq_(klass.get_json.call_count, 1)
eq_(r, expected_raw_crash)
# test get_raw_dump
m = mock.Mock(return_value=fake_binary_dump)
klass.get_dump = m
r = crashstorage.get_raw_dump("abc123")
a = klass.get_dump.call_args
eq_(len(a[0]), 3)
eq_(a[0][1], "abc123")
eq_(klass.get_dump.call_count, 1)
eq_(r, expected_dump)
# test get_raw_dumps
m = mock.Mock(return_value={'upload_file_minidump':
fake_binary_dump})
klass.get_dumps = m
r = crashstorage.get_raw_dumps("abc123")
a = klass.get_dumps.call_args
eq_(len(a[0]), 2)
eq_(a[0][1], "abc123")
eq_(klass.get_dumps.call_count, 1)
eq_(r, {'upload_file_minidump': expected_dump})
# test get_raw_dumps 2
m = mock.Mock(return_value={'upload_file_minidump':
fake_binary_dump,
'aux_1':
expected_dump_2})
klass.get_dumps = m
r = crashstorage.get_raw_dumps("abc123")
a = klass.get_dumps.call_args
eq_(len(a[0]), 2)
eq_(a[0][1], "abc123")
eq_(klass.get_dumps.call_count, 1)
eq_(r, {'upload_file_minidump':
fake_binary_dump,
'aux_1':
expected_dump_2})
# test get_processed
m = mock.Mock(return_value=expected_processed_crash)
klass.get_processed_json = m
r = crashstorage.get_processed("abc123")
ok_(isinstance(r, DotDict))
a = klass.get_processed_json.call_args
eq_(len(a[0]), 2)
eq_(a[0][1], "abc123")
eq_(klass.get_processed_json.call_count, 1)
eq_(r, expected_processed_crash)<|fim▁end|>
|
from socorro.external.hbase import hbase_client
from socorro.external.crashstorage_base import (
|
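The retry tests above pivot on TransactionExecutorWithLimitedBackoff: the operation is retried once per configured backoff delay, and the transient exception is re-raised when the delays run out. The control flow, reduced to a sketch (illustrative, not Socorro's implementation):

import time

def run_with_limited_backoff(operation, backoff_delays,
                             transient=(Exception,)):
    for i, delay in enumerate(backoff_delays):
        try:
            return operation()
        except transient:
            if i == len(backoff_delays) - 1:
                raise          # delays exhausted: propagate
            time.sleep(delay)  # wait, then retry

attempts = []
def flaky():
    attempts.append(1)
    if len(attempts) < 3:
        raise IOError('try again')
    return 'ok'

# With backoff_delays=[0, 0, 0] the third attempt succeeds,
# matching the call_count of 3 asserted in the tests above.
assert run_with_limited_backoff(flaky, [0, 0, 0], (IOError,)) == 'ok'
assert len(attempts) == 3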
<|file_name|>svh-a-no-change.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `svh-a-*.rs` files are all deviations from the base file
//! svh-a-base.rs with some difference (usually in `fn foo`) that
//! should not affect the strict version hash (SVH) computation<|fim▁hole|>
#![crate_name = "a"]
#![feature(core)]
use std::marker::MarkerTrait;
macro_rules! three {
() => { 3 }
}
pub trait U : MarkerTrait {}
pub trait V : MarkerTrait {}
impl U for () {}
impl V for () {}
static A_CONSTANT : isize = 2;
pub fn foo<T:U>(_: isize) -> isize {
3
}
pub fn an_unused_name() -> isize {
4
}<|fim▁end|>
|
//! (#14132).
|
<|file_name|>extpipe.cpp<|end_file_name|><|fim▁begin|>#include <unistd.h>
#include <string.h>
#include <signal.h>
#include <sys/wait.h>
#include <errno.h>
#include <vdr/tools.h>
#include <vdr/thread.h>
#include "extpipe.h"
cExtPipe::cExtPipe(void)
{
pid = -1;
f_stderr = -1;
f_stdout= -1;
}
cExtPipe::~cExtPipe()
{
int status;
Close(status);
}
<|fim▁hole|> int fd_stdout[2];
int fd_stderr[2];
if (pipe(fd_stdout) < 0)
{
LOG_ERROR;
return false;
}
if (pipe(fd_stderr) < 0)
{
close(fd_stdout[0]);
close(fd_stdout[1]);
LOG_ERROR;
return false;
}
if ((pid = fork()) < 0) // fork failed
{
LOG_ERROR;
close(fd_stdout[0]);
close(fd_stdout[1]);
close(fd_stderr[0]);
close(fd_stderr[1]);
return false;
}
if (pid > 0) // parent process
{
close(fd_stdout[1]); // close write fd, we need only read fd
close(fd_stderr[1]); // close write fd, we need only read fd
f_stdout = fd_stdout[0];
f_stderr = fd_stderr[0];
return true;
}
else // child process
{
close(fd_stdout[0]); // close read fd, we need only write fd
close(fd_stderr[0]); // close read fd, we need only write fd
if (dup2(fd_stdout[1], STDOUT_FILENO) == -1) // now redirect
{
LOG_ERROR;
close(fd_stderr[1]);
close(fd_stdout[1]);
_exit(-1);
}
if (dup2(fd_stderr[1], STDERR_FILENO) == -1) // now redirect
{
LOG_ERROR;
close(fd_stderr[1]);
close(fd_stdout[1]);
_exit(-1);
}
int MaxPossibleFileDescriptors = getdtablesize();
for (int i = STDERR_FILENO + 1; i < MaxPossibleFileDescriptors; i++)
close(i); //close all dup'ed filedescriptors
if (execl("/bin/sh", "sh", "-c", Command, NULL) == -1)
{
LOG_ERROR_STR(Command);
close(fd_stderr[1]);
close(fd_stdout[1]);
_exit(-1);
}
_exit(0);
}
}
int cExtPipe::Close(int &status)
{
int ret = -1;
if (f_stderr!=-1)
{
close(f_stderr);
f_stderr = -1;
}
if (f_stdout!=-1)
{
close(f_stdout);
f_stdout=-1;
}
if (pid > 0)
{
int i = 5;
while (i > 0)
{
ret = waitpid(pid, &status, WNOHANG);
if (ret < 0)
{
if (errno != EINTR && errno != ECHILD)
{
LOG_ERROR;
break;
}
else if (errno == ECHILD)
{
ret = pid;
break;
}
}
else if (ret == pid)
break;
i--;
cCondWait::SleepMs(100);
}
if (!i)
{
kill(pid, SIGKILL);
ret = -1;
}
else if (ret == -1 || !WIFEXITED(status))
ret = -1;
pid = -1;
}
return ret;
}<|fim▁end|>
|
bool cExtPipe::Open(const char *Command)
{
|
<|file_name|>eq.js<|end_file_name|><|fim▁begin|>var _curry2 = require('./internal/_curry2');
/**
* Tests if two items are equal. Equality is strict here, meaning reference equality for objects and
* non-coercing equality for primitives.
*
* @func
* @memberOf R
* @category Relation
* @sig a -> b -> Boolean
* @param {*} a
* @param {*} b<|fim▁hole|> *
* var o = {};
* R.eq(o, o); //=> true
* R.eq(o, {}); //=> false
* R.eq(1, 1); //=> true
* R.eq(1, '1'); //=> false
*/
module.exports = _curry2(function eq(a, b) { return a === b; });<|fim▁end|>
|
* @return {Boolean}
* @example
|
<|file_name|>0005_workertype_is_active.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20161116_1209'),
]
operations = [
migrations.AddField(
model_name='workertype',
name='is_active',
field=models.BooleanField(default=True),
),
]<|fim▁end|>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-16 12:50
|
<|file_name|>info_base.rs<|end_file_name|><|fim▁begin|>use super::{imports::Imports, *};
use crate::{codegen::Visibility, library, version::Version};
#[derive(Debug, Default)]
pub struct InfoBase {
pub full_name: String,
pub type_id: library::TypeId,
pub name: String,
pub functions: Vec<functions::Info>,
pub specials: special_functions::Infos,
pub imports: Imports,
pub version: Option<Version>,
pub deprecated_version: Option<Version>,
pub cfg_condition: Option<String>,
pub concurrency: library::Concurrency,
pub visibility: Visibility,
}
<|fim▁hole|> pub fn constructors(&self) -> Vec<&functions::Info> {
self.functions
.iter()
.filter(|f| f.status.need_generate() && f.kind == library::FunctionKind::Constructor)
.collect()
}
pub fn methods(&self) -> Vec<&functions::Info> {
self.functions
.iter()
.filter(|f| f.status.need_generate() && f.kind == library::FunctionKind::Method)
.collect()
}
pub fn functions(&self) -> Vec<&functions::Info> {
self.functions
.iter()
.filter(|f| f.status.need_generate() && f.kind == library::FunctionKind::Function)
.collect()
}
}<|fim▁end|>
|
impl InfoBase {
///TODO: return iterator
|
<|file_name|>plot_circos_plot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
Read the list of chimeric interactions and generate a file that can be read
by circos.
"""
import sys
import argparse
from collections import defaultdict
from math import log
import pro_clash
def process_command_line(argv):
"""
Return a 2-tuple: (settings object, args list).
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
<|fim▁hole|> # initialize the parser object, replace the description
parser = argparse.ArgumentParser(
description='Generate circos data file.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'reads_in',
help='An output file of map_chimeric_fragments.py with the chimeric'
' fragments.')
parser.add_argument(
'-r', '--region', type=int, default=200,
help='Split the genome to windows of this size.')
parser.add_argument(
'-c', '--chrn', default='chr',
help='Name of chromosome to plot.')
parser.add_argument(
'-p', '--print_chr', default='ecmain',
help='Name of chromosome in circos.')
parser.add_argument(
'-m', '--min_interactions', type=int, default=100,
help='Minimum number of interactions between two regions to plot.')
settings = parser.parse_args(argv)
return settings
def main(argv=None):
settings = process_command_line(argv)
region_interactions, _, _, _=\
pro_clash.read_reads_table(open(settings.reads_in), settings.region)
both_strs = defaultdict(lambda: defaultdict(int))
for reg1 in region_interactions:
if reg1[2] != settings.chrn:
continue
for reg2 in region_interactions[reg1]:
if reg2[2] != settings.chrn:
continue
both_strs[reg1[0]][reg2[0]] += len(region_interactions[reg1][reg2])
for r1 in both_strs:
for r2 in both_strs[r1]:
if both_strs[r1][r2] > settings.min_interactions:
sys.stdout.write('%s %d %d %s %d %d thickness=%dp\n'%(
settings.print_chr, r1+1, r1+settings.region,
settings.print_chr, r2+1, r2+settings.region,
log(both_strs[r1][r2])/log(10)))
return 0 # success
if __name__ == '__main__':
status = main()
sys.exit(status)<|fim▁end|>
| |
<|file_name|>die.py<|end_file_name|><|fim▁begin|>from random import randint<|fim▁hole|>
class Die():
'''A class representing a single die'''
def __init__(self, num_sides=6):
'''assume a six-side die'''
self.num_sides = num_sides
def roll(self):
'''return a random number between 1 and number of sides'''
return randint(1, self.num_sides)<|fim▁end|>
| |
<|file_name|>where-clauses.rs<|end_file_name|><|fim▁begin|>// run-pass
trait Equal {
fn equal(&self, other: &Self) -> bool;
fn equals<T,U>(&self, this: &T, that: &T, x: &U, y: &U) -> bool
where T: Eq, U: Eq;
}
impl<T> Equal for T where T: Eq {
fn equal(&self, other: &T) -> bool {
self == other
}
fn equals<U,X>(&self, this: &U, other: &U, x: &X, y: &X) -> bool
where U: Eq, X: Eq {
this == other && x == y
}
}
fn equal<T>(x: &T, y: &T) -> bool where T: Eq {
x == y
}<|fim▁hole|> println!("{}", equal(&1, &2));
println!("{}", equal(&1, &1));
println!("{}", "hello".equal(&"hello"));
println!("{}", "hello".equals::<isize,&str>(&1, &1, &"foo", &"bar"));
}<|fim▁end|>
|
fn main() {
|
<|file_name|>metrics_confusion_matrix_test.py<|end_file_name|><|fim▁begin|># Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import metrics
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FalsePositivesTest(test.TestCase):
def test_config(self):
fp_obj = metrics.FalsePositives(name='my_fp', thresholds=[0.4, 0.9])
self.assertEqual(fp_obj.name, 'my_fp')
self.assertEqual(len(fp_obj.variables), 1)
self.assertEqual(fp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
self.assertEqual(fp_obj2.name, 'my_fp')
self.assertEqual(len(fp_obj2.variables), 1)
self.assertEqual(fp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)<|fim▁hole|> result = fp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(14., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose([7., 4., 2.], result)
def test_weighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0), (5.0, 15.0, 10.0, 0))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([125., 42., 12.], self.evaluate(result))
def test_threshold_limit(self):
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[-1, 2\]'):
metrics.FalsePositives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[None\]'):
metrics.FalsePositives(thresholds=[None])
@test_util.run_all_in_graph_and_eager_modes
class FalseNegativesTest(test.TestCase):
def test_config(self):
fn_obj = metrics.FalseNegatives(name='my_fn', thresholds=[0.4, 0.9])
self.assertEqual(fn_obj.name, 'my_fn')
self.assertEqual(len(fn_obj.variables), 1)
self.assertEqual(fn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fn_obj2 = metrics.FalseNegatives.from_config(fn_obj.get_config())
self.assertEqual(fn_obj2.name, 'my_fn')
self.assertEqual(len(fn_obj2.variables), 1)
self.assertEqual(fn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(5., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose([1., 4., 6.], result)
def test_weighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((3.0,), (5.0,), (7.0,), (4.0,))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([4., 16., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TrueNegativesTest(test.TestCase):
def test_config(self):
tn_obj = metrics.TrueNegatives(name='my_tn', thresholds=[0.4, 0.9])
self.assertEqual(tn_obj.name, 'my_tn')
self.assertEqual(len(tn_obj.variables), 1)
self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
self.assertEqual(tn_obj2.name, 'my_tn')
self.assertEqual(len(tn_obj2.variables), 1)
self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(4., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose([2., 5., 7.], result)
def test_weighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((0.0, 2.0, 3.0, 5.0),)
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([5., 15., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TruePositivesTest(test.TestCase):
def test_config(self):
tp_obj = metrics.TruePositives(name='my_tp', thresholds=[0.4, 0.9])
self.assertEqual(tp_obj.name, 'my_tp')
self.assertEqual(len(tp_obj.variables), 1)
self.assertEqual(tp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tp_obj2 = metrics.TruePositives.from_config(tp_obj.get_config())
self.assertEqual(tp_obj2.name, 'my_tp')
self.assertEqual(len(tp_obj2.variables), 1)
self.assertEqual(tp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(12., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose([6., 3., 1.], result)
def test_weighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
result = tp_obj(y_true, y_pred, sample_weight=37.)
self.assertAllClose([222., 111., 37.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class PrecisionTest(test.TestCase):
def test_config(self):
p_obj = metrics.Precision(
name='my_precision', thresholds=[0.4, 0.9], top_k=15, class_id=12)
self.assertEqual(p_obj.name, 'my_precision')
self.assertEqual(len(p_obj.variables), 2)
self.assertEqual([v.name for v in p_obj.variables],
['true_positives:0', 'false_positives:0'])
self.assertEqual(p_obj.thresholds, [0.4, 0.9])
self.assertEqual(p_obj.top_k, 15)
self.assertEqual(p_obj.class_id, 12)
# Check save and restore config
p_obj2 = metrics.Precision.from_config(p_obj.get_config())
self.assertEqual(p_obj2.name, 'my_precision')
self.assertEqual(len(p_obj2.variables), 2)
self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
self.assertEqual(p_obj2.top_k, 15)
self.assertEqual(p_obj2.class_id, 12)
def test_value_is_idempotent(self):
p_obj = metrics.Precision(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = p_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(p_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_precision = self.evaluate(p_obj.result())
for _ in range(10):
self.assertArrayNear(initial_precision, self.evaluate(p_obj.result()),
1e-3)
def test_unweighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
p_obj = metrics.Precision(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_div_by_zero(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
update_op = p_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(p_obj.result()),
1e-3)
def test_unweighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1. / 3, self.evaluate(result))
def test_weighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(variables.variables_initializer(p_obj.variables))
self.evaluate(
p_obj(
y_true1,
y_pred1,
sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = p_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
tp = (2 + 5) + (3 + 3)
predicted_positives = (1 + 2 + 5) + (3 + 3 + 3)
expected_precision = tp / predicted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_unweighted_class_id(self):
p_obj = metrics.Precision(class_id=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_class_id(self):
p_obj = metrics.Precision(class_id=2, top_k=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_threshold(self):
p_obj = metrics.Precision(thresholds=.7, top_k=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
@test_util.run_all_in_graph_and_eager_modes
class RecallTest(test.TestCase):
def test_config(self):
r_obj = metrics.Recall(
name='my_recall', thresholds=[0.4, 0.9], top_k=15, class_id=12)
self.assertEqual(r_obj.name, 'my_recall')
self.assertEqual(len(r_obj.variables), 2)
self.assertEqual([v.name for v in r_obj.variables],
['true_positives:0', 'false_negatives:0'])
self.assertEqual(r_obj.thresholds, [0.4, 0.9])
self.assertEqual(r_obj.top_k, 15)
self.assertEqual(r_obj.class_id, 12)
# Check save and restore config
r_obj2 = metrics.Recall.from_config(r_obj.get_config())
self.assertEqual(r_obj2.name, 'my_recall')
self.assertEqual(len(r_obj2.variables), 2)
self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
self.assertEqual(r_obj2.top_k, 15)
self.assertEqual(r_obj2.class_id, 12)
def test_value_is_idempotent(self):
r_obj = metrics.Recall(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = r_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(r_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_recall = self.evaluate(r_obj.result())
for _ in range(10):
self.assertArrayNear(initial_recall, self.evaluate(r_obj.result()), 1e-3)
def test_unweighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
r_obj = metrics.Recall(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_div_by_zero(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
update_op = r_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(r_obj.result()),
1e-3)
def test_unweighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_weighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(variables.variables_initializer(r_obj.variables))
self.evaluate(
r_obj(
y_true1,
y_pred1,
sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = r_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
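    # Same top-3 picks as the precision case: update 1 catches true positives
    # at indices 2 and 4 (weights 2, 5), update 2 at indices 0 and 2 (weight 3
    # each). The denominator is the weighted count of actual positives:
    # 4 + 2 + 5 from update 1 and four labels at weight 3 from update 2.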
tp = (2 + 5) + (3 + 3)
positives = (4 + 2 + 5) + (3 + 3 + 3 + 3)
expected_recall = tp / positives
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_unweighted_class_id(self):
r_obj = metrics.Recall(class_id=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_class_id(self):
r_obj = metrics.Recall(class_id=2, top_k=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_threshold(self):
r_obj = metrics.Recall(thresholds=.7, top_k=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([1, 1, 1, 0, 1], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.25, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(3, self.evaluate(r_obj.false_negatives))
@test_util.run_all_in_graph_and_eager_modes
class SensitivityAtSpecificityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SensitivityAtSpecificity(
0.4, num_thresholds=100, name='sensitivity_at_specificity_1')
self.assertEqual(s_obj.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.specificity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SensitivityAtSpecificity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.specificity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_sensitivity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.8, self.evaluate(result))
def test_unweighted_low_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.675, self.evaluate(result))
def test_invalid_specificity(self):
with self.assertRaisesRegexp(
ValueError, r'`specificity` must be in the range \[0, 1\].'):
metrics.SensitivityAtSpecificity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class SpecificityAtSensitivityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SpecificityAtSensitivity(
0.4, num_thresholds=100, name='specificity_at_sensitivity_1')
self.assertEqual(s_obj.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.sensitivity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SpecificityAtSensitivity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.sensitivity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_specificity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_specificity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_unweighted_low_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_invalid_sensitivity(self):
with self.assertRaisesRegexp(
ValueError, r'`sensitivity` must be in the range \[0, 1\].'):
metrics.SpecificityAtSensitivity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class AUCTest(test.TestCase):
def setup(self):
self.num_thresholds = 3
self.y_pred = constant_op.constant([0, 0.5, 0.3, 0.9], dtype=dtypes.float32)
self.y_true = constant_op.constant([0, 0, 1, 1])
self.sample_weight = [1, 2, 3, 4]
# threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
# y_pred when threshold = 0 - 1e-7 : [1, 1, 1, 1]
# y_pred when threshold = 0.5 : [0, 0, 0, 1]
# y_pred when threshold = 1 + 1e-7 : [0, 0, 0, 0]
# without sample_weight:
# tp = np.sum([[0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 1]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]], axis=1)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# with sample_weight:
# tp = np.sum([[0, 0, 3, 4], [0, 0, 0, 4], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 3, 0], [0, 0, 3, 4]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 2, 0, 0], [1, 2, 0, 0]], axis=1)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
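  # The helper below is an illustrative sketch added for clarity (it is not
  # part of the original suite and no test calls it): it reproduces the
  # weighted tp/fp/fn/tn arithmetic worked out in the comments above with
  # plain numpy, one confusion-matrix row per threshold.
  @staticmethod
  def _confusion_counts(y_true, y_pred, thresholds, sample_weight=None):
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    w = (np.ones_like(y_true) if sample_weight is None
         else np.asarray(sample_weight, dtype=float))
    counts = []
    for t in thresholds:
      pred_pos = y_pred > t
      tp = np.sum(w * (pred_pos & (y_true == 1)))
      fp = np.sum(w * (pred_pos & (y_true == 0)))
      fn = np.sum(w * (~pred_pos & (y_true == 1)))
      tn = np.sum(w * (~pred_pos & (y_true == 0)))
      counts.append((tp, fp, fn, tn))
    return counts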
def test_config(self):
auc_obj = metrics.AUC(
num_thresholds=100,
curve='PR',
summation_method='majoring',
name='auc_1')
self.assertEqual(auc_obj.name, 'auc_1')
self.assertEqual(len(auc_obj.variables), 4)
self.assertEqual(auc_obj.num_thresholds, 100)
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
old_config = auc_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
self.assertEqual(auc_obj2.name, 'auc_1')
self.assertEqual(len(auc_obj2.variables), 4)
self.assertEqual(auc_obj2.num_thresholds, 100)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj2.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
new_config = auc_obj2.get_config()
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_config_manual_thresholds(self):
auc_obj = metrics.AUC(
num_thresholds=None,
curve='PR',
summation_method='majoring',
name='auc_1',
thresholds=[0.3, 0.5])
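    # The 0 and 1 endpoints are added to the supplied thresholds
    # automatically, so two manual thresholds yield num_thresholds == 4
    # (verified below).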
self.assertEqual(auc_obj.name, 'auc_1')
self.assertEqual(len(auc_obj.variables), 4)
self.assertEqual(auc_obj.num_thresholds, 4)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.3, 0.5, 1.0])
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
old_config = auc_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
self.assertEqual(auc_obj2.name, 'auc_1')
self.assertEqual(len(auc_obj2.variables), 4)
self.assertEqual(auc_obj2.num_thresholds, 4)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj2.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
new_config = auc_obj2.get_config()
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_value_is_idempotent(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=3)
self.evaluate(variables.variables_initializer(auc_obj.variables))
# Run several updates.
update_op = auc_obj.update_state(self.y_true, self.y_pred)
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_auc = self.evaluate(auc_obj.result())
for _ in range(10):
self.assertAllClose(initial_auc, self.evaluate(auc_obj.result()), 1e-3)
def test_unweighted_all_correct(self):
self.setup()
auc_obj = metrics.AUC()
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_true)
self.assertEqual(self.evaluate(result), 1)
def test_unweighted(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.75 * 1 + 0.25 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_manual_thresholds(self):
self.setup()
# Verify that when specified, thresholds are used instead of num_thresholds.
auc_obj = metrics.AUC(num_thresholds=2, thresholds=[0.5])
self.assertEqual(auc_obj.num_thresholds, 3)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.5, 1.0])
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.75 * 1 + 0.25 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_interpolation(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.571)/2, (0.571 + 0)/2] = [0.7855, 0.2855]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.7855 * 1 + 0.2855 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method='majoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [max(1, 0.571), max(0.571, 0)] = [1, 0.571]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (1 * 1 + 0.571 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method='minoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [min(1, 0.571), min(0.571, 0)] = [0.571, 0]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.571 * 1 + 0 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve='PR',
summation_method='majoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [max(0.7, 1), max(1, 0)] = [1, 1]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = (1 * 0.429 + 1 * 0.571)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve='PR',
summation_method='minoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [min(0.7, 1), min(1, 0)] = [0.7, 0]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = (0.7 * 0.429 + 0 * 0.571)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_interpolation(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, curve='PR')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
    # auc = (slope / Total Pos) * [dTP + intercept * log(Pb/Pa)]
    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # P = tp + fp = [10, 4, 0]
    # dTP = [7-4, 4-0] = [3, 4]
    # dP = [10-4, 4-0] = [6, 4]
    # slope = dTP/dP = [0.5, 1]
    # intercept = TPa - slope * Pa = [(4 - 0.5*4), (0 - 1*0)] = [2, 0]
    # (Pb/Pa) if Pb > 0 AND Pa > 0 else 1 = [10/4, 4/0] = [2.5, 1]
# auc * TotalPos = [(0.5 * (3 + 2 * log(2.5))), (1 * (4 + 0))]
# = [2.416, 4]
# auc = [2.416, 4]/(tp[1:]+fn[1:])
expected_result = (2.416/7 + 4/7)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
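  # Illustrative sketch (an addition for clarity, not called by any test):
  # recomputes the expected value above from the closed form
  # auc_segment = (slope / total_pos) * (dTP + intercept * log(Pb / Pa)),
  # mirroring the hand-worked comments in test_weighted_pr_interpolation.
  def _pr_interpolation_by_hand(self):
    total_pos = 7.
    seg1 = (0.5 / total_pos) * (3. + 2. * np.log(2.5))
    seg2 = (1. / total_pos) * (4. + 0.)
    return seg1 + seg2  # ~0.9166, matching 2.416/7 + 4/7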
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
metrics.AUC(num_thresholds=-1)
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
metrics.AUC(num_thresholds=1)
def test_invalid_curve(self):
with self.assertRaisesRegexp(ValueError,
'Invalid AUC curve value "Invalid".'):
metrics.AUC(curve='Invalid')
def test_invalid_summation_method(self):
with self.assertRaisesRegexp(
ValueError, 'Invalid AUC summation method value "Invalid".'):
metrics.AUC(summation_method='Invalid')
if __name__ == '__main__':
test.main()<|fim▁end|>
|
self.evaluate(update_op)
|
<|file_name|>predicates_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package predicates
import (
"fmt"
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)
type FakeNodeInfo api.Node
func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
node := api.Node(n)
return &node, nil
}
type FakeNodeListInfo []api.Node
func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
for _, node := range nodes {
if node.Name == nodeName {
return &node, nil
}
}
return nil, fmt.Errorf("Unable to find node: %s", nodeName)
}
func makeResources(milliCPU int64, memory int64, pods int64) api.NodeResources {
return api.NodeResources{
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
},
}
}
func newResourcePod(usage ...resourceRequest) *api.Pod {
containers := []api.Container{}
for _, req := range usage {
containers = append(containers, api.Container{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(req.memory, resource.BinarySI),
},
},
})
}
return &api.Pod{
Spec: api.PodSpec{
Containers: containers,
},
}
}
func TestPodFitsResources(t *testing.T) {
enoughPodsTests := []struct {
pod *api.Pod
existingPods []*api.Pod
fits bool
test string
}{
{
pod: &api.Pod{},
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 10, memory: 20}),
},
fits: true,
test: "no resources requested always fits",
},
{
pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 10, memory: 20}),
},
fits: false,
test: "too many resources fails",
},
{
pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 5, memory: 5}),
},
fits: true,
test: "both resources fit",
},
{
pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 2}),
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 5, memory: 19}),
},
fits: false,
test: "one resources fits",
},
{
pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 5, memory: 19}),
},
fits: true,
test: "equal edge case",
},
}
for _, test := range enoughPodsTests {
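		// Each case runs against a node with capacity for 10 milliCPU,
		// 20 units of memory and 32 pods, per makeResources below.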
node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity}}
fit := ResourceFit{FakeNodeInfo(node)}
fits, err := fit.PodFitsResources(test.pod, test.existingPods, "machine")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if fits != test.fits {
t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
}
}
notEnoughPodsTests := []struct {
pod *api.Pod
existingPods []*api.Pod
fits bool
test string
}{
{
pod: &api.Pod{},
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 10, memory: 20}),
},
fits: false,
test: "even without specified resources predicate fails when there's no available ips",
},
{
pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 5, memory: 5}),
},
fits: false,<|fim▁hole|> test: "even if both resources fit predicate fails when there's no available ips",
},
{
pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 5, memory: 19}),
},
fits: false,
test: "even for equal edge case predicate fails when there's no available ips",
},
}
for _, test := range notEnoughPodsTests {
node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1).Capacity}}
fit := ResourceFit{FakeNodeInfo(node)}
fits, err := fit.PodFitsResources(test.pod, test.existingPods, "machine")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if fits != test.fits {
t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
}
}
}
func TestPodFitsHost(t *testing.T) {
tests := []struct {
pod *api.Pod
node string
fits bool
test string
}{
{
pod: &api.Pod{},
node: "foo",
fits: true,
test: "no host specified",
},
{
pod: &api.Pod{
Spec: api.PodSpec{
NodeName: "foo",
},
},
node: "foo",
fits: true,
test: "host matches",
},
{
pod: &api.Pod{
Spec: api.PodSpec{
NodeName: "bar",
},
},
node: "foo",
fits: false,
test: "host doesn't match",
},
}
for _, test := range tests {
result, err := PodFitsHost(test.pod, []*api.Pod{}, test.node)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if result != test.fits {
t.Errorf("unexpected difference for %s: got: %v expected %v", test.test, test.fits, result)
}
}
}
func newPod(host string, hostPorts ...int) *api.Pod {
networkPorts := []api.ContainerPort{}
for _, port := range hostPorts {
networkPorts = append(networkPorts, api.ContainerPort{HostPort: port})
}
return &api.Pod{
Spec: api.PodSpec{
NodeName: host,
Containers: []api.Container{
{
Ports: networkPorts,
},
},
},
}
}
func TestPodFitsHostPorts(t *testing.T) {
tests := []struct {
pod *api.Pod
existingPods []*api.Pod
fits bool
test string
}{
{
pod: &api.Pod{},
existingPods: []*api.Pod{},
fits: true,
test: "nothing running",
},
{
pod: newPod("m1", 8080),
existingPods: []*api.Pod{
newPod("m1", 9090),
},
fits: true,
test: "other port",
},
{
pod: newPod("m1", 8080),
existingPods: []*api.Pod{
newPod("m1", 8080),
},
fits: false,
test: "same port",
},
{
pod: newPod("m1", 8000, 8080),
existingPods: []*api.Pod{
newPod("m1", 8080),
},
fits: false,
test: "second port",
},
{
pod: newPod("m1", 8000, 8080),
existingPods: []*api.Pod{
newPod("m1", 8001, 8080),
},
fits: false,
test: "second port",
},
}
for _, test := range tests {
fits, err := PodFitsHostPorts(test.pod, test.existingPods, "machine")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if test.fits != fits {
t.Errorf("%s: expected %v, saw %v", test.test, test.fits, fits)
}
}
}
func TestGetUsedPorts(t *testing.T) {
tests := []struct {
pods []*api.Pod
ports map[int]bool
}{
{
[]*api.Pod{
newPod("m1", 9090),
},
map[int]bool{9090: true},
},
{
[]*api.Pod{
newPod("m1", 9090),
newPod("m1", 9091),
},
map[int]bool{9090: true, 9091: true},
},
{
[]*api.Pod{
newPod("m1", 9090),
newPod("m2", 9091),
},
map[int]bool{9090: true, 9091: true},
},
}
for _, test := range tests {
ports := getUsedPorts(test.pods...)
if !reflect.DeepEqual(test.ports, ports) {
t.Errorf("expect %v, got %v", test.ports, ports)
}
}
}
func TestDiskConflicts(t *testing.T) {
volState := api.PodSpec{
Volumes: []api.Volume{
{
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PDName: "foo",
},
},
},
},
}
volState2 := api.PodSpec{
Volumes: []api.Volume{
{
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PDName: "bar",
},
},
},
},
}
tests := []struct {
pod *api.Pod
existingPods []*api.Pod
isOk bool
test string
}{
{&api.Pod{}, []*api.Pod{}, true, "nothing"},
{&api.Pod{}, []*api.Pod{{Spec: volState}}, true, "one state"},
{&api.Pod{Spec: volState}, []*api.Pod{{Spec: volState}}, false, "same state"},
{&api.Pod{Spec: volState2}, []*api.Pod{{Spec: volState}}, true, "different state"},
}
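	// NoDiskConflict must refuse to schedule two pods that mount the same
	// GCE PD (matched by PDName) onto one node; different disks may coexist.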
for _, test := range tests {
ok, err := NoDiskConflict(test.pod, test.existingPods, "machine")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if test.isOk && !ok {
t.Errorf("expected ok, got none. %v %v %s", test.pod, test.existingPods, test.test)
}
if !test.isOk && ok {
t.Errorf("expected no ok, got one. %v %v %s", test.pod, test.existingPods, test.test)
}
}
}
func TestAWSDiskConflicts(t *testing.T) {
volState := api.PodSpec{
Volumes: []api.Volume{
{
VolumeSource: api.VolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
VolumeID: "foo",
},
},
},
},
}
volState2 := api.PodSpec{
Volumes: []api.Volume{
{
VolumeSource: api.VolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
VolumeID: "bar",
},
},
},
},
}
tests := []struct {
pod *api.Pod
existingPods []*api.Pod
isOk bool
test string
}{
{&api.Pod{}, []*api.Pod{}, true, "nothing"},
{&api.Pod{}, []*api.Pod{{Spec: volState}}, true, "one state"},
{&api.Pod{Spec: volState}, []*api.Pod{{Spec: volState}}, false, "same state"},
{&api.Pod{Spec: volState2}, []*api.Pod{{Spec: volState}}, true, "different state"},
}
for _, test := range tests {
ok, err := NoDiskConflict(test.pod, test.existingPods, "machine")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if test.isOk && !ok {
t.Errorf("expected ok, got none. %v %v %s", test.pod, test.existingPods, test.test)
}
if !test.isOk && ok {
t.Errorf("expected no ok, got one. %v %v %s", test.pod, test.existingPods, test.test)
}
}
}
func TestRBDDiskConflicts(t *testing.T) {
volState := api.PodSpec{
Volumes: []api.Volume{
{
VolumeSource: api.VolumeSource{
RBD: &api.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: "foo",
RBDImage: "bar",
FSType: "ext4",
},
},
},
},
}
volState2 := api.PodSpec{
Volumes: []api.Volume{
{
VolumeSource: api.VolumeSource{
RBD: &api.RBDVolumeSource{
CephMonitors: []string{"c", "d"},
RBDPool: "foo",
RBDImage: "bar",
FSType: "ext4",
},
},
},
},
}
tests := []struct {
pod *api.Pod
existingPods []*api.Pod
isOk bool
test string
}{
{&api.Pod{}, []*api.Pod{}, true, "nothing"},
{&api.Pod{}, []*api.Pod{{Spec: volState}}, true, "one state"},
{&api.Pod{Spec: volState}, []*api.Pod{{Spec: volState}}, false, "same state"},
{&api.Pod{Spec: volState2}, []*api.Pod{{Spec: volState}}, true, "different state"},
}
for _, test := range tests {
ok, err := NoDiskConflict(test.pod, test.existingPods, "machine")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if test.isOk && !ok {
t.Errorf("expected ok, got none. %v %v %s", test.pod, test.existingPods, test.test)
}
if !test.isOk && ok {
t.Errorf("expected no ok, got one. %v %v %s", test.pod, test.existingPods, test.test)
}
}
}
func TestPodFitsSelector(t *testing.T) {
tests := []struct {
pod *api.Pod
labels map[string]string
fits bool
test string
}{
{
pod: &api.Pod{},
fits: true,
test: "no selector",
},
{
pod: &api.Pod{
Spec: api.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
},
},
},
fits: false,
test: "missing labels",
},
{
pod: &api.Pod{
Spec: api.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
},
},
},
labels: map[string]string{
"foo": "bar",
},
fits: true,
test: "same labels",
},
{
pod: &api.Pod{
Spec: api.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
},
},
},
labels: map[string]string{
"foo": "bar",
"baz": "blah",
},
fits: true,
test: "node labels are superset",
},
{
pod: &api.Pod{
Spec: api.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
"baz": "blah",
},
},
},
labels: map[string]string{
"foo": "bar",
},
fits: false,
test: "node labels are subset",
},
}
for _, test := range tests {
node := api.Node{ObjectMeta: api.ObjectMeta{Labels: test.labels}}
fit := NodeSelector{FakeNodeInfo(node)}
fits, err := fit.PodSelectorMatches(test.pod, []*api.Pod{}, "machine")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if fits != test.fits {
t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
}
}
}
func TestNodeLabelPresence(t *testing.T) {
label := map[string]string{"foo": "bar", "bar": "foo"}
tests := []struct {
pod *api.Pod
existingPods []*api.Pod
labels []string
presence bool
fits bool
test string
}{
{
labels: []string{"baz"},
presence: true,
fits: false,
test: "label does not match, presence true",
},
{
labels: []string{"baz"},
presence: false,
fits: true,
test: "label does not match, presence false",
},
{
labels: []string{"foo", "baz"},
presence: true,
fits: false,
test: "one label matches, presence true",
},
{
labels: []string{"foo", "baz"},
presence: false,
fits: false,
test: "one label matches, presence false",
},
{
labels: []string{"foo", "bar"},
presence: true,
fits: true,
test: "all labels match, presence true",
},
{
labels: []string{"foo", "bar"},
presence: false,
fits: false,
test: "all labels match, presence false",
},
}
for _, test := range tests {
node := api.Node{ObjectMeta: api.ObjectMeta{Labels: label}}
labelChecker := NodeLabelChecker{FakeNodeInfo(node), test.labels, test.presence}
fits, err := labelChecker.CheckNodeLabelPresence(test.pod, test.existingPods, "machine")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if fits != test.fits {
t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
}
}
}
func TestServiceAffinity(t *testing.T) {
selector := map[string]string{"foo": "bar"}
labels1 := map[string]string{
"region": "r1",
"zone": "z11",
}
labels2 := map[string]string{
"region": "r1",
"zone": "z12",
}
labels3 := map[string]string{
"region": "r2",
"zone": "z21",
}
labels4 := map[string]string{
"region": "r2",
"zone": "z22",
}
node1 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labels1}}
node2 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labels2}}
node3 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labels3}}
node4 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labels4}}
node5 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labels4}}
tests := []struct {
pod *api.Pod
pods []*api.Pod
services []api.Service
node string
labels []string
fits bool
test string
}{
{
pod: new(api.Pod),
node: "machine1",
fits: true,
labels: []string{"region"},
test: "nothing scheduled",
},
{
pod: &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r1"}}},
node: "machine1",
fits: true,
labels: []string{"region"},
test: "pod with region label match",
},
{
pod: &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r2"}}},
node: "machine1",
fits: false,
labels: []string{"region"},
test: "pod with region label mismatch",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
labels: []string{"region"},
test: "service pod on same node",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
labels: []string{"region"},
test: "service pod on different node, region match",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: false,
labels: []string{"region"},
test: "service pod on different node, region mismatch",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns2"}}},
fits: true,
labels: []string{"region"},
test: "service in different namespace, region mismatch",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns2"}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
fits: true,
labels: []string{"region"},
test: "pod in different namespace, region mismatch",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
fits: false,
labels: []string{"region"},
test: "service and pod in same namespace, region mismatch",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: false,
labels: []string{"region", "zone"},
test: "service pod on different node, multiple labels, not all match",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine4",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
labels: []string{"region", "zone"},
test: "service pod on different node, multiple labels, all match",
},
}
for _, test := range tests {
nodes := []api.Node{node1, node2, node3, node4, node5}
serviceAffinity := ServiceAffinity{algorithm.FakePodLister(test.pods), algorithm.FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels}
fits, err := serviceAffinity.CheckServiceAffinity(test.pod, []*api.Pod{}, test.node)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if fits != test.fits {
t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
}
}
}<|fim▁end|>
| |
<|file_name|>csr.go<|end_file_name|><|fim▁begin|>package openssl
import (
"fmt"
"io/ioutil"
"os/exec"
"regexp"
log "github.com/cihub/seelog"
)
type CSR struct {
//path string
//key string
content []byte
contentKey []byte
}
func (o *Openssl) LoadCSR(filename, keyfile string) (*CSR, error) {
var err error
o.Init()
filename = o.Path + "/" + filename
keyfile = o.Path + "/" + keyfile
c := &CSR{}
c.content, err = ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
c.contentKey, err = ioutil.ReadFile(keyfile)
if err != nil {
return nil, err
}
return c, nil
}
func (o *Openssl) CreateCSR(cn string, server bool) (*CSR, error) {
var err error
o.Init()<|fim▁hole|>
log.Info("Create CSR")
c := &CSR{}
args := []string{
"req",
"-days", "3650",
"-nodes",
"-new",
"-keyout", "/dev/stdout",
"-out", "/dev/stdout",
"-config", o.GetConfigFile(),
"-batch",
"-utf8",
"-subj", "/C=" + o.Country + "/ST=" + o.Province + "/L=" + o.City + "/O=" + o.Organization + "/CN=" + cn + "/emailAddress=" + o.Email,
}
if server {
args = append(args, "-extensions", "server")
}
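	// The exec call below is roughly equivalent to running, e.g.:
	//   openssl req -days 3650 -nodes -new -keyout /dev/stdout -out /dev/stdout \
	//     -config <config> -batch -utf8 -subj "/C=.../CN=<cn>/..." [-extensions server]
	// (illustrative sketch only; the real -subj is assembled from the
	// Openssl fields above)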
content, err := exec.Command("openssl", args...).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("openssl req: " + err.Error() + " (" + string(content) + ")")
}
reCert := regexp.MustCompile("(?ms)-----BEGIN CERTIFICATE REQUEST-----(.+)-----END CERTIFICATE REQUEST-----")
reKey := regexp.MustCompile("(?ms)-----BEGIN PRIVATE KEY-----(.+)-----END PRIVATE KEY-----")
c.content = reCert.Find(content)
c.contentKey = reKey.Find(content)
if len(c.content) == 0 {
err = fmt.Errorf("Generated csr is 0 long")
return nil, err
}
if len(c.contentKey) == 0 {
err = fmt.Errorf("Generated csr key is 0 long")
return nil, err
}
//if err = ioutil.WriteFile(c.key, c.contentKey, 0600); err != nil {
//return nil, err
//}
return c, nil
}
func (csr *CSR) Save(filename string) error {
if err := ioutil.WriteFile(filename, csr.content, 0600); err != nil {
return err
}
return nil
}
func (csr *CSR) SaveKey(filename string) error {
if err := ioutil.WriteFile(filename, csr.contentKey, 0600); err != nil {
return err
}
return nil
}
func (csr *CSR) String() string {
if csr != nil {
return string(csr.content)
}
return ""
}
func (csr *CSR) KeyString() string {
if csr != nil {
return string(csr.contentKey)
}
return ""
}<|fim▁end|>
| |
<|file_name|>classpath.rs<|end_file_name|><|fim▁begin|>// rustyVM - Java VM written in pure Rust
// Copyright (c) 2013 Alexander Gessler
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
extern mod std;
extern mod extra;
use extra::arc::{Arc};
use std::io::{File,result, IoError};
use std::path::{PosixPath};
pub struct ClassPath {
priv elems : Arc<~[~str]>,
}
impl ClassPath {
// ----------------------------------------------
/** Convert from semicolon-separated list of paths to a ClassPath instance */
pub fn new_from_string(invar : &str) -> ClassPath
{
// current folder is always included
let mut v = ~[~"."];
// TODO: how to construct a vector directly from an iter?
for s in invar.split_str(";")
.map(|s : &str| { s.trim().to_owned() })
.filter(|s : &~str| {s.len() > 0}){
v.push(s);
}
ClassPath {
elems : Arc::new(v)
}
}
<|fim▁hole|> {
return self.elems.get();
}
// ----------------------------------------------
/** Locate a given class (given by fully qualified name) and return
* the bytes of its classfile. */
pub fn locate_and_read(&self, name : &str) -> Option<~[u8]>
{
let cname = name.to_owned();
let pname = cname.replace(&".", "/") + ".class";
for path in self.elems.get().iter() {
match result(|| {
let p = *path + "/" + pname;
debug!("locate class {}, trying path {}", cname, p);
File::open(&PosixPath::new(p)).read_to_end()
}) {
                Err(_) => continue,
Ok(bytes) => {
debug!("found .class file");
return Some(bytes)
}
};
}
return None
}
}
impl Clone for ClassPath {
fn clone(&self) -> ClassPath {
ClassPath {
elems : self.elems.clone()
}
}
}
#[cfg(test)]
mod tests {
use classpath::*;
#[test]
fn test_class_path_decomposition() {
let cp = ClassPath::new_from_string("~/some/other/bar; /bar/baz;dir ;");
assert_eq!(*cp.get_paths(),~[~".",~"~/some/other/bar", ~"/bar/baz", ~"dir"]);
assert_eq!(*cp.get_paths(),~[~".",~"~/some/other/bar", ~"/bar/baz", ~"dir"]);
}
}<|fim▁end|>
|
// ----------------------------------------------
pub fn get_paths<'a>(&'a self) -> &'a ~[~str]
|
<|file_name|>cms_plugins.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import get_language, to_locale
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .conf import settings
from .models import FacebookComments
<|fim▁hole|> module = settings.DJANGOCMS_FBCOMMENTS_PLUGIN_MODULE
name = settings.DJANGOCMS_FBCOMMENTS_PLUGIN_NAME
model = FacebookComments
render_template = settings.DJANGOCMS_FBCOMMENTS_PLUGIN_TEMPLATE
def render(self, context, instance, placeholder):
context = super(FacebookCommentsPlugin, self).render(context, instance, placeholder)
request = context.get('request')
context['language_code'] = to_locale(get_language())
context['page_url'] = request.build_absolute_uri(location=request.path_info)
return context
class Media:
css = {
'all': ('css/djangocms_fbcomments/admin/djangocms_fbcomments.css',)
}
js = ('js/djangocms_fbcomments/admin/djangocms_fbcomments.js',)
plugin_pool.register_plugin(FacebookCommentsPlugin)<|fim▁end|>
|
class FacebookCommentsPlugin(CMSPluginBase):
|
<|file_name|>course.py<|end_file_name|><|fim▁begin|>import json
import os
import re
import shutil
import xmltodict
import zipfile
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import MultipleObjectsReturned
from django.db.models import Q
from django.http import HttpResponse, Http404
from django.utils.translation import ugettext_lazy as _
from tastypie import fields
from tastypie.authentication import ApiKeyAuthentication, Authentication
from tastypie.authorization import ReadOnlyAuthorization, Authorization
from tastypie.resources import ModelResource
from tastypie.utils import trailing_slash
from api.serializers import CourseJSONSerializer
from oppia.models import Tracker, Course, CourseCategory
from oppia.signals import course_downloaded
STR_COURSE_NOT_FOUND = _(u"Course not found")
def get_course_from_shortname(resource, bundle, lookup):
object_list = resource.apply_filters(bundle.request,
{'shortname': lookup})
if len(object_list) <= 0:
raise resource._meta.object_class.DoesNotExist(
"Couldn't find an course with shortname '%s'." % (lookup))
elif len(object_list) > 1:
raise MultipleObjectsReturned(
"More than one course with shortname '%s'." % (lookup))
return object_list
class CourseResource(ModelResource):<|fim▁hole|> queryset = Course.objects.all()
resource_name = 'course'
allowed_methods = ['get']
fields = ['id',
'title',
'version',
'shortname',
'priority',
'is_draft',
'description',
'author',
'username',
'organisation']
authentication = ApiKeyAuthentication()
authorization = ReadOnlyAuthorization()
serializer = CourseJSONSerializer()
always_return_data = True
include_resource_uri = True
def obj_get(self, bundle, **kwargs):
"""
Overriden get method to perform a direct lookup if we are searching
by shortname instead of pk
"""
lookup = kwargs[self._meta.detail_uri_name]
if re.search('[a-zA-Z]', lookup):
object_list = get_course_from_shortname(self, bundle, lookup)
bundle.obj = object_list[0]
self.authorized_read_detail(object_list, bundle)
return bundle.obj
else:
return super().obj_get(bundle, **kwargs)
def get_object_list(self, request):
if request.user.is_staff:
return Course.objects.filter(is_archived=False) \
.order_by('-priority', 'title')
else:
return Course.objects.filter(is_archived=False) \
.filter(
Q(is_draft=False) |
(Q(is_draft=True) & Q(user=request.user))) \
.order_by('-priority', 'title')
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/download%s$"
% (self._meta.resource_name, trailing_slash()),
self.wrap_view('download_course'), name="api_download_course"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/activity%s$"
% (self._meta.resource_name, trailing_slash()),
self.wrap_view('download_activity'),
name="api_download_activity"),
]
def get_course(self, request, **kwargs):
self.is_authenticated(request)
self.throttle_check(request)
pk = kwargs.pop('pk', None)
try:
if request.user.is_staff:
course = self._meta.queryset.get(pk=pk, is_archived=False)
else:
course = self._meta.queryset \
.filter(
Q(is_draft=False) |
(Q(is_draft=True) & Q(user=request.user)) |
(Q(is_draft=True)
& Q(coursepermissions__user=request.user))) \
.distinct().get(pk=pk, is_archived=False)
except Course.DoesNotExist:
raise Http404(STR_COURSE_NOT_FOUND)
except ValueError:
try:
if request.user.is_staff:
course = self._meta.queryset.get(shortname=pk,
is_archived=False)
else:
course = self._meta.queryset \
.filter(
Q(is_draft=False) |
(Q(is_draft=True) & Q(user=request.user)) |
(Q(is_draft=True)
& Q(coursepermissions__user=request.user))) \
.distinct().get(shortname=pk, is_archived=False)
except Course.DoesNotExist:
raise Http404(STR_COURSE_NOT_FOUND)
return course
def download_course(self, request, **kwargs):
course = self.get_course(request, **kwargs)
file_to_download = course.getAbsPath()
has_completed_trackers = Tracker.has_completed_trackers(course,
request.user)
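        # When completed trackers exist, append tracker.xml to a per-user
        # copy in a temp directory so the shared course zip on disk is left
        # untouched.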
try:
if has_completed_trackers:
file_to_download = os.path.join(
settings.COURSE_UPLOAD_DIR,
"temp",
str(request.user.id) + "-" + course.filename)
shutil.copy2(course.getAbsPath(), file_to_download)
course_zip = zipfile.ZipFile(file_to_download, 'a')
if has_completed_trackers:
course_zip.writestr(course.shortname + "/tracker.xml",
Tracker.to_xml_string(course,
request.user))
course_zip.close()
binary_file = open(file_to_download, 'rb')
response = HttpResponse(binary_file.read(),
content_type='application/zip')
binary_file.close()
response['Content-Length'] = os.path.getsize(file_to_download)
response['Content-Disposition'] = \
'attachment; filename="%s"' % (course.filename)
except IOError:
raise Http404(STR_COURSE_NOT_FOUND)
course_downloaded.send(sender=self, course=course, request=request)
return response
def download_activity(self, request, **kwargs):
course = self.get_course(request, **kwargs)
return HttpResponse(Tracker.to_xml_string(course,
request.user),
content_type='text/xml')
def dehydrate(self, bundle):
bundle.data['url'] = bundle.request.build_absolute_uri(
bundle.data['resource_uri'] + 'download/')
        # make sure title is shown as a JSON object (not the string
        # representation of one)
bundle.data['title'] = json.loads(bundle.data['title'])
try:
bundle.data['description'] = json.loads(bundle.data['description'])
except json.JSONDecodeError:
pass
course = Course.objects.get(pk=bundle.obj.pk)
if course and course.user:
bundle.data['author'] = course.user.first_name \
+ " " \
+ course.user.last_name
bundle.data['username'] = course.user.username
bundle.data['organisation'] = course.user.userprofile.organisation
return bundle
class CourseCategoryResource(ModelResource):
course = fields.ToOneField('api.resource.course.CourseResource',
'course',
full=True)
class Meta:
queryset = CourseCategory.objects.all()
allowed_methods = ['get']
resource_name = 'coursetag'
fields = ['id', 'course', 'category']
include_resource_uri = False
authentication = ApiKeyAuthentication()
authorization = ReadOnlyAuthorization()
always_return_data = True
class CourseStructureResource(ModelResource):
class Meta:
queryset = Course.objects.filter(is_draft=False, is_archived=False)
resource_name = 'coursestructure'
allowed_methods = ['get']
fields = ['shortname',
'id',
'structure']
authentication = Authentication()
authorization = Authorization()
serializer = CourseJSONSerializer()
always_return_data = True
include_resource_uri = True
def obj_get(self, bundle, **kwargs):
"""
Overriden get method to perform a direct lookup if we are searching
by shortname instead of pk
"""
lookup = kwargs[self._meta.detail_uri_name]
if re.search('[a-zA-Z]', lookup):
object_list = get_course_from_shortname(self, bundle, lookup)
return_obj = object_list[0]
else:
return_obj = super().obj_get(bundle, **kwargs)
# check the module.xml is on disk
path = os.path.join(settings.MEDIA_ROOT,
'courses',
return_obj.shortname,
'module.xml')
if not os.path.isfile(path):
raise self._meta.object_class.DoesNotExist()
return return_obj
def dehydrate(self, bundle):
path = os.path.join(settings.MEDIA_ROOT,
'courses',
bundle.obj.shortname,
'module.xml')
with open(path) as fd:
doc = xmltodict.parse(fd.read())
bundle.data['structure'] = json.dumps(doc)
return bundle<|fim▁end|>
|
class Meta:
|
<|file_name|>users.js<|end_file_name|><|fim▁begin|>// Constants
export const USERS_INCREMENT = 'USERS_INCREMENT'
export const USERS_DOUBLE_ASYNC = 'USERS_DOUBLE_ASYNC'<|fim▁hole|>// Actions
export function increment(value = 1) {
return {
type: USERS_INCREMENT,
payload: value
}
}
export const doubleAsync = () => {
return (dispatch, getState) => {
return new Promise((resolve) => {
setTimeout(() => {
dispatch({
type: USERS_DOUBLE_ASYNC,
payload: getState().users
})
resolve()
}, 200)
})
}
}
export const actions = {
increment,
doubleAsync
}
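// Illustrative usage with a configured store (store wiring assumed):
//   store.dispatch(increment(2))   // users: 0 -> 2
//   store.dispatch(doubleAsync())  // users: 2 -> 4 after the 200ms timeout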
// Action Handlers
const ACTION_HANDLERS = {
[USERS_INCREMENT]: (state, action) => state + action.payload,
[USERS_DOUBLE_ASYNC]: (state) => state * 2
}
// Reducer
const initialState = 0
export default function usersReducer(state = initialState, action) {
const handler = ACTION_HANDLERS[action.type]
return handler ? handler(state, action) : state
}<|fim▁end|>
| |
<|file_name|>camera.rs<|end_file_name|><|fim▁begin|>use glfw;
use std::uint;
use std::libc::c_int;
use std::hashmap::HashMap;
use math::{Vec3, Mat4};
use gl::Mesh;
use gl::shader::{Shader, AttribLocation, UniformLocation};
use glfw::Window;
pub struct Camera {
window: ~Window,
translation: Vec3<f32>,
rotation: Vec3<f32>,
eye: Vec3<f32>,
center: Vec3<f32>,
up: Vec3<f32>,
fovy: f32,
aspect: f32,
z_near: f32,
z_far: f32,
program: ~Shader,<|fim▁hole|> uniforms: ~HashMap<~str, UniformLocation>,
meshes: ~[Mesh],
}
impl Camera {
pub fn new(window: ~Window, shader_name: &str) -> Camera {
let (width, height) = window.get_size();
let program = Shader::from_files(fmt!("%s.v.glsl", shader_name), fmt!("%s.f.glsl", shader_name));
let zero_vec = Vec3::new(0.0f32, 0.0, 0.0);
let mut attribs = HashMap::new();
attribs.insert(~"v_coord", program.get_attrib_location("v_coord"));
attribs.insert(~"v_normal", program.get_attrib_location("v_normal"));
attribs.insert(~"v_color", program.get_attrib_location("v_color"));
let mut uniforms = HashMap::new();
uniforms.insert(~"m_orig", program.get_uniform_location("m_orig"));
uniforms.insert(~"m", program.get_uniform_location("m"));
uniforms.insert(~"v", program.get_uniform_location("v"));
uniforms.insert(~"p", program.get_uniform_location("p"));
uniforms.insert(~"m_inv_transp", program.get_uniform_location("m_inv_transp"));
Camera {
window: window,
translation: zero_vec.clone(),
rotation: zero_vec.clone(),
eye: zero_vec.clone(),
center: zero_vec.clone(),
up: zero_vec.clone(),
fovy: 0.0,
aspect: (width as f32) / (height as f32),
z_near: 0.0,
z_far: 0.0,
program: ~program,
attribs: ~attribs,
uniforms: ~uniforms,
meshes: ~[],
}
}
pub fn translate(&mut self, translation: Vec3<f32>) {
self.translation = self.translation + translation;
}
pub fn rotate(&mut self, x: f32, y: f32, z: f32) {
self.rotation = self.rotation + Vec3::new(x, y, z);
}
pub fn calc_model(&self) -> Mat4<f32> {
let mut mat = Mat4::ident().translate(self.translation);
mat = mat.rotate(self.rotation.x, Vec3::new(1.0, 0.0, 0.0));
mat = mat.rotate(self.rotation.y, Vec3::new(0.0, 1.0, 0.0));
mat = mat.rotate(self.rotation.z, Vec3::new(0.0, 0.0, 1.0));
mat
}
pub fn look_at(&mut self, eye: Vec3<f32>, center: Vec3<f32>, up: Vec3<f32>) {
self.eye = eye;
self.center = center;
self.up = up;
}
pub fn calc_view(&self) -> Mat4<f32> {
let f = (self.center - self.eye).normalize();
let s = f.cross(&self.up.normalize()).normalize();
let u = s.cross(&f);
let mut result = Mat4::from_elem(1.0f32);
result.data[0][0] = s.x.clone();
result.data[1][0] = s.y.clone();
result.data[2][0] = s.z.clone();
result.data[0][1] = u.x.clone();
result.data[1][1] = u.y.clone();
result.data[2][1] = u.z.clone();
result.data[0][2] = -f.x.clone();
result.data[1][2] = -f.y.clone();
result.data[2][2] = -f.z.clone();
result.data[3][0] = -s.dot(&self.eye).clone();
result.data[3][1] = -u.dot(&self.eye).clone();
result.data[3][2] = f.dot(&self.eye).clone();
result
}
pub fn perspective(&mut self, fovy: f32, z_near: f32, z_far: f32) {
self.fovy = fovy;
self.z_near = z_near;
self.z_far = z_far;
}
pub fn calc_projection(&self) -> Mat4<f32> {
let tan_half_fovy = (self.fovy / 2.0).tan();
let mut result = Mat4::from_elem(0.0f32);
result.data[0][0] = 1.0 / (self.aspect * tan_half_fovy);
result.data[1][1] = 1.0 / tan_half_fovy;
result.data[2][2] = - (self.z_far + self.z_near) / (self.z_far - self.z_near);
result.data[2][3] = -1.0;
result.data[3][2] = - (2.0 * self.z_far * self.z_near) / (self.z_far - self.z_near);
result
}
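    // Illustrative setup (hypothetical values): roughly a 45 degree vertical
    // field of view in radians, with near/far clip planes at 0.1 and 100.0:
    //
    //     camera.perspective(0.785, 0.1, 100.0);
    //     let projection = camera.calc_projection();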
pub fn add_mesh(&mut self, mesh: Mesh) {
self.meshes.push(mesh);
}
pub fn draw(&mut self) {
let model = self.calc_model();
let view = self.calc_view();
let projection = self.calc_projection();
self.uniforms.find_equiv(&("v")).get().update_mat4_f32(view);
self.uniforms.find_equiv(&("p")).get().update_mat4_f32(projection);
for uint::range(0, self.meshes.len()) |i| {
if !self.meshes[i].uploaded() { self.meshes[i].upload(); }
self.meshes[i].draw(model, self.attribs, self.uniforms);
}
self.window.swap_buffers();
}
pub fn is_key_down(&self, key: c_int) -> bool {
match self.window.get_key(key) {
glfw::PRESS => true,
_ => false,
}
}
pub fn resize(&mut self, _size: (int, int)) {
//let (width, height) = size;
}
pub fn should_close(&self) -> bool {
self.window.should_close()
}
}<|fim▁end|>
|
attribs: ~HashMap<~str, AttribLocation>,
|
<|file_name|>statistics.cpp<|end_file_name|><|fim▁begin|>/*
* (c)2016-2017, Cris Luengo.
* Based on original DIPlib code: (c)1995-2014, Delft University of Technology.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "diplib.h"
#include "diplib/statistics.h"
#include "diplib/math.h"
#include "diplib/framework.h"
#include "diplib/overload.h"
namespace dip {
namespace {
class CountLineFilter : public Framework::ScanLineFilter {
public:
virtual dip::uint GetNumberOfOperations( dip::uint, dip::uint, dip::uint ) override { return 2; }
virtual void Filter( Framework::ScanLineFilterParameters const& params ) override {
bin const* in = static_cast< bin const* >( params.inBuffer[ 0 ].buffer );
dip::uint count = 0;
auto bufferLength = params.bufferLength;
auto inStride = params.inBuffer[ 0 ].stride;
if( params.inBuffer.size() > 1 ) {
// If there's two input buffers, we have a mask image.
auto maskStride = params.inBuffer[ 1 ].stride;
bin const* mask = static_cast< bin const* >( params.inBuffer[ 1 ].buffer );
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *mask && *in ) {
++count;
}
in += inStride;
mask += maskStride;
}
} else {
// Otherwise we don't.
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *in ) {
++count;
}
in += inStride;
}
}
counts_[ params.thread ] += count;
}
virtual void SetNumberOfThreads( dip::uint threads ) override {
counts_.resize( threads );
}
dip::uint GetResult() {
dip::uint out = counts_[ 0 ];
for( dip::uint ii = 1; ii < counts_.size(); ++ii ) {
out += counts_[ ii ];
}
return out;
}
private:
std::vector< dip::uint > counts_;
};
} // namespace
dip::uint Count(
Image const& in,
Image const& mask
) {
DIP_THROW_IF( !in.IsForged(), E::IMAGE_NOT_FORGED );
DIP_THROW_IF( !in.IsScalar(), E::IMAGE_NOT_SCALAR );
CountLineFilter scanLineFilter;
DIP_STACK_TRACE_THIS( Framework::ScanSingleInput( in, mask, DT_BIN, scanLineFilter ));
return scanLineFilter.GetResult();
}
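// Illustrative usage (image names assumed): count the set pixels of a scalar
// binary image, optionally restricted to a mask of the same sizes:
//
//    dip::uint n = dip::Count( binaryImage, mask );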
namespace {
class MaxMinPixelLineFilter : public Framework::ScanLineFilter {
public:
virtual UnsignedArray GetResult() = 0;
};
template< typename TPI >
class MaxPixelLineFilter : public MaxMinPixelLineFilter {
public:
virtual dip::uint GetNumberOfOperations( dip::uint, dip::uint, dip::uint ) override { return 2; }
virtual void Filter( Framework::ScanLineFilterParameters const& params ) override {
TPI const* in = static_cast< TPI const* >( params.inBuffer[ 0 ].buffer );
UnsignedArray coord( params.position.size() );
TPI value = std::numeric_limits< TPI >::lowest();
auto bufferLength = params.bufferLength;
auto inStride = params.inBuffer[ 0 ].stride;
if( params.inBuffer.size() > 1 ) {
// If there's two input buffers, we have a mask image.
auto maskStride = params.inBuffer[ 1 ].stride;
bin const* mask = static_cast< bin const* >( params.inBuffer[ 1 ].buffer );
if( first_ ) {
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *mask && ( *in > value )) {
value = *in;
coord = params.position;
coord[ params.dimension ] += ii;
}
in += inStride;
mask += maskStride;
}
} else {
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *mask && ( *in >= value )) {
value = *in;
coord = params.position;
coord[ params.dimension ] += ii;
}
in += inStride;
mask += maskStride;
}
}
} else {
// Otherwise we don't.
if( first_ ) {<|fim▁hole|> if( *in > value ) {
value = *in;
coord = params.position;
coord[ params.dimension ] += ii;
}
in += inStride;
}
} else {
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *in >= value ) {
value = *in;
coord = params.position;
coord[ params.dimension ] += ii;
}
in += inStride;
}
}
}
if( coord_[ params.thread ].empty() ) {
// Ensure we always have something in `coord_`, even if the whole image is NaN.
value_[ params.thread ] = value;
coord_[ params.thread ] = coord;
} else {
if( first_ ) {
if( value > value_[ params.thread ] ) {
value_[ params.thread ] = value;
coord_[ params.thread ] = coord;
}
} else {
if( value >= value_[ params.thread ] ) {
value_[ params.thread ] = value;
coord_[ params.thread ] = coord;
}
}
}
}
virtual void SetNumberOfThreads( dip::uint threads ) override {
coord_.resize( threads );
value_.resize( threads, std::numeric_limits< TPI >::lowest() );
}
MaxPixelLineFilter( bool first ) : first_( first ) {}
virtual UnsignedArray GetResult() override {
dip::uint index = 0;
for( dip::uint ii = 1; ii < coord_.size(); ++ii ) {
if( first_ ? value_[ ii ] > value_[ index ] : value_[ ii ] >= value_[ index ] ) {
index = ii;
}
}
return coord_[ index ];
}
private:
std::vector< UnsignedArray > coord_;
std::vector< TPI > value_;
bool first_;
};
template< typename TPI >
class MinPixelLineFilter : public MaxMinPixelLineFilter {
public:
virtual dip::uint GetNumberOfOperations( dip::uint, dip::uint, dip::uint ) override { return 2; }
virtual void Filter( Framework::ScanLineFilterParameters const& params ) override {
TPI const* in = static_cast< TPI const* >( params.inBuffer[ 0 ].buffer );
UnsignedArray coord( params.position.size() );
TPI value = std::numeric_limits< TPI >::max();
auto bufferLength = params.bufferLength;
auto inStride = params.inBuffer[ 0 ].stride;
if( params.inBuffer.size() > 1 ) {
// If there's two input buffers, we have a mask image.
auto maskStride = params.inBuffer[ 1 ].stride;
bin const* mask = static_cast< bin const* >( params.inBuffer[ 1 ].buffer );
if( first_ ) {
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *mask && ( *in < value )) {
value = *in;
coord = params.position;
coord[ params.dimension ] += ii;
}
in += inStride;
mask += maskStride;
}
} else {
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *mask && ( *in <= value )) {
value = *in;
coord = params.position;
coord[ params.dimension ] += ii;
}
in += inStride;
mask += maskStride;
}
}
} else {
// Otherwise we don't.
if( first_ ) {
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *in < value ) {
value = *in;
coord = params.position;
coord[ params.dimension ] += ii;
}
in += inStride;
}
} else {
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *in <= value ) {
value = *in;
coord = params.position;
coord[ params.dimension ] += ii;
}
in += inStride;
}
}
}
if( coord_[ params.thread ].empty() ) {
// Ensure we always have something in `coord_`, even if the whole image is NaN.
value_[ params.thread ] = value;
coord_[ params.thread ] = coord;
} else {
if( first_ ) {
if( value < value_[ params.thread ] ) {
value_[ params.thread ] = value;
coord_[ params.thread ] = coord;
}
} else {
if( value <= value_[ params.thread ] ) {
value_[ params.thread ] = value;
coord_[ params.thread ] = coord;
}
}
}
}
virtual void SetNumberOfThreads( dip::uint threads ) override {
coord_.resize( threads );
value_.resize( threads, std::numeric_limits< TPI >::max() );
}
MinPixelLineFilter( bool first ) : first_( first ) {}
virtual UnsignedArray GetResult() override {
dip::uint index = 0;
for( dip::uint ii = 1; ii < coord_.size(); ++ii ) {
if( first_ ? value_[ ii ] < value_[ index ] : value_[ ii ] <= value_[ index ] ) {
index = ii;
}
}
return coord_[ index ];
}
private:
std::vector< UnsignedArray > coord_;
std::vector< TPI > value_;
bool first_;
};
} // namespace
UnsignedArray MaximumPixel( Image const& in, Image const& mask, String const& positionFlag ) {
DIP_THROW_IF( !in.IsForged(), E::IMAGE_NOT_FORGED );
DIP_THROW_IF( !in.IsScalar(), E::IMAGE_NOT_SCALAR );
bool first;
DIP_STACK_TRACE_THIS( first = BooleanFromString( positionFlag, S::FIRST, S::LAST ));
DataType dataType = DataType::SuggestReal( in.DataType() );
std::unique_ptr< MaxMinPixelLineFilter > scanLineFilter;
DIP_OVL_NEW_REAL( scanLineFilter, MaxPixelLineFilter, ( first ), dataType );
DIP_STACK_TRACE_THIS( Framework::ScanSingleInput( in, mask, dataType, *scanLineFilter,
Framework::ScanOption::NeedCoordinates ));
return scanLineFilter->GetResult();
}
UnsignedArray MinimumPixel( Image const& in, Image const& mask, String const& positionFlag ) {
DIP_THROW_IF( !in.IsForged(), E::IMAGE_NOT_FORGED );
DIP_THROW_IF( !in.IsScalar(), E::IMAGE_NOT_SCALAR );
bool first;
DIP_STACK_TRACE_THIS( first = BooleanFromString( positionFlag, S::FIRST, S::LAST ));
DataType dataType = DataType::SuggestReal( in.DataType() );
std::unique_ptr< MaxMinPixelLineFilter > scanLineFilter;
DIP_OVL_NEW_REAL( scanLineFilter, MinPixelLineFilter, ( first ), dataType );
DIP_STACK_TRACE_THIS( Framework::ScanSingleInput( in, mask, dataType, *scanLineFilter,
Framework::ScanOption::NeedCoordinates ));
return scanLineFilter->GetResult();
}
namespace {
template< typename TPI >
class CumSumFilter : public Framework::SeparableLineFilter {
public:
virtual dip::uint GetNumberOfOperations( dip::uint lineLength, dip::uint, dip::uint, dip::uint ) override {
return lineLength;
}
virtual void Filter( Framework::SeparableLineFilterParameters const& params ) override {
TPI* in = static_cast< TPI* >( params.inBuffer.buffer );
dip::uint length = params.inBuffer.length;
dip::sint inStride = params.inBuffer.stride;
TPI* out = static_cast< TPI* >( params.outBuffer.buffer );
dip::sint outStride = params.outBuffer.stride;
TPI sum = 0;
for( dip::uint ii = 0; ii < length; ++ii ) {
sum += *in;
*out = sum;
in += inStride;
out += outStride;
}
}
};
} // namespace
void CumulativeSum(
Image const& in,
Image const& mask,
Image& out,
BooleanArray const& process
) {
DIP_THROW_IF( !in.IsForged(), E::IMAGE_NOT_FORGED );
DIP_THROW_IF( in.Dimensionality() < 1, E::DIMENSIONALITY_NOT_SUPPORTED );
DataType dataType = DataType::SuggestFlex( in.DataType() );
std::unique_ptr< Framework::SeparableLineFilter > lineFilter;
DIP_OVL_NEW_FLEX( lineFilter, CumSumFilter, (), dataType );
if( mask.IsForged() ) {
Select( in, Image( 0, dataType ), mask, out );
DIP_STACK_TRACE_THIS( Framework::Separable( out, out, dataType, dataType, process, { 0 }, {}, *lineFilter,
Framework::SeparableOption::AsScalarImage ));
} else {
DIP_STACK_TRACE_THIS( Framework::Separable( in, out, dataType, dataType, process, { 0 }, {}, *lineFilter,
Framework::SeparableOption::AsScalarImage ));
}
}
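// Illustrative usage (names assumed): cumulative sum of a 2D image along the
// first dimension only, with no mask:
//
//    dip::Image out;
//    dip::CumulativeSum( in, {}, out, { true, false } );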
namespace {
class MaximumAndMinimumLineFilterBase : public Framework::ScanLineFilter {
public:
virtual MinMaxAccumulator GetResult() = 0;
};
template< typename TPI >
class MaximumAndMinimumLineFilter : public MaximumAndMinimumLineFilterBase {
public:
virtual dip::uint GetNumberOfOperations( dip::uint, dip::uint, dip::uint ) override { return 3; }
virtual void Filter( Framework::ScanLineFilterParameters const& params ) override {
TPI const* in = static_cast< TPI const* >( params.inBuffer[ 0 ].buffer );
MinMaxAccumulator vars;
auto bufferLength = params.bufferLength;
auto inStride = params.inBuffer[ 0 ].stride;
if( params.inBuffer.size() > 1 ) {
// If there's two input buffers, we have a mask image.
auto maskStride = params.inBuffer[ 1 ].stride;
bin const* mask = static_cast< bin const* >( params.inBuffer[ 1 ].buffer );
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *mask ) {
vars.Push( static_cast< dfloat >( *in ));
}
in += inStride;
mask += maskStride;
}
} else {
// Otherwise we don't.
dip::uint ii = 0;
for( ; ii < bufferLength - 1; ii += 2 ) {
TPI v = *in;
in += inStride;
vars.Push( static_cast< dfloat >( v ), static_cast< dfloat >( *in ));
in += inStride;
}
if( ii < bufferLength ) {
vars.Push( static_cast< dfloat >( *in ));
}
}
accArray_[ params.thread ] += vars;
}
virtual void SetNumberOfThreads( dip::uint threads ) override {
accArray_.resize( threads );
}
virtual MinMaxAccumulator GetResult() override {
MinMaxAccumulator out = accArray_[ 0 ];
for( dip::uint ii = 1; ii < accArray_.size(); ++ii ) {
out += accArray_[ ii ];
}
return out;
}
private:
std::vector< MinMaxAccumulator > accArray_;
};
} // namespace
MinMaxAccumulator MaximumAndMinimum(
Image const& in,
Image const& mask
) {
DIP_THROW_IF( !in.IsForged(), E::IMAGE_NOT_FORGED );
// In case of complex images, separate them as a new dimension.
Image c_in = in.QuickCopy();
if( c_in.DataType().IsComplex() ) {
c_in.SplitComplex();
// Note that mask will be singleton-expanded, which allows adding dimensions at the end.
}
std::unique_ptr< MaximumAndMinimumLineFilterBase > scanLineFilter;
DIP_OVL_NEW_NONCOMPLEX( scanLineFilter, MaximumAndMinimumLineFilter, (), c_in.DataType() );
DIP_STACK_TRACE_THIS( Framework::ScanSingleInput( c_in, mask, c_in.DataType(), *scanLineFilter,
Framework::ScanOption::TensorAsSpatialDim ));
return scanLineFilter->GetResult();
}
namespace {
class SampleStatisticsLineFilterBase : public Framework::ScanLineFilter {
public:
virtual StatisticsAccumulator GetResult() = 0;
};
template< typename TPI >
class SampleStatisticsLineFilter : public SampleStatisticsLineFilterBase {
public:
virtual dip::uint GetNumberOfOperations( dip::uint, dip::uint, dip::uint ) override { return 23; }
virtual void Filter( Framework::ScanLineFilterParameters const& params ) override {
TPI const* in = static_cast< TPI const* >( params.inBuffer[ 0 ].buffer );
StatisticsAccumulator vars;
auto bufferLength = params.bufferLength;
auto inStride = params.inBuffer[ 0 ].stride;
if( params.inBuffer.size() > 1 ) {
// If there's two input buffers, we have a mask image.
auto maskStride = params.inBuffer[ 1 ].stride;
bin const* mask = static_cast< bin const* >( params.inBuffer[ 1 ].buffer );
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *mask ) {
vars.Push( static_cast< dfloat >( *in ));
}
in += inStride;
mask += maskStride;
}
} else {
// Otherwise we don't.
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
vars.Push( static_cast< dfloat >( *in ));
in += inStride;
}
}
accArray_[ params.thread ] += vars;
}
virtual void SetNumberOfThreads( dip::uint threads ) override {
accArray_.resize( threads );
}
virtual StatisticsAccumulator GetResult() override {
StatisticsAccumulator out = accArray_[ 0 ];
for( dip::uint ii = 1; ii < accArray_.size(); ++ii ) {
out += accArray_[ ii ];
}
return out;
}
private:
std::vector< StatisticsAccumulator > accArray_;
};
} // namespace
StatisticsAccumulator SampleStatistics(
Image const& in,
Image const& mask
) {
DIP_THROW_IF( !in.IsForged(), E::IMAGE_NOT_FORGED );
std::unique_ptr< SampleStatisticsLineFilterBase > scanLineFilter;
DIP_OVL_NEW_REAL( scanLineFilter, SampleStatisticsLineFilter, (), in.DataType() );
DIP_STACK_TRACE_THIS( Framework::ScanSingleInput( in, mask, in.DataType(), *scanLineFilter,
Framework::ScanOption::TensorAsSpatialDim ));
return scanLineFilter->GetResult();
}
namespace {
class CovarianceLineFilterBase : public Framework::ScanLineFilter {
public:
virtual CovarianceAccumulator GetResult() = 0;
};
template< typename TPI >
class CovarianceLineFilter : public CovarianceLineFilterBase {
public:
virtual dip::uint GetNumberOfOperations( dip::uint, dip::uint, dip::uint ) override { return 10; }
virtual void Filter( Framework::ScanLineFilterParameters const& params ) override {
TPI const* in1 = static_cast< TPI const* >( params.inBuffer[ 0 ].buffer );
TPI const* in2 = static_cast< TPI const* >( params.inBuffer[ 1 ].buffer );
CovarianceAccumulator vars;
auto bufferLength = params.bufferLength;
auto in1Stride = params.inBuffer[ 0 ].stride;
auto in2Stride = params.inBuffer[ 1 ].stride;
if( params.inBuffer.size() > 2 ) {
// If there's three input buffers, we have a mask image.
auto maskStride = params.inBuffer[ 2 ].stride;
bin const* mask = static_cast< bin const* >( params.inBuffer[ 2 ].buffer );
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *mask ) {
vars.Push( static_cast< dfloat >( *in1 ), static_cast< dfloat >( *in2 ));
}
in1 += in1Stride;
in2 += in2Stride;
mask += maskStride;
}
} else {
// Otherwise we don't.
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
vars.Push( static_cast< dfloat >( *in1 ), static_cast< dfloat >( *in2 ));
in1 += in1Stride;
in2 += in2Stride;
}
}
accArray_[ params.thread ] += vars;
}
virtual void SetNumberOfThreads( dip::uint threads ) override {
accArray_.resize( threads );
}
virtual CovarianceAccumulator GetResult() override {
CovarianceAccumulator out = accArray_[ 0 ];
for( dip::uint ii = 1; ii < accArray_.size(); ++ii ) {
out += accArray_[ ii ];
}
return out;
}
private:
std::vector< CovarianceAccumulator > accArray_;
};
} // namespace
CovarianceAccumulator Covariance(
Image const& in1,
Image const& in2,
Image const& c_mask ) {
DIP_THROW_IF( !in1.IsForged() || !in2.IsForged(), E::IMAGE_NOT_FORGED );
DIP_STACK_TRACE_THIS( in1.CompareProperties( in2, Option::CmpProp::AllSizes ));
DataType ovlDataType = DataType::SuggestDyadicOperation( in1.DataType(), in2.DataType() );
ImageConstRefArray inar;
inar.reserve( 3 );
inar.push_back( in1 );
inar.push_back( in2 );
DataTypeArray inBufT{ ovlDataType, ovlDataType };
Image mask;
if( c_mask.IsForged() ) {
// If we have a mask, add it to the input array.
mask = c_mask.QuickCopy();
DIP_START_STACK_TRACE
mask.CheckIsMask( in1.Sizes(), Option::AllowSingletonExpansion::DO_ALLOW, Option::ThrowException::DO_THROW );
mask.ExpandSingletonDimensions( in1.Sizes() );
DIP_END_STACK_TRACE
inar.push_back( mask );
inBufT.push_back( mask.DataType() );
}
ImageRefArray outar{};
std::unique_ptr< CovarianceLineFilterBase > scanLineFilter;
DIP_OVL_NEW_REAL( scanLineFilter, CovarianceLineFilter, (), ovlDataType );
DIP_STACK_TRACE_THIS( Framework::Scan( inar, outar, inBufT, {}, {}, {}, *scanLineFilter,
Framework::ScanOption::TensorAsSpatialDim ));
return scanLineFilter->GetResult();
}
namespace {
template< typename TPI >
std::vector< dip::uint > ComputeRank( void const* ptr, std::vector< dip::uint >& indices ) {
// First sort the indices
// NOTE!!! The indices must be contiguous, starting at 0, and with max_element(indices) == indices.size()-1.
TPI const* data = static_cast< TPI const* >( ptr );
std::sort( indices.begin(), indices.end(), [ & ]( dip::uint const& a, dip::uint const& b ) {
return data[ a ] < data[ b ];
} );
// Next find the ranks
std::vector< dip::uint > rank( indices.size() );
for( dip::uint ii = 0; ii < indices.size(); ++ii ) {
// Identify the equal-valued pixels
dip::uint rr = ii + 1;
while(( rr < indices.size()) && ( data[ indices[ rr ]] == data[ indices[ ii ]] )) {
++rr;
}
// Assign the mean rank to all these pixels
dip::uint mean = ( rr + ii - 1 ) / 2;
for( dip::uint jj = ii; jj < rr; ++jj ) {
rank[ indices[ jj ]] = mean;
}
// Advance to next group of equal-valued pixels
ii = rr - 1;
}
return rank;
}
std::vector< dip::uint > CreateRankArray( Image const& img ) {
DIP_ASSERT( img.HasContiguousData() );
// Create indices array to each sample in the image
std::vector< dip::uint > indices( img.Sizes().product() * img.TensorElements() );
std::iota( indices.begin(), indices.end(), dip::uint( 0 ));
// Get the rank for each pixel
std::vector< dip::uint > rank;
DIP_OVL_CALL_ASSIGN_REAL( rank, ComputeRank, ( img.Origin(), indices ), img.DataType() );
return rank;
}
} // namespace
dfloat SpearmanRankCorrelation( Image const& in1, Image const& in2, Image const& mask ) {
DIP_THROW_IF( !in1.IsForged() || !in2.IsForged(), E::IMAGE_NOT_FORGED );
DIP_STACK_TRACE_THIS( in1.CompareProperties( in2, Option::CmpProp::AllSizes ));
// Get the data in normal stride order. We need the data to be contiguous and the two images to have
// the same strides. This is a simple way of accomplishing that.
Image in1_c;
Image in2_c;
if( mask.IsForged() ) {
DIP_START_STACK_TRACE
in1_c = in1.At( mask );
in2_c = in2.At( mask );
DIP_END_STACK_TRACE
} else {
in1_c = in1.QuickCopy();
in2_c = in2.QuickCopy();
}
in1_c.ForceNormalStrides(); // Might copy the data, but if we already copied it (through `mask`) it won't need to,
in2_c.ForceNormalStrides(); // so we're guaranteed to copy the image data at most once.
// Find the rank for each pixel
auto idx1 = CreateRankArray( in1_c );
auto idx2 = CreateRankArray( in2_c );
// Now compute correlation between the two sorted index arrays.
// We're not using the cheaper formula because we're not guaranteed a unique sort order (some pixels can have
// the same value).
CovarianceAccumulator vars;
for( auto it1 = idx1.begin(), it2 = idx2.begin(); it1 != idx1.end(); ++it1, ++it2 ) {
vars.Push( static_cast< dfloat >( *it1 ), static_cast< dfloat >( *it2 ));
}
return vars.Correlation();
}
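// Illustrative usage (names assumed): the returned coefficient lies in
// [-1, 1]; tied pixel values receive their mean rank, as computed above.
//
//    dip::dfloat rho = dip::SpearmanRankCorrelation( in1, in2, mask );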
namespace {
class CenterOfMassLineFilterBase : public Framework::ScanLineFilter {
public:
virtual FloatArray GetResult() = 0;
};
template< typename TPI >
class CenterOfMassLineFilter : public CenterOfMassLineFilterBase {
public:
virtual dip::uint GetNumberOfOperations( dip::uint, dip::uint, dip::uint ) override { return nD_ + 1; }
virtual void Filter( Framework::ScanLineFilterParameters const& params ) override {
TPI const* in = static_cast< TPI const* >( params.inBuffer[ 0 ].buffer );
FloatArray vars( nD_ + 1, 0.0 );
auto bufferLength = params.bufferLength;
auto inStride = params.inBuffer[ 0 ].stride;
UnsignedArray pos = params.position;
dip::uint procDim = params.dimension;
if( params.inBuffer.size() > 1 ) {
// If there's two input buffers, we have a mask image.
auto maskStride = params.inBuffer[ 1 ].stride;
bin const* mask = static_cast< bin const* >( params.inBuffer[ 1 ].buffer );
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *mask ) {
for( dip::uint jj = 0; jj < nD_; ++jj ) {
vars[ jj ] += static_cast< dfloat >( pos[ jj ] ) * static_cast< dfloat >( *in );
}
vars[ nD_ ] += static_cast< dfloat >( *in );
}
in += inStride;
mask += maskStride;
++( pos[ procDim ] );
}
} else {
// Otherwise we don't.
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
for( dip::uint jj = 0; jj < nD_; ++jj ) {
vars[ jj ] += static_cast< dfloat >( pos[ jj ] ) * static_cast< dfloat >( *in );
}
vars[ nD_ ] += static_cast< dfloat >( *in );
in += inStride;
++( pos[ procDim ] );
}
}
accArray_[ params.thread ] += vars;
}
CenterOfMassLineFilter( dip::uint nD ) : nD_( nD ) {}
virtual void SetNumberOfThreads( dip::uint threads ) override {
accArray_.resize( threads );
for( dip::uint ii = 0; ii < threads; ++ii ) {
accArray_[ ii ].resize( nD_ + 1, 0.0 );
}
}
virtual FloatArray GetResult() override {
FloatArray out = accArray_[ 0 ];
for( dip::uint ii = 1; ii < accArray_.size(); ++ii ) {
out += accArray_[ ii ];
}
dfloat n = out[ nD_ ];
out.resize( nD_ );
if( n != 0 ) {
out /= n;
} else {
out.fill( 0.0 );
}
return out;
}
private:
std::vector< FloatArray > accArray_; // one per thread, each one contains: sum(I*x),sum(I*y),...,sum(I)
dip::uint nD_;
};
} // namespace
FloatArray CenterOfMass(
Image const& in,
Image const& mask
) {
DIP_THROW_IF( !in.IsForged(), E::IMAGE_NOT_FORGED );
DIP_THROW_IF( !in.IsScalar(), E::IMAGE_NOT_SCALAR );
std::unique_ptr< CenterOfMassLineFilterBase > scanLineFilter;
DIP_OVL_NEW_NONCOMPLEX( scanLineFilter, CenterOfMassLineFilter, ( in.Dimensionality() ), in.DataType() );
DIP_STACK_TRACE_THIS( Framework::ScanSingleInput( in, mask, in.DataType(), *scanLineFilter,
Framework::ScanOption::NeedCoordinates ));
return scanLineFilter->GetResult();
}
namespace {
class MomentsLineFilterBase : public Framework::ScanLineFilter {
public:
virtual MomentAccumulator GetResult() = 0;
};
template< typename TPI >
class MomentsLineFilter : public MomentsLineFilterBase {
public:
virtual dip::uint GetNumberOfOperations( dip::uint, dip::uint, dip::uint ) override {
return nD_ * ( nD_ + 1 ) / 2 * 3 + nD_ + 2;
}
virtual void Filter( Framework::ScanLineFilterParameters const& params ) override {
TPI const* in = static_cast< TPI const* >( params.inBuffer[ 0 ].buffer );
MomentAccumulator vars( nD_ );
auto bufferLength = params.bufferLength;
auto inStride = params.inBuffer[ 0 ].stride;
FloatArray pos{ params.position };
dip::uint procDim = params.dimension;
if( params.inBuffer.size() > 1 ) {
// If there's two input buffers, we have a mask image.
auto maskStride = params.inBuffer[ 1 ].stride;
bin const* mask = static_cast< bin const* >( params.inBuffer[ 1 ].buffer );
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
if( *mask ) {
vars.Push( pos, static_cast< dfloat >( *in ));
}
in += inStride;
mask += maskStride;
++( pos[ procDim ] );
}
} else {
// Otherwise we don't.
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
vars.Push( pos, static_cast< dfloat >( *in ));
in += inStride;
++( pos[ procDim ] );
}
}
accArray_[ params.thread ] += vars;
}
MomentsLineFilter( dip::uint nD ) : nD_( nD ) {}
virtual void SetNumberOfThreads( dip::uint threads ) override {
accArray_.resize( threads, MomentAccumulator( nD_ ));
}
virtual MomentAccumulator GetResult() override {
MomentAccumulator out = accArray_[ 0 ];
for( dip::uint ii = 1; ii < accArray_.size(); ++ii ) {
out += accArray_[ ii ];
}
return out;
}
private:
std::vector< MomentAccumulator > accArray_;
dip::uint nD_;
};
} // namespace
MomentAccumulator Moments(
Image const& in,
Image const& mask
) {
DIP_THROW_IF( !in.IsForged(), E::IMAGE_NOT_FORGED );
DIP_THROW_IF( !in.IsScalar(), E::IMAGE_NOT_SCALAR );
std::unique_ptr< MomentsLineFilterBase > scanLineFilter;
DIP_OVL_NEW_NONCOMPLEX( scanLineFilter, MomentsLineFilter, ( in.Dimensionality() ), in.DataType() );
DIP_STACK_TRACE_THIS( Framework::ScanSingleInput( in, mask, in.DataType(), *scanLineFilter,
Framework::ScanOption::NeedCoordinates ));
return scanLineFilter->GetResult();
}
} // namespace dip<|fim▁end|>
|
for( dip::uint ii = 0; ii < bufferLength; ++ii ) {
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>"""Automabot bot for Discord."""
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), 'r', encoding='utf-8') as f:
long_description = f.read()
setup(
name='automabot',
version='0.0.1.dev20170604', # see PEP-0440
python_requires='>=3.6',
author='Maël Pedretti & Chea Dany',
author_email='[email protected] & [email protected]',
url='https://github.com/73VW/AutomaBot',
license='https://opensource.org/licenses/BSD-3-Clause',
description=__doc__,
long_description=long_description,
packages=find_packages(exclude=('contrib', 'docs', 'tests')),
keywords='discord asyncio bot',
classifiers=(
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Education',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Home Automation'
),
install_requires=(
'discord.py>=0.16.8',
'aiohttp>=1.0.0,<1.1.0',
'pyfiglet>=0.7.5',
'toml>=0.9.2'
),
extras_require={
'fast': ('cchardet', 'aiodns'), # making it faster (recommended)
'qa': ('flake8', 'isort', 'pycodestyle', 'pydocstyle', 'rstcheck'),<|fim▁hole|><|fim▁end|>
|
'docs': ('Sphinx>=1.6.0', 'sphinxcontrib-trio')
},
)
|
<|file_name|>ClusterBean.java<|end_file_name|><|fim▁begin|>/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.stratos.common.beans.topology;
import org.apache.stratos.common.beans.PropertyBean;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.List;
@XmlRootElement(name = "clusters")
public class ClusterBean {
private String alias;
private String serviceName;
private String clusterId;
private List<MemberBean> member;
private String tenantRange;
private List<String> hostNames;
private boolean isLbCluster;
private List<PropertyBean> property;
private List<InstanceBean> instances;
public List<InstanceBean> getInstances() {
return instances;
}
public void setInstances(List<InstanceBean> instances) {
this.instances = instances;
}
@Override
public String toString() {
return "Cluster [serviceName=" + getServiceName() + ", clusterId=" + getClusterId() + ", member=" + getMember()
+ ", tenantRange=" + getTenantRange() + ", hostNames=" + getHostNames() + ", isLbCluster=" + isLbCluster()
+ ", property=" + getProperty() + "]";
}
public String getAlias() {
return alias;
}
public void setAlias(String alias) {
this.alias = alias;
}
public String getServiceName() {
return serviceName;
}
public void setServiceName(String serviceName) {
this.serviceName = serviceName;
}
public String getClusterId() {
return clusterId;
}
public void setClusterId(String clusterId) {
this.clusterId = clusterId;
}
public List<MemberBean> getMember() {
return member;
}
public void setMember(List<MemberBean> member) {
this.member = member;
}
public String getTenantRange() {
return tenantRange;
}
public void setTenantRange(String tenantRange) {
this.tenantRange = tenantRange;
}
public List<String> getHostNames() {
return hostNames;
}
public void setHostNames(List<String> hostNames) {
this.hostNames = hostNames;
}
public boolean isLbCluster() {
return isLbCluster;
}
<|fim▁hole|> public void setLbCluster(boolean isLbCluster) {
this.isLbCluster = isLbCluster;
}
public List<PropertyBean> getProperty() {
return property;
}
public void setProperty(List<PropertyBean> property) {
this.property = property;
}
}<|fim▁end|>
| |
<|file_name|>base_handler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright 2017-2019 - Edoardo Morassutto <[email protected]>
# Copyright 2017 - Luca Versari <[email protected]>
# Copyright 2018 - William Di Luigi <[email protected]>
import json
from datetime import datetime
from werkzeug.exceptions import HTTPException, BadRequest
from werkzeug.wrappers import Response
from ..handler_params import HandlerParams
from ..config import Config
from ..database import Database
from ..logger import Logger
class BaseHandler:
@staticmethod
def raise_exc(cls, code, message):
"""
Raise an HTTPException with a code and a message sent in a json like
{
"code": code
"message": message
}
:param cls: HTTPException of the error, for example NotFound, BadRequest, NotAuthorized
:param code: A brief message for the exception, like MISSING_PARAMETER
:param message: A longer description of the error
:return: Nothing, raise the provided exception with the correct response
"""
response = Response()
response.mimetype = "application/json"
response.status_code = cls.code
response.data = json.dumps({
"code": code,
"message": message
})
Logger.warning(cls.__name__.upper(), code + ": " + message)
raise cls(response=response)<|fim▁hole|> def handle(self, endpoint, route_args, request):
"""
Handle a request in the derived handler. The request is routed to the correct method using *endpoint*
:param endpoint: A string with the name of the class method to call with (route_args, request) as parameters,
this method should return a Response or call self.raise_exc. *NOTE*: the method MUST be implemented in the
derived class
:param route_args: The route parameters, the parameters extracted from the matching route in the URL
:param request: The Request object, request.args contains the query parameters of the request
:return: Return a Response if the request is successful, an HTTPException if an error occurred
"""
try:
data = BaseHandler._call(self.__getattribute__(endpoint), route_args, request)
response = Response()
if data is not None:
                response.status_code = 200
response.mimetype = "application/json"
response.data = json.dumps(data)
else:
                response.status_code = 204
return response
except HTTPException as e:
return e
def parse_body(self, request):
"""
Parse the body part of the request in JSON
:param request: The request to be parsed
:return: A dict with the content of the body
"""
return request.form
@staticmethod
def get_end_time(user_extra_time):
"""
Compute the end time for a user
:param user_extra_time: Extra time specific for the user in seconds
:return: The timestamp at which the contest will be finished for this user
"""
start = Database.get_meta("start_time", type=int)
if start is None:
return None
contest_duration = Database.get_meta("contest_duration", type=int, default=0)
contest_extra_time = Database.get_meta("extra_time", type=int, default=0)
if user_extra_time is None:
user_extra_time = 0
return start + contest_duration + contest_extra_time + user_extra_time
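    # Worked example (hypothetical values): start=1000, contest_duration=3600,
    # extra_time=60 and user_extra_time=300 give 1000+3600+60+300 = 4960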
@staticmethod
def get_window_end_time(user_extra_time, start_delay):
"""
Compute the end time for a window started after `start_delay` and with `extra_time` delay for the user.
Note that this time may exceed the contest end time, additional checks are required.
:param user_extra_time: Extra time specific for the user in seconds
        :param start_delay: The time (in seconds) after the start of the contest at which the window started
:return: The timestamp at which the window ends. If the contest has no window None is returned.
"""
if start_delay is None:
return None
start = Database.get_meta("start_time", type=int)
if start is None:
return None
window_duration = Database.get_meta("window_duration", None, type=int)
if window_duration is None:
return None
if user_extra_time is None:
user_extra_time = 0
return start + user_extra_time + start_delay + window_duration
@staticmethod
def format_dates(dct, fields=["date"]):
"""
Given a dict, format all the *fields* fields from int to iso format. The original dict is modified
:param dct: dict to format
:param fields: list of the names of the fields to format
:return: The modified dict
"""
for k, v in dct.items():
if isinstance(v, dict):
dct[k] = BaseHandler.format_dates(v, fields)
elif isinstance(v, list):
for item in v:
BaseHandler.format_dates(item, fields)
elif k in fields and v is not None:
dct[k] = datetime.fromtimestamp(v).isoformat()
return dct
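    # Illustrative example (hypothetical input): format_dates({"date": 0})
    # yields {"date": "1970-01-01T00:00:00"} when the local timezone is UTC,
    # since datetime.fromtimestamp uses local time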
@staticmethod
def _call(method, route_args, request):
"""
This function is MAGIC!
        It takes a method, reads its parameters and automagically fetches their values from the request. Type-annotation
is also supported for a simple type validation.
The values are fetched, in order, from:
- route_args
- request.form
- general_attrs
- default values
        If a parameter is required but not sent, a BadRequest (MISSING_PARAMETERS) error is thrown; if a parameter cannot
be converted to the annotated type a BadRequest (FORMAT_ERROR) is thrown.
:param method: Method to be called
:param route_args: Arguments of the route
:param request: Request object
:return: The return value of method
"""
kwargs = {}
params = HandlerParams.get_handler_params(method)
general_attrs = {
'_request': request,
'_route_args': route_args,
'_file': {
"content": BaseHandler._get_file_content(request),
"name": BaseHandler._get_file_name(request)
},
'_ip': BaseHandler.get_ip(request)
}
missing_parameters = []
for name, data in params.items():
if name in route_args and name[0] != "_":
kwargs[name] = route_args[name]
elif name in request.form and name[0] != "_":
kwargs[name] = request.form[name]
elif name in general_attrs:
kwargs[name] = general_attrs[name]
elif name == "file" and general_attrs["_file"]["name"] is not None:
kwargs[name] = general_attrs["_file"]
elif data["required"]:
missing_parameters.append(name)
if len(missing_parameters) > 0:
BaseHandler.raise_exc(BadRequest, "MISSING_PARAMETERS",
"The missing parameters are: " + ", ".join(missing_parameters))
for key, value in kwargs.items():
type = params[key]["type"]
if type is None: continue
try:
kwargs[key] = type(value)
except ValueError:
BaseHandler.raise_exc(BadRequest, "FORMAT_ERROR",
"The parameter %s cannot be converted to %s" % (key, type.__name__))
Logger.debug(
"HTTP",
"Received request from %s for endpoint %s%s" %
(
general_attrs['_ip'],
method.__name__,
", with parameters " + ", ".join(
"=".join((kv[0], str(kv[1]))) for kv in kwargs.items()
if not kv[0].startswith("_") and not kv[0] == "file"
) if len(kwargs) > 0 else ""
)
)
return method(**kwargs)
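    # Illustrative sketch of what _call enables (hypothetical handler method):
    #
    #   def contest_info(self, token: str, _ip):
    #       ...
    #
    # `token` is looked up in route_args/request.form and cast to str, while
    # `_ip` is injected from general_attrs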
@staticmethod
def _get_file_name(request):
"""
Extract the name of the file from the multipart body
:param request: The Request object
:return: The filename in the request
"""
if "file" not in request.files:
return None
return request.files["file"].filename
@staticmethod
def _get_file_content(request):
"""
Extract the content of the file from the multipart of the body
:param request: The Request object
:return: A *bytes* with the content of the file
"""
if "file" not in request.files:
return None
return request.files["file"].stream.read()
@staticmethod
def get_ip(request):
"""
Return the real IP of the client
:param request: The Request object
:return: A string with the IP of the client
"""
num_proxies = Config.num_proxies
if num_proxies == 0 or len(request.access_route) < num_proxies:
return request.remote_addr
return request.access_route[-num_proxies]<|fim▁end|>
| |
<|file_name|>flatten.rs<|end_file_name|><|fim▁begin|>use crate::fmt;
use crate::iter::{DoubleEndedIterator, Fuse, FusedIterator, Iterator, Map, TrustedLen};
use crate::ops::Try;
/// An iterator that maps each element to an iterator, and yields the elements
/// of the produced iterators.
///
/// This `struct` is created by [`Iterator::flat_map`]. See its documentation
/// for more.
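///
/// # Examples
///
/// A small sketch of the behavior:
///
/// ```
/// let words = ["alpha", "beta"];
/// let merged: String = words.iter().flat_map(|s| s.chars()).collect();
/// assert_eq!(merged, "alphabeta");
/// ```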
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct FlatMap<I, U: IntoIterator, F> {
inner: FlattenCompat<Map<I, F>, <U as IntoIterator>::IntoIter>,
}
impl<I: Iterator, U: IntoIterator, F: FnMut(I::Item) -> U> FlatMap<I, U, F> {
pub(in crate::iter) fn new(iter: I, f: F) -> FlatMap<I, U, F> {
FlatMap { inner: FlattenCompat::new(iter.map(f)) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Clone, U, F: Clone> Clone for FlatMap<I, U, F>
where
U: Clone + IntoIterator<IntoIter: Clone>,
{
fn clone(&self) -> Self {
FlatMap { inner: self.inner.clone() }
}
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<I: fmt::Debug, U, F> fmt::Debug for FlatMap<I, U, F>
where
U: IntoIterator<IntoIter: fmt::Debug>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FlatMap").field("inner", &self.inner).finish()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator, U: IntoIterator, F> Iterator for FlatMap<I, U, F>
where
F: FnMut(I::Item) -> U,
{
type Item = U::Item;
#[inline]
fn next(&mut self) -> Option<U::Item> {
self.inner.next()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
#[inline]
fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
self.inner.try_fold(init, fold)
}
#[inline]
fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
self.inner.fold(init, fold)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: DoubleEndedIterator, U, F> DoubleEndedIterator for FlatMap<I, U, F>
where
F: FnMut(I::Item) -> U,
U: IntoIterator<IntoIter: DoubleEndedIterator>,
{
#[inline]
fn next_back(&mut self) -> Option<U::Item> {
self.inner.next_back()
}
#[inline]
fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
self.inner.try_rfold(init, fold)
}
#[inline]
fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
self.inner.rfold(init, fold)
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<I, U, F> FusedIterator for FlatMap<I, U, F>
where
I: FusedIterator,
U: IntoIterator,
F: FnMut(I::Item) -> U,
{
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, I, F, const N: usize> TrustedLen for FlatMap<I, [T; N], F>
where
I: TrustedLen,
F: FnMut(I::Item) -> [T; N],
{
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<'a, T, I, F, const N: usize> TrustedLen for FlatMap<I, &'a [T; N], F>
where
I: TrustedLen,
F: FnMut(I::Item) -> &'a [T; N],
{
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<'a, T, I, F, const N: usize> TrustedLen for FlatMap<I, &'a mut [T; N], F>
where
I: TrustedLen,
F: FnMut(I::Item) -> &'a mut [T; N],
{
}
/// An iterator that flattens one level of nesting in an iterator of things
/// that can be turned into iterators.
///
/// This `struct` is created by the [`flatten`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`flatten`]: Iterator::flatten()
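///
/// # Examples
///
/// A small sketch of the behavior:
///
/// ```
/// let nested = vec![vec![1, 2], vec![3, 4]];
/// let flat: Vec<i32> = nested.into_iter().flatten().collect();
/// assert_eq!(flat, vec![1, 2, 3, 4]);
/// ```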
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "iterator_flatten", since = "1.29.0")]
pub struct Flatten<I: Iterator<Item: IntoIterator>> {
inner: FlattenCompat<I, <I::Item as IntoIterator>::IntoIter>,
}
impl<I: Iterator<Item: IntoIterator>> Flatten<I> {
pub(in super::super) fn new(iter: I) -> Flatten<I> {
Flatten { inner: FlattenCompat::new(iter) }
}
}
#[stable(feature = "iterator_flatten", since = "1.29.0")]
impl<I, U> fmt::Debug for Flatten<I>
where
I: fmt::Debug + Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
U: fmt::Debug + Iterator,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Flatten").field("inner", &self.inner).finish()
}
}
#[stable(feature = "iterator_flatten", since = "1.29.0")]
impl<I, U> Clone for Flatten<I>
where
I: Clone + Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
U: Clone + Iterator,
{
fn clone(&self) -> Self {
Flatten { inner: self.inner.clone() }
}
}
#[stable(feature = "iterator_flatten", since = "1.29.0")]
impl<I, U> Iterator for Flatten<I>
where
I: Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
U: Iterator,
{
type Item = U::Item;
#[inline]
fn next(&mut self) -> Option<U::Item> {
self.inner.next()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
#[inline]
fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
self.inner.try_fold(init, fold)
}
#[inline]
fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
self.inner.fold(init, fold)
}
}
#[stable(feature = "iterator_flatten", since = "1.29.0")]
impl<I, U> DoubleEndedIterator for Flatten<I>
where
I: DoubleEndedIterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
U: DoubleEndedIterator,
{
#[inline]
fn next_back(&mut self) -> Option<U::Item> {
self.inner.next_back()
}
#[inline]
fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
self.inner.try_rfold(init, fold)
}
#[inline]
fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
self.inner.rfold(init, fold)
}
}
#[stable(feature = "iterator_flatten", since = "1.29.0")]
impl<I, U> FusedIterator for Flatten<I>
where
I: FusedIterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
U: Iterator,
{
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I> TrustedLen for Flatten<I>
where
I: TrustedLen,
<I as Iterator>::Item: TrustedConstSize,
{
}
/// Real logic of both `Flatten` and `FlatMap` which simply delegate to
/// this type.
#[derive(Clone, Debug)]
struct FlattenCompat<I, U> {
iter: Fuse<I>,
frontiter: Option<U>,
backiter: Option<U>,
}
impl<I, U> FlattenCompat<I, U>
where
I: Iterator,
{<|fim▁hole|> FlattenCompat { iter: iter.fuse(), frontiter: None, backiter: None }
}
}
impl<I, U> Iterator for FlattenCompat<I, U>
where
I: Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
U: Iterator,
{
type Item = U::Item;
#[inline]
fn next(&mut self) -> Option<U::Item> {
loop {
if let Some(ref mut inner) = self.frontiter {
match inner.next() {
None => self.frontiter = None,
elt @ Some(_) => return elt,
}
}
match self.iter.next() {
None => match self.backiter.as_mut()?.next() {
None => {
self.backiter = None;
return None;
}
elt @ Some(_) => return elt,
},
Some(inner) => self.frontiter = Some(inner.into_iter()),
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let (flo, fhi) = self.frontiter.as_ref().map_or((0, Some(0)), U::size_hint);
let (blo, bhi) = self.backiter.as_ref().map_or((0, Some(0)), U::size_hint);
let lo = flo.saturating_add(blo);
if let Some(fixed_size) = <<I as Iterator>::Item as ConstSizeIntoIterator>::size() {
let (lower, upper) = self.iter.size_hint();
let lower = lower.saturating_mul(fixed_size).saturating_add(lo);
let upper =
try { fhi?.checked_add(bhi?)?.checked_add(fixed_size.checked_mul(upper?)?)? };
return (lower, upper);
}
match (self.iter.size_hint(), fhi, bhi) {
((0, Some(0)), Some(a), Some(b)) => (lo, a.checked_add(b)),
_ => (lo, None),
}
}
#[inline]
fn try_fold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
#[inline]
fn flatten<'a, T: IntoIterator, Acc, R: Try<Output = Acc>>(
frontiter: &'a mut Option<T::IntoIter>,
fold: &'a mut impl FnMut(Acc, T::Item) -> R,
) -> impl FnMut(Acc, T) -> R + 'a {
move |acc, x| {
let mut mid = x.into_iter();
let r = mid.try_fold(acc, &mut *fold);
*frontiter = Some(mid);
r
}
}
if let Some(ref mut front) = self.frontiter {
init = front.try_fold(init, &mut fold)?;
}
self.frontiter = None;
init = self.iter.try_fold(init, flatten(&mut self.frontiter, &mut fold))?;
self.frontiter = None;
if let Some(ref mut back) = self.backiter {
init = back.try_fold(init, &mut fold)?;
}
self.backiter = None;
try { init }
}
#[inline]
fn fold<Acc, Fold>(self, mut init: Acc, mut fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
#[inline]
fn flatten<T: IntoIterator, Acc>(
fold: &mut impl FnMut(Acc, T::Item) -> Acc,
) -> impl FnMut(Acc, T) -> Acc + '_ {
move |acc, x| x.into_iter().fold(acc, &mut *fold)
}
if let Some(front) = self.frontiter {
init = front.fold(init, &mut fold);
}
init = self.iter.fold(init, flatten(&mut fold));
if let Some(back) = self.backiter {
init = back.fold(init, &mut fold);
}
init
}
}
impl<I, U> DoubleEndedIterator for FlattenCompat<I, U>
where
I: DoubleEndedIterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
U: DoubleEndedIterator,
{
#[inline]
fn next_back(&mut self) -> Option<U::Item> {
loop {
if let Some(ref mut inner) = self.backiter {
match inner.next_back() {
None => self.backiter = None,
elt @ Some(_) => return elt,
}
}
match self.iter.next_back() {
None => match self.frontiter.as_mut()?.next_back() {
None => {
self.frontiter = None;
return None;
}
elt @ Some(_) => return elt,
},
next => self.backiter = next.map(IntoIterator::into_iter),
}
}
}
#[inline]
fn try_rfold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
#[inline]
fn flatten<'a, T: IntoIterator, Acc, R: Try<Output = Acc>>(
backiter: &'a mut Option<T::IntoIter>,
fold: &'a mut impl FnMut(Acc, T::Item) -> R,
) -> impl FnMut(Acc, T) -> R + 'a
where
T::IntoIter: DoubleEndedIterator,
{
move |acc, x| {
let mut mid = x.into_iter();
let r = mid.try_rfold(acc, &mut *fold);
*backiter = Some(mid);
r
}
}
if let Some(ref mut back) = self.backiter {
init = back.try_rfold(init, &mut fold)?;
}
self.backiter = None;
init = self.iter.try_rfold(init, flatten(&mut self.backiter, &mut fold))?;
self.backiter = None;
if let Some(ref mut front) = self.frontiter {
init = front.try_rfold(init, &mut fold)?;
}
self.frontiter = None;
try { init }
}
#[inline]
fn rfold<Acc, Fold>(self, mut init: Acc, mut fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
#[inline]
fn flatten<T: IntoIterator, Acc>(
fold: &mut impl FnMut(Acc, T::Item) -> Acc,
) -> impl FnMut(Acc, T) -> Acc + '_
where
T::IntoIter: DoubleEndedIterator,
{
move |acc, x| x.into_iter().rfold(acc, &mut *fold)
}
if let Some(back) = self.backiter {
init = back.rfold(init, &mut fold);
}
init = self.iter.rfold(init, flatten(&mut fold));
if let Some(front) = self.frontiter {
init = front.rfold(init, &mut fold);
}
init
}
}
trait ConstSizeIntoIterator: IntoIterator {
// FIXME(#31844): convert to an associated const once specialization supports that
fn size() -> Option<usize>;
}
impl<T> ConstSizeIntoIterator for T
where
T: IntoIterator,
{
#[inline]
default fn size() -> Option<usize> {
None
}
}
impl<T, const N: usize> ConstSizeIntoIterator for [T; N] {
#[inline]
fn size() -> Option<usize> {
Some(N)
}
}
impl<T, const N: usize> ConstSizeIntoIterator for &[T; N] {
#[inline]
fn size() -> Option<usize> {
Some(N)
}
}
impl<T, const N: usize> ConstSizeIntoIterator for &mut [T; N] {
#[inline]
fn size() -> Option<usize> {
Some(N)
}
}
#[doc(hidden)]
#[unstable(feature = "std_internals", issue = "none")]
// FIXME(#20400): Instead of this helper trait there should be multiple impl TrustedLen for Flatten<>
// blocks with different bounds on Iterator::Item but the compiler erroneously considers them overlapping
pub unsafe trait TrustedConstSize: IntoIterator {}
#[unstable(feature = "std_internals", issue = "none")]
unsafe impl<T, const N: usize> TrustedConstSize for [T; N] {}
#[unstable(feature = "std_internals", issue = "none")]
unsafe impl<T, const N: usize> TrustedConstSize for &'_ [T; N] {}
#[unstable(feature = "std_internals", issue = "none")]
unsafe impl<T, const N: usize> TrustedConstSize for &'_ mut [T; N] {}<|fim▁end|>
|
/// Adapts an iterator by flattening it, for use in `flatten()` and `flat_map()`.
fn new(iter: I) -> FlattenCompat<I, U> {
|
<|file_name|>HookshotState.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2009 Christopho, Solarus - http://www.solarus-engine.org
*
* Solarus is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Solarus is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/<|fim▁hole|>#include "entities/MapEntities.h"
#include "entities/Hookshot.h"
/**
* @brief Constructor.
* @param hero the hero controlled by this state
*/
Hero::HookshotState::HookshotState(Hero &hero):
State(hero),
hookshot(NULL) {
}
/**
* @brief Destructor.
*/
Hero::HookshotState::~HookshotState() {
}
/**
* @brief Starts this state.
* @param previous_state the previous state
*/
void Hero::HookshotState::start(State *previous_state) {
State::start(previous_state);
get_sprites().set_animation_hookshot();
hookshot = new Hookshot(hero);
get_entities().add_entity(hookshot);
}
/**
* @brief Ends this state.
* @param next_state the next state (for information)
*/
void Hero::HookshotState::stop(State *next_state) {
State::stop(next_state);
if (!hookshot->is_being_removed()) {
// the hookshot state was stopped by something other than the hookshot (e.g. an enemy)
hookshot->remove_from_map();
hero.clear_movement();
}
}
/**
* @brief Returns whether the hero is touching the ground in the current state.
* @return true if the hero is touching the ground in the current state
*/
bool Hero::HookshotState::is_touching_ground() {
return false;
}
/**
* @brief Returns whether the hero ignores the effect of deep water in this state.
* @return true if the hero ignores the effect of deep water in the current state
*/
bool Hero::HookshotState::can_avoid_deep_water() {
return true;
}
/**
* @brief Returns whether the hero ignores the effect of holes in this state.
* @return true if the hero ignores the effect of holes in the current state
*/
bool Hero::HookshotState::can_avoid_hole() {
return true;
}
/**
* @brief Returns whether the hero ignores the effect of lava in this state.
* @return true if the hero ignores the effect of lava in the current state
*/
bool Hero::HookshotState::can_avoid_lava() {
return true;
}
/**
* @brief Returns whether the hero ignores the effect of prickles in this state.
* @return true if the hero ignores the effect of prickles in the current state
*/
bool Hero::HookshotState::can_avoid_prickle() {
return true;
}
/**
* @brief Returns whether the hero ignores the effect of teletransporters in this state.
* @return true if the hero ignores the effect of teletransporters in this state
*/
bool Hero::HookshotState::can_avoid_teletransporter() {
return true;
}
/**
* @brief Returns whether the hero ignores the effect of conveyor belts in this state.
* @return true if the hero ignores the effect of conveyor belts in this state
*/
bool Hero::HookshotState::can_avoid_conveyor_belt() {
return true;
}
/**
* @brief Returns whether some stairs are considered as obstacle in this state.
* @param stairs some stairs
* @return true if the stairs are obstacle in this state
*/
bool Hero::HookshotState::is_stairs_obstacle(Stairs& stairs) {
// allow to fly over stairs covered by water
return hero.get_ground() != GROUND_DEEP_WATER;
}
/**
* @brief Returns whether a sensor is considered as an obstacle in this state.
* @param sensor a sensor
* @return true if the sensor is an obstacle in this state
*/
bool Hero::HookshotState::is_sensor_obstacle(Sensor& sensor) {
return true;
}
/**
* @brief Returns whether a jump sensor is considered as an obstacle in this state.
* @param jump_sensor a jump sensor
* @return true if the sensor is an obstacle in this state
*/
bool Hero::HookshotState::is_jump_sensor_obstacle(JumpSensor& jump_sensor) {
return false;
}
/**
* @brief Returns whether the hero ignores the effect of switches in this state.
* @return true if the hero ignores the effect of switches in this state
*/
bool Hero::HookshotState::can_avoid_switch() {
return true;
}
/**
* @brief Returns whether the hero can be hurt in this state.
* @return true if the hero can be hurt in this state
*/
bool Hero::HookshotState::can_be_hurt() {
return true;
}
/**
* @brief Notifies this state that the hero has just tried to change his position.
* @param success true if the position has actually just changed
*/
void Hero::HookshotState::notify_movement_tried(bool success) {
if (!success) {
// an unexpected obstacle was reached (e.g. a moving NPC)
hero.set_state(new FreeState(hero));
}
}<|fim▁end|>
|
#include "hero/HookshotState.h"
#include "hero/FreeState.h"
#include "hero/HeroSprites.h"
|
<|file_name|>UsuarioControllerTest.js<|end_file_name|><|fim▁begin|>/**
* Usuario Controller Test Suite
*
* @author Thiago Paes <[email protected]>
* @license MIT
*/
'use strict';
var connection = require('../test');
var Usuario = require('../../src/controllers/UsuarioController');
var sinon = require('sinon');
var assert = require('assert');
var request = require('request');
var response = {
content: null,
statusCode: 0,
json: function(content){
this.content = content;
return this;
},
status: function(status) {
this.statusCode = status;
return this;
}<|fim▁hole|>describe('Usuario Controller', function () {
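    // Editor's note: the `response` object above is a hand-rolled stub of an
    // Express-style res; json() records the payload and status() records the
    // HTTP code, so each assertion below can inspect the controller's reply.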
it('#lista() deve retornar um array', function (done) {
request.headers = {};
        request.params = {};
request.query = {
page : 1,
limit: 1
};
Usuario.lista(request, response, function() {
assert.equal(response.content.object, 'list');
done();
});
});
it('#abre() deve retornar um objeto', function (done) {
request.headers = { };
request.params = {
id: 1
};
request.query = {
page : 1,
limit: 1
};
Usuario.abre(request, response, function() {
assert.equal(response.content.object, 'error');
assert.equal(response.statusCode, 404);
done();
});
});
it('#adiciona() deve retornar um objeto', function (done) {
request.headers = { };
request.body = {
nome : 'Foo Bar',
email : '[email protected]',
password: 'foo',
uf : 'AA',
estado : 'aaa aaa',
cidade : 'bbb bbb bb'
};
Usuario.adiciona(request, response, function() {
assert.equal(response.content.object, 'error');
done();
});
});
it('#atualiza() deve retornar um objeto', function (done) {
request.headers = { };
request.params = {
id: 1
};
request.query = {
page : 1,
limit: 1
};
Usuario.atualiza(request, response, function() {
assert.equal(response.content.object, 'error');
done();
});
});
it('#apaga() deve retornar um objeto', function (done) {
request.headers = { };
request.params = {
id: 1
};
request.query = {
page : 1,
limit: 1
};
Usuario.apaga(request, response, function() {
assert.equal(response.content.object, 'error');
done();
});
});
});<|fim▁end|>
|
};
|
<|file_name|>resolved_item_builder.hpp<|end_file_name|><|fim▁begin|>/*
Playdar - music content resolver
Copyright (C) 2009 Richard Jones
Copyright (C) 2009 Last.fm Ltd.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __RESOLVED_ITEM_BUILDER_H__
#define __RESOLVED_ITEM_BUILDER_H__
#include "playdar/resolved_item.h"
#include "library.h"
using namespace json_spirit;
namespace playdar {
/*
Builds a ResolvedItem describing something (a song) that can be played.
*/
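/*
    Illustrative shape of the populated object (editor's sketch; the exact
    field set depends on the library row):
    { "mimetype": "audio/mpeg", "size": 4711000, "duration": 215,
      "bitrate": 192, "artist": "...", "track": "...", "album": "...",
      "url": "file:///..." }
*/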
class ResolvedItemBuilder
{
public:
static void createFromFid(Library& lib, int fid, json_spirit::Object& out)
{
createFromFid( lib.db(), fid, out );
}
static void createFromFid( sqlite3pp::database& db, int fid, Object& out)
{
LibraryFile_ptr file( Library::file_from_fid(db, fid) );
out.push_back( Pair("mimetype", file->mimetype) );
out.push_back( Pair("size", file->size) );<|fim▁hole|> out.push_back( Pair("artist", artobj->name()) );
out.push_back( Pair("track", trkobj->name()) );
// album metadata kinda optional for now
if (file->pialbid) {
album_ptr albobj = Library::load_album(db, file->pialbid);
out.push_back( Pair("album", albobj->name()) );
}
out.push_back( Pair("url", file->url) );
}
};
} // ns
#endif //__RESOLVED_ITEM_BUILDER_H__<|fim▁end|>
|
out.push_back( Pair("duration", file->duration) );
out.push_back( Pair("bitrate", file->bitrate) );
artist_ptr artobj = Library::load_artist( db, file->piartid);
track_ptr trkobj = Library::load_track( db, file->pitrkid);
|
<|file_name|>clienteController.js<|end_file_name|><|fim▁begin|>'use strict';
var app = angular.module('exemplo', []).controller('clienteController', function($scope, $http){
$scope.mensagem_erro = '';
$scope.resultado = '';
$scope.novoCliente = {};
$scope.carregarClientes = function() {
var url = 'http://localhost:8080/clientes/';
$http.get(url)
.success(function(data) {
$scope.clientes = data;
})
.error(function (exception) {
$scope.mensagem_erro = 'Ocorreu um erro ao tentar recuperar os clientes.' + exception;
});
};
$scope.excluirCliente = function(_cliente) {
var url = 'http://localhost:8080/cliente/';
$http.delete(url + _cliente.id + '/excluir')
.success(function(data, status) {
console.log(data);
console.log('status: ' + status);
$scope.carregarClientes();
})
.error(function (exception) {
$scope.mensagem_erro = 'Ocorreu um erro ao tentar excluir o cliente ' + _cliente.id + ': ' + exception;
});
};
$scope.carregarDadosCliente = function(_id) {<|fim▁hole|> $http.get(url + _id)
.success(function(data) {
$scope.cliente = data;
})
.error(function (exception) {
$scope.mensagem_erro = 'Ocorreu um erro ao tentar recuperar os dados do cliente ' + _id + ': ' + exception;
});
};
$scope.atualizarCadastroCliente = function(_cliente) {
var url = 'http://localhost:8080/cliente/';
$http.put(url + _cliente.id, _cliente)
.success(function(data, status) {
console.log(data);
console.log('status: ' + status);
$scope.resultado = data;
if (status == 201) {
$scope.resultado = 'Cliente alterado com sucesso!';
};
})
.error(function (exception, status) {
console.log('status: ' + status);
console.log("error: "+exception);
$scope.resultado = 'Ocorreu um erro ao tentar alterar os dados do cliente ' + _cliente.id + ': ' + exception;
});
};
$scope.cadastrarNovoCliente = function(_novoCliente) {
var url = 'http://localhost:8080/clientes/cadastrar';
$http.post(url, _novoCliente)
.success(function(data, status) {
console.log(data);
console.log('status: ' + status);
$scope.resultado = data;
if (status == 201) {
$scope.resultado_sucesso = 'Cliente cadastrado com sucesso!';
$scope.resultado_erro = '';
};
})
.error(function (exception) {
console.log("error: "+exception);
$scope.resultado_erro = '';
$scope.resultado_erro = 'Ocorreu um erro ao tentar cadastrar um novo cliente: ' + exception;
});
};
$scope.carregarClientes();
// $scope.carregarDadosCliente(7);
});
app.directive('ngConfirmClick', [
function(){
return {
link: function (scope, element, attr) {
var msg = attr.ngConfirmClick || "Are you sure?";
var clickAction = attr.confirmedClick;
element.bind('click',function (event) {
if ( window.confirm(msg) ) {
scope.$eval(clickAction)
}
});
}
};
}])<|fim▁end|>
|
var url = 'http://localhost:8080/cliente/';
|
<|file_name|>roles.py<|end_file_name|><|fim▁begin|># Copyright 2015-2017 Rumma & Ko Ltd<|fim▁hole|>from lino.core.roles import UserRole
class SimpleContactsUser(UserRole):
pass
class ContactsUser(SimpleContactsUser):
pass
class ContactsStaff(ContactsUser):
pass<|fim▁end|>
|
# License: BSD (see file COPYING for details)
|
<|file_name|>GirlComponent.js<|end_file_name|><|fim▁begin|>/**
* Created by TC on 2016/10/10.
*/
import React, {
Component,
} from 'react'<|fim▁hole|> ToastAndroid,
ActivityIndicator,
}
from 'react-native'
import PixelRatio from "react-native/Libraries/Utilities/PixelRatio";
class GirlComponent extends Component {
constructor(props) {
super(props);
this.state = {
imgUrl: ''
}
}
loadImage() {
this.setState({imgUrl: ''});
this.getImage();
}
componentWillMount() {
this.getImage();
}
getImage() {
        fetch('http://gank.io/api/data/福利/100/1')// request the image list asynchronously
.then((response) => {
return response.json();
})
.then((responseJson) => {
if (responseJson.results) {
                    const index = Math.ceil(Math.random() * 100 - 1);// pick one image at random
this.setState({imgUrl: responseJson.results[index].url});
}
}).catch((error) => console.error(error))
.done();
}
render() {
if (this.state.imgUrl.length == 0) {
return (
<View style={ {flex: 1, flexDirection: 'row', justifyContent: 'center', alignItems: 'center'}}>
<ActivityIndicator size='large' color='#00BCD4'/>
</View>
);
} else {
return (
<View style={{flexDirection: 'column', flex: 1}}>
<Image source={{uri: this.state.imgUrl}}
style={{width: 200 * PixelRatio.get(), height: 200 * PixelRatio.get()}}/>
</View>
);
}
}
}
export default GirlComponent;<|fim▁end|>
|
import {
Image,
View,
|
<|file_name|>DistributedGoofySpeedwayAI.py<|end_file_name|><|fim▁begin|>from otp.ai.AIBaseGlobal import *
import DistributedCCharBaseAI
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
import random
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import CharStateDatasAI
class DistributedGoofySpeedwayAI(DistributedCCharBaseAI.DistributedCCharBaseAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGoofySpeedwayAI')
def __init__(self, air):
DistributedCCharBaseAI.DistributedCCharBaseAI.__init__(self, air, TTLocalizer.Goofy)
self.fsm = ClassicFSM.ClassicFSM('DistributedGoofySpeedwayAI', [State.State('Off', self.enterOff, self.exitOff, ['Lonely', 'TransitionToCostume', 'Walk']),
State.State('Lonely', self.enterLonely, self.exitLonely, ['Chatty', 'Walk', 'TransitionToCostume']),
State.State('Chatty', self.enterChatty, self.exitChatty, ['Lonely', 'Walk', 'TransitionToCostume']),
State.State('Walk', self.enterWalk, self.exitWalk, ['Lonely', 'Chatty', 'TransitionToCostume']),
State.State('TransitionToCostume', self.enterTransitionToCostume, self.exitTransitionToCostume, ['Off'])], 'Off', 'Off')
self.fsm.enterInitialState()
self.handleHolidays()
def delete(self):
self.fsm.requestFinalState()
DistributedCCharBaseAI.DistributedCCharBaseAI.delete(self)
self.lonelyDoneEvent = None
self.lonely = None
self.chattyDoneEvent = None
self.chatty = None
self.walkDoneEvent = None
self.walk = None
return
def generate(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.generate(self)
name = self.getName()
self.lonelyDoneEvent = self.taskName(name + '-lonely-done')
self.lonely = CharStateDatasAI.CharLonelyStateAI(self.lonelyDoneEvent, self)
self.chattyDoneEvent = self.taskName(name + '-chatty-done')
self.chatty = CharStateDatasAI.CharChattyStateAI(self.chattyDoneEvent, self)
self.walkDoneEvent = self.taskName(name + '-walk-done')
if self.diffPath == None:
self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self)
else:
self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self, self.diffPath)
return
def walkSpeed(self):
return ToontownGlobals.GoofySpeed
def start(self):
self.fsm.request('Lonely')
def __decideNextState(self, doneStatus):
if self.transitionToCostume == 1:
curWalkNode = self.walk.getDestNode()
if simbase.air.holidayManager:
if ToontownGlobals.HALLOWEEN_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES]:
simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES].triggerSwitch(curWalkNode, self)
self.fsm.request('TransitionToCostume')
elif ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES]:
simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].triggerSwitch(curWalkNode, self)
self.fsm.request('TransitionToCostume')
else:
self.notify.warning('transitionToCostume == 1 but no costume holiday')
else:
self.notify.warning('transitionToCostume == 1 but no holiday Manager')
if doneStatus['state'] == 'lonely' and doneStatus['status'] == 'done':
self.fsm.request('Walk')
elif doneStatus['state'] == 'chatty' and doneStatus['status'] == 'done':
self.fsm.request('Walk')
elif doneStatus['state'] == 'walk' and doneStatus['status'] == 'done':
if len(self.nearbyAvatars) > 0:
self.fsm.request('Chatty')
else:
self.fsm.request('Lonely')
def enterOff(self):
pass
def exitOff(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.exitOff(self)
def enterLonely(self):
self.lonely.enter()
self.acceptOnce(self.lonelyDoneEvent, self.__decideNextState)
def exitLonely(self):
self.ignore(self.lonelyDoneEvent)
self.lonely.exit()
def __goForAWalk(self, task):
self.notify.debug('going for a walk')
self.fsm.request('Walk')
return Task.done
def enterChatty(self):
self.chatty.enter()
self.acceptOnce(self.chattyDoneEvent, self.__decideNextState)
<|fim▁hole|> def enterWalk(self):
self.notify.debug('going for a walk')
self.walk.enter()
self.acceptOnce(self.walkDoneEvent, self.__decideNextState)
def exitWalk(self):
self.ignore(self.walkDoneEvent)
self.walk.exit()
def avatarEnterNextState(self):
if len(self.nearbyAvatars) == 1:
if self.fsm.getCurrentState().getName() != 'Walk':
self.fsm.request('Chatty')
else:
self.notify.debug('avatarEnterNextState: in walk state')
else:
self.notify.debug('avatarEnterNextState: num avatars: ' + str(len(self.nearbyAvatars)))
def avatarExitNextState(self):
if len(self.nearbyAvatars) == 0:
if self.fsm.getCurrentState().getName() != 'Walk':
self.fsm.request('Lonely')
def handleHolidays(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.handleHolidays(self)
if hasattr(simbase.air, 'holidayManager'):
if ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays:
if simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES] != None and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].getRunningState():
self.diffPath = TTLocalizer.Donald
return
def getCCLocation(self):
if self.diffPath == None:
return 1
else:
return 0
return
def enterTransitionToCostume(self):
pass
def exitTransitionToCostume(self):
pass<|fim▁end|>
|
def exitChatty(self):
self.ignore(self.chattyDoneEvent)
self.chatty.exit()
|
<|file_name|>htmllinkelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrValue};
use dom::attr::AttrHelpers;
use dom::bindings::codegen::Bindings::HTMLLinkElementBinding;
use dom::bindings::codegen::Bindings::HTMLLinkElementBinding::HTMLLinkElementMethods;
use dom::bindings::codegen::InheritTypes::HTMLLinkElementDerived;
use dom::bindings::codegen::InheritTypes::{ElementCast, HTMLElementCast};
use dom::bindings::js::{MutNullableJS, JSRef, Temporary, OptionalRootable};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::domtokenlist::DOMTokenList;
use dom::element::{AttributeHandlers, Element, HTMLLinkElementTypeId};
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, NodeHelpers, ElementNodeTypeId, window_from_node};
use dom::virtualmethods::VirtualMethods;
use layout_interface::{LayoutChan, LoadStylesheetMsg};
use servo_util::str::{DOMString, HTML_SPACE_CHARACTERS};
use std::ascii::AsciiExt;
use std::default::Default;
use url::UrlParser;
use string_cache::Atom;
#[dom_struct]
pub struct HTMLLinkElement {
htmlelement: HTMLElement,
rel_list: MutNullableJS<DOMTokenList>,
}
impl HTMLLinkElementDerived for EventTarget {
fn is_htmllinkelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLLinkElementTypeId))
}
}
impl HTMLLinkElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLLinkElement {
HTMLLinkElement {
htmlelement: HTMLElement::new_inherited(HTMLLinkElementTypeId, localName, prefix, document),
rel_list: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLLinkElement> {
let element = HTMLLinkElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLLinkElementBinding::Wrap)
}
}
fn get_attr(element: JSRef<Element>, name: &Atom) -> Option<String> {
let elem = element.get_attribute(ns!(""), name).root();
elem.map(|e| e.value().as_slice().to_string())
}
fn is_stylesheet(value: &Option<String>) -> bool {
match *value {
Some(ref value) => {
value.as_slice().split(HTML_SPACE_CHARACTERS.as_slice())
.any(|s| s.as_slice().eq_ignore_ascii_case("stylesheet"))
},
None => false,
}
}
impl<'a> VirtualMethods for JSRef<'a, HTMLLinkElement> {
fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods> {
let htmlelement: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn after_set_attr(&self, attr: JSRef<Attr>) {
match self.super_type() {
Some(ref s) => s.after_set_attr(attr),
_ => ()
}
let element: JSRef<Element> = ElementCast::from_ref(*self);
let rel = get_attr(element, &atom!("rel"));
match (rel, attr.local_name()) {
(ref rel, &atom!("href")) => {
if is_stylesheet(rel) {
self.handle_stylesheet_url(attr.value().as_slice());
}
}
(_, _) => ()
}
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("rel") => AttrValue::from_tokenlist(value),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
match self.super_type() {
Some(ref s) => s.bind_to_tree(tree_in_doc),
_ => ()
}
if tree_in_doc {
let element: JSRef<Element> = ElementCast::from_ref(*self);
let rel = get_attr(element, &atom!("rel"));
let href = get_attr(element, &atom!("href"));
match (rel, href) {
(ref rel, Some(ref href)) if is_stylesheet(rel) => {
self.handle_stylesheet_url(href.as_slice());
}
_ => {}
}
}
}
}
trait PrivateHTMLLinkElementHelpers {
fn handle_stylesheet_url(self, href: &str);
}
impl<'a> PrivateHTMLLinkElementHelpers for JSRef<'a, HTMLLinkElement> {
fn handle_stylesheet_url(self, href: &str) {
let window = window_from_node(self).root();
match UrlParser::new().base_url(&window.page().get_url()).parse(href) {
Ok(url) => {
let LayoutChan(ref layout_chan) = window.page().layout_chan;
layout_chan.send(LoadStylesheetMsg(url));
}
Err(e) => debug!("Parsing url {:s} failed: {}", href, e)
}
}
}
impl Reflectable for HTMLLinkElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
impl<'a> HTMLLinkElementMethods for JSRef<'a, HTMLLinkElement> {
fn RelList(self) -> Temporary<DOMTokenList> {
if self.rel_list.get().is_none() {
let element: JSRef<Element> = ElementCast::from_ref(self);
let rel_list = DOMTokenList::new(element, &atom!("rel"));
self.rel_list.assign(Some(rel_list));
}
self.rel_list.get().unwrap()
}<|fim▁hole|>}<|fim▁end|>
| |
<|file_name|>structure-constructor-type-mismatch.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct Point<T> {
x: T,<|fim▁hole|> y: T,
}
type PointF = Point<f32>;
struct Pair<T,U> {
x: T,
y: U,
}
type PairF<U> = Pair<f32,U>;
fn main() {
let pt = PointF {
//~^ ERROR structure constructor specifies a structure of type
//~| expected f32
//~| found integral variable
x: 1,
y: 2,
};
let pt2 = Point::<f32> {
//~^ ERROR structure constructor specifies a structure of type
//~| expected f32
//~| found integral variable
x: 3,
y: 4,
};
let pair = PairF {
//~^ ERROR structure constructor specifies a structure of type
//~| expected f32
//~| found integral variable
x: 5,
y: 6,
};
let pair2 = PairF::<i32> {
//~^ ERROR structure constructor specifies a structure of type
//~| expected f32
//~| found integral variable
x: 7,
y: 8,
};
let pt3 = PointF::<i32> {
//~^ ERROR wrong number of type arguments
//~| ERROR structure constructor specifies a structure of type
x: 9,
y: 10,
};
}<|fim▁end|>
| |
<|file_name|>300.cpp<|end_file_name|><|fim▁begin|>#include "cpp_header.h"
class Solution {
public:
int lengthOfLIS(vector<int>& nums)
{
vector<int> lens(nums.size(), 1);
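        // lens[i] = length of the longest increasing subsequence that ends at nums[i]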
int longest = 0;
for(int i = 0; i < nums.size(); i++)
{
for(int j = 0; j < i; j++)
{
if(nums[i] > nums[j])
{
lens[i] = max(lens[i], lens[j] + 1);
}
}
if(longest < lens[i])
{
longest = lens[i];
}
}
return longest;
}
int lengthOfLIS2(vector<int>& nums)
{
if(nums.size() < 2)
return nums.size();
        // lis maintains a sorted "tails" array: lis[k] is the smallest possible
        // tail value of any increasing subsequence of length k + 1 seen so far,
        // so lis.size() ends up equal to the LIS length (patience-sorting idea).
vector<int> lis;
for(auto & num : nums)
{
            // index of the first element in lis that is >= num (binary search)
auto idx = lower_bound(lis.begin(), lis.end(), num) - lis.begin();
if(idx >= lis.size())
lis.push_back(num);
else
lis[idx] = num;
}
for(auto x : lis)
cout << x << " ";
cout << "\n";
return lis.size();
}
};
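// Worked trace for lengthOfLIS2 on the test input below (editor's illustration):
//   num:  10    9    2    5     3      7        101          18
//   lis: [10]  [9]  [2]  [2,5] [2,3]  [2,3,7]  [2,3,7,101]  [2,3,7,18]
// lis.size() == 4 is the answer; note that lis itself need not be a valid
// subsequence of nums, only its length is meaningful.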
void testcase(vector<int> nums, int res, int casenum)
{
Solution sol;
if(sol.lengthOfLIS2(nums) == res)
{<|fim▁hole|> cout << casenum << " no pass\n";
}
}
int main()
{
vector<int> nums;
int res;
int casenum;
nums = {10, 9, 2, 5, 3, 7, 101, 18};
res = 4;
casenum = 1;
testcase(nums, res, casenum);
}<|fim▁end|>
|
cout << casenum << " pass\n";
}
else
{
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>def main():
"""Instantiate a DockerStats object and collect stats."""<|fim▁hole|> print('Docker Service Module')
if __name__ == '__main__':
main()<|fim▁end|>
| |
<|file_name|>vis_utils_test.py<|end_file_name|><|fim▁begin|># Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras Vis utils."""
from tensorflow.python import keras
from tensorflow.python.keras.utils import vis_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ModelToDotFormatTest(test.TestCase):
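  # Editor's note: the try/except ImportError blocks below exist because
  # plot_model needs the optional pydot + graphviz toolchain at runtime;
  # when it is missing, each test silently skips the rendering assertions.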
def test_plot_model_cnn(self):
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv'))
model.add(keras.layers.Flatten(name='flat'))<|fim▁hole|> dot_img_file = 'model_1.png'
try:
vis_utils.plot_model(
model, to_file=dot_img_file, show_shapes=True, show_dtype=True)
self.assertTrue(file_io.file_exists_v2(dot_img_file))
file_io.delete_file_v2(dot_img_file)
except ImportError:
pass
def test_plot_model_with_wrapped_layers_and_models(self):
inputs = keras.Input(shape=(None, 3))
lstm = keras.layers.LSTM(6, return_sequences=True, name='lstm')
x = lstm(inputs)
# Add layer inside a Wrapper
bilstm = keras.layers.Bidirectional(
keras.layers.LSTM(16, return_sequences=True, name='bilstm'))
x = bilstm(x)
# Add model inside a Wrapper
submodel = keras.Sequential(
[keras.layers.Dense(32, name='dense', input_shape=(None, 32))]
)
wrapped_dense = keras.layers.TimeDistributed(submodel)
x = wrapped_dense(x)
# Add shared submodel
outputs = submodel(x)
model = keras.Model(inputs, outputs)
dot_img_file = 'model_2.png'
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True)
self.assertTrue(file_io.file_exists_v2(dot_img_file))
file_io.delete_file_v2(dot_img_file)
except ImportError:
pass
def test_plot_model_with_add_loss(self):
inputs = keras.Input(shape=(None, 3))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
model.add_loss(math_ops.reduce_mean(outputs))
dot_img_file = 'model_3.png'
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True)
self.assertTrue(file_io.file_exists_v2(dot_img_file))
file_io.delete_file_v2(dot_img_file)
except ImportError:
pass
model = keras.Sequential([
keras.Input(shape=(None, 3)), keras.layers.Dense(1)])
model.add_loss(math_ops.reduce_mean(model.output))
dot_img_file = 'model_4.png'
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True)
self.assertTrue(file_io.file_exists_v2(dot_img_file))
file_io.delete_file_v2(dot_img_file)
except ImportError:
pass
if __name__ == '__main__':
test.main()<|fim▁end|>
|
model.add(keras.layers.Dense(5, name='dense'))
|
<|file_name|>ConverNumberServiceAsync.java<|end_file_name|><|fim▁begin|>package es.josealmela.BasicMathCalculator.client;
import com.google.gwt.user.client.rpc.AsyncCallback;<|fim▁hole|>/**
* The async counterpart of <code>convertNumberService</code>.
*/
public interface ConverNumberServiceAsync {
void convertNumbertServer(String input, AsyncCallback<String> callback)
throws IllegalArgumentException;
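    // Hypothetical call site (editor's illustration, not part of this file):
    //   service.convertNumbertServer("42", new AsyncCallback<String>() {
    //       public void onSuccess(String result) { /* display the result */ }
    //       public void onFailure(Throwable caught) { /* report the error */ }
    //   });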
}<|fim▁end|>
| |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict';
var path = require('path');
var Stream = require('readable-stream');
var BufferStreams = require('bufferstreams');
var ttf2woff2 = require('ttf2woff2');
var PluginError = require('plugin-error');
var replaceExtension = require('replace-ext');
var PLUGIN_NAME = 'gulp-ttf2woff2';
// File level transform function
function ttf2woff2Transform() {
// Return a callback function handling the buffered content
return function ttf2woff2TransformCb(err, buf, cb) {
// Handle any error
if(err) {
return cb(new PluginError(PLUGIN_NAME, err, { showStack: true }));
}
// Use the buffered content
try {
buf = ttf2woff2(buf);
return cb(null, buf);
} catch(err2) {
return cb(new PluginError(PLUGIN_NAME, err2, { showStack: true }));
}
};
}
// Plugin function
function ttf2woff2Gulp(options) {
var stream = new Stream.Transform({ objectMode: true });
options = options || {};
options.ignoreExt = options.ignoreExt || false;
options.clone = options.clone || false;
stream._transform = function ttf2woff2GulpTransform(file, unused, done) {
var cntStream;
var newFile;
// When null just pass through
if(file.isNull()) {
stream.push(file); done();
return;
}
// If the ext doesn't match, pass it through
if((!options.ignoreExt) && '.ttf' !== path.extname(file.path)) {
stream.push(file); done();
return;
}
// Fix for the vinyl clone method...
// https://github.com/wearefractal/vinyl/pull/9
if(options.clone) {
if(file.isBuffer()) {
stream.push(file.clone());
} else {
cntStream = file.contents;
file.contents = null;
newFile = file.clone();
file.contents = cntStream.pipe(new Stream.PassThrough());
newFile.contents = cntStream.pipe(new Stream.PassThrough());
stream.push(newFile);
}
}
file.path = replaceExtension(file.path, '.woff2');
// Buffers
if(file.isBuffer()) {
try {
file.contents = ttf2woff2(file.contents);
} catch(err) {
stream.emit('error', new PluginError(PLUGIN_NAME, err, {
showStack: true,
}));
}
// Streams
} else {
file.contents = file.contents.pipe(new BufferStreams(ttf2woff2Transform()));
}<|fim▁hole|> };
return stream;
}
// Export the file level transform function for other plugins usage
ttf2woff2Gulp.fileTransform = ttf2woff2Transform;
// Export the plugin main function
module.exports = ttf2woff2Gulp;<|fim▁end|>
|
stream.push(file);
done();
|
<|file_name|>about.js<|end_file_name|><|fim▁begin|>import React from "react"
import Img from "gatsby-image"
import { StaticQuery, graphql } from "gatsby"
import html5 from "../images/html5.svg"
import js from "../images/javascript.svg"
import jQuery from "../images/jquery.svg"
import php from "../images/php.svg"
import python from "../images/python.svg"
import css3 from "../images/css3.svg"
import sass from "../images/sass.svg"
import react from "../images/react.svg"
import redux from "../images/redux.svg"
import angular from "../images/angular.svg"
import nodejs from "../images/nodejs.svg"
import express from "../images/express.svg"
import graphQL from "../images/graphql.svg"
import apollo from "../images/apollo.svg"
import laravel from "../images/laravel.svg"
import django from "../images/django.svg"
import ruby from "../images/ruby.svg"
import rails from "../images/rails.svg"
import firebase from "../images/firebase.svg"
import mongodb from "../images/mongodb.svg"
import postgresql from "../images/postgresql.svg"
const About = ({ id }) => (
<StaticQuery
query={graphql`
query AboutImgQuery {
aboutImg: file(relativePath: { eq: "towers.jpg" }) {
childImageSharp {
fluid(maxWidth: 1200) {
...GatsbyImageSharpFluid
}
}
}
}
`}
render={data => (
<section id={id} className="section cover">
<Img
title="About image"
alt="Towers"
fluid={data.aboutImg.childImageSharp.fluid}
style={{
borderBottom: "2px solid #0F2027",
position: "absolute",
left: 0,
top: 0,
width: "100%",
height: "100%",
}}
/>
<div className="overlay" />
<div className="about"><|fim▁hole|> <h1 className="name mt-5">
<b>About Me</b>
</h1>
<div className="description mb-4">
<h5 className="greetings">
I'm a developer who is driven by the motivation to learn and
utilize all of the <br />
newest and leading software technologies, tools and frameworks.{" "}
<br />
Here are some of the technologies I have worked with:
</h5>
</div>
<div className="svg-container">
<div className="logo-container">
<a
href="https://rebrand.ly/w1zfk5"
target="_blank"
rel="noopener noreferrer"
>
<img src={html5} alt="html5" />
</a>
<h5>HTML</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/gpe80b"
target="_blank"
rel="noopener noreferrer"
>
<img src={css3} alt="css3" />
</a>
<h5>CSS</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/ac3zez"
target="_blank"
rel="noopener noreferrer"
>
<img src={sass} alt="sass" />
</a>
<h5>Sass</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/gdw8nf"
target="_blank"
rel="noopener noreferrer"
>
<img src={js} alt="js" />
</a>
<h5>JavaScript</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/t8q4kk"
target="_blank"
rel="noopener noreferrer"
>
<img src={jQuery} alt="jQuery" />
</a>
<h5>jQuery</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/5dmk0k"
target="_blank"
rel="noopener noreferrer"
>
<img src={php} alt="php" />
</a>
<h5>PHP</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/51v3f7"
target="_blank"
rel="noopener noreferrer"
>
<img src={ruby} alt="ruby" />
</a>
<h5>Ruby</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/u9f3bu"
target="_blank"
rel="noopener noreferrer"
>
<img src={python} alt="python" />
</a>
<h5>Python</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/4711zo"
target="_blank"
rel="noopener noreferrer"
>
<img src={react} alt="react" />
</a>
<h5>React</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/f4fdtb"
target="_blank"
rel="noopener noreferrer"
>
<img src={redux} alt="redux" />
</a>
<h5>Redux</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/0af3pn"
target="_blank"
rel="noopener noreferrer"
>
<img src={angular} alt="angular" />
</a>
<h5>Angular</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/fno5hy"
target="_blank"
rel="noopener noreferrer"
>
<img src={nodejs} alt="nodejs" />
</a>
<h5>Node</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/8pwvla"
target="_blank"
rel="noopener noreferrer"
>
<img src={express} alt="express" />
</a>
<h5>Express</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/chgco7"
target="_blank"
rel="noopener noreferrer"
>
<img src={graphQL} alt="graphQL" />
</a>
<h5>GraphQL</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/s8v7qq"
target="_blank"
rel="noopener noreferrer"
>
<img src={apollo} alt="apollo" />
</a>
<h5>Apollo</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/jm3gu8"
target="_blank"
rel="noopener noreferrer"
>
<img src={laravel} alt="laravel" />
</a>
<h5>Laravel</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/hbkv6c"
target="_blank"
rel="noopener noreferrer"
>
<img src={django} alt="django" />
</a>
<h5>Django</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/71jw07"
target="_blank"
rel="noopener noreferrer"
>
<img src={rails} alt="rails" />
</a>
<h5>Ruby on Rails</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/8jg10f"
target="_blank"
rel="noopener noreferrer"
>
<img src={firebase} alt="firebase" />
</a>
<h5>Firebase</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/1lamx3"
target="_blank"
rel="noopener noreferrer"
>
<img src={mongodb} alt="mongodb" />
</a>
<h5>MongoDB</h5>
</div>
<div className="logo-container">
<a
href="https://rebrand.ly/az0ssm"
target="_blank"
rel="noopener noreferrer"
>
<img src={postgresql} alt="postgresql" />
</a>
<h5>PostgreSQL</h5>
</div>
</div>
<div className="arrow animated bounceInDown" />
</div>
</section>
)}
/>
)
export default About<|fim▁end|>
| |
<|file_name|>plugins_hook.js<|end_file_name|><|fim▁begin|>#!/usr/bin/env node
var pluginlist = [
];
var exec = require('child_process').exec;
function puts(error, stdout, stderr) {
console.log(stdout);<|fim▁hole|>});<|fim▁end|>
|
}
pluginlist.forEach(function(plug) {
exec("cordova plugin add " + plug, puts);
|
<|file_name|>model.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# 자세한 설명은 상위 폴더의 03 - Seq2Seq.py 등에서 찾으실 수 있습니다.
import tensorflow as tf
# Seq2Seq 기본 클래스
class Seq2Seq:
logits = None
outputs = None
cost = None
train_op = None
def __init__(self, vocab_size, n_hidden=128, n_layers=3):
self.learning_late = 0.001
self.vocab_size = vocab_size
self.n_hidden = n_hidden
self.n_layers = n_layers
self.enc_input = tf.placeholder(tf.float32, [None, None, self.vocab_size])
self.dec_input = tf.placeholder(tf.float32, [None, None, self.vocab_size])
self.targets = tf.placeholder(tf.int64, [None, None])
self.weights = tf.Variable(tf.ones([self.n_hidden, self.vocab_size]), name="weights")
self.bias = tf.Variable(tf.zeros([self.vocab_size]), name="bias")
self.global_step = tf.Variable(0, trainable=False, name="global_step")
self.build_model()
self.saver = tf.train.Saver(tf.global_variables())
def build_model(self):
self.enc_input = tf.transpose(self.enc_input, [1, 0, 2])
self.dec_input = tf.transpose(self.dec_input, [1, 0, 2])
enc_cell, dec_cell = self.build_cells()
with tf.variable_scope('encode'):
outputs, enc_states = tf.nn.dynamic_rnn(enc_cell, self.enc_input, dtype=tf.float32)
with tf.variable_scope('decode'):
outputs, dec_states = tf.nn.dynamic_rnn(dec_cell, self.dec_input, dtype=tf.float32,
initial_state=enc_states)
self.logits, self.cost, self.train_op = self.build_ops(outputs, self.targets)
self.outputs = tf.argmax(self.logits, 2)
def build_cells(self, output_keep_prob=0.5):
enc_cell = tf.nn.rnn_cell.BasicRNNCell(self.n_hidden)
enc_cell = tf.nn.rnn_cell.DropoutWrapper(enc_cell, output_keep_prob=output_keep_prob)
enc_cell = tf.nn.rnn_cell.MultiRNNCell([enc_cell] * self.n_layers)
dec_cell = tf.nn.rnn_cell.BasicRNNCell(self.n_hidden)
dec_cell = tf.nn.rnn_cell.DropoutWrapper(dec_cell, output_keep_prob=output_keep_prob)
dec_cell = tf.nn.rnn_cell.MultiRNNCell([dec_cell] * self.n_layers)
return enc_cell, dec_cell
<|fim▁hole|> def build_ops(self, outputs, targets):
time_steps = tf.shape(outputs)[1]
outputs = tf.reshape(outputs, [-1, self.n_hidden])
logits = tf.matmul(outputs, self.weights) + self.bias
logits = tf.reshape(logits, [-1, time_steps, self.vocab_size])
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, targets))
train_op = tf.train.AdamOptimizer(learning_rate=self.learning_late).minimize(cost, global_step=self.global_step)
tf.summary.scalar('cost', cost)
return logits, cost, train_op
def train(self, session, enc_input, dec_input, targets):
return session.run([self.train_op, self.cost],
feed_dict={self.enc_input: enc_input,
self.dec_input: dec_input,
self.targets: targets})
def test(self, session, enc_input, dec_input, targets):
prediction_check = tf.equal(self.outputs, self.targets)
accuracy = tf.reduce_mean(tf.cast(prediction_check, tf.float32))
return session.run([self.targets, self.outputs, accuracy],
feed_dict={self.enc_input: enc_input,
self.dec_input: dec_input,
self.targets: targets})
def predict(self, session, enc_input, dec_input):
return session.run(self.outputs,
feed_dict={self.enc_input: enc_input,
self.dec_input: dec_input})
def write_logs(self, session, writer, enc_input, dec_input, targets):
merged = tf.summary.merge_all()
summary = session.run(merged, feed_dict={self.enc_input: enc_input,
self.dec_input: dec_input,
self.targets: targets})
writer.add_summary(summary, self.global_step.eval())<|fim▁end|>
| |
<|file_name|>CoMPlEx_hwConfig_Dialog.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUIs\CoMPlEx_hwConfig_Dialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_hwConfig_dialog(object):
def setupUi(self, hwConfig_dialog):
hwConfig_dialog.setObjectName(_fromUtf8("hwConfig_dialog"))
hwConfig_dialog.resize(531, 816)
self.verticalLayout = QtGui.QVBoxLayout(hwConfig_dialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = QtGui.QGroupBox(hwConfig_dialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.formLayout = QtGui.QFormLayout(self.groupBox)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setObjectName(_fromUtf8("label"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
self.afmIpLine = QtGui.QLineEdit(self.groupBox)
self.afmIpLine.setObjectName(_fromUtf8("afmIpLine"))
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.afmIpLine)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
self.afmSubPortNum = QtGui.QSpinBox(self.groupBox)
self.afmSubPortNum.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
self.afmSubPortNum.setMaximum(100000000)
self.afmSubPortNum.setObjectName(_fromUtf8("afmSubPortNum"))
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.afmSubPortNum)
self.label_14 = QtGui.QLabel(self.groupBox)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_14)
self.afmPubPortNum = QtGui.QSpinBox(self.groupBox)
self.afmPubPortNum.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
self.afmPubPortNum.setMaximum(100000000)
self.afmPubPortNum.setObjectName(_fromUtf8("afmPubPortNum"))
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.afmPubPortNum)
self.label_15 = QtGui.QLabel(self.groupBox)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_15)
self.curveNameLine = QtGui.QLineEdit(self.groupBox)
self.curveNameLine.setObjectName(_fromUtf8("curveNameLine"))
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.curveNameLine)
self.label_16 = QtGui.QLabel(self.groupBox)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_16)
self.monitNameLine = QtGui.QLineEdit(self.groupBox)
self.monitNameLine.setObjectName(_fromUtf8("monitNameLine"))
self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.monitNameLine)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_3)
self.xyCmdTagLine = QtGui.QLineEdit(self.groupBox)
self.xyCmdTagLine.setObjectName(_fromUtf8("xyCmdTagLine"))
self.formLayout.setWidget(5, QtGui.QFormLayout.FieldRole, self.xyCmdTagLine)
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_4)
self.xyResTagLine = QtGui.QLineEdit(self.groupBox)
self.xyResTagLine.setObjectName(_fromUtf8("xyResTagLine"))
self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.xyResTagLine)
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(hwConfig_dialog)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.formLayout_2 = QtGui.QFormLayout(self.groupBox_2)
self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.label_5 = QtGui.QLabel(self.groupBox_2)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_5)
self.maxPiezoVoltNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.maxPiezoVoltNumDbl.setMinimum(-99.0)
self.maxPiezoVoltNumDbl.setObjectName(_fromUtf8("maxPiezoVoltNumDbl"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.maxPiezoVoltNumDbl)
self.label_6 = QtGui.QLabel(self.groupBox_2)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_6)
self.minPiezoVoltNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.minPiezoVoltNumDbl.setMinimum(-99.0)
self.minPiezoVoltNumDbl.setObjectName(_fromUtf8("minPiezoVoltNumDbl"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.minPiezoVoltNumDbl)
self.label_7 = QtGui.QLabel(self.groupBox_2)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_7)
self.maxPiezoExtNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.maxPiezoExtNumDbl.setMinimum(-99.0)
self.maxPiezoExtNumDbl.setObjectName(_fromUtf8("maxPiezoExtNumDbl"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.maxPiezoExtNumDbl)
self.label_8 = QtGui.QLabel(self.groupBox_2)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_8)
self.minPiezoExtNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.minPiezoExtNumDbl.setMinimum(-99.0)
self.minPiezoExtNumDbl.setObjectName(_fromUtf8("minPiezoExtNumDbl"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.minPiezoExtNumDbl)
self.label_9 = QtGui.QLabel(self.groupBox_2)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_9)
self.farNearCmbBox = QtGui.QComboBox(self.groupBox_2)
self.farNearCmbBox.setObjectName(_fromUtf8("farNearCmbBox"))
self.farNearCmbBox.addItem(_fromUtf8(""))
self.farNearCmbBox.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.FieldRole, self.farNearCmbBox)
self.label_19 = QtGui.QLabel(self.groupBox_2)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.formLayout_2.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_19)
self.toStartSpeedNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.toStartSpeedNumDbl.setMinimum(1.0)
self.toStartSpeedNumDbl.setMaximum(20000.0)
self.toStartSpeedNumDbl.setObjectName(_fromUtf8("toStartSpeedNumDbl"))
self.formLayout_2.setWidget(7, QtGui.QFormLayout.FieldRole, self.toStartSpeedNumDbl)
self.maxSpeedNumDbl = QtGui.QDoubleSpinBox(self.groupBox_2)
self.maxSpeedNumDbl.setKeyboardTracking(False)
self.maxSpeedNumDbl.setMinimum(1.0)
self.maxSpeedNumDbl.setMaximum(20000.0)
self.maxSpeedNumDbl.setObjectName(_fromUtf8("maxSpeedNumDbl"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.FieldRole, self.maxSpeedNumDbl)
self.label_22 = QtGui.QLabel(self.groupBox_2)
self.label_22.setObjectName(_fromUtf8("label_22"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_22)
self.label_23 = QtGui.QLabel(self.groupBox_2)
self.label_23.setObjectName(_fromUtf8("label_23"))
self.formLayout_2.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_23)
self.movingObjCmbBox = QtGui.QComboBox(self.groupBox_2)
self.movingObjCmbBox.setObjectName(_fromUtf8("movingObjCmbBox"))
self.movingObjCmbBox.addItem(_fromUtf8(""))
self.movingObjCmbBox.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(5, QtGui.QFormLayout.FieldRole, self.movingObjCmbBox)
self.label_5.raise_()
self.maxPiezoVoltNumDbl.raise_()
self.label_6.raise_()
self.minPiezoVoltNumDbl.raise_()
self.label_7.raise_()
self.label_8.raise_()
self.maxPiezoExtNumDbl.raise_()
self.minPiezoExtNumDbl.raise_()
self.label_9.raise_()
self.farNearCmbBox.raise_()<|fim▁hole|> self.toStartSpeedNumDbl.raise_()
self.label_19.raise_()
self.maxSpeedNumDbl.raise_()
self.label_22.raise_()
self.label_23.raise_()
self.movingObjCmbBox.raise_()
self.verticalLayout.addWidget(self.groupBox_2)
self.groupBox_3 = QtGui.QGroupBox(hwConfig_dialog)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.formLayout_3 = QtGui.QFormLayout(self.groupBox_3)
self.formLayout_3.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_3.setObjectName(_fromUtf8("formLayout_3"))
self.label_10 = QtGui.QLabel(self.groupBox_3)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_10)
self.deflSignCmbBox = QtGui.QComboBox(self.groupBox_3)
self.deflSignCmbBox.setObjectName(_fromUtf8("deflSignCmbBox"))
self.deflSignCmbBox.addItem(_fromUtf8(""))
self.deflSignCmbBox.addItem(_fromUtf8(""))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.FieldRole, self.deflSignCmbBox)
self.label_11 = QtGui.QLabel(self.groupBox_3)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_11)
self.sumThrNumDbl = QtGui.QDoubleSpinBox(self.groupBox_3)
self.sumThrNumDbl.setObjectName(_fromUtf8("sumThrNumDbl"))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.FieldRole, self.sumThrNumDbl)
self.label_12 = QtGui.QLabel(self.groupBox_3)
self.label_12.setObjectName(_fromUtf8("label_12"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_12)
self.iGainMaxNumDbl = QtGui.QDoubleSpinBox(self.groupBox_3)
self.iGainMaxNumDbl.setMaximum(100000.0)
self.iGainMaxNumDbl.setObjectName(_fromUtf8("iGainMaxNumDbl"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.FieldRole, self.iGainMaxNumDbl)
self.label_13 = QtGui.QLabel(self.groupBox_3)
self.label_13.setObjectName(_fromUtf8("label_13"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_13)
self.pGainMaxNumDbl = QtGui.QDoubleSpinBox(self.groupBox_3)
self.pGainMaxNumDbl.setMaximum(100000.0)
self.pGainMaxNumDbl.setObjectName(_fromUtf8("pGainMaxNumDbl"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.FieldRole, self.pGainMaxNumDbl)
self.defBaseNameLine = QtGui.QLineEdit(self.groupBox_3)
self.defBaseNameLine.setObjectName(_fromUtf8("defBaseNameLine"))
self.formLayout_3.setWidget(7, QtGui.QFormLayout.FieldRole, self.defBaseNameLine)
self.defDirLine = QtGui.QLineEdit(self.groupBox_3)
self.defDirLine.setObjectName(_fromUtf8("defDirLine"))
self.formLayout_3.setWidget(6, QtGui.QFormLayout.FieldRole, self.defDirLine)
self.label_17 = QtGui.QLabel(self.groupBox_3)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.formLayout_3.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_17)
self.label_18 = QtGui.QLabel(self.groupBox_3)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.formLayout_3.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_18)
self.maxDeflVoltNumDbl = QtGui.QDoubleSpinBox(self.groupBox_3)
self.maxDeflVoltNumDbl.setMinimum(-99.0)
self.maxDeflVoltNumDbl.setObjectName(_fromUtf8("maxDeflVoltNumDbl"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.FieldRole, self.maxDeflVoltNumDbl)
self.minDeflVoltNumDbl = QtGui.QDoubleSpinBox(self.groupBox_3)
self.minDeflVoltNumDbl.setMinimum(-99.0)
self.minDeflVoltNumDbl.setObjectName(_fromUtf8("minDeflVoltNumDbl"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.FieldRole, self.minDeflVoltNumDbl)
self.label_20 = QtGui.QLabel(self.groupBox_3)
self.label_20.setObjectName(_fromUtf8("label_20"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_20)
self.label_21 = QtGui.QLabel(self.groupBox_3)
self.label_21.setObjectName(_fromUtf8("label_21"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_21)
self.verticalLayout.addWidget(self.groupBox_3)
self.saveNcancBtnBox = QtGui.QDialogButtonBox(hwConfig_dialog)
self.saveNcancBtnBox.setOrientation(QtCore.Qt.Horizontal)
self.saveNcancBtnBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Save)
self.saveNcancBtnBox.setObjectName(_fromUtf8("saveNcancBtnBox"))
self.verticalLayout.addWidget(self.saveNcancBtnBox)
self.verticalLayout.setStretch(0, 3)
self.verticalLayout.setStretch(1, 3)
self.verticalLayout.setStretch(2, 3)
self.verticalLayout.setStretch(3, 1)
self.retranslateUi(hwConfig_dialog)
QtCore.QObject.connect(self.saveNcancBtnBox, QtCore.SIGNAL(_fromUtf8("accepted()")), hwConfig_dialog.accept)
QtCore.QObject.connect(self.saveNcancBtnBox, QtCore.SIGNAL(_fromUtf8("rejected()")), hwConfig_dialog.reject)
QtCore.QMetaObject.connectSlotsByName(hwConfig_dialog)
def retranslateUi(self, hwConfig_dialog):
hwConfig_dialog.setWindowTitle(_translate("hwConfig_dialog", "Hardware Config", None))
self.groupBox.setTitle(_translate("hwConfig_dialog", "Connections", None))
self.label.setText(_translate("hwConfig_dialog", "AFM IP", None))
self.label_2.setText(_translate("hwConfig_dialog", "AFM Sub Port", None))
self.label_14.setText(_translate("hwConfig_dialog", "AFM Pub Port", None))
self.label_15.setText(_translate("hwConfig_dialog", "High res. device name", None))
self.label_16.setText(_translate("hwConfig_dialog", "Monitor device name", None))
self.label_3.setText(_translate("hwConfig_dialog", "XY CMD Tag", None))
self.label_4.setText(_translate("hwConfig_dialog", "XY RES Tag", None))
self.groupBox_2.setTitle(_translate("hwConfig_dialog", "Piezo settings", None))
self.label_5.setText(_translate("hwConfig_dialog", "Max Piezo voltage [V]", None))
self.label_6.setText(_translate("hwConfig_dialog", "Min Piezo voltage [V]", None))
self.label_7.setText(_translate("hwConfig_dialog", "Max Piezo ext [um]", None))
self.label_8.setText(_translate("hwConfig_dialog", "Min Piezo ext [um]", None))
self.label_9.setText(_translate("hwConfig_dialog", "Min piezo ext =", None))
self.farNearCmbBox.setItemText(0, _translate("hwConfig_dialog", "Far", None))
self.farNearCmbBox.setItemText(1, _translate("hwConfig_dialog", "Near", None))
self.label_19.setText(_translate("hwConfig_dialog", "Start speed [nm/s]", None))
self.label_22.setText(_translate("hwConfig_dialog", "Max Speed [nm/s]", None))
self.label_23.setText(_translate("hwConfig_dialog", "Moving object =", None))
self.movingObjCmbBox.setItemText(0, _translate("hwConfig_dialog", "Tip", None))
self.movingObjCmbBox.setItemText(1, _translate("hwConfig_dialog", "Sample", None))
self.groupBox_3.setTitle(_translate("hwConfig_dialog", "Other", None))
self.label_10.setText(_translate("hwConfig_dialog", "Deflection sign", None))
self.deflSignCmbBox.setItemText(0, _translate("hwConfig_dialog", "Signal increase = force increase", None))
self.deflSignCmbBox.setItemText(1, _translate("hwConfig_dialog", "Signal increase = force decrease", None))
self.label_11.setText(_translate("hwConfig_dialog", "Sum Threshold [V]", None))
self.label_12.setText(_translate("hwConfig_dialog", "Integral Gain Max", None))
self.label_13.setText(_translate("hwConfig_dialog", "Proportional Gain Max", None))
self.label_17.setText(_translate("hwConfig_dialog", "Default data directory", None))
self.label_18.setText(_translate("hwConfig_dialog", "Default file base name", None))
self.label_20.setText(_translate("hwConfig_dialog", "Hi-res Defl max [V]", None))
self.label_21.setText(_translate("hwConfig_dialog", "Hi-res Defl min [V]", None))<|fim▁end|>
| |
<|file_name|>db_connector.py<|end_file_name|><|fim▁begin|>import os
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine<|fim▁hole|>from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class DBConnector():
'''
where every row is the details one employee was paid for an entire month.
'''
@classmethod
def get_session(cls):
database_path = os.environ["SQL_DATABASE"]
engine = create_engine(database_path)
session = sessionmaker(bind=engine)()
return session<|fim▁end|>
| |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.generic import ListView
from django.views.generic.detail import DetailView
from foo.hotel.models import Hotel
from foo.hotel.views import HotelLimitListView
from foo.hotel.views import HotelLimitNoOrderListView
urlpatterns = patterns('',
url(r'^$', HotelLimitListView.as_view(model=Hotel), name='hotel'),<|fim▁hole|> url(r'^noorder$', HotelLimitNoOrderListView.as_view(model=Hotel), name='hotelnoorder'))<|fim▁end|>
| |
<|file_name|>ranking.py<|end_file_name|><|fim▁begin|>"""Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.stats import rankdata
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds := len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
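# Hedged illustration of _binary_clf_curve (values derived by hand, not a
# stored doctest): with y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35,
# 0.8], the distinct scores are swept in decreasing order (0.8, 0.4, 0.35,
# 0.1), giving cumulative counts tps = [1, 1, 2, 2] and fps = [0, 1, 1, 2].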
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
    y axis.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:<|fim▁hole|> raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator"
and not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
# If all labels are relevant or unrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += np.divide(L, rank, dtype=float).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)<|fim▁end|>
| |
<|file_name|>HealthStateScope.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2016 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.serverhealth;
import com.thoughtworks.go.config.CaseInsensitiveString;
import com.thoughtworks.go.config.CruiseConfig;
import com.thoughtworks.go.config.PipelineConfig;
import com.thoughtworks.go.config.remote.ConfigRepoConfig;
import com.thoughtworks.go.domain.materials.Material;
import com.thoughtworks.go.domain.materials.MaterialConfig;
import org.apache.commons.lang.StringUtils;
import java.util.HashSet;
import java.util.Set;
public class HealthStateScope implements Comparable<HealthStateScope> {
public static final HealthStateScope GLOBAL = new HealthStateScope(ScopeType.GLOBAL, "GLOBAL");
private final ScopeType type;
private final String scope;
private HealthStateScope(ScopeType type, String scope) {
this.type = type;
this.scope = scope;
}
public static HealthStateScope forGroup(String groupName) {
return new HealthStateScope(ScopeType.GROUP, groupName);
}
public static HealthStateScope forPipeline(String pipelineName) {
return new HealthStateScope(ScopeType.PIPELINE, pipelineName);
}
public static HealthStateScope forFanin(String pipelineName) {
return new HealthStateScope(ScopeType.FANIN, pipelineName);
}
public static HealthStateScope forStage(String pipelineName, String stageName) {
return new HealthStateScope(ScopeType.STAGE, pipelineName + "/" + stageName);
}
public static HealthStateScope forJob(String pipelineName, String stageName, String jobName) {
return new HealthStateScope(ScopeType.JOB, pipelineName + "/" + stageName + "/" + jobName);
}
public static HealthStateScope forMaterial(Material material) {
return new HealthStateScope(ScopeType.MATERIAL, material.getSqlCriteria().toString());
}
public static HealthStateScope forMaterialUpdate(Material material) {
return new HealthStateScope(ScopeType.MATERIAL_UPDATE, material.getFingerprint());
}
public static HealthStateScope forMaterialConfig(MaterialConfig materialConfig) {
return new HealthStateScope(ScopeType.MATERIAL, materialConfig.getSqlCriteria().toString());
}
public static HealthStateScope forMaterialConfigUpdate(MaterialConfig materialConfig) {
return new HealthStateScope(ScopeType.MATERIAL_UPDATE, materialConfig.getFingerprint());
}
public static HealthStateScope forConfigRepo(String operation) {
return new HealthStateScope(ScopeType.CONFIG_REPO, operation);
}
public static HealthStateScope forPartialConfigRepo(ConfigRepoConfig repoConfig) {
return new HealthStateScope(ScopeType.CONFIG_PARTIAL, repoConfig.getMaterialConfig().getFingerprint());
}
public static HealthStateScope forPartialConfigRepo(String fingerprint) {
return new HealthStateScope(ScopeType.CONFIG_PARTIAL, fingerprint);
}
public boolean isSame(String scope) {
return StringUtils.endsWithIgnoreCase(this.scope, scope);
}
public boolean isForPipeline() {
return type == ScopeType.PIPELINE;
}
public boolean isForGroup() {
return type == ScopeType.GROUP;
}
public boolean isForMaterial() {
return type == ScopeType.MATERIAL;
}
ScopeType getType() {
return type;
}
public String getScope() {
return scope;
}
public String toString() {
return String.format("LogScope[%s, scope=%s]", type, scope);
}
public boolean equals(Object that) {
if (this == that) { return true; }
if (that == null) { return false; }<|fim▁hole|>
private boolean equals(HealthStateScope that) {
if (type != that.type) { return false; }
if (!scope.equals(that.scope)) { return false; }
return true;
}
public int hashCode() {
int result = type.hashCode();
result = 31 * result + (scope != null ? scope.hashCode() : 0);
return result;
}
public boolean isRemovedFromConfig(CruiseConfig cruiseConfig) {
return type.isRemovedFromConfig(cruiseConfig, scope);
}
public static HealthStateScope forAgent(String cookie) {
return new HealthStateScope(ScopeType.GLOBAL, cookie);
}
public static HealthStateScope forInvalidConfig() {
return new HealthStateScope(ScopeType.GLOBAL, "global");
}
public int compareTo(HealthStateScope o) {
int comparison;
comparison = type.compareTo(o.type);
if (comparison != 0) {
return comparison;
}
comparison = scope.compareTo(o.scope);
if (comparison != 0) {
return comparison;
}
return 0;
}
public static HealthStateScope forPlugin(String symbolicName) {
return new HealthStateScope(ScopeType.PLUGIN, symbolicName);
}
public Set<String> getPipelineNames(CruiseConfig config) {
HashSet<String> pipelineNames = new HashSet<>();
switch (type) {
case PIPELINE:
case FANIN:
pipelineNames.add(scope);
break;
case STAGE:
case JOB:
pipelineNames.add(scope.split("/")[0]);
break;
case MATERIAL:
for (PipelineConfig pc : config.getAllPipelineConfigs()) {
for (MaterialConfig mc : pc.materialConfigs()) {
String scope = HealthStateScope.forMaterialConfig(mc).getScope();
if (scope.equals(this.scope)) {
pipelineNames.add(pc.name().toString());
}
}
}
break;
case MATERIAL_UPDATE:
for (PipelineConfig pc : config.getAllPipelineConfigs()) {
for (MaterialConfig mc : pc.materialConfigs()) {
String scope = HealthStateScope.forMaterialConfigUpdate(mc).getScope();
if (scope.equals(this.scope)) {
pipelineNames.add(pc.name().toString());
}
}
}
break;
}
return pipelineNames;
}
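    // Hedged illustration (hypothetical pipeline/stage names, not taken from
    // the GoCD codebase): forStage("build", "compile").getPipelineNames(config)
    // splits the scope "build/compile" on '/' and returns the set {"build"}.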
public boolean isForConfigPartial() {
return type == ScopeType.CONFIG_PARTIAL;
}
enum ScopeType {
GLOBAL,
CONFIG_REPO,
GROUP {
public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String group) {
return !cruiseConfig.hasPipelineGroup(group);
}
},
MATERIAL {
public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String materialScope) {
for (MaterialConfig materialConfig : cruiseConfig.getAllUniqueMaterials()) {
if (HealthStateScope.forMaterialConfig(materialConfig).getScope().equals(materialScope)) {
return false;
}
}
return true;
}
},
MATERIAL_UPDATE {
public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String materialScope) {
for (MaterialConfig materialConfig : cruiseConfig.getAllUniqueMaterials()) {
if (HealthStateScope.forMaterialConfigUpdate(materialConfig).getScope().equals(materialScope)) {
return false;
}
}
return true;
}
},
CONFIG_PARTIAL {
public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String materialScope) {
for (ConfigRepoConfig configRepoConfig : cruiseConfig.getConfigRepos()) {
if (HealthStateScope.forPartialConfigRepo(configRepoConfig).getScope().equals(materialScope)) {
return false;
}
}
return true;
}
},
PIPELINE {
public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipeline) {
return !cruiseConfig.hasPipelineNamed(new CaseInsensitiveString(pipeline));
}
},
FANIN {
public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipeline) {
return !cruiseConfig.hasPipelineNamed(new CaseInsensitiveString(pipeline));
}
},
STAGE {
public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipelineStage) {
String[] parts = pipelineStage.split("/");
return !cruiseConfig.hasStageConfigNamed(new CaseInsensitiveString(parts[0]), new CaseInsensitiveString(parts[1]), true);
}
},
JOB {
public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipelineStageJob) {
String[] parts = pipelineStageJob.split("/");
return !cruiseConfig.hasBuildPlan(new CaseInsensitiveString(parts[0]), new CaseInsensitiveString(parts[1]), parts[2], true);
}
}, PLUGIN;
protected boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String scope) {
return false;
};
}
}<|fim▁end|>
|
if (getClass() != that.getClass()) { return false; }
return equals((HealthStateScope) that);
}
|
<|file_name|>version-check.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import os
import re
import sys
import bs4
from oslo_config import cfg
import pkg_resources
import prettytable
import requests
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..'))
# NOTE(SamYaple): Update the search path to prefer PROJECT_ROOT as the source
# of packages to import if we are using local tools/build.py
# instead of pip installed kolla-build tool
if PROJECT_ROOT not in sys.path:
sys.path.insert(0, PROJECT_ROOT)
from kolla.common import config as common_config
logging.basicConfig(format="%(message)s")
LOG = logging.getLogger('version-check')
# Filter list for non-projects
NOT_PROJECTS = [
'nova-novncproxy',
'nova-spicehtml5proxy',
'openstack-base',
'profiles'
]
TARBALLS_BASE_URL = 'http://tarballs.openstack.org'
VERSIONS = {'local': dict()}
def retrieve_upstream_versions():
upstream_versions = dict()
for project in VERSIONS['local']:
winner = None
series = VERSIONS['local'][project].split('.')[0]
base = '{}/{}'.format(TARBALLS_BASE_URL, project)
LOG.debug("Getting latest version for project %s from %s",
project, base)
r = requests.get(base)
s = bs4.BeautifulSoup(r.text, 'html.parser')
for link in s.find_all('a'):
version = link.get('href')
if (version.endswith('.tar.gz') and
version.startswith('{}-{}'.format(project, series))):
split = '{}-|.tar.gz'.format(project)
candidate = re.split(split, version)[1]
# Ignore 2014, 2015 versions as they are older
if candidate.startswith('201'):
continue
if not winner or more_recent(candidate, winner):
winner = candidate
if not winner:
LOG.warning("Could not find a version for %s", project)
continue
if '-' in winner:
winner = winner.split('-')[1]
upstream_versions[project] = winner
LOG.debug("Found latest version %s for project %s", winner, project)
VERSIONS['upstream'] = collections.OrderedDict(
sorted(upstream_versions.items()))
def retrieve_local_versions(conf):
for section in common_config.SOURCES:
if section in NOT_PROJECTS:
continue
project = section.split('-')[0]
if section not in conf.list_all_sections():
LOG.debug("Project %s not found in configuration file, using "
"default from kolla.common.config", project)
raw_version = common_config.SOURCES[section]['location']
else:
raw_version = getattr(conf, section).location
version = raw_version.split('/')[-1].split('.tar.gz')[0]
if '-' in version:
version = version.split('-')[1]
LOG.debug("Use local version %s for project %s", version, project)
VERSIONS['local'][project] = version
def more_recent(candidate, reference):
return pkg_resources.parse_version(candidate) > \<|fim▁hole|> pkg_resources.parse_version(reference)
def diff_link(project, old_ref, new_ref):
return "https://github.com/openstack/{}/compare/{}...{}".format(
project, old_ref, new_ref)
def compare_versions():
up_to_date = True
result = prettytable.PrettyTable(["Project", "Current version",
"Latest version", "Comparing changes"])
result.align = "l"
for project in VERSIONS['upstream']:
if project not in VERSIONS['local']:
continue
upstream_version = VERSIONS['upstream'][project]
local_version = VERSIONS['local'][project]
if more_recent(upstream_version, local_version):
result.add_row([
project,
VERSIONS['local'][project],
VERSIONS['upstream'][project],
diff_link(project, local_version, upstream_version)
])
up_to_date = False
if up_to_date:
result = "Everything is up to date"
print(result)
def main():
conf = cfg.ConfigOpts()
common_config.parse(conf, sys.argv[1:], prog='version-check')
if conf.debug:
LOG.setLevel(logging.DEBUG)
retrieve_local_versions(conf)
retrieve_upstream_versions()
compare_versions()
if __name__ == '__main__':
main()<|fim▁end|>
| |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
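# Hedged usage sketch (the JSON blob is a placeholder, not a real key):
#
#     creds_json, creds_path = get_credentials(
#         {'GCE_CREDENTIALS': '{"type": "service_account"}'})
#     # creds_path stays None on this branch; it is only set when
#     # GOOGLE_APPLICATION_CREDENTIALS points at a file on disk.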
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
<|fim▁hole|> return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()<|fim▁end|>
|
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
|
<|file_name|>issue-3099.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn a(x: ~str) -> ~str {
fmt!("First function with %s", x)
}
fn a(x: ~str, y: ~str) -> ~str { //~ ERROR duplicate definition of value `a`
fmt!("Second function with %s and %s", x, y)
}
fn main() {
info!("Result: ");
}<|fim▁end|>
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
|
<|file_name|>ev3.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Copyright (c) 2015 Eric Pascual
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------
"""
An assortment of classes modeling specific features of the EV3 brick.
"""
from .core import *
OUTPUT_A = 'outA'
OUTPUT_B = 'outB'
OUTPUT_C = 'outC'
OUTPUT_D = 'outD'
INPUT_1 = 'in1'
INPUT_2 = 'in2'
INPUT_3 = 'in3'
INPUT_4 = 'in4'
class Leds(object):
"""
The EV3 LEDs.
"""
# ~autogen led-colors platforms.ev3.led>currentClass
red_left = Led(name_pattern='ev3:left:red:ev3dev')
red_right = Led(name_pattern='ev3:right:red:ev3dev')
green_left = Led(name_pattern='ev3:left:green:ev3dev')
green_right = Led(name_pattern='ev3:right:green:ev3dev')
LEFT = ( red_left, green_left, )
RIGHT = ( red_right, green_right, )
BLACK = ( 0, 0, )
RED = ( 1, 0, )
GREEN = ( 0, 1, )
AMBER = ( 1, 1, )
ORANGE = ( 1, 0.5, )
YELLOW = ( 0.1, 1, )
@staticmethod
def set_color(group, color, pct=1):
"""
        Sets brightness of LEDs in the given group to the values specified in
        the color tuple. When a percentage is specified, the brightness of each
        LED is reduced proportionally.
Example::
Leds.set_color(LEFT, AMBER)
"""
for l, v in zip(group, color):
l.brightness_pct = v * pct
@staticmethod
def set(group, **kwargs):
"""
Set attributes for each led in group.
Example::
Leds.set(LEFT, brightness_pct=0.5, trigger='timer')
"""
for led in group:
for k in kwargs:
setattr(led, k, kwargs[k])
@staticmethod
def all_off():
"""
Turn all leds off
"""
Leds.red_left.brightness = 0
Leds.red_right.brightness = 0
Leds.green_left.brightness = 0
Leds.green_right.brightness = 0
# ~autogen
class Button(ButtonEVIO):
"""
EV3 Buttons
"""
# ~autogen button-property platforms.ev3.button>currentClass
@staticmethod
def on_up(state):
"""
This handler is called by `process()` whenever state of 'up' button
has changed since last `process()` call. `state` parameter is the new
state of the button.
"""
pass
@staticmethod
def on_down(state):
"""
This handler is called by `process()` whenever state of 'down' button<|fim▁hole|> state of the button.
"""
pass
@staticmethod
def on_left(state):
"""
This handler is called by `process()` whenever state of 'left' button
has changed since last `process()` call. `state` parameter is the new
state of the button.
"""
pass
@staticmethod
def on_right(state):
"""
This handler is called by `process()` whenever state of 'right' button
has changed since last `process()` call. `state` parameter is the new
state of the button.
"""
pass
@staticmethod
def on_enter(state):
"""
This handler is called by `process()` whenever state of 'enter' button
has changed since last `process()` call. `state` parameter is the new
state of the button.
"""
pass
@staticmethod
def on_backspace(state):
"""
This handler is called by `process()` whenever state of 'backspace' button
has changed since last `process()` call. `state` parameter is the new
state of the button.
"""
pass
_buttons = {
'up': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 103},
'down': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 108},
'left': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 105},
'right': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 106},
'enter': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 28},
'backspace': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 14},
}
@property
def up(self):
"""
Check if 'up' button is pressed.
"""
return 'up' in self.buttons_pressed
@property
def down(self):
"""
Check if 'down' button is pressed.
"""
return 'down' in self.buttons_pressed
@property
def left(self):
"""
Check if 'left' button is pressed.
"""
return 'left' in self.buttons_pressed
@property
def right(self):
"""
Check if 'right' button is pressed.
"""
return 'right' in self.buttons_pressed
@property
def enter(self):
"""
Check if 'enter' button is pressed.
"""
return 'enter' in self.buttons_pressed
@property
def backspace(self):
"""
Check if 'backspace' button is pressed.
"""
return 'backspace' in self.buttons_pressed
# ~autogen<|fim▁end|>
|
has changed since last `process()` call. `state` parameter is the new
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>$ = require("jquery");
jQuery = require("jquery");
var StatusTable = require("./content/status-table");
var ArchiveTable = require("./content/archive-table");
var FailuresTable = require("./content/failures-table");
var UploadTestFile = require("./content/upload-test-file");
var UploadCredentials = require("./content/upload-credentials");
var UploadServerInfo = require("./content/upload-server-info");
var UploadBatch = require("./content/upload-batch");
var Bluebird = require("bluebird");
var StandardReport = require("./content/standard-report");
var JmeterReportTable = require("./content/jmeter-report-table");
require('expose?$!expose?jQuery!jquery');
require("bootstrap-webpack");
require('./vendor/startbootstrap-sb-admin-2-1.0.8/deps');
require("./vendor/startbootstrap-sb-admin-2-1.0.8/dist/js/sb-admin-2");
var ss = require("css-loader!./vendor/startbootstrap-sb-admin-2-1.0.8/bower_components/bootstrap/dist/css/bootstrap.min.css").toString();
ss += require("css-loader!./vendor/startbootstrap-sb-admin-2-1.0.8/bower_components/metisMenu/dist/metisMenu.min.css").toString();
ss += require("css-loader!./vendor/startbootstrap-sb-admin-2-1.0.8/dist/css/sb-admin-2.css").toString();
ss += require("css-loader!./vendor/startbootstrap-sb-admin-2-1.0.8/bower_components/font-awesome/css/font-awesome.min.css").toString();
ss += require("css-loader!./vendor/startbootstrap-sb-admin-2-1.0.8/bower_components/datatables/media/css/dataTables.jqueryui.min.css").toString();
function GetQueryStringParams(sParam){
var sPageURL = window.location.search.substring(1);
var sURLVariables = sPageURL.split('&');
for (var i = 0; i < sURLVariables.length; i++){
var sParameterName = sURLVariables[i].split('=');
if (sParameterName[0] == sParam){
return sParameterName[1];
}
}
}
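// Hedged example (note: no URL decoding is done): with
// location.search === "?batchId=42&x=1", GetQueryStringParams("batchId")
// returns the raw string "42".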
//module.exports = function(){
$(document).ready(function(){
$("<style></style>").text(ss).appendTo($("head"));
new UploadTestFile($("form"));
//new SwimLanes($("#swimlanes"));
new UploadCredentials($("#credentials"));
new UploadServerInfo($("#server-info"));
new UploadBatch($("#batch"));
if($("#batches-status")){
new StatusTable($("#batches-status"), "batches");
}
if($("#runs-status")){
new StatusTable($("#runs-status"), "runs");
}
if($("#standard-report")){
if(GetQueryStringParams("batchId")){
var batchId = GetQueryStringParams("batchId");
new StandardReport($("#standard-report"), batchId);
new JmeterReportTable($("#jmeter-report-table"), batchId);
} else {
new FailuresTable($("#failures-table"));
new ArchiveTable($("#archive-table"));
}<|fim▁hole|>});
//}<|fim▁end|>
|
}
|
<|file_name|>091.py<|end_file_name|><|fim▁begin|>#-*- encoding: utf-8 -*-
"""
Right triangles with integer coordinates
The points P (x1, y1) and Q (x2, y2) are plotted at integer co-ordinates and are joined to the origin, O(0,0), to form ΔOPQ.
<|fim▁hole|>
Given that 0 ≤ x1, y1, x2, y2 ≤ 50, how many right triangles can be formed?
"""
from utils import *
#<|fim▁end|>
|
There are exactly fourteen triangles containing a right angle that can be formed when each co-ordinate lies between 0 and 2 inclusive; that is, 0 ≤ x1, y1, x2, y2 ≤ 2.
|
<|file_name|>test_create_page.py<|end_file_name|><|fim▁begin|>from unittest import TestCase
from machete.base.tests import IntegrationTestCase
from machete.wiki.models import Wiki, Page
class CreatePageTest(TestCase):
def test_create_page(self):
wiki = Wiki.create()
page = wiki.create_page("test name [Some link]",
"/index.html",
u"this is a test")
assert isinstance(page, Page)
assert page.html == u'<p>this is a test</p>'
<|fim▁hole|> response = self.post(url, {"url":"TestPage",
"name":"Whatever bro",
"text":"this is a test"})
self.assert200(response)
url = "/projects/{}/wiki/TestPage".format(self.project.vid)
response = self.get(url)
self.assert200(response)
url = "/projects/{}/wiki/".format(self.project.vid)
response = self.get(url)
self.assert200(response)<|fim▁end|>
|
class PageIntegrationTest(IntegrationTestCase):
def test_create_page(self):
url = "/projects/{}/wiki/".format(self.project.vid)
|
<|file_name|>misc.js<|end_file_name|><|fim▁begin|>import Vector from '../prototype'
import {componentOrder, allComponents} from './components'
export const withInvertedCurryingSupport = f => {
const curried = right => left => f(left, right)
return (first, second) => {
if (second === undefined) {
// check for function to allow usage by the pipeline function
if (Array.isArray(first) && first.length === 2 && !(first[0] instanceof Function) && !(first[0] instanceof Number)) {
return f(first[0], first[1])
}
// curried form uses the given single parameter as the right value for the operation f
return curried(first)<|fim▁hole|> }
}
export const skipUndefinedArguments = (f, defaultValue) => (a, b) => a === undefined || b === undefined
? defaultValue
: f(a, b)
export const clone = v => {
if (Array.isArray(v)) {
const obj = Object.create(Vector.prototype)
v.forEach((value, i) => {
obj[allComponents[i]] = value
})
return [...v]
}
const prototype = Object.getPrototypeOf(v)
return Object.assign(Object.create(prototype === Object.prototype ? Vector.prototype : prototype), v)
}<|fim▁end|>
|
}
return f(first, second)
|
<|file_name|>udisks2.go<|end_file_name|><|fim▁begin|>// -*- Mode: Go; indent-tabs-mode: t -*-
/*
* Copyright (C) 2016-2017 Canonical Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package builtin
import (
"strings"
"github.com/snapcore/snapd/interfaces"
"github.com/snapcore/snapd/interfaces/apparmor"
"github.com/snapcore/snapd/interfaces/dbus"
"github.com/snapcore/snapd/interfaces/seccomp"
"github.com/snapcore/snapd/interfaces/udev"
"github.com/snapcore/snapd/snap"
)
const udisks2Summary = `allows operating as or interacting with the UDisks2 service`
const udisks2BaseDeclarationSlots = `
udisks2:
allow-installation:
slot-snap-type:
- app
deny-connection: true
deny-auto-connection: true
`
const udisks2PermanentSlotAppArmor = `
# Description: Allow operating as the udisks2 service. This gives privileged access to
# the system.
# DBus accesses
#include <abstractions/dbus-strict>
dbus (send)
bus=system
path=/org/freedesktop/DBus
interface=org.freedesktop.DBus
member="{Request,Release}Name"
peer=(name=org.freedesktop.DBus, label=unconfined),
dbus (send)
bus=system
path=/org/freedesktop/DBus
interface=org.freedesktop.DBus
member="GetConnectionUnix{ProcessID,User}"
peer=(label=unconfined),
# Allow binding the service to the requested connection name
dbus (bind)
bus=system
name="org.freedesktop.UDisks2",
# Allow unconfined to talk to us. The API for unconfined will be limited
# with DBus policy, below.
dbus (receive, send)
bus=system
path=/org/freedesktop/UDisks2{,/**}
interface=org.freedesktop.DBus*
peer=(label=unconfined),
# Needed for mount/unmount operations
capability sys_admin,
# Allow scanning of devices
network netlink raw,
/run/udev/data/b[0-9]*:[0-9]* r,
/sys/devices/**/block/** r,
# Mount points could be in /run/media/<user>/* or /media/<user>/*
/run/systemd/seats/* r,
/{,run/}media/{,**} rw,
mount options=(ro,nosuid,nodev) /dev/{sd*,mmcblk*} -> /{,run/}media/**,
mount options=(rw,nosuid,nodev) /dev/{sd*,mmcblk*} -> /{,run/}media/**,
umount /{,run/}media/**,
# This should probably be patched to use $SNAP_DATA/run/...
/run/udisks2/{,**} rw,
# udisksd execs mount/umount to do the actual operations
/bin/mount ixr,
/bin/umount ixr,
# mount/umount (via libmount) track some mount info in these files
/run/mount/utab* wrl,
# Udisks2 needs to read the raw device for partition information. These rules
# give raw read access to the system disks and therefore the entire system.
/dev/sd* r,
/dev/mmcblk* r,
/dev/vd* r,
# Needed for probing raw devices
capability sys_rawio,
`
const udisks2ConnectedSlotAppArmor = `
# Allow connected clients to interact with the service. This gives privileged
# access to the system.
dbus (receive, send)
bus=system
path=/org/freedesktop/UDisks2/**
interface=org.freedesktop.DBus.Properties
peer=(label=###PLUG_SECURITY_TAGS###),
dbus (receive, send)
bus=system
path=/org/freedesktop/UDisks2
interface=org.freedesktop.DBus.ObjectManager
peer=(label=###PLUG_SECURITY_TAGS###),
# Allow access to the Udisks2 API
dbus (receive, send)
bus=system
path=/org/freedesktop/UDisks2/**
interface=org.freedesktop.UDisks2.*
peer=(label=###PLUG_SECURITY_TAGS###),
# Allow clients to introspect the service
dbus (receive)
bus=system
path=/org/freedesktop/UDisks2
interface=org.freedesktop.DBus.Introspectable
member=Introspect
peer=(label=###PLUG_SECURITY_TAGS###),
`
const udisks2ConnectedPlugAppArmor = `
# Description: Allow using the udisks2 service. This gives privileged access to the
# service.
#include <abstractions/dbus-strict>
dbus (receive, send)
bus=system
path=/org/freedesktop/UDisks2/**
interface=org.freedesktop.DBus.Properties
peer=(label=###SLOT_SECURITY_TAGS###),
dbus (receive, send)
bus=system
path=/org/freedesktop/UDisks2
interface=org.freedesktop.DBus.ObjectManager
peer=(label=###SLOT_SECURITY_TAGS###),
# Allow access to the Udisks2 API
dbus (receive, send)
bus=system
path=/org/freedesktop/UDisks2/**
interface=org.freedesktop.UDisks2.*
peer=(label=###SLOT_SECURITY_TAGS###),
# Allow clients to introspect the service
dbus (send)
bus=system
path=/org/freedesktop/UDisks2
interface=org.freedesktop.DBus.Introspectable
member=Introspect
peer=(label=###SLOT_SECURITY_TAGS###),
`
const udisks2PermanentSlotSecComp = `
bind
chown32
fchown
fchown32
fchownat
lchown
lchown32
mount
shmctl
umount
umount2
# libudev
socket AF_NETLINK - NETLINK_KOBJECT_UEVENT
`
const udisks2PermanentSlotDBus = `
<policy user="root">
<allow own="org.freedesktop.UDisks2"/>
<allow send_destination="org.freedesktop.UDisks2"/>
</policy>
<policy context="default">
<allow send_destination="org.freedesktop.UDisks2" send_interface="org.freedesktop.DBus.Introspectable" />
</policy>
`
const udisks2ConnectedPlugDBus = `
<policy context="default">
<deny own="org.freedesktop.UDisks2"/>
<deny send_destination="org.freedesktop.UDisks2"/>
</policy>
`
const udisks2PermanentSlotUDev = `
# These udev rules come from the upstream udisks2 package
#
# This file contains udev rules for udisks 2.x
#
# Do not edit this file, it will be overwritten on updates
#
# ------------------------------------------------------------------------
# Probing
# ------------------------------------------------------------------------
# Skip probing if not a block device or if requested by other rules
#
SUBSYSTEM!="block", GOTO="udisks_probe_end"
ENV{DM_MULTIPATH_DEVICE_PATH}=="?*", GOTO="udisks_probe_end"
ENV{DM_UDEV_DISABLE_OTHER_RULES_FLAG}=="?*", GOTO="udisks_probe_end"
# MD-RAID (aka Linux Software RAID) members
#
# TODO: file bug against mdadm(8) to have --export-prefix option that can be used with e.g. UDISKS_MD_MEMBER
#
SUBSYSTEM=="block", ENV{ID_FS_USAGE}=="raid", ENV{ID_FS_TYPE}=="linux_raid_member", ENV{UDISKS_MD_MEMBER_LEVEL}=="", IMPORT{program}="/bin/sh -c '/sbin/mdadm --examine --export $tempnode | sed s/^MD_/UDISKS_MD_MEMBER_/g'"
SUBSYSTEM=="block", KERNEL=="md*", ENV{DEVTYPE}!="partition", IMPORT{program}="/bin/sh -c '/sbin/mdadm --detail --export $tempnode | sed s/^MD_/UDISKS_MD_/g'"
LABEL="udisks_probe_end"
# ------------------------------------------------------------------------
# Tag floppy drives since they need special care
# PC floppy drives
#
KERNEL=="fd*", ENV{ID_DRIVE_FLOPPY}="1"
# USB floppy drives
#
SUBSYSTEMS=="usb", ATTRS{bInterfaceClass}=="08", ATTRS{bInterfaceSubClass}=="04", ENV{ID_DRIVE_FLOPPY}="1"
# ATA Zip drives
#
ENV{ID_VENDOR}=="*IOMEGA*", ENV{ID_MODEL}=="*ZIP*", ENV{ID_DRIVE_FLOPPY_ZIP}="1"
# TODO: figure out if the drive supports SD and SDHC and what the current
# kind of media is - right now we just assume SD
KERNEL=="mmcblk[0-9]", SUBSYSTEMS=="mmc", ENV{DEVTYPE}=="disk", ENV{ID_DRIVE_FLASH_SD}="1", ENV{ID_DRIVE_MEDIA_FLASH_SD}="1"
# ditto for memstick
KERNEL=="mspblk[0-9]", SUBSYSTEMS=="memstick", ENV{DEVTYPE}=="disk", ENV{ID_DRIVE_FLASH_MS}="1", ENV{ID_DRIVE_MEDIA_FLASH_MS}="1"
# TODO: maybe automatically convert udisks1 properties to udisks2 ones?
# (e.g. UDISKS_PRESENTATION_HIDE -> UDISKS_IGNORE)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# Whitelist for tagging drives with the property media type.
# TODO: figure out where to store this database
<|fim▁hole|>SUBSYSTEMS=="usb", ATTRS{idVendor}=="050d", ATTRS{idProduct}=="0248", ENV{ID_INSTANCE}=="0:3", ENV{ID_DRIVE_FLASH_SD}="1"
SUBSYSTEMS=="usb", ATTRS{idVendor}=="05e3", ATTRS{idProduct}=="070e", ENV{ID_INSTANCE}=="0:0", ENV{ID_DRIVE_FLASH_CF}="1"
SUBSYSTEMS=="usb", ATTRS{idVendor}=="05e3", ATTRS{idProduct}=="070e", ENV{ID_INSTANCE}=="0:1", ENV{ID_DRIVE_FLASH_SM}="1"
SUBSYSTEMS=="usb", ATTRS{idVendor}=="05e3", ATTRS{idProduct}=="070e", ENV{ID_INSTANCE}=="0:2", ENV{ID_DRIVE_FLASH_SD}="1"
SUBSYSTEMS=="usb", ATTRS{idVendor}=="05e3", ATTRS{idProduct}=="070e", ENV{ID_INSTANCE}=="0:3", ENV{ID_DRIVE_FLASH_MS}="1"
# APPLE SD Card Reader (MacbookPro5,4)
#
SUBSYSTEMS=="usb", ATTRS{idVendor}=="05ac", ATTRS{idProduct}=="8403", ENV{ID_DRIVE_FLASH_SD}="1"
# Realtek card readers
DRIVERS=="rts_pstor", ENV{ID_DRIVE_FLASH_SD}="1"
DRIVERS=="rts5229", ENV{ID_DRIVE_FLASH_SD}="1"
# Lexar Dual Slot USB 3.0 Reader Professional
SUBSYSTEMS=="usb", ENV{ID_VENDOR_ID}=="05dc",ENV{ID_MODEL_ID}=="b049", ENV{ID_INSTANCE}=="0:0", ENV{ID_DRIVE_FLASH_CF}="1"
SUBSYSTEMS=="usb", ENV{ID_VENDOR_ID}=="05dc",ENV{ID_MODEL_ID}=="b049", ENV{ID_INSTANCE}=="0:1", ENV{ID_DRIVE_FLASH_SD}="1"
# Transcend USB 3.0 Multi-Card Reader (TS-RDF8K)
SUBSYSTEMS=="usb", ENV{ID_VENDOR_ID}=="8564",ENV{ID_MODEL_ID}=="4000", ENV{ID_INSTANCE}=="0:0", ENV{ID_DRIVE_FLASH_CF}="1"
SUBSYSTEMS=="usb", ENV{ID_VENDOR_ID}=="8564",ENV{ID_MODEL_ID}=="4000", ENV{ID_INSTANCE}=="0:1", ENV{ID_DRIVE_FLASH_SD}="1"
SUBSYSTEMS=="usb", ENV{ID_VENDOR_ID}=="8564",ENV{ID_MODEL_ID}=="4000", ENV{ID_INSTANCE}=="0:2", ENV{ID_DRIVE_FLASH_MS}="1"
# Common theme
#
SUBSYSTEMS=="usb", ENV{ID_MODEL}=="*Reader*SD*", ENV{ID_DRIVE_FLASH_SD}="1"
SUBSYSTEMS=="usb", ENV{ID_MODEL}=="*CF_Reader*", ENV{ID_DRIVE_FLASH_CF}="1"
SUBSYSTEMS=="usb", ENV{ID_MODEL}=="*SM_Reader*", ENV{ID_DRIVE_FLASH_SM}="1"
SUBSYSTEMS=="usb", ENV{ID_MODEL}=="*MS_Reader*", ENV{ID_DRIVE_FLASH_MS}="1"
# USB stick / thumb drives
#
SUBSYSTEMS=="usb", ENV{ID_VENDOR}=="*Kingston*", ENV{ID_MODEL}=="*DataTraveler*", ENV{ID_DRIVE_THUMB}="1"
SUBSYSTEMS=="usb", ENV{ID_VENDOR}=="*SanDisk*", ENV{ID_MODEL}=="*Cruzer*", ENV{ID_CDROM}!="1", ENV{ID_DRIVE_THUMB}="1"
SUBSYSTEMS=="usb", ENV{ID_VENDOR}=="HP", ENV{ID_MODEL}=="*v125w*", ENV{ID_DRIVE_THUMB}="1"
SUBSYSTEMS=="usb", ENV{ID_VENDOR_ID}=="13fe", ENV{ID_MODEL}=="*Patriot*", ENV{ID_DRIVE_THUMB}="1"
SUBSYSTEMS=="usb", ENV{ID_VENDOR}=="*JetFlash*", ENV{ID_MODEL}=="*Transcend*", ENV{ID_DRIVE_THUMB}="1"
# SD-Card reader in Chromebook Pixel
SUBSYSTEMS=="usb", ENV{ID_VENDOR_ID}=="05e3", ENV{ID_MODEL_ID}=="0727", ENV{ID_DRIVE_FLASH_SD}="1"
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# Devices which should not be display in the user interface
#
# (note that RAID/LVM members are not normally shown in an user
# interface so setting UDISKS_IGNORE at first does not seem to achieve
# anything. However it helps for RAID/LVM members that are encrypted
# using LUKS. See bug #51439.)
# Apple Bootstrap partitions
ENV{ID_PART_ENTRY_SCHEME}=="mac", ENV{ID_PART_ENTRY_TYPE}=="Apple_Bootstrap", ENV{UDISKS_IGNORE}="1"
# Apple Boot partitions
ENV{ID_PART_ENTRY_SCHEME}=="gpt", ENV{ID_PART_ENTRY_TYPE}=="426f6f74-0000-11aa-aa11-00306543ecac", ENV{UDISKS_IGNORE}="1"
# special DOS partition types (EFI, hidden, etc.) and RAID/LVM
# see http://www.win.tue.nl/~aeb/partitions/partition_types-1.html
ENV{ID_PART_ENTRY_SCHEME}=="dos", \
ENV{ID_PART_ENTRY_TYPE}=="0x0|0x11|0x12|0x14|0x16|0x17|0x1b|0x1c|0x1e|0x27|0x3d|0x84|0x8d|0x8e|0x90|0x91|0x92|0x93|0x97|0x98|0x9a|0x9b|0xbb|0xc2|0xc3|0xdd|0xef|0xfd", \
ENV{UDISKS_IGNORE}="1"
# special GUID-identified partition types (EFI System Partition, BIOS Boot partition, RAID/LVM)
# see http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs
ENV{ID_PART_ENTRY_SCHEME}=="gpt", \
ENV{ID_PART_ENTRY_TYPE}=="c12a7328-f81f-11d2-ba4b-00a0c93ec93b|21686148-6449-6e6f-744e-656564454649|a19d880f-05fc-4d3b-a006-743f0f84911e|e6d6d379-f507-44c2-a23c-238f2a3df928|e3c9e316-0b5c-4db8-817d-f92df00215ae|de94bba4-06d1-4d40-a16a-bfd50179d6ac", \
ENV{UDISKS_IGNORE}="1"
# MAC recovery/tool partitions which are useless on Linux
ENV{ID_PART_ENTRY_SCHEME}=="mac", \
ENV{ID_CDROM}=="?*", ENV{ID_FS_TYPE}=="udf", ENV{ID_FS_LABEL}=="WD*SmartWare", \
ENV{UDISKS_IGNORE}="1"
# recovery partitions
ENV{ID_FS_TYPE}=="ntfs|vfat", \
ENV{ID_FS_LABEL}=="Recovery|RECOVERY|Lenovo_Recovery|HP_RECOVERY|Recovery_Partition|DellUtility|DellRestore|IBM_SERVICE|SERVICEV001|SERVICEV002|SYSTEM_RESERVED|System_Reserved|WINRE_DRV|DIAGS|IntelRST", \
ENV{UDISKS_IGNORE}="1"
# read-only non-Linux software installer partitions
ENV{ID_VENDOR}=="Sony", ENV{ID_MODEL}=="PRS*Launcher", ENV{UDISKS_IGNORE}="1"
# non-Linux software
KERNEL=="sr*", ENV{ID_VENDOR}=="SanDisk", ENV{ID_MODEL}=="Cruzer", ENV{ID_FS_LABEL}=="U3_System", ENV{UDISKS_IGNORE}="1"
# Content created using isohybrid (typically used on CDs and USB
# sticks for bootable media) is a bit special insofar that the
# interesting content is on a DOS partition with type 0x00 ... which
# is hidden above. So undo this.
#
# See http://mjg59.dreamwidth.org/11285.html for more details
#
ENV{ID_PART_TABLE_TYPE}=="dos", ENV{ID_PART_ENTRY_TYPE}=="0x0", ENV{ID_PART_ENTRY_NUMBER}=="1", ENV{ID_FS_TYPE}=="iso9660|udf", ENV{UDISKS_IGNORE}="0"
`
type udisks2Interface struct{}
func (iface *udisks2Interface) Name() string {
return "udisks2"
}
func (iface *udisks2Interface) StaticInfo() interfaces.StaticInfo {
return interfaces.StaticInfo{
Summary: udisks2Summary,
BaseDeclarationSlots: udisks2BaseDeclarationSlots,
}
}
func (iface *udisks2Interface) DBusConnectedPlug(spec *dbus.Specification, plug *interfaces.ConnectedPlug, slot *interfaces.ConnectedSlot) error {
spec.AddSnippet(udisks2ConnectedPlugDBus)
return nil
}
func (iface *udisks2Interface) DBusPermanentSlot(spec *dbus.Specification, slot *snap.SlotInfo) error {
spec.AddSnippet(udisks2PermanentSlotDBus)
return nil
}
func (iface *udisks2Interface) AppArmorConnectedPlug(spec *apparmor.Specification, plug *interfaces.ConnectedPlug, slot *interfaces.ConnectedSlot) error {
old := "###SLOT_SECURITY_TAGS###"
new := slotAppLabelExpr(slot)
snippet := strings.Replace(udisks2ConnectedPlugAppArmor, old, new, -1)
spec.AddSnippet(snippet)
return nil
}
func (iface *udisks2Interface) AppArmorPermanentSlot(spec *apparmor.Specification, slot *snap.SlotInfo) error {
spec.AddSnippet(udisks2PermanentSlotAppArmor)
return nil
}
func (iface *udisks2Interface) UDevPermanentSlot(spec *udev.Specification, slot *snap.SlotInfo) error {
spec.AddSnippet(udisks2PermanentSlotUDev)
spec.TagDevice(`SUBSYSTEM=="block"`)
// # This tags all USB devices, so we'll use AppArmor to mediate specific access (eg, /dev/sd* and /dev/mmcblk*)
spec.TagDevice(`SUBSYSTEM=="usb"`)
return nil
}
func (iface *udisks2Interface) AppArmorConnectedSlot(spec *apparmor.Specification, plug *interfaces.ConnectedPlug, slot *interfaces.ConnectedSlot) error {
old := "###PLUG_SECURITY_TAGS###"
new := plugAppLabelExpr(plug)
snippet := strings.Replace(udisks2ConnectedSlotAppArmor, old, new, -1)
spec.AddSnippet(snippet)
return nil
}
func (iface *udisks2Interface) SecCompPermanentSlot(spec *seccomp.Specification, slot *snap.SlotInfo) error {
spec.AddSnippet(udisks2PermanentSlotSecComp)
return nil
}
func (iface *udisks2Interface) AutoConnect(*snap.PlugInfo, *snap.SlotInfo) bool {
// allow what declarations allowed
return true
}
func init() {
registerIface(&udisks2Interface{})
}<|fim▁end|>
|
SUBSYSTEMS=="usb", ATTRS{idVendor}=="050d", ATTRS{idProduct}=="0248", ENV{ID_INSTANCE}=="0:0", ENV{ID_DRIVE_FLASH_CF}="1"
SUBSYSTEMS=="usb", ATTRS{idVendor}=="050d", ATTRS{idProduct}=="0248", ENV{ID_INSTANCE}=="0:1", ENV{ID_DRIVE_FLASH_MS}="1"
SUBSYSTEMS=="usb", ATTRS{idVendor}=="050d", ATTRS{idProduct}=="0248", ENV{ID_INSTANCE}=="0:2", ENV{ID_DRIVE_FLASH_SM}="1"
|
<|file_name|>attr-main-2.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.<|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
#![feature(main)]
pub fn main() {
panic!()
}
#[main]
fn foo() {
}<|fim▁end|>
|
//
|
<|file_name|>GNode.java<|end_file_name|><|fim▁begin|>/*
Galois, a framework to exploit amorphous data-parallelism in irregular
programs.
Copyright (C) 2010, The University of Texas at Austin. All rights reserved.
UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS SOFTWARE
AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR ANY
PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY
WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF TRADE.
NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO THE USE OF THE
SOFTWARE OR DOCUMENTATION. Under no circumstances shall University be liable
for incidental, special, indirect, direct or consequential damages or loss of
profits, interruption of business, or related expenses which may arise from use
of Software or Documentation, including but not limited to those resulting from
defects in Software and/or Documentation, or loss or inaccuracy of data of any<|fim▁hole|> */
package galois.objects.graph;
import galois.objects.GObject;
import galois.objects.Lockable;
import galois.objects.Mappable;
import galois.runtime.Replayable;
/**
* A node in a graph.
*
* @param <N> the type of the data stored in each node
*/
public interface GNode<N> extends Replayable, Lockable, Mappable<GNode<N>>, GObject {
/**
* Retrieves the node data associated with the vertex
*
* All the Galois runtime actions (e.g., conflict detection) will be performed when
* the method is executed.
*
* @return the data contained in the node
*/
public N getData();
/**
* Retrieves the node data associated with the vertex. Equivalent to {@link #getData(byte, byte)}
* passing <code>flags</code> to both parameters.
*
* @param flags Galois runtime actions (e.g., conflict detection) that need to be executed
* upon invocation of this method. See {@link galois.objects.MethodFlag}
* @return the data contained in the node
*/
public N getData(byte flags);
/**
* Retrieves the node data associated with the vertex. For convenience, this method
* also calls {@link GObject#access(byte)} on the returned data.
*
* <p>Recall that the
* {@link GNode} object maintains information about the vertex and its connectivity
* in the graph. This is separate from the data itself. For example,
* <code>getData(MethodFlag.NONE, MethodFlag.SAVE_UNDO)</code>
* does not acquire an abstract lock on the {@link GNode} (perhaps because
* it was returned by a call to {@link GNode#map(util.fn.LambdaVoid)}), but it
* saves undo information on the returned data in case the iteration needs to
* be rolled back.
* </p>
*
* @param nodeFlags Galois runtime actions (e.g., conflict detection) that need to be executed
* upon invocation of this method on the <i>node itself</i>.
* See {@link galois.objects.MethodFlag}
* @param dataFlags Galois runtime actions (e.g., conflict detection) that need to be executed
* upon invocation of this method on the <i>data</i> contained in the node.
* See {@link galois.objects.MethodFlag}
* @return the data contained in the node
*/
public N getData(byte nodeFlags, byte dataFlags);
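  // Hedged usage sketch, mirroring the javadoc example above (flag constants
  // come from galois.objects.MethodFlag): skip locking on the GNode itself
  // while still saving undo information for the returned data:
  //   N d = node.getData(MethodFlag.NONE, MethodFlag.SAVE_UNDO);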
/**
* Sets the node data.
*
* All the Galois runtime actions (e.g., conflict detection) will be performed when
* the method is executed.
*
* @param d the data to be stored
* @return the old data associated with the node
*/
public N setData(N d);
/**
* Sets the node data.
*
* @param d the data to be stored
* @param flags Galois runtime actions (e.g., conflict detection) that need to be executed
* upon invocation of this method. See {@link galois.objects.MethodFlag}
* @return the old data associated with the node
*/
public N setData(N d, byte flags);
}<|fim▁end|>
|
kind.
File: GNode.java
|
<|file_name|>AboutPage.spec.js<|end_file_name|><|fim▁begin|>import React from 'react';
import {shallow} from 'enzyme';
import AboutPage from './AboutPage';
describe('<AboutPage />', () => {
it('should have a header called \'About\'', () => {<|fim▁hole|> const wrapper = shallow(<AboutPage />);
const actual = wrapper.find('h2').text();
const expected = 'About';
expect(actual).toEqual(expected);
});
it('should have a header with \'alt-header\' class', () => {
const wrapper = shallow(<AboutPage />);
const actual = wrapper.find('h2').prop('className');
const expected = 'alt-header';
expect(actual).toEqual(expected);
});
it('should link to an unknown route path', () => {
const wrapper = shallow(<AboutPage />);
const actual = wrapper.findWhere(n => n.prop('to') === '/badlink').length;
const expected = 1;
expect(actual).toEqual(expected);
});
});<|fim▁end|>
| |
<|file_name|>wx._gdi_.py<|end_file_name|><|fim▁begin|>def __load():
import imp, os, sys
try:
dirname = os.path.dirname(__loader__.archive)
except NameError:
dirname = sys.prefix
path = os.path.join(dirname, 'wx._gdi_.pyd')
#print "py2exe extension module", __name__, "->", path
mod = imp.load_dynamic(__name__, path)
## mod.frozen = 1
<|fim▁hole|><|fim▁end|>
|
__load()
del __load
|
<|file_name|>actions.py<|end_file_name|><|fim▁begin|># Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actions related to task commands."""
import time
from drydock_provisioner.cli.action import CliAction
from drydock_provisioner.cli.const import TaskStatus
class TaskList(CliAction): # pylint: disable=too-few-public-methods
"""Action to list tasks."""
def __init__(self, api_client):
"""Object initializer.
:param DrydockClient api_client: The api client used for invocation.
"""
super().__init__(api_client)
self.logger.debug('TaskList action initialized')
def invoke(self):
"""Invoke execution of this action."""
return self.api_client.get_tasks()
class TaskCreate(CliAction): # pylint: disable=too-few-public-methods
"""Action to create tasks against a design."""
def __init__(self,
api_client,
design_ref,
action_name=None,
node_names=None,
rack_names=None,
node_tags=None,
block=False,
poll_interval=15):
"""Object initializer.
:param DrydockClient api_client: The api client used for invocation.
:param string design_ref: The URI reference to design documents
:param string action_name: The name of the action being performed for this task
:param List node_names: The list of node names to restrict action application
:param List rack_names: The list of rack names to restrict action application<|fim▁hole|> :param List node_tags: The list of node tags to restrict action application
:param bool block: Whether to block CLI exit until task completes
:param integer poll_interval: Polling interval to query task status
"""
super().__init__(api_client)
self.design_ref = design_ref
self.action_name = action_name
self.logger.debug('TaskCreate action initialized for design=%s',
design_ref)
self.logger.debug('Action is %s', action_name)
self.logger.debug("Node names = %s", node_names)
self.logger.debug("Rack names = %s", rack_names)
self.logger.debug("Node tags = %s", node_tags)
self.block = block
self.poll_interval = poll_interval
if any([node_names, rack_names, node_tags]):
filter_items = {'filter_type': 'union'}
if node_names is not None:
filter_items['node_names'] = node_names
if rack_names is not None:
filter_items['rack_names'] = rack_names
            if node_tags is not None:
filter_items['node_tags'] = node_tags
self.node_filter = {
'filter_set_type': 'intersection',
'filter_set': [filter_items]
}
else:
self.node_filter = None
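        # Hedged illustration of the resulting filter document (hypothetical
        # inputs: node_names=['n1'], rack_names=['r1'], node_tags=None):
        #   {'filter_set_type': 'intersection',
        #    'filter_set': [{'filter_type': 'union',
        #                    'node_names': ['n1'],
        #                    'rack_names': ['r1']}]}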
def invoke(self):
"""Invoke execution of this action."""
task = self.api_client.create_task(
design_ref=self.design_ref,
task_action=self.action_name,
node_filter=self.node_filter)
if not self.block:
return task
task_id = task.get('task_id')
while True:
time.sleep(self.poll_interval)
task = self.api_client.get_task(task_id=task_id)
if task.get('status',
'') in [TaskStatus.Complete, TaskStatus.Terminated]:
return task
class TaskShow(CliAction): # pylint: disable=too-few-public-methods
"""Action to show a task's detial."""
def __init__(self, api_client, task_id, block=False, poll_interval=15):
"""Object initializer.
:param DrydockClient api_client: The api client used for invocation.
:param string task_id: the UUID of the task to retrieve
:param bool block: Whether to block CLI exit until task completes
:param integer poll_interval: Polling interval to query task status
"""
super().__init__(api_client)
self.task_id = task_id
self.logger.debug('TaskShow action initialized for task_id=%s,',
task_id)
self.block = block
self.poll_interval = poll_interval
def invoke(self):
"""Invoke execution of this action."""
task = self.api_client.get_task(task_id=self.task_id)
if not self.block:
return task
task_id = task.get('task_id')
while True:
time.sleep(self.poll_interval)
task = self.api_client.get_task(task_id=task_id)
            if task.get('status',
                        '') in [TaskStatus.Complete, TaskStatus.Terminated]:
return task<|fim▁end|>
| |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',<|fim▁hole|> url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),
)<|fim▁end|>
|
url(r'', include('frontpage.urls')),
url(r'^auth/', include('social.apps.django_app.urls', namespace='social')),
url(r'^admin/', include(admin.site.urls)),
|
<|file_name|>photopreviewdlg.cpp<|end_file_name|><|fim▁begin|>/*
photo_prev: Application to assist in sorting through photographs.
Copyright (C) 2008 Jeremiah LaRocco
This file is part of photo_prev.
photo_prev is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
photo_prev is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with photo_prev. If not, see <http://www.gnu.org/licenses/>.
*/
#include <QtGui>
#include "photopreviewdlg.h"
PhotoPreviewDlg::PhotoPreviewDlg(QWidget *parent) : QDialog(parent) {
ftw = false;
buildImageList(QDir::currentPath());
setWindowTitle(tr("Photo Previewer"));
// Setup GUI here...
QVBoxLayout *theLayout = new QVBoxLayout;
QHBoxLayout *input_layout = new QHBoxLayout;
QHBoxLayout *accept_layout = new QHBoxLayout;
QHBoxLayout *reject_layout = new QHBoxLayout;
QHBoxLayout *fname_layout = new QHBoxLayout;
QHBoxLayout *ar_buttons_layout = new QHBoxLayout;
QHBoxLayout *zoom_layout = new QHBoxLayout;
QHBoxLayout *extensions_layout = new QHBoxLayout;
QCompleter *input_completer = new QCompleter(this);
input_completer->setCompletionMode(QCompleter::InlineCompletion);
QDirModel *indm = new QDirModel(input_completer);
indm->index(QDir::currentPath());
indm->setFilter(QDir::AllDirs | QDir::Dirs
| QDir::NoDotAndDotDot | QDir::CaseSensitive);
input_completer->setModel(indm);
input_completer->setCompletionPrefix(QDir::currentPath());
    input_dir_lbl = new QLabel(tr("Input directory"));
input_dir_tb = new QLineEdit(QDir::currentPath());
input_dir_tb->setCompleter(input_completer);
input_dir_choose = new QPushButton(QIcon(":/icons/fileopen.png"),"");
input_layout->addWidget(input_dir_lbl);
input_layout->addWidget(input_dir_tb);
input_layout->addWidget(input_dir_choose);
QCompleter *accept_completer = new QCompleter(this);
accept_completer->setCompletionMode(QCompleter::InlineCompletion);
QDirModel *accdm = new QDirModel(accept_completer);
accdm->index(QDir::currentPath());
accdm->setFilter(QDir::AllDirs | QDir::Dirs
| QDir::NoDotAndDotDot | QDir::CaseSensitive);
accept_completer->setModel(accdm);
accept_completer->setCompletionPrefix(QDir::currentPath()+tr("/accept"));
QCompleter *reject_completer = new QCompleter(this);
reject_completer->setCompletionMode(QCompleter::InlineCompletion);
QDirModel *outdm = new QDirModel(accept_completer);
outdm->index(QDir::currentPath());
outdm->setFilter(QDir::AllDirs | QDir::Dirs
| QDir::NoDotAndDotDot | QDir::CaseSensitive);
reject_completer->setModel(outdm);
reject_completer->setCompletionPrefix(QDir::currentPath()+tr("/reject"));
accept_dir_lbl = new QLabel(tr("Accept directory"));
accept_dir_tb = new QLineEdit(QDir::currentPath() + tr("/accept"));
accept_dir_tb->setCompleter(accept_completer);
accept_dir_choose = new QPushButton(QIcon(":/icons/fileopen.png"),"");
accept_layout->addWidget(accept_dir_lbl);
accept_layout->addWidget(accept_dir_tb);
accept_layout->addWidget(accept_dir_choose);
reject_dir_lbl = new QLabel(tr("Reject directory"));
reject_dir_tb = new QLineEdit(QDir::currentPath() + tr("/reject"));
reject_dir_tb->setCompleter(reject_completer);
reject_dir_choose = new QPushButton(QIcon(":/icons/fileopen.png"),"");
reject_layout->addWidget(reject_dir_lbl);
reject_layout->addWidget(reject_dir_tb);
reject_layout->addWidget(reject_dir_choose);
fname_lbl_lbl = new QLabel(tr("File name: "));
fname_lbl = new QLabel(tr(""));
fname_layout->addWidget(fname_lbl_lbl);
fname_layout->addWidget(fname_lbl);
accept_image = new QPushButton(tr("Accept"));
reject_image = new QPushButton(tr("Reject"));
skip_image = new QPushButton(tr("Skip"));
ar_buttons_layout->addWidget(accept_image);
ar_buttons_layout->addWidget(reject_image);
ar_buttons_layout->addWidget(skip_image);
fit_to_window = new QPushButton(tr("Fit To Window"));
normal_size = new QPushButton(tr("Actual Size"));
zoom_lbl = new QLabel(tr("Zoom: 100%"));
zoom_layout->addWidget(fit_to_window);
zoom_layout->addWidget(normal_size);
zoom_layout->addWidget(zoom_lbl);
extensions_lbl = new QLabel(tr("Extensions to move"));
extensions_tb = new QLineEdit(tr(".jpg,.JPG,.cr2,.CR2"));
extensions_layout->addWidget(extensions_lbl);
extensions_layout->addWidget(extensions_tb);
img_lbl = new QLabel;
if (files.size()>0) {
updatePreview();
}
img_scroller = new QScrollArea;
img_scroller->setBackgroundRole(QPalette::Dark);
img_scroller->setWidget(img_lbl);
theLayout->addLayout(input_layout);
theLayout->addLayout(accept_layout);
theLayout->addLayout(reject_layout);
theLayout->addLayout(fname_layout);
theLayout->addWidget(img_scroller);
theLayout->addLayout(ar_buttons_layout);
theLayout->addLayout(zoom_layout);
theLayout->addLayout(extensions_layout);
setLayout(theLayout);
setupConnections();
update_extension_list();
}
void PhotoPreviewDlg::setupConnections() {
connect(input_dir_choose, SIGNAL(clicked()),
this, SLOT(chooseInputDir()));
connect(accept_dir_choose, SIGNAL(clicked()),
this, SLOT(chooseAcceptDir()));
connect(reject_dir_choose, SIGNAL(clicked()),
this, SLOT(chooseRejectDir()));
connect(accept_image, SIGNAL(clicked()),
this, SLOT(accept_button()));
connect(reject_image, SIGNAL(clicked()),
this, SLOT(reject_button()));
connect(skip_image, SIGNAL(clicked()),
this, SLOT(skip_button()));
connect(fit_to_window, SIGNAL(clicked()),
this, SLOT(ftw_button()));
connect(normal_size, SIGNAL(clicked()),
this, SLOT(actual_size_button()));
connect(extensions_tb, SIGNAL(textChanged(const QString &)),
this, SLOT(update_extension_list()));
connect(input_dir_tb, SIGNAL(editingFinished()),
this, SLOT(inputDirChange()));
}
void PhotoPreviewDlg::criticalErr(QString errMsg) {
QMessageBox::critical(this,
tr("Error"),
errMsg,
QMessageBox::Ok,
QMessageBox::NoButton,
QMessageBox::NoButton);
}
void PhotoPreviewDlg::chooseInputDir() {
QString fileName =
QFileDialog::getExistingDirectory(this,
tr("Choose an input directory"),
input_dir_tb->text());
if (fileName == tr("")) {
return;
}
input_dir_tb->setText(fileName);
buildImageList(fileName);
curIdx = 0;
updatePreview();
}
void PhotoPreviewDlg::inputDirChange() {
if (input_dir_tb->text() == tr("")) {
return;
}
if (QDir(input_dir_tb->text()).exists()) {
buildImageList(input_dir_tb->text());
curIdx = 0;
updatePreview();
}
}
void PhotoPreviewDlg::chooseAcceptDir() {
QString fileName =
QFileDialog::getExistingDirectory(this,
tr("Choose a directory"),
accept_dir_tb->text());
if (fileName == tr("")) {
return;
}
accept_dir_tb->setText(fileName);
}
void PhotoPreviewDlg::chooseRejectDir() {
QString fileName =
QFileDialog::getExistingDirectory(this,
tr("Choose a directory"),<|fim▁hole|> if (fileName == tr("")) {
return;
}
reject_dir_tb->setText(fileName);
}
void PhotoPreviewDlg::buildImageList(QString where) {
QDir imgDir(where);
QStringList filters;
filters += "*.jpg";
files = imgDir.entryList(filters, QDir::Files, QDir::IgnoreCase | QDir::Name);
curIdx = 0;
}
void PhotoPreviewDlg::accept_button() {
if (curIdx >= files.size()) {
return;
}
QDir id(input_dir_tb->text());
QDir od(accept_dir_tb->text());
foreach (QString ext, extensions) {
QString fn = files[curIdx];
int extIdx = fn.lastIndexOf(tr(".jpg"), -1, Qt::CaseInsensitive);
fn.replace(extIdx, 4, ext);
QFile tf(id.filePath(fn));
if (tf.exists()) {
tf.rename(od.filePath(fn));
}
}
    files.erase(files.begin() + curIdx);
    if (curIdx >= files.size()) {
        curIdx = 0;
    }
updatePreview();
}
void PhotoPreviewDlg::reject_button() {
if (curIdx >= files.size()) {
return;
}
QDir id(input_dir_tb->text());
QDir od(reject_dir_tb->text());
foreach (QString ext, extensions) {
QString fn = files[curIdx];
int extIdx = fn.lastIndexOf(tr(".jpg"), -1, Qt::CaseInsensitive);
fn.replace(extIdx, 4, ext);
QFile tf(id.filePath(fn));
if (tf.exists()) {
tf.rename(od.filePath(fn));
}
}
    files.erase(files.begin() + curIdx);
    if (curIdx >= files.size()) {
        curIdx = 0;
    }
updatePreview();
}
void PhotoPreviewDlg::skip_button() {
if (curIdx < files.size()-1) {
++curIdx;
updatePreview();
return;
}
curIdx = 0;
updatePreview();
}
void PhotoPreviewDlg::ftw_button() {
ftw = true;
updatePreview();
}
void PhotoPreviewDlg::actual_size_button() {
ftw = false;
updatePreview();
}
void PhotoPreviewDlg::update_extension_list() {
QStringList exts = extensions_tb->text().split(",");
if (exts.size()>0) {
extensions.clear();
QStringList::const_iterator iter;
for (iter = exts.constBegin(); iter != exts.end(); ++iter) {
extensions += *iter;
}
}
}
void PhotoPreviewDlg::updatePreview() {
if (curIdx >= files.size()) {
img_lbl->setPixmap(QPixmap());
img_lbl->setText(tr("None"));
fname_lbl->setText(tr("No files!"));
return;
}
QDir id(input_dir_tb->text());
QString infn = id.filePath(files[curIdx]);
if (!curImage.load(infn)) {
fname_lbl->setText(tr("Error!!"));
img_lbl->setText(tr("Error!!"));
criticalErr(tr("Error creating preview image for: ") + infn);
return;
}
img_lbl->setText(tr(""));
fname_lbl->setText(infn);
if (ftw) {
curImage = curImage.scaled( img_scroller->width()-4, img_scroller->height()-4, Qt::KeepAspectRatio, Qt::SmoothTransformation);
}
img_lbl->setPixmap(QPixmap::fromImage(curImage));
img_lbl->resize(curImage.size());
}<|fim▁end|>
|
reject_dir_tb->text());
|
<|file_name|>pull.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The pull thread.
//!
//! This module handles pulling all the pushed rumors from every member off a ZMQ socket.
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
use protobuf;
use zmq;
use ZMQ_CONTEXT;
use server::Server;
use message::swim::{Rumor, Rumor_Type};
use trace::TraceKind;
<|fim▁hole|>/// Takes a reference to the server itself
pub struct Pull {
pub server: Server,
}
impl Pull {
/// Create a new Pull
pub fn new(server: Server) -> Pull {
Pull { server: server }
}
/// Run this thread. Creates a socket, binds to the `gossip_addr`, then processes messages as
/// they are received. Uses a ZMQ pull socket, so inbound messages are fair-queued.
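    // Hedged wiring sketch (server construction elided; assumes a cloned
    // `Server` handle, as used elsewhere in this crate):
    //     let mut pull = Pull::new(server.clone());
    //     thread::spawn(move || pull.run());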
pub fn run(&mut self) {
let socket = (**ZMQ_CONTEXT)
.as_mut()
.socket(zmq::PULL)
.expect("Failure to create the ZMQ pull socket");
socket
.set_linger(0)
.expect("Failure to set the ZMQ Pull socket to not linger");
socket
.set_tcp_keepalive(0)
.expect("Failure to set the ZMQ Pull socket to not use keepalive");
socket
.bind(&format!("tcp://{}", self.server.gossip_addr()))
.expect("Failure to bind the ZMQ Pull socket to the port");
'recv: loop {
if self.server.pause.load(Ordering::Relaxed) {
thread::sleep(Duration::from_millis(100));
continue;
}
let msg = match socket.recv_msg(0) {
Ok(msg) => msg,
Err(e) => {
error!("Error receiving message: {:?}", e);
continue 'recv;
}
};
let payload = match self.server.unwrap_wire(&msg) {
Ok(payload) => payload,
Err(e) => {
// NOTE: In the future, we might want to blacklist people who send us
// garbage all the time.
error!("Error parsing protobuf: {:?}", e);
continue;
}
};
let mut proto: Rumor = match protobuf::parse_from_bytes(&payload) {
Ok(proto) => proto,
Err(e) => {
error!("Error parsing protobuf: {:?}", e);
continue 'recv;
}
};
if self.server.check_blacklist(proto.get_from_id()) {
warn!(
"Not processing message from {} - it is blacklisted",
proto.get_from_id()
);
continue 'recv;
}
trace_it!(GOSSIP: &self.server, TraceKind::RecvRumor, proto.get_from_id(), &proto);
match proto.get_field_type() {
Rumor_Type::Member => {
let member = proto.mut_member().take_member().into();
let health = proto.mut_member().get_health().into();
self.server.insert_member_from_rumor(member, health);
}
Rumor_Type::Service => {
self.server.insert_service(proto.into());
}
Rumor_Type::ServiceConfig => {
self.server.insert_service_config(proto.into());
}
Rumor_Type::ServiceFile => {
self.server.insert_service_file(proto.into());
}
Rumor_Type::Election => {
self.server.insert_election(proto.into());
}
Rumor_Type::ElectionUpdate => {
self.server.insert_update_election(proto.into());
}
Rumor_Type::Departure => {
self.server.insert_departure(proto.into());
}
Rumor_Type::Fake | Rumor_Type::Fake2 => {
debug!("Nothing to do for fake rumor types")
}
}
}
}
}<|fim▁end|>
| |
<|file_name|>ExifTagWriteOperation.java<|end_file_name|><|fim▁begin|>/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ExifTagWriteOperation.java
* Copyright (C) 2019 University of Waikato, Hamilton, NZ
*/
package adams.flow.transformer.exiftagoperation;
/**
* Interface for EXIF tag write operations.<|fim▁hole|>public interface ExifTagWriteOperation<I, O>
extends ExifTagOperation<I, O> {
}<|fim▁end|>
|
*
* @author FracPete (fracpete at waikato dot ac dot nz)
*/
|
<|file_name|>solution9.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
digits = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
found = True
while found:
input_string = input('Please give me some digits... \n')
found = False
for character in input_string:
if character not in digits:
# we have a non digit!
print('Error, you gave me non digits')
found = True<|fim▁hole|> break
print('starting real work on', input_string)<|fim▁end|>
| |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.http import HttpResponseRedirect, Http404
from django.contrib import auth
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required<|fim▁hole|>from django.contrib.auth.models import User
@login_required
def switch_user(request, username):
if request.user.is_superuser:
try:
user = get_user_model().objects.get(username=username)
auth.login(request, user, backend='django.contrib.auth.backends.ModelBackend')
from_url = request.META.get("HTTP_ORIGIN", "")
if not from_url:
from_url = request.META.get("HTTP_HOST", "")
from_url = from_url and ("http://" + from_url) or "/"
return HttpResponseRedirect(from_url)
except User.DoesNotExist:
pass
raise Http404<|fim▁end|>
| |
<|file_name|>plotting.py<|end_file_name|><|fim▁begin|>"""
Plot packet delivery over time: the fraction of sent packets received so far, parsed from a log of send/receive timestamps.
"""
import numpy as np
import matplotlib.pyplot as plt<|fim▁hole|>
def plot_loss_over_time(file_name):
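    # Assumed input format (hypothetical sample): one "send_ns recv_ns" pair
    # of whitespace-separated nanosecond timestamps per line, e.g.:
    #   1000000 1500000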
with open(file_name, 'r') as f:
lines = f.readlines()
events = order_events(lines)
packets_sent = 0
packets_received = 0
time_stamps = []
packet_percent = []
for event in events:
time, type = event
if type == 's':
packets_sent += 1
else:
packets_received += 1
time_stamps.append(time)
packet_percent.append(float(packets_received)/packets_sent)
plt.plot(time_stamps, packet_percent)
plt.show()
def order_events(lines):
events = []
for line in lines:
split_line = line.split(" ")
send_time = int(split_line[0])
recv_time = int(split_line[1])
events.append((send_time, 's'))
events.append((recv_time, 'r'))
events.sort()
return events
plot_loss_over_time("tcp_160_10.out")
# 1. time on x-axis, percent packets received on y-axis
# 2. loss percentage on x-axis, 90 percentile latency on y-axis
# time packet sent, time packet received in nanoseconds<|fim▁end|>
|
from matplotlib import mlab
|
<|file_name|>validate.rs<|end_file_name|><|fim▁begin|>//! Validates the MIR to ensure that invariants are upheld.
use super::MirPass;
use rustc_index::bit_set::BitSet;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::traversal;
use rustc_middle::mir::visit::{PlaceContext, Visitor};
use rustc_middle::mir::{
AggregateKind, BasicBlock, Body, BorrowKind, Local, Location, MirPhase, Operand, PlaceElem,
PlaceRef, ProjectionElem, Rvalue, SourceScope, Statement, StatementKind, Terminator,
TerminatorKind, START_BLOCK,
};
use rustc_middle::ty::fold::BottomUpFolder;
use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt, TypeFoldable};
use rustc_mir_dataflow::impls::MaybeStorageLive;
use rustc_mir_dataflow::storage::AlwaysLiveLocals;
use rustc_mir_dataflow::{Analysis, ResultsCursor};
use rustc_target::abi::Size;
#[derive(Copy, Clone, Debug)]
enum EdgeKind {
Unwind,
Normal,
}
pub struct Validator {
/// Describes at which point in the pipeline this validation is happening.
pub when: String,
/// The phase for which we are upholding the dialect. If the given phase forbids a specific
/// element, this validator will now emit errors if that specific element is encountered.
/// Note that phases that change the dialect cause all *following* phases to check the
/// invariants of the new dialect. A phase that changes dialects never checks the new invariants
/// itself.
pub mir_phase: MirPhase,
}
impl<'tcx> MirPass<'tcx> for Validator {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let def_id = body.source.def_id();
let param_env = tcx.param_env(def_id);
let mir_phase = self.mir_phase;
let always_live_locals = AlwaysLiveLocals::new(body);
let storage_liveness = MaybeStorageLive::new(always_live_locals)
.into_engine(tcx, body)
.iterate_to_fixpoint()
.into_results_cursor(body);
TypeChecker {
when: &self.when,
body,
tcx,
param_env,
mir_phase,
reachable_blocks: traversal::reachable_as_bitset(body),
storage_liveness,
place_cache: Vec::new(),
}
.visit_body(body);
}
}<|fim▁hole|>///
/// The point of this function is to approximate "equal up to subtyping". However,
/// the approximation is incorrect as variance is ignored.
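// Informal illustration: `&'a u32` and `&'static u32` compare equal under
// this check, while `u32` vs `i32` do not; variance is not consulted.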
pub fn equal_up_to_regions(
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
src: Ty<'tcx>,
dest: Ty<'tcx>,
) -> bool {
// Fast path.
if src == dest {
return true;
}
// Normalize lifetimes away on both sides, then compare.
let param_env = param_env.with_reveal_all_normalized(tcx);
let normalize = |ty: Ty<'tcx>| {
tcx.normalize_erasing_regions(
param_env,
ty.fold_with(&mut BottomUpFolder {
tcx,
// FIXME: We erase all late-bound lifetimes, but this is not fully correct.
// If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`,
// this is not necessarily equivalent to `<fn(&'static u32) as SomeTrait>::Assoc`,
// since one may have an `impl SomeTrait for fn(&32)` and
// `impl SomeTrait for fn(&'static u32)` at the same time which
// specify distinct values for Assoc. (See also #56105)
lt_op: |_| tcx.lifetimes.re_erased,
// Leave consts and types unchanged.
ct_op: |ct| ct,
ty_op: |ty| ty,
})
.into_ok(),
)
};
tcx.infer_ctxt().enter(|infcx| infcx.can_eq(param_env, normalize(src), normalize(dest)).is_ok())
}
struct TypeChecker<'a, 'tcx> {
when: &'a str,
body: &'a Body<'tcx>,
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
mir_phase: MirPhase,
reachable_blocks: BitSet<BasicBlock>,
storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
place_cache: Vec<PlaceRef<'tcx>>,
}
impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
fn fail(&self, location: Location, msg: impl AsRef<str>) {
let span = self.body.source_info(location).span;
// We use `delay_span_bug` as we might see broken MIR when other errors have already
// occurred.
self.tcx.sess.diagnostic().delay_span_bug(
span,
&format!(
"broken MIR in {:?} ({}) at {:?}:\n{}",
self.body.source.instance,
self.when,
location,
msg.as_ref()
),
);
}
fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
if bb == START_BLOCK {
self.fail(location, "start block must not have predecessors")
}
if let Some(bb) = self.body.basic_blocks().get(bb) {
let src = self.body.basic_blocks().get(location.block).unwrap();
match (src.is_cleanup, bb.is_cleanup, edge_kind) {
// Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
(false, false, EdgeKind::Normal)
// Non-cleanup blocks can jump to cleanup blocks along unwind edges
| (false, true, EdgeKind::Unwind)
// Cleanup blocks can jump to cleanup blocks along non-unwind edges
| (true, true, EdgeKind::Normal) => {}
// All other jumps are invalid
_ => {
self.fail(
location,
format!(
"{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
edge_kind,
bb,
src.is_cleanup,
bb.is_cleanup,
)
)
}
}
} else {
self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
}
}
/// Check if src can be assigned into dest.
/// This is not precise, it will accept some incorrect assignments.
fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
// Fast path before we normalize.
if src == dest {
// Equal types, all is good.
return true;
}
// Normalize projections and things like that.
// FIXME: We need to reveal_all, as some optimizations change types in ways
// that require unfolding opaque types.
let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
let src = self.tcx.normalize_erasing_regions(param_env, src);
let dest = self.tcx.normalize_erasing_regions(param_env, dest);
// Type-changing assignments can happen when subtyping is used. While
// all normal lifetimes are erased, higher-ranked types with their
// late-bound lifetimes are still around and can lead to type
// differences. So we compare ignoring lifetimes.
equal_up_to_regions(self.tcx, param_env, src, dest)
}
}
impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
fn visit_local(&mut self, local: &Local, context: PlaceContext, location: Location) {
if self.body.local_decls.get(*local).is_none() {
self.fail(
location,
format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
);
}
if self.reachable_blocks.contains(location.block) && context.is_use() {
// Uses of locals must occur while the local's storage is allocated.
self.storage_liveness.seek_after_primary_effect(location);
let locals_with_storage = self.storage_liveness.get();
if !locals_with_storage.contains(*local) {
self.fail(location, format!("use of local {:?}, which has no storage here", local));
}
}
}
fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
// This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
if self.tcx.sess.opts.debugging_opts.validate_mir {
// `Operand::Copy` is only supposed to be used with `Copy` types.
if let Operand::Copy(place) = operand {
let ty = place.ty(&self.body.local_decls, self.tcx).ty;
let span = self.body.source_info(location).span;
if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
}
}
}
self.super_operand(operand, location);
}
fn visit_projection_elem(
&mut self,
local: Local,
proj_base: &[PlaceElem<'tcx>],
elem: PlaceElem<'tcx>,
context: PlaceContext,
location: Location,
) {
if let ProjectionElem::Index(index) = elem {
let index_ty = self.body.local_decls[index].ty;
if index_ty != self.tcx.types.usize {
self.fail(location, format!("bad index ({:?} != usize)", index_ty))
}
}
self.super_projection_elem(local, proj_base, elem, context, location);
}
fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
match &statement.kind {
StatementKind::Assign(box (dest, rvalue)) => {
// LHS and RHS of the assignment must have the same type.
let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
if !self.mir_assign_valid_types(right_ty, left_ty) {
self.fail(
location,
format!(
"encountered `{:?}` with incompatible types:\n\
left-hand side has type: {}\n\
right-hand side has type: {}",
statement.kind, left_ty, right_ty,
),
);
}
match rvalue {
// The sides of an assignment must not alias. Currently this just checks whether the places
// are identical.
Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) => {
if dest == src {
self.fail(
location,
"encountered `Assign` statement with overlapping memory",
);
}
}
                    // The deaggregator currently does not deaggregate arrays.
// So for now, we ignore them here.
Rvalue::Aggregate(box AggregateKind::Array { .. }, _) => {}
// All other aggregates must be gone after some phases.
Rvalue::Aggregate(box kind, _) => {
if self.mir_phase > MirPhase::DropLowering
&& !matches!(kind, AggregateKind::Generator(..))
{
// Generators persist until the state machine transformation, but all
// other aggregates must have been lowered.
self.fail(
location,
format!("{:?} have been lowered to field assignments", rvalue),
)
} else if self.mir_phase > MirPhase::GeneratorLowering {
// No more aggregates after drop and generator lowering.
self.fail(
location,
format!("{:?} have been lowered to field assignments", rvalue),
)
}
}
Rvalue::Ref(_, BorrowKind::Shallow, _) => {
if self.mir_phase > MirPhase::DropLowering {
self.fail(
location,
"`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
);
}
}
_ => {}
}
}
StatementKind::AscribeUserType(..) => {
if self.mir_phase > MirPhase::DropLowering {
self.fail(
location,
"`AscribeUserType` should have been removed after drop lowering phase",
);
}
}
StatementKind::FakeRead(..) => {
if self.mir_phase > MirPhase::DropLowering {
self.fail(
location,
"`FakeRead` should have been removed after drop lowering phase",
);
}
}
StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
ref src,
ref dst,
ref count,
}) => {
let src_ty = src.ty(&self.body.local_decls, self.tcx);
let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
src_deref.ty
} else {
self.fail(
location,
format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
);
return;
};
let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
dst_deref.ty
} else {
self.fail(
location,
format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
);
return;
};
// since CopyNonOverlapping is parametrized by 1 type,
// we only need to check that they are equal and not keep an extra parameter.
if op_src_ty != op_dst_ty {
self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
}
let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
if op_cnt_ty != self.tcx.types.usize {
self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
}
}
StatementKind::SetDiscriminant { .. }
| StatementKind::StorageLive(..)
| StatementKind::StorageDead(..)
| StatementKind::LlvmInlineAsm(..)
| StatementKind::Retag(_, _)
| StatementKind::Coverage(_)
| StatementKind::Nop => {}
}
self.super_statement(statement, location);
}
fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
match &terminator.kind {
TerminatorKind::Goto { target } => {
self.check_edge(location, *target, EdgeKind::Normal);
}
TerminatorKind::SwitchInt { targets, switch_ty, discr } => {
let ty = discr.ty(&self.body.local_decls, self.tcx);
if ty != *switch_ty {
self.fail(
location,
format!(
"encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}",
ty, switch_ty,
),
);
}
let target_width = self.tcx.sess.target.pointer_width;
let size = Size::from_bits(match switch_ty.kind() {
ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
ty::Char => 32,
ty::Bool => 1,
other => bug!("unhandled type: {:?}", other),
});
for (value, target) in targets.iter() {
if Scalar::<()>::try_from_uint(value, size).is_none() {
self.fail(
location,
format!("the value {:#x} is not a proper {:?}", value, switch_ty),
)
}
self.check_edge(location, target, EdgeKind::Normal);
}
self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
}
TerminatorKind::Drop { target, unwind, .. } => {
self.check_edge(location, *target, EdgeKind::Normal);
if let Some(unwind) = unwind {
self.check_edge(location, *unwind, EdgeKind::Unwind);
}
}
TerminatorKind::DropAndReplace { target, unwind, .. } => {
if self.mir_phase > MirPhase::DropLowering {
self.fail(
location,
"`DropAndReplace` is not permitted to exist after drop elaboration",
);
}
self.check_edge(location, *target, EdgeKind::Normal);
if let Some(unwind) = unwind {
self.check_edge(location, *unwind, EdgeKind::Unwind);
}
}
TerminatorKind::Call { func, args, destination, cleanup, .. } => {
let func_ty = func.ty(&self.body.local_decls, self.tcx);
match func_ty.kind() {
ty::FnPtr(..) | ty::FnDef(..) => {}
_ => self.fail(
location,
format!("encountered non-callable type {} in `Call` terminator", func_ty),
),
}
if let Some((_, target)) = destination {
self.check_edge(location, *target, EdgeKind::Normal);
}
if let Some(cleanup) = cleanup {
self.check_edge(location, *cleanup, EdgeKind::Unwind);
}
// The call destination place and Operand::Move place used as an argument might be
// passed by a reference to the callee. Consequently they must be non-overlapping.
// Currently this simply checks for duplicate places.
self.place_cache.clear();
if let Some((destination, _)) = destination {
self.place_cache.push(destination.as_ref());
}
for arg in args {
if let Operand::Move(place) = arg {
self.place_cache.push(place.as_ref());
}
}
let all_len = self.place_cache.len();
self.place_cache.sort_unstable();
self.place_cache.dedup();
let has_duplicates = all_len != self.place_cache.len();
if has_duplicates {
self.fail(
location,
format!(
"encountered overlapping memory in `Call` terminator: {:?}",
terminator.kind,
),
);
}
}
TerminatorKind::Assert { cond, target, cleanup, .. } => {
let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
if cond_ty != self.tcx.types.bool {
self.fail(
location,
format!(
"encountered non-boolean condition of type {} in `Assert` terminator",
cond_ty
),
);
}
self.check_edge(location, *target, EdgeKind::Normal);
if let Some(cleanup) = cleanup {
self.check_edge(location, *cleanup, EdgeKind::Unwind);
}
}
TerminatorKind::Yield { resume, drop, .. } => {
if self.mir_phase > MirPhase::GeneratorLowering {
self.fail(location, "`Yield` should have been replaced by generator lowering");
}
self.check_edge(location, *resume, EdgeKind::Normal);
if let Some(drop) = drop {
self.check_edge(location, *drop, EdgeKind::Normal);
}
}
TerminatorKind::FalseEdge { real_target, imaginary_target } => {
self.check_edge(location, *real_target, EdgeKind::Normal);
self.check_edge(location, *imaginary_target, EdgeKind::Normal);
}
TerminatorKind::FalseUnwind { real_target, unwind } => {
self.check_edge(location, *real_target, EdgeKind::Normal);
if let Some(unwind) = unwind {
self.check_edge(location, *unwind, EdgeKind::Unwind);
}
}
TerminatorKind::InlineAsm { destination, .. } => {
if let Some(destination) = destination {
self.check_edge(location, *destination, EdgeKind::Normal);
}
}
// Nothing to validate for these.
TerminatorKind::Resume
| TerminatorKind::Abort
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::GeneratorDrop => {}
}
self.super_terminator(terminator, location);
}
fn visit_source_scope(&mut self, scope: &SourceScope) {
if self.body.source_scopes.get(*scope).is_none() {
self.tcx.sess.diagnostic().delay_span_bug(
self.body.span,
&format!(
"broken MIR in {:?} ({}):\ninvalid source scope {:?}",
self.body.source.instance, self.when, scope,
),
);
}
}
}<|fim▁end|>
|
/// Returns whether the two types are equal up to lifetimes.
/// All lifetimes, including higher-ranked ones, get ignored for this comparison.
/// (This is unlike the `erasing_regions` methods, which keep higher-ranked lifetimes for soundness reasons.)
|
<|file_name|>inject-script.spec.ts<|end_file_name|><|fim▁begin|>import { injectCoreHtml } from './inject-scripts';
describe('Inject Scripts', () => {
describe('injectCoreHtml', () => {
it('should replace an existed injected script tag', () => {
const inputHtml = '' +
'<html>\n' +
'<head>\n' +
' <script data-ionic="inject">\n' +
' alert(11111);\n' +
' </script>\n' +
'</head>\n' +
'<body>\n' +
'</body>\n' +
'</html>';
const output = injectCoreHtml(inputHtml, ' <script data-ionic="inject">\n' +
' alert(55555);\n' +
' </script>');
expect(output).toEqual(
'<html>\n' +
'<head>\n' +
' <script data-ionic="inject">\n' +
' alert(55555);\n' +
' </script>\n' +
'</head>\n' +
'<body>\n' +
'</body>\n' +
'</html>');
});
    it('should replace only one existing injected script tag', () => {
const inputHtml = '' +
'<html>\n' +
'<head>\n' +
' <script data-ionic="inject">\n' +
' alert(11111);\n' +
' </script>\n' +
' <script>\n' +
' alert(222);\n' +
' </script>\n' +
'</head>\n' +
'<body>\n' +<|fim▁hole|>
const output = injectCoreHtml(inputHtml, ' <script data-ionic="inject">\n' +
' alert(55555);\n' +
' </script>');
expect(output).toEqual(
'<html>\n' +
'<head>\n' +
' <script data-ionic="inject">\n' +
' alert(55555);\n' +
' </script>\n' +
' <script>\n' +
' alert(222);\n' +
' </script>\n' +
'</head>\n' +
'<body>\n' +
'</body>\n' +
'</html>');
});
it('should add script to top of file when no html tag', () => {
const inputHtml = '' +
'<body>\n' +
'</body>';
const output = injectCoreHtml(inputHtml, '<injected></injected>');
expect(output).toEqual(
'<injected></injected>\n' +
'<body>\n' +
'</body>');
});
it('should add script below <html> with attributes', () => {
const inputHtml = '' +
'<html dir="rtl">\n' +
'<body>\n' +
'</body>\n' +
'</html>';
const output = injectCoreHtml(inputHtml, '<injected></injected>');
expect(output).toEqual(
'<html dir="rtl">\n' +
'<injected></injected>\n' +
'<body>\n' +
'</body>\n' +
'</html>');
});
it('should add script below <html> when no head tag', () => {
const inputHtml = '' +
'<html>\n' +
'<body>\n' +
'</body>\n' +
'</html>';
const output = injectCoreHtml(inputHtml, '<injected></injected>');
expect(output).toEqual(
'<html>\n' +
'<injected></injected>\n' +
'<body>\n' +
'</body>\n' +
'</html>');
});
it('should add script below <head>', () => {
const inputHtml = '' +
'<html>\n' +
'<head>\n' +
'</head>\n' +
'<body>\n' +
'</body>\n' +
'</html>';
const output = injectCoreHtml(inputHtml, '<injected></injected>');
expect(output).toEqual(
'<html>\n' +
'<head>\n' +
'<injected></injected>\n' +
'</head>\n' +
'<body>\n' +
'</body>\n' +
'</html>');
});
it('should add script below <head> with attributes and all caps tag', () => {
const inputHtml = '' +
'<html>\n' +
'<HEAD data-attr="yup">\n' +
'</HEAD>\n' +
'<body>\n' +
'</body>\n' +
'</html>';
const output = injectCoreHtml(inputHtml, '<injected></injected>');
expect(output).toEqual(
'<html>\n' +
'<HEAD data-attr="yup">\n' +
'<injected></injected>\n' +
'</HEAD>\n' +
'<body>\n' +
'</body>\n' +
'</html>');
});
});
});<|fim▁end|>
|
'</body>\n' +
'</html>';
|
<|file_name|>foreign.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use back::link;
use llvm::{ValueRef, CallConv, get_param};
use llvm;
use middle::weak_lang_items;
use rustc::ast_map;
use trans::attributes;
use trans::base::{llvm_linkage_by_name, push_ctxt};
use trans::base;
use trans::build::*;
use trans::cabi;
use trans::common::*;
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::machine;
use trans::monomorphize;
use trans::type_::Type;
use trans::type_of::*;
use trans::type_of;
use middle::ty::{self, Ty};
use middle::subst::Substs;
use std::cmp;
use libc::c_uint;
use syntax::abi::{Cdecl, Aapcs, C, Win64, Abi};
use syntax::abi::{RustIntrinsic, Rust, RustCall, Stdcall, Fastcall, System};
use syntax::codemap::Span;
use syntax::parse::token::{InternedString, special_idents};
use syntax::parse::token;
use syntax::ast;
use syntax::attr;
use syntax::print::pprust;
use util::ppaux::Repr;
///////////////////////////////////////////////////////////////////////////
// Type definitions
struct ForeignTypes<'tcx> {
/// Rust signature of the function
fn_sig: ty::FnSig<'tcx>,
/// Adapter object for handling native ABI rules (trust me, you
/// don't want to know)
fn_ty: cabi::FnType,
/// LLVM types that will appear on the foreign function
llsig: LlvmSignature,
}
struct LlvmSignature {
// LLVM versions of the types of this function's arguments.
llarg_tys: Vec<Type> ,
// LLVM version of the type that this function returns. Note that
// this *may not be* the declared return type of the foreign
// function, because the foreign function may opt to return via an
// out pointer.
llret_ty: Type,
/// True if there is a return value (not bottom, not unit)
ret_def: bool,
}
///////////////////////////////////////////////////////////////////////////
// Calls to external functions
pub fn llvm_calling_convention(ccx: &CrateContext,
abi: Abi) -> CallConv {
match ccx.sess().target.target.adjust_abi(abi) {
RustIntrinsic => {
// Intrinsics are emitted at the call site
ccx.sess().bug("asked to register intrinsic fn");
}
Rust => {
// FIXME(#3678) Implement linking to foreign fns with Rust ABI
ccx.sess().unimpl("foreign functions with Rust ABI");
}
RustCall => {
// FIXME(#3678) Implement linking to foreign fns with Rust ABI
ccx.sess().unimpl("foreign functions with RustCall ABI");
}
// It's the ABI's job to select this, not us.
System => ccx.sess().bug("system abi should be selected elsewhere"),
Stdcall => llvm::X86StdcallCallConv,
Fastcall => llvm::X86FastcallCallConv,
C => llvm::CCallConv,
Win64 => llvm::X86_64_Win64,
// These API constants ought to be more specific...
Cdecl => llvm::CCallConv,
Aapcs => llvm::CCallConv,
}
}
pub fn register_static(ccx: &CrateContext,
foreign_item: &ast::ForeignItem) -> ValueRef {
let ty = ty::node_id_to_type(ccx.tcx(), foreign_item.id);
let llty = type_of::type_of(ccx, ty);
let ident = link_name(foreign_item);
match attr::first_attr_value_str_by_name(&foreign_item.attrs,
"linkage") {
// If this is a static with a linkage specified, then we need to handle
// it a little specially. The typesystem prevents things like &T and
// extern "C" fn() from being non-null, so we can't just declare a
// static and call it a day. Some linkages (like weak) will make it such
// that the static actually has a null value.
Some(name) => {
let linkage = match llvm_linkage_by_name(&name) {
Some(linkage) => linkage,
None => {
ccx.sess().span_fatal(foreign_item.span,
"invalid linkage specified");
}
};
let llty2 = match ty.sty {
ty::ty_ptr(ref mt) => type_of::type_of(ccx, mt.ty),
_ => {
ccx.sess().span_fatal(foreign_item.span,
"must have type `*T` or `*mut T`");
}
};
unsafe {
// Declare a symbol `foo` with the desired linkage.
let g1 = declare::declare_global(ccx, &ident[..], llty2);
llvm::SetLinkage(g1, linkage);
// Declare an internal global `extern_with_linkage_foo` which
// is initialized with the address of `foo`. If `foo` is
// discarded during linking (for example, if `foo` has weak
// linkage and there are no definitions), then
// `extern_with_linkage_foo` will instead be initialized to
// zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(&ident);
let g2 = declare::define_global(ccx, &real_name[..], llty).unwrap_or_else(||{
ccx.sess().span_fatal(foreign_item.span,
&format!("symbol `{}` is already defined", ident))
});
llvm::SetLinkage(g2, llvm::InternalLinkage);
llvm::LLVMSetInitializer(g2, g1);
g2
}
}
None => // Generate an external declaration.
declare::declare_global(ccx, &ident[..], llty),
}
}
// only use this for foreign function ABIs and glue, use `get_extern_rust_fn` for Rust functions
pub fn get_extern_fn(ccx: &CrateContext,
externs: &mut ExternMap,
name: &str,
cc: llvm::CallConv,
ty: Type,
output: Ty)
-> ValueRef {
match externs.get(name) {
Some(n) => return *n,
None => {}
}
let f = declare::declare_fn(ccx, name, cc, ty, ty::FnConverging(output));
externs.insert(name.to_string(), f);
f
}
/// Registers a foreign function found in a library. Just adds a LLVM global.
pub fn register_foreign_item_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
abi: Abi, fty: Ty<'tcx>,
name: &str) -> ValueRef {
debug!("register_foreign_item_fn(abi={}, \
ty={}, \
name={})",
abi.repr(ccx.tcx()),
fty.repr(ccx.tcx()),
name);
let cc = llvm_calling_convention(ccx, abi);
// Register the function as a C extern fn
let tys = foreign_types_for_fn_ty(ccx, fty);
// Make sure the calling convention is right for variadic functions
// (should've been caught if not in typeck)
if tys.fn_sig.variadic {
assert!(cc == llvm::CCallConv);
}
// Create the LLVM value for the C extern fn
let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
let llfn = get_extern_fn(ccx, &mut *ccx.externs().borrow_mut(), name, cc, llfn_ty, fty);
add_argument_attributes(&tys, llfn);
llfn
}
<|fim▁hole|>/// from the Rust argument passing rules to the native rules.
///
/// # Parameters
///
/// - `callee_ty`: Rust type for the function we are calling
/// - `llfn`: the function pointer we are calling
/// - `llretptr`: where to store the return value of the function
/// - `llargs_rust`: a list of the argument values, prepared
/// as they would be if calling a Rust function
/// - `passed_arg_tys`: Rust type for the arguments. Normally we
/// can derive these from callee_ty but in the case of variadic
/// functions passed_arg_tys will include the Rust type of all
/// the arguments including the ones not specified in the fn's signature.
pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
callee_ty: Ty<'tcx>,
llfn: ValueRef,
llretptr: ValueRef,
llargs_rust: &[ValueRef],
passed_arg_tys: Vec<Ty<'tcx>>,
call_debug_loc: DebugLoc)
-> Block<'blk, 'tcx>
{
let ccx = bcx.ccx();
let tcx = bcx.tcx();
debug!("trans_native_call(callee_ty={}, \
llfn={}, \
llretptr={})",
callee_ty.repr(tcx),
ccx.tn().val_to_string(llfn),
ccx.tn().val_to_string(llretptr));
let (fn_abi, fn_sig) = match callee_ty.sty {
ty::ty_bare_fn(_, ref fn_ty) => (fn_ty.abi, &fn_ty.sig),
_ => ccx.sess().bug("trans_native_call called on non-function type")
};
let fn_sig = ty::erase_late_bound_regions(ccx.tcx(), fn_sig);
let llsig = foreign_signature(ccx, &fn_sig, &passed_arg_tys[..]);
let fn_type = cabi::compute_abi_info(ccx,
&llsig.llarg_tys,
llsig.llret_ty,
llsig.ret_def);
let arg_tys: &[cabi::ArgType] = &fn_type.arg_tys;
let mut llargs_foreign = Vec::new();
    // If the foreign ABI expects the return value by pointer, supply the
// pointer that Rust gave us. Sometimes we have to bitcast
// because foreign fns return slightly different (but equivalent)
// views on the same type (e.g., i64 in place of {i32,i32}).
if fn_type.ret_ty.is_indirect() {
match fn_type.ret_ty.cast {
Some(ty) => {
let llcastedretptr =
BitCast(bcx, llretptr, ty.ptr_to());
llargs_foreign.push(llcastedretptr);
}
None => {
llargs_foreign.push(llretptr);
}
}
}
for (i, &llarg_rust) in llargs_rust.iter().enumerate() {
let mut llarg_rust = llarg_rust;
if arg_tys[i].is_ignore() {
continue;
}
// Does Rust pass this argument by pointer?
let rust_indirect = type_of::arg_is_indirect(ccx, passed_arg_tys[i]);
debug!("argument {}, llarg_rust={}, rust_indirect={}, arg_ty={}",
i,
ccx.tn().val_to_string(llarg_rust),
rust_indirect,
ccx.tn().type_to_string(arg_tys[i].ty));
// Ensure that we always have the Rust value indirectly,
// because it makes bitcasting easier.
if !rust_indirect {
let scratch =
base::alloca(bcx,
type_of::type_of(ccx, passed_arg_tys[i]),
"__arg");
base::store_ty(bcx, llarg_rust, scratch, passed_arg_tys[i]);
llarg_rust = scratch;
}
debug!("llarg_rust={} (after indirection)",
ccx.tn().val_to_string(llarg_rust));
// Check whether we need to do any casting
match arg_tys[i].cast {
Some(ty) => llarg_rust = BitCast(bcx, llarg_rust, ty.ptr_to()),
None => ()
}
debug!("llarg_rust={} (after casting)",
ccx.tn().val_to_string(llarg_rust));
// Finally, load the value if needed for the foreign ABI
let foreign_indirect = arg_tys[i].is_indirect();
let llarg_foreign = if foreign_indirect {
llarg_rust
} else {
if ty::type_is_bool(passed_arg_tys[i]) {
let val = LoadRangeAssert(bcx, llarg_rust, 0, 2, llvm::False);
Trunc(bcx, val, Type::i1(bcx.ccx()))
} else {
Load(bcx, llarg_rust)
}
};
debug!("argument {}, llarg_foreign={}",
i, ccx.tn().val_to_string(llarg_foreign));
// fill padding with undef value
match arg_tys[i].pad {
Some(ty) => llargs_foreign.push(C_undef(ty)),
None => ()
}
llargs_foreign.push(llarg_foreign);
}
let cc = llvm_calling_convention(ccx, fn_abi);
// A function pointer is called without the declaration available, so we have to apply
// any attributes with ABI implications directly to the call instruction.
let mut attrs = llvm::AttrBuilder::new();
// Add attributes that are always applicable, independent of the concrete foreign ABI
if fn_type.ret_ty.is_indirect() {
let llret_sz = machine::llsize_of_real(ccx, fn_type.ret_ty.ty);
// The outptr can be noalias and nocapture because it's entirely
// invisible to the program. We also know it's nonnull as well
// as how many bytes we can dereference
attrs.arg(1, llvm::Attribute::NoAlias)
.arg(1, llvm::Attribute::NoCapture)
.arg(1, llvm::DereferenceableAttribute(llret_sz));
};
// Add attributes that depend on the concrete foreign ABI
let mut arg_idx = if fn_type.ret_ty.is_indirect() { 1 } else { 0 };
match fn_type.ret_ty.attr {
Some(attr) => { attrs.arg(arg_idx, attr); },
_ => ()
}
arg_idx += 1;
for arg_ty in &fn_type.arg_tys {
if arg_ty.is_ignore() {
continue;
}
// skip padding
if arg_ty.pad.is_some() { arg_idx += 1; }
if let Some(attr) = arg_ty.attr {
attrs.arg(arg_idx, attr);
}
arg_idx += 1;
}
let llforeign_retval = CallWithConv(bcx,
llfn,
&llargs_foreign[..],
cc,
Some(attrs),
call_debug_loc);
// If the function we just called does not use an outpointer,
// store the result into the rust outpointer. Cast the outpointer
// type to match because some ABIs will use a different type than
// the Rust type. e.g., a {u32,u32} struct could be returned as
// u64.
if llsig.ret_def && !fn_type.ret_ty.is_indirect() {
let llrust_ret_ty = llsig.llret_ty;
let llforeign_ret_ty = match fn_type.ret_ty.cast {
Some(ty) => ty,
None => fn_type.ret_ty.ty
};
debug!("llretptr={}", ccx.tn().val_to_string(llretptr));
debug!("llforeign_retval={}", ccx.tn().val_to_string(llforeign_retval));
debug!("llrust_ret_ty={}", ccx.tn().type_to_string(llrust_ret_ty));
debug!("llforeign_ret_ty={}", ccx.tn().type_to_string(llforeign_ret_ty));
if llrust_ret_ty == llforeign_ret_ty {
match fn_sig.output {
ty::FnConverging(result_ty) => {
base::store_ty(bcx, llforeign_retval, llretptr, result_ty)
}
ty::FnDiverging => {}
}
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
// code that follows is the only reliable way I have
// found to do a transform like i64 -> {i32,i32}.
// Basically we dump the data onto the stack then memcpy it.
//
// Other approaches I tried:
// - Casting rust ret pointer to the foreign type and using Store
// is (a) unsafe if size of foreign type > size of rust type and
// (b) runs afoul of strict aliasing rules, yielding invalid
// assembly under -O (specifically, the store gets removed).
// - Truncating foreign type to correct integral type and then
// bitcasting to the struct type yields invalid cast errors.
let llscratch = base::alloca(bcx, llforeign_ret_ty, "__cast");
Store(bcx, llforeign_retval, llscratch);
let llscratch_i8 = BitCast(bcx, llscratch, Type::i8(ccx).ptr_to());
let llretptr_i8 = BitCast(bcx, llretptr, Type::i8(ccx).ptr_to());
let llrust_size = machine::llsize_of_store(ccx, llrust_ret_ty);
let llforeign_align = machine::llalign_of_min(ccx, llforeign_ret_ty);
let llrust_align = machine::llalign_of_min(ccx, llrust_ret_ty);
let llalign = cmp::min(llforeign_align, llrust_align);
debug!("llrust_size={}", llrust_size);
base::call_memcpy(bcx, llretptr_i8, llscratch_i8,
C_uint(ccx, llrust_size), llalign as u32);
}
}
return bcx;
}
// feature gate SIMD types in FFI, since I (huonw) am not sure the
// ABIs are handled at all correctly.
fn gate_simd_ffi(tcx: &ty::ctxt, decl: &ast::FnDecl, ty: &ty::BareFnTy) {
if !tcx.sess.features.borrow().simd_ffi {
let check = |ast_ty: &ast::Ty, ty: ty::Ty| {
if ty::type_is_simd(tcx, ty) {
tcx.sess.span_err(ast_ty.span,
&format!("use of SIMD type `{}` in FFI is highly experimental and \
may result in invalid code",
pprust::ty_to_string(ast_ty)));
tcx.sess.fileline_help(ast_ty.span,
"add #![feature(simd_ffi)] to the crate attributes to enable");
}
};
let sig = &ty.sig.0;
for (input, ty) in decl.inputs.iter().zip(sig.inputs.iter()) {
check(&*input.ty, *ty)
}
if let ast::Return(ref ty) = decl.output {
check(&**ty, sig.output.unwrap())
}
}
}
pub fn trans_foreign_mod(ccx: &CrateContext, foreign_mod: &ast::ForeignMod) {
let _icx = push_ctxt("foreign::trans_foreign_mod");
for foreign_item in &foreign_mod.items {
let lname = link_name(&**foreign_item);
if let ast::ForeignItemFn(ref decl, _) = foreign_item.node {
match foreign_mod.abi {
Rust | RustIntrinsic => {}
abi => {
let ty = ty::node_id_to_type(ccx.tcx(), foreign_item.id);
match ty.sty {
ty::ty_bare_fn(_, bft) => gate_simd_ffi(ccx.tcx(), &**decl, bft),
_ => ccx.tcx().sess.span_bug(foreign_item.span,
"foreign fn's sty isn't a bare_fn_ty?")
}
let llfn = register_foreign_item_fn(ccx, abi, ty, &lname);
attributes::from_fn_attrs(ccx, &foreign_item.attrs, llfn);
// Unlike for other items, we shouldn't call
// `base::update_linkage` here. Foreign items have
// special linkage requirements, which are handled
// inside `foreign::register_*`.
}
}
}
ccx.item_symbols().borrow_mut().insert(foreign_item.id,
lname.to_string());
}
}
///////////////////////////////////////////////////////////////////////////
// Rust functions with foreign ABIs
//
// These are normal Rust functions defined with foreign ABIs. For
// now, and perhaps forever, we translate these using a "layer of
// indirection". That is, given a Rust declaration like:
//
// extern "C" fn foo(i: u32) -> u32 { ... }
//
// we will generate a function like:
//
// S foo(T i) {
// S r;
// foo0(&r, NULL, i);
// return r;
// }
//
// #[inline_always]
// void foo0(uint32_t *r, void *env, uint32_t i) { ... }
//
// Here the (internal) `foo0` function follows the Rust ABI as normal,
// where the `foo` function follows the C ABI. We rely on LLVM to
// inline the one into the other. Of course we could just generate the
// correct code in the first place, but this is much simpler.
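//
// For orientation, a sketch of the user-level declaration this layer
// exists to support (illustrative only, not part of this module):
//
//     #[no_mangle]
//     pub extern "C" fn foo(i: u32) -> u32 { i + 1 }
//
// The generated `foo` is what C callers link against; the inner `foo0`
// is the ordinary Rust-ABI body emitted by base::trans_fn.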
pub fn decl_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
name: &str)
-> ValueRef {
let tys = foreign_types_for_fn_ty(ccx, t);
let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
let cconv = match t.sty {
ty::ty_bare_fn(_, ref fn_ty) => {
llvm_calling_convention(ccx, fn_ty.abi)
}
_ => panic!("expected bare fn in decl_rust_fn_with_foreign_abi")
};
let llfn = declare::declare_fn(ccx, name, cconv, llfn_ty,
ty::FnConverging(ty::mk_nil(ccx.tcx())));
add_argument_attributes(&tys, llfn);
debug!("decl_rust_fn_with_foreign_abi(llfn_ty={}, llfn={})",
ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
llfn
}
pub fn register_rust_fn_with_foreign_abi(ccx: &CrateContext,
sp: Span,
sym: String,
node_id: ast::NodeId)
-> ValueRef {
let _icx = push_ctxt("foreign::register_foreign_fn");
let tys = foreign_types_for_id(ccx, node_id);
let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
let t = ty::node_id_to_type(ccx.tcx(), node_id);
let cconv = match t.sty {
ty::ty_bare_fn(_, ref fn_ty) => {
llvm_calling_convention(ccx, fn_ty.abi)
}
_ => panic!("expected bare fn in register_rust_fn_with_foreign_abi")
};
let llfn = base::register_fn_llvmty(ccx, sp, sym, node_id, cconv, llfn_ty);
add_argument_attributes(&tys, llfn);
debug!("register_rust_fn_with_foreign_abi(node_id={}, llfn_ty={}, llfn={})",
node_id, ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
llfn
}
pub fn trans_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
decl: &ast::FnDecl,
body: &ast::Block,
attrs: &[ast::Attribute],
llwrapfn: ValueRef,
param_substs: &'tcx Substs<'tcx>,
id: ast::NodeId,
hash: Option<&str>) {
let _icx = push_ctxt("foreign::build_foreign_fn");
let fnty = ty::node_id_to_type(ccx.tcx(), id);
let mty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &fnty);
let tys = foreign_types_for_fn_ty(ccx, mty);
unsafe { // unsafe because we call LLVM operations
// Build up the Rust function (`foo0` above).
let llrustfn = build_rust_fn(ccx, decl, body, param_substs, attrs, id, hash);
// Build up the foreign wrapper (`foo` above).
return build_wrap_fn(ccx, llrustfn, llwrapfn, &tys, mty);
}
fn build_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
decl: &ast::FnDecl,
body: &ast::Block,
param_substs: &'tcx Substs<'tcx>,
attrs: &[ast::Attribute],
id: ast::NodeId,
hash: Option<&str>)
-> ValueRef
{
let _icx = push_ctxt("foreign::foreign::build_rust_fn");
let tcx = ccx.tcx();
let t = ty::node_id_to_type(tcx, id);
let t = monomorphize::apply_param_substs(tcx, param_substs, &t);
let ps = ccx.tcx().map.with_path(id, |path| {
let abi = Some(ast_map::PathName(special_idents::clownshoe_abi.name));
link::mangle(path.chain(abi.into_iter()), hash)
});
// Compute the type that the function would have if it were just a
// normal Rust function. This will be the type of the wrappee fn.
match t.sty {
ty::ty_bare_fn(_, ref f) => {
assert!(f.abi != Rust && f.abi != RustIntrinsic);
}
_ => {
ccx.sess().bug(&format!("build_rust_fn: extern fn {} has ty {}, \
expected a bare fn ty",
ccx.tcx().map.path_to_string(id),
t.repr(tcx)));
}
};
debug!("build_rust_fn: path={} id={} t={}",
ccx.tcx().map.path_to_string(id),
id, t.repr(tcx));
let llfn = declare::define_internal_rust_fn(ccx, &ps[..], t).unwrap_or_else(||{
ccx.sess().bug(&format!("symbol `{}` already defined", ps));
});
attributes::from_fn_attrs(ccx, attrs, llfn);
base::trans_fn(ccx, decl, body, llfn, param_substs, id, &[]);
llfn
}
unsafe fn build_wrap_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
llrustfn: ValueRef,
llwrapfn: ValueRef,
tys: &ForeignTypes<'tcx>,
t: Ty<'tcx>) {
let _icx = push_ctxt(
"foreign::trans_rust_fn_with_foreign_abi::build_wrap_fn");
let tcx = ccx.tcx();
debug!("build_wrap_fn(llrustfn={}, llwrapfn={}, t={})",
ccx.tn().val_to_string(llrustfn),
ccx.tn().val_to_string(llwrapfn),
t.repr(ccx.tcx()));
// Avoid all the Rust generation stuff and just generate raw
// LLVM here.
//
// We want to generate code like this:
//
// S foo(T i) {
// S r;
// foo0(&r, NULL, i);
// return r;
// }
if llvm::LLVMCountBasicBlocks(llwrapfn) != 0 {
ccx.sess().bug("wrapping a function inside non-empty wrapper, most likely cause is \
multiple functions being wrapped");
}
let ptr = "the block\0".as_ptr();
let the_block = llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llwrapfn,
ptr as *const _);
let builder = ccx.builder();
builder.position_at_end(the_block);
// Array for the arguments we will pass to the rust function.
let mut llrust_args = Vec::new();
let mut next_foreign_arg_counter: c_uint = 0;
let mut next_foreign_arg = |pad: bool| -> c_uint {
next_foreign_arg_counter += if pad {
2
} else {
1
};
next_foreign_arg_counter - 1
};
// If there is an out pointer on the foreign function
let foreign_outptr = {
if tys.fn_ty.ret_ty.is_indirect() {
Some(get_param(llwrapfn, next_foreign_arg(false)))
} else {
None
}
};
let rustfn_ty = Type::from_ref(llvm::LLVMTypeOf(llrustfn)).element_type();
let mut rust_param_tys = rustfn_ty.func_params().into_iter();
// Push Rust return pointer, using null if it will be unused.
let rust_uses_outptr = match tys.fn_sig.output {
ty::FnConverging(ret_ty) => type_of::return_uses_outptr(ccx, ret_ty),
ty::FnDiverging => false
};
let return_alloca: Option<ValueRef>;
let llrust_ret_ty = if rust_uses_outptr {
rust_param_tys.next().expect("Missing return type!").element_type()
} else {
rustfn_ty.return_type()
};
if rust_uses_outptr {
// Rust expects to use an outpointer. If the foreign fn
// also uses an outpointer, we can reuse it, but the types
// may vary, so cast first to the Rust type. If the
// foreign fn does NOT use an outpointer, we will have to
// alloca some scratch space on the stack.
match foreign_outptr {
Some(llforeign_outptr) => {
debug!("out pointer, foreign={}",
ccx.tn().val_to_string(llforeign_outptr));
let llrust_retptr =
builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to());
debug!("out pointer, foreign={} (casted)",
ccx.tn().val_to_string(llrust_retptr));
llrust_args.push(llrust_retptr);
return_alloca = None;
}
None => {
let slot = builder.alloca(llrust_ret_ty, "return_alloca");
debug!("out pointer, \
                            alloca'd={}, \
llrust_ret_ty={}, \
return_ty={}",
ccx.tn().val_to_string(slot),
ccx.tn().type_to_string(llrust_ret_ty),
tys.fn_sig.output.repr(tcx));
llrust_args.push(slot);
return_alloca = Some(slot);
}
}
} else {
// Rust does not expect an outpointer. If the foreign fn
// does use an outpointer, then we will do a store of the
// value that the Rust fn returns.
return_alloca = None;
};
// Build up the arguments to the call to the rust function.
// Careful to adapt for cases where the native convention uses
// a pointer and Rust does not or vice versa.
for i in 0..tys.fn_sig.inputs.len() {
let rust_ty = tys.fn_sig.inputs[i];
let rust_indirect = type_of::arg_is_indirect(ccx, rust_ty);
let llty = rust_param_tys.next().expect("Not enough parameter types!");
let llrust_ty = if rust_indirect {
llty.element_type()
} else {
llty
};
let llforeign_arg_ty = tys.fn_ty.arg_tys[i];
let foreign_indirect = llforeign_arg_ty.is_indirect();
if llforeign_arg_ty.is_ignore() {
debug!("skipping ignored arg #{}", i);
llrust_args.push(C_undef(llrust_ty));
continue;
}
// skip padding
let foreign_index = next_foreign_arg(llforeign_arg_ty.pad.is_some());
let mut llforeign_arg = get_param(llwrapfn, foreign_index);
debug!("llforeign_arg {}{}: {}", "#",
i, ccx.tn().val_to_string(llforeign_arg));
debug!("rust_indirect = {}, foreign_indirect = {}",
rust_indirect, foreign_indirect);
// Ensure that the foreign argument is indirect (by
// pointer). It makes adapting types easier, since we can
// always just bitcast pointers.
if !foreign_indirect {
llforeign_arg = if ty::type_is_bool(rust_ty) {
let lltemp = builder.alloca(Type::bool(ccx), "");
builder.store(builder.zext(llforeign_arg, Type::bool(ccx)), lltemp);
lltemp
} else {
let lltemp = builder.alloca(val_ty(llforeign_arg), "");
builder.store(llforeign_arg, lltemp);
lltemp
}
}
// If the types in the ABI and the Rust types don't match,
// bitcast the llforeign_arg pointer so it matches the types
// Rust expects.
if llforeign_arg_ty.cast.is_some() {
assert!(!foreign_indirect);
llforeign_arg = builder.bitcast(llforeign_arg, llrust_ty.ptr_to());
}
let llrust_arg = if rust_indirect {
llforeign_arg
} else {
if ty::type_is_bool(rust_ty) {
let tmp = builder.load_range_assert(llforeign_arg, 0, 2, llvm::False);
builder.trunc(tmp, Type::i1(ccx))
} else if type_of::type_of(ccx, rust_ty).is_aggregate() {
// We want to pass small aggregates as immediate values, but using an aggregate
// LLVM type for this leads to bad optimizations, so its arg type is an
// appropriately sized integer and we have to convert it
let tmp = builder.bitcast(llforeign_arg,
type_of::arg_type_of(ccx, rust_ty).ptr_to());
let load = builder.load(tmp);
llvm::LLVMSetAlignment(load, type_of::align_of(ccx, rust_ty));
load
} else {
builder.load(llforeign_arg)
}
};
debug!("llrust_arg {}{}: {}", "#",
i, ccx.tn().val_to_string(llrust_arg));
llrust_args.push(llrust_arg);
}
// Perform the call itself
debug!("calling llrustfn = {}, t = {}",
ccx.tn().val_to_string(llrustfn), t.repr(ccx.tcx()));
let attributes = attributes::from_fn_type(ccx, t);
let llrust_ret_val = builder.call(llrustfn, &llrust_args, Some(attributes));
// Get the return value where the foreign fn expects it.
let llforeign_ret_ty = match tys.fn_ty.ret_ty.cast {
Some(ty) => ty,
None => tys.fn_ty.ret_ty.ty
};
match foreign_outptr {
None if !tys.llsig.ret_def => {
// Function returns `()` or `bot`, which in Rust is the LLVM
// type "{}" but in foreign ABIs is "Void".
builder.ret_void();
}
None if rust_uses_outptr => {
// Rust uses an outpointer, but the foreign ABI does not. Load.
let llrust_outptr = return_alloca.unwrap();
let llforeign_outptr_casted =
builder.bitcast(llrust_outptr, llforeign_ret_ty.ptr_to());
let llforeign_retval = builder.load(llforeign_outptr_casted);
builder.ret(llforeign_retval);
}
None if llforeign_ret_ty != llrust_ret_ty => {
// Neither ABI uses an outpointer, but the types don't
// quite match. Must cast. Probably we should try and
// examine the types and use a concrete llvm cast, but
// right now we just use a temp memory location and
// bitcast the pointer, which is the same thing the
// old wrappers used to do.
let lltemp = builder.alloca(llforeign_ret_ty, "");
let lltemp_casted = builder.bitcast(lltemp, llrust_ret_ty.ptr_to());
builder.store(llrust_ret_val, lltemp_casted);
let llforeign_retval = builder.load(lltemp);
builder.ret(llforeign_retval);
}
None => {
// Neither ABI uses an outpointer, and the types
// match. Easy peasy.
builder.ret(llrust_ret_val);
}
Some(llforeign_outptr) if !rust_uses_outptr => {
// Foreign ABI requires an out pointer, but Rust doesn't.
// Store Rust return value.
let llforeign_outptr_casted =
builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to());
builder.store(llrust_ret_val, llforeign_outptr_casted);
builder.ret_void();
}
Some(_) => {
// Both ABIs use outpointers. Easy peasy.
builder.ret_void();
}
}
}
}
///////////////////////////////////////////////////////////////////////////
// General ABI Support
//
// This code is kind of a confused mess and needs to be reworked given
// the massive simplifications that have occurred.
pub fn link_name(i: &ast::ForeignItem) -> InternedString {
match attr::first_attr_value_str_by_name(&i.attrs, "link_name") {
Some(ln) => ln.clone(),
None => match weak_lang_items::link_name(&i.attrs) {
Some(name) => name,
None => token::get_ident(i.ident),
}
}
}
/// The ForeignSignature is the LLVM types of the arguments/return type of a function. Note that
/// these LLVM types are not quite the same as the LLVM types would be for a native Rust function
/// because foreign functions just plain ignore modes. They also don't pass aggregate values by
/// pointer like we do.
fn foreign_signature<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
fn_sig: &ty::FnSig<'tcx>,
arg_tys: &[Ty<'tcx>])
-> LlvmSignature {
let llarg_tys = arg_tys.iter().map(|&arg| foreign_arg_type_of(ccx, arg)).collect();
let (llret_ty, ret_def) = match fn_sig.output {
ty::FnConverging(ret_ty) =>
(type_of::foreign_arg_type_of(ccx, ret_ty), !return_type_is_void(ccx, ret_ty)),
ty::FnDiverging =>
(Type::nil(ccx), false)
};
LlvmSignature {
llarg_tys: llarg_tys,
llret_ty: llret_ty,
ret_def: ret_def
}
}
fn foreign_types_for_id<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
id: ast::NodeId) -> ForeignTypes<'tcx> {
foreign_types_for_fn_ty(ccx, ty::node_id_to_type(ccx.tcx(), id))
}
fn foreign_types_for_fn_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ty: Ty<'tcx>) -> ForeignTypes<'tcx> {
let fn_sig = match ty.sty {
ty::ty_bare_fn(_, ref fn_ty) => &fn_ty.sig,
_ => ccx.sess().bug("foreign_types_for_fn_ty called on non-function type")
};
let fn_sig = ty::erase_late_bound_regions(ccx.tcx(), fn_sig);
let llsig = foreign_signature(ccx, &fn_sig, &fn_sig.inputs);
let fn_ty = cabi::compute_abi_info(ccx,
&llsig.llarg_tys,
llsig.llret_ty,
llsig.ret_def);
debug!("foreign_types_for_fn_ty(\
ty={}, \
llsig={} -> {}, \
fn_ty={} -> {}, \
ret_def={}",
ty.repr(ccx.tcx()),
ccx.tn().types_to_str(&llsig.llarg_tys),
ccx.tn().type_to_string(llsig.llret_ty),
ccx.tn().types_to_str(&fn_ty.arg_tys.iter().map(|t| t.ty).collect::<Vec<_>>()),
ccx.tn().type_to_string(fn_ty.ret_ty.ty),
llsig.ret_def);
ForeignTypes {
fn_sig: fn_sig,
llsig: llsig,
fn_ty: fn_ty
}
}
fn lltype_for_fn_from_foreign_types(ccx: &CrateContext, tys: &ForeignTypes) -> Type {
let mut llargument_tys = Vec::new();
let ret_ty = tys.fn_ty.ret_ty;
let llreturn_ty = if ret_ty.is_indirect() {
llargument_tys.push(ret_ty.ty.ptr_to());
Type::void(ccx)
} else {
match ret_ty.cast {
Some(ty) => ty,
None => ret_ty.ty
}
};
for &arg_ty in &tys.fn_ty.arg_tys {
if arg_ty.is_ignore() {
continue;
}
// add padding
match arg_ty.pad {
Some(ty) => llargument_tys.push(ty),
None => ()
}
let llarg_ty = if arg_ty.is_indirect() {
arg_ty.ty.ptr_to()
} else {
match arg_ty.cast {
Some(ty) => ty,
None => arg_ty.ty
}
};
llargument_tys.push(llarg_ty);
}
if tys.fn_sig.variadic {
Type::variadic_func(&llargument_tys, &llreturn_ty)
} else {
Type::func(&llargument_tys[..], &llreturn_ty)
}
}
pub fn lltype_for_foreign_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ty: Ty<'tcx>) -> Type {
lltype_for_fn_from_foreign_types(ccx, &foreign_types_for_fn_ty(ccx, ty))
}
fn add_argument_attributes(tys: &ForeignTypes,
llfn: ValueRef) {
let mut i = if tys.fn_ty.ret_ty.is_indirect() {
1
} else {
0
};
match tys.fn_ty.ret_ty.attr {
Some(attr) => unsafe {
llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr.bits() as u64);
},
None => {}
}
i += 1;
for &arg_ty in &tys.fn_ty.arg_tys {
if arg_ty.is_ignore() {
continue;
}
// skip padding
if arg_ty.pad.is_some() { i += 1; }
match arg_ty.attr {
Some(attr) => unsafe {
llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr.bits() as u64);
},
None => ()
}
i += 1;
}
}<|fim▁end|>
|
/// Prepares a call to a native function. This requires adapting
|
<|file_name|>decode.rs<|end_file_name|><|fim▁begin|>use std::{iter, fs, path};
use image::ImageFormat;
use criterion::{Criterion, criterion_group, criterion_main};
#[derive(Clone, Copy)]
struct BenchDef {
dir: &'static [&'static str],
files: &'static [&'static str],
format: ImageFormat,
}
fn load_all(c: &mut Criterion) {
const BENCH_DEFS: &'static [BenchDef] = &[
BenchDef {
dir: &["bmp", "images"],
files: &[
"Core_1_Bit.bmp",
"Core_4_Bit.bmp",
"Core_8_Bit.bmp",
"rgb16.bmp",
"rgb24.bmp",
"rgb32.bmp",
"pal4rle.bmp",
"pal8rle.bmp",
"rgb16-565.bmp",
"rgb32bf.bmp",
],
format: ImageFormat::Bmp,
},
BenchDef {
dir: &["gif", "simple"],
files: &[
"alpha_gif_a.gif",
"sample_1.gif",
],
format: ImageFormat::Gif,
},
BenchDef {
dir: &["hdr", "images"],
files: &[
"image1.hdr",
"rgbr4x4.hdr",
],
format: ImageFormat::Hdr,
},
BenchDef {
dir: &["ico", "images"],
files: &[
"bmp-24bpp-mask.ico",
"bmp-32bpp-alpha.ico",
"png-32bpp-alpha.ico",
"smile.ico",
],
format: ImageFormat::Ico,
},
BenchDef {
dir: &["jpg", "progressive"],
files: &[
"3.jpg",
"cat.jpg",
"test.jpg",
],
format: ImageFormat::Jpeg,
},
// TODO: pnm
// TODO: png
BenchDef {
dir: &["tga", "testsuite"],
files: &[
"cbw8.tga",
"ctc24.tga",
"ubw8.tga",
"utc24.tga",
],
format: ImageFormat::Tga,
},
BenchDef {
dir: &["tiff", "testsuite"],
files: &[
"hpredict.tiff",
"hpredict_packbits.tiff",
"mandrill.tiff",
"rgb-3c-16b.tiff",
],
format: ImageFormat::Tiff,
},<|fim▁hole|> files: &[
"simple-gray.webp",
"simple-rgb.webp",
"vp8x-gray.webp",
"vp8x-rgb.webp",
],
format: ImageFormat::WebP,
},
];
for bench in BENCH_DEFS {
bench_load(c, bench);
}
}
criterion_group!(benches, load_all);
criterion_main!(benches);
fn bench_load(c: &mut Criterion, def: &BenchDef) {
let group_name = format!("load-{:?}", def.format);
let mut group = c.benchmark_group(&group_name);
let paths = IMAGE_DIR.iter().chain(def.dir);
for file_name in def.files {
let path: path::PathBuf = paths.clone().chain(iter::once(file_name)).collect();
let buf = fs::read(path).unwrap();
group.bench_function(file_name.to_owned(), |b| b.iter(|| {
image::load_from_memory_with_format(&buf, def.format).unwrap();
}));
}
}
const IMAGE_DIR: [&'static str; 3] = [".", "tests", "images"];<|fim▁end|>
|
BenchDef {
dir: &["webp", "images"],
|
<|file_name|>fmt.rs<|end_file_name|><|fim▁begin|>#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::fmt::radix;
use core::fmt::RadixFmt;
use core::fmt::Radix;
// #[derive(Clone, Copy, PartialEq)]
// #[unstable(feature = "core",
// reason = "may be renamed or move to a different module")]
// pub struct Radix {
// base: u8,
// }
// #[derive(Copy, Clone)]
// pub struct RadixFmt<T, R>(T, R);
// pub fn radix<T>(x: T, base: u8) -> RadixFmt<T, Radix> {
// RadixFmt(x, Radix::new(base))
// }
// macro_rules! radix_fmt {
// ($T:ty as $U:ty, $fmt:ident) => {
// #[stable(feature = "rust1", since = "1.0.0")]
// impl fmt::Debug for RadixFmt<$T, Radix> {
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// fmt::Display::fmt(self, f)
// }
// }
// #[stable(feature = "rust1", since = "1.0.0")]
// impl fmt::Display for RadixFmt<$T, Radix> {
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// match *self { RadixFmt(ref x, radix) => radix.$fmt(*x as $U, f) }
// }
// }
// }
// }
// macro_rules! int_base {
// ($Trait:ident for $T:ident as $U:ident -> $Radix:ident) => {
// #[stable(feature = "rust1", since = "1.0.0")]
// impl fmt::$Trait for $T {
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// $Radix.fmt_int(*self as $U, f)
// }
// }
// }
// }
// macro_rules! debug {
// ($T:ident) => {
// #[stable(feature = "rust1", since = "1.0.0")]
// impl fmt::Debug for $T {
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// fmt::Display::fmt(self, f)
// }
// }
// }
// }
// macro_rules! integer {
// ($Int:ident, $Uint:ident) => {
// int_base! { Display for $Int as $Int -> Decimal }
// int_base! { Binary for $Int as $Uint -> Binary }
// int_base! { Octal for $Int as $Uint -> Octal }
// int_base! { LowerHex for $Int as $Uint -> LowerHex }
// int_base! { UpperHex for $Int as $Uint -> UpperHex }
// radix_fmt! { $Int as $Int, fmt_int }
// debug! { $Int }
//
// int_base! { Display for $Uint as $Uint -> Decimal }
// int_base! { Binary for $Uint as $Uint -> Binary }
// int_base! { Octal for $Uint as $Uint -> Octal }
// int_base! { LowerHex for $Uint as $Uint -> LowerHex }
// int_base! { UpperHex for $Uint as $Uint -> UpperHex }
// radix_fmt! { $Uint as $Uint, fmt_int }
// debug! { $Uint }
// }
// }
// integer! { isize, usize }
// integer! { i8, u8 }
// integer! { i16, u16 }
// integer! { i32, u32 }
// integer! { i64, u64 }
macro_rules! radix_fmt_test_impl {
($T:ty, $value:expr, $base:expr, $s:expr) => (
{
let x: $T = $value;
let base: u8 = $base;
let radixfmt: RadixFmt<$T, Radix> = radix::<$T>(x, base);
let output: String = format!("{:?}", radixfmt);
assert_eq!(output, $s.to_string());
let output: String = format!("{}", radixfmt);
assert_eq!(output, $s.to_string());
}
)
}
macro_rules! radix_fmt_test {
() => {
radix_fmt_test_impl! { i64, 68, 2, "1000100" }
radix_fmt_test_impl! { i64, 68, 3, "2112" }
radix_fmt_test_impl! { i64, 68, 4, "1010" }
radix_fmt_test_impl! { i64, 68, 5, "233" }
radix_fmt_test_impl! { i64, 68, 6, "152" }
radix_fmt_test_impl! { i64, 68, 7, "125" }
radix_fmt_test_impl! { i64, 68, 8, "104" }
radix_fmt_test_impl! { i64, 68, 9, "75" }
radix_fmt_test_impl! { i64, 68, 10, "68" }
radix_fmt_test_impl! { i64, 68, 11, "62" }
radix_fmt_test_impl! { i64, 68, 12, "58" }
radix_fmt_test_impl! { i64, 68, 13, "53" }
radix_fmt_test_impl! { i64, 68, 14, "4c" }
radix_fmt_test_impl! { i64, 68, 15, "48" }
radix_fmt_test_impl! { i64, 68, 16, "44" }
radix_fmt_test_impl! { i64, 68, 17, "40" }
radix_fmt_test_impl! { i64, 68, 18, "3e" }
radix_fmt_test_impl! { i64, 68, 19, "3b" }
radix_fmt_test_impl! { i64, 68, 20, "38" }
radix_fmt_test_impl! { i64, 68, 21, "35" }
radix_fmt_test_impl! { i64, 68, 22, "32" }
radix_fmt_test_impl! { i64, 68, 23, "2m" }
radix_fmt_test_impl! { i64, 68, 24, "2k" }
radix_fmt_test_impl! { i64, 68, 25, "2i" }
radix_fmt_test_impl! { i64, 68, 26, "2g" }
radix_fmt_test_impl! { i64, 68, 27, "2e" }
radix_fmt_test_impl! { i64, 68, 28, "2c" }
radix_fmt_test_impl! { i64, 68, 29, "2a" }
radix_fmt_test_impl! { i64, 68, 30, "28" }
radix_fmt_test_impl! { i64, 68, 31, "26" }
radix_fmt_test_impl! { i64, 68, 32, "24" }
radix_fmt_test_impl! { i64, 68, 33, "22" }
radix_fmt_test_impl! { i64, 68, 34, "20" }<|fim▁hole|> }
#[test]
#[should_panic]
fn fmt_test1() {
radix_fmt_test_impl! { i64, 68, 1, "" }; // panicked at 'the base must be in the range of 2..36: 1'
}
#[test]
fn fmt_test2() {
radix_fmt_test!();
}
#[test]
#[should_panic]
fn fmt_test3() {
radix_fmt_test_impl! { i64, 68, 37, "" }; // panicked at 'the base must be in the range of 2..36: 37'
}
}<|fim▁end|>
|
radix_fmt_test_impl! { i64, 68, 35, "1x" }
radix_fmt_test_impl! { i64, 68, 36, "1w" }
}
|
<|file_name|>switcher.js<|end_file_name|><|fim▁begin|>var EventEmitter = require('events').EventEmitter;
var util = require('util');
var WSProcessor = require('./wsprocessor');
var TCPProcessor = require('./tcpprocessor');
var logger = require('pomelo-logger').getLogger('pomelo', __filename);
var HTTP_METHODS = [
'GET', 'POST', 'DELETE', 'PUT', 'HEAD'
];
var ST_STARTED = 1;
var ST_CLOSED = 2;
var DEFAULT_TIMEOUT = 90;
/**
* Switcher for tcp and websocket protocol
*
* @param {Object} server tcp server instance from node.js net module
*/
var Switcher = function(server, opts) {
EventEmitter.call(this);
this.server = server;
this.wsprocessor = new WSProcessor();
this.tcpprocessor = new TCPProcessor(opts.closeMethod);
this.id = 1;
this.timers = {};
this.timeout = opts.timeout || DEFAULT_TIMEOUT;
this.setNoDelay = opts.setNoDelay;
this.server.on('connection', this.newSocket.bind(this));
this.wsprocessor.on('connection', this.emit.bind(this, 'connection'));
this.tcpprocessor.on('connection', this.emit.bind(this, 'connection'));
this.state = ST_STARTED;
};
util.inherits(Switcher, EventEmitter);
module.exports = Switcher;
Switcher.prototype.newSocket = function(socket) {
if(this.state !== ST_STARTED) {
return;
}
// if set connection timeout
if(!!this.timeout) {
var timer = setTimeout(function() {
      logger.warn('connection timed out without communication, the remote ip is %s && port is %s', socket.remoteAddress, socket.remotePort);
socket.destroy();
}, this.timeout * 1000);
this.timers[this.id] = timer;
socket.id = this.id++;
}
var self = this;
socket.once('close', function() {
if (!!socket.id) {
clearTimeout(self.timers[socket.id]);
delete self.timers[socket.id];
}
});
socket.once('data', function(data) {
if(!!socket.id) {
clearTimeout(self.timers[socket.id]);
delete self.timers[socket.id];
}
if(isHttp(data)) {
processHttp(self, self.wsprocessor, socket, data);
} else {
if(!!self.setNoDelay) {<|fim▁hole|> });
};
Switcher.prototype.close = function() {
if(this.state !== ST_STARTED) {
return;
}
this.state = ST_CLOSED;
this.wsprocessor.close();
this.tcpprocessor.close();
};
var isHttp = function(data) {
var head = data.toString('utf8', 0, 4);
for(var i=0, l=HTTP_METHODS.length; i<l; i++) {
if(head.indexOf(HTTP_METHODS[i]) === 0) {
return true;
}
}
return false;
};
var processHttp = function(switcher, processor, socket, data) {
processor.add(socket, data);
};
var processTcp = function(switcher, processor, socket, data) {
processor.add(socket, data);
};<|fim▁end|>
|
socket.setNoDelay(true);
}
processTcp(self, self.tcpprocessor, socket, data);
}
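// A minimal wiring sketch (illustrative; assumes a plain server from
// node's net module, as the constructor comment above expects):
//
//   var net = require('net');
//   var server = net.createServer();
//   var switcher = new Switcher(server, {timeout: 30, setNoDelay: true});
//   switcher.on('connection', function(socket) { /* handshake complete */ });
//   server.listen(3010);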
|
<|file_name|>log.rs<|end_file_name|><|fim▁begin|>//!
//! Global, mutable log.
//!
//! You may think, "Global and mutable?" and wonder how the borrow checker doesn't completely have a meltdown.
//!
//! Well I do too.
//!
// What can I say, I think `GlobalLog` is prettier than GLOBAL_LOG
#![allow(non_upper_case_globals)]
///
/// How to use it
///
/// Import log and use the macro `log!()` and pass in the string/RGB tuple. Anything else and I'm pretty sure it panics.
///
///
/// How it actually works
///
/// So here's the thought process of this whole thing. I realized that I need some way for various detached objects
/// to communicate what they are doing directly to the player without creating a mess of spaghetti. The best idea I came up with
/// was to take some sort of global singleton that can be mutated and then read from the renderer to be drawn to the screen.
///
/// So here's the breakdown.
///
/// A Mutex is a "mutual exclusion primitive useful for protecting shared data", which is essentially just an RAII construct
/// that guarantees that the resource is available to any function that may access the object. In order to access the static reference,
/// we must `lock` the mutex, which simply blocks the current thread until the mutex is able to be acquired.
/// Since we are single-threaded, this is a non-issue in terms of runtime.
///
/// This mutex then provides a static, mutable reference to the log which then can have its methods called. After the log is done being used,
/// the reference to the log must be dropped. This does not remove the static reference, but merely allows the mutex to be freed and thus
/// used later by another resource.
///
/// Note that Rust mutexes can be poisoned. Essentially, if I lock the mutex and then panic the thread, that mutex is no longer considered safe and
/// thus poisoned, which is why it must be unwrapped. Since this is single-threaded, if the thread panics the game doesn't function at all, meaning
/// this, in theory, is not an issue.
///
/// However, in order to *even expose a mutex to the rest of the program via a static reference at all*, we need the lazy_static
/// macro, which is the final key to getting it all working. And for fluff, non_upper_case_globals because.
///
/// Then it's slapped into a macro.
///
use std::sync::Mutex;
use core::renderer::RGB;
///
/// A log just wraps some strings with a color value to be printed and look pretty
///
#[derive(Default)]
pub struct Log {
pub data: Vec<(&'static str, RGB, u32)>,
}
impl Log {
///
/// Get a new, empty log
///
pub fn new() -> Self {
Log { data: vec![] }
}
///
/// Get a range of the last n items added to the log
///
    /// The intention of this is that the range is then iterated over, and then used as indices
/// to read the log data
///
pub fn get_last_n_messages(&self, n: usize) -> &[(&'static str, RGB, u32)] {
// Basically if there are n items in the log, but we want to get > n items, we
// should make sure rust doesn't have some sort of underflow error
if n > self.data.len() {
return &self.data[0..self.data.len()];
} else {
return &self.data[(self.data.len() - n)..self.data.len()];<|fim▁hole|> /// Push new data onto the log stack
///
pub fn push(&mut self, message: (&'static str, RGB, u32)) {
// If there are elements in the log
if self.data.len() > 0 {
// If the last message string is the same, update the counter instead of pushing.
let last_pos = self.data.len() - 1;
if &self.data[last_pos].0 == &message.0 {
self.data[last_pos].2 += 1;
return;
}
}
// Push message
self.data.push(message);
}
}
// Make a mutex available
lazy_static! {
pub static ref GlobalLog: Mutex<Log> = Mutex::new(Log::new());
}
/// This macro automates the log mutex process. This whole thing is pretty crazy
/// Obviously if any panics occur here then the mutex becomes poisoned
#[macro_export]
macro_rules! log {
($msg:expr, $col:expr) => {{
        // Import its own lazy static ref
use self::log::GlobalLog;
// Lock the mutex
let mut log = GlobalLog.lock().unwrap();
// Push the message
        // Assumes a well-formed (&'static str, RGB) pair is supplied as the macro arguments
log.push(($msg, $col, 1));
// Drop the reference
drop(log);
}};
}
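// A minimal usage sketch (hypothetical call site; the exact `RGB`
// constructor in core::renderer is assumed here):
//
//     log!("The goblin hits you.", RGB::new(255, 0, 0));
//
// Reading the log back, e.g. from the renderer, follows the same
// lock/use/drop pattern:
//
//     let log = GlobalLog.lock().unwrap();
//     for &(msg, color, count) in log.get_last_n_messages(5) { /* draw */ }
//     drop(log);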
// Macro for debugging information
#[macro_export]
macro_rules! debugln {
($id:expr, $msg:expr) => {{
use core::init::debug;
if debug() {
println!("[{}] {}", $id, $msg);
}
}};
}<|fim▁end|>
|
}
}
///
|
<|file_name|>ThridPartyStorageServiceImpl.java<|end_file_name|><|fim▁begin|>package com.enseirb.telecom.dngroup.dvd2c.service.impl;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.inject.Inject;
import javax.ws.rs.core.UriBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import com.enseirb.telecom.dngroup.dvd2c.modeldb.Document;
import com.enseirb.telecom.dngroup.dvd2c.modeldb.ThirdPartyConfiguration;
import com.enseirb.telecom.dngroup.dvd2c.repository.DocumentRepository;
import com.enseirb.telecom.dngroup.dvd2c.repository.ThirdPartyStorageConfigRepository;
import com.enseirb.telecom.dngroup.dvd2c.service.ThridPartyStorageService;
@Service
public class ThridPartyStorageServiceImpl implements ThridPartyStorageService {
private static final Logger LOGGER = LoggerFactory
.getLogger(ThridPartyStorageServiceImpl.class);
@Inject
DocumentRepository docRepo;
@Inject
ThirdPartyStorageConfigRepository repo;
private List<URI> generateRedirectUri(Document doc) {
List<URI> res = new ArrayList<URI>();
for (ThirdPartyConfiguration conf : repo.findAll()) {
if (thirdPartyDeployable(conf, doc.getType())) {
res.add(UriBuilder.fromPath(conf.getBaseUrl())
.path("" + doc.getId()).build());
}
}
return res;
}
@SuppressWarnings("unchecked")
@Override
public List<URI> generateRedirectUri(String contentId) {
Document doc = docRepo.findOne(Integer.valueOf(contentId));
if (doc != null) {
return generateRedirectUri(doc);
} else
return Collections.EMPTY_LIST;
}
/*
* (non-Javadoc)
*
* @see
* com.enseirb.telecom.dngroup.dvd2c.service.ThridPartyStorage#register(
* java.lang.String, java.lang.String)
*/
@Override
public void register(String baseUrL, String name) {
if (repo.findByBaseUrl(baseUrL) == null) {
ThirdPartyConfiguration conf = new ThirdPartyConfiguration();<|fim▁hole|> repo.save(conf);
} else {
LOGGER.debug("third party already registered");
}
}
private boolean thirdPartyDeployable(ThirdPartyConfiguration conf,
String type) {
return true;
}
}<|fim▁end|>
|
conf.setBaseUrl(baseUrL);
conf.setName(name);
|
<|file_name|>testapp_spec.js<|end_file_name|><|fim▁begin|>/*global jasmine*/
var excludes = [<|fim▁hole|> "map_lazy_init.html",
"map-lazy-load.html",
"marker_with_dynamic_position.html",
"marker_with_dynamic_address.html",
"marker_with_info_window.html",
"places-auto-complete.html"
];
function using(values, func){
for (var i = 0, count = values.length; i < count; i++) {
if (Object.prototype.toString.call(values[i]) !== '[object Array]') {
values[i] = [values[i]];
}
func.apply(this, values[i]);
jasmine.currentEnv_.currentSpec.description += ' (with using ' + values[i].join(', ') + ')';
}
}
describe('testapp directory', function() {
'use strict';
//var urls = ["aerial-rotate.html", "aerial-simple.html", "hello_map.html", "map_control.html"];
var files = require('fs').readdirSync(__dirname + "/../../testapp");
var urls = files.filter(function(filename) {
return filename.match(/\.html$/) && excludes.indexOf(filename) === -1;
});
console.log('urls', urls);
using(urls, function(url){
it('testapp/'+url, function() {
browser.get(url);
browser.wait( function() {
return browser.executeScript( function() {
var el = document.querySelector("map");
var scope = angular.element(el).scope();
//return scope.map.getCenter().lat();
return scope.map.getCenter();
}).then(function(result) {
return result;
});
}, 5000);
//element(by.css("map")).evaluate('map.getCenter().lat()').then(function(lat) {
// console.log('lat', lat);
// expect(lat).toNotEqual(0);
//});
browser.manage().logs().get('browser').then(function(browserLog) {
(browserLog.length > 0) && console.log('log: ' + require('util').inspect(browserLog));
expect(browserLog).toEqual([]);
});
});
});
});<|fim▁end|>
|
"map_events.html",
|
<|file_name|>time.rs<|end_file_name|><|fim▁begin|>use std::io::{self, Write};
use std::time::{Duration, Instant};
/// RAII timer to measure how long phases take.
#[derive(Debug)]
pub struct Timer<'a> {
output: bool,
name: &'a str,
start: Instant,
}
impl<'a> Timer<'a> {
/// Creates a Timer with the given name, and starts it. By default,
/// will print to stderr when it is `drop`'d
pub fn new(name: &'a str) -> Self {
Timer {
output: true,
name,
start: Instant::now(),
}
}
/// Sets whether or not the Timer will print a message
/// when it is dropped.
pub fn with_output(mut self, output: bool) -> Self {
self.output = output;
self
}
/// Returns the time elapsed since the timer's creation
pub fn elapsed(&self) -> Duration {
Instant::now() - self.start
}
fn print_elapsed(&mut self) {
if self.output {
let elapsed = self.elapsed();
let time = (elapsed.as_secs() as f64) * 1e3 +
(elapsed.subsec_nanos() as f64) / 1e6;<|fim▁hole|> let stderr = io::stderr();
// Arbitrary output format, subject to change.
writeln!(stderr.lock(), " time: {:>9.3} ms.\t{}", time, self.name)
.expect("timer write should not fail");
}
}
}
impl<'a> Drop for Timer<'a> {
fn drop(&mut self) {
self.print_elapsed();
}
}<|fim▁end|>
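// A usage sketch (illustrative): the timer reports on drop unless output is
// disabled, so measuring a phase is just a matter of holding a value in scope.
//
//     let _t = Timer::new("codegen");                     // prints on drop
//     let quiet = Timer::new("probe").with_output(false);
//     let _d: Duration = quiet.elapsed();                 // read silently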
| |
<|file_name|>MesosStreamMessageTypes.ts<|end_file_name|><|fim▁begin|>export const AGENT_ADDED = "AGENT_ADDED";
export const AGENT_REMOVED = "AGENT_REMOVED";
export const FRAMEWORK_ADDED = "FRAMEWORK_ADDED";
export const FRAMEWORK_REMOVED = "FRAMEWORK_REMOVED";<|fim▁hole|>export const GET_MASTER = "GET_MASTER";
export const GET_STATE = "GET_STATE";
export const GET_TASKS = "GET_TASKS";
export const SUBSCRIBED = "SUBSCRIBED";
export const TASK_ADDED = "TASK_ADDED";
export const TASK_UPDATED = "TASK_UPDATED";
export const UNKNOWN = "UNKNOWN";<|fim▁end|>
|
export const FRAMEWORK_UPDATED = "FRAMEWORK_UPDATED";
export const GET_AGENTS = "GET_AGENTS";
export const GET_EXECUTORS = "GET_EXECUTORS";
export const GET_FRAMEWORKS = "GET_FRAMEWORKS";
|
<|file_name|>buglist.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2008,2011 Lanedo GmbH
#
# Author: Tim Janik
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, os, re, urllib, csv
pkginstall_configvars = {
'PACKAGE' : 'dummy', 'PACKAGE_NAME' : 'dummy', 'VERSION' : '0.0', 'REVISION' : 'uninstalled',
#@PKGINSTALL_CONFIGVARS_IN24LINES@ # configvars are substituted upon script installation
}
# TODO:
# - support mixing in comments.txt which has "bug# person: task"
bugurls = (
('gb', 'http://bugzilla.gnome.org/buglist.cgi?bug_id='),
('gnome', 'http://bugzilla.gnome.org/buglist.cgi?bug_id='),
('fd', 'https://bugs.freedesktop.org/buglist.cgi?bug_id='),
('freedesktop', 'https://bugs.freedesktop.org/buglist.cgi?bug_id='),
('mb', 'https://bugs.maemo.org/buglist.cgi?bug_id='),
('maemo', 'https://bugs.maemo.org/buglist.cgi?bug_id='),
('nb', 'https://projects.maemo.org/bugzilla/buglist.cgi?bug_id='),
('nokia', 'https://projects.maemo.org/bugzilla/buglist.cgi?bug_id='),
('gcc', 'http://gcc.gnu.org/bugzilla/buglist.cgi?bug_id='),
('libc', 'http://sources.redhat.com/bugzilla/buglist.cgi?bug_id='),
('moz', 'https://bugzilla.mozilla.org/buglist.cgi?bug_id='),
('mozilla', 'https://bugzilla.mozilla.org/buglist.cgi?bug_id='),
('xm', 'http://bugzilla.xamarin.com/buglist.cgi?id='),
('xamarin', 'http://bugzilla.xamarin.com/buglist.cgi?id='),
)
# URL authentication handling
def auth_urls():
import ConfigParser, os, re
cp = ConfigParser.SafeConfigParser()
cp.add_section ('authentication-urls')
cp.set ('authentication-urls', 'urls', '')
cp.read (os.path.expanduser ('~/.urlrc'))
urlstr = cp.get ('authentication-urls', 'urls') # space separated url list
urls = re.split ("\s*", urlstr.strip()) # list urls
urls = [u for u in urls if u] # strip empty urls
global auth_urls; auth_urls = lambda : urls # cache result for the future
return urls
def add_auth (url):
for ai in auth_urls():
prefix = re.sub ('//[^:/@]*:[^:/@]*@', '//', ai)
if url.startswith (prefix):
pl = len (prefix)
return ai + url[pl:]
return url
# carry out online bug queries
def bug_summaries (buglisturl):<|fim▁hole|> query = buglisturl + '&ctype=csv' # buglisturl.replace (',', '%2c')
query = add_auth (query)
f = urllib.urlopen (query)
csvdata = f.read()
f.close()
# read CSV lines
reader = csv.reader (csvdata.splitlines (1))
# parse head to interpret columns
col_bug_id = -1
col_description = -1
header = reader.next()
i = 0
for col in header:
col = col.strip()
if col == 'bug_id':
col_bug_id = i
if col == 'short_short_desc':
col_description = i
elif col_description < 0 and col == 'short_desc':
col_description = i
i = i + 1
if col_bug_id < 0:
print >>sys.stderr, 'Failed to identify bug_id from CSV data'
sys.exit (11)
if col_description < 0:
print >>sys.stderr, 'Failed to identify description columns from CSV data'
sys.exit (12)
# parse bug list
result = []
summary = ''
for row in reader:
bug_number = row[col_bug_id]
description = row[col_description]
result += [ (bug_number, description) ]
return result
# parse bug numbers and list bugs
def read_handle_bugs (config, url):
lines = sys.stdin.read()
# print >>sys.stderr, 'Using bugzilla URL: %s' % (bz, url)
for line in [ lines ]:
# find all bug numbers
bugs = re.findall (r'\b[0-9]+\b', line)
# int-convert, dedup and sort bug numbers
ibugs = []
if bugs:
bught = {}
for b in bugs:
b = int (b)
if not b or bught.has_key (b): continue
bught[b] = True
ibugs += [ b ]
del bugs
if config.get ('sort', False):
ibugs.sort()
# construct full query URL
fullurl = url + ','.join ([str (b) for b in ibugs])
# print fullurl
if len (ibugs) and config.get ('show-query', False):
print fullurl
# print bug summaries
if len (ibugs) and config.get ('show-list', False):
bught = {}
for bug in bug_summaries (fullurl):
bught[int (bug[0])] = bug[1] # bug summaries can have random order
for bugid in ibugs: # print bugs in user provided order
iid = int (bugid)
if bught.has_key (iid):
desc = bught[iid]
if len (desc) >= 70:
desc = desc[:67].rstrip() + '...'
print "% 7u - %s" % (iid, desc)
else:
print "% 7u (NOBUG)" % iid
def help (version = False, verbose = False):
print "buglist %s (%s, %s)" % (pkginstall_configvars['VERSION'],
pkginstall_configvars['PACKAGE_NAME'], pkginstall_configvars['REVISION'])
print "Redistributable under GNU GPLv3 or later: http://gnu.org/licenses/gpl.html"
if version: # version *only*
return
print "Usage: %s [options] <BUG-TRACKER> " % os.path.basename (sys.argv[0])
print "List or download bugs from a bug tracker. Bug numbers are read from stdin."
if not verbose:
print "Use the --help option for verbose usage information."
return
# 12345678911234567892123456789312345678941234567895123456789612345678971234567898
print "Options:"
print " -h, --help Print verbose help message."
print " -v, --version Print version information."
print " -U Keep bug list unsorted."
print " --bug-tracker-list List supported bug trackers."
print "Authentication:"
print " An INI-style config file is used to associate bugzilla URLs with account"
print " authentication for secured installations. The file should be unreadable"
print " by others to keep passwords secret, e.g. with: chmod 0600 ~/.urlrc"
print " A sample ~/.urlrc might look like this:"
print "\t# INI-style config file for URLs"
print "\t[authentication-urls]"
print "\turls =\thttps://USERNAME:[email protected]/bugzilla"
print "\t\thttp://BLOGGER:[email protected]/BLOGGER/xmlrpc.php"
def main ():
import getopt
# default configuration
config = {
'sort' : True,
'show-query' : True,
'show-list' : True,
}
# parse options
try:
options, args = getopt.gnu_getopt (sys.argv[1:], 'vhU', [ 'help', 'version', 'bug-tracker-list' ])
except getopt.GetoptError, err:
print >>sys.stderr, "%s: %s" % (os.path.basename (sys.argv[0]), str (err))
help()
sys.exit (126)
for arg, val in options:
if arg == '-h' or arg == '--help': help (verbose=True); sys.exit (0)
if arg == '-v' or arg == '--version': help (version=True); sys.exit (0)
if arg == '-U': config['sort'] = False
if arg == '--bug-tracker-list':
print "Bug Tracker:"
for kv in bugurls:
print " %-20s %s" % kv
sys.exit (0)
if len (args) < 1:
print >>sys.stderr, "%s: Missing bug tracker argument" % os.path.basename (sys.argv[0])
help()
sys.exit (126)
trackerdict = dict (bugurls)
if not trackerdict.has_key (args[0]):
print >>sys.stderr, "%s: Unknown bug tracker: %s" % (os.path.basename (sys.argv[0]), args[0])
sys.exit (10)
# handle bugs
read_handle_bugs (config, trackerdict[args[0]])
if __name__ == '__main__':
main()<|fim▁end|>
|
if not buglisturl:
return []
# Bugzilla query to use
|
<|file_name|>test_image.py<|end_file_name|><|fim▁begin|>def test_image_export_reference(exporters, state, bpy_image_default, gltf_image_default):
state['settings']['images_data_storage'] = 'REFERENCE'
gltf_image_default['uri'] = '../filepath.png'
output = exporters.ImageExporter.export(state, bpy_image_default)
assert output == gltf_image_default
def test_image_export_embed(exporters, state, bpy_image_default, gltf_image_default):
state['settings']['images_data_storage'] = 'EMBED'
gltf_image_default['uri'] = (
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACElEQVR42gMAAAAAAW'
'/dyZEAAAAASUVORK5CYII='
)
gltf_image_default['mimeType'] = 'image/png'
output = exporters.ImageExporter.export(state, bpy_image_default)
assert output == gltf_image_default
def test_image_export_embed_glb(exporters, state, bpy_image_default, gltf_image_default):
state['settings']['images_data_storage'] = 'EMBED'
state['settings']['gltf_export_binary'] = True
gltf_image_default['mimeType'] = 'image/png'
gltf_image_default['bufferView'] = 'bufferView_buffer_Image_0'
output = exporters.ImageExporter.export(state, bpy_image_default)
for ref in state['references']:
ref.source[ref.prop] = ref.blender_name
assert output == gltf_image_default
def test_image_to_data_uri(exporters, bpy_image_default):
image_data = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\r'
b'IHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\x08'
b'IDATx\xda\x03\x00\x00\x00\x00\x01o\xdd\xc9\x91\x00\x00\x00\x00'
b'IEND\xaeB`\x82'
)
assert exporters.ImageExporter.image_to_data_uri(bpy_image_default) == image_data
def test_image_check(exporters, state, bpy_image_default):
assert exporters.ImageExporter.check(state, bpy_image_default)
def test_image_default(exporters, state, bpy_image_default):
assert exporters.ImageExporter.default(state, bpy_image_default) == {
'name': 'Image',
'uri': '',
}
def test_image_check_0_x(exporters, state, bpy_image_default):
bpy_image_default.size = [0, 1]
assert exporters.ImageExporter.check(state, bpy_image_default) is not True
<|fim▁hole|>
def test_image_check_0_y(exporters, state, bpy_image_default):
bpy_image_default.size = [1, 0]
assert exporters.ImageExporter.check(state, bpy_image_default) is not True
def test_image_check_type(exporters, state, bpy_image_default):
bpy_image_default.type = 'NOT_IMAGE'
assert exporters.ImageExporter.check(state, bpy_image_default) is not True<|fim▁end|>
| |
<|file_name|>views.py<|end_file_name|><|fim▁begin|># -*- coding: utf8 -*-
from django.conf import settings
from django.contrib import auth
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_POST
from bootcamp.decorators import ajax_required
from registration.users import UserModel
from django.contrib.auth.models import User
from bootcamp.feeds.models import Feed
from django.core.context_processors import csrf
from django.template.loader import render_to_string
from django.shortcuts import render, redirect, get_object_or_404
import random
import json
FEEDS_NUM_PAGES = 20
MAJOR_VERSION = 0
MID_VERSION = 1
MIN_VERSION = 3
NOTE = """
Update notes:
1. Deleting comments or posts, and removing likes, now deduct points to prevent experience farming;
2. Added a profile-editing feature.
"""
URL = "http://nqzx.net/media/ads/nqzx.apk"
def check_version(version):
    # True when the client's version is older than the server's. A plain
    # field-by-field elif chain mis-orders e.g. client 0.2.0 against server
    # 0.1.3, so compare the (major, mid, min) triples lexicographically.
    parts = [int(x) for x in version.split('.')]
    return [MAJOR_VERSION, MID_VERSION, MIN_VERSION] > parts
def get_level(reputation):
if not reputation:
        return 1
if reputation < 5:
return 1
elif reputation < 15:
return 2
elif reputation < 30:
return 3
elif reputation < 50:
return 4
elif reputation < 100:
return 5
elif reputation < 200:
return 6
elif reputation < 500:
return 7
elif reputation < 1000:
return 8
elif reputation < 2000:
return 9
elif reputation < 3000:
return 10
elif reputation < 6000:
return 11
elif reputation < 10000:
return 12
elif reputation < 18000:
return 13
elif reputation < 30000:
return 14
elif reputation < 60000:
return 15
elif reputation < 100000:
return 16
elif reputation < 300000:
return 17
else:
return 18
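# e.g. get_level(None) == 1, get_level(120) == 6, get_level(250000) == 17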
@require_POST
@ajax_required
def login(request):
username = request.POST.get('account')
password = request.POST.get('password')
result = {"status": False, "data":""}
if not username or not password:
result = {"status": False, "data":"未收到用户名或密码!"}
return HttpResponse(json.dumps(result), content_type="application/json")
if username=="" or username.isspace():
result = {"status": False, "data":"用户名不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
if password=="" or password.isspace():
result = {"status": False, "data":"密码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
user = auth.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
auth.login(request, user)
result = {"status": True, "data": {"id": user.id, "email": user.email, \
"location": user.profile.location, "mobile": user.profile.mobile, "reputation": \
user.profile.reputation,"signdate": user.profile.signdate}}
else:
result = {"status": False, "data":"["+username+"]已被暂时禁用"}
else:
result = {"status": False, "data":"用户名或密码不正确,请重试"}
return HttpResponse(json.dumps(result), content_type="application/json")
@require_POST
@ajax_required
def reg(request):
username = request.POST.get('account')
password = request.POST.get('password')
email = request.POST.get('email')
result = {"status": False, "data":""}
if not username or not password or not email:
result = {"status": False, "data":"未收到用户名、密码或者用户名!"}
return HttpResponse(json.dumps(result), content_type="application/json")
if username=="" or username.isspace():
result = {"status": False, "data":"用户名不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
if password=="" or password.isspace():
result = {"status": False, "data":"密码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
if email=="" or email.isspace():
result = {"status": False, "data":"邮箱不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
# clean data
existing = UserModel().objects.filter(username__iexact=username)
if existing.exists():
result = {"status": False, "data":"用户名已经存在"}
return HttpResponse(json.dumps(result), content_type="application/json")
if UserModel().objects.filter(email__iexact=email):
result = {"status": False, "data":"邮箱已经存在"}
return HttpResponse(json.dumps(result), content_type="application/json")
user = UserModel().objects.create_user(username, email, password)
user.is_active = True
user.save()
result = {"status": True, "data": {"id": user.id, "email": user.email, \
"location": user.profile.location, "mobile": user.profile.mobile, "reputation": \
user.profile.reputation,"signdate": user.profile.signdate}}
return HttpResponse(json.dumps(result), content_type="application/json")
@require_POST
@ajax_required
def get_state(request):
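    """Return the logged-in user's profile state as JSON."""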
    user = request.user
    if not user.is_authenticated():
        # AnonymousUser has no profile; fail gracefully instead of raising.
        return HttpResponse(json.dumps({"status": False, "data": "not authenticated"}),
                            content_type="application/json")
    state = {"id": user.id, "username": user.username, "email": user.email,
             "location": user.profile.location, "mobile": user.profile.mobile,
             "reputation": user.profile.reputation, "first_name": user.first_name,
             "sex": user.profile.sex, "signdate": user.profile.signdate}
return HttpResponse(json.dumps(state), content_type="application/json")
@require_POST
@ajax_required
def set_state(request):
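    """Update profile fields for the user id given in the POST body; only
    fields actually present in the request are changed."""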
result = {"status": False, "data": {}}
userid = request.POST.get('userid')
    # filter().first() yields None for a bad id instead of raising DoesNotExist
    user = User.objects.filter(pk=userid).first()
    if not user:
        # the original referenced an undefined `state` here; return the result envelope
        return HttpResponse(json.dumps(result), content_type="application/json")
first_name = request.POST.get('first_name')
location = request.POST.get('location')
mobile = request.POST.get('mobile')
reputation = request.POST.get('reputation')
sex = request.POST.get('sex')
signdate = request.POST.get('signdate')
if first_name:
        user.first_name = first_name
if location:
user.profile.location = location
if mobile:
user.profile.mobile = mobile
if reputation:
user.profile.reputation = reputation
if sex:
user.profile.sex = sex
if signdate:
user.profile.signdate = signdate
    user.profile.save()  # the profile is a separate row; user.save() alone may not persist it
    user.save()
result = {"status": True, "data": {"first_name": first_name, "sex": sex, \
"location":location,"mobile":mobile,"reputation":reputation,"signdate":signdate}}
return HttpResponse(json.dumps(result), content_type="application/json")
def get_feeds(request):
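    """Render a single feed (when a feed_id is POSTed) or the first page of
    all feeds as HTML fragments for the mobile client."""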
page = 1
    feed_id = request.POST.get("feed_id")  # .get() so a missing id falls through to the list branch
csrf_token = unicode(csrf(request)['csrf_token'])
html = u''
if feed_id:
feed = Feed.objects.get(pk=feed_id)
html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
'feed': feed,
'user': request.user,
'csrf_token': csrf_token,
'lvl': get_level(feed.user.profile.reputation),
})
)
else:
feeds = Feed.get_feeds()
paginator = Paginator(feeds, FEEDS_NUM_PAGES)
feeds = paginator.page(page)
for feed in feeds:
html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
'feed': feed,
'user': request.user,
'csrf_token': csrf_token,
'lvl': get_level(feed.user.profile.reputation),
})
)
return HttpResponse(html)
@ajax_required
def checkupdate(request):
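    """Report whether the client's POSTed `version` is outdated, together
    with the release notes (NOTE) and the download URL for the new APK."""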
version = request.POST.get('version')
ret = {"status": check_version(version), "note": NOTE, "url": URL}
return HttpResponse(json.dumps(ret), content_type="application/json")
def _html_feeds(last_feed, user, csrf_token, feed_source='all'):
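    """Render every feed newer than `last_feed`, optionally restricted to
    one user's feeds, into a single HTML string."""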
feeds = Feed.get_feeds_after(last_feed)
if feed_source != 'all':
feeds = feeds.filter(user__id=feed_source)
html = u''
for feed in feeds:
html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
'feed': feed,
'user': user,
'csrf_token': csrf_token,
'lvl': get_level(feed.user.profile.reputation),
})
)
return html
def post(request):
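    """Create a feed from the POSTed text (capped at 255 chars), award 3
    reputation, and return HTML for feeds newer than `last_feed`."""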
last_feed = request.POST.get('last_feed')
user = request.user
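    # pk 283 appears to be a house account; its posts get re-attributed to a
    # random user (pks 318-367 are magic numbers preserved from the original).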
    csrf_token = unicode(csrf(request)['csrf_token'])
    feed = Feed()
    if user.id == 283:
        # fetch the random stand-in only when it is actually needed, instead
        # of hitting the database on every post
        feed.user = User.objects.get(pk=random.randint(318, 367))
    else:
        feed.user = user
post = request.POST['post']
post = post.strip()
if len(post) > 0:
feed.post = post[:255]
user.profile.reputation += 3
user.save()
feed.save()
html = _html_feeds(last_feed, user, csrf_token)
return HttpResponse(html)
def load(request):
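    """Paginated feed loading for infinite scroll; returns an empty body
    when the requested page is past the last one."""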
from_feed = request.GET.get('from_feed')
page = request.GET.get('page')
active = request.GET.get('active')
feed_source = request.GET.get('feed_source')
if active and active != 'all':
all_feeds = Feed.get_feeds(from_feed, active)
else:
all_feeds = Feed.get_feeds(from_feed)
if feed_source != 'all':
all_feeds = all_feeds.filter(user__id=feed_source)
paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
try:
feeds = paginator.page(page)
except EmptyPage:
feeds = []
html = u''
csrf_token = unicode(csrf(request)['csrf_token'])
for feed in feeds:
html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
'feed': feed,
'user': request.user,
'lvl': get_level(feed.user.profile.reputation),<|fim▁hole|> 'csrf_token': csrf_token
})
)
return HttpResponse(html)
def load_new(request):
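    """Return HTML for feeds created after `last_feed` (client polling)."""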
last_feed = request.GET.get('last_feed')
user = request.user
csrf_token = unicode(csrf(request)['csrf_token'])
html = _html_feeds(last_feed, user, csrf_token)
return HttpResponse(html)
def comment(request):
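    """POST: attach a comment (255-char cap) to a feed, fire notifications,
    and award 2 reputation.  GET: render the feed's comment list."""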
if request.method == 'POST':
feed_id = request.POST['feed']
feed = Feed.objects.get(pk=feed_id)
post = request.POST['post']
post = post.strip()
if len(post) > 0:
post = post[:255]
user = request.user
feed.comment(user=user, post=post)
user.profile.notify_commented(feed)
user.profile.notify_also_commented(feed)
user.profile.reputation += 2
user.save()
return render(request, 'app/partial_feed_comments.html', {'feed': feed})
else:
feed_id = request.GET.get('feed')
feed = Feed.objects.get(pk=feed_id)
return render(request, 'app/partial_feed_comments.html', {'feed': feed})
def track_comments(request):
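    """Return the up-to-date comment list for one feed (mirrors the GET
    branch of comment())."""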
feed_id = request.GET.get('feed')
feed = Feed.objects.get(pk=feed_id)
return render(request, 'app/partial_feed_comments.html', {'feed': feed})<|fim▁end|>
| |
<|file_name|>nofile.go<|end_file_name|><|fim▁begin|>package main
import (
"log"
"os/exec"
"strconv"
"strings"
)
// Substrings that mark network sockets in lsof's TYPE column.
const (
	t_ipv4 = " IPv4 "
	t_ipv6 = " IPv6 "
	t_unix = " unix "
)
// execLSOF runs `lsof -bw` and sends its combined output on the result
// channel; on failure it logs the error and sends an empty string so the
// receiver is never left blocked.
func execLSOF(result chan string) {
	cmd := exec.Command("lsof", "-bw")
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Println("execLSOF", err)
		result <- ""
		return
	}
	result <- string(out)
}
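// Usage sketch (hypothetical caller, not part of the original file):
//
//	ch := make(chan string, 1)
//	go execLSOF(ch)
//	nconn, nfile, sysnfile := fetchLSOF("1234", <-ch)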
// fetchLSOF parses lsof output for the given PID and returns, as strings,
// the process's open network-connection count, its open-file count, and
// the system-wide open-file total.
func fetchLSOF(pid string, lsofout string) (nonetconn, nofile, sysnofile string) {
s := lsofout
if len(strings.TrimSpace(s)) == 0 {<|fim▁hole|> openfile := 0
opennetconn := 0
lines := strings.Split(s, "\n")
var nline string
for _, line := range lines {
line = strings.TrimSpace(line)
if len(line) > 0 {
sysopenfile += 1
}
if strings.Contains(line, " "+pid+" ") {
nline = line
} else {
continue
}
		// strings.Fields splits on runs of whitespace and drops empty
		// strings, replacing the manual split-and-filter loop.
		fields := strings.Fields(nline)
		if len(fields) > 5 && fields[1] == pid {
openfile += 1
if strings.Count(nline, t_ipv4) > 0 || strings.Count(nline, t_ipv6) > 0 || strings.Count(nline, t_unix) > 0 {
opennetconn += 1
}
}
}
return strconv.Itoa(opennetconn), strconv.Itoa(openfile), strconv.Itoa(sysopenfile)
}<|fim▁end|>
|
return "-2", "-2", "-2"
}
	sysopenfile := -1 // start at -1 so the lsof header line is not counted
|
<|file_name|>run.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
Goal: Implement the application entry point.
@authors:
Andrei Sura <[email protected]>
"""
import argparse
from olass.olass_client import OlassClient
from olass.version import __version__
DEFAULT_SETTINGS_FILE = 'config/settings.py'
def main():
""" Read args """
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--version",
default=False,
action='store_true',
help="Show the version number")
parser.add_argument("-c", "--config",
default=DEFAULT_SETTINGS_FILE,
help="Application config file")
    parser.add_argument('--interactive',
                        # argparse passes option values through as strings,
                        # and any non-empty string (even "false") is truthy;
                        # coerce explicitly to a real bool
                        type=lambda s: str(s).lower() in ('true', '1', 'yes'),
                        default=True,
                        help="When `true` ask for confirmation")
    parser.add_argument('--rows',
                        type=int,  # assumed numeric by rows_per_batch below
                        default=100,
                        help="Number of rows/batch sent to the server")
<|fim▁hole|> args = parser.parse_args()
if args.version:
import sys
print("olass, version {}".format(__version__))
sys.exit()
app = OlassClient(config_file=args.config,
interactive=args.interactive,
rows_per_batch=args.rows)
app.run()
if __name__ == "__main__":
main()<|fim▁end|>
|