prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
---|---|
<|file_name|>randomWalk.js<|end_file_name|><|fim▁begin|>'use strict';
<|fim▁hole|> Util = require('../../shared/util'),
Base = require('./base'),
Action = require('../../action'),
Types = require('./types');
var _parent = Base.prototype;
var RandomWalk = Util.extend(Base, {
properties: ['walkPropability'],
type: Types.RANDOM_WALK,
_walkPropability: 0.3,
create: function(config) {
var me = this;
me.getConfig(config, ['walkPropability']);
_parent.create.apply(me, arguments);
},
decide: function() {
var me = this;
if (Math.random() > me._walkPropability) {
return null;
}
switch (_.random(3)) {
case 0:
return new Action.Move({deltaX: 1});
case 1:
return new Action.Move({deltaX: -1});
case 2:
return new Action.Move({deltaY: 1});
case 3:
return new Action.Move({deltaY: -1});
}
return null;
}
});
module.exports = RandomWalk;<|fim▁end|>
|
var _ = require('underscore'),
|
<|file_name|>GW_RTC_SET_TIME_ZONE_REQ.ts<|end_file_name|><|fim▁begin|>"use strict";
import { GW_FRAME_REQ } from "./common";
export class GW_RTC_SET_TIME_ZONE_REQ extends GW_FRAME_REQ {
// /**
// * Creates an instance of GW_RTC_SET_TIME_ZONE_REQ.
// *
// * @param {string} [TimeZoneName="localtime"] Time zone name, e.g. IANA time zones. See https://github.com/SpiritIT/timezonecomplete/blob/master/doc/API.md for further information.
// * @memberof GW_RTC_SET_TIME_ZONE_REQ
// */
// constructor(readonly TimeZoneName: string = "localtime") {
// super();<|fim▁hole|> // // let timeZoneString = "";
// // // Get the time zone data
// // const tz = TimeZone.zone(this.TimeZoneName);
// // const Jan1st = DateTime.now(tz).startOfYear();
// // const currentYear = Jan1st.year();
// // if (tz.kind() !== TimeZoneKind.Proper) {
// // timeZoneString += `:${Jan1st.format("O")}`;
// // }
// // else if (!tz.hasDst()) {
// // // No daylight saving time at all -> simple offset
// // timeZoneString += `:${Jan1st.format("O")}`;
// // }
// // else {
// // const tzwoDST = TimeZone.zone(this.TimeZoneName, false); // Time zone without DST -> if offsets for Jan, 1st are the same, then we are on northern hemisphere -> 2 entries, else 3 entries
// // const startsWithRegularTime = tz.offsetForZone(currentYear, 1, 1, 0, 0, 0, 0) === tzwoDST.offsetForZone(currentYear, 1, 1, 0, 0, 0, 0);
// // const transitions = TzDatabase.instance().getTransitionsTotalOffsets(this.TimeZoneName, currentYear, currentYear + 1); // Get DST transitions for the current and the next year
// // if (transitions.length > 0 && new DateTime(transitions[0].at).year() < currentYear) {
// // transitions.shift();
// // }
// // if (startsWithRegularTime) {
// // const atDate = new DateTime(transitions[0].at, tz);
// // timeZoneString += `:${Jan1st.format("O")}:${atDate.format("O")}:${Jan1st.offset}`;
// // let isDST = true;
// // transitions.forEach(transition => {
// // // Write DST entries
// // const atDate = new DateTime(transition.at, tz);
// // let atDateUnspecific = atDate.withZone(undefined);
// // if (isDST)
// // {
// // atDateUnspecific = atDateUnspecific.sub(1, TimeUnit.Hour);
// // }
// // isDST = !isDST;
// // timeZoneString += `:${atDateUnspecific.format(":(yyyy)MMddHH")}`
// // });
// // }
// // else {
// // const atDate = new DateTime(transitions[0].at, tz);
// // timeZoneString += `:${atDate.format("O")}:${Jan1st.format("O")}:${atDate.offset}:(${currentYear})010100`;
// // let isDST = false;
// // transitions.forEach(transition => {
// // // Write DST entries
// // const atDate = new DateTime(transition.at, tz);
// // let atDateUnspecific = atDate.withZone(undefined);
// // if (isDST)
// // {
// // atDateUnspecific = atDateUnspecific.sub(1, TimeUnit.Hour);
// // }
// // isDST = !isDST;
// // timeZoneString += `:${atDateUnspecific.format(":MMddHH")}`
// // });
// // }
// // }
// // // Write resulting time zone string
// // buff.write(timeZoneString, 0, 64, "utf8");
// buff.write(TimeZoneName, 0, 64, "utf8");
// }
/**
* Creates an instance of GW_RTC_SET_TIME_ZONE_REQ.
*
* @param {string} [TimeZoneString] Time zone string, e.g. :GMT+1:GMT+2:0060:(1996)040102-0:110102-0
* @memberof GW_RTC_SET_TIME_ZONE_REQ
*/
constructor(readonly TimeZoneString: string) {
super(64);
const buff = this.Data.slice(this.offset); // View on the internal buffer makes setting the data easier
buff.write(TimeZoneString, 0, 64, "utf8");
}
}<|fim▁end|>
|
// const buff = this.Data.slice(this.offset); // View on the internal buffer makes setting the data easier
|
<|file_name|>ReturnSetExpression.java<|end_file_name|><|fim▁begin|>package usp.ime.line.ivprog.model.domain.actions;
import usp.ime.line.ivprog.model.components.datafactory.dataobjetcs.Expression;
import usp.ime.line.ivprog.model.components.datafactory.dataobjetcs.ReturnStatement;
import usp.ime.line.ivprog.model.domain.IVPDomainModel;
import ilm.framework.assignment.model.DomainAction;
import ilm.framework.domain.DomainModel;
public class ReturnSetExpression extends DomainAction {
private IVPDomainModel model = null;
private ReturnStatement rStatement = null;
private Expression returnedExpression = null;
private Expression lastReturned = null;
public ReturnSetExpression(String name, String description) {
super(name, description);
}
public void setDomainModel(DomainModel m) {
model = (IVPDomainModel) m;
}
protected void executeAction() {
lastReturned = model.setReturnExpression(returnedExpression, rStatement, _currentState);
}
protected void undoAction() {
model.setReturnExpression(lastReturned, rStatement, _currentState);
}
public boolean equals(DomainAction a) {
return false;
}
public ReturnStatement getReturnStatement() {
return rStatement;
}
public void setReturnStatement(ReturnStatement rStatement) {
this.rStatement = rStatement;
}
public Expression getReturnedExpression() {
return returnedExpression;
}
public void setReturnedExpression(Expression returnedExpression) {
this.returnedExpression = returnedExpression;
}
<|fim▁hole|>
}<|fim▁end|>
| |
<|file_name|>d2.rs<|end_file_name|><|fim▁begin|>fn run(program: Vec<usize>) -> usize {
let mut arr = program.clone();
let mut i = 0;
loop {
let op = arr[i];<|fim▁hole|> let a = arr[i + 1];
let b = arr[i + 2];
let to = arr[i + 3];
if op == 1 {
arr[to] = arr[a] + arr[b]
} else if op == 2 {
arr[to] = arr[a] * arr[b]
}
i += 4
}
}
fn main() {
let program: Vec<usize> = include_str!("./input.txt")
.split(",")
.map(|i| i.parse::<usize>().unwrap())
.collect();
// p1
let mut p1 = program.clone();
p1[1] = 12;
p1[2] = 2;
println!("{}", run(p1));
// p2
for x in 0..100 {
for y in 0..100 {
let mut p2 = program.clone();
p2[1] = x;
p2[2] = y;
if run(p2) == 19690720 {
println!("{}", 100 * x + y)
}
}
}
}<|fim▁end|>
|
if op == 99 {
return arr[0];
}
|
<|file_name|>views.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import os
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.files.storage import FileSystemStorage
from django.forms import Form
from django.template.response import SimpleTemplateResponse
from django.urls import NoReverseMatch
from formtools.wizard.views import SessionWizardView
from cms.models import Page
from cms.utils import get_current_site
from cms.utils.i18n import get_site_language_from_request
from .wizard_pool import wizard_pool
from .forms import (
WizardStep1Form,
WizardStep2BaseForm,
step2_form_factory,
)
class WizardCreateView(SessionWizardView):
template_name = 'cms/wizards/start.html'
file_storage = FileSystemStorage(
location=os.path.join(settings.MEDIA_ROOT, 'wizard_tmp_files'))
form_list = [
('0', WizardStep1Form),
# Form is used as a placeholder form.
# the real form will be loaded after step 0
('1', Form),
]
def dispatch(self, *args, **kwargs):
user = self.request.user
if not user.is_active or not user.is_staff:
raise PermissionDenied
self.site = get_current_site()
return super(WizardCreateView, self).dispatch(*args, **kwargs)
def get_current_step(self):
"""Returns the current step, if possible, else None."""
try:
return self.steps.current
except AttributeError:
return None
def is_first_step(self, step=None):
step = step or self.get_current_step()
return step == '0'
def is_second_step(self, step=None):
step = step or self.get_current_step()
return step == '1'
def get_context_data(self, **kwargs):<|fim▁hole|> return context
def get_form(self, step=None, data=None, files=None):
if step is None:
step = self.steps.current
# We need to grab the page from pre-validated data so that the wizard
# has it to prepare the list of valid entries.
if data:
page_key = "{0}-page".format(step)
self.page_pk = data.get(page_key, None)
else:
self.page_pk = None
if self.is_second_step(step):
self.form_list[step] = self.get_step_2_form(step, data, files)
return super(WizardCreateView, self).get_form(step, data, files)
def get_form_kwargs(self, step=None):
"""This is called by self.get_form()"""
kwargs = super(WizardCreateView, self).get_form_kwargs()
kwargs['wizard_user'] = self.request.user
if self.is_second_step(step):
kwargs['wizard_page'] = self.get_origin_page()
kwargs['wizard_language'] = self.get_origin_language()
else:
page_pk = self.page_pk or self.request.GET.get('page', None)
if page_pk and page_pk != 'None':
kwargs['wizard_page'] = Page.objects.filter(pk=page_pk).first()
else:
kwargs['wizard_page'] = None
kwargs['wizard_language'] = get_site_language_from_request(
self.request,
site_id=self.site.pk,
)
return kwargs
def get_form_initial(self, step):
"""This is called by self.get_form()"""
initial = super(WizardCreateView, self).get_form_initial(step)
if self.is_first_step(step):
initial['page'] = self.request.GET.get('page')
initial['language'] = self.request.GET.get('language')
return initial
def get_step_2_form(self, step=None, data=None, files=None):
entry_form_class = self.get_selected_entry().form
step_2_base_form = self.get_step_2_base_form()
form = step2_form_factory(
mixin_cls=step_2_base_form,
entry_form_class=entry_form_class,
)
return form
def get_step_2_base_form(self):
"""
Returns the base form to be used for step 2.
This form is sub classed dynamically by the form defined per module.
"""
return WizardStep2BaseForm
def get_template_names(self):
if self.is_first_step():
template_name = self.template_name
else:
template_name = self.get_selected_entry().template_name
return template_name
def done(self, form_list, **kwargs):
"""
This step only runs if all forms are valid. Simply emits a simple
template that uses JS to redirect to the newly created object.
"""
form_one, form_two = list(form_list)
instance = form_two.save()
url = self.get_success_url(instance)
language = form_one.cleaned_data['language']
if not url:
page = self.get_origin_page()
if page:
try:
url = page.get_absolute_url(language)
except NoReverseMatch:
url = '/'
else:
url = '/'
return SimpleTemplateResponse("cms/wizards/done.html", {"url": url})
def get_selected_entry(self):
data = self.get_cleaned_data_for_step('0')
return wizard_pool.get_entry(data['entry'])
def get_origin_page(self):
data = self.get_cleaned_data_for_step('0')
return data.get('page')
def get_origin_language(self):
data = self.get_cleaned_data_for_step('0')
return data.get('language')
def get_success_url(self, instance):
entry = self.get_selected_entry()
language = self.get_origin_language()
success_url = entry.get_success_url(
obj=instance,
language=language,
)
return success_url<|fim▁end|>
|
context = super(WizardCreateView, self).get_context_data(**kwargs)
if self.is_second_step():
context['wizard_entry'] = self.get_selected_entry()
|
<|file_name|>language.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Freshermeat - An open source software directory and release tracker.
# Copyright (C) 2017-2020 Cédric Bonhomme - https://www.cedricbonhomme.org
#
# For more information: https://sr.ht/~cedric/freshermeat
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,<|fim▁hole|># GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from freshermeat.bootstrap import manager
from freshermeat.models import Language
from freshermeat.web.views.api.v1.common import url_prefix
blueprint_language = manager.create_api_blueprint(
Language, url_prefix=url_prefix, methods=["GET"]
)<|fim▁end|>
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
<|file_name|>test_mongodb.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, unicode_literals
import datetime
import pytest
from case import MagicMock, call, patch, skip
from kombu import Connection
from kombu.five import Empty
def _create_mock_connection(url='', **kwargs):
from kombu.transport import mongodb # noqa
class _Channel(mongodb.Channel):
# reset _fanout_queues for each instance
_fanout_queues = {}
collections = {}
now = datetime.datetime.utcnow()
def _create_client(self):
mock = MagicMock(name='client')
# we need new mock object for every collection
def get_collection(name):
try:
return self.collections[name]
except KeyError:
mock = self.collections[name] = MagicMock(
name='collection:%s' % name)
return mock
mock.__getitem__.side_effect = get_collection
return mock
def get_now(self):
return self.now
class Transport(mongodb.Transport):
Channel = _Channel
return Connection(url, transport=Transport, **kwargs)
@skip.unless_module('pymongo')
class test_mongodb_uri_parsing:
def test_defaults(self):
url = 'mongodb://'
channel = _create_mock_connection(url).default_channel
hostname, dbname, options = channel._parse_uri()
assert dbname == 'kombu_default'
assert hostname == 'mongodb://127.0.0.1'
def test_custom_host(self):
url = 'mongodb://localhost'
channel = _create_mock_connection(url).default_channel
hostname, dbname, options = channel._parse_uri()
assert dbname == 'kombu_default'
def test_custom_database(self):
url = 'mongodb://localhost/dbname'
channel = _create_mock_connection(url).default_channel
hostname, dbname, options = channel._parse_uri()
assert dbname == 'dbname'
def test_custom_credentials(self):
url = 'mongodb://localhost/dbname'
channel = _create_mock_connection(
url, userid='foo', password='bar').default_channel
hostname, dbname, options = channel._parse_uri()
assert hostname == 'mongodb://foo:bar@localhost/dbname'
assert dbname == 'dbname'
def test_correct_readpreference(self):
url = 'mongodb://localhost/dbname?readpreference=nearest'
channel = _create_mock_connection(url).default_channel
hostname, dbname, options = channel._parse_uri()
assert options['readpreference'] == 'nearest'
class BaseMongoDBChannelCase:
def _get_method(self, cname, mname):
collection = getattr(self.channel, cname)
method = getattr(collection, mname.split('.', 1)[0])
for bit in mname.split('.')[1:]:
method = getattr(method.return_value, bit)
return method
def set_operation_return_value(self, cname, mname, *values):
method = self._get_method(cname, mname)
if len(values) == 1:
method.return_value = values[0]
else:
method.side_effect = values
def declare_droadcast_queue(self, queue):
self.channel.exchange_declare('fanout_exchange', type='fanout')
self.channel._queue_bind('fanout_exchange', 'foo', '*', queue)
assert queue in self.channel._broadcast_cursors
def get_broadcast(self, queue):
return self.channel._broadcast_cursors[queue]
def set_broadcast_return_value(self, queue, *values):
self.declare_droadcast_queue(queue)
cursor = MagicMock(name='cursor')
cursor.__iter__.return_value = iter(values)
self.channel._broadcast_cursors[queue]._cursor = iter(cursor)
def assert_collection_accessed(self, *collections):
self.channel.client.__getitem__.assert_has_calls(
[call(c) for c in collections], any_order=True)
def assert_operation_has_calls(self, cname, mname, calls, any_order=False):
method = self._get_method(cname, mname)
method.assert_has_calls(calls, any_order=any_order)
def assert_operation_called_with(self, cname, mname, *args, **kwargs):
self.assert_operation_has_calls(cname, mname, [call(*args, **kwargs)])
@skip.unless_module('pymongo')
class test_mongodb_channel(BaseMongoDBChannelCase):
def setup(self):
self.connection = _create_mock_connection()
self.channel = self.connection.default_channel
# Tests for "public" channel interface
def test_new_queue(self):
self.channel._new_queue('foobar')
self.channel.client.assert_not_called()
def test_get(self):
import pymongo
self.set_operation_return_value('messages', 'find_and_modify', {
'_id': 'docId', 'payload': '{"some": "data"}',
})
event = self.channel._get('foobar')
self.assert_collection_accessed('messages')
self.assert_operation_called_with(
'messages', 'find_and_modify',
query={'queue': 'foobar'},
remove=True,
sort=[
('priority', pymongo.ASCENDING),
],
)
assert event == {'some': 'data'}
self.set_operation_return_value('messages', 'find_and_modify', None)
with pytest.raises(Empty):
self.channel._get('foobar')
def test_get_fanout(self):
self.set_broadcast_return_value('foobar', {
'_id': 'docId1', 'payload': '{"some": "data"}',
})
event = self.channel._get('foobar')
self.assert_collection_accessed('messages.broadcast')
assert event == {'some': 'data'}
with pytest.raises(Empty):
self.channel._get('foobar')
def test_put(self):
self.channel._put('foobar', {'some': 'data'})
self.assert_collection_accessed('messages')
self.assert_operation_called_with('messages', 'insert', {
'queue': 'foobar',
'priority': 9,
'payload': '{"some": "data"}',
})
def test_put_fanout(self):
self.declare_droadcast_queue('foobar')
self.channel._put_fanout('foobar', {'some': 'data'}, 'foo')
self.assert_collection_accessed('messages.broadcast')
self.assert_operation_called_with('broadcast', 'insert', {
'queue': 'foobar', 'payload': '{"some": "data"}',
})
def test_size(self):
self.set_operation_return_value('messages', 'find.count', 77)
result = self.channel._size('foobar')
self.assert_collection_accessed('messages')
self.assert_operation_called_with(
'messages', 'find', {'queue': 'foobar'},
)
assert result == 77
def test_size_fanout(self):
self.declare_droadcast_queue('foobar')
cursor = MagicMock(name='cursor')
cursor.get_size.return_value = 77
self.channel._broadcast_cursors['foobar'] = cursor
result = self.channel._size('foobar')
assert result == 77
def test_purge(self):
self.set_operation_return_value('messages', 'find.count', 77)
result = self.channel._purge('foobar')
self.assert_collection_accessed('messages')
self.assert_operation_called_with(
'messages', 'remove', {'queue': 'foobar'},
)
assert result == 77
def test_purge_fanout(self):
self.declare_droadcast_queue('foobar')
cursor = MagicMock(name='cursor')
cursor.get_size.return_value = 77
self.channel._broadcast_cursors['foobar'] = cursor
result = self.channel._purge('foobar')
cursor.purge.assert_any_call()
assert result == 77
def test_get_table(self):
state_table = [('foo', '*', 'foo')]
stored_table = [('bar', '*', 'bar')]
self.channel.exchange_declare('test_exchange')
self.channel.state.exchanges['test_exchange']['table'] = state_table
self.set_operation_return_value('routing', 'find', [{
'_id': 'docId',
'routing_key': stored_table[0][0],
'pattern': stored_table[0][1],
'queue': stored_table[0][2],
}])
result = self.channel.get_table('test_exchange')
self.assert_collection_accessed('messages.routing')
self.assert_operation_called_with(
'routing', 'find', {'exchange': 'test_exchange'},
)
assert set(result) == frozenset(state_table) | frozenset(stored_table)
def test_queue_bind(self):
self.channel._queue_bind('test_exchange', 'foo', '*', 'foo')
self.assert_collection_accessed('messages.routing')
self.assert_operation_called_with(
'routing', 'update',
{'queue': 'foo', 'pattern': '*',
'routing_key': 'foo', 'exchange': 'test_exchange'},
{'queue': 'foo', 'pattern': '*',
'routing_key': 'foo', 'exchange': 'test_exchange'},
upsert=True,
)
def test_queue_delete(self):
self.channel.queue_delete('foobar')
self.assert_collection_accessed('messages.routing')
self.assert_operation_called_with(
'routing', 'remove', {'queue': 'foobar'},
)
def test_queue_delete_fanout(self):
self.declare_droadcast_queue('foobar')
cursor = MagicMock(name='cursor')
self.channel._broadcast_cursors['foobar'] = cursor
self.channel.queue_delete('foobar')
cursor.close.assert_any_call()
assert 'foobar' not in self.channel._broadcast_cursors
assert 'foobar' not in self.channel._fanout_queues
# Tests for channel internals
def test_create_broadcast(self):
self.channel._create_broadcast(self.channel.client)
self.channel.client.create_collection.assert_called_with(
'messages.broadcast', capped=True, size=100000,
)
def test_ensure_indexes(self):
self.channel._ensure_indexes(self.channel.client)
self.assert_operation_called_with(
'messages', 'ensure_index',
[('queue', 1), ('priority', 1), ('_id', 1)],
background=True,
)
self.assert_operation_called_with(
'broadcast', 'ensure_index',
[('queue', 1)],
)
self.assert_operation_called_with(
'routing', 'ensure_index', [('queue', 1), ('exchange', 1)],
)
def test_create_broadcast_cursor(self):
import pymongo
with patch.object(pymongo, 'version_tuple', (2, )):
self.channel._create_broadcast_cursor(
'fanout_exchange', 'foo', '*', 'foobar',
)
self.assert_collection_accessed('messages.broadcast')
self.assert_operation_called_with(
'broadcast', 'find',
tailable=True,
query={'queue': 'fanout_exchange'},
)
if pymongo.version_tuple >= (3, ):
self.channel._create_broadcast_cursor(
'fanout_exchange1', 'foo', '*', 'foobar',
)
self.assert_collection_accessed('messages.broadcast')
self.assert_operation_called_with(
'broadcast', 'find',
cursor_type=pymongo.CursorType.TAILABLE,
filter={'queue': 'fanout_exchange1'},
)
def test_open_rc_version(self):
import pymongo
def server_info(self):
return {'version': '3.6.0-rc'}
with patch.object(pymongo.MongoClient, 'server_info', server_info):
self.channel._open()
@skip.unless_module('pymongo')
class test_mongodb_channel_ttl(BaseMongoDBChannelCase):
def setup(self):
self.connection = _create_mock_connection(
transport_options={'ttl': True},
)
self.channel = self.connection.default_channel
self.expire_at = (
self.channel.get_now() + datetime.timedelta(milliseconds=777))
# Tests
def test_new_queue(self):
self.channel._new_queue('foobar')
self.assert_operation_called_with(
'queues', 'update',
{'_id': 'foobar'},
{'_id': 'foobar', 'options': {}, 'expire_at': None},
upsert=True,
)
def test_get(self):
import pymongo
self.set_operation_return_value('queues', 'find_one', {
'_id': 'docId', 'options': {'arguments': {'x-expires': 777}},
})
self.set_operation_return_value('messages', 'find_and_modify', {
'_id': 'docId', 'payload': '{"some": "data"}',
})
self.channel._get('foobar')
self.assert_collection_accessed('messages', 'messages.queues')
self.assert_operation_called_with(
'messages', 'find_and_modify',
query={'queue': 'foobar'},
remove=True,
sort=[
('priority', pymongo.ASCENDING),
],
)
self.assert_operation_called_with(
'routing', 'update',
{'queue': 'foobar'},
{'$set': {'expire_at': self.expire_at}},
multiple=True,
)
def test_put(self):
self.set_operation_return_value('queues', 'find_one', {
'_id': 'docId', 'options': {'arguments': {'x-message-ttl': 777}},
})
self.channel._put('foobar', {'some': 'data'})
self.assert_collection_accessed('messages')
self.assert_operation_called_with('messages', 'insert', {
'queue': 'foobar',
'priority': 9,
'payload': '{"some": "data"}',
'expire_at': self.expire_at,
})
def test_queue_bind(self):
self.set_operation_return_value('queues', 'find_one', {
'_id': 'docId', 'options': {'arguments': {'x-expires': 777}},
})
self.channel._queue_bind('test_exchange', 'foo', '*', 'foo')
self.assert_collection_accessed('messages.routing')
self.assert_operation_called_with(
'routing', 'update',
{'queue': 'foo', 'pattern': '*',
'routing_key': 'foo', 'exchange': 'test_exchange'},<|fim▁hole|> )
def test_queue_delete(self):
self.channel.queue_delete('foobar')
self.assert_collection_accessed('messages.queues')
self.assert_operation_called_with(
'queues', 'remove', {'_id': 'foobar'})
def test_ensure_indexes(self):
self.channel._ensure_indexes(self.channel.client)
self.assert_operation_called_with(
'messages', 'ensure_index', [('expire_at', 1)],
expireAfterSeconds=0)
self.assert_operation_called_with(
'routing', 'ensure_index', [('expire_at', 1)],
expireAfterSeconds=0)
self.assert_operation_called_with(
'queues', 'ensure_index', [('expire_at', 1)], expireAfterSeconds=0)
def test_get_expire(self):
result = self.channel._get_expire(
{'arguments': {'x-expires': 777}}, 'x-expires')
self.channel.client.assert_not_called()
assert result == self.expire_at
self.set_operation_return_value('queues', 'find_one', {
'_id': 'docId', 'options': {'arguments': {'x-expires': 777}},
})
result = self.channel._get_expire('foobar', 'x-expires')
assert result == self.expire_at
def test_update_queues_expire(self):
self.set_operation_return_value('queues', 'find_one', {
'_id': 'docId', 'options': {'arguments': {'x-expires': 777}},
})
self.channel._update_queues_expire('foobar')
self.assert_collection_accessed('messages.routing', 'messages.queues')
self.assert_operation_called_with(
'routing', 'update',
{'queue': 'foobar'},
{'$set': {'expire_at': self.expire_at}},
multiple=True,
)
self.assert_operation_called_with(
'queues', 'update',
{'_id': 'foobar'},
{'$set': {'expire_at': self.expire_at}},
multiple=True,
)
@skip.unless_module('pymongo')
class test_mongodb_channel_calc_queue_size(BaseMongoDBChannelCase):
def setup(self):
self.connection = _create_mock_connection(
transport_options={'calc_queue_size': False})
self.channel = self.connection.default_channel
self.expire_at = (
self.channel.get_now() + datetime.timedelta(milliseconds=777))
# Tests
def test_size(self):
self.set_operation_return_value('messages', 'find.count', 77)
result = self.channel._size('foobar')
self.assert_operation_has_calls('messages', 'find', [])
assert result == 0<|fim▁end|>
|
{'queue': 'foo', 'pattern': '*',
'routing_key': 'foo', 'exchange': 'test_exchange',
'expire_at': self.expire_at},
upsert=True,
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict';
/*
* Regex
*/
var regex = {
facebookPattern: /(?:https?:\/\/)?(?:[\w\-]+\.)?facebook\.com\/(?:(?:\w)*#!\/)?(?:pages\/)?(?:[\w\-]*\/)*([\w\-\.]*)/, // jshint ignore:line
facebookPluginPattern: /.*%2F(\w+?)&/
};
module.exports = {
/**
* Returns the page id for a given URL
* e.g. : https://www.facebook.com/my_page_id => my_page_id
* http://www.facebook.com/pages/foo/Bar/123456 => 123456
*
* @param String URL FacebookURL
* @return Page ID
*/
getPageId: function (url) {
if (typeof url !== 'string') {
return null;
}
var match = url.replace(/\/$/, '').match(regex.facebookPattern);
if (match) {
var short_url = match[0],
id = match[1];
<|fim▁hole|> } else {
return id;
}
}
return null;
}
};<|fim▁end|>
|
if (/plugins/.test(short_url)) {
var likeMatch = regex.facebookPluginPattern.exec(match.input);
if (likeMatch) { return likeMatch[1]; }
|
<|file_name|>puput_initial_data.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from django import VERSION as DJANGO_VERSION
from django.contrib.contenttypes.models import ContentType
from django.core.management import BaseCommand
from wagtail.wagtailcore.models import Page, Site
class Command(BaseCommand):
help = "Load Puput initial dummy data"
def handle(self, *args, **options):
# Get blogpage content type
blogpage_content_type, created = ContentType.objects.get_or_create(
model='blogpage',
app_label='puput',
defaults={'name': 'page'} if DJANGO_VERSION < (1, 8) else {}
)
# Get root page
rootpage = Page.objects.first()
# Set site root page as root site page
site = Site.objects.first()
site.root_page = rootpage
site.save()<|fim▁hole|> title="Blog",
content_type=blogpage_content_type,
slug='blog',
)
# Add blog page as a child for homepage
rootpage.add_child(instance=blogpage)
revision = blogpage.save_revision()
revision.publish()<|fim▁end|>
|
# Create example blog page
blogpage = Page(
|
<|file_name|>msbuild.py<|end_file_name|><|fim▁begin|>import copy
import os
import re
import subprocess
from conans.client import tools
from conans.client.build.visual_environment import (VisualStudioBuildEnvironment,
vs_build_type_flags, vs_std_cpp)
from conans.client.tools.oss import cpu_count
from conans.client.tools.win import vcvars_command
from conans.errors import ConanException
from conans.model.conan_file import ConanFile
from conans.model.version import Version
from conans.tools import vcvars_command as tools_vcvars_command
from conans.util.env_reader import get_env
from conans.util.files import decode_text, save
class MSBuild(object):
def __init__(self, conanfile):
if isinstance(conanfile, ConanFile):
self._conanfile = conanfile
self._settings = self._conanfile.settings
self._output = self._conanfile.output
self.build_env = VisualStudioBuildEnvironment(self._conanfile,
with_build_type_flags=False)
else: # backwards compatible with build_sln_command
self._settings = conanfile
self.build_env = None
def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,
parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True,
vcvars_ver=None, winsdk_version=None, properties=None, output_binary_log=None,
property_file_name=None, verbosity=None, definitions=None):
"""
:param project_file: Path to the .sln file.
:param targets: List of targets to build.
:param upgrade_project: Will call devenv to upgrade the solution to your
current Visual Studio.
:param build_type: Use a custom build type instead of the default settings.build_type one.
:param arch: Use a custom architecture name instead of the settings.arch one.
It will be used to build the /p:Configuration= parameter of MSBuild.
It can be used as the key of the platforms parameter.
E.g. arch="x86", platforms={"x86": "i386"}
:param parallel: Will use the configured number of cores in the conan.conf file or
tools.cpu_count():
In the solution: Building the solution with the projects in parallel. (/m: parameter).
CL compiler: Building the sources in parallel. (/MP: compiler flag)
:param force_vcvars: Will ignore if the environment is already set for a different
Visual Studio version.
:param toolset: Specify a toolset. Will append a /p:PlatformToolset option.
:param platforms: Dictionary with the mapping of archs/platforms from Conan naming to another
one. It is useful for Visual Studio solutions that have a different naming in architectures.
Example: platforms={"x86":"Win32"} (Visual solution uses "Win32" instead of "x86").
This dictionary will update the default one:
msvc_arch = {'x86': 'x86', 'x86_64': 'x64', 'armv7': 'ARM', 'armv8': 'ARM64'}
:param use_env: Applies the argument /p:UseEnv=true to the MSBuild call.
:param vcvars_ver: Specifies the Visual Studio compiler toolset to use.
:param winsdk_version: Specifies the version of the Windows SDK to use.
:param properties: Dictionary with new properties, for each element in the dictionary
{name: value} it will append a /p:name="value" option.
:param output_binary_log: If set to True then MSBuild will output a binary log file
called msbuild.binlog in the working directory. It can also be used to set the name of
log file like this output_binary_log="my_log.binlog".
This parameter is only supported starting from MSBuild version 15.3 and onwards.
:param property_file_name: When None it will generate a file named conan_build.props.
You can specify a different name for the generated properties file.
:param verbosity: Specifies verbosity level (/verbosity: parameter)
:param definitions: Dictionary with additional compiler definitions to be applied during
the build. Use value of None to set compiler definition with no value.
:return: status code of the MSBuild command invocation
"""
property_file_name = property_file_name or "conan_build.props"
self.build_env.parallel = parallel
with tools.environment_append(self.build_env.vars):
# Path for custom properties file
props_file_contents = self._get_props_file_contents(definitions)
property_file_name = os.path.abspath(property_file_name)
save(property_file_name, props_file_contents)
vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars,
vcvars_ver=vcvars_ver, winsdk_version=winsdk_version,
output=self._output)
command = self.get_command(project_file, property_file_name,
targets=targets, upgrade_project=upgrade_project,
build_type=build_type, arch=arch, parallel=parallel,
toolset=toolset, platforms=platforms,
use_env=use_env, properties=properties,
output_binary_log=output_binary_log,
verbosity=verbosity)
command = "%s && %s" % (vcvars, command)
return self._conanfile.run(command)
def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True,
build_type=None, arch=None, parallel=True, toolset=None, platforms=None,
use_env=False, properties=None, output_binary_log=None, verbosity=None):
targets = targets or []<|fim▁hole|>
if upgrade_project and not get_env("CONAN_SKIP_VS_PROJECTS_UPGRADE", False):
command.append('devenv "%s" /upgrade &&' % project_file)
else:
self._output.info("Skipped sln project upgrade")
build_type = build_type or self._settings.get_safe("build_type")
arch = arch or self._settings.get_safe("arch")
if toolset is None: # False value to skip adjusting
toolset = tools.msvs_toolset(self._settings)
verbosity = os.getenv("CONAN_MSBUILD_VERBOSITY") or verbosity or "minimal"
if not build_type:
raise ConanException("Cannot build_sln_command, build_type not defined")
if not arch:
raise ConanException("Cannot build_sln_command, arch not defined")
command.append('msbuild "%s" /p:Configuration="%s"' % (project_file, build_type))
msvc_arch = {'x86': 'x86',
'x86_64': 'x64',
'armv7': 'ARM',
'armv8': 'ARM64'}
if platforms:
msvc_arch.update(platforms)
msvc_arch = msvc_arch.get(str(arch))
if self._settings.get_safe("os") == "WindowsCE":
msvc_arch = self._settings.get_safe("os.platform")
try:
sln = tools.load(project_file)
pattern = re.compile(r"GlobalSection\(SolutionConfigurationPlatforms\)"
r"(.*?)EndGlobalSection", re.DOTALL)
solution_global = pattern.search(sln).group(1)
lines = solution_global.splitlines()
lines = [s.split("=")[0].strip() for s in lines]
except Exception:
pass # TODO: !!! what are we catching here? tools.load? .group(1)? .splitlines?
else:
config = "%s|%s" % (build_type, msvc_arch)
if config not in "".join(lines):
self._output.warn("***** The configuration %s does not exist in this solution *****"
% config)
self._output.warn("Use 'platforms' argument to define your architectures")
if output_binary_log:
msbuild_version = MSBuild.get_version(self._settings)
if msbuild_version >= "15.3": # http://msbuildlog.com/
command.append('/bl' if isinstance(output_binary_log, bool)
else '/bl:"%s"' % output_binary_log)
else:
raise ConanException("MSBuild version detected (%s) does not support "
"'output_binary_log' ('/bl')" % msbuild_version)
if use_env:
command.append('/p:UseEnv=true')
if msvc_arch:
command.append('/p:Platform="%s"' % msvc_arch)
if parallel:
command.append('/m:%s' % cpu_count(output=self._output))
if targets:
command.append("/target:%s" % ";".join(targets))
if toolset:
command.append('/p:PlatformToolset="%s"' % toolset)
if verbosity:
command.append('/verbosity:%s' % verbosity)
if props_file_path:
command.append('/p:ForceImportBeforeCppTargets="%s"'
% os.path.abspath(props_file_path))
for name, value in properties.items():
command.append('/p:%s="%s"' % (name, value))
return " ".join(command)
def _get_props_file_contents(self, definitions=None):
def format_macro(name, value):
return "%s=%s" % (name, value) if value else name
# how to specify runtime in command line:
# https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project
runtime_library = {"MT": "MultiThreaded",
"MTd": "MultiThreadedDebug",
"MD": "MultiThreadedDLL",
"MDd": "MultiThreadedDebugDLL"}.get(
self._settings.get_safe("compiler.runtime"), "")
if self.build_env:
# Take the flags from the build env, the user was able to alter them if needed
flags = copy.copy(self.build_env.flags)
flags.append(self.build_env.std)
else: # To be removed when build_sln_command is deprecated
flags = vs_build_type_flags(self._settings, with_flags=False)
flags.append(vs_std_cpp(self._settings))
if definitions:
definitions = ";".join([format_macro(name, definitions[name]) for name in definitions])
flags_str = " ".join(list(filter(None, flags))) # Removes empty and None elements
additional_node = "<AdditionalOptions>" \
"{} %(AdditionalOptions)" \
"</AdditionalOptions>".format(flags_str) if flags_str else ""
runtime_node = "<RuntimeLibrary>" \
"{}" \
"</RuntimeLibrary>".format(runtime_library) if runtime_library else ""
definitions_node = "<PreprocessorDefinitions>" \
"{};%(PreprocessorDefinitions)" \
"</PreprocessorDefinitions>".format(definitions) if definitions else ""
template = """<?xml version="1.0" encoding="utf-8"?>
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemDefinitionGroup>
<ClCompile>
{runtime_node}
{additional_node}
{definitions_node}
</ClCompile>
</ItemDefinitionGroup>
</Project>""".format(**{"runtime_node": runtime_node,
"additional_node": additional_node,
"definitions_node": definitions_node})
return template
@staticmethod
def get_version(settings):
msbuild_cmd = "msbuild -version"
vcvars = tools_vcvars_command(settings)
command = "%s && %s" % (vcvars, msbuild_cmd)
try:
out, _ = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).communicate()
version_line = decode_text(out).split("\n")[-1]
prog = re.compile("(\d+\.){2,3}\d+")
result = prog.match(version_line).group()
return Version(result)
except Exception as e:
raise ConanException("Error retrieving MSBuild version: '{}'".format(e))<|fim▁end|>
|
properties = properties or {}
command = []
|
<|file_name|>Protocol.js<|end_file_name|><|fim▁begin|>/* Copyright (c) 2006-2013 by OpenLayers Contributors (see authors.txt for
* full list of contributors). Published under the 2-clause BSD license.
* See license.txt in the OpenLayers distribution or repository for the
* full text of the license. */
/**
* @requires OpenLayers/BaseTypes/Class.js
*/
/**
* Class: OpenLayers.Protocol
* Abstract vector layer protocol class. Not to be instantiated directly. Use
* one of the protocol subclasses instead.
*/
OpenLayers.Protocol = OpenLayers.Class({
/**<|fim▁hole|> * {<OpenLayers.Format>} The format used by this protocol.
*/
format: null,
/**
* Property: options
* {Object} Any options sent to the constructor.
*/
options: null,
/**
* Property: autoDestroy
* {Boolean} The creator of the protocol can set autoDestroy to false
* to fully control when the protocol is destroyed. Defaults to
* true.
*/
autoDestroy: true,
/**
* Property: defaultFilter
* {<OpenLayers.Filter>} Optional default filter to read requests
*/
defaultFilter: null,
/**
* Constructor: OpenLayers.Protocol
* Abstract class for vector protocols. Create instances of a subclass.
*
* Parameters:
* options - {Object} Optional object whose properties will be set on the
* instance.
*/
initialize: function(options) {
options = options || {};
OpenLayers.Util.extend(this, options);
this.options = options;
},
/**
* Method: mergeWithDefaultFilter
* Merge filter passed to the read method with the default one
*
* Parameters:
* filter - {<OpenLayers.Filter>}
*/
mergeWithDefaultFilter: function(filter) {
var merged;
if (filter && this.defaultFilter) {
merged = new OpenLayers.Filter.Logical({
type: OpenLayers.Filter.Logical.AND,
filters: [this.defaultFilter, filter]
});
} else {
merged = filter || this.defaultFilter || undefined;
}
return merged;
},
/**
* APIMethod: destroy
* Clean up the protocol.
*/
destroy: function() {
this.options = null;
this.format = null;
},
/**
* APIMethod: read
* Construct a request for reading new features.
*
* Parameters:
* options - {Object} Optional object for configuring the request.
*
* Returns:
* {<OpenLayers.Protocol.Response>} An <OpenLayers.Protocol.Response>
* object, the same object will be passed to the callback function passed
* if one exists in the options object.
*/
read: function(options) {
options = options || {};
options.filter = this.mergeWithDefaultFilter(options.filter);
},
/**
* APIMethod: create
* Construct a request for writing newly created features.
*
* Parameters:
* features - {Array({<OpenLayers.Feature.Vector>})} or
* {<OpenLayers.Feature.Vector>}
* options - {Object} Optional object for configuring the request.
*
* Returns:
* {<OpenLayers.Protocol.Response>} An <OpenLayers.Protocol.Response>
* object, the same object will be passed to the callback function passed
* if one exists in the options object.
*/
create: function() {
},
/**
* APIMethod: update
* Construct a request updating modified features.
*
* Parameters:
* features - {Array({<OpenLayers.Feature.Vector>})} or
* {<OpenLayers.Feature.Vector>}
* options - {Object} Optional object for configuring the request.
*
* Returns:
* {<OpenLayers.Protocol.Response>} An <OpenLayers.Protocol.Response>
* object, the same object will be passed to the callback function passed
* if one exists in the options object.
*/
update: function() {
},
/**
* APIMethod: delete
* Construct a request deleting a removed feature.
*
* Parameters:
* feature - {<OpenLayers.Feature.Vector>}
* options - {Object} Optional object for configuring the request.
*
* Returns:
* {<OpenLayers.Protocol.Response>} An <OpenLayers.Protocol.Response>
* object, the same object will be passed to the callback function passed
* if one exists in the options object.
*/
"delete": function() {
},
/**
* APIMethod: commit
* Go over the features and for each take action
* based on the feature state. Possible actions are create,
* update and delete.
*
* Parameters:
* features - {Array({<OpenLayers.Feature.Vector>})}
* options - {Object} Object whose possible keys are "create", "update",
* "delete", "callback" and "scope", the values referenced by the
* first three are objects as passed to the "create", "update", and
* "delete" methods, the value referenced by the "callback" key is
* a function which is called when the commit operation is complete
* using the scope referenced by the "scope" key.
*
* Returns:
* {Array({<OpenLayers.Protocol.Response>})} An array of
* <OpenLayers.Protocol.Response> objects.
*/
commit: function() {
},
/**
* Method: abort
* Abort an ongoing request.
*
* Parameters:
* response - {<OpenLayers.Protocol.Response>}
*/
abort: function(response) {
},
/**
* Method: createCallback
* Returns a function that applies the given public method with resp and
* options arguments.
*
* Parameters:
* method - {Function} The method to be applied by the callback.
* response - {<OpenLayers.Protocol.Response>} The protocol response object.
* options - {Object} Options sent to the protocol method
*/
createCallback: function(method, response, options) {
return OpenLayers.Function.bind(function() {
method.apply(this, [response, options]);
}, this);
},
CLASS_NAME: "OpenLayers.Protocol"
});
/**
* Class: OpenLayers.Protocol.Response
* Protocols return Response objects to their users.
*/
OpenLayers.Protocol.Response = OpenLayers.Class({
/**
* Property: code
* {Number} - OpenLayers.Protocol.Response.SUCCESS or
* OpenLayers.Protocol.Response.FAILURE
*/
code: null,
/**
* Property: requestType
* {String} The type of request this response corresponds to. Either
* "create", "read", "update" or "delete".
*/
requestType: null,
/**
* Property: last
* {Boolean} - true if this is the last response expected in a commit,
* false otherwise, defaults to true.
*/
last: true,
/**
* Property: features
* {Array({<OpenLayers.Feature.Vector>})} or {<OpenLayers.Feature.Vector>}
* The features returned in the response by the server. Depending on the
* protocol's read payload, either features or data will be populated.
*/
features: null,
/**
* Property: data
* {Object}
* The data returned in the response by the server. Depending on the
* protocol's read payload, either features or data will be populated.
*/
data: null,
/**
* Property: reqFeatures
* {Array({<OpenLayers.Feature.Vector>})} or {<OpenLayers.Feature.Vector>}
* The features provided by the user and placed in the request by the
* protocol.
*/
reqFeatures: null,
/**
* Property: priv
*/
priv: null,
/**
* Property: error
* {Object} The error object in case a service exception was encountered.
*/
error: null,
/**
* Constructor: OpenLayers.Protocol.Response
*
* Parameters:
* options - {Object} Optional object whose properties will be set on the
* instance.
*/
initialize: function(options) {
OpenLayers.Util.extend(this, options);
},
/**
* Method: success
*
* Returns:
* {Boolean} - true on success, false otherwise
*/
success: function() {
return this.code > 0;
},
CLASS_NAME: "OpenLayers.Protocol.Response"
});
OpenLayers.Protocol.Response.SUCCESS = 1;
OpenLayers.Protocol.Response.FAILURE = 0;<|fim▁end|>
|
* Property: format
|
<|file_name|>profile-settings.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnInit } from '@angular/core';
import { NgForm } from '@angular/forms';
import * as firebase from 'firebase';
import { AuthService, AlertService, UserService } from '@shared';
@Component({
selector: 'app-profile-settings',
templateUrl: './profile-settings.component.html',
styleUrls: ['./profile-settings.component.scss']
})
export class ProfileSettingsComponent implements OnInit {
public uid = firebase.auth().currentUser.uid;
public displayName: string = 'Your username';
public bio: any = 'Your bio';
constructor(
private authService: AuthService,
private alertService: AlertService,
private userService: UserService) {
}
public ngOnInit(): Promise<void> {
return firebase.database().ref().child(`users/${this.uid}`).once('value').then((snap) => {
this.displayName = snap.val().displayName;
this.bio = snap.val().bio;
});
}
public onPasswordReset(): void {
this.userService.sendUserPasswordResetEmail();
this.alertService.showToaster('Reset password is sent to your email');
}
public onUpdateUserInfo(form: NgForm): void {
const displayName = form.value.displayName;
const bio = form.value.bio;
this.userService.updateUserInfo(firebase.auth().currentUser.uid, displayName, bio);
this.alertService.showToaster('Your settings are saved');
}
public onLogout(): void {<|fim▁hole|> this.authService.logout();
this.alertService.showToaster('Logout succesful');
}
}<|fim▁end|>
| |
<|file_name|>fileutil.py<|end_file_name|><|fim▁begin|># coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pants.util.contextutil import temporary_file
def atomic_copy(src, dst):<|fim▁hole|> with temporary_file(root_dir=os.path.dirname(dst)) as tmp_dst:
shutil.copyfile(src, tmp_dst.name)
os.rename(tmp_dst.name, dst)
def create_size_estimators():
def line_count(filename):
with open(filename, 'rb') as fh:
return sum(1 for line in fh)
return {
'linecount': lambda srcs: sum(line_count(src) for src in srcs),
'filecount': lambda srcs: len(srcs),
'filesize': lambda srcs: sum(os.path.getsize(src) for src in srcs),
'nosize': lambda srcs: 0,
}<|fim▁end|>
|
"""Copy the file src to dst, overwriting dst atomically."""
|
<|file_name|>logging.js<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright 2016 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview This file contains additional helper definitions on top of the
* Google Closure Library's logging subsystem (see
* <http://google.github.io/closure-library/api/namespace_goog_log.html>).
*
* Aside from providing several helper functions, this file, when executed, sets
* up the logging subsystem parameters:
*
* * The logging level of the root logger is set up according to whether or not
* the compilation is performed in a debug mode (see
* <http://google.github.io/closure-library/api/namespace_goog.html#DEBUG>).
* * Log messages that bubbled till the root logger are emitted to the
* JavaScript Console.
* * Log messages are set up to be kept (probably, truncated) in a background
* page's log buffer, which allows to export them later.
*/
goog.provide('GoogleSmartCard.Logging');
goog.require('GoogleSmartCard.LogBuffer');
goog.require('GoogleSmartCard.Logging.CrashLoopDetection');
goog.require('goog.array');
goog.require('goog.asserts');
goog.require('goog.debug');
goog.require('goog.debug.Console');
goog.require('goog.log');
goog.require('goog.log.Level');
goog.require('goog.log.Logger');
goog.require('goog.object');
goog.scope(function() {
const GSC = GoogleSmartCard;
/**
* @define {boolean} Whether to make every logger created via this library a
* child of the |LOGGER_SCOPE|.
* Overriding it to false allows to reduce the boilerplate printed in every
* logged message; the default true value, on the other hand, allows to avoid
* clashes in case the extension creates and manages its own Closure Library
* loggers.
*/
GSC.Logging.USE_SCOPED_LOGGERS =
goog.define('GoogleSmartCard.Logging.USE_SCOPED_LOGGERS', true);
/**
* @define {boolean} Whether to trigger the extension reload in case a fatal
* error occurs in Release mode.
*/
GSC.Logging.SELF_RELOAD_ON_FATAL_ERROR =
goog.define('GoogleSmartCard.Logging.SELF_RELOAD_ON_FATAL_ERROR', false);
/**
* Every logger created via this library is created as a child of this logger,
* as long as the |USE_SCOPED_LOGGERS| constant is true. Ignored when that
* constant is false.
*/
const LOGGER_SCOPE = 'GoogleSmartCard';
/**
* The logging level that will be applied to the root logger (and therefore
* would be effective for all loggers unless the ones that have an explicitly
* set level).
*/
const ROOT_LOGGER_LEVEL =
goog.DEBUG ? goog.log.Level.FINE : goog.log.Level.INFO;
/**
* The capacity of the buffer that stores the emitted log messages.
*
* When the number of log messages exceeds this capacity, the messages from the
* middle will be removed (so only some first and some last messages will be
* kept at any given moment of time).
*/
const LOG_BUFFER_CAPACITY = goog.DEBUG ? 20 * 1000 : 2000;
/**
* This constant specifies the name of the special window attribute in which our
* log buffer is stored. This is used so that popup windows and other pages can
* access the background page's log buffer and therefore use a centralized place
* for aggregating logs.
*/
const GLOBAL_LOG_BUFFER_VARIABLE_NAME = 'googleSmartCard_logBuffer';
/**
* @type {!goog.log.Logger}
*/
const rootLogger =
goog.asserts.assert(goog.log.getLogger(goog.log.ROOT_LOGGER_NAME));
/**
* @type {!goog.log.Logger}
*/
const logger = GSC.Logging.USE_SCOPED_LOGGERS ?
goog.asserts.assert(goog.log.getLogger(LOGGER_SCOPE)) :
rootLogger;
/** @type {boolean} */
let wasLoggingSetUp = false;
/**
* The log buffer that aggregates all log messages, to let them be exported if
* the user requests so. This variable is initialized to a new `LogBuffer`
* instance, but if we're running outside the background page this variable is
* later reassigned to the background page's log buffer.
* @type {!GSC.LogBuffer}
*/
let logBuffer = new GSC.LogBuffer(LOG_BUFFER_CAPACITY);
/**
* Sets up logging parameters and log buffering.
*
* This function is called automatically when this library file is included.
*/
GSC.Logging.setupLogging = function() {
if (wasLoggingSetUp)
return;
wasLoggingSetUp = true;
setupConsoleLogging();
setupRootLoggerLevel();
goog.log.fine(
logger,
'Logging was set up with level=' + ROOT_LOGGER_LEVEL.name +
' and enabled logging to JS console');
setupLogBuffer();
};
/**
* Returns a logger.
* @param {string} name
* @param {!goog.log.Level=} opt_level
* @return {!goog.log.Logger}
*/
GSC.Logging.getLogger = function(name, opt_level) {
const logger = goog.log.getLogger(name, opt_level);
GSC.Logging.check(logger);
goog.asserts.assert(logger);
return logger;
};
/**
* Returns a library-scoped logger.
* @param {string} name
* @param {!goog.log.Level=} opt_level
* @return {!goog.log.Logger}
*/
GSC.Logging.getScopedLogger = function(name, opt_level) {
let fullName;
if (GSC.Logging.USE_SCOPED_LOGGERS && name)
fullName = `${LOGGER_SCOPE}.${name}`;
else if (GSC.Logging.USE_SCOPED_LOGGERS)
fullName = LOGGER_SCOPE;
else
fullName = name;
return GSC.Logging.getLogger(fullName, opt_level);
};
/**
* Returns the logger with the specified name relative to the specified parent
* logger.
* @param {!goog.log.Logger} parentLogger
* @param {string} relativeName
* @param {!goog.log.Level=} opt_level
* @return {!goog.log.Logger}
*/
GSC.Logging.getChildLogger = function(parentLogger, relativeName, opt_level) {
return GSC.Logging.getLogger(parentLogger.getName() + '.' + relativeName);
};
/**
* Changes the logger level so that the logger is not more verbose than the
* specified level.
* @param {!goog.log.Logger} logger
* @param {!goog.log.Level} boundaryLevel
*/
GSC.Logging.setLoggerVerbosityAtMost = function(logger, boundaryLevel) {
const effectiveLevel = goog.log.getEffectiveLevel(logger);
if (!effectiveLevel || effectiveLevel.value < boundaryLevel.value)
goog.log.setLevel(logger, boundaryLevel);
};
/**
* Checks if the condition evaluates to true.
*
* In contrast to goog.asserts.assert method, this method works in non-Debug
* builds too.
* @template T
* @param {T} condition The condition to check.
* @param {string=} opt_message Error message in case of failure.
*/
GSC.Logging.check = function(condition, opt_message) {
if (!condition)
GSC.Logging.fail(opt_message);
};
/**
* The same as GSC.Logging.check, but the message is prefixed with the logger
* title.
* @template T
* @param {!goog.log.Logger} logger The logger which name is to be prepended
* to the error message.
* @param {T} condition The condition to check.
* @param {string=} opt_message Error message in case of failure.
*/
GSC.Logging.checkWithLogger = function(logger, condition, opt_message) {
if (!condition)
GSC.Logging.failWithLogger(logger, opt_message);
};
/**
* Throws an exception and emits severe log with the specified message.
*
* In the release mode, this additionally asynchronously initiates the App
* reload, unless a crash-and-reload loop is detected.
* @param {string=} opt_message Error message in case of failure.
*/
GSC.Logging.fail = function(opt_message) {
const message = opt_message ? opt_message : 'Failure';
goog.log.error(rootLogger, message);
scheduleAppReloadIfAllowed();
throw new Error(message);
};
/**
* Same as GSC.Logging.fail, but the message is prefixed with the logger title.
* @param {!goog.log.Logger} logger The logger which name is to be prepended
* to the error message.
* @param {string=} opt_message Error message in case of failure.
*/
GSC.Logging.failWithLogger = function(logger, opt_message) {
const messagePrefix = 'Failure in ' + logger.getName();
if (opt_message !== undefined) {
const transformedMessage = messagePrefix + ': ' + opt_message;
GSC.Logging.fail(transformedMessage);
} else {
GSC.Logging.fail(messagePrefix);
}
};
/**
* Returns the log buffer instance.
*
* The log buffer instance was either created during this script execution, or
* was reused from the background page's global attribute.
* @return {!GSC.LogBuffer}
*/
GSC.Logging.getLogBuffer = function() {
return logBuffer;
};
function scheduleAppReloadIfAllowed() {
if (goog.DEBUG || !GSC.Logging.SELF_RELOAD_ON_FATAL_ERROR)
return;
GSC.Logging.CrashLoopDetection.handleImminentCrash()
.then(function(isInCrashLoop) {
if (isInCrashLoop) {
goog.log.info(
rootLogger,
'Crash loop detected. The application is defunct, but the ' +
'execution state is kept in order to retain the failure logs.');
return;
}
goog.log.info(
rootLogger, 'Reloading the application due to the fatal error...');<|fim▁hole|> });
}
function reloadApp() {
// This method works only in non-kiosk mode. Since this is a much more common
// case and as this function doesn't generate errors in any case, this method
// is called first.
chrome.runtime.reload();
// This method works only in kiosk mode.
chrome.runtime.restart();
}
function setupConsoleLogging() {
const console = new goog.debug.Console;
const formatter = console.getFormatter();
formatter.showAbsoluteTime = true;
formatter.showRelativeTime = false;
console.setCapturing(true);
}
function setupRootLoggerLevel() {
goog.log.setLevel(rootLogger, ROOT_LOGGER_LEVEL);
}
function setupLogBuffer() {
GSC.LogBuffer.attachBufferToLogger(
logBuffer, rootLogger, document.location.href);
if (!chrome || !chrome.runtime || !chrome.runtime.getBackgroundPage) {
// We don't know whether we're running inside the background page and
// the API for talking to it is unavailable - therefore no action needed,
// i.e., our page will continue using our log buffer. This should only
// happen in tests or if this code is running outside an app/extension.
return;
}
// Expose our log buffer in the global window properties. Pages other than the
// background will use it to access the background page's log buffer - see the
// code directly below.
goog.global[GLOBAL_LOG_BUFFER_VARIABLE_NAME] = logBuffer;
chrome.runtime.getBackgroundPage(function(backgroundPage) {
GSC.Logging.check(backgroundPage);
goog.asserts.assert(backgroundPage);
if (backgroundPage === window) {
// We're running inside the background page - no action needed.
return;
}
// We've discovered we're running outside the background page - so need to
// switch to using the background page's log buffer in order to keep all
// logs aggregated and available in a single place.
// First, obtain a reference to the background page's log buffer.
const backgroundLogBuffer =
/** @type {GSC.LogBuffer} */ (
backgroundPage[GLOBAL_LOG_BUFFER_VARIABLE_NAME]);
GSC.Logging.check(backgroundLogBuffer);
goog.asserts.assert(backgroundLogBuffer);
// Copy the logs we've accumulated in the current page into the background
// page's log buffer.
logBuffer.copyToOtherBuffer(backgroundLogBuffer);
// From now, start using the background page's buffer for collecting data
// from our page's loggers. Dispose of our log buffer to avoid storing new
// logs twice.
GSC.LogBuffer.attachBufferToLogger(
backgroundLogBuffer, rootLogger, document.location.href);
logBuffer.dispose();
// Switch our reference to the background page's log buffer.
logBuffer = backgroundLogBuffer;
// The global reference is not needed if we're not the background page.
delete goog.global[GLOBAL_LOG_BUFFER_VARIABLE_NAME];
});
}
GSC.Logging.setupLogging();
}); // goog.scope<|fim▁end|>
|
reloadApp();
})
.catch(function() {
// Don't do anything for repeated crashes within a single run.
|
<|file_name|>thisPropertyAssignmentComputed.ts<|end_file_name|><|fim▁begin|>// @allowjs: true
// @checkjs: true<|fim▁hole|>// @strict: true
// @filename: thisPropertyAssignmentComputed.js
this["a" + "b"] = 0<|fim▁end|>
|
// @noemit: true
|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>extern crate ramp;
extern crate rpaillier;
use ramp::int::{ Int };
use rpaillier::KeyPairBuilder;
#[test]
fn test_encrypt_decrypt() {
let key_pair = KeyPairBuilder::new().bits(128).finalize();
let public_key = &key_pair.public_key;
let m = Int::from(37);
let c = public_key.encrypt(&m);
let a = key_pair.decrypt(&c);
assert_eq!(m, a);
}
#[test]
fn test_homomorphic_properties() {
let kp = KeyPairBuilder::new().bits(128).finalize();
<|fim▁hole|>
let c1 = pk.encrypt(&m1);
let c2 = pk.encrypt(&m2);
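    // Paillier is additively homomorphic: multiplying two ciphertexts modulo
    // n^2 yields a ciphertext that decrypts to the sum of the plaintexts.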
let add = (c1 * c2) % &pk.n_squared;
let e = m1 + m2;
let a = kp.decrypt(&add);
assert_eq!(a, e);
}<|fim▁end|>
|
let pk = &kp.public_key;
let m1 = Int::from(37);
let m2 = Int::from(132);
|
<|file_name|>unrelated_documents.py<|end_file_name|><|fim▁begin|>from odoo import api, fields, models, tools
class UnrelatedDocumentsReport(models.Model):
_name = "sicon.unrelated_documents.report"
    _description = 'Documents not yet related to any concession'
_auto = False
dependence_id = fields.Many2one(comodel_name='tmc.dependence',
readonly=True)
document_type_id = fields.Many2one(comodel_name='tmc.document_type',
readonly=True)
number = fields.Integer(readonly=True)
period = fields.Integer(readonly=True)
document_object = fields.Char(readonly=True)
name = fields.Char(string='Document', readonly=True)
<|fim▁hole|> 'tmc.document': ['name', 'document_object', 'main_topic_ids'],
'sicon.event': ['document_id']
}
def init(self):
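        # Recreate the SQL view: general-concession decree documents
        # (document type 'DEC', main topic 'Concesiones Generales') that are
        # not yet referenced by any sicon_event row, ordered by period/number.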
tools.drop_view_if_exists(self.env.cr, self._table)
self.env.cr.execute("""
CREATE OR REPLACE VIEW sicon_unrelated_documents_report AS (
SELECT
doc.id,
doc.document_object,
doc.name
FROM (
tmc_document doc
LEFT JOIN document_main_topic_rel rel
ON (rel.tmc_document_id = doc.id)
LEFT JOIN tmc_document_topic doc_topic
ON (rel.tmc_document_topic_id = doc_topic.id)
LEFT JOIN tmc_dependence dep
ON doc.dependence_id = dep.id
LEFT JOIN tmc_document_type doc_type
ON doc.document_type_id = doc_type.id
)
WHERE doc_topic.name = 'Concesiones Generales'
AND doc_type.abbreviation = 'DEC'
AND doc.id NOT IN (
SELECT
document_id
FROM sicon_event e WHERE document_id IS NOT NULL)
ORDER BY doc.period, doc.number
)
""")<|fim▁end|>
|
_depends = {
|
<|file_name|>nonuniform_random_number_generation.py<|end_file_name|><|fim▁begin|>import sys
import random
import collections
import itertools
import bisect
# @include
def nonuniform_random_number_generation(values, probabilities):
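    # Build the prefix sums of the probabilities, draw a uniform sample in
    # [0.0, 1.0), and use bisect to find the enclosing interval; its index
    # selects the value to return.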
prefix_sum_of_probabilities = (
[0.0] + list(itertools.accumulate(probabilities)))
interval_idx = bisect.bisect(prefix_sum_of_probabilities,
random.random()) - 1
return values[interval_idx]
# @exclude
<|fim▁hole|> T = [float(i) for i in range(n)]
P = []
full_prob = 1.0
for i in range(n - 1):
pi = random.uniform(0.0, full_prob)
P.append(pi)
full_prob -= pi
P.append(full_prob)
print(*T)
print(*P)
print(nonuniform_random_number_generation(T, P))
# Test. Perform the nonuniform random number generation for n * k_times
# times and calculate the distribution of each bucket.
k_times = 100000
counts = collections.Counter(
int(nonuniform_random_number_generation(T, P))
for _ in range(n * k_times))
for i in range(n):
print(counts[i] / (n * k_times), P[i])
assert abs(counts[i] / (n * k_times) - P[i]) < 0.01
if __name__ == '__main__':
main()<|fim▁end|>
|
def main():
n = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(1, 50)
|
<|file_name|>search-all.module.ts<|end_file_name|><|fim▁begin|>/*
* Lumeer: Modern Data Definition and Processing Platform
*
* Copyright (C) since 2017 Lumeer.io, s.r.o. and/or its affiliates.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.<|fim▁hole|>import {NgModule} from '@angular/core';
import {CommonModule} from '@angular/common';
import {SharedModule} from '../../../../shared/shared.module';
import {SearchAllComponent} from './search-all.component';
import {SearchCollectionsModule} from '../search-collections/search-collections.module';
import {SearchViewsModule} from '../search-views/search-views.module';
import {SearchTasksModule} from '../search-tasks/search-tasks.module';
@NgModule({
imports: [CommonModule, SharedModule, SearchCollectionsModule, SearchViewsModule, SearchTasksModule],
declarations: [SearchAllComponent],
exports: [SearchAllComponent],
})
export class SearchAllModule {}<|fim▁end|>
|
*/
|
<|file_name|>vec-slice.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or<|fim▁hole|>// except according to those terms.
// run-pass
pub fn main() {
let v = vec![1,2,3,4,5];
let v2 = &v[1..3];
assert_eq!(v2[0], 2);
assert_eq!(v2[1], 3);
}<|fim▁end|>
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
|
<|file_name|>client.ts<|end_file_name|><|fim▁begin|>// @public
import RavenType = require('raven-js');
import {Breadcrumb, CaptureOptions, User} from './interface';
export {Breadcrumb, CaptureOptions, User};
let output = {
enabled: false,
captureException(ex: Error, options?: CaptureOptions) {},
context<T>(fn: () => T): T {
return fn();
},
wrap<TFunc extends Function>(fn: TFunc): TFunc {
return fn;
},
setUserContext(user?: User) {},
captureMessage(message: string, options?: CaptureOptions) {},
captureBreadcrumb(crumb: Breadcrumb) {},
showReportDialog(options?: {eventId?: string}) {},
};
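// When no Sentry DSN is configured, the no-op stub above is exported so that
// callers can invoke the reporting API unconditionally; it is swapped for the
// Raven-backed implementation below when SENTRY_DSN_CLIENT or SENTRY_DSN is set.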
if (process.env.SENTRY_DSN_CLIENT || process.env.SENTRY_DSN) {
const Raven = require('raven-js') as typeof RavenType;
Raven.config(
(process.env.SENTRY_DSN_CLIENT || process.env.SENTRY_DSN)!,
).install();
(window as any).onunhandledrejection = function(e: any) {
if (Raven) {
Raven.captureException(e.reason);
}
};
output = {
enabled: true,
captureException(ex: Error, options?: CaptureOptions) {
Raven.captureException(ex, options);<|fim▁hole|> context<T>(fn: () => T): T {
return Raven.context(fn) as any;
},
wrap<TFunc extends Function>(fn: TFunc): TFunc {
return Raven.wrap(fn) as any;
},
setUserContext(user?: User) {
user ? Raven.setUserContext(user) : Raven.setUserContext();
},
captureMessage(message: string, options?: CaptureOptions) {
Raven.captureMessage(message, options);
},
captureBreadcrumb(crumb: Breadcrumb) {
Raven.captureBreadcrumb(crumb);
},
showReportDialog(options?: {eventId?: string}) {
Raven.showReportDialog(options);
},
};
}
export default output;
module.exports = output;
module.exports.default = output;<|fim▁end|>
|
},
|
<|file_name|>LToIntObj1Obj0FuncAttest.java<|end_file_name|><|fim▁begin|>/*
* This file is part of "lunisolar-magma".
*
* (C) Copyright 2014-2022 Lunisolar (http://lunisolar.eu/).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*<|fim▁hole|> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.lunisolar.magma.asserts.func.function.to;
import eu.lunisolar.magma.asserts.func.FunctionalAttest.AssertionsCheck;
import eu.lunisolar.magma.asserts.func.FunctionalAttest.SemiEvaluation;
import eu.lunisolar.magma.func.supp.Be;
import eu.lunisolar.magma.asserts.func.FunctionalAttest;
import eu.lunisolar.magma.asserts.func.FunctionalAttest.*;
import javax.annotation.Nonnull; // NOSONAR
import javax.annotation.Nullable; // NOSONAR
import eu.lunisolar.magma.func.supp.check.Checks; // NOSONAR
import eu.lunisolar.magma.basics.meta.*; // NOSONAR
import eu.lunisolar.magma.basics.meta.functional.*; // NOSONAR
import eu.lunisolar.magma.basics.meta.functional.type.*; // NOSONAR
import eu.lunisolar.magma.basics.meta.functional.domain.*; // NOSONAR
import eu.lunisolar.magma.func.action.*; // NOSONAR
import java.util.function.*;
import eu.lunisolar.magma.func.function.to.*;
import eu.lunisolar.magma.func.action.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.bi.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.obj.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.tri.*; // NOSONAR
import eu.lunisolar.magma.func.function.*; // NOSONAR
import eu.lunisolar.magma.func.function.conversion.*; // NOSONAR
import eu.lunisolar.magma.func.function.from.*; // NOSONAR
import eu.lunisolar.magma.func.function.to.*; // NOSONAR
import eu.lunisolar.magma.func.operator.binary.*; // NOSONAR
import eu.lunisolar.magma.func.operator.ternary.*; // NOSONAR
import eu.lunisolar.magma.func.operator.unary.*; // NOSONAR
import eu.lunisolar.magma.func.predicate.*; // NOSONAR
import eu.lunisolar.magma.func.supplier.*; // NOSONAR
import eu.lunisolar.magma.func.function.to.LToIntBiFunction.*;
/** Assert class for LToIntObj1Obj0Func. */
public final class LToIntObj1Obj0FuncAttest<T2, T1> extends FunctionalAttest.Full<LToIntObj1Obj0FuncAttest<T2, T1>, LToIntObj1Obj0Func<T2, T1>, LBiConsumer<T2, T1>, Checks.CheckInt> {
public LToIntObj1Obj0FuncAttest(LToIntObj1Obj0Func<T2, T1> actual) {
super(actual);
}
@Nonnull
public static <T2, T1> LToIntObj1Obj0FuncAttest<T2, T1> attestToIntObj1Obj0Func(LToIntBiFunction.LToIntObj1Obj0Func<T2, T1> func) {
return new LToIntObj1Obj0FuncAttest(func);
}
@Nonnull
public IntEvaluation<LToIntObj1Obj0FuncAttest<T2, T1>, LBiConsumer<T2, T1>> doesApplyAsInt(T2 a2, T1 a1) {
return new IntEvaluation<LToIntObj1Obj0FuncAttest<T2, T1>, LBiConsumer<T2, T1>>(this, () -> String.format("(%s,%s)", a2, a1), (desc, pc) -> {
var func = value();
Checks.check(func).must(Be::notNull, "Actual function is null.");
if (pc != null) {
pc.accept(a2, a1);
}
var result = func.applyAsIntObj1Obj0(a2, a1);
return Checks.attest(result, desc);
}, recurringAssert);
}
}<|fim▁end|>
|
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
|
<|file_name|>posting.rs<|end_file_name|><|fim▁begin|>use amount::MixedAmount;
use transaction::Transaction;
use std::fmt::Display;
use std::fmt::Formatter;
use std::fmt::Error;
use std::result::Result;
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum ClearedStatus {
Uncleared,
Pending,
Cleared
}
impl Display for ClearedStatus {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
&ClearedStatus::Uncleared => write!(f, ""),
&ClearedStatus::Pending => write!(f, "!"),
&ClearedStatus::Cleared => write!(f, "*"),
}
}
}
#[derive(Clone, PartialEq, Eq)]
pub struct Tag(String, String);
#[derive(Clone, PartialEq, Eq)]
pub enum PostingType {
Regular,
Virtual,
BalancedVirtual
}
#[derive(Clone, PartialEq, Eq)]
pub struct Posting {
pub status: ClearedStatus,
pub amount: MixedAmount,
pub posting_type: PostingType,
pub tags: Vec<Tag>,
pub balance_assertion: Option<MixedAmount>,
pub transaction: Option<Transaction>
}
impl Posting {
pub fn is_real(&self) -> bool {<|fim▁hole|> self.posting_type == PostingType::Regular
}
pub fn is_virtual(&self) -> bool {
self.posting_type == PostingType::Virtual
}
pub fn related_postings(&self) -> Vec<Posting> {
match self.transaction.clone() {
Some(t) => t.postings.iter().filter(|&x| x != self).map(|x| x.clone()).collect(),
_ => vec!()
}
}
pub fn sum_postings(postings: Vec<Posting>) -> MixedAmount {
postings.iter().map(|x| x.clone().amount).sum()
}
pub fn status(&self) -> ClearedStatus {
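        // An Uncleared posting inherits the status of its parent transaction
        // (when present); an explicit Pending or Cleared status wins.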
match self.status.clone() {
ClearedStatus::Uncleared => match self.transaction.clone() {
Some(t) => t.status,
_ => ClearedStatus::Uncleared
},
s => s
}
}
pub fn all_tags(&self) -> Vec<Tag> {
self.tags.iter().chain(match self.transaction.clone() {
Some(t) => t.tags,
_ => vec!()
}.iter()).map(|x| x.clone()).collect()
}
}<|fim▁end|>
| |
<|file_name|>CompanyWS.java<|end_file_name|><|fim▁begin|>/*
* JBILLING CONFIDENTIAL
* _____________________
*
* [2003] - [2012] Enterprise jBilling Software Ltd.
* All Rights Reserved.
*
* NOTICE: All information contained herein is, and remains
* the property of Enterprise jBilling Software.
* The intellectual and technical concepts contained
* herein are proprietary to Enterprise jBilling Software
* and are protected by trade secret or copyright law.
* Dissemination of this information or reproduction of this material
* is strictly forbidden.
*/
package com.sapienter.jbilling.server.user;
import com.sapienter.jbilling.server.user.contact.db.ContactDTO;
import com.sapienter.jbilling.server.user.db.CompanyDAS;
import com.sapienter.jbilling.server.user.db.CompanyDTO;
import com.sapienter.jbilling.server.util.db.CurrencyDAS;
import com.sapienter.jbilling.server.util.db.CurrencyDTO;
import com.sapienter.jbilling.server.util.db.LanguageDAS;
import javax.validation.Valid;
import javax.validation.constraints.Size;
public class CompanyWS implements java.io.Serializable {
private int id;
private Integer currencyId;
private Integer languageId;
@Size(min = 0, max = 100, message = "validation.error.size,0,100")
private String description;
@Valid
private ContactWS contact;
public CompanyWS() {
}
public CompanyWS(int i) {
id = i;
}
public CompanyWS(CompanyDTO companyDto) {
this.id = companyDto.getId();
this.currencyId= companyDto.getCurrencyId();
this.languageId = companyDto.getLanguageId();
this.description = companyDto.getDescription();
ContactDTO contact = new EntityBL(Integer.valueOf(this.id)).getContact();
if (contact != null) {
this.contact = new ContactWS(contact.getId(),
contact.getAddress1(),
contact.getAddress2(),
contact.getCity(),
contact.getStateProvince(),
contact.getPostalCode(),
contact.getCountryCode(),
contact.getDeleted());
}
}
public CompanyDTO getDTO(){
CompanyDTO dto = new CompanyDAS().find(Integer.valueOf(this.id));<|fim▁hole|> }
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public Integer getCurrencyId() {
return currencyId;
}
public void setCurrencyId(Integer currencyId) {
this.currencyId = currencyId;
}
public Integer getLanguageId() {
return languageId;
}
public void setLanguageId(Integer languageId) {
this.languageId = languageId;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public ContactWS getContact() {
return contact;
}
public void setContact(ContactWS contact) {
this.contact = contact;
}
public String toString() {
return "CompanyWS [id=" + id + ", currencyId=" + currencyId
+ ", languageId=" + languageId + ", description=" + description
+ ", contact=" + contact + "]";
}
}<|fim▁end|>
|
dto.setCurrency(new CurrencyDAS().find(this.currencyId));
dto.setLanguage(new LanguageDAS().find(this.languageId));
dto.setDescription(this.description);
return dto;
|
<|file_name|>SubscriberCallback.java<|end_file_name|><|fim▁begin|>package com.thilinamb.mqtt.client.sub;
import org.eclipse.paho.client.mqttv3.IMqttDeliveryToken;
import org.eclipse.paho.client.mqttv3.MqttCallback;
import org.eclipse.paho.client.mqttv3.MqttMessage;
import java.util.logging.Logger;
/**
* Subscriber callback
* Author: Thilina
* Date: 7/19/14
*/
public class SubscriberCallback implements MqttCallback {
private static Logger logger = Logger.getLogger(SubscriberCallback.class.getName());
@Override<|fim▁hole|> }
@Override
public void messageArrived(String s, MqttMessage mqttMessage) throws Exception {
logger.info("Message Arrived. Topic: " + s + ", Message: " + new String(mqttMessage.getPayload()));
}
@Override
public void deliveryComplete(IMqttDeliveryToken iMqttDeliveryToken) {
}
}<|fim▁end|>
|
public void connectionLost(Throwable throwable) {
logger.warning("Connection Lost!");
|
<|file_name|>secondary.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Secondary movement commands."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 Will Hutcheson
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
from .. import Command, COMMANDS
from ...characters import CharacterShell
@COMMANDS.register
class NortheastCommand(Command):
"""A command to allow a character to move northeast."""
def _action(self):
char = self.session.char
if not char:
self.session.send("You're not playing a character!")
return
if not char.room:
self.session.send("You're not in a room!")
return
char.move_direction(x=1, y=1)
@COMMANDS.register
class NorthwestCommand(Command):
"""A command to allow a character to move northwest."""
def _action(self):
char = self.session.char
if not char:
self.session.send("You're not playing a character!")
return
if not char.room:
self.session.send("You're not in a room!")
return
char.move_direction(x=-1, y=1)
@COMMANDS.register
class SoutheastCommand(Command):
"""A command to allow a character to move southeast."""
def _action(self):
char = self.session.char
if not char:
self.session.send("You're not playing a character!")
return
if not char.room:
self.session.send("You're not in a room!")
return
char.move_direction(x=1, y=-1)
@COMMANDS.register
class SouthwestCommand(Command):
"""A command to allow a character to move southwest."""
def _action(self):<|fim▁hole|> return
if not char.room:
self.session.send("You're not in a room!")
return
char.move_direction(x=-1, y=-1)
CharacterShell.add_verbs(NortheastCommand, "northeast", "ne")
CharacterShell.add_verbs(NorthwestCommand, "northwest", "nw")
CharacterShell.add_verbs(SoutheastCommand, "southeast", "se")
CharacterShell.add_verbs(SouthwestCommand, "southwest", "sw")<|fim▁end|>
|
char = self.session.char
if not char:
self.session.send("You're not playing a character!")
|
<|file_name|>Model_mod.py<|end_file_name|><|fim▁begin|>"""
Model class that unites theory with data.
"""
import logging
logger = logging.getLogger('Model_mod')
import copy
import scipy
import SloppyCell
import SloppyCell.Residuals as Residuals
import SloppyCell.Collections as Collections
import SloppyCell.Utility as Utility
from . import KeyedList_mod as KeyedList_mod
KeyedList = KeyedList_mod.KeyedList
_double_epsilon_ = scipy.finfo(scipy.float_).eps
class Model:
"""
A Model object connects a set of experimental data with the objects used to
model that data.
Most importantly, a Model can calculate a cost for a given set of
parameters, characterizing how well those parameters fit the data contained
within the model.
"""
imag_cutoff = 1e-8
def __init__(self, expts, calcs):
"""
expts A sequence of Experiments to be fit to.
calcs A sequence of calculation objects referred to by the
Experiments.
"""
self.calcVals = {}
self.calcSensitivityVals = {}
self.internalVars = {}
self.internalVarsDerivs = {}
self.residuals = KeyedList()
if isinstance(expts, list):
expts = Collections.ExperimentCollection(expts)
elif isinstance(expts, dict):
expts = Collections.ExperimentCollection(expts.values())
self.SetExperimentCollection(expts)
<|fim▁hole|> calcs = Collections.CalculationCollection(calcs)
elif isinstance(calcs, dict):
calcs = Collections.CalculationCollection(calcs.values())
self.SetCalculationCollection(calcs)
self.observers = KeyedList()
self.parameter_bounds = {}
def compile(self):
"""
Compile all the calculations contained within the Model.
"""
for calc in self.get_calcs().values():
calc.compile()
def copy(self):
return copy.deepcopy(self)
def get_params(self):
"""
Return a copy of the current model parameters
"""
return self.calcColl.GetParameters()
def get_ICs(self):
"""
Get the initial conditions currently present in a model
for dynamic variables that are not assigned variables.
Outputs:
KeyedList with keys (calcName,varName) --> initialValue
"""
ics=KeyedList()
for calcName, calc in self.calcColl.items():
for varName in calc.dynamicVars.keys():
if varName in calc.assignedVars.keys(): continue
ics.set( (calcName,varName), calc.get_var_ic(varName))
return ics
def set_ICs(self, ics):
"""
Sets the initial conditions into the model. Uses the input
format defined by 'getICs'.
Inputs:
ics -- Initial conditions to set in KeyedList form:
        keys: (calcName, varName) --> initialValue
Outputs:
None
"""
for (calcName, varName), initialValue in ics.items():
self.calcColl.get(calcName).set_var_ic(varName, initialValue)
def _evaluate(self, params, T=1):
"""
Evaluate the cost for the model, returning the intermediate residuals,
and chi-squared.
(Summing up the residuals is a negligible amount of work. This
arrangment makes notification of observers much simpler.)
"""
self.params.update(params)
self.check_parameter_bounds(params)
self.CalculateForAllDataPoints(params)
self.ComputeInternalVariables(T)
resvals = [res.GetValue(self.calcVals, self.internalVars, self.params)
for res in self.residuals.values()]
# Occasionally it's useful to use residuals with a sqrt(-1) in them,
# to get negative squares. Then, however, we might get small imaginary
# parts in our results, which this shaves off.
chisq = scipy.real_if_close(scipy.sum(scipy.asarray(resvals)**2),
tol=self.imag_cutoff)
if scipy.isnan(chisq):
logger.warn('Chi^2 is NaN, converting to Infinity.')
chisq = scipy.inf
cost = 0.5 * chisq
entropy = 0
for expt, sf_ents in self.internalVars['scaleFactor_entropies'].items():
for group, ent in sf_ents.items():
entropy += ent
self._notify(event = 'evaluation',
resvals = resvals,
chisq = chisq,
cost = cost,
free_energy = cost-T*entropy,
entropy = entropy,
params = self.params)
return resvals, chisq, cost, entropy
def res(self, params):
"""
Return the residual values of the model fit given a set of parameters
"""
return self._evaluate(params)[0]
def res_log_params(self, log_params):
"""
Return the residual values given the logarithm of the parameters
"""
return self.res(scipy.exp(log_params))
def res_dict(self, params):
"""
Return the residual values of the model fit given a set of parameters
in dictionary form.
"""
return dict(zip(self.residuals.keys(), self.res(params)))
def chisq(self, params):
"""
Return the sum of the squares of the residuals for the model
"""
return self._evaluate(params)[1]
def redchisq(self, params):
"""
Return chi-squared divided by the number of degrees of freedom
Question: Are priors to be included in the N data points?
How do scale factors change the number of d.o.f.?
"""
return self.chisq(params)/(len(self.residuals) - len(self.params))
def cost(self, params):
"""
Return the cost (1/2 chisq) of the model
"""
return self._evaluate(params)[2]
def cost_log_params(self, log_params):
"""
Return the cost given the logarithm of the input parameters
"""
return self.cost(scipy.exp(log_params))
def free_energy(self, params, T):
temp, temp, c, entropy = self._evaluate(params, T=T)
return c - T * entropy
def _notify(self, **args):
"""
Call all observers with the given arguments.
"""
for obs in self.observers:
obs(**args)
def attach_observer(self, obs_key, observer):
"""
Add an observer to be notified by this Model.
"""
self.observers.set(obs_key, observer)
def detach_observer(self, obs_key):
"""
Remove an observer from the Model.
"""
self.observers.remove_by_key(obs_key)
def get_observers(self):
"""
Return the KeyedList of observers for this model.
"""
return self.observers
def reset_observers(self):
"""
Call reset() for all attached observers.
"""
for obs in self.observers:
if hasattr(obs, 'reset'):
obs.reset()
resDict = res_dict
# ...
def AddResidual(self, res):
self.residuals.setByKey(res.key, res)
def Force(self, params, epsf, relativeScale=False, stepSizeCutoff=None):
"""
Force(parameters, epsilon factor) -> list
Returns a list containing the numerical gradient of the cost with
respect to each parameter (in the parameter order of the
CalculationCollection). Each element of the gradient is:
        (cost(param + eps) - cost(param - eps)) / (2 * eps).
If relativeScale is False then epsf is the stepsize used (it should
already be multiplied by typicalValues before Jacobian is called)
If relativeScale is True then epsf is multiplied by params.
The two previous statements hold for both scalar and vector valued
epsf.
"""
force = []
params = scipy.array(params)
        if stepSizeCutoff is None:
stepSizeCutoff = scipy.sqrt(_double_epsilon_)
if relativeScale is True:
eps = epsf * abs(params)
else:
eps = epsf * scipy.ones(len(params),scipy.float_)
for i in range(0,len(eps)):
if eps[i] < stepSizeCutoff:
eps[i] = stepSizeCutoff
for index, param in enumerate(params):
paramsPlus = params.copy()
paramsPlus[index] = param + eps[index]
costPlus = self.cost(paramsPlus)
paramsMinus = params.copy()
paramsMinus[index] = param - eps[index]
costMinus = self.cost(paramsMinus)
force.append((costPlus-costMinus)/(2.0*eps[index]))
return force
def gradient_sens(self, params):
"""
Return the gradient of the cost, d_cost/d_param as a KeyedList.
This method uses sensitivity integration, so it only applies to
ReactionNetworks.
"""
self.params.update(params)
# The cost is 0.5 * sum(res**2),
# so the gradient is sum(res * dres_dp)
jac_dict = self.jacobian_sens(params)
res_dict = self.res_dict(params)
force = scipy.zeros(len(params), scipy.float_)
for res_key, res_val in res_dict.items():
res_derivs = jac_dict.get(res_key)
force += res_val * scipy.asarray(res_derivs)
gradient = self.params.copy()
gradient.update(force)
return gradient
def gradient_log_params_sens(self, log_params):
"""
Return the gradient of the cost wrt log parameters, d_cost/d_log_param
as a KeyedList.
This method uses sensitivity integration, so it only applies to
ReactionNetworks.
"""
# We just need to multiply dcost_dp by p.
params = scipy.exp(log_params)
gradient = self.gradient_sens(params)
gradient_log = gradient.copy()
gradient_log.update(scipy.asarray(gradient) * scipy.asarray(params))
return gradient_log
def CalculateForAllDataPoints(self, params):
"""
CalculateForAllDataPoints(parameters) -> dictionary
Gets a dictionary of measured independent variables indexed by
calculation from the ExperimentCollection and passes it to the
CalculationCollection. The returned dictionary is of the form:
dictionary[experiment][calculation][dependent variable]
        [independent variable] -> calculated value.
"""
self.params.update(params)
varsByCalc = self.GetExperimentCollection().GetVarsByCalc()
self.calcVals = self.GetCalculationCollection().Calculate(varsByCalc,
params)
return self.calcVals
def CalculateSensitivitiesForAllDataPoints(self, params):
"""
CalculateSensitivitiesForAllDataPoints(parameters) -> dictionary
Gets a dictionary of measured independent variables indexed by
calculation from the ExperimentCollection and passes it to the
CalculationCollection. The returned dictionary is of the form:
dictionary[experiment][calculation][dependent variable]
        [independent variable][parameter] -> sensitivity.
"""
varsByCalc = self.GetExperimentCollection().GetVarsByCalc()
self.calcVals, self.calcSensitivityVals =\
self.GetCalculationCollection().CalculateSensitivity(varsByCalc,
params)
return self.calcSensitivityVals
def ComputeInternalVariables(self, T=1):
sf, sf_ents = self.compute_scale_factors(T)
self.internalVars['scaleFactors'] = sf
self.internalVars['scaleFactor_entropies'] = sf_ents
def compute_scale_factors(self, T):
"""
Compute the scale factors for the current parameters and return a dict.
The dictionary is of the form dict[exptId][varId] = scale_factor
"""
scale_factors = {}
scale_factor_entropies = {}
for exptId, expt in self.GetExperimentCollection().items():
scale_factors[exptId], scale_factor_entropies[exptId] =\
self._compute_sf_and_sfent_for_expt(expt, T)
return scale_factors, scale_factor_entropies
def _compute_sf_and_sfent_for_expt(self, expt, T):
# Compute the scale factors for a given experiment
scale_factors = {}
scale_factor_entropies = {}
exptData = expt.GetData()
expt_integral_data = expt.GetIntegralDataSets()
fixed_sf = expt.get_fixed_sf()
sf_groups = expt.get_sf_groups()
for group in sf_groups:
# Do any of the variables in this group have fixed scale factors?
fixed = set(group).intersection(set(fixed_sf.keys()))
fixedAt = set([fixed_sf[var] for var in fixed])
# We'll need to index the scale factor entropies on the *group*
# that shares a scale factor, since we only have one entropy per
# shared scale factor. So we need to index on the group of
# variables. We sort the group and make it hashable to avoid any
# double-counting.
hash_group = expt._hashable_group(group)
if len(fixedAt) == 1:
value = fixedAt.pop()
for var in group:
scale_factors[var] = value
scale_factor_entropies[hash_group] = 0
continue
elif len(fixedAt) > 1:
raise ValueError('Shared scale factors fixed at '
'inconsistent values in experiment '
'%s!' % expt.GetName())
# Finally, compute the scale factor for this group
theoryDotData, theoryDotTheory = 0, 0
# For discrete data
for calc in exptData:
# Pull out the vars we have measured for this calculation
for var in set(group).intersection(set(exptData[calc].keys())):
for indVar, (data, error) in exptData[calc][var].items():
theory = self.calcVals[calc][var][indVar]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
# Now for integral data
for dataset in expt_integral_data:
calc = dataset['calcKey']
theory_traj = self.calcVals[calc]['full trajectory']
data_traj = dataset['trajectory']
uncert_traj = dataset['uncert_traj']
interval = dataset['interval']
T = interval[1] - interval[0]
for var in group.intersection(set(dataset['vars'])):
TheorDotT = self._integral_theorytheory(var, theory_traj,
uncert_traj,
interval)
theoryDotTheory += TheorDotT/T
TheorDotD = self._integral_theorydata(var, theory_traj,
data_traj,
uncert_traj,
interval)
theoryDotData += TheorDotD/T
# Now for the extrema data
for ds in expt.scaled_extrema_data:
calc = ds['calcKey']
if ds['type'] == 'max':
var = ds['var'] + '_maximum'
elif ds['type'] == 'min':
var = ds['var'] + '_minimum'
data, error = ds['val'], ds['sigma']
theory = self.calcVals[calc][var]\
[ds['minTime'],ds['maxTime']][1]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
for var in group:
if theoryDotTheory != 0:
scale_factors[var] = theoryDotData/theoryDotTheory
else:
scale_factors[var] = 1
entropy = expt.compute_sf_entropy(hash_group, theoryDotTheory,
theoryDotData, T)
scale_factor_entropies[hash_group] = entropy
return scale_factors, scale_factor_entropies
def _integral_theorytheory(self, var, theory_traj, uncert_traj, interval):
def integrand(t):
theory = theory_traj.evaluate_interpolated_traj(var, t)
uncert = uncert_traj.evaluate_interpolated_traj(var, t)
return theory**2/uncert**2
val, error = scipy.integrate.quad(integrand, interval[0], interval[1],
limit=int(1e5))
return val
def _integral_theorydata(self, var, theory_traj, data_traj, uncert_traj,
interval):
def integrand(t):
theory = theory_traj.evaluate_interpolated_traj(var, t)
data = data_traj.evaluate_interpolated_traj(var, t)
uncert = uncert_traj.evaluate_interpolated_traj(var, t)
return theory*data/uncert**2
val, error = scipy.integrate.quad(integrand, interval[0], interval[1],
limit=int(1e5))
return val
def ComputeInternalVariableDerivs(self):
"""
        ComputeInternalVariableDerivs() -> dictionary
Returns the scale factor derivatives w.r.t. parameters
appropriate for each chemical in each
experiment, given the current parameters. The returned dictionary
is of the form: internalVarsDerivs['scaleFactors'] \
= dict[experiment][chemical][parametername] -> derivative.
"""
self.internalVarsDerivs['scaleFactors'] = {}
p = self.GetCalculationCollection().GetParameters()
for exptName, expt in self.GetExperimentCollection().items():
self.internalVarsDerivs['scaleFactors'][exptName] = {}
exptData = expt.GetData()
# Get the dependent variables measured in this experiment
exptDepVars = set()
for calc in exptData:
exptDepVars.update(set(expt.GetData()[calc].keys()))
# Now for the extrema data
for ds in expt.scaled_extrema_data:
exptDepVars.add(ds['var'])
for depVar in exptDepVars:
self.internalVarsDerivs['scaleFactors'][exptName][depVar] = {}
if depVar in expt.GetFixedScaleFactors():
for pname in p.keys():
self.internalVarsDerivs['scaleFactors'][exptName]\
[depVar][pname] = 0.0
continue
theoryDotData, theoryDotTheory = 0, 0
for calc in exptData:
if depVar in exptData[calc].keys():
for indVar, (data, error)\
in exptData[calc][depVar].items():
theory = self.calcVals[calc][depVar][indVar]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
for ds in expt.scaled_extrema_data:
if ds['type'] == 'max':
var = ds['var'] + '_maximum'
elif ds['type'] == 'min':
var = ds['var'] + '_minimum'
data, error = ds['val'], ds['sigma']
theory = self.calcVals[ds['calcKey']][var]\
[ds['minTime'],ds['maxTime']][1]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
# now get derivative of the scalefactor
for pname in p.keys():
theorysensDotData, theorysensDotTheory = 0, 0
for calc in exptData:
clc = self.calcColl.get(calc)
if depVar in exptData[calc].keys():
for indVar, (data, error)\
in exptData[calc][depVar].items():
theory = self.calcVals[calc][depVar][indVar]
# Default to 0 if sensitivity not calculated for
# that parameter (i.e. it's not in the
# Calculation)
theorysens = self.calcSensitivityVals[calc][depVar][indVar].get(pname, 0.0)
theorysensDotData += (theorysens * data) / error**2
theorysensDotTheory += (theorysens * theory) / error**2
for ds in expt.scaled_extrema_data:
if ds['type'] == 'max':
var = ds['var'] + '_maximum'
elif ds['type'] == 'min':
var = ds['var'] + '_minimum'
theory = self.calcVals[ds['calcKey']][var]\
[ds['minTime'],ds['maxTime']][1]
data, error = ds['val'], ds['sigma']
theorysens = self.calcSensitivityVals[ds['calcKey']][var][ds['minTime'],ds['maxTime']].get(pname, 0.0)
theorysensDotData += (theorysens * data) / error**2
theorysensDotTheory += (theorysens * theory) / error**2
deriv_dict = self.internalVarsDerivs['scaleFactors'][exptName][depVar]
try:
deriv_dict[pname] = theorysensDotData/theoryDotTheory\
- 2*theoryDotData*theorysensDotTheory/(theoryDotTheory)**2
except ZeroDivisionError:
deriv_dict[pname] = 0
return self.internalVarsDerivs['scaleFactors']
def jacobian_log_params_sens(self, log_params):
"""
Return a KeyedList of the derivatives of the model residuals w.r.t.
        the logarithms of the parameters.
The method uses the sensitivity integration. As such, it will only
work with ReactionNetworks.
The KeyedList is of the form:
kl.get(resId) = [dres/dlogp1, dres/dlogp2...]
"""
params = scipy.exp(log_params)
j = self.jacobian_sens(params)
j_log = j.copy()
j_log.update(scipy.asarray(j) * scipy.asarray(params))
return j_log
def jacobian_sens(self, params):
"""
Return a KeyedList of the derivatives of the model residuals w.r.t.
parameters.
The method uses the sensitivity integration. As such, it will only
work with ReactionNetworks.
The KeyedList is of the form:
kl[resId] = [dres/dp1, dres/dp2...]
"""
self.params.update(params)
# Calculate sensitivities
self.CalculateSensitivitiesForAllDataPoints(params)
self.ComputeInternalVariables()
self.ComputeInternalVariableDerivs()
# Calculate residual derivatives
deriv = [(resId, res.Dp(self.calcVals, self.calcSensitivityVals,
self.internalVars, self.internalVarsDerivs,
self.params))
for (resId, res) in self.residuals.items()]
return KeyedList(deriv)
def jacobian_fd(self, params, eps,
relativeScale=False, stepSizeCutoff=None):
"""
Return a KeyedList of the derivatives of the model residuals w.r.t.
parameters.
The method uses finite differences.
Inputs:
params -- Parameters about which to calculate the jacobian
eps -- Step size to take, may be vector or scalar.
relativeScale -- If true, the eps is taken to be the fractional
change in parameter to use in finite differences.
stepSizeCutoff -- Minimum step size to take.
"""
res = self.resDict(params)
orig_vals = scipy.array(params)
if stepSizeCutoff is None:
stepSizeCutoff = scipy.sqrt(_double_epsilon_)
if relativeScale:
eps_l = scipy.maximum(eps * abs(params), stepSizeCutoff)
else:
eps_l = scipy.maximum(eps * scipy.ones(len(params),scipy.float_),
stepSizeCutoff)
J = KeyedList() # will hold the result
for resId in res.keys():
J.set(resId, [])
# Two-sided finite difference
for ii in range(len(params)):
params[ii] = orig_vals[ii] + eps_l[ii]
resPlus = self.resDict(params)
params[ii] = orig_vals[ii] - eps_l[ii]
resMinus = self.resDict(params)
params[ii] = orig_vals[ii]
for resId in res.keys():
res_deriv = (resPlus[resId]-resMinus[resId])/(2.*eps_l[ii])
J.get(resId).append(res_deriv)
# NOTE: after call to ComputeResidualsWithScaleFactors the Model's
# parameters get updated, must reset this:
self.params.update(params)
return J
def GetJacobian(self,params):
"""
GetJacobian(parameters) -> dictionary
Gets a dictionary of the sensitivities at the time points of
the independent variables for the measured dependent variables
for each calculation and experiment.
Form:
dictionary[(experiment,calculation,dependent variable,
independent variable)] -> result
result is a vector of length number of parameters containing
the sensitivity at that time point, in the order of the ordered
parameters
"""
return self.jacobian_sens(params)
def Jacobian(self, params, epsf, relativeScale=False, stepSizeCutoff=None):
"""
Finite difference the residual dictionary to get a dictionary
for the Jacobian. It will be indexed the same as the residuals.
Note: epsf is either a scalar or an array.
If relativeScale is False then epsf is the stepsize used (it should
already be multiplied by typicalValues before Jacobian is called)
If relativeScale is True then epsf is multiplied by params.
The two previous statements hold for both scalar and vector valued
epsf.
"""
return self.jacobian_fd(params, epsf,
relativeScale, stepSizeCutoff)
def GetJandJtJ(self,params):
j = self.GetJacobian(params)
mn = scipy.zeros((len(params),len(params)),scipy.float_)
for paramind in range(0,len(params)):
for paramind1 in range(0,len(params)):
sum = 0.0
for kys in j.keys():
sum = sum + j.get(kys)[paramind]*j.get(kys)[paramind1]
mn[paramind][paramind1] = sum
return j,mn
def GetJandJtJInLogParameters(self,params):
# Formula below is exact if you have perfect data. If you don't
# have perfect data (residuals != 0) you get an extra term when you
# compute d^2(cost)/(dlogp[i]dlogp[j]) which is
# sum_resname (residual[resname] * jac[resname][j] * delta_jk * p[k])
# but can be ignored when residuals are zeros, and maybe should be
# ignored altogether because it can make the Hessian approximation
# non-positive definite
pnolog = scipy.exp(params)
jac, jtj = self.GetJandJtJ(pnolog)
for i in range(len(params)):
for j in range(len(params)):
jtj[i][j] = jtj[i][j]*pnolog[i]*pnolog[j]
res = self.resDict(pnolog)
for resname in self.residuals.keys():
for j in range(len(params)):
# extra term --- not including it
# jtj[j][j] += res[resname]*jac[resname][j]*pnolog[j]
jac.get(resname)[j] = jac.get(resname)[j]*pnolog[j]
return jac,jtj
def hessian_elem(self, func, f0, params, i, j, epsi, epsj,
relativeScale, stepSizeCutoff, verbose):
"""
Return the second partial derivative for func w.r.t. parameters i and j
f0: The value of the function at params
eps: Sets the stepsize to try
relativeScale: If True, step i is of size p[i] * eps, otherwise it is
eps
stepSizeCutoff: The minimum stepsize to take
"""
origPi, origPj = params[i], params[j]
if relativeScale:
# Steps sizes are given by eps*the value of the parameter,
# but the minimum step size is stepSizeCutoff
hi, hj = scipy.maximum((epsi*abs(origPi), epsj*abs(origPj)),
(stepSizeCutoff, stepSizeCutoff))
else:
hi, hj = epsi, epsj
if i == j:
params[i] = origPi + hi
fp = func(params)
params[i] = origPi - hi
fm = func(params)
element = (fp - 2*f0 + fm)/hi**2
else:
## f(xi + hi, xj + h)
params[i] = origPi + hi
params[j] = origPj + hj
fpp = func(params)
## f(xi + hi, xj - hj)
params[i] = origPi + hi
params[j] = origPj - hj
fpm = func(params)
## f(xi - hi, xj + hj)
params[i] = origPi - hi
params[j] = origPj + hj
fmp = func(params)
## f(xi - hi, xj - hj)
params[i] = origPi - hi
params[j] = origPj - hj
fmm = func(params)
element = (fpp - fpm - fmp + fmm)/(4 * hi * hj)
params[i], params[j] = origPi, origPj
self._notify(event = 'hessian element', i = i, j = j,
element = element)
if verbose:
print('hessian[%i, %i] = %g' % (i, j, element))
return element
def hessian(self, params, epsf, relativeScale = True,
stepSizeCutoff = None, jacobian = None,
verbose = False):
"""
Returns the hessian of the model.
epsf: Sets the stepsize to try
relativeScale: If True, step i is of size p[i] * eps, otherwise it is
eps
stepSizeCutoff: The minimum stepsize to take
jacobian: If the jacobian is passed, it will be used to estimate
the step size to take.
        verbose: If True, a message will be printed with each hessian element
calculated
"""
nOv = len(params)
if stepSizeCutoff is None:
stepSizeCutoff = scipy.sqrt(_double_epsilon_)
params = scipy.asarray(params)
if relativeScale:
eps = epsf * abs(params)
else:
eps = epsf * scipy.ones(len(params),scipy.float_)
# Make sure we don't take steps smaller than stepSizeCutoff
eps = scipy.maximum(eps, stepSizeCutoff)
if jacobian is not None:
# Turn off the relative scaling since that would overwrite all this
relativeScale = False
jacobian = scipy.asarray(jacobian)
if len(jacobian.shape) == 0:
resDict = self.resDict(params)
new_jacobian = scipy.zeros(len(params),scipy.float_)
for key, value in resDict.items():
new_jacobian += 2.0*value*scipy.array(jacobian[0][key])
jacobian = new_jacobian
elif len(jacobian.shape) == 2: # Need to sum up the total jacobian
residuals = scipy.asarray(self.res(params))
# Changed by rng7. I'm not sure what is meant by "sum up the
# total jacobian". The following line failed due to shape
# mismatch. From the context below, it seems that the dot
# product is appropriate.
#jacobian = 2.0*residuals*jacobian
jacobian = 2.0 * scipy.dot(residuals, jacobian)
# If parameters are independent, then
# epsilon should be (sqrt(2)*J[i])^-1
factor = 1.0/scipy.sqrt(2)
for i in range(nOv):
if jacobian[i] == 0.0:
eps[i] = 0.5*abs(params[i])
else:
# larger than stepSizeCutoff, but not more than
# half of the original parameter value
eps[i] = min(max(factor/abs(jacobian[i]), stepSizeCutoff),
0.5*abs(params[i]))
## compute cost at f(x)
f0 = self.cost(params)
hess = scipy.zeros((nOv, nOv), scipy.float_)
## compute all (numParams*(numParams + 1))/2 unique hessian elements
for i in range(nOv):
for j in range(i, nOv):
hess[i][j] = self.hessian_elem(self.cost, f0,
params, i, j,
eps[i], eps[j],
relativeScale, stepSizeCutoff,
verbose)
hess[j][i] = hess[i][j]
return hess
def hessian_log_params(self, params, eps,
relativeScale=False, stepSizeCutoff=1e-6,
verbose=False):
"""
Returns the hessian of the model in log parameters.
eps: Sets the stepsize to try
relativeScale: If True, step i is of size p[i] * eps, otherwise it is
eps
stepSizeCutoff: The minimum stepsize to take
        verbose: If True, a message will be printed with each hessian element
calculated
"""
nOv = len(params)
if scipy.isscalar(eps):
eps = scipy.ones(len(params), scipy.float_) * eps
## compute cost at f(x)
f0 = self.cost_log_params(scipy.log(params))
hess = scipy.zeros((nOv, nOv), scipy.float_)
## compute all (numParams*(numParams + 1))/2 unique hessian elements
for i in range(nOv):
for j in range(i, nOv):
hess[i][j] = self.hessian_elem(self.cost_log_params, f0,
scipy.log(params),
i, j, eps[i], eps[j],
relativeScale, stepSizeCutoff,
verbose)
hess[j][i] = hess[i][j]
return hess
def CalcHessianInLogParameters(self, params, eps, relativeScale = False,
stepSizeCutoff = 1e-6, verbose = False):
return self.hessian_log_params(params, eps, relativeScale,
stepSizeCutoff, verbose)
def CalcHessian(self, params, epsf, relativeScale = True,
stepSizeCutoff = None, jacobian = None, verbose = False):
"""
Finite difference the residual dictionary to get a dictionary
for the Hessian. It will be indexed the same as the residuals.
Note: epsf is either a scalar or an array.
If relativeScale is False then epsf is the stepsize used (it should
already be multiplied by typicalValues before Jacobian is called)
If relativeScale is True then epsf is multiplied by params.
The two previous statements hold for both scalar and vector valued
epsf.
"""
return self.hessian(params, epsf, relativeScale,
stepSizeCutoff, jacobian, verbose)
def CalcResidualResponseArray(self, j, h):
"""
Calculate the Residual Response array. This array represents the change
in a residual obtained by a finite change in a data value.
Inputs:
(self, j, h)
j -- jacobian matrix to use
h -- hessian matrix to use
Outputs:
response -- The response array
"""
j,h = scipy.asarray(j), scipy.asarray(h)
[m,n] = j.shape
response = scipy.zeros((m,m),scipy.float_)
        ident = scipy.eye(m, dtype=scipy.float_)
hinv = scipy.linalg.pinv2(h,1e-40)
tmp = scipy.dot(hinv,scipy.transpose(j))
tmp2 = scipy.dot(j,tmp)
response = ident - tmp2
return response
def CalcParameterResponseToResidualArray(self,j,h):
"""
Calculate the parameter response to residual array. This array
represents the change in parameter resulting from a change in data
(residual).
Inputs:
(self, j, h)
j -- jacobian matrix to use
h -- hessian matrix to use
Outputs:
response -- The response array
"""
j,h = scipy.asarray(j), scipy.asarray(h)
[m,n] = j.shape
response = scipy.zeros((n,m),scipy.float_)
hinv = scipy.linalg.pinv2(h,1e-40)
response = -scipy.dot(hinv,scipy.transpose(j))
return response
############################################################################
# Getting/Setting variables below
def SetExperimentCollection(self, exptColl):
self.exptColl = exptColl
for exptKey, expt in exptColl.items():
exptData = expt.GetData()
for calcKey, calcData in exptData.items():
for depVarKey, depVarData in calcData.items():
                    sortedData = sorted(depVarData.items())
for indVar, (value, uncert) in sortedData:
resName = (exptKey, calcKey, depVarKey, indVar)
res = Residuals.ScaledErrorInFit(resName, depVarKey,
calcKey, indVar, value,
uncert, exptKey)
self.residuals.setByKey(resName, res)
# Add in the PeriodChecks
for period in expt.GetPeriodChecks():
calcKey, depVarKey, indVarValue = period['calcKey'], \
period['depVarKey'], period['startTime']
resName = (exptKey, calcKey, depVarKey, indVarValue,
'PeriodCheck')
res = Residuals.PeriodCheckResidual(resName, calcKey, depVarKey,
indVarValue,
period['period'],
period['sigma'])
self.residuals.setByKey(resName, res)
# Add in the AmplitudeChecks
for amplitude in expt.GetAmplitudeChecks():
calcKey, depVarKey = amplitude['calcKey'], \
amplitude['depVarKey']
indVarValue0, indVarValue1 = amplitude['startTime'],\
amplitude['testTime']
resName = (exptKey, calcKey, depVarKey, indVarValue0,
indVarValue1, 'AmplitudeCheck')
res = Residuals.AmplitudeCheckResidual(resName, calcKey,
depVarKey, indVarValue0,
indVarValue1,
amplitude['period'],
amplitude['sigma'],
exptKey)
self.residuals.setByKey(resName, res)
# Add in the integral data
for ds in expt.GetIntegralDataSets():
for var in ds['vars']:
resName = (exptKey, ds['calcKey'], var, 'integral data')
res = Residuals.IntegralDataResidual(resName, var,
exptKey,
ds['calcKey'],
ds['trajectory'],
ds['uncert_traj'],
ds['interval'])
self.residuals.setByKey(resName, res)
for ds in expt.scaled_extrema_data:
ds['exptKey'] = expt.name
ds['key'] = '%s_%simum_%s_%s' % (ds['var'], ds['type'],
str(ds['minTime']),
str(ds['maxTime']))
res = Residuals.ScaledExtremum(**ds)
self.AddResidual(res)
def get_expts(self):
return self.exptColl
def set_var_optimizable(self, var, is_optimizable):
for calc in self.get_calcs().values():
try:
calc.set_var_optimizable(var, is_optimizable)
except KeyError:
pass
self.params = self.calcColl.GetParameters()
GetExperimentCollection = get_expts
def SetCalculationCollection(self, calcColl):
self.calcColl = calcColl
self.params = calcColl.GetParameters()
def get_calcs(self):
return self.calcColl
GetCalculationCollection = get_calcs
def GetScaleFactors(self):
return self.internalVars['scaleFactors']
def GetResiduals(self):
return self.residuals
def GetCalculatedValues(self):
return self.calcVals
def GetInternalVariables(self):
return self.internalVars
def add_parameter_bounds(self, param_id, pmin, pmax):
"""
Add bounds on a specific parameter.
Cost evaluations will raise an exception if these bounds are violated.
"""
self.parameter_bounds[param_id] = pmin, pmax
def check_parameter_bounds(self, params):
self.params.update(params)
for id, (pmin, pmax) in self.parameter_bounds.items():
if not pmin <= self.params.get(id) <= pmax:
err = 'Parameter %s has value %f, which is outside of given bounds %f to %f.' % (id, self.params.get(id), pmin, pmax)
raise Utility.SloppyCellException(err)<|fim▁end|>
|
if isinstance(calcs, list):
|
<|file_name|>edns0codes.rs<|end_file_name|><|fim▁begin|>pub use super::IdentifierError;
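// The numeric values mirror the IANA EDNS0 option-code assignments: 0, 4 and
// 65535 are reserved, 65001-65534 are reserved for local/experimental use,
// and 10-65000 are currently unassigned (see from_u16 below).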
#[repr(u16)]
#[derive(PartialEq,Debug,Clone)]
pub enum EDNS0OptionCode {
LLQ = 1,
UL = 2,
NSID = 3,
DAU = 5,
DHU = 6,
N3U = 7,
EdnsClientSubnet = 8,
EDNS = 9,
}
impl EDNS0OptionCode {
pub fn from_u16(value: u16) -> Result<EDNS0OptionCode, IdentifierError> {
match value {
1 => Ok(EDNS0OptionCode::LLQ),
2 => Ok(EDNS0OptionCode::UL),
3 => Ok(EDNS0OptionCode::NSID),
5 => Ok(EDNS0OptionCode::DAU),
6 => Ok(EDNS0OptionCode::DHU),
7 => Ok(EDNS0OptionCode::N3U),
8 => Ok(EDNS0OptionCode::EdnsClientSubnet),
9 => Ok(EDNS0OptionCode::EDNS),
0 => Err(IdentifierError::ReservedIdentifierError(0 as i64)),
4 => Err(IdentifierError::ReservedIdentifierError(4 as i64)),
x @ 65001...65534 => Err(IdentifierError::ReservedIdentifierError(x as i64)),
65535 => Err(IdentifierError::ReservedIdentifierError(65535 as i64)),
x @ 10...65000 => Err(IdentifierError::UnassignedIdentifierError(x as i64)),
x @ _ => Err(IdentifierError::UnknownIdentifierError(x as i64)),
}<|fim▁hole|> }
}
#[cfg(test)]
mod test_edns0optioncode {
use super::EDNS0OptionCode;
use super::IdentifierError;
#[test]
fn test_variant_identity() {
assert_eq!(EDNS0OptionCode::LLQ, EDNS0OptionCode::from_u16(1).ok().unwrap());
assert_eq!(EDNS0OptionCode::UL, EDNS0OptionCode::from_u16(2).ok().unwrap());
assert_eq!(EDNS0OptionCode::NSID, EDNS0OptionCode::from_u16(3).ok().unwrap());
assert_eq!(EDNS0OptionCode::DAU, EDNS0OptionCode::from_u16(5).ok().unwrap());
assert_eq!(EDNS0OptionCode::DHU, EDNS0OptionCode::from_u16(6).ok().unwrap());
assert_eq!(EDNS0OptionCode::N3U, EDNS0OptionCode::from_u16(7).ok().unwrap());
assert_eq!(EDNS0OptionCode::EdnsClientSubnet, EDNS0OptionCode::from_u16(8).ok().unwrap());
assert_eq!(EDNS0OptionCode::EDNS, EDNS0OptionCode::from_u16(9).ok().unwrap());
}
#[test]
fn test_range_reserved_identity() {
assert_eq!(IdentifierError::ReservedIdentifierError(0), EDNS0OptionCode::from_u16(0).err().unwrap());
assert_eq!(IdentifierError::ReservedIdentifierError(4), EDNS0OptionCode::from_u16(4).err().unwrap());
for i in 65001..(65534u64+1) {
assert_eq!(IdentifierError::ReservedIdentifierError(i as i64), EDNS0OptionCode::from_u16(i as u16).err().unwrap());
}
assert_eq!(IdentifierError::ReservedIdentifierError(65535), EDNS0OptionCode::from_u16(65535).err().unwrap());
}
#[test]
fn test_range_unassigned_identity() {
for i in 10..(65000u64+1) {
assert_eq!(IdentifierError::UnassignedIdentifierError(i as i64), EDNS0OptionCode::from_u16(i as u16).err().unwrap());
}
}
}<|fim▁end|>
| |
<|file_name|>ImagePixel.cpp<|end_file_name|><|fim▁begin|>// ImagePixel.cpp: implementation of the CImagePixel class.
//
//////////////////////////////////////////////////////////////////////
#include "stdafx.h"
#include "ImagePixel.h"
#include "Image.h"
CImagePixel::CImagePixel()
{
m_pInput=NULL;
}
CImagePixel::CImagePixel(CImage* pInput)
{
m_pInput=pInput;
Init(pInput);
}
CImagePixel::~CImagePixel()
{
}
void CImagePixel::Init(CImage* pInput)
{
m_pInput=pInput;
m_pCPP.resize(m_pInput->GetHeight());
for(int i=0; i<m_pInput->GetHeight(); i++)
{
m_pCPP[i]=m_pInput->GetPixel(0, i);
}
}
CPixelRGB8 CImagePixel::GetPixel(float x, float y, int& count)
{
///< bilinear filtering getpixel
CPixelRGB8 color;
int x1,y1,x2,y2;
x1=(int)x;
x2=x1+1;
y1=(int)y;
y2=y1+1;
count=1;
// 4°³ Çȼ¿Áß Çϳª¶óµµ ¹Û¿¡ ÀÖ´Â °æ¿ì
if(x1<0 || x2>=m_pInput->GetWidth() || y1<0 || y2>=m_pInput->GetHeight())
{
count=0;
color=CPixelRGB8 (0,0,0);
}
// ¸ðµÎ ¾È¿¡ Àִ°æ¿ì
else
{
CPixelRGB8 c1,c2,c3,c4;
float errx=x-x1;
float erry=y-y1;
float ex1=(1.f-errx)*(1.f-errx);
float ex2=errx*errx;
float ey1=(1.f-erry)*(1.f-erry);
float ey2=erry*erry;
// p1: x1,y1
// p2: x1,y2
// p3: x2,y1
// p4: y2,y2
float w1,w2,w3,w4;
w1=ex1+ey1;
w2=ex1+ey2;
w3=ex2+ey1;
w4=ex2+ey2;
float sum=w1+w2+w3+w4;
w1/=sum;
w2/=sum;
w3/=sum;
w4/=sum;
count=4;
c1=GetPixel(x1,y1);
c2=GetPixel(x1,y2);
c3=GetPixel(x2,y1);
c4=GetPixel(x2,y2);
color=CPixelRGB8 (int((c1.R)*w1+(c2.R)*w2+(c3.R)*w3+(c4.R)*w4),
int((c1.G)*w1+(c2.G)*w2+(c3.G)*w3+(c4.G)*w4),
int((c1.B)*w1+(c2.B)*w2+(c3.B)*w3+(c4.B)*w4));
}
return color;
}
void CImagePixel::SetPixel(float fx, float fy, CPixelRGB8 color)
{
int width=m_pInput->GetWidth();
int height=m_pInput->GetHeight();
int x,y;
x=int(fx*width);
y=int(fy*height);
if(x<0) x=0;
if(y<0) y=0;
if(x>=width) x=width-1;
if(y>=height) y=height-1;
SetPixel(x,y,color);
}
<|fim▁hole|>void CImagePixel::DrawHorizLine(int x, int y, int width, CPixelRGB8 color)
{
{
std::vector<CPixelRGB8 *> & inputptr=m_pCPP;
if(x<0) return;
if(x>=m_pInput->GetWidth()) return;
if(y<0) return;
if(y>=m_pInput->GetHeight()) return;
if (x+width>=m_pInput->GetWidth())
width=m_pInput->GetWidth()-x-1;
for(int i=x; i<x+width; i++)
{
inputptr[y][i]=color;
}
}
}
void CImagePixel::DrawVertLine(int x, int y, int height, CPixelRGB8 color,bool bDotted)
{
int step=1;
if(bDotted) step=3;
std::vector<CPixelRGB8 *> & inputptr=(m_pCPP);
if(x<0) return;
if(x>=m_pInput->GetWidth()) return;
if(y<0) return;
if(y>=m_pInput->GetHeight()) return;
if (y+height>=m_pInput->GetHeight())
height=m_pInput->GetHeight()-y-1;
for(int j=y; j<y+height; j+=step)
{
inputptr[j][x]=color;
}
}
void CImagePixel::DrawLineBox(const TRect& rect, CPixelRGB8 color)
{
DrawHorizLine(rect.left, rect.top, rect.Width(), color);
DrawHorizLine(rect.left, rect.bottom-1, rect.Width(), color);
DrawVertLine(rect.left, rect.top, rect.Height(), color);
DrawVertLine(rect.right-1, rect.top, rect.Height(), color);
}
void CImagePixel::DrawBox(const TRect& _rect, CPixelRGB8 sColor)
{
TRect rect=_rect;
if(rect.left> rect.right) std::swap(rect.left, rect.right);
if(rect.top> rect.bottom) std::swap(rect.top, rect.bottom);
if(rect.left<0) rect.left=0;
if(rect.top<0) rect.top=0;
if(rect.bottom>Height())rect.bottom=Height();
if(rect.right>Width())rect.right=Width();
{
std::vector<CPixelRGB8 *> & inputptr=m_pCPP;
/*
// easy to read version
for(int j=rect.top; j<rect.bottom; j++)
{
CPixelRGB8* ptr=inputptr[j];
for(int i=rect.left; i<rect.right; i++)
{
memcpy(&ptr[i],&sColor,sizeof(CPixelRGB8));
}
}
*/
// fast version
CPixelRGB8* aBuffer;
int width=rect.right-rect.left;
if(width>0)
{
aBuffer=new CPixelRGB8[width];
for(int i=0; i<width; i++)
aBuffer[i]=sColor;
for(int j=rect.top; j<rect.bottom; j++)
{
CPixelRGB8* ptr=inputptr[j];
memcpy(&ptr[rect.left],aBuffer, sizeof(CPixelRGB8)*(width));
}
delete[] aBuffer;
}
}
}
void CImagePixel::Clear(CPixelRGB8 color)
{
int width=m_pInput->GetWidth();
int height=m_pInput->GetHeight();
DrawBox(TRect(0,0, width, height), color);
}
void CImagePixel::DrawPattern(int x, int y, const CImagePixel& patternPixel, bool bUseColorKey, CPixelRGB8 sColorkey, bool bOverideColor, CPixelRGB8 overrideColor)
{
int imageWidth=m_pInput->GetWidth();
int imageHeight=m_pInput->GetHeight();
int patternWidth=patternPixel.m_pInput->GetWidth();
int patternHeight=patternPixel.m_pInput->GetHeight();
int imagex, imagey;
if(bUseColorKey)
{
if(bOverideColor)
{
float ovR=float((overrideColor.R))/255.f;
float ovG=float((overrideColor.G))/255.f;
float ovB=float((overrideColor.B))/255.f;
for(int j=0; j<patternHeight; j++)
for(int i=0; i<patternWidth; i++)
{
imagex=x+i; imagey=y+j;
if(imagex>=0 && imagex<imageWidth && imagey>=0 && imagey <imageHeight)
{
if(memcmp(&patternPixel.GetPixel(i,j),&sColorkey, sizeof(CPixelRGB8))!=0)
{
// SetPixel( imagex, imagey, overrideColor);
CPixelRGB8& c=Pixel(imagex, imagey);
CPixelRGB8& cc=patternPixel.Pixel(i,j);
c.R=cc.R*ovR;
c.G=cc.G*ovG;
c.B=cc.B*ovB;
}
}
}
}
else
{
for(int j=0; j<patternHeight; j++)
for(int i=0; i<patternWidth; i++)
{
imagex=x+i; imagey=y+j;
if(imagex>=0 && imagex<imageWidth && imagey>=0 && imagey <imageHeight)
{
if(memcmp(&patternPixel.Pixel(i,j),&sColorkey, sizeof(CPixelRGB8))!=0)
GetPixel(imagex,imagey)=patternPixel.GetPixel(i,j);
}
}
}
}
else
{
ASSERT(!bOverideColor);
for(int j=0; j<patternHeight; j++)
{
CPixelRGB8* target=&GetPixel(x,y+j);
CPixelRGB8* source=&patternPixel.Pixel(0,j);
memcpy(target,source, sizeof(CPixelRGB8)*patternWidth);
}
}
}
void CImagePixel::DrawPattern(int x, int y, CImage* pPattern, bool bUseColorkey, CPixelRGB8 colorkey, bool bOverideColor, CPixelRGB8 overrideColor)
{
CImagePixel patternPixel(pPattern);
DrawPattern(x,y,patternPixel,bUseColorkey,colorkey,bOverideColor,overrideColor);
}
void CImagePixel::DrawLine(int x1, int y1, int x2, int y2, CPixelRGB8 color) //!< pass NULL for one of inputptr and inputptr2.
{
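// Integer midpoint (Bresenham-style) line rasterisation: 'p' is the decision
// variable, const1/const2 are its two possible increments, and 'delta' is the
// step taken along the minor axis.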
int dx,dy,x,y,x_end,p,const1,const2,y_end;
int delta;
dx=abs(x1-x2);
dy=abs(y1-y2);
if (((y1-y2)>0 && (x1-x2)>0) || ((y1-y2)<0 && (x1-x2)<0))
{
delta=1; // slope > 0
}
else
{
delta=-1; // slope < 0
}
if(dx>dy) // slope: 0 < |m| <= 1
{
p=2*dy-dx;
const1=2*dy;
const2=2*(dy-dx);
if(x1>x2)
{
x=x2;y=y2;
x_end=x1;
}
else
{
x=x1;y=y1;
x_end=x2;
}
SetPixel( x,y, color);
while(x<x_end)
{
x=x+1;
if(p<0)
{
p=p+const1;
}
else
{
y=y+delta;
p=p+const2;
}
SetPixel( x,y, color);
} // slope |m| > 1
}
else
{
p=2*dx-dy;
const1=2*dx;
const2=2*(dx-dy);
if(y1>y2)
{
y=y2;x=x2;
y_end=y1;
}
else
{
y=y1;x=x1;
y_end=y2;
}
SetPixel( x,y, color);
while(y<y_end)
{
y=y+1;
if(p<0)
{
p=p+const1;
}
else
{
x=x+delta;
p=p+const2;
}
SetPixel( x,y, color);
}
}
}
void CImagePixel::DrawSubPattern(int x, int y, const CImagePixel& patternPixel, const TRect& patternRect, bool bUseColorKey, CPixelRGB8 sColorkey)
{
int imageWidth=m_pInput->GetWidth();
int imageHeight=m_pInput->GetHeight();
int patternWidth=patternPixel.m_pInput->GetWidth();
int patternHeight=patternPixel.m_pInput->GetHeight();
ASSERT(patternRect.right<=patternWidth);
ASSERT(patternRect.top<=patternHeight);
int imagex, imagey;
if(bUseColorKey)
{
for(int j=patternRect.top; j<patternRect.bottom; j++)
for(int i=patternRect.left; i<patternRect.right; i++)
{
imagex=x+i-patternRect.left; imagey=y+j-patternRect.top;
if(imagex>=0 && imagex<imageWidth && imagey>=0 && imagey <imageHeight)
{
if(memcmp(&patternPixel.Pixel(i,j),&sColorkey, sizeof(CPixelRGB8))!=0)
Pixel(imagex,imagey)=patternPixel.Pixel(i,j);
}
}
}
else
{
TRect rect=patternRect;
if(x<0)
{
rect.left-=x;
x-=x;
}
if(x+rect.Width()>imageWidth)
{
int delta=x+rect.Width()-imageWidth;
rect.right-=delta;
}
if(rect.Width()>0)
{
for(int j=rect.top; j<rect.bottom; j++)
{
imagey=y+j-rect.top;
if(imagey>=0 && imagey <imageHeight)
{
CPixelRGB8* target=&Pixel(x,imagey);
CPixelRGB8* source=&patternPixel.Pixel(rect.left,j);
memcpy(target,source, sizeof(CPixelRGB8)*rect.Width());
}
}
}
}
}
void CImagePixel::DrawText(int x, int y, const char* str, bool bUserColorKey, CPixelRGB8 colorkey)
{
static CImage* pText=NULL;
if(!pText)
{
pText=new CImage();
pText->Load("resource/default/ascii.bmp");
}
CImage& cText=*pText;
CImagePixel patternPixel(&cText);
#define FONT_HEIGHT 16
#define FONT_WIDTH 8
int len=strlen(str);
for(int i=0; i<len; i++)
{
char c=str[i];
int code=(c-' ');
ASSERT(code>=0 && code<32*3);
int left=code%32*FONT_WIDTH ;
int top=code/32*FONT_HEIGHT;
DrawSubPattern(x+i*FONT_WIDTH , y, patternPixel, TRect(left,top,left+FONT_WIDTH , top+FONT_HEIGHT), bUserColorKey, colorkey);
}
}<|fim▁end|>
| |
<|file_name|>tray_icon_exceptions.py<|end_file_name|><|fim▁begin|>__author__ = 'himanshu'
<|fim▁hole|><|fim▁end|>
|
# TrayIcon
class TrayIcon(Exception):
pass
|
<|file_name|>register.go<|end_file_name|><|fim▁begin|>package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
GroupName = "console.openshift.io"
GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
// Install is a function which adds this version to a scheme
Install = schemeBuilder.AddToScheme
// SchemeGroupVersion generated code relies on this name
// Deprecated
SchemeGroupVersion = GroupVersion
// AddToScheme exists solely to keep the old generators creating valid code
// DEPRECATED
AddToScheme = schemeBuilder.AddToScheme
)
// Resource generated code relies on this being here, but it logically belongs to the group
// DEPRECATED
func Resource(resource string) schema.GroupResource {
return schema.GroupResource{Group: GroupName, Resource: resource}
}
// addKnownTypes adds types to API group
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(GroupVersion,
&ConsoleLink{},
&ConsoleLinkList{},
&ConsoleCLIDownload{},
&ConsoleCLIDownloadList{},
&ConsoleNotification{},
&ConsoleNotificationList{},<|fim▁hole|> &ConsoleQuickStart{},
&ConsoleQuickStartList{},
)
metav1.AddToGroupVersion(scheme, GroupVersion)
return nil
}<|fim▁end|>
|
&ConsoleExternalLogLink{},
&ConsoleExternalLogLinkList{},
&ConsoleYAMLSample{},
&ConsoleYAMLSampleList{},
|
<|file_name|>pipe-pingpong-bounded.rs<|end_file_name|><|fim▁begin|>// xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Ping-pong is a bounded protocol. This is place where I can
// experiment with what code the compiler should generate for bounded
// protocols.
use core::cell::Cell;
// This was generated initially by the pipe compiler, but it's been
// modified in hopefully straightforward ways.
mod pingpong {
use core::pipes;
use core::pipes::*;
use core::ptr;
pub struct Packets {
ping: Packet<ping>,
pong: Packet<pong>,
}
pub fn init() -> (client::ping, server::ping) {
let buffer = ~Buffer {
header: BufferHeader(),
data: Packets {
ping: mk_packet::<ping>(),
pong: mk_packet::<pong>()
}
};
do pipes::entangle_buffer(buffer) |buffer, data| {
data.ping.set_buffer(buffer);
data.pong.set_buffer(buffer);
ptr::addr_of(&(data.ping))
}
}
pub struct ping(server::pong);
pub struct pong(client::ping);
pub mod client {
use core::pipes;
use core::pipes::*;
use core::ptr;
pub fn ping(+pipe: ping) -> pong {
{<|fim▁hole|> send(pipe, message);
c
}
}
pub type ping = pipes::SendPacketBuffered<::pingpong::ping,
::pingpong::Packets>;
pub type pong = pipes::RecvPacketBuffered<::pingpong::pong,
::pingpong::Packets>;
}
pub mod server {
use core::pipes;
use core::pipes::*;
use core::ptr;
pub type ping = pipes::RecvPacketBuffered<::pingpong::ping,
::pingpong::Packets>;
pub fn pong(+pipe: pong) -> ping {
{
let b = pipe.reuse_buffer();
let s = SendPacketBuffered(ptr::addr_of(&(b.buffer.data.ping)));
let c = RecvPacketBuffered(ptr::addr_of(&(b.buffer.data.ping)));
let message = ::pingpong::pong(s);
send(pipe, message);
c
}
}
pub type pong = pipes::SendPacketBuffered<::pingpong::pong,
::pingpong::Packets>;
}
}
mod test {
use core::pipes::recv;
use pingpong::{ping, pong};
pub fn client(+chan: ::pingpong::client::ping) {
use pingpong::client;
let chan = client::ping(chan); return;
error!("Sent ping");
let pong(_chan) = recv(chan);
error!("Received pong");
}
pub fn server(+chan: ::pingpong::server::ping) {
use pingpong::server;
let ping(chan) = recv(chan); return;
error!("Received ping");
let _chan = server::pong(chan);
error!("Sent pong");
}
}
pub fn main() {
let (client_, server_) = ::pingpong::init();
let client_ = Cell(client_);
let server_ = Cell(server_);
do task::spawn {
let client__ = client_.take();
test::client(client__);
};
do task::spawn {
let server__ = server_.take();
test::server(server__);
};
}<|fim▁end|>
|
let b = pipe.reuse_buffer();
let s = SendPacketBuffered(ptr::addr_of(&(b.buffer.data.pong)));
let c = RecvPacketBuffered(ptr::addr_of(&(b.buffer.data.pong)));
let message = ::pingpong::ping(s);
|
<|file_name|>deployments.js<|end_file_name|><|fim▁begin|>import Ember from 'ember';
<|fim▁hole|> const job = this.modelFor('jobs.job');
return RSVP.all([job.get('deployments'), job.get('versions')]).then(() => job);
},
});<|fim▁end|>
|
const { Route, RSVP } = Ember;
export default Route.extend({
model() {
|
<|file_name|>fullBlockActions.ts<|end_file_name|><|fim▁begin|>import { gql } from '@apollo/client'
import fullBlockShareFragment from 'v2/components/FullBlock/components/FullBlockShare/fragments/fullBlockShare'
<|fim▁hole|> fragment FullBlockActions on Konnectable {
__typename
... on Image {
find_original_url
downloadable_image: resized_image_url(downloadable: true)
}
... on Text {
find_original_url
}
... on ConnectableInterface {
source {
title
url
}
}
... on Block {
can {
mute
potentially_edit_thumbnail
edit_thumbnail
}
}
...FullBlockShare
}
${fullBlockShareFragment}
`<|fim▁end|>
|
export default gql`
|
<|file_name|>x-large.directive.ts<|end_file_name|><|fim▁begin|>import { Directive, ElementRef, Renderer } from '@angular/core';
/*
* Directive
* XLarge is a simple directive to show how one is made
*/
@Directive({
selector: '[x-large]' // using [ ] means selecting attributes
})
export class XLargeDirective {
constructor(
public element: ElementRef,
public renderer: Renderer
) {
// simple DOM manipulation to set font size to x-large
// `nativeElement` is the direct reference to the DOM element<|fim▁hole|> // element.nativeElement.style.fontSize = 'x-large';
// for server/webworker support use the renderer
renderer.setElementStyle(element.nativeElement, 'fontSize', 'x-large');
}
}<|fim▁end|>
| |
<|file_name|>http.py<|end_file_name|><|fim▁begin|>from django.http import HttpResponse
# this redirect key is (hopefully) unique but generic so it doesn't signpost the use of DMP/Django.
# not prefixing with X- because that's now deprecated.
REDIRECT_HEADER_KEY = 'Redirect-Location'
###############################################################################
### Redirect with Javascript instead of 301/302
### See also exceptions.py for two additional redirect methods<|fim▁hole|> '''
Sends a regular HTTP 200 OK response that contains Javascript to
redirect the browser:
<script>window.location.assign("...");</script>.
If redirect_to is empty, it redirects to the current location (essentially refreshing
the current page):
<script>window.location.assign(window.location.href);</script>.
Normally, redirecting should be done via HTTP 302 rather than Javascript.
Use this class when your only choice is through Javascript.
For example, suppose you need to redirect the top-level page from an Ajax response.
Ajax redirects normally only redirects the Ajax itself (not the page that initiated the call),
and this default behavior is usually what is needed. However, there are instances when the
entire page must be redirected, even if the call is Ajax-based.
After the redirect_to parameter, you can use any of the normal HttpResponse constructor arguments.
If you need to omit the surrounding <script> tags, send "include_script_tag=False" to
the constructor. One use case for omitting the tags is when the caller is a
JQuery $.script() ajax call.
A custom header is set in the response. This allows middleware, your web server, or
calling JS code to adjust the redirect if needed.
Note that this method doesn't use the <meta> tag or Refresh header method because
they aren't predictable within Ajax (for example, JQuery seems to ignore them).
'''
def __init__(self, redirect_to=None, *args, **kwargs):
# set up the code
if redirect_to:
script = 'window.location.assign("{}");'.format(redirect_to.split('#')[0])
else:
script = 'window.location.assign(window.location.href.split("#")[0])'
# do we need to add the <script> tag? (that's the default)
if kwargs.pop('include_script_tag', True):
script = '<script>{}</script>'.format(script)
# call the super
super().__init__(script, *args, **kwargs)
# add the custom header
self[REDIRECT_HEADER_KEY] = redirect_to or 'window.location.href'<|fim▁end|>
|
class HttpResponseJavascriptRedirect(HttpResponse):
|
<|file_name|>validate.js<|end_file_name|><|fim▁begin|>import is from 'is';
// eslint-disable-next-line import/prefer-default-export
export const validNumber = (num, min, max) => {
if (is.number(min)) {
// Make sure the new value isn't less than the min value.
if (num < Number(min)) {
return{
showError: true,
errorMsg: `Please enter a valid number greater than or equal to ${min}.`
};
}
}
if (is.number(max)) {
if (num > Number(max)) {
return{
showError: true,
errorMsg: `Please enter a valid number less than or equal to ${max}.`
};
}
}
return{<|fim▁hole|><|fim▁end|>
|
showError: false,
errorMsg: ''
};
};
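
// Illustrative usage (hypothetical values, not part of the module above):
//   validNumber(7, 1, 5) -> { showError: true,  errorMsg: 'Please enter a valid number less than or equal to 5.' }
//   validNumber(3, 1, 5) -> { showError: false, errorMsg: '' }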
|
<|file_name|>data_manager_qiime_download.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Data manager for reference data for the QIIME Galaxy tools
import argparse
import ftplib
import json
import os
import tarfile
import zipfile
import requests
protocol = {
"unite": "http",
"greengenes": "ftp",
"silva": "http",
"img": "ftp"
}
baseUrl = {
"unite": "http://unite.ut.ee/sh_files/sh_qiime_release_",
"greengenes": "greengenes.microbio.me",
"silva": "http://www.arb-silva.de/fileadmin/silva_databases/qiime/Silva_",
"img": "ftp.microbio.me"
}
ftp_dir = {
"greengenes": "/greengenes_release/gg_",
"img": ""
}
ftp_file_prefix = {
"greengenes": "gg_",
"img": ""
}
ftp_file_suffix = {
"greengenes": "_otus",
"img": ""
}
extension = {
"unite": "zip",
"greengenes": "tar.gz",
"silva": {
"104_release": "tgz",
"108_release": "tgz",
"108_release_curated": "tgz",
"111_release": "tgz",
"119_consensus_majority_taxonomy": "zip",
"119_release": "zip",
"119_release_aligned_rep_files": "tar.gz",
"123_release": "zip",
"128_release": "tgz"},
"img": "tgz"
}
filetypes = ["rep_set", "rep_set_aligned", "taxonomy", "trees"]
# Utility functions for interacting with Galaxy JSON
def read_input_json(jsonfile):
"""Read the JSON supplied from the data manager tool
Returns a tuple (param_dict,extra_files_path)
'param_dict' is an arbitrary dictionary of parameters
input into the tool; 'extra_files_path' is the path
to a directory where output files must be put for the
receiving data manager to pick them up.
NB the directory pointed to by 'extra_files_path'
doesn't exist initially, it is the job of the script
to create it if necessary.
"""
params = json.loads(open(jsonfile).read())
return (params['param_dict'],
params['output_data'][0]['extra_files_path'])
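
# Illustrative sketch of the expected input (assumed values, not from a real
# Galaxy job): the JSON read above should look roughly like
#
#   {
#     "param_dict": {"database": "greengenes", "version": "13_8"},
#     "output_data": [{"extra_files_path": "/path/to/dataset_files"}]
#   }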
# Utility functions for creating data table dictionaries
#
# Example usage:
# >>> d = create_data_tables_dict()
# >>> add_data_table(d,'my_data')
# >>> add_data_table_entry(dict(dbkey='hg19',value='human'))
# >>> add_data_table_entry(dict(dbkey='mm9',value='mouse'))
# >>> print str(json.dumps(d))
def create_data_tables_dict():
"""Return a dictionary for storing data table information
Returns a dictionary that can be used with 'add_data_table'
and 'add_data_table_entry' to store information about a
data table. It can be converted to JSON to be sent back to
the data manager.
"""<|fim▁hole|> d['data_tables'] = {}
return d
def add_data_table(d, table):
"""Add a data table to the data tables dictionary
Creates a placeholder for a data table called 'table'.
"""
d['data_tables'][table] = []
def add_data_table_entry(d, table, entry):
"""Add an entry to a data table
Appends an entry to the data table 'table'. 'entry'
should be a dictionary where the keys are the names of
columns in the data table.
Raises an exception if the named data table doesn't
exist.
"""
try:
d['data_tables'][table].append(entry)
except KeyError:
raise Exception("add_data_table_entry: no table '%s'" % table)
def get_ftp_file(ftp, filename):
"""
"""
try:
ftp.retrbinary("RETR " + filename, open(filename, 'wb').write)
except:
print("Error")
def download_archive(db, version, ext):
"""
"""
filepath = "%s_%s.%s" % (db, version, ext)
if protocol[db] == "http":
url = "%s%s.%s" % (baseUrl[db], version, ext)
r = requests.get(url, stream=True)
r.raise_for_status()
with open(filepath, "wb") as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
elif protocol[db] == "ftp":
ftp = ftplib.FTP(baseUrl[db])
ftp.login("anonymous", "ftplib-example-1")
if db == "greengenes" and version == "13_8":
ftp.cwd("%s%s" % (ftp_dir[db], "13_5"))
else:
ftp.cwd("%s%s" % (ftp_dir[db], version))
filepath = "%s%s%s.%s" % (
ftp_file_prefix[db],
version,
ftp_file_suffix[db],
ext)
get_ftp_file(ftp, filepath)
ftp.quit()
return filepath
def find_archive_content_path(archive_content_path):
"""
"""
content = os.listdir(archive_content_path)
archive_content = []
for x in content:
if not x.startswith(".") and not x.startswith("_"):
archive_content.append(x)
if len(archive_content) == 1:
archive_content_path = os.path.join(
archive_content_path,
archive_content[0])
return archive_content_path
def extract_archive(filepath, ext, db):
"""
"""
archive_content_path = "tmp"
if ext == "tar.gz" or ext == "tgz":
tar = tarfile.open(filepath)
tar.extractall(path=archive_content_path)
tar.close()
archive_content_path = find_archive_content_path(archive_content_path)
elif ext == "zip":
zip_ref = zipfile.ZipFile(filepath, 'r')
zip_ref.extractall(archive_content_path)
zip_ref.close()
archive_content_path = find_archive_content_path(archive_content_path)
return archive_content_path
def move_unite_files(archive_content_path, filename_prefix, name_prefix, data_tables, target_dir):
"""
"""
archive_content = os.listdir(archive_content_path)
for content in archive_content:
content_filepath = os.path.join(archive_content_path, content)
content_name_prefix = "%s - %s" % (name_prefix, content.split(".")[0])
content_filename_prefix = "%s_%s" % (filename_prefix, content)
if content.find("refs") != -1:
move_file(
content_filepath,
content_filename_prefix,
content_name_prefix,
data_tables,
os.path.join(target_dir, "rep_set"),
"rep_set")
elif content.find("taxonomy") != -1:
move_file(
content_filepath,
content_filename_prefix,
content_name_prefix,
data_tables,
os.path.join(target_dir, "taxonomy"),
"taxonomy")
def move_file(input_filepath, filename, name, data_tables, target_dir, filetype):
"""
"""
output_filepath = os.path.join(target_dir, filename)
os.rename(input_filepath, output_filepath)
add_data_table_entry(
data_tables,
"qiime_%s" % (filetype),
dict(
dbkey=filename,
value=os.path.splitext(filename)[0],
name=name,
path=output_filepath))
def move_dir_content(input_path, filename_prefix, name_prefix, data_tables, target_dir, filetype):
"""
"""
for content in os.listdir(input_path):
if content.startswith("."):
continue
content_path = os.path.join(input_path, content)
content_name_prefix = "%s - %s" % (name_prefix, content.split(".")[0])
content_filename_prefix = "%s_%s" % (filename_prefix, content)
if os.path.isdir(content_path):
move_dir_content(
content_path,
content_filename_prefix,
content_name_prefix,
data_tables,
target_dir,
filetype)
else:
move_file(
content_path,
content_filename_prefix,
content_name_prefix,
data_tables,
target_dir,
filetype)
def move_files(archive_content_path, filename_prefix, name_prefix, data_tables, target_dir, db, version):
"""
"""
for filetype in filetypes:
if filetype == "rep_set_aligned":
if db == "greengenes" and version == "12_10":
continue
filetype_target_dir = os.path.join(
target_dir,
filetype)
filetype_path = os.path.join(
archive_content_path,
filetype)
move_dir_content(
filetype_path,
filename_prefix,
name_prefix,
data_tables,
filetype_target_dir,
filetype)
def download_db(data_tables, db, version, target_dir):
"""Download QIIME database
Creates references to the specified file(s) on the Galaxy
server in the appropriate data table (determined from the
file extension).
The 'data_tables' dictionary should have been created using
the 'create_data_tables_dict' and 'add_data_table' functions.
Arguments:
data_tables: a dictionary containing the data table info
db: name of the database
version: version of the database
table_name: name of the table
target_dir: directory to put copy or link to the data file
"""
ext = extension[db]
if db == "silva":
ext = ext[version]
print("Download archive")
filepath = download_archive(db, version, ext)
print("Extract archive %s" % filepath)
archive_content_path = extract_archive(filepath, ext, db)
print("Moving file from %s" % archive_content_path)
filename_prefix = "%s_%s" % (db, version)
name_prefix = "%s (%s)" % (db, version)
if db == "greengenes" or db == "silva":
move_files(
archive_content_path,
filename_prefix,
name_prefix,
data_tables,
target_dir,
db,
version)
elif db == "unite":
move_unite_files(
archive_content_path,
filename_prefix,
name_prefix,
data_tables,
target_dir)
if __name__ == "__main__":
print("Starting...")
# Read command line
parser = argparse.ArgumentParser(
description='Download QIIME reference database')
parser.add_argument('--database', help="Database name")
parser.add_argument('--version', help="Database version")
parser.add_argument('--jsonfile', help="Output JSON file")
args = parser.parse_args()
jsonfile = args.jsonfile
# Read the input JSON
params, target_dir = read_input_json(jsonfile)
# Make the target directory
print("Making %s" % target_dir)
os.mkdir(target_dir)
os.mkdir(os.path.join(target_dir, "rep_set"))
os.mkdir(os.path.join(target_dir, "rep_set_aligned"))
os.mkdir(os.path.join(target_dir, "taxonomy"))
os.mkdir(os.path.join(target_dir, "trees"))
# Set up data tables dictionary
data_tables = create_data_tables_dict()
add_data_table(data_tables, "qiime_rep_set")
add_data_table(data_tables, "qiime_rep_set_aligned")
add_data_table(data_tables, "qiime_taxonomy")
add_data_table(data_tables, "qiime_trees")
# Fetch data from specified data sources
download_db(
data_tables,
args.database,
args.version,
target_dir)
# Write output JSON
print("Outputting JSON")
print(str(json.dumps(data_tables)))
with open(jsonfile, 'w') as out:
json.dump(data_tables, out)
print("Done.")<|fim▁end|>
|
d = {}
|
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>"""
DotStar_Emulator
config.py in current working directory will be automatically read and loaded.
Author: Christopher Ross
License: MIT Something Rather
"""
from DotStar_Emulator.manage import manage
<|fim▁hole|><|fim▁end|>
|
if __name__ == "__main__":
manage()
|
<|file_name|>locales.service.d.ts<|end_file_name|><|fim▁begin|>import { Locale, LocaleData } from './locale.class';
export declare function getLocale(key: string): Locale;
export declare function listLocales(): string[];
export declare function mergeConfigs(parentConfig: LocaleData, childConfig: LocaleData): {
[key: string]: any;
};
export declare function getSetGlobalLocale(key: string, values?: LocaleData): string;
<|fim▁hole|><|fim▁end|>
|
export declare function defineLocale(name: string, config?: LocaleData): Locale;
|
<|file_name|>tree.rs<|end_file_name|><|fim▁begin|>use std::cmp;
use utils::crypto::hash::{Digest, Hash};
use errors::common::CommonError;
pub use services::ledger::merkletree::proof::{
Proof,
Lemma,
Positioned
};
pub type TreeLeafData = Vec<u8>;
/// Binary Tree where leaves hold a stand-alone value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Tree {
Empty {
hash: Vec<u8>
},
Leaf {
hash: Vec<u8>,
value: TreeLeafData
},
Node {
hash: Vec<u8>,
left: Box<Tree>,
right: Box<Tree>
}
}
impl Tree {
/// Create an empty tree
pub fn empty(hash: Digest) -> Self {
Tree::Empty {
hash: hash.to_vec()
}
}
/// Create a new tree
pub fn new(hash: Digest, value: TreeLeafData) -> Self {
Tree::Leaf {
hash: hash.to_vec(),
value: value
}
}
/// Create a new leaf
pub fn new_leaf(value: TreeLeafData) -> Result<Tree, CommonError> {
let hash = Hash::hash_leaf(&value)?;
Ok(Tree::new(hash, value))
}
/// Returns a hash from the tree.
pub fn hash(&self) -> &Vec<u8> {
match *self {
Tree::Empty { ref hash } => hash,
Tree::Leaf { ref hash, .. } => hash,
Tree::Node { ref hash, .. } => hash
}
}
/// Returns a borrowing iterator over the leaves of the tree.
pub fn iter(&self) -> LeavesIterator {
LeavesIterator::new(self)<|fim▁hole|> pub fn get_height(&self) -> usize {
match *self {
Tree::Empty { .. } => { 0 },
Tree::Node { ref left, ref right, .. } => {
1 + cmp::max(left.get_height(),right.get_height())
},
Tree::Leaf { .. } => { 0 }
}
}
pub fn get_count(&self) -> usize {
match *self {
Tree::Empty { .. } => { 0 },
Tree::Node { ref left, ref right, .. } => {
left.get_count() + right.get_count()
},
Tree::Leaf { .. } => { 1 }
}
}
}
/// An borrowing iterator over the leaves of a `Tree`.
/// Adapted from http://codereview.stackexchange.com/q/110283.
#[allow(missing_debug_implementations)]
pub struct LeavesIterator<'a> {
current_value: Option<&'a TreeLeafData>,
right_nodes: Vec<&'a Tree>
}
impl <'a> LeavesIterator<'a> {
fn new(root: &'a Tree) -> Self {
let mut iter = LeavesIterator {
current_value: None,
right_nodes: Vec::new()
};
iter.add_left(root);
iter
}
fn add_left(&mut self, mut tree: &'a Tree) {
loop {
match *tree {
Tree::Empty { .. } => {
self.current_value = None;
break;
},
Tree::Node { ref left, ref right, .. } => {
self.right_nodes.push(right);
tree = left;
},
Tree::Leaf { ref value, .. } => {
self.current_value = Some(value);
break;
}
}
}
}
}
impl <'a> Iterator for LeavesIterator<'a> {
type Item = &'a TreeLeafData;
fn next(&mut self) -> Option<&'a TreeLeafData> {
let result = self.current_value.take();
if let Some(rest) = self.right_nodes.pop() {
self.add_left(rest);
}
result
}
}
/// An iterator over the leaves of a `Tree`.
#[allow(missing_debug_implementations)]
pub struct LeavesIntoIterator {
current_value: Option<TreeLeafData>,
right_nodes: Vec<Tree>
}
impl LeavesIntoIterator {
fn new(root: Tree) -> Self {
let mut iter = LeavesIntoIterator {
current_value: None,
right_nodes: Vec::new()
};
iter.add_left(root);
iter
}
fn add_left(&mut self, mut tree: Tree) {
loop {
match tree {
Tree::Empty { .. } => {
self.current_value = None;
break;
},
Tree::Node { left, right, .. } => {
self.right_nodes.push(*right);
tree = *left;
},
Tree::Leaf { value, .. } => {
self.current_value = Some(value);
break;
}
}
}
}
}
impl Iterator for LeavesIntoIterator {
type Item = TreeLeafData;
fn next(&mut self) -> Option<TreeLeafData> {
let result = self.current_value.take();
if let Some(rest) = self.right_nodes.pop() {
self.add_left(rest);
}
result
}
}
impl IntoIterator for Tree {
type Item = TreeLeafData;
type IntoIter = LeavesIntoIterator;
fn into_iter(self) -> Self::IntoIter {
LeavesIntoIterator::new(self)
}
}<|fim▁end|>
|
}
|
<|file_name|>5.py<|end_file_name|><|fim▁begin|>x = 2
cont = 0
while x >= 0:
y = 0
while y <= 4:
print(y) # any command<|fim▁hole|> x = x - 1<|fim▁end|>
|
y = y - 1
|
<|file_name|>fix_usercache_processing.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Fixes usercache processing
# If there are any errors in the usercache processing, fix them and reload the data
# Basic flow
# - Copy data back to user cache
# - Attempt to moveToLongTerm
# - Find errors
# - Fix errors
# - Repeat until no errors are found
from future import standard_library
standard_library.install_aliases()
from builtins import *
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import emission.core.get_database as edb
import emission.net.usercache.abstract_usercache_handler as euah
import emission.net.usercache.abstract_usercache as enua
def fix_usercache_errors():
copy_to_usercache()
move_to_long_term()
def copy_to_usercache():
# Step 1: Copy data back to user cache
error_it = edb.get_timeseries_error_db().find()
uc = edb.get_usercache_db()
te = edb.get_timeseries_error_db()
logging.info("Found %d errors in this round" % edb.get_timeseries_error_db.estimate_document_count())
for error in error_it:
logging.debug("Copying entry %s" % error["metadata"])
save_result = uc.save(error)
remove_result = te.remove(error["_id"])
logging.debug("save_result = %s, remove_result = %s" % (save_result, remove_result))
logging.info("step copy_to_usercache DONE")
<|fim▁hole|>def move_to_long_term():
cache_uuid_list = enua.UserCache.get_uuid_list()
logging.info("cache UUID list = %s" % cache_uuid_list)
for uuid in cache_uuid_list:
logging.info("*" * 10 + "UUID %s: moving to long term" % uuid + "*" * 10)
uh = euah.UserCacheHandler.getUserCacheHandler(uuid)
uh.moveToLongTerm()
if __name__ == '__main__':
fix_usercache_errors()<|fim▁end|>
| |
<|file_name|>controller.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnInit, OnDestroy } from '@angular/core';
import { FormControl } from '@angular/forms';
import { Observable } from 'rxjs/Observable';
import { Subscription } from 'rxjs/Subscription';
import 'rxjs/add/observable/interval';
import { LifeService } from '../life.service';
import { FileService } from '../file.service';
import config from '../app.config';
/**
* This component holds the buttons that allows
* interaction with the application. Also it calls the proper
* service methods.
*/
@Component({
selector: 'app-controller',
templateUrl: './controller.component.html'
})
export class ControllerComponent implements OnInit, OnDestroy {
private handler: Subscription;
private playing = false;
private hasFile = false;
private subs: Array<Subscription> = [];
constructor(
private service: LifeService,
private fileService: FileService
) { }
ngOnInit() {
this.subs.push(this.fileService.parsedFile.subscribe(state => {
this.service.initUniverse(config.cols, config.rows);
this.service.setRules(state.rules);
this.service.loadState(state);
}));
this.subs.push(this.service.state.subscribe(_ => this.hasFile = true));
}
ngOnDestroy() {
this.subs.forEach(sub => sub.unsubscribe());
}
next() {
this.stop();
this.service.nextGeneration();
}
play() {
if (this.playing) {
return;
}
this.playing = true;
this.handler = Observable.interval(config.interval)
.subscribe(() => this.service.nextGeneration());
}
stop() {
if (this.playing) {
this.handler.unsubscribe();<|fim▁hole|> }
upload() {
this.fileService.upload(config.cols, config.rows);
}
}<|fim▁end|>
|
this.playing = false;
}
|
<|file_name|>mapnik_geometry_to_geojson.cpp<|end_file_name|><|fim▁begin|>/*****************************************************************************
*
* This file is part of Mapnik (c++ mapping toolkit)
*
* Copyright (C) 2021 Artem Pavlenko
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,<|fim▁hole|> * Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*****************************************************************************/
// mapnik
#include <mapnik/util/geometry_to_geojson.hpp>
#include <mapnik/json/geometry_generator_grammar.hpp>
namespace mapnik {
namespace util {
bool to_geojson(std::string& json, mapnik::geometry::geometry<double> const& geom)
{
using sink_type = std::back_insert_iterator<std::string>;
static const mapnik::json::geometry_generator_grammar<sink_type, mapnik::geometry::geometry<double>> grammar;
sink_type sink(json);
return boost::spirit::karma::generate(sink, grammar, geom);
}
} // namespace util
} // namespace mapnik<|fim▁end|>
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
<|file_name|>list.js<|end_file_name|><|fim▁begin|>export default function formatList(xs, { ifEmpty = 'нет', joint = ', ' } = {}) {
return (!xs || xs.length === 0) ? ifEmpty : xs.join(joint)<|fim▁hole|><|fim▁end|>
|
}
|
<|file_name|>utils.ts<|end_file_name|><|fim▁begin|>declare var math: any;
class Utils {
static scopeClone(scope): any {
var newScope = {};
_.each(scope, function (value, name) {
if (value instanceof Function) {<|fim▁hole|> } else {
newScope[name] = math.clone(value);
}
});
return newScope;
}
}
export = Utils;<|fim▁end|>
|
newScope[name] = value;
|
<|file_name|>models.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
from django.db import models
import reversion
<|fim▁hole|> Page,
PageSection,
Section,
)<|fim▁end|>
|
from base.model_utils import TimeStampedModel
from base.singleton import SingletonModel
from block.models import (
|
<|file_name|>pyfftw_bindings_test.py<|end_file_name|><|fim▁begin|># Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from __future__ import division
import numpy as np
import pytest
import odl
from odl.trafos.backends import pyfftw_call, PYFFTW_AVAILABLE
from odl.util import (
is_real_dtype, complex_dtype)
from odl.util.testutils import (
all_almost_equal, simple_fixture)
pytestmark = pytest.mark.skipif(not PYFFTW_AVAILABLE,
reason='`pyfftw` backend not available')
# --- pytest fixtures --- #
planning = simple_fixture('planning', ['estimate', 'measure', 'patient',
'exhaustive'])
direction = simple_fixture('direction', ['forward', 'backward'])
# --- helper functions --- #
def _random_array(shape, dtype):
if is_real_dtype(dtype):
return np.random.rand(*shape).astype(dtype)
else:
return (np.random.rand(*shape).astype(dtype) +
1j * np.random.rand(*shape).astype(dtype))
def _params_from_dtype(dtype):
if is_real_dtype(dtype):
halfcomplex = True
else:
halfcomplex = False
return halfcomplex, complex_dtype(dtype)
def _halfcomplex_shape(shape, axes=None):
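# Shape of the half-complex (rfftn) output: the last transformed axis shrinks
# to n // 2 + 1, while all other axes keep their length.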
if axes is None:
axes = tuple(range(len(shape)))
try:
axes = (int(axes),)
except TypeError:
pass
shape = list(shape)
shape[axes[-1]] = shape[axes[-1]] // 2 + 1
return shape
# ---- pyfftw_call ---- #
def test_pyfftw_call_forward(odl_floating_dtype):
# Test against Numpy's FFT
dtype = odl_floating_dtype
if dtype == np.dtype('float16'): # not supported, skipping
return
halfcomplex, out_dtype = _params_from_dtype(dtype)
for shape in [(10,), (3, 4, 5)]:
arr = _random_array(shape, dtype)
if halfcomplex:
true_dft = np.fft.rfftn(arr)
dft_arr = np.empty(_halfcomplex_shape(shape), dtype=out_dtype)
else:
true_dft = np.fft.fftn(arr)
dft_arr = np.empty(shape, dtype=out_dtype)
pyfftw_call(arr, dft_arr, direction='forward',
halfcomplex=halfcomplex, preserve_input=False)
assert all_almost_equal(dft_arr, true_dft)
def test_pyfftw_call_threads():
shape = (3, 4, 5)
arr = _random_array(shape, dtype='complex64')
true_dft = np.fft.fftn(arr)
dft_arr = np.empty(shape, dtype='complex64')
pyfftw_call(arr, dft_arr, direction='forward', preserve_input=False,
threads=4)
assert all_almost_equal(dft_arr, true_dft)
shape = (1000,) # Trigger cpu_count() as number of threads
arr = _random_array(shape, dtype='complex64')
true_dft = np.fft.fftn(arr)
dft_arr = np.empty(shape, dtype='complex64')
pyfftw_call(arr, dft_arr, direction='forward', preserve_input=False)
assert all_almost_equal(dft_arr, true_dft)
def test_pyfftw_call_backward(odl_floating_dtype):
# Test against Numpy's IFFT, no normalization
dtype = odl_floating_dtype
if dtype == np.dtype('float16'): # not supported, skipping
return
halfcomplex, in_dtype = _params_from_dtype(dtype)
for shape in [(10,), (3, 4, 5)]:
# Scaling happens wrt output (large) shape
idft_scaling = np.prod(shape)
if halfcomplex:
arr = _random_array(_halfcomplex_shape(shape), in_dtype)
true_idft = np.fft.irfftn(arr, shape) * idft_scaling
else:
arr = _random_array(shape, in_dtype)
true_idft = np.fft.ifftn(arr) * idft_scaling
idft_arr = np.empty(shape, dtype=dtype)
pyfftw_call(arr, idft_arr, direction='backward',
halfcomplex=halfcomplex)
assert all_almost_equal(idft_arr, true_idft)
def test_pyfftw_call_bad_input(direction):
# Complex
# Bad dtype
dtype_in = np.dtype('complex128')
arr_in = np.empty(3, dtype=dtype_in)
bad_dtypes_out = np.sctypes['float'] + np.sctypes['complex']
if dtype_in in bad_dtypes_out:
# This one is correct, so we remove it
bad_dtypes_out.remove(dtype_in)
for bad_dtype in bad_dtypes_out:
arr_out = np.empty(3, dtype=bad_dtype)
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, halfcomplex=False,
direction=direction)
# Bad shape
shape = (3, 4)
arr_in = np.empty(shape, dtype='complex128')
bad_shapes_out = [(3, 3), (3,), (4,), (3, 4, 5), ()]
for bad_shape in bad_shapes_out:
arr_out = np.empty(bad_shape, dtype='complex128')
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, halfcomplex=False,
direction=direction)
# Duplicate axes
arr_in = np.empty((3, 4, 5), dtype='complex128')
arr_out = np.empty_like(arr_in)
bad_axes_list = [(0, 0, 1), (1, 1, 1), (-1, -1)]
for bad_axes in bad_axes_list:
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, axes=bad_axes,
direction=direction)
# Axis entry out of range
arr_in = np.empty((3, 4, 5), dtype='complex128')
arr_out = np.empty_like(arr_in)
bad_axes_list = [(0, 3), (-4,)]
for bad_axes in bad_axes_list:
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, axes=bad_axes,
direction=direction)
# Halfcomplex not possible for complex data
arr_in = np.empty((3, 4, 5), dtype='complex128')
arr_out = np.empty_like(arr_in)
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, halfcomplex=True,
direction=direction)
# Data type mismatch
arr_in = np.empty((3, 4, 5), dtype='complex128')
arr_out = np.empty_like(arr_in, dtype='complex64')
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, direction=direction)
# Halfcomplex
# Bad dtype
dtype_in = 'float64'
arr_in = np.empty(10, dtype=dtype_in)
bad_dtypes_out = np.sctypes['float'] + np.sctypes['complex']
try:
# This one is correct, so we remove it
bad_dtypes_out.remove(np.dtype('complex128'))
except ValueError:
pass
for bad_dtype in bad_dtypes_out:
arr_out = np.empty(6, dtype=bad_dtype)
with pytest.raises(ValueError):
if direction == 'forward':
pyfftw_call(arr_in, arr_out, halfcomplex=True,
direction='forward')
else:
pyfftw_call(arr_out, arr_in, halfcomplex=True,
direction='backward')
# Bad shape
shape = (3, 4, 5)
axes_list = [None, (0, 1), (1,), (1, 2), (2, 1), (-1, -2, -3)]
arr_in = np.empty(shape, dtype='float64')
# Correct shapes:
# [(3, 4, 3), (3, 3, 5), (3, 3, 5), (3, 4, 3), (3, 3, 5), (2, 4, 5)]
bad_shapes_out = [(3, 4, 2), (3, 4, 3), (2, 3, 5), (3, 2, 3),
(3, 4, 3), (3, 4, 3)]
always_bad_shapes = [(3, 4), (3, 4, 5)]
for bad_shape, axes in zip(bad_shapes_out, axes_list):
for always_bad_shape in always_bad_shapes:
arr_out = np.empty(always_bad_shape, dtype='complex128')
with pytest.raises(ValueError):
if direction == 'forward':
pyfftw_call(arr_in, arr_out, axes=axes, halfcomplex=True,
direction='forward')
else:
pyfftw_call(arr_out, arr_in, axes=axes, halfcomplex=True,
direction='backward')
arr_out = np.empty(bad_shape, dtype='complex128')
with pytest.raises(ValueError):
if direction == 'forward':
pyfftw_call(arr_in, arr_out, axes=axes, halfcomplex=True,
direction='forward')
else:
pyfftw_call(arr_out, arr_in, axes=axes, halfcomplex=True,
direction='backward')
def test_pyfftw_call_forward_real_not_halfcomplex():
# Test against Numpy's FFT
for shape in [(10,), (3, 4, 5)]:
arr = _random_array(shape, dtype='float64')
true_dft = np.fft.fftn(arr)
dft_arr = np.empty(shape, dtype='complex128')
pyfftw_call(arr, dft_arr, direction='forward', halfcomplex=False)
assert all_almost_equal(dft_arr, true_dft)
def test_pyfftw_call_backward_real_not_halfcomplex():
# Test against Numpy's IFFT, no normalization
for shape in [(10,), (3, 4, 5)]:
# Scaling happens wrt output (large) shape
idft_scaling = np.prod(shape)
arr = _random_array(shape, dtype='float64')
true_idft = np.fft.ifftn(arr) * idft_scaling
idft_arr = np.empty(shape, dtype='complex128')
pyfftw_call(arr, idft_arr, direction='backward', halfcomplex=False)
assert all_almost_equal(idft_arr, true_idft)
def test_pyfftw_call_plan_preserve_input(planning):
for shape in [(10,), (3, 4)]:
arr = _random_array(shape, dtype='complex128')
arr_cpy = arr.copy()
idft_scaling = np.prod(shape)
true_idft = np.fft.ifftn(arr) * idft_scaling
idft_arr = np.empty(shape, dtype='complex128')
pyfftw_call(arr, idft_arr, direction='backward', halfcomplex=False,
planning=planning)
assert all_almost_equal(arr, arr_cpy) # Input perserved
assert all_almost_equal(idft_arr, true_idft)
def test_pyfftw_call_forward_with_axes(odl_floating_dtype):
dtype = odl_floating_dtype
if dtype == np.dtype('float16'): # not supported, skipping
return
halfcomplex, out_dtype = _params_from_dtype(dtype)
shape = (3, 4, 5)
test_axes = [(0, 1), [1], (-1,), (1, 0), (-1, -2, -3)]
for axes in test_axes:
arr = _random_array(shape, dtype)
if halfcomplex:
true_dft = np.fft.rfftn(arr, axes=axes)
dft_arr = np.empty(_halfcomplex_shape(shape, axes),
dtype=out_dtype)
else:
true_dft = np.fft.fftn(arr, axes=axes)
dft_arr = np.empty(shape, dtype=out_dtype)
pyfftw_call(arr, dft_arr, direction='forward', axes=axes,
halfcomplex=halfcomplex)
assert all_almost_equal(dft_arr, true_dft)
def test_pyfftw_call_backward_with_axes(odl_floating_dtype):
dtype = odl_floating_dtype
if dtype == np.dtype('float16'): # not supported, skipping
return
halfcomplex, in_dtype = _params_from_dtype(dtype)
shape = (3, 4, 5)
test_axes = [(0, 1), [1], (-1,), (1, 0), (-1, -2, -3)]
for axes in test_axes:
# Only the shape indexed by axes count for the scaling
active_shape = np.take(shape, axes)
idft_scaling = np.prod(active_shape)
if halfcomplex:
arr = _random_array(_halfcomplex_shape(shape, axes), in_dtype)
true_idft = (np.fft.irfftn(arr, s=active_shape, axes=axes) *
idft_scaling)
else:
arr = _random_array(shape, in_dtype)
true_idft = (np.fft.ifftn(arr, s=active_shape, axes=axes) *
idft_scaling)
idft_arr = np.empty(shape, dtype=dtype)
pyfftw_call(arr, idft_arr, direction='backward', axes=axes,
halfcomplex=halfcomplex)
assert all_almost_equal(idft_arr, true_idft)
def test_pyfftw_call_forward_with_plan():
for shape in [(10,), (3, 4, 5)]:
arr = _random_array(shape, dtype='complex128')
arr_cpy = arr.copy()
true_dft = np.fft.fftn(arr)
# First run, create plan
dft_arr = np.empty(shape, dtype='complex128')
plan = pyfftw_call(arr, dft_arr, direction='forward',
halfcomplex=False, planning_effort='measure')
# Second run, reuse with fresh output array
dft_arr = np.empty(shape, dtype='complex128')
pyfftw_call(arr, dft_arr, direction='forward', fftw_plan=plan,
halfcomplex=False)
assert all_almost_equal(arr, arr_cpy) # Input perserved
assert all_almost_equal(dft_arr, true_dft)
<|fim▁hole|> for shape in [(10,), (3, 4, 5)]:
arr = _random_array(shape, dtype='complex128')
arr_cpy = arr.copy()
idft_scaling = np.prod(shape)
true_idft = np.fft.ifftn(arr) * idft_scaling
# First run, create plan
idft_arr = np.empty(shape, dtype='complex128')
plan = pyfftw_call(arr, idft_arr, direction='backward',
halfcomplex=False, planning_effort='measure')
# Second run, reuse with fresh output array
idft_arr = np.empty(shape, dtype='complex128')
pyfftw_call(arr, idft_arr, direction='backward', fftw_plan=plan,
halfcomplex=False)
assert all_almost_equal(arr, arr_cpy) # Input perserved
assert all_almost_equal(idft_arr, true_idft)
if __name__ == '__main__':
odl.util.test_file(__file__)<|fim▁end|>
|
def test_pyfftw_call_backward_with_plan():
|
<|file_name|>FileDialogDelegateQt.py<|end_file_name|><|fim▁begin|>""" FileDialogDelegateQt.py: Delegate that pops up a file dialog when double clicked.
Sets the model data to the selected file name.
"""
import os.path<|fim▁hole|> from PyQt5.QtWidgets import QStyledItemDelegate, QFileDialog
except ImportError:
try:
from PyQt4.QtCore import Qt, QT_VERSION_STR
from PyQt4.QtGui import QStyledItemDelegate, QFileDialog
except ImportError:
raise ImportError("FileDialogDelegateQt: Requires PyQt5 or PyQt4.")
__author__ = "Marcel Goldschen-Ohm <[email protected]>"
class FileDialogDelegateQt(QStyledItemDelegate):
""" Delegate that pops up a file dialog when double clicked.
Sets the model data to the selected file name.
"""
def __init__(self, parent=None):
QStyledItemDelegate.__init__(self, parent)
def createEditor(self, parent, option, index):
""" Instead of creating an editor, just popup a modal file dialog
and set the model data to the selected file name, if any.
"""
pathToFileName = ""
if QT_VERSION_STR[0] == '4':
pathToFileName = QFileDialog.getOpenFileName(None, "Open")
elif QT_VERSION_STR[0] == '5':
pathToFileName, temp = QFileDialog.getOpenFileName(None, "Open")
pathToFileName = str(pathToFileName) # QString ==> str
if len(pathToFileName):
index.model().setData(index, pathToFileName, Qt.EditRole)
index.model().dataChanged.emit(index, index) # Tell model to update cell display.
return None
def displayText(self, value, locale):
""" Show file name without path.
"""
try:
if QT_VERSION_STR[0] == '4':
pathToFileName = str(value.toString()) # QVariant ==> str
elif QT_VERSION_STR[0] == '5':
pathToFileName = str(value)
path, fileName = os.path.split(pathToFileName)
return fileName
except:
return ""<|fim▁end|>
|
try:
from PyQt5.QtCore import Qt, QT_VERSION_STR
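
# A minimal attachment sketch (assumed application code, not part of this
# module): install the delegate on a view column whose model stores file paths.
#
#   view = QTableView()
#   view.setItemDelegateForColumn(0, FileDialogDelegateQt(view))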
|
<|file_name|>xstream.ts<|end_file_name|><|fim▁begin|>import {mockTimeSource} from '../src/index';
import {setAdapt} from '@cycle/run/lib/adapt';
import xs from 'xstream';
setAdapt(stream => stream);
describe('xstream', () => {
before(() => setAdapt(stream => stream));
describe('of', () => {
it('emits the given values immediately', done => {
const Time = mockTimeSource();
Time.assertEqual(xs.of('A'), Time.diagram('(A|)'));
Time.run(done);
});
});
describe('map', () => {
it('applies a function to each item in the stream', done => {
const Time = mockTimeSource();
const input = Time.diagram('--1--2--3--|');
const actual = input.map(i => i * 2);
const expected = Time.diagram('--2--4--6--|');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('mapTo', () => {
it('replaces each occurence with the given value', done => {
const Time = mockTimeSource();
const input = Time.diagram('--1--2--3--|');
const actual = input.mapTo(7);
const expected = Time.diagram('--7--7--7--|');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('merge', () => {
it('merges two streams', done => {
const Time = mockTimeSource();
const A = Time.diagram('-----1-----1--|');
const B = Time.diagram('--2-----2-----|');
const actual = xs.merge(A, B);
const expected = Time.diagram('--2--1--2--1--|');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('combine', () => {
it('combines two streams', done => {
const Time = mockTimeSource();
const A = Time.diagram('0-1-----3-----|');
const B = Time.diagram('0---2------5--|');
const actual = xs.combine(A, B).map(([a, b]) => a + b);
const expected = Time.diagram('0-1-3---5--8--|');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('filter', () => {
it('only allows events that pass the given conditional', done => {
const Time = mockTimeSource();
const input = Time.diagram('--1--2--3--4--5--6--|');
const actual = input.filter(i => i % 2 === 0);
const expected = Time.diagram('-----2-----4-----6--|');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('take', () => {
it('takes the first n items', done => {
const Time = mockTimeSource();
const input = Time.diagram('--1--2--3--4--5--6--|');
const actual = input.take(3);
const expected = Time.diagram('--1--2--(3|)');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('drop', () => {
it('drops the first n items', done => {
const Time = mockTimeSource();
const input = Time.diagram('--1--2--3--4--5--6--|');
const actual = input.drop(3);
const expected = Time.diagram('-----------4--5--6--|');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('last', () => {
it('returns the last item after the stream completes', done => {
const Time = mockTimeSource();
const input = Time.diagram('--a--b--c--|');
const actual = input.last();
const expected = Time.diagram('-----------(c|)');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('startWith', () => {
it('prepends a starting value', done => {
const Time = mockTimeSource();
const input = Time.diagram('---1--2--3--|');
const actual = input.startWith(0);
const expected = Time.diagram('0--1--2--3--|');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('endWhen', () => {
it('ends the stream when the given stream emits', done => {
const Time = mockTimeSource();
const input = Time.diagram('---1--2--3--4--5--6-|');
const endWhen = Time.diagram('-----------x--------|');
const actual = input.endWhen(endWhen);
const expected = Time.diagram('---1--2--3-|');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('fold', () => {
it('accumulates a value from a seed', done => {
const Time = mockTimeSource();
const input = Time.diagram('---1--1--1--1--1--1-|');
const actual = input.fold((acc, val) => acc + val, 0);
const expected = Time.diagram('0--1--2--3--4--5--6-|');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('replaceError', () => {
it('replaces the stream with another stream following an error', done => {
const Time = mockTimeSource();
<|fim▁hole|> const input = Time.diagram('---1--2--3--#');
const replace = Time.diagram('---------------7-|');
const actual = input.replaceError(() => replace);
const expected = Time.diagram('---1--2--3-----7-|');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('flatten', () => {
it('turns a stream of streams into a flat stream', done => {
const Time = mockTimeSource();
const A = Time.diagram('--1--1--1--1--1--|');
const B = Time.diagram('---2--2---2--2--2|');
const input = Time.diagram('-A-------B-------|', {A, B});
const actual = input.flatten();
const expected = Time.diagram('--1--1--1-2--2--2|');
Time.assertEqual(actual, expected);
Time.run(done);
});
});
describe('imitate', () => {
it('creates a circular dependency', done => {
const Time = mockTimeSource();
const proxy = xs.create();
const input = Time.diagram('--a--b--c|');
const actual = proxy;
const expected = Time.diagram('--a--b--c|');
proxy.imitate(input);
Time.assertEqual(actual, expected);
Time.run(done);
});
});
});<|fim▁end|>
| |
<|file_name|>kindck-impl-type-params-2.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
trait Foo {
}
impl<T:Copy> Foo for T {
}
fn take_param<T:Foo>(foo: &T) { }
fn main() {
let x: Box<_> = box 3;
take_param(&x);<|fim▁hole|> //~^ ERROR `std::boxed::Box<{integer}>: std::marker::Copy` is not satisfied
}<|fim▁end|>
| |
<|file_name|>issue-12187-2.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your<|fim▁hole|>// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn new<'r, T>() -> &'r T {
panic!()
}
fn main() {
let &v = new();
//~^ ERROR type annotations required
}<|fim▁end|>
| |
<|file_name|>time_thread.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# coding=utf-8
import threading
import time
class timer(threading.Thread): #The timer class is derived from the class threading.Thread
def __init__(self, num, interval):
threading.Thread.__init__(self)
self.thread_num = num
self.interval = interval
self.thread_stop = False
def run(self): # Overwrite run() method; put what you want the thread to do here
while not self.thread_stop:
print 'Thread Object(%d), Time:%s\n' %(self.thread_num, time.ctime())
time.sleep(self.interval)
def stop(self):<|fim▁hole|>def test():
thread1 = timer(1, 1)
thread2 = timer(2, 2)
thread1.start()
thread2.start()
time.sleep(10)
thread1.stop()
thread2.stop()
return
if __name__ == '__main__':
test()<|fim▁end|>
|
self.thread_stop = True
|
<|file_name|>no084.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from random import choice
from python.decorators import euler_timer
SQUARES = ["GO",
"A1", "CC1", "A2", "T1", "R1", "B1", "CH1", "B2", "B3",
"JAIL",
"C1", "U1", "C2", "C3", "R2", "D1", "CC2", "D2", "D3",
"FP",
"E1", "CH2", "E2", "E3", "R3", "F1", "F2", "U2", "F3",
"G2J",
"G1", "G2", "CC3", "G3", "R4", "CH3", "H1", "T2", "H2"]
def roll_die(size):
first_die = choice(range(1, size + 1))
second_die = choice(range(1, size + 1))
return (first_die + second_die, (first_die == second_die))
def back(square, step):
index = SQUARES.index(square)
new_index = (index - step) % len(SQUARES)
return SQUARES[new_index]
def next_specific(square, next_type):
if next_type not in ["R", "U"]:
raise Exception("next_specific only intended for R and U")
# R1=5, R2=15, R3=25, R4=35
index = SQUARES.index(square)
if next_type == "R":
if 0 <= index < 5 or 35 < index:
return "R1"
elif 5 < index < 15:
return "R2"
elif 15 < index < 25:
return "R3"
elif 25 < index < 35:
return "R4"
else:
raise Exception("Case should not occur")
# U1=12, U2=28
elif next_type == "U":
if 0 <= index < 12 or index > 28:
return "U1"
elif 12 < index < 28:
return "U2"
else:
return Exception("Case should not occur")
else:
raise Exception("Case should not occur")
def next_square(landing_square, chance_card, chest_card):
if landing_square not in ["CC1", "CC2", "CC3", "CH1", "CH2", "CH3", "G2J"]:
return (landing_square, chance_card, chest_card)
if landing_square == "G2J":
return ("JAIL", chance_card, chest_card)
elif landing_square in ["CC1", "CC2", "CC3"]:
# 1/16 Go, Jail
# 14/16 Stay
chest_card = (chest_card + 1) % 16
if chest_card == 0:
return ("GO", chance_card, chest_card)
elif chest_card == 1:
return ("JAIL", chance_card, chest_card)
else:
return (landing_square, chance_card, chest_card)
elif landing_square in ["CH1", "CH2", "CH3"]:
# 1/16 Go, Jail, C1, E3, H2, R1, next U, back 3
# 1/8 Next R
chance_card = (chance_card + 1) % 16
if chance_card == 0:
return ("GO", chance_card, chest_card)
elif chance_card == 1:
return ("JAIL", chance_card, chest_card)
elif chance_card == 2:
return ("C1", chance_card, chest_card)
elif chance_card == 3:
return ("E3", chance_card, chest_card)
elif chance_card == 4:
return ("H2", chance_card, chest_card)
elif chance_card == 5:
return ("R1", chance_card, chest_card)
elif chance_card == 6:
return (next_specific(landing_square, "U"),
chance_card, chest_card)
elif chance_card == 7:
return next_square(back(landing_square, 3),
chance_card, chest_card)
elif chance_card in [8, 9]:
return (next_specific(landing_square, "R"),
chance_card, chest_card)
else:
return (landing_square, chance_card, chest_card)
else:
raise Exception("Case should not occur")
def main(verbose=False):
GAME_PLAY = 10 ** 6
dice_size = 4
visited = {"GO": 1}
current = "GO"
chance_card = 0
chest_card = 0
doubles = 0
for place in xrange(GAME_PLAY):
total, double = roll_die(dice_size)
if double:
doubles += 1
else:
doubles = 0
if doubles == 3:
doubles = 0
current = "JAIL"
else:
index = SQUARES.index(current)
landing_square = SQUARES[(index + total) % len(SQUARES)]
(current, chance_card,
chest_card) = next_square(landing_square, chance_card, chest_card)
# if current is not in visited, sets to 1
# (default 0 returned by get)
visited[current] = visited.get(current, 0) + 1<|fim▁hole|> key=lambda pair: pair[1],
reverse=True)
top_visited = [SQUARES.index(square[0]) for square in top_visited[:3]]
return ''.join(str(index).zfill(2) for index in top_visited)
if __name__ == '__main__':
print euler_timer(84)(main)(verbose=True)<|fim▁end|>
|
top_visited = sorted(visited.items(),
|
<|file_name|>local_cli_wrapper_test.py<|end_file_name|><|fim▁begin|># Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for local command-line-interface debug wrapper session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.client import session
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class LocalCLIDebuggerWrapperSessionForTest(
local_cli_wrapper.LocalCLIDebugWrapperSession):
"""Subclasses the wrapper class for testing.
Overrides its CLI-related methods for headless testing environments.
Inserts observer variables for assertions.
"""
def __init__(self, command_args_sequence, sess, dump_root=None):
"""Constructor of the for-test subclass.
Args:
command_args_sequence: (list of list of str) A list of arguments for the
"run" command.
sess: See the doc string of LocalCLIDebugWrapperSession.__init__.
dump_root: See the doc string of LocalCLIDebugWrapperSession.__init__.
"""
local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(
self, sess, dump_root=dump_root, log_usage=False)
self._command_args_sequence = command_args_sequence
self._response_pointer = 0
# Observer variables.
self.observers = {
"debug_dumps": [],
"tf_errors": [],
"run_start_cli_run_numbers": [],
"run_end_cli_run_numbers": [],
}
def _prep_cli_for_run_start(self):
pass
def _prep_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
self.observers["debug_dumps"].append(debug_dump)
self.observers["tf_errors"].append(tf_error)
def _launch_cli(self):
if self._is_run_start:
self.observers["run_start_cli_run_numbers"].append(self._run_call_count)
else:
self.observers["run_end_cli_run_numbers"].append(self._run_call_count)
command_args = self._command_args_sequence[self._response_pointer]
self._response_pointer += 1
try:
self._run_handler(command_args)
except debugger_cli_common.CommandLineExit as e:
response = e.exit_token
return response
class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mktemp()
self.v = variables.Variable(10.0, name="v")
self.delta = constant_op.constant(1.0, name="delta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.ph = array_ops.placeholder(dtypes.float32, name="ph")
self.xph = array_ops.transpose(self.ph, name="xph")
self.m = constant_op.constant(
[[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
self.y = math_ops.matmul(self.m, self.xph, name="y")
self.sess = session.Session()
# Initialize variable.
self.sess.run(self.v.initializer)
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
def testConstructWrapper(self):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), log_usage=False)
def testConstructWrapperWithExistingEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
self.assertTrue(os.path.isdir(self._tmp_dir))
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingNonEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
dir_path = os.path.join(self._tmp_dir, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "dump_root path points to a non-empty directory"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingFileDumpRoot(self):
os.mkdir(self._tmp_dir)
file_path = os.path.join(self._tmp_dir, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(os.path.isfile(file_path))
with self.assertRaisesRegexp(ValueError, "dump_root path points to a file"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=file_path, log_usage=False)
def testRunsUnderDebugMode(self):
# Test command sequence: run; run; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], [], []], self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Assert correct run call numbers for which the CLI has been launched at
# run-start and run-end.
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1, 2], wrapped_sess.observers["run_end_cli_run_numbers"])
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
<|fim▁hole|> # they should be both None.
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunInfoOutputAtRunEndIsCorrect(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], [], []], self.sess, dump_root=self._tmp_dir)
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
tfdbg_logo = cli_shared.get_tfdbg_logo()
# The run_info output in the first run() call should contain the tfdbg logo.
self.assertEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
# The run_info output in the second run() call should NOT contain the logo.
self.assertNotEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
def testRunsUnderNonDebugMode(self):
# Test command sequence: run -n; run -n; run -n;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-n"], ["-n"]], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
def testRunsUnderNonDebugThenDebugMode(self):
# Test command sequence: run -n; run -n; run; run;
# Do two NON_DEBUG_RUNs, followed by DEBUG_RUNs.
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-n"], [], []], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
# Here, the CLI should have been launched only under the third run,
# because the first and second runs are NON_DEBUG.
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesWithinLimit(self):
# Test command sequence: run -t 3; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-t", "3"], []], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesOverLimit(self):
# Test command sequence: run -t 3;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-t", "3"]], self.sess, dump_root=self._tmp_dir)
# run twice, which is less than the number of times specified by the
# command.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(12.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(0, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([], wrapped_sess.observers["tf_errors"])
def testRunMixingDebugModeAndMultpleTimes(self):
# Test command sequence: run -n; run -t 2; run; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-t", "2"], [], []], self.sess, dump_root=self._tmp_dir)
# run four times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(14.0, self.sess.run(self.v))
self.assertEqual([1, 2],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3, 4], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRuntimeErrorShouldBeCaught(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], []], self.sess, dump_root=self._tmp_dir)
# Do a run that should lead to an TensorFlow runtime error.
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0], [1.0], [2.0]]})
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the runtime error is caught by the wrapped session properly.
self.assertEqual(1, len(wrapped_sess.observers["tf_errors"]))
tf_error = wrapped_sess.observers["tf_errors"][0]
self.assertEqual("y", tf_error.op.name)
def testRunTillFilterPassesShouldLaunchCLIAtCorrectRun(self):
# Test command sequence:
# run -f greater_than_twelve; run -f greater_than_twelve; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-f", "v_greater_than_twelve"], ["-f", "v_greater_than_twelve"], []],
self.sess,
dump_root=self._tmp_dir)
def v_greater_than_twelve(datum, tensor):
return datum.node_name == "v" and tensor > 12.0
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(15.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
# run-end CLI should NOT have been launched for run #2 and #3, because only
# starting from run #4 v becomes greater than 12.0.
self.assertEqual([4, 5], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
if __name__ == "__main__":
googletest.main()<|fim▁end|>
|
# Verify that the TensorFlow runtime errors are picked up and in this case,
|
<|file_name|>psrldq.rs<|end_file_name|><|fim▁begin|>use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;<|fim▁hole|>fn psrldq_1() {
run_test(&Instruction { mnemonic: Mnemonic::PSRLDQ, operand1: Some(Direct(XMM3)), operand2: Some(Literal8(24)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 115, 219, 24], OperandSize::Dword)
}
fn psrldq_2() {
run_test(&Instruction { mnemonic: Mnemonic::PSRLDQ, operand1: Some(Direct(XMM6)), operand2: Some(Literal8(34)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 115, 222, 34], OperandSize::Qword)
}<|fim▁end|>
| |
<|file_name|>codec.go<|end_file_name|><|fim▁begin|>/*
* Spreed WebRTC.
* Copyright (C) 2013-2015 struktur AG
*
* This file is part of Spreed WebRTC.
*<|fim▁hole|> *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package channelling
import (
"bytes"
"encoding/json"
"errors"
"log"
"github.com/strukturag/spreed-webrtc/go/buffercache"
)
type IncomingDecoder interface {
DecodeIncoming(buffercache.Buffer) (*DataIncoming, error)
}
type OutgoingEncoder interface {
EncodeOutgoing(*DataOutgoing) (buffercache.Buffer, error)
}
type Codec interface {
NewBuffer() buffercache.Buffer
IncomingDecoder
OutgoingEncoder
}
type incomingCodec struct {
buffers buffercache.BufferCache
incomingLimit int
}
func NewCodec(incomingLimit int) Codec {
return &incomingCodec{buffercache.NewBufferCache(1024, bytes.MinRead), incomingLimit}
}
func (codec incomingCodec) NewBuffer() buffercache.Buffer {
return codec.buffers.New()
}
func (codec incomingCodec) DecodeIncoming(b buffercache.Buffer) (*DataIncoming, error) {
length := b.GetBuffer().Len()
if length > codec.incomingLimit {
return nil, errors.New("Incoming message size limit exceeded")
}
incoming := &DataIncoming{}
return incoming, json.Unmarshal(b.Bytes(), incoming)
}
func (codec incomingCodec) EncodeOutgoing(outgoing *DataOutgoing) (buffercache.Buffer, error) {
b := codec.NewBuffer()
if err := json.NewEncoder(b).Encode(outgoing); err != nil {
log.Println("Error while encoding JSON", err)
b.Decref()
return nil, err
}
return b, nil
}<|fim▁end|>
|
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
|
<|file_name|>CommonsSessionStorage.java<|end_file_name|><|fim▁begin|>/**
* Copyright (C) 2016 The Language Archive, Max Planck Institute for Psycholinguistics
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
package nl.mpi.arbil.userstorage;
import java.awt.GraphicsEnvironment;
import java.awt.HeadlessException;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import javax.swing.JOptionPane;
import nl.mpi.flap.plugin.PluginDialogHandler;
import nl.mpi.flap.plugin.PluginSessionStorage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Created on : Nov 8, 2012, 4:25:54 PM
*
* @author Peter Withers <[email protected]>
*/
public abstract class CommonsSessionStorage implements PluginSessionStorage {
private final static Logger logger = LoggerFactory.getLogger(CommonsSessionStorage.class);
protected File localCacheDirectory = null;
private File storageDirectory = null;
protected PluginDialogHandler messageDialogHandler;
protected abstract String[] getAppDirectoryAlternatives();
protected abstract String getProjectDirectoryName();
protected abstract void logError(Exception exception);
protected abstract void logError(String message, Exception exception);
public abstract Object loadObject(String filename) throws Exception;
private void checkForMultipleStorageDirectories(String[] locationOptions) {
// look for any additional storage directories
int foundDirectoryCount = 0;
StringBuilder storageDirectoryMessageString = new StringBuilder();
for (String currentStorageDirectory : locationOptions) {
File storageFile = new File(currentStorageDirectory);
if (storageFile.exists()) {
foundDirectoryCount++;
storageDirectoryMessageString.append(currentStorageDirectory).append("\n");
}
}
if (foundDirectoryCount > 1) {
String errorMessage = "More than one storage directory has been found.\nIt is recommended to remove any unused directories in this list.\nNote that the first occurrence is currently in use:\n" + storageDirectoryMessageString;
logError(new Exception(errorMessage));
try {
if (!GraphicsEnvironment.isHeadless()) {
JOptionPane.showMessageDialog(null,
"More than one storage directory has been found.\nIt is recommended to remove any unused directories in this list.\nNote that the first occurrence is currently in use:\n" + storageDirectoryMessageString, "Multiple storage directories",
JOptionPane.WARNING_MESSAGE);
}
} catch (HeadlessException hEx) {
// Should never occur since we're checking whether headless
throw new AssertionError(hEx);
}
}
}
protected String[] getLocationOptions() {
List<String> locationOptions = new ArrayList<String>();
for (String appDir : getAppDirectoryAlternatives()) {
locationOptions.add(System.getProperty("user.home") + File.separatorChar + "Local Settings" + File.separatorChar + "Application Data" + File.separatorChar + appDir + File.separatorChar);
locationOptions.add(System.getenv("APPDATA") + File.separatorChar + appDir + File.separatorChar);
locationOptions.add(System.getProperty("user.home") + File.separatorChar + appDir + File.separatorChar);
locationOptions.add(System.getenv("USERPROFILE") + File.separatorChar + appDir + File.separatorChar);
locationOptions.add(System.getProperty("user.dir") + File.separatorChar + appDir + File.separatorChar);
}
List<String> uniqueArray = new ArrayList<String>();
for (String location : locationOptions) {
if (location != null
&& !location.startsWith("null")
&& !uniqueArray.contains(location)) {
uniqueArray.add(location);
}
}
locationOptions = uniqueArray;
for (String currentLocationOption : locationOptions) {
logger.debug("LocationOption: " + currentLocationOption);
}
return locationOptions.toArray(new String[]{});
}
private File determineStorageDirectory() throws RuntimeException {
File storageDirectoryFile = null;
String storageDirectoryArray[] = getLocationOptions();
// look for an existing storage directory
for (String currentStorageDirectory : storageDirectoryArray) {
File storageFile = new File(currentStorageDirectory);
if (storageFile.exists()) {
logger.debug("existing storage directory found: " + currentStorageDirectory);
storageDirectoryFile = storageFile;
break;
}
}
String testedStorageDirectories = "";
if (storageDirectoryFile == null) {
for (String currentStorageDirectory : storageDirectoryArray) {
if (!currentStorageDirectory.startsWith("null")) {
File storageFile = new File(currentStorageDirectory);
if (!storageFile.exists()) {
if (!storageFile.mkdir()) {
testedStorageDirectories = testedStorageDirectories + currentStorageDirectory + "\n";
logError("failed to create: " + currentStorageDirectory, null);
} else {
logger.debug("created new storage directory: " + currentStorageDirectory);
storageDirectoryFile = storageFile;
break;
}<|fim▁hole|> }
}
}
}
if (storageDirectoryFile == null) {
logError("Could not create a working directory in any of the potential location:\n" + testedStorageDirectories + "Please check that you have write permissions in at least one of these locations.\nThe application will now exit.", null);
System.exit(-1);
} else {
try {
File testFile = File.createTempFile("testfile", ".tmp", storageDirectoryFile);
boolean success = testFile.exists();
if (!success) {
success = testFile.createNewFile();
}
if (success) {
testFile.deleteOnExit();
success = testFile.exists();
if (success) {
success = testFile.delete();
}
}
if (!success) {
// test the storage directory is writable and add a warning message box here if not
logError("Could not write to the working directory.\nThere will be issues creating, editing and saving any file.", null);
}
} catch (IOException exception) {
logger.debug("Sending exception to logger", exception);
logError(exception);
messageDialogHandler.addMessageDialogToQueue("Could not create a test file in the working directory.", "Arbil Critical Error");
throw new RuntimeException("Exception while testing working directory writability", exception);
}
}
logger.debug("storageDirectory: " + storageDirectoryFile);
checkForMultipleStorageDirectories(storageDirectoryArray);
return storageDirectoryFile;
}
/**
* @return the storageDirectory
*/
public synchronized File getApplicationSettingsDirectory() {
if (storageDirectory == null) {
storageDirectory = determineStorageDirectory();
}
return storageDirectory;
}
/**
* @return the project directory
*/
public File getProjectDirectory() {
return getProjectWorkingDirectory().getParentFile();
}
/**
* Tests that the project directory exists and creates it if it does not.
*
* @return the project working files directory
*/
public File getProjectWorkingDirectory() {
if (localCacheDirectory == null) {
// load from the text based properties file
String localCacheDirectoryPathString = loadString("cacheDirectory");
if (localCacheDirectoryPathString != null) {
localCacheDirectory = new File(localCacheDirectoryPathString);
} else {
// otherwise load from the to be replaced binary based storage file
try {
File localWorkingDirectory = (File) loadObject("cacheDirectory");
localCacheDirectory = localWorkingDirectory;
} catch (Exception exception) {
if (new File(getApplicationSettingsDirectory(), "imdicache").exists()) {
localCacheDirectory = new File(getApplicationSettingsDirectory(), "imdicache");
} else {
localCacheDirectory = new File(getApplicationSettingsDirectory(), getProjectDirectoryName());
}
}
saveString("cacheDirectory", localCacheDirectory.getAbsolutePath());
}
boolean cacheDirExists = localCacheDirectory.exists();
if (!cacheDirExists) {
if (!localCacheDirectory.mkdirs()) {
logError("Could not create cache directory", null);
return null;
}
}
}
return localCacheDirectory;
}
}<|fim▁end|>
| |
<|file_name|>client.py<|end_file_name|><|fim▁begin|># Copyright (c) 2008 Duncan Fordyce
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import socket
import time
import traceback
from oyoyo.parse import *
from oyoyo import helpers
from oyoyo.cmdhandler import CommandError
class IRCClientError(Exception):
pass
class IRCClient:
""" IRC Client class. This handles one connection to a server.
This can be used either with or without IRCApp ( see connect() docs )
"""
def __init__(self, cmd_handler, **kwargs):
""" the first argument should be an object with attributes/methods named
as the irc commands. You may subclass from one of the classes in
oyoyo.cmdhandler for convenience but it is not required. The
methods should have arguments (prefix, args). prefix is
normally the sender of the command. args is a list of arguments.
        It's recommended you subclass oyoyo.cmdhandler.DefaultCommandHandler,
this class provides defaults for callbacks that are required for
normal IRC operation.
all other arguments should be keyword arguments. The most commonly
used will be nick, host and port. You can also specify an "on connect"
callback. ( check the source for others )
Warning: By default this class will not block on socket operations, this
means if you use a plain while loop your app will consume 100% cpu.
To enable blocking pass blocking=True.
        >>> from oyoyo import helpers
        >>> class My_Handler(DefaultCommandHandler):
... def privmsg(self, prefix, command, args):
... print "%s said %s" % (prefix, args[1])
...
>>> def connect_callback(c):
... helpers.join(c, '#myroom')
...
>>> cli = IRCClient(My_Handler,
... host="irc.freenode.net",
... port=6667,
... nick="myname",
... connect_cb=connect_callback)
...
>>> cli_con = cli.connect()
>>> while 1:
... cli_con.next()
...
"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.nick = None
self.real_name = None
self.host = None
self.port = None
self.connect_cb = None
self.blocking = False
self.__dict__.update(kwargs)
self.command_handler = cmd_handler(self)
self._end = 0
def send(self, *args, **kwargs):
""" send a message to the connected server. all arguments are joined
with a space for convenience, for example the following are identical
>>> cli.send("JOIN %s" % some_room)
>>> cli.send("JOIN", some_room)
In python 2, all args must be of type str or unicode, *BUT* if they are
unicode they will be converted to str with the encoding specified by
the 'encoding' keyword argument (default 'utf8').
In python 3, all args must be of type str or bytes, *BUT* if they are
str they will be converted to bytes with the encoding specified by the
'encoding' keyword argument (default 'utf8').
"""
# Convert all args to bytes if not already
encoding = kwargs.get('encoding') or 'utf8'
bargs = []
for arg in args:
if isinstance(arg, str):
bargs.append(bytes(arg, encoding))
elif isinstance(arg, bytes):
bargs.append(arg)
elif type(arg).__name__ == 'unicode':
bargs.append(arg.encode(encoding))
else:
raise IRCClientError('Refusing to send one of the args from provided: %s'
% repr([(type(arg), arg) for arg in args]))
msg = bytes(" ", "ascii").join(bargs)
logging.info('---> send "%s"' % msg)
self.socket.send(msg + bytes("\r\n", "ascii"))
def connect(self):
""" initiates the connection to the server set in self.host:self.port
and returns a generator object.
>>> cli = IRCClient(my_handler, host="irc.freenode.net", port=6667)
>>> g = cli.connect()
>>> while 1:
... g.next()
"""
try:
logging.info('connecting to %s:%s' % (self.host, self.port))
self.socket.connect(("%s" % self.host, self.port))
if self.blocking:
# this also overrides default timeout
self.socket.setblocking(1)
else:
self.socket.setblocking(0)
helpers.nick(self, self.nick)
helpers.user(self, self.nick, self.real_name)
if self.connect_cb:
self.connect_cb(self)
buffer = bytes()
while not self._end:
try:
buffer += self.socket.recv(1024)
except socket.error as e:
try: # a little dance of compatibility to get the errno
errno = e.errno
except AttributeError:
errno = e[0]
if not self.blocking and errno == 11:
pass
else:
raise e
else:
data = buffer.split(bytes("\n", "ascii"))
buffer = data.pop()
for el in data:
prefix, command, args = parse_raw_irc_command(el)
try:
self.command_handler.run(command, prefix, *args)
except CommandError:
                            # error will have already been logged by the handler
pass
yield True
finally:
if self.socket:
logging.info('closing socket')
self.socket.close()
<|fim▁hole|># noinspection PyPep8Naming
class IRCApp:
""" This class manages several IRCClient instances without the use of threads.
(Non-threaded) Timer functionality is also included.
"""
class _ClientDesc:
def __init__(self, **kwargs):
self.con = None
self.autoreconnect = False
self.__dict__.update(kwargs)
def __init__(self):
self._clients = {}
self._timers = []
self.running = False
self.sleep_time = 0.5
def addClient(self, client, autoreconnect=False):
""" add a client object to the application. setting autoreconnect
to true will mean the application will attempt to reconnect the client
after every disconnect. you can also set autoreconnect to a number
to specify how many reconnects should happen.
warning: if you add a client that has blocking set to true,
timers will no longer function properly """
logging.info('added client %s (ar=%s)' % (client, autoreconnect))
self._clients[client] = self._ClientDesc(autoreconnect=autoreconnect)
def addTimer(self, seconds, cb):
""" add a timed callback. accuracy is not specified, you can only
garuntee the callback will be called after seconds has passed.
( the only advantage to these timers is they dont use threads )
"""
assert callable(cb)
logging.info('added timer to call %s in %ss' % (cb, seconds))
self._timers.append((time.time() + seconds, cb))
def run(self):
""" run the application. this will block until stop() is called """
# TODO: convert this to use generators too?
self.running = True
while self.running:
found_one_alive = False
for client, clientdesc in self._clients.items():
if clientdesc.con is None:
clientdesc.con = client.connect()
try:
clientdesc.con.next()
except Exception as e:
logging.error('client error %s' % e)
logging.error(traceback.format_exc())
if clientdesc.autoreconnect:
clientdesc.con = None
if isinstance(clientdesc.autoreconnect, (int, float)):
clientdesc.autoreconnect -= 1
found_one_alive = True
else:
clientdesc.con = False
else:
found_one_alive = True
if not found_one_alive:
                logging.info('nothing left alive... quitting')
self.stop()
now = time.time()
timers = self._timers[:]
self._timers = []
for target_time, cb in timers:
if now > target_time:
logging.info('calling timer cb %s' % cb)
cb()
else:
self._timers.append((target_time, cb))
time.sleep(self.sleep_time)
def stop(self):
""" stop the application """
self.running = False<|fim▁end|>
| |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>"""Main view for geo locator application"""
from django.shortcuts import render
def index(request):<|fim▁hole|>
return render(request, "homepage.html", {'location': location})<|fim▁end|>
|
if request.location:
location = request.location
else:
location = None
|
<|file_name|>uievent.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::UIEventBinding;
use dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use dom::bindings::codegen::InheritTypes::{EventCast, UIEventDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{MutNullableJS, JSRef, RootedReference, Temporary};
use dom::bindings::utils::reflect_dom_object;
use dom::event::{Event, EventTypeId, EventBubbles, EventCancelable};
use dom::window::Window;
use util::str::DOMString;
use std::cell::Cell;
use std::default::Default;
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#interface-UIEvent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableJS<Window>,
detail: Cell<i32>
}
impl UIEventDerived for Event {
fn is_uievent(&self) -> bool {
*self.type_id() == EventTypeId::UIEvent
}
}
impl UIEvent {
pub fn new_inherited(type_id: EventTypeId) -> UIEvent {
UIEvent {
event: Event::new_inherited(type_id),
view: Default::default(),
detail: Cell::new(0),
}
}
pub fn new_uninitialized(window: JSRef<Window>) -> Temporary<UIEvent> {
reflect_dom_object(box UIEvent::new_inherited(EventTypeId::UIEvent),
GlobalRef::Window(window),
UIEventBinding::Wrap)<|fim▁hole|> type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<JSRef<Window>>,
detail: i32) -> Temporary<UIEvent> {
let ev = UIEvent::new_uninitialized(window).root();
ev.r().InitUIEvent(type_, can_bubble == EventBubbles::Bubbles, cancelable == EventCancelable::Cancelable, view, detail);
Temporary::from_rooted(ev.r())
}
pub fn Constructor(global: GlobalRef,
type_: DOMString,
init: &UIEventBinding::UIEventInit) -> Fallible<Temporary<UIEvent>> {
let bubbles = if init.parent.bubbles { EventBubbles::Bubbles } else { EventBubbles::DoesNotBubble };
let cancelable = if init.parent.cancelable { EventCancelable::Cancelable } else { EventCancelable::NotCancelable };
let event = UIEvent::new(global.as_window(), type_,
bubbles, cancelable,
init.view.r(), init.detail);
Ok(event)
}
}
impl<'a> UIEventMethods for JSRef<'a, UIEvent> {
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#widl-UIEvent-view
fn GetView(self) -> Option<Temporary<Window>> {
self.view.get()
}
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#widl-UIEvent-detail
fn Detail(self) -> i32 {
self.detail.get()
}
fn InitUIEvent(self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<JSRef<Window>>,
detail: i32) {
let event: JSRef<Event> = EventCast::from_ref(self);
if event.dispatching() {
return;
}
event.InitEvent(type_, can_bubble, cancelable);
self.view.assign(view);
self.detail.set(detail);
}
}<|fim▁end|>
|
}
pub fn new(window: JSRef<Window>,
|
<|file_name|>views.py<|end_file_name|><|fim▁begin|># coding: utf-8
from __future__ import absolute_import, unicode_literals
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_sso import claims
from rest_framework_sso.models import SessionToken
from rest_framework_sso.serializers import SessionTokenSerializer, AuthorizationTokenSerializer
from rest_framework_sso.settings import api_settings
import logging
logger = logging.getLogger(__name__)
create_session_payload = api_settings.CREATE_SESSION_PAYLOAD
create_authorization_payload = api_settings.CREATE_AUTHORIZATION_PAYLOAD
encode_jwt_token = api_settings.ENCODE_JWT_TOKEN
decode_jwt_token = api_settings.DECODE_JWT_TOKEN
class BaseAPIView(APIView):
"""
Base API View that various JWT interactions inherit from.
"""
throttle_classes = ()
permission_classes = ()
serializer_class = None
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
return {"request": self.request, "view": self}
def get_serializer_class(self):
"""
Return the class to use for the serializer.
Defaults to using `self.serializer_class`.
You may want to override this if you need to provide different
serializations depending on the incoming request.
(Eg. admins get full serialization, others get basic serialization)
"""
assert self.serializer_class is not None, (
"'%s' should either include a `serializer_class` attribute, "
"or override the `get_serializer_class()` method." % self.__class__.__name__
)
return self.serializer_class
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class()
kwargs["context"] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
<|fim▁hole|>class ObtainSessionTokenView(BaseAPIView):
"""
Returns a JSON Web Token that can be used for authenticated requests.
"""
permission_classes = ()
serializer_class = SessionTokenSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data["user"]
session_token = SessionToken.objects.active().filter(user=user).with_user_agent(request=request).first()
if session_token is None:
session_token = SessionToken(user=user)
session_token.update_attributes(request=request)
session_token.save()
payload = create_session_payload(session_token=session_token, user=user)
jwt_token = encode_jwt_token(payload=payload)
return Response({"token": jwt_token})
class ObtainAuthorizationTokenView(BaseAPIView):
"""
Returns a JSON Web Token that can be used for authenticated requests.
"""
permission_classes = (IsAuthenticated,)
serializer_class = AuthorizationTokenSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
if hasattr(request.auth, "get") and request.auth.get(claims.SESSION_ID):
try:
session_token = SessionToken.objects.active().get(
pk=request.auth.get(claims.SESSION_ID), user=request.user
)
except SessionToken.DoesNotExist:
return Response({"detail": "Invalid token."}, status=status.HTTP_401_UNAUTHORIZED)
else:
session_token = (
SessionToken.objects.active().filter(user=request.user).with_user_agent(request=request).first()
)
if session_token is None:
session_token = SessionToken(user=request.user)
session_token.update_attributes(request=request)
session_token.save()
payload = create_authorization_payload(
session_token=session_token, user=request.user, **serializer.validated_data
)
jwt_token = encode_jwt_token(payload=payload)
return Response({"token": jwt_token})
obtain_session_token = ObtainSessionTokenView.as_view()
obtain_authorization_token = ObtainAuthorizationTokenView.as_view()<|fim▁end|>
| |
<|file_name|>MimeHTML_HTTP.java<|end_file_name|><|fim▁begin|>/**
* Copyright (C), 2012 Adaptinet.org (Todd Fearn, Anthony Graffeo)
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/.
*
*/
package org.adaptinet.mimehandlers;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URLDecoder;
import java.security.MessageDigest;
import java.util.Enumeration;
import java.util.Properties;
import java.util.StringTokenizer;
import java.util.Vector;
import org.adaptinet.http.HTTP;
import org.adaptinet.http.Request;
import org.adaptinet.http.Response;
import org.adaptinet.socket.PropData;
import org.adaptinet.transceiver.ITransceiver;
public class MimeHTML_HTTP implements MimeBase {
private String url = null;
private String urlBase = null;
private String webBase = null;
private String mimeType = null;
private String pathName = null;
private ITransceiver transceiver = null;
private int status = 200;
private boolean bAdminPort = false;
static private String PLUGIN_ENTRY = "plugins/plugin?entry=";
static private String PEER_ENTRY = "peers/peer?entry=";
static private int PLUGIN_ENTRYLEN = 0;
static private int PEER_ENTRYLEN = 0;
static final String footer = "<br><table border=\"0\" cellpadding=\"0\" cellspacing=\"0\" "
+ "width=\"100%\" ><tr><td width=\"127\" class=\"footerorange\">"
+ "<IMG height=\"1\" src=\"/images/space.gif\" width=\"127\"></td><td width=\"400\" "
+ "class=\"footerorange\"><IMG height=\"1\" src=\"/images/space.gif\" width=\"400\"></td>"
+ "<td width=\"100%\" class=\"footerorange\"><IMG height=\"1\" src=\"/images/space.gif\" "
+ "width=\"1\"></td></tr><tr><td width=\"127\" class=\"footerorange\">"
+ "<IMG height=27 src=\"/images/space.gif\" width=127></td><td align=\"left\" "
+ "class=\"footerorange\" nowrap>25 B Vreeland Rd. Suite 103, Florham Park, NJ"
+ " 07932 973-451-9600 fx 973-439-1745</td>"
+ "<td width=\"100%\" class=\"footerorange\"><IMG height=27 "
+ "src=\"/images/space.gif\" width=1></td></tr><tr><td width=\"127\">"
+ "<IMG height=1 src=\"/images/space.gif\" width=127></td><td align=\"left\" "
+ "class=\"footer\" nowrap>all materials © Adaptinet Inc. All rights reserved.</td>"
+ "<td align=\"right\" width=\"100%\" class=\"footer\" nowrap>"
+ "<A class=\"footerlink\" href=\"#\">contact Adaptinet</A> | "
+ " <A class=\"footerlink\" href=\"#\">support</A> </td></tr></table><br><br><br><br>";
static {
PLUGIN_ENTRYLEN = PLUGIN_ENTRY.length();
PEER_ENTRYLEN = PEER_ENTRY.length();
}
public MimeHTML_HTTP() {
}
public void init(String u, ITransceiver s) {
transceiver = s;
urlBase = s.getHTTPRoot();
webBase = s.getWebRoot();
if (urlBase == null || urlBase.equals(".")) {
urlBase = System.getProperty("user.dir", "");
}
if (!urlBase.endsWith(File.separator))
urlBase += File.separator;
if (webBase == null || webBase.equals(".")) {
webBase = System.getProperty("user.dir", "");
}
if (!webBase.endsWith(File.separator))
webBase += File.separator;
url = u;
parseUrl();
}
private void parseUrl() {
try {
pathName = URLDecoder.decode(url, "UTF-8");
} catch (Exception e) {
pathName = url;
}
if (pathName.endsWith("/"))
pathName += "index.html";
if (pathName.startsWith("/"))
pathName = pathName.substring(1);
int dot = pathName.indexOf(".");
if (dot > 0) {
String ext = pathName.substring(dot + 1);
setMimeForExt(ext);
} else
mimeType = HTTP.contentTypeHTML;
}
public String getContentType() {
return mimeType;
}
private void setMimeForExt(String ext) {
if (ext.equalsIgnoreCase("jpg"))
mimeType = HTTP.contentTypeJPEG;
else if (ext.equalsIgnoreCase("htm") || ext.equalsIgnoreCase("html"))
mimeType = HTTP.contentTypeHTML;
else if (ext.equalsIgnoreCase("gif"))
mimeType = HTTP.contentTypeGIF;
else if (ext.equalsIgnoreCase("xsl"))
mimeType = HTTP.contentTypeXML;
else
mimeType = HTTP.contentTypeHTML;
}
public ByteArrayOutputStream process(ITransceiver transceiver,
Request request) {
// note: this process has gotten pretty big, really fast
// need to revisit the exception handling here, there is a better way
File file = null;
String monitor = null;
long length = 0;
try {
bAdminPort = (request.getPort() == transceiver.getAdminPort());
status = isAuthorized(request.getUsername(), request.getPassword());
if (!bAdminPort) {
// non-admin port are regular html requests...
String base = webBase;
if (bAdminPort)
base = urlBase;
file = new File(base + pathName);
if (file.isFile() == false || !file.canRead())
throw new FileNotFoundException();
else
length = file.length();
} else {
if (pathName.equalsIgnoreCase("configuration.html")) {
if (status == HTTP.UNAUTHORIZED || bAdminPort == false)
throw new IllegalAccessException();
monitor = getConfiguration(transceiver);
System.out.println(monitor);
if (monitor != null)
length = monitor.length();
else
status = HTTP.NOT_FOUND;
} else if (pathName.equalsIgnoreCase("monitor.html")) {
if (status == HTTP.UNAUTHORIZED || bAdminPort == false)
throw new IllegalAccessException();
try {
monitor = getMonitor();
length = monitor.length();
} catch (Exception e) {
monitor = e.getMessage();
status = HTTP.INTERNAL_SERVER_ERROR;
} catch (Throwable t) {
monitor = t.getMessage();
status = HTTP.INTERNAL_SERVER_ERROR;
}
} else if (pathName.equalsIgnoreCase("plugins.html")) {
if (status == HTTP.UNAUTHORIZED || bAdminPort == false)
throw new IllegalAccessException();
monitor = getPlugins(transceiver);
if (monitor != null)
length = monitor.length();
else
status = HTTP.INTERNAL_SERVER_ERROR;
} else if (pathName.equalsIgnoreCase("peers.html")) {
if (status == HTTP.UNAUTHORIZED || bAdminPort == false)
throw new IllegalAccessException();
monitor = getPeers(transceiver);
if (monitor != null)
length = monitor.length();
else
status = HTTP.INTERNAL_SERVER_ERROR;
} else if (pathName.startsWith(PLUGIN_ENTRY)) {
if (status == HTTP.UNAUTHORIZED || bAdminPort == false)
throw new IllegalAccessException();
if (pathName.length() > PLUGIN_ENTRYLEN)
monitor = getPluginEntry(transceiver, pathName
.substring(PLUGIN_ENTRYLEN));
else
monitor = getPluginEntry(transceiver, "");
if (monitor != null)
length = monitor.length();
else
status = HTTP.NOT_FOUND;
} else if (pathName.startsWith(PEER_ENTRY)) {
if (status == HTTP.UNAUTHORIZED || bAdminPort == false)
throw new IllegalAccessException();
if (pathName.length() > PEER_ENTRYLEN)
monitor = getPeerEntry(transceiver, pathName
.substring(PEER_ENTRYLEN));
else
monitor = getPeerEntry(transceiver, "");
if (monitor != null)
length = monitor.length();
else
status = HTTP.NOT_FOUND;
} else if (pathName.startsWith("setadmin?")) {
if (status == HTTP.UNAUTHORIZED || bAdminPort == false)
throw new IllegalAccessException();
monitor = setAdmin(pathName.substring(9), request);
length = monitor.length();
} else // files
{
String base = webBase;
if (bAdminPort)
base = urlBase;
file = new File(base + pathName);
if (file.isFile() == false || !file.canRead())
throw new FileNotFoundException();
else
length = file.length();
}
}
} catch (IOException e) {
status = HTTP.NOT_FOUND;
} catch (IllegalAccessException iae) {
status = HTTP.UNAUTHORIZED;
} catch (Exception e) {
status = HTTP.INTERNAL_SERVER_ERROR;
}
try {
Response resp = new Response(request.getOutStream(), transceiver
.getHost(), mimeType);
resp.setResponse(new ByteArrayOutputStream());
resp.setStatus(status);
resp.writeHeader(length);
if (status != HTTP.OK)
return null;
BufferedOutputStream out = new BufferedOutputStream(request
.getOutStream());
if (file != null) {
BufferedInputStream in = new BufferedInputStream(
new FileInputStream(file));
byte[] bytes = new byte[255];
while (true) {
int read = in.read(bytes);
if (read < 0)
break;
out.write(bytes, 0, read);
}
in.close();
} else if (monitor != null) {
out.write(monitor.getBytes());
}
out.flush();
} catch (Exception e) {
}
return null;
}
public static String getPlugins(ITransceiver transceiver) {
try {
return PluginXML.getEntries(transceiver);
} catch (Exception e) {
return null;
}
}
public static String getPeers(ITransceiver transceiver) {
try {
return PeerXML.getEntries(transceiver);
} catch (Exception e) {
return null;
}
}
public static String getConfiguration(ITransceiver transceiver) {
try {
return TransceiverConfiguration.getConfiguration(transceiver);
} catch (Exception e) {
return null;
}
}
public static String getPluginEntry(ITransceiver transceiver, String entry) {
try {
return PluginXML.getEntry(transceiver, entry);
} catch (Exception e) {
return null;
}
}
public static String getPeerEntry(ITransceiver transceiver, String peer) {
try {
return PeerXML.getEntry(transceiver, peer);
} catch (Exception e) {
return null;
}
}
public static String getLicenseKey(ITransceiver transceiver) {
try {
// return
// simpleTransform.transform(licensingXML.getLicense(transceiver),
// transceiver.getHTTPRoot() + "/licensing.xsl");
return null;
} catch (Exception e) {
return null;
}
}
private String setAdmin(String params, Request request) {
try {
StringTokenizer tokenizer = new StringTokenizer(params, "&");
int size = tokenizer.countTokens() * 2;
String token = null;
Properties properties = new Properties();
for (int i = 0; i < size; i += 2) {
if (tokenizer.hasMoreTokens()) {
token = tokenizer.nextToken();
int loc = token.indexOf('=');
properties.setProperty(token.substring(0, loc), token
.substring(loc + 1, token.length()));
}
}
String userid = properties.getProperty("userid");
String current = properties.getProperty("current");
String password = properties.getProperty("password");
String confirm = properties.getProperty("password2");
// check current password here
if (isAuthorized(request.getUsername(), current) == HTTP.UNAUTHORIZED)
return "<H2>The current password is incorrect for user: "
+ request.getUsername() + "</H2>";
if (!password.equals(confirm))
return "<H2>The password does not match the confirm password</H2>";
if (password.equals(""))
return "<H2>The password cannot be empty.</H2>";
MessageDigest md = MessageDigest.getInstance("MD5");
String digest = new String(md.digest(password.getBytes()));
String userpass = userid + ":" + digest;
String authfile = urlBase + ".xbpasswd";
FileOutputStream fs = new FileOutputStream(authfile);
fs.write(userpass.getBytes());
fs.close();
return "<H2>Change password success for " + userid + "</H2>";
} catch (Exception e) {
}
return "<H2>Change password failure.</H2>";
}
public int getStatus() {
return status;
}
private int isAuthorized(String username, String password) {
try {
<|fim▁hole|> return HTTP.OK;
String authfile = urlBase + ".xbpasswd";
File file = new File(authfile);
if (!file.isFile())
return HTTP.OK;
BufferedReader br = new BufferedReader(new InputStreamReader(
new FileInputStream(file)));
String userpass = br.readLine();
br.close();
StringTokenizer st = new StringTokenizer(userpass, ":");
String user = st.hasMoreTokens() ? st.nextToken() : "";
String pass = st.hasMoreTokens() ? st.nextToken() : "";
MessageDigest md = MessageDigest.getInstance("MD5");
String digest = new String(md.digest(password != null ? password
.getBytes() : "".getBytes()));
if (user.equals(username) && pass.equals(digest))
return HTTP.OK;
} catch (IOException ioe) {
} catch (NullPointerException npe) {
} catch (Exception e) {
e.printStackTrace();
}
return HTTP.UNAUTHORIZED;
}
String getMonitor() {
StringBuffer buffer = new StringBuffer();
try {
Vector<PropData> vec = transceiver.requestList();
buffer.append("<html><head><title>Status</title>");
buffer.append("<link rel=\"Stylesheet\" href=\"/style.css\">");
buffer.append("<script language=\"JavaScript\" src=\"/css.js\"></script></head>");
buffer.append("<body BGCOLOR=\"#FFFFFF\">");
buffer.append("<TABLE cellPadding=0 cellSpacing=0 border=0 WIDTH=\"500\"<tr><TD><IMG alt=\"\" src=\"images/empty.gif\" width=30 border=0></TD><td>");
buffer.append("<table border=\"1\" cellspacing=\"0\" cellpadding=\"4\">");
buffer.append("<tr valign=\"top\" class=\"header\">");
buffer.append("<td>Plugin Name</td>");
buffer.append("<td>System ID</td>");
buffer.append("<td>Status</td>");
buffer.append("<td>Control</td></tr>");
Enumeration<PropData> e = vec.elements();
while (e.hasMoreElements()) {
PropData data = e.nextElement();
buffer.append("<tr class=\"text\">");
buffer.append("<td><strong>");
buffer.append(data.getName());
buffer.append("</strong></td>");
buffer.append("<td>");
buffer.append(data.getId());
buffer.append("</td>");
buffer.append("<td>");
buffer.append(data.getState());
buffer.append("</td>");
buffer.append("<td>");
buffer.append("<FORM Method=\"POST\" Action=\"\">");
buffer
.append("<INPUT type=\"hidden\" name=\"command\" value=\"killrequest\"></INPUT>");
buffer
.append("<INPUT TYPE=\"Submit\" value=\" Kill \"></INPUT>");
buffer
.append("<INPUT type=\"checkbox\" name=\"Force\" value=\"true\">Force</INPUT>");
buffer.append("<INPUT type=\"hidden\" ");
buffer.append("name=\"SYSTEMID\" ");
buffer.append("value=\">");
buffer.append(data.getId());
buffer.append("\"></INPUT>");
buffer.append("<INPUT type=\"hidden\" ");
buffer.append("name=\"Name\" ");
buffer.append("value=\">");
buffer.append(data.getName());
buffer.append("\"></INPUT>");
buffer.append("</td>");
}
buffer.append("</FORM>");
buffer.append("</table></td></tr></table>");
buffer.append(footer);
buffer.append("</body>");
buffer.append("</html>");
} catch (Exception e) {
return null;
}
return buffer.toString();
}
}<|fim▁end|>
|
if (!bAdminPort)
|
<|file_name|>provisioner.go<|end_file_name|><|fim▁begin|>// This package implements a provisioner for Packer that executes
// shell scripts within the remote machine.
package shell
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"sort"
"strings"
"time"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/helper/config"
"github.com/mitchellh/packer/packer"
"github.com/mitchellh/packer/template/interpolate"
)
const DefaultRemotePath = "c:/Windows/Temp/script.bat"
var retryableSleep = 2 * time.Second
type Config struct {
common.PackerConfig `mapstructure:",squash"`
// If true, the script contains binary and line endings will not be
// converted from Windows to Unix-style.
Binary bool
// An inline script to execute. Multiple strings are all executed
// in the context of a single shell.
Inline []string
// The local path of the shell script to upload and execute.
Script string
// An array of multiple scripts to run.
Scripts []string
// An array of environment variables that will be injected before
// your command(s) are executed.
Vars []string `mapstructure:"environment_vars"`
// The remote path where the local shell script will be uploaded to.
// This should be set to a writable file that is in a pre-existing directory.
RemotePath string `mapstructure:"remote_path"`
// The command used to execute the script. The '{{ .Path }}' variable
// should be used to specify where the script goes, {{ .Vars }}
// can be used to inject the environment_vars into the environment.
ExecuteCommand string `mapstructure:"execute_command"`
// The timeout for retrying to start the process. Until this timeout
// is reached, if the provisioner can't start a process, it retries.
// This can be set high to allow for reboots.
StartRetryTimeout time.Duration `mapstructure:"start_retry_timeout"`
// This is used in the template generation to format environment variables
// inside the `ExecuteCommand` template.
EnvVarFormat string
ctx interpolate.Context
}
type Provisioner struct {
config Config
}
type ExecuteCommandTemplate struct {
Vars string
Path string
}
func (p *Provisioner) Prepare(raws ...interface{}) error {
err := config.Decode(&p.config, &config.DecodeOpts{
Interpolate: true,
InterpolateContext: &p.config.ctx,
InterpolateFilter: &interpolate.RenderFilter{
Exclude: []string{
"execute_command",
},
},
}, raws...)
if err != nil {
return err
}
if p.config.EnvVarFormat == "" {
p.config.EnvVarFormat = `set "%s=%s" && `
}
if p.config.ExecuteCommand == "" {
p.config.ExecuteCommand = `{{.Vars}}"{{.Path}}"`
}
if p.config.Inline != nil && len(p.config.Inline) == 0 {
p.config.Inline = nil
}
if p.config.StartRetryTimeout == 0 {
p.config.StartRetryTimeout = 5 * time.Minute
}
if p.config.RemotePath == "" {
p.config.RemotePath = DefaultRemotePath
}
if p.config.Scripts == nil {
p.config.Scripts = make([]string, 0)
}
if p.config.Vars == nil {
p.config.Vars = make([]string, 0)
}
var errs error
if p.config.Script != "" && len(p.config.Scripts) > 0 {
errs = packer.MultiErrorAppend(errs,
errors.New("Only one of script or scripts can be specified."))
}
if p.config.Script != "" {
p.config.Scripts = []string{p.config.Script}
}
if len(p.config.Scripts) == 0 && p.config.Inline == nil {
errs = packer.MultiErrorAppend(errs,
errors.New("Either a script file or inline script must be specified."))
} else if len(p.config.Scripts) > 0 && p.config.Inline != nil {
errs = packer.MultiErrorAppend(errs,
errors.New("Only a script file or an inline script can be specified, not both."))
}
for _, path := range p.config.Scripts {
if _, err := os.Stat(path); err != nil {
errs = packer.MultiErrorAppend(errs,
fmt.Errorf("Bad script '%s': %s", path, err))
}
}
// Do a check for bad environment variables, such as '=foo', 'foobar'
for _, kv := range p.config.Vars {
vs := strings.SplitN(kv, "=", 2)
if len(vs) != 2 || vs[0] == "" {<|fim▁hole|> }
}
if errs != nil {
return errs
}
return nil
}
// This function takes the inline scripts, concatenates them
// into a temporary file and returns a string containing the location
// of said file.
func extractScript(p *Provisioner) (string, error) {
temp, err := ioutil.TempFile(os.TempDir(), "packer-windows-shell-provisioner")
if err != nil {
log.Printf("Unable to create temporary file for inline scripts: %s", err)
return "", err
}
writer := bufio.NewWriter(temp)
for _, command := range p.config.Inline {
log.Printf("Found command: %s", command)
if _, err := writer.WriteString(command + "\n"); err != nil {
return "", fmt.Errorf("Error preparing shell script: %s", err)
}
}
if err := writer.Flush(); err != nil {
return "", fmt.Errorf("Error preparing shell script: %s", err)
}
temp.Close()
return temp.Name(), nil
}
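// Provision uploads each configured script (plus any inline commands written
// to a temporary file) to the remote machine and runs it, retrying the
// upload/start sequence until StartRetryTimeout elapses.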
func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
ui.Say(fmt.Sprintf("Provisioning with windows-shell..."))
scripts := make([]string, len(p.config.Scripts))
copy(scripts, p.config.Scripts)
if p.config.Inline != nil {
temp, err := extractScript(p)
if err != nil {
ui.Error(fmt.Sprintf("Unable to extract inline scripts into a file: %s", err))
}
scripts = append(scripts, temp)
}
for _, path := range scripts {
ui.Say(fmt.Sprintf("Provisioning with shell script: %s", path))
log.Printf("Opening %s for reading", path)
f, err := os.Open(path)
if err != nil {
return fmt.Errorf("Error opening shell script: %s", err)
}
defer f.Close()
// Create environment variables to set before executing the command
flattenedVars, err := p.createFlattenedEnvVars()
if err != nil {
return err
}
// Compile the command
p.config.ctx.Data = &ExecuteCommandTemplate{
Vars: flattenedVars,
Path: p.config.RemotePath,
}
command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx)
if err != nil {
return fmt.Errorf("Error processing command: %s", err)
}
// Upload the file and run the command. Do this in the context of
// a single retryable function so that we don't end up with
// the case that the upload succeeded, a restart is initiated,
// and then the command is executed but the file doesn't exist
// any longer.
var cmd *packer.RemoteCmd
err = p.retryable(func() error {
if _, err := f.Seek(0, 0); err != nil {
return err
}
if err := comm.Upload(p.config.RemotePath, f, nil); err != nil {
return fmt.Errorf("Error uploading script: %s", err)
}
cmd = &packer.RemoteCmd{Command: command}
return cmd.StartWithUi(comm, ui)
})
if err != nil {
return err
}
// Close the original file since we copied it
f.Close()
if cmd.ExitStatus != 0 {
return fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus)
}
}
return nil
}
func (p *Provisioner) Cancel() {
// Just hard quit. It isn't a big deal if what we're doing keeps
// running on the other side.
os.Exit(0)
}
// retryable will retry the given function over and over until a
// non-error is returned.
func (p *Provisioner) retryable(f func() error) error {
startTimeout := time.After(p.config.StartRetryTimeout)
for {
var err error
if err = f(); err == nil {
return nil
}
// Create an error and log it
err = fmt.Errorf("Retryable error: %s", err)
log.Printf(err.Error())
// Check if we timed out, otherwise we retry. It is safe to
// retry since the only error case above is if the command
// failed to START.
select {
case <-startTimeout:
return err
default:
time.Sleep(retryableSleep)
}
}
}
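// createFlattenedEnvVars renders the Packer-provided and user-supplied
// environment variables into a single string using EnvVarFormat, sorted by
// key so the generated command is deterministic.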
func (p *Provisioner) createFlattenedEnvVars() (flattened string, err error) {
flattened = ""
envVars := make(map[string]string)
// Always available Packer provided env vars
envVars["PACKER_BUILD_NAME"] = p.config.PackerBuildName
envVars["PACKER_BUILDER_TYPE"] = p.config.PackerBuilderType
httpAddr := common.GetHTTPAddr()
if httpAddr != "" {
envVars["PACKER_HTTP_ADDR"] = httpAddr
}
// Split vars into key/value components
for _, envVar := range p.config.Vars {
keyValue := strings.SplitN(envVar, "=", 2)
if len(keyValue) != 2 || keyValue[0] == "" {
err = fmt.Errorf("Shell provisioner environment variables must be in key=value format. Currently it is '%s'", envVar)
return
}
envVars[keyValue[0]] = keyValue[1]
}
// Create a list of env var keys in sorted order
var keys []string
for k := range envVars {
keys = append(keys, k)
}
sort.Strings(keys)
// Re-assemble vars using OS specific format pattern and flatten
for _, key := range keys {
flattened += fmt.Sprintf(p.config.EnvVarFormat, key, envVars[key])
}
return
}<|fim▁end|>
|
errs = packer.MultiErrorAppend(errs,
fmt.Errorf("Environment variable not in format 'key=value': %s", kv))
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>/**
* Created by huangxinghui on 2016/1/20.
*/
var $ = require('jquery')
var Widget = require('../../widget')
var plugin = require('../../plugin')
var TreeNode = require('./treenode')
var Tree = Widget.extend({
options: {
'labelField': null,
'labelFunction': null,
'childrenField': 'children',
'autoOpen': true
},
events: {
'click li': '_onSelect',
'click i': '_onExpand'
},
_create: function() {
this.$element.addClass('tree')
var that = this
var $ul = $('<ul></ul>')
this._loadFromDataSource()
this.nodes.forEach(function(node) {
that._createNode(node)
$ul.append(node.element)
})
this.$element.append($ul)
},
_onSelect: function(e) {
var $li = $(e.currentTarget),
node = $li.data('node')
e.preventDefault()
if (!$li.hasClass('active')) {
this._setSelectedNode(node)
this._trigger('itemClick', node.data)
}
},
_onExpand: function(e) {
var $li = $(e.currentTarget).closest('li'),
node = $li.data('node')
e.preventDefault()
if (node.isOpen) {
this.collapseNode(node)
}
else {
this.expandNode(node)
}
},
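// Mark the given node as the active selection and fire the 'change' event.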
_setSelectedNode: function(node) {
var $active = this.$element.find('.active')
$active.removeClass('active')
var $li = node.element
$li.addClass('active')
this._trigger('change', node.data)
},
_createNode: function(node) {
if (node.isBranch()) {
this._createFolder(node)
}
else {
this._createLeaf(node)
}
},
_createLeaf: function(node) {
var html = ['<li><a href="#"><span>']
html.push(this._createIndentationHtml(node.getLevel()))
html.push(this.itemToLabel(node.data))
html.push('</span></a></li>')
var $li = $(html.join(''))
$li.data('node', node)
node.element = $li
return $li
},
_createFolder: function(node) {
var that = this
var html = []
if (node.isOpen) {
html.push('<li class="open"><a href="#"><span>')
html.push(this._createIndentationHtml(node.getLevel() - 1))
html.push('<i class="glyphicon glyphicon-minus-sign js-folder"></i>')
}
else {
html.push('<li><a href="#"><span>')
html.push(this._createIndentationHtml(node.getLevel() - 1))
html.push('<i class="glyphicon glyphicon-plus-sign js-folder"></i>')
}
html.push(this.itemToLabel(node.data))
html.push('</span></a></li>')
var $li = $(html.join(''))
var $ul = $('<ul class="children-list"></ul>')
node.children.forEach(function(childNode) {
that._createNode(childNode)
$ul.append(childNode.element)
})
$li.append($ul)
$li.data('node', node)
node.element = $li
return $li
},
_createLabel: function(node) {
var html = ['<span>']
var level = node.getLevel()
if (node.isBranch()) {
html.push(this._createIndentationHtml(level - 1))
html.push('<i class="glyphicon ',
node.isOpen ? 'glyphicon-minus-sign' : 'glyphicon-plus-sign',
' js-folder"></i>')
}
else {
html.push(this._createIndentationHtml(level))
}
html.push(this.itemToLabel(node.data))
html.push('</span>')
return html.join('')
},
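// Build the placeholder icons used to indent a node `count` levels deep.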
_createIndentationHtml: function(count) {
var html = []
for (var i = 0; i < count; i++) {
html.push('<i class="glyphicon tree-indentation"></i>')
}
return html.join('')
},
_loadFromDataSource: function() {
var node, children, nodes = [], that = this
if (this.options.dataSource) {
this.options.dataSource.forEach(function(item) {
node = new TreeNode(item)
children = item[that.options.childrenField]
if (children) {
node.isOpen = that.options.autoOpen
that._loadFromArray(children, node)
}
nodes.push(node)
})
}
this.nodes = nodes
},
_loadFromArray: function(array, parentNode) {
var node, children, that = this
array.forEach(function(item) {
node = new TreeNode(item)
parentNode.addChild(node)
children = item[that.options.childrenField]
if (children) {
node.isOpen = that.options.autoOpen
that._loadFromArray(children, node)
}
})
},
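// Open a branch node: toggle its state and disclosure icon, then fire 'itemOpen'.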
expandNode: function(node) {
if (!node.isBranch()) {
return
}
var $li = node.element
var $disclosureIcon = $li.children('a').find('.js-folder')
if (!node.isOpen) {
node.isOpen = true
$li.addClass('open')
$disclosureIcon.removeClass('glyphicon-plus-sign').addClass('glyphicon-minus-sign')
this._trigger('itemOpen')
}
},
collapseNode: function(node) {
if (!node.isBranch()) {
return
}
var $li = node.element
var $disclosureIcon = $li.children('a').find('.js-folder')
if (node.isOpen) {
node.isOpen = false
$li.removeClass('open')
$disclosureIcon.removeClass('glyphicon-minus-sign').addClass('glyphicon-plus-sign')
this._trigger('itemClose')
}
},
expandAll: function() {
var that = this
this.nodes.forEach(function(node) {
that.expandNode(node)
})
},
collapseAll: function() {
var that = this
this.nodes.forEach(function(node) {
that.collapseNode(node)
})
},
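// Insert a new item under parentNode, converting a leaf parent into a folder
// when needed, then expand the parent and select the new node.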
append: function(item, parentNode) {
var $ul, $li, $prev, node = new TreeNode(item)
if (parentNode.isBranch()) {
parentNode.addChild(node)
$ul = parentNode.element.children('ul')
this._createNode(node)
$li = node.element
$ul.append($li)
}
else {
parentNode.addChild(node)
$li = parentNode.element
$prev = $li.prev()
$ul = $li.parent()
parentNode.element = null
$li.remove()
$li = this._createFolder(parentNode)
if ($prev.length) {
$prev.after($li)
}
else {
$ul.append($li)
}
}
this.expandNode(parentNode)
this._setSelectedNode(node)
},
remove: function(node) {
var parentNode = node.parent
node.element.remove()
node.destroy()
this._setSelectedNode(parentNode)
},
update: function(node) {
var $li = node.element
$li.children('a').html(this._createLabel(node))
},
getSelectedNode: function() {
var $li = this.$element.find('.active')
return $li.data('node')
},
<|fim▁hole|> },
itemToLabel: function(data) {
if (!data) {
return ''
}
if (this.options.labelFunction != null) {
return this.options.labelFunction(data)
}
else if (this.options.labelField != null) {
return data[this.options.labelField]
}
else {
return data
}
}
})
plugin('tree', Tree)<|fim▁end|>
|
getSelectedItem: function() {
var node = this.getSelectedNode()
return node.data
|
<|file_name|>constants.py<|end_file_name|><|fim▁begin|>SAMPLE_POLICIES = [
{
'resources': {
'indices': ['kibana-int', ],
},
'users': ['*'],
'permissions': ['index_write', 'index_read']
},
{
'resources': {
'cluster': True
},<|fim▁hole|> {
'resources': {
'indices': ['joes_index', ],
},
'users': ['joe'],
'permissions': ['index_write', 'index_read']
},
{
'resources': {
'indices': ['*', ],
},
'users': ['auditor', ],
'permissions': ['index_read']
},
]<|fim▁end|>
|
'users': ['alan'],
'permissions': ['kibana_admin', ]
},
|
<|file_name|>test_parse.py<|end_file_name|><|fim▁begin|>from unittest import TestCase, main
from io import StringIO
from brocclib.parse import (<|fim▁hole|> read_blast, iter_fasta, parse_accession,
)
class AccessionTests(TestCase):
def test_parse_accession_old_format(self):
self.assertEqual(
parse_accession('gi|259100874|gb|GQ513762.1|'),
"GQ513762.1")
self.assertEqual(
parse_accession("gi|1857499|gb|U83468.1|TSU83468"),
"U83468.1")
self.assertEqual(
parse_accession("gi|163263088|emb|AM922223.1|"),
"AM922223.1")
def test_parse_accession_new_format(self):
self.assertEqual(parse_accession('GQ513762.1'), "GQ513762.1")
class FastaTests(TestCase):
def test_basic(self):
lines = [
">lab1",
"TTTTCCC",
">lab2",
"CCAAAA",
]
seqs = iter_fasta(lines)
self.assertEqual(next(seqs), ("lab1", "TTTTCCC"))
self.assertEqual(next(seqs), ("lab2", "CCAAAA"))
self.assertRaises(StopIteration, next, seqs)
class BlastOutputTests(TestCase):
def test_normal_output(self):
obs = read_blast(StringIO(normal_output))
h = obs['0 E7_168192'][0]
self.assertEqual(h.accession, "GQ513762.1")
self.assertEqual(h.pct_id, 98.74)
self.assertEqual(h.length, 159)
def test_malformed_output(self):
obs = read_blast(StringIO(malformed_output))
h = obs['0 E7_168192'][0]
self.assertEqual(h.accession, "GQ513762.1")
self.assertEqual(h.pct_id, 98.74)
self.assertEqual(h.length, 159)
def test_missing_read(self):
obs = read_blast(StringIO(normal_output))
self.assertEqual(obs['sdlkj'], [])
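# Fixture data: tabular BLAST output (-outfmt 7); the "malformed" variant below
# lacks the closing "# BLAST processed" summary line.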
normal_output = """\
# BLASTN 2.2.25+
# Query: 0 E7_168192
# Database: /home/rohinis/blastdb/blast_nt/nt
# Fields: query id, subject id, % identity, alignment length, mismatches, gap opens, q. start, q. end, s. start, s. end, evalue, bit score
# 100 hits found
0 gi|259100874|gb|GQ513762.1| 98.74 159 1 1 407 564 1 159 2e-70 275
0 gi|259098555|gb|GQ520853.1| 98.74 159 1 1 407 564 1 159 2e-70 275
0 gi|259098210|gb|GQ520508.1| 98.11 159 2 1 407 564 1 159 1e-68 269
0 gi|259092808|gb|GQ524514.1| 98.11 159 1 2 407 564 1 158 1e-67 266
0 gi|259107208|gb|GQ510686.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259103360|gb|GQ516248.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259101730|gb|GQ514618.1| 98.68 152 1 1 414 564 1 152 2e-66 262
# BLAST processed 608 queries
"""
malformed_output = """\
# BLASTN 2.2.25+
# Query: 0 E7_168192
# Database: /home/rohinis/blastdb/blast_nt/nt
# Fields: query id, subject id, % identity, alignment length, mismatches, gap opens, q. start, q. end, s. start, s. end, evalue, bit score
# 100 hits found
0 gi|259100874|gb|GQ513762.1| 98.74 159 1 1 407 564 1 159 2e-70 275
0 gi|259098555|gb|GQ520853.1| 98.74 159 1 1 407 564 1 159 2e-70 275
0 gi|259098210|gb|GQ520508.1| 98.11 159 2 1 407 564 1 159 1e-68 269
0 gi|259092808|gb|GQ524514.1| 98.11 159 1 2 407 564 1 158 1e-67 266
0 gi|259107208|gb|GQ510686.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259103360|gb|GQ516248.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259101730|gb|GQ514618.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259093119|gb|GQ524825.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259100068|gb|GQ522366.1| 98.67 150 1 1 416 564 1 150 2e-65 259
0 gi|259099396|gb|GQ521694.1| 98.67 150 1 1 416 564 1 150 2e-65 259
"""
if __name__ == '__main__':
main()<|fim▁end|>
| |
<|file_name|>xml_module.py<|end_file_name|><|fim▁begin|>import copy
import json
import logging
import os
import sys
from lxml import etree
from lxml.etree import Element, ElementTree, XMLParser
from xblock.core import XML_NAMESPACES
from xblock.fields import Dict, Scope, ScopeIds
from xblock.runtime import KvsFieldData
import dogstats_wrapper as dog_stats_api
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata
from xmodule.x_module import DEPRECATION_VSCOMPAT_EVENT, XModuleDescriptor
log = logging.getLogger(__name__)
# assume all XML files are persisted as utf-8.
EDX_XML_PARSER = XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True,
encoding='utf-8')
def name_to_pathname(name):
"""
Convert a location name for use in a path: replace ':' with '/'.
This allows users of the xml format to organize content into directories
"""
return name.replace(':', '/')
def is_pointer_tag(xml_obj):
"""
Check if xml_obj is a pointer tag: <blah url_name="something" />.
No children, one attribute named url_name, no text.
Special case for course roots: the pointer is
<course url_name="something" org="myorg" course="course">
xml_obj: an etree Element
Returns a bool.
"""
if xml_obj.tag != "course":
expected_attr = set(['url_name'])
else:
expected_attr = set(['url_name', 'course', 'org'])
actual_attr = set(xml_obj.attrib.keys())
has_text = xml_obj.text is not None and len(xml_obj.text.strip()) > 0
return len(xml_obj) == 0 and actual_attr == expected_attr and not has_text
def serialize_field(value):
"""
Return a string version of the value (where value is the JSON-formatted, internally stored value).
If the value is a string, then we simply return what was passed in.
Otherwise, we return json.dumps on the input value.
"""
if isinstance(value, basestring):
return value
return json.dumps(value, cls=EdxJSONEncoder)
def deserialize_field(field, value):
"""
Deserialize the string version to the value stored internally.
Note that this is not the same as the value returned by from_json, as model types typically store
their value internally as JSON. By default, this method will return the result of calling json.loads
on the supplied value, unless json.loads throws a TypeError, or the type of the value returned by json.loads
is not supported for this class (from_json throws an Error). In either of those cases, this method returns
the input value.
"""
try:
deserialized = json.loads(value)
if deserialized is None:
return deserialized
try:
field.from_json(deserialized)
return deserialized
except (ValueError, TypeError):
# Support older serialized version, which was just a string, not result of json.dumps.
# If the deserialized version cannot be converted to the type (via from_json),
# just return the original value. For example, if a string value of '3.4' was
# stored for a String field (before we started storing the result of json.dumps),
# then it would be deserialized as 3.4, but 3.4 is not supported for a String
# field. Therefore field.from_json(3.4) will throw an Error, and we should
# actually return the original value of '3.4'.
return value
except (ValueError, TypeError):
# Support older serialized version.
return value
class XmlParserMixin(object):
"""
Class containing XML parsing functionality shared between XBlock and XModuleDescriptor.
"""
# Extension to append to filename paths
filename_extension = 'xml'
xml_attributes = Dict(help="Map of unhandled xml attributes, used only for storage between import and export",
default={}, scope=Scope.settings)
# VS[compat]. Backwards compatibility code that can go away after
# importing 2012 courses.
# A set of metadata key conversions that we want to make
metadata_translations = {
'slug': 'url_name',
'name': 'display_name',
}
@classmethod<|fim▁hole|> """
VS[compat]
"""
return cls.metadata_translations.get(key, key)
# The attributes will be removed from the definition xml passed
# to definition_from_xml, and from the xml returned by definition_to_xml
# Note -- url_name isn't in this list because it's handled specially on
# import and export.
metadata_to_strip = ('data_dir',
'tabs', 'grading_policy',
'discussion_blackouts',
# VS[compat] -- remove the below attrs once everything is in the CMS
'course', 'org', 'url_name', 'filename',
# Used for storing xml attributes between import and export, for roundtrips
'xml_attributes')
metadata_to_export_to_policy = ('discussion_topics',)
@staticmethod
def _get_metadata_from_xml(xml_object, remove=True):
"""
Extract the metadata from the XML.
"""
meta = xml_object.find('meta')
if meta is None:
return ''
dmdata = meta.text
if remove:
xml_object.remove(meta)
return dmdata
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
Return the definition to be passed to the newly created descriptor
during from_xml
xml_object: An etree Element
"""
raise NotImplementedError("%s does not implement definition_from_xml" % cls.__name__)
@classmethod
def clean_metadata_from_xml(cls, xml_object):
"""
Remove any attribute named for a field with scope Scope.settings from the supplied
xml_object
"""
for field_name, field in cls.fields.items():
if field.scope == Scope.settings and xml_object.get(field_name) is not None:
del xml_object.attrib[field_name]
@classmethod
def file_to_xml(cls, file_object):
"""
Used when this module wants to parse a file object to xml
that will be converted to the definition.
Returns an lxml Element
"""
return etree.parse(file_object, parser=EDX_XML_PARSER).getroot()
@classmethod
def load_file(cls, filepath, fs, def_id): # pylint: disable=invalid-name
"""
Open the specified file in fs, and call cls.file_to_xml on it,
returning the lxml object.
Add details and reraise on error.
"""
try:
with fs.open(filepath) as xml_file:
return cls.file_to_xml(xml_file)
except Exception as err:
# Add info about where we are, but keep the traceback
msg = 'Unable to load file contents at path %s for item %s: %s ' % (
filepath, def_id, err)
raise Exception, msg, sys.exc_info()[2]
@classmethod
def load_definition(cls, xml_object, system, def_id, id_generator):
"""
Load a descriptor definition from the specified xml_object.
Subclasses should not need to override this except in special
cases (e.g. html module)
Args:
xml_object: an lxml.etree._Element containing the definition to load
system: the modulestore system (aka, runtime) which accesses data and provides access to services
def_id: the definition id for the block--used to compute the usage id and asides ids
id_generator: used to generate the usage_id
"""
# VS[compat] -- the filename attr should go away once everything is
# converted. (note: make sure html files still work once this goes away)
filename = xml_object.get('filename')
if filename is None:
definition_xml = copy.deepcopy(xml_object)
filepath = ''
aside_children = []
else:
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_load_definition_filename"]
)
filepath = cls._format_filepath(xml_object.tag, filename)
# VS[compat]
# TODO (cpennington): If the file doesn't exist at the right path,
# give the class a chance to fix it up. The file will be written out
# again in the correct format. This should go away once the CMS is
# online and has imported all current (fall 2012) courses from xml
if not system.resources_fs.exists(filepath) and hasattr(cls, 'backcompat_paths'):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_load_definition_backcompat"]
)
candidates = cls.backcompat_paths(filepath)
for candidate in candidates:
if system.resources_fs.exists(candidate):
filepath = candidate
break
definition_xml = cls.load_file(filepath, system.resources_fs, def_id)
usage_id = id_generator.create_usage(def_id)
aside_children = system.parse_asides(definition_xml, def_id, usage_id, id_generator)
# Add the attributes from the pointer node
definition_xml.attrib.update(xml_object.attrib)
definition_metadata = cls._get_metadata_from_xml(definition_xml)
cls.clean_metadata_from_xml(definition_xml)
definition, children = cls.definition_from_xml(definition_xml, system)
if definition_metadata:
definition['definition_metadata'] = definition_metadata
definition['filename'] = [filepath, filename]
if aside_children:
definition['aside_children'] = aside_children
return definition, children
@classmethod
def load_metadata(cls, xml_object):
"""
Read the metadata attributes from this xml_object.
Returns a dictionary {key: value}.
"""
metadata = {'xml_attributes': {}}
for attr, val in xml_object.attrib.iteritems():
# VS[compat]. Remove after all key translations done
attr = cls._translate(attr)
if attr in cls.metadata_to_strip:
if attr in ('course', 'org', 'url_name', 'filename'):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:xmlparser_util_mixin_load_metadata",
"metadata:{}".format(attr),
)
)
# don't load these
continue
if attr not in cls.fields:
metadata['xml_attributes'][attr] = val
else:
metadata[attr] = deserialize_field(cls.fields[attr], val)
return metadata
@classmethod
def apply_policy(cls, metadata, policy):
"""
Add the keys in policy to metadata, after processing them
through the attrmap. Updates the metadata dict in place.
"""
for attr, value in policy.iteritems():
attr = cls._translate(attr)
if attr not in cls.fields:
# Store unknown attributes coming from policy.json
# in such a way that they will export to xml unchanged
metadata['xml_attributes'][attr] = value
else:
metadata[attr] = value
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator): # pylint: disable=unused-argument
"""
Use `node` to construct a new block.
Arguments:
node (etree.Element): The xml node to parse into an xblock.
runtime (:class:`.Runtime`): The runtime to use while parsing.
keys (:class:`.ScopeIds`): The keys identifying where this block
will store its data.
id_generator (:class:`.IdGenerator`): An object that will allow the
runtime to generate correct definition and usage ids for
children of this block.
Returns (XBlock): The newly parsed XBlock
"""
# VS[compat] -- just have the url_name lookup, once translation is done
url_name = cls._get_url_name(node)
def_id = id_generator.create_definition(node.tag, url_name)
usage_id = id_generator.create_usage(def_id)
aside_children = []
# VS[compat] -- detect new-style each-in-a-file mode
if is_pointer_tag(node):
# new style:
# read the actual definition file--named using url_name.replace(':','/')
definition_xml, filepath = cls.load_definition_xml(node, runtime, def_id)
aside_children = runtime.parse_asides(definition_xml, def_id, usage_id, id_generator)
else:
filepath = None
definition_xml = node
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_parse_xml"]
)
# Note: removes metadata.
definition, children = cls.load_definition(definition_xml, runtime, def_id, id_generator)
# VS[compat] -- make Ike's github preview links work in both old and
# new file layouts
if is_pointer_tag(node):
# new style -- contents actually at filepath
definition['filename'] = [filepath, filepath]
metadata = cls.load_metadata(definition_xml)
# move definition metadata into dict
dmdata = definition.get('definition_metadata', '')
if dmdata:
metadata['definition_metadata_raw'] = dmdata
try:
metadata.update(json.loads(dmdata))
except Exception as err:
log.debug('Error in loading metadata %r', dmdata, exc_info=True)
metadata['definition_metadata_err'] = str(err)
definition_aside_children = definition.pop('aside_children', None)
if definition_aside_children:
aside_children.extend(definition_aside_children)
# Set/override any metadata specified by policy
cls.apply_policy(metadata, runtime.get_policy(usage_id))
field_data = {}
field_data.update(metadata)
field_data.update(definition)
field_data['children'] = children
field_data['xml_attributes']['filename'] = definition.get('filename', ['', None]) # for git link
kvs = InheritanceKeyValueStore(initial_values=field_data)
field_data = KvsFieldData(kvs)
xblock = runtime.construct_xblock_from_class(
cls,
# We're loading a descriptor, so student_id is meaningless
ScopeIds(None, node.tag, def_id, usage_id),
field_data,
)
if aside_children:
asides_tags = [x.tag for x in aside_children]
asides = runtime.get_asides(xblock)
for asd in asides:
if asd.scope_ids.block_type in asides_tags:
xblock.add_aside(asd)
return xblock
@classmethod
def _get_url_name(cls, node):
"""
Reads url_name attribute from the node
"""
return node.get('url_name', node.get('slug'))
@classmethod
def load_definition_xml(cls, node, runtime, def_id):
"""
Loads definition_xml stored in a dedicated file
"""
url_name = cls._get_url_name(node)
filepath = cls._format_filepath(node.tag, name_to_pathname(url_name))
definition_xml = cls.load_file(filepath, runtime.resources_fs, def_id)
return definition_xml, filepath
@classmethod
def _format_filepath(cls, category, name):
return u'{category}/{name}.{ext}'.format(category=category,
name=name,
ext=cls.filename_extension)
def export_to_file(self):
"""If this returns True, write the definition of this descriptor to a separate
file.
NOTE: Do not override this without a good reason. It is here
specifically for customtag...
"""
return True
def add_xml_to_node(self, node):
"""
For exporting, set data on `node` from ourselves.
"""
# Get the definition
xml_object = self.definition_to_xml(self.runtime.export_fs)
for aside in self.runtime.get_asides(self):
if aside.needs_serialization():
aside_node = etree.Element("unknown_root", nsmap=XML_NAMESPACES)
aside.add_xml_to_node(aside_node)
xml_object.append(aside_node)
self.clean_metadata_from_xml(xml_object)
# Set the tag on both nodes so we get the file path right.
xml_object.tag = self.category
node.tag = self.category
# Add the non-inherited metadata
for attr in sorted(own_metadata(self)):
# don't want e.g. data_dir
if attr not in self.metadata_to_strip and attr not in self.metadata_to_export_to_policy:
val = serialize_field(self._field_data.get(self, attr))
try:
xml_object.set(attr, val)
except Exception:
logging.exception(
u'Failed to serialize metadata attribute %s with value %s in module %s. This could mean data loss!!!',
attr, val, self.url_name
)
for key, value in self.xml_attributes.items():
if key not in self.metadata_to_strip:
xml_object.set(key, serialize_field(value))
if self.export_to_file():
# Write the definition to a file
url_path = name_to_pathname(self.url_name)
filepath = self._format_filepath(self.category, url_path)
self.runtime.export_fs.makedirs(os.path.dirname(filepath), recreate=True)
with self.runtime.export_fs.open(filepath, 'wb') as fileobj:
ElementTree(xml_object).write(fileobj, pretty_print=True, encoding='utf-8')
else:
# Write all attributes from xml_object onto node
node.clear()
node.tag = xml_object.tag
node.text = xml_object.text
node.tail = xml_object.tail
node.attrib.update(xml_object.attrib)
node.extend(xml_object)
node.set('url_name', self.url_name)
# Special case for course pointers:
if self.category == 'course':
# add org and course attributes on the pointer tag
node.set('org', self.location.org)
node.set('course', self.location.course)
def definition_to_xml(self, resource_fs):
"""
Return a new etree Element object created from this modules definition.
"""
raise NotImplementedError(
"%s does not implement definition_to_xml" % self.__class__.__name__)
@property
def non_editable_metadata_fields(self):
"""
Return a list of all metadata fields that cannot be edited.
"""
non_editable_fields = super(XmlParserMixin, self).non_editable_metadata_fields
non_editable_fields.append(XmlParserMixin.xml_attributes)
return non_editable_fields
class XmlDescriptor(XmlParserMixin, XModuleDescriptor): # pylint: disable=abstract-method
"""
Mixin class for standardized parsing of XModule xml.
"""
resources_dir = None
@classmethod
def from_xml(cls, xml_data, system, id_generator):
"""
Creates an instance of this descriptor from the supplied xml_data.
This may be overridden by subclasses.
Args:
xml_data (str): A string of xml that will be translated into data and children
for this module
system (:class:`.XMLParsingSystem):
id_generator (:class:`xblock.runtime.IdGenerator`): Used to generate the
usage_ids and definition_ids when loading this xml
"""
# Shim from from_xml to the parse_xml defined in XmlParserMixin.
# This only exists to satisfy subclasses that both:
# a) define from_xml themselves
# b) call super(..).from_xml(..)
return super(XmlDescriptor, cls).parse_xml(
etree.fromstring(xml_data),
system,
None, # This is ignored by XmlParserMixin
id_generator,
)
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
"""
Interpret the parsed XML in `node`, creating an XModuleDescriptor.
"""
if cls.from_xml != XmlDescriptor.from_xml:
# Skip the parse_xml from XmlParserMixin to get the shim parse_xml
# from XModuleDescriptor, which actually calls `from_xml`.
return super(XmlParserMixin, cls).parse_xml(node, runtime, keys, id_generator) # pylint: disable=bad-super-call
else:
return super(XmlDescriptor, cls).parse_xml(node, runtime, keys, id_generator)
def export_to_xml(self, resource_fs):
"""
Returns an xml string representing this module, and all modules
underneath it. May also write required resources out to resource_fs.
Assumes that modules have single parentage (that no module appears twice
in the same course), and that it is thus safe to nest modules as xml
children as appropriate.
The returned XML should be able to be parsed back into an identical
XModuleDescriptor using the from_xml method with the same system, org,
and course
"""
# Shim from export_to_xml to the add_xml_to_node defined in XmlParserMixin.
# This only exists to satisfy subclasses that both:
# a) define export_to_xml themselves
# b) call super(..).export_to_xml(..)
node = Element(self.category)
super(XmlDescriptor, self).add_xml_to_node(node)
return etree.tostring(node)
def add_xml_to_node(self, node):
"""
Export this :class:`XModuleDescriptor` as XML, by setting attributes on the provided
`node`.
"""
if self.export_to_xml != XmlDescriptor.export_to_xml:
# Skip the add_xml_to_node from XmlParserMixin to get the shim add_xml_to_node
# from XModuleDescriptor, which actually calls `export_to_xml`.
super(XmlParserMixin, self).add_xml_to_node(node) # pylint: disable=bad-super-call
else:
super(XmlDescriptor, self).add_xml_to_node(node)<|fim▁end|>
|
def _translate(cls, key):
|
<|file_name|>trendline.src.js<|end_file_name|><|fim▁begin|>/**
* @license Highstock JS v8.1.0 (2020-05-05)
*
* Indicator series type for Highstock
*
* (c) 2010-2019 Sebastian Bochan
*
* License: www.highcharts.com/license
*/
'use strict';
(function (factory) {
if (typeof module === 'object' && module.exports) {
factory['default'] = factory;
module.exports = factory;
} else if (typeof define === 'function' && define.amd) {
define('highcharts/indicators/trendline', ['highcharts', 'highcharts/modules/stock'], function (Highcharts) {
factory(Highcharts);
factory.Highcharts = Highcharts;
return factory;
});
} else {
factory(typeof Highcharts !== 'undefined' ? Highcharts : undefined);
}
}(function (Highcharts) {
var _modules = Highcharts ? Highcharts._modules : {};
function _registerModule(obj, path, args, fn) {
if (!obj.hasOwnProperty(path)) {
obj[path] = fn.apply(null, args);
}
}
_registerModule(_modules, 'indicators/trendline.src.js', [_modules['parts/Utilities.js']], function (U) {
/* *
*
* License: www.highcharts.com/license
*
* !!!!!!! SOURCE GETS TRANSPILED BY TYPESCRIPT. EDIT TS FILE ONLY. !!!!!!!
*
* */
var isArray = U.isArray,
seriesType = U.seriesType;
/**
* The Trend line series type.
*
* @private
* @class
* @name Highcharts.seriesTypes.trendline
*
* @augments Highcharts.Series
*/
seriesType('trendline', 'sma',
/**
* Trendline (linear regression) fits a straight line to the selected data
* using a method called the Sum Of Least Squares. This series requires the
* `linkedTo` option to be set.
*
* @sample stock/indicators/trendline
* Trendline indicator
*
* @extends plotOptions.sma
* @since 7.1.3
* @product highstock
* @requires stock/indicators/indicators
* @requires stock/indicators/trendline
* @optionparent plotOptions.trendline
*/
{
/**
* @excluding period
*/
params: {
/**
* The point index which indicator calculations will base. For
* example using OHLC data, index=2 means the indicator will be
* calculated using Low values.
*
* @default 3
*/
index: 3
}
},
/**
* @lends Highcharts.Series#
*/
{
nameBase: 'Trendline',
nameComponents: false,<|fim▁hole|> getValues: function (series, params) {
var xVal = series.xData,
yVal = series.yData,
LR = [],
xData = [],
yData = [],
sumX = 0,
sumY = 0,
sumXY = 0,
sumX2 = 0,
xValLength = xVal.length,
index = params.index,
alpha,
beta,
i,
x,
y;
// Get sums:
for (i = 0; i < xValLength; i++) {
x = xVal[i];
y = isArray(yVal[i]) ? yVal[i][index] : yVal[i];
sumX += x;
sumY += y;
sumXY += x * y;
sumX2 += x * x;
}
// Get slope and offset:
alpha = (xValLength * sumXY - sumX * sumY) /
(xValLength * sumX2 - sumX * sumX);
if (isNaN(alpha)) {
alpha = 0;
}
beta = (sumY - alpha * sumX) / xValLength;
// Calculate linear regression:
for (i = 0; i < xValLength; i++) {
x = xVal[i];
y = alpha * x + beta;
// Prepare arrays required for getValues() method
LR[i] = [x, y];
xData[i] = x;
yData[i] = y;
}
return {
xData: xData,
yData: yData,
values: LR
};
}
});
/**
* A `TrendLine` series. If the [type](#series.trendline.type) option is not
* specified, it is inherited from [chart.type](#chart.type).
*
* @extends series,plotOptions.trendline
* @since 7.1.3
* @product highstock
* @excluding dataParser, dataURL
* @requires stock/indicators/indicators
* @requires stock/indicators/trendline
* @apioption series.trendline
*/
''; // to include the above in the js output
});
_registerModule(_modules, 'masters/indicators/trendline.src.js', [], function () {
});
}));<|fim▁end|>
| |
<|file_name|>timer.rs<|end_file_name|><|fim▁begin|>use crate::sys;
use libc::c_void;
use std::marker::PhantomData;
use std::mem;
use crate::TimerSubsystem;
impl TimerSubsystem {
/// Constructs a new timer using the boxed closure `callback`.
///
/// The timer is started immediately, it will be cancelled either:
///
/// * when the timer is dropped
/// * or when the callback returns a non-positive continuation interval
#[must_use = "if unused the Timer will be dropped immediately"]<|fim▁hole|> let callback = Box::new(callback);
let timer_id = sys::SDL_AddTimer(
delay,
Some(c_timer_callback),
mem::transmute_copy(&callback),
);
Timer {
callback: Some(callback),
raw: timer_id,
_marker: PhantomData,
}
}
}
/// Gets the number of milliseconds elapsed since the timer subsystem was initialized.
///
/// It's recommended that you use another library for timekeeping, such as `time`.
#[doc(alias = "SDL_GetTicks")]
pub fn ticks(&self) -> u32 {
// Google says this is probably not thread-safe (TODO: prove/disprove this).
unsafe { sys::SDL_GetTicks() }
}
/// Sleeps the current thread for the specified amount of milliseconds.
///
/// It's recommended that you use `std::thread::sleep()` instead.
#[doc(alias = "SDL_Delay")]
pub fn delay(&mut self, ms: u32) {
// Google says this is probably not thread-safe (TODO: prove/disprove this).
unsafe { sys::SDL_Delay(ms) }
}
#[doc(alias = "SDL_GetPerformanceCounter")]
pub fn performance_counter(&self) -> u64 {
unsafe { sys::SDL_GetPerformanceCounter() }
}
#[doc(alias = "SDL_GetPerformanceFrequency")]
pub fn performance_frequency(&self) -> u64 {
unsafe { sys::SDL_GetPerformanceFrequency() }
}
}
pub type TimerCallback<'a> = Box<dyn FnMut() -> u32 + 'a + Sync>;
pub struct Timer<'b, 'a> {
callback: Option<Box<TimerCallback<'a>>>,
raw: sys::SDL_TimerID,
_marker: PhantomData<&'b ()>,
}
impl<'b, 'a> Timer<'b, 'a> {
/// Returns the closure as a trait-object and cancels the timer
/// by consuming it...
pub fn into_inner(mut self) -> TimerCallback<'a> {
*self.callback.take().unwrap()
}
}
impl<'b, 'a> Drop for Timer<'b, 'a> {
#[inline]
#[doc(alias = "SDL_RemoveTimer")]
fn drop(&mut self) {
// SDL_RemoveTimer returns SDL_FALSE if the timer wasn't found (impossible),
// or the timer has been cancelled via the callback (possible).
// The timer being cancelled isn't an issue, so we ignore the result.
unsafe { sys::SDL_RemoveTimer(self.raw) };
}
}
extern "C" fn c_timer_callback(_interval: u32, param: *mut c_void) -> u32 {
let f = param as *mut std::boxed::Box<dyn std::ops::Fn() -> u32>;
unsafe { (*f)() }
}
#[cfg(not(target_os = "macos"))]
#[cfg(test)]
mod test {
use std::sync::{Arc, Mutex};
use std::time::Duration;
#[test]
fn test_timer() {
test_timer_runs_multiple_times();
test_timer_runs_at_least_once();
test_timer_can_be_recreated();
}
fn test_timer_runs_multiple_times() {
let sdl_context = crate::sdl::init().unwrap();
let timer_subsystem = sdl_context.timer().unwrap();
let local_num = Arc::new(Mutex::new(0));
let timer_num = local_num.clone();
let _timer = timer_subsystem.add_timer(
20,
Box::new(|| {
// increment up to 10 times (0 -> 9)
// tick again in 100ms after each increment
//
let mut num = timer_num.lock().unwrap();
if *num < 9 {
*num += 1;
20
} else {
0
}
}),
);
// tick the timer at least 10 times w/ 200ms of "buffer"
::std::thread::sleep(Duration::from_millis(250));
let num = local_num.lock().unwrap(); // read the number back
assert_eq!(*num, 9); // it should have incremented at least 10 times...
}
fn test_timer_runs_at_least_once() {
let sdl_context = crate::sdl::init().unwrap();
let timer_subsystem = sdl_context.timer().unwrap();
let local_flag = Arc::new(Mutex::new(false));
let timer_flag = local_flag.clone();
let _timer = timer_subsystem.add_timer(
20,
Box::new(|| {
let mut flag = timer_flag.lock().unwrap();
*flag = true;
0
}),
);
::std::thread::sleep(Duration::from_millis(50));
let flag = local_flag.lock().unwrap();
assert_eq!(*flag, true);
}
fn test_timer_can_be_recreated() {
let sdl_context = crate::sdl::init().unwrap();
let timer_subsystem = sdl_context.timer().unwrap();
let local_num = Arc::new(Mutex::new(0));
let timer_num = local_num.clone();
// run the timer once and reclaim its closure
let timer_1 = timer_subsystem.add_timer(
20,
Box::new(move || {
let mut num = timer_num.lock().unwrap();
*num += 1; // increment the number
0 // do not run timer again
}),
);
// reclaim closure after timer runs
::std::thread::sleep(Duration::from_millis(50));
let closure = timer_1.into_inner();
// create a second timer and increment again
let _timer_2 = timer_subsystem.add_timer(20, closure);
::std::thread::sleep(Duration::from_millis(50));
// check that timer was incremented twice
let num = local_num.lock().unwrap();
assert_eq!(*num, 2);
}
}<|fim▁end|>
|
#[doc(alias = "SDL_AddTimer")]
pub fn add_timer<'b, 'c>(&'b self, delay: u32, callback: TimerCallback<'c>) -> Timer<'b, 'c> {
unsafe {
|
<|file_name|>tags-on-commits-list.tsx<|end_file_name|><|fim▁begin|>import React from 'dom-chef';
import cache from 'webext-storage-cache';
import select from 'select-dom';
import {TagIcon} from '@primer/octicons-react';
import arrayUnion from 'array-union';
import * as pageDetect from 'github-url-detection';
import features from '.';
import * as api from '../github-helpers/api';
import {getCommitHash} from './mark-merge-commits-in-list';
import {buildRepoURL, getRepo} from '../github-helpers';
type CommitTags = Record<string, string[]>;
interface BaseTarget {<|fim▁hole|>type TagTarget = {
tagger: {
date: Date;
};
} & BaseTarget;
type CommitTarget = {
committedDate: Date;
} & BaseTarget;
type CommonTarget = TagTarget | CommitTarget;
interface TagNode {
name: string;
target: CommonTarget;
}
function mergeTags(oldTags: CommitTags, newTags: CommitTags): CommitTags {
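	// Merge two SHA → tag-name maps, de-duplicating tag names per commit.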
const result: CommitTags = {...oldTags};
for (const commit in newTags) {
if (result[commit]) {
result[commit] = arrayUnion(result[commit], newTags[commit]);
} else {
result[commit] = newTags[commit];
}
}
return result;
}
function isTagTarget(target: CommonTarget): target is TagTarget {
return 'tagger' in target;
}
async function getTags(lastCommit: string, after?: string): Promise<CommitTags> {
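	// Fetch tags 100 at a time (newest first) via GraphQL, mapping each commit SHA
	// to its tag names, and keep paginating while the oldest tag seen is still newer
	// than the last commit on the page.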
const {repository} = await api.v4(`
repository() {
refs(
first: 100,
refPrefix: "refs/tags/",
orderBy: {
field: TAG_COMMIT_DATE,
direction: DESC
}
${after ? `, after: "${after}"` : ''}
) {
pageInfo {
hasNextPage
endCursor
}
nodes {
name
target {
commitResourcePath
... on Tag {
tagger {
date
}
}
... on Commit {
committedDate
}
}
}
}
object(expression: "${lastCommit}") {
... on Commit {
committedDate
}
}
}
`);
const nodes = repository.refs.nodes as TagNode[];
// If there are no tags in the repository
if (nodes.length === 0) {
return {};
}
let tags: CommitTags = {};
for (const node of nodes) {
const commit = node.target.commitResourcePath.split('/')[4];
if (!tags[commit]) {
tags[commit] = [];
}
tags[commit].push(node.name);
}
const lastTag = nodes[nodes.length - 1].target;
const lastTagIsYounger = new Date(repository.object.committedDate) < new Date(isTagTarget(lastTag) ? lastTag.tagger.date : lastTag.committedDate);
// If the last tag is younger than last commit on the page, then not all commits are accounted for, keep looking
if (lastTagIsYounger && repository.refs.pageInfo.hasNextPage) {
tags = mergeTags(tags, await getTags(lastCommit, repository.refs.pageInfo.endCursor));
}
// There are no tags for this commit
return tags;
}
async function init(): Promise<void | false> {
const cacheKey = `tags:${getRepo()!.nameWithOwner}`;
const commitsOnPage = select.all('li.js-commits-list-item');
const lastCommitOnPage = getCommitHash(commitsOnPage[commitsOnPage.length - 1]);
let cached = await cache.get<Record<string, string[]>>(cacheKey) ?? {};
const commitsWithNoTags = [];
for (const commit of commitsOnPage) {
const targetCommit = getCommitHash(commit);
let targetTags = cached[targetCommit];
if (!targetTags) {
// No tags for this commit found in the cache, check in github
cached = mergeTags(cached, await getTags(lastCommitOnPage)); // eslint-disable-line no-await-in-loop
targetTags = cached[targetCommit];
}
if (!targetTags) {
// There was no tags for this commit, save that info to the cache
commitsWithNoTags.push(targetCommit);
} else if (targetTags.length > 0) {
select('.flex-auto .d-flex.mt-1', commit)!.append(
<div className="ml-2">
<TagIcon/>
<span className="ml-1">{targetTags.map((tags, i) => (
<>
<a href={buildRepoURL('releases/tag', tags)}>{tags}</a>
{(i + 1) === targetTags.length ? '' : ', '}
</>
))}
</span>
</div>,
);
commit.classList.add('rgh-tagged');
}
}
if (commitsWithNoTags.length > 0) {
for (const commit of commitsWithNoTags) {
cached[commit] = [];
}
}
await cache.set(cacheKey, cached, {days: 1});
}
void features.add(__filebasename, {
include: [
pageDetect.isRepoCommitList,
],
init,
});<|fim▁end|>
|
commitResourcePath: string;
}
|
<|file_name|>UserMenu.tsx<|end_file_name|><|fim▁begin|>/*
Copyright 2020, 2021 The Matrix.org Foundation C.I.C.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React, { createRef, useContext, useRef, useState } from "react";
import { Room } from "matrix-js-sdk/src/models/room";
import * as fbEmitter from "fbemitter";
import classNames from "classnames";
import { MatrixClientPeg } from "../../MatrixClientPeg";
import defaultDispatcher from "../../dispatcher/dispatcher";
import dis from "../../dispatcher/dispatcher";
import { ActionPayload } from "../../dispatcher/payloads";
import { Action } from "../../dispatcher/actions";
import { _t } from "../../languageHandler";
import { ChevronFace, ContextMenuButton } from "./ContextMenu";
import { UserTab } from "../views/dialogs/UserSettingsDialog";
import { OpenToTabPayload } from "../../dispatcher/payloads/OpenToTabPayload";
import FeedbackDialog from "../views/dialogs/FeedbackDialog";
import Modal from "../../Modal";
import LogoutDialog from "../views/dialogs/LogoutDialog";
import SettingsStore from "../../settings/SettingsStore";
import { findHighContrastTheme, getCustomTheme, isHighContrastTheme } from "../../theme";
import {
RovingAccessibleButton,
RovingAccessibleTooltipButton,
useRovingTabIndex,
} from "../../accessibility/RovingTabIndex";
import AccessibleButton, { ButtonEvent } from "../views/elements/AccessibleButton";
import SdkConfig from "../../SdkConfig";
import { getHomePageUrl } from "../../utils/pages";
import { OwnProfileStore } from "../../stores/OwnProfileStore";
import { UPDATE_EVENT } from "../../stores/AsyncStore";
import BaseAvatar from '../views/avatars/BaseAvatar';
import { SettingLevel } from "../../settings/SettingLevel";
import IconizedContextMenu, {
IconizedContextMenuCheckbox,
IconizedContextMenuOption,
IconizedContextMenuOptionList,
} from "../views/context_menus/IconizedContextMenu";
import GroupFilterOrderStore from "../../stores/GroupFilterOrderStore";
import { UIFeature } from "../../settings/UIFeature";
import HostSignupAction from "./HostSignupAction";
import { IHostSignupConfig } from "../views/dialogs/HostSignupDialogTypes";
import SpaceStore from "../../stores/spaces/SpaceStore";
import { UPDATE_SELECTED_SPACE } from "../../stores/spaces";
import { replaceableComponent } from "../../utils/replaceableComponent";
import MatrixClientContext from "../../contexts/MatrixClientContext";
import { SettingUpdatedPayload } from "../../dispatcher/payloads/SettingUpdatedPayload";
import UserIdentifierCustomisations from "../../customisations/UserIdentifier";
import PosthogTrackers from "../../PosthogTrackers";
import { ViewHomePagePayload } from "../../dispatcher/payloads/ViewHomePagePayload";
const CustomStatusSection = () => {
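	// Inline status composer rendered in the user menu when the
	// "feature_custom_status" setting is enabled; saves via the unstable
	// status-message API.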
const cli = useContext(MatrixClientContext);
const setStatus = cli.getUser(cli.getUserId()).unstable_statusMessage || "";
const [value, setValue] = useState(setStatus);
const ref = useRef<HTMLInputElement>(null);
const [onFocus, isActive] = useRovingTabIndex(ref);
const classes = classNames({
'mx_UserMenu_CustomStatusSection_field': true,
'mx_UserMenu_CustomStatusSection_field_hasQuery': value,
});
let details: JSX.Element;
if (value !== setStatus) {
details = <>
<p>{ _t("Your status will be shown to people you have a DM with.") }</p>
<RovingAccessibleButton
onClick={() => cli._unstable_setStatusMessage(value)}
kind="primary_outline"
>
{ value ? _t("Set status") : _t("Clear status") }
</RovingAccessibleButton>
</>;
}
return <form className="mx_UserMenu_CustomStatusSection">
<div className={classes}>
<input
type="text"
value={value}
className="mx_UserMenu_CustomStatusSection_input"
onChange={e => setValue(e.target.value)}
placeholder={_t("Set a new status")}
autoComplete="off"
onFocus={onFocus}
ref={ref}
tabIndex={isActive ? 0 : -1}
/>
<AccessibleButton
// The clear button is only for mouse users
tabIndex={-1}
title={_t("Clear")}
className="mx_UserMenu_CustomStatusSection_clear"
onClick={() => setValue("")}
/>
</div>
{ details }
</form>;
};
interface IProps {
isPanelCollapsed: boolean;
}
type PartialDOMRect = Pick<DOMRect, "width" | "left" | "top" | "height">;
interface IState {
contextMenuPosition: PartialDOMRect;
isDarkTheme: boolean;
isHighContrast: boolean;
selectedSpace?: Room;
dndEnabled: boolean;
}
const toRightOf = (rect: PartialDOMRect) => {
return {
left: rect.width + rect.left + 8,
top: rect.top,
chevronFace: ChevronFace.None,
};
};
const below = (rect: PartialDOMRect) => {
return {
left: rect.left,
top: rect.top + rect.height,
chevronFace: ChevronFace.None,
};
};
@replaceableComponent("structures.UserMenu")
export default class UserMenu extends React.Component<IProps, IState> {
private dispatcherRef: string;
private themeWatcherRef: string;
private readonly dndWatcherRef: string;
private buttonRef: React.RefObject<HTMLButtonElement> = createRef();
private tagStoreRef: fbEmitter.EventSubscription;
constructor(props: IProps) {
super(props);
this.state = {
contextMenuPosition: null,
isDarkTheme: this.isUserOnDarkTheme(),
isHighContrast: this.isUserOnHighContrastTheme(),
dndEnabled: this.doNotDisturb,
selectedSpace: SpaceStore.instance.activeSpaceRoom,
};
OwnProfileStore.instance.on(UPDATE_EVENT, this.onProfileUpdate);
if (SpaceStore.spacesEnabled) {
SpaceStore.instance.on(UPDATE_SELECTED_SPACE, this.onSelectedSpaceUpdate);
}
SettingsStore.monitorSetting("feature_dnd", null);
SettingsStore.monitorSetting("doNotDisturb", null);
}
private get doNotDisturb(): boolean {
return SettingsStore.getValue("doNotDisturb");
}
private get hasHomePage(): boolean {
return !!getHomePageUrl(SdkConfig.get());
}
public componentDidMount() {
this.dispatcherRef = defaultDispatcher.register(this.onAction);
this.themeWatcherRef = SettingsStore.watchSetting("theme", null, this.onThemeChanged);
this.tagStoreRef = GroupFilterOrderStore.addListener(this.onTagStoreUpdate);
}
public componentWillUnmount() {
if (this.themeWatcherRef) SettingsStore.unwatchSetting(this.themeWatcherRef);
if (this.dndWatcherRef) SettingsStore.unwatchSetting(this.dndWatcherRef);
if (this.dispatcherRef) defaultDispatcher.unregister(this.dispatcherRef);
OwnProfileStore.instance.off(UPDATE_EVENT, this.onProfileUpdate);
this.tagStoreRef.remove();
if (SpaceStore.spacesEnabled) {
SpaceStore.instance.off(UPDATE_SELECTED_SPACE, this.onSelectedSpaceUpdate);
}
}
private onTagStoreUpdate = () => {
this.forceUpdate(); // we don't have anything useful in state to update
};
private isUserOnDarkTheme(): boolean {
if (SettingsStore.getValue("use_system_theme")) {
return window.matchMedia("(prefers-color-scheme: dark)").matches;
} else {
const theme = SettingsStore.getValue("theme");
if (theme.startsWith("custom-")) {
return getCustomTheme(theme.substring("custom-".length)).is_dark;
}
return theme === "dark";
}
}
private isUserOnHighContrastTheme(): boolean {<|fim▁hole|> } else {
const theme = SettingsStore.getValue("theme");
if (theme.startsWith("custom-")) {
return false;
}
return isHighContrastTheme(theme);
}
}
private onProfileUpdate = async () => {
// the store triggered an update, so force a layout update. We don't
// have any state to store here for that to magically happen.
this.forceUpdate();
};
private onSelectedSpaceUpdate = async () => {
this.setState({
selectedSpace: SpaceStore.instance.activeSpaceRoom,
});
};
private onThemeChanged = () => {
this.setState(
{
isDarkTheme: this.isUserOnDarkTheme(),
isHighContrast: this.isUserOnHighContrastTheme(),
});
};
private onAction = (payload: ActionPayload) => {
switch (payload.action) {
case Action.ToggleUserMenu:
if (this.state.contextMenuPosition) {
this.setState({ contextMenuPosition: null });
} else {
if (this.buttonRef.current) this.buttonRef.current.click();
}
break;
case Action.SettingUpdated: {
const settingUpdatedPayload = payload as SettingUpdatedPayload;
switch (settingUpdatedPayload.settingName) {
case "feature_dnd":
case "doNotDisturb": {
const dndEnabled = this.doNotDisturb;
if (this.state.dndEnabled !== dndEnabled) {
this.setState({ dndEnabled });
}
break;
}
}
}
}
};
private onOpenMenuClick = (ev: React.MouseEvent) => {
ev.preventDefault();
ev.stopPropagation();
this.setState({ contextMenuPosition: ev.currentTarget.getBoundingClientRect() });
};
private onContextMenu = (ev: React.MouseEvent) => {
ev.preventDefault();
ev.stopPropagation();
this.setState({
contextMenuPosition: {
left: ev.clientX,
top: ev.clientY,
width: 20,
height: 0,
},
});
};
private onCloseMenu = () => {
this.setState({ contextMenuPosition: null });
};
private onSwitchThemeClick = (ev: React.MouseEvent) => {
ev.preventDefault();
ev.stopPropagation();
PosthogTrackers.trackInteraction("WebUserMenuThemeToggleButton", ev);
// Disable system theme matching if the user hits this button
SettingsStore.setValue("use_system_theme", null, SettingLevel.DEVICE, false);
let newTheme = this.state.isDarkTheme ? "light" : "dark";
if (this.state.isHighContrast) {
const hcTheme = findHighContrastTheme(newTheme);
if (hcTheme) {
newTheme = hcTheme;
}
}
SettingsStore.setValue("theme", null, SettingLevel.DEVICE, newTheme); // set at same level as Appearance tab
};
private onSettingsOpen = (ev: ButtonEvent, tabId: string) => {
ev.preventDefault();
ev.stopPropagation();
const payload: OpenToTabPayload = { action: Action.ViewUserSettings, initialTabId: tabId };
defaultDispatcher.dispatch(payload);
this.setState({ contextMenuPosition: null }); // also close the menu
};
private onProvideFeedback = (ev: ButtonEvent) => {
ev.preventDefault();
ev.stopPropagation();
Modal.createTrackedDialog('Feedback Dialog', '', FeedbackDialog);
this.setState({ contextMenuPosition: null }); // also close the menu
};
private onSignOutClick = async (ev: ButtonEvent) => {
ev.preventDefault();
ev.stopPropagation();
const cli = MatrixClientPeg.get();
if (!cli || !cli.isCryptoEnabled() || !(await cli.exportRoomKeys())?.length) {
// log out without user prompt if they have no local megolm sessions
dis.dispatch({ action: 'logout' });
} else {
Modal.createTrackedDialog('Logout from LeftPanel', '', LogoutDialog);
}
this.setState({ contextMenuPosition: null }); // also close the menu
};
private onSignInClick = () => {
dis.dispatch({ action: 'start_login' });
this.setState({ contextMenuPosition: null }); // also close the menu
};
private onRegisterClick = () => {
dis.dispatch({ action: 'start_registration' });
this.setState({ contextMenuPosition: null }); // also close the menu
};
private onHomeClick = (ev: ButtonEvent) => {
ev.preventDefault();
ev.stopPropagation();
defaultDispatcher.dispatch<ViewHomePagePayload>({ action: Action.ViewHomePage });
this.setState({ contextMenuPosition: null }); // also close the menu
};
private onDndToggle = (ev: ButtonEvent) => {
ev.stopPropagation();
const current = SettingsStore.getValue("doNotDisturb");
SettingsStore.setValue("doNotDisturb", null, SettingLevel.DEVICE, !current);
};
private renderContextMenu = (): React.ReactNode => {
if (!this.state.contextMenuPosition) return null;
let topSection;
const hostSignupConfig: IHostSignupConfig = SdkConfig.get().hostSignup;
if (MatrixClientPeg.get().isGuest()) {
topSection = (
<div className="mx_UserMenu_contextMenu_header mx_UserMenu_contextMenu_guestPrompts">
{ _t("Got an account? <a>Sign in</a>", {}, {
a: sub => (
<AccessibleButton kind="link" onClick={this.onSignInClick}>
{ sub }
</AccessibleButton>
),
}) }
{ _t("New here? <a>Create an account</a>", {}, {
a: sub => (
<AccessibleButton kind="link" onClick={this.onRegisterClick}>
{ sub }
</AccessibleButton>
),
}) }
</div>
);
} else if (hostSignupConfig) {
if (hostSignupConfig && hostSignupConfig.url) {
// If hostSignup.domains is set to a non-empty array, only show
// dialog if the user is on the domain or a subdomain.
const hostSignupDomains = hostSignupConfig.domains || [];
const mxDomain = MatrixClientPeg.get().getDomain();
const validDomains = hostSignupDomains.filter(d => (d === mxDomain || mxDomain.endsWith(`.${d}`)));
if (!hostSignupConfig.domains || validDomains.length > 0) {
topSection = <HostSignupAction onClick={this.onCloseMenu} />;
}
}
}
let homeButton = null;
if (this.hasHomePage) {
homeButton = (
<IconizedContextMenuOption
iconClassName="mx_UserMenu_iconHome"
label={_t("Home")}
onClick={this.onHomeClick}
/>
);
}
let customStatusSection: JSX.Element;
if (SettingsStore.getValue("feature_custom_status")) {
customStatusSection = <CustomStatusSection />;
}
let dndButton: JSX.Element;
if (SettingsStore.getValue("feature_dnd")) {
dndButton = (
<IconizedContextMenuCheckbox
iconClassName={this.state.dndEnabled ? "mx_UserMenu_iconDnd" : "mx_UserMenu_iconDndOff"}
label={_t("Do not disturb")}
onClick={this.onDndToggle}
active={this.state.dndEnabled}
words
/>
);
}
let feedbackButton;
if (SettingsStore.getValue(UIFeature.Feedback)) {
feedbackButton = <IconizedContextMenuOption
iconClassName="mx_UserMenu_iconMessage"
label={_t("Feedback")}
onClick={this.onProvideFeedback}
/>;
}
let primaryOptionList = (
<IconizedContextMenuOptionList>
{ homeButton }
{ dndButton }
<IconizedContextMenuOption
iconClassName="mx_UserMenu_iconBell"
label={_t("Notifications")}
onClick={(e) => this.onSettingsOpen(e, UserTab.Notifications)}
/>
<IconizedContextMenuOption
iconClassName="mx_UserMenu_iconLock"
label={_t("Security & Privacy")}
onClick={(e) => this.onSettingsOpen(e, UserTab.Security)}
/>
<IconizedContextMenuOption
iconClassName="mx_UserMenu_iconSettings"
label={_t("All settings")}
onClick={(e) => this.onSettingsOpen(e, null)}
/>
{ feedbackButton }
<IconizedContextMenuOption
className="mx_IconizedContextMenu_option_red"
iconClassName="mx_UserMenu_iconSignOut"
label={_t("Sign out")}
onClick={this.onSignOutClick}
/>
</IconizedContextMenuOptionList>
);
if (MatrixClientPeg.get().isGuest()) {
primaryOptionList = (
<IconizedContextMenuOptionList>
{ homeButton }
<IconizedContextMenuOption
iconClassName="mx_UserMenu_iconSettings"
label={_t("Settings")}
onClick={(e) => this.onSettingsOpen(e, null)}
/>
{ feedbackButton }
</IconizedContextMenuOptionList>
);
}
const position = this.props.isPanelCollapsed
? toRightOf(this.state.contextMenuPosition)
: below(this.state.contextMenuPosition);
return <IconizedContextMenu
{...position}
onFinished={this.onCloseMenu}
className="mx_UserMenu_contextMenu"
>
<div className="mx_UserMenu_contextMenu_header">
<div className="mx_UserMenu_contextMenu_name">
<span className="mx_UserMenu_contextMenu_displayName">
{ OwnProfileStore.instance.displayName }
</span>
<span className="mx_UserMenu_contextMenu_userId">
{ UserIdentifierCustomisations.getDisplayUserIdentifier(
MatrixClientPeg.get().getUserId(), { withDisplayName: true }) }
</span>
</div>
<RovingAccessibleTooltipButton
className="mx_UserMenu_contextMenu_themeButton"
onClick={this.onSwitchThemeClick}
title={this.state.isDarkTheme ? _t("Switch to light mode") : _t("Switch to dark mode")}
>
<img
src={require("../../../res/img/element-icons/roomlist/dark-light-mode.svg").default}
alt={_t("Switch theme")}
width={16}
/>
</RovingAccessibleTooltipButton>
</div>
{ customStatusSection }
{ topSection }
{ primaryOptionList }
</IconizedContextMenu>;
};
public render() {
const avatarSize = 32; // should match border-radius of the avatar
const userId = MatrixClientPeg.get().getUserId();
const displayName = OwnProfileStore.instance.displayName || userId;
const avatarUrl = OwnProfileStore.instance.getHttpAvatarUrl(avatarSize);
let badge: JSX.Element;
if (this.state.dndEnabled) {
badge = <div className="mx_UserMenu_dndBadge" />;
}
let name: JSX.Element;
if (!this.props.isPanelCollapsed) {
name = <div className="mx_UserMenu_name">
{ displayName }
</div>;
}
return <div className="mx_UserMenu">
<ContextMenuButton
onClick={this.onOpenMenuClick}
inputRef={this.buttonRef}
label={_t("User menu")}
isExpanded={!!this.state.contextMenuPosition}
onContextMenu={this.onContextMenu}
className={classNames({
mx_UserMenu_cutout: badge,
})}
>
<div className="mx_UserMenu_userAvatar">
<BaseAvatar
idName={userId}
name={displayName}
url={avatarUrl}
width={avatarSize}
height={avatarSize}
resizeMethod="crop"
className="mx_UserMenu_userAvatar_BaseAvatar"
/>
{ badge }
</div>
{ name }
{ this.renderContextMenu() }
</ContextMenuButton>
{ this.props.children }
</div>;
}
}<|fim▁end|>
|
if (SettingsStore.getValue("use_system_theme")) {
return window.matchMedia("(prefers-contrast: more)").matches;
|
<|file_name|>GsonManager.java<|end_file_name|><|fim▁begin|>package com.comps.util;<|fim▁hole|>
private static Gson instance;
public static Gson getInstance(){
if (instance == null){
instance = new Gson();
}
return instance;
}
}<|fim▁end|>
|
import com.google.gson.Gson;
public class GsonManager {
|
<|file_name|>buffer.rs<|end_file_name|><|fim▁begin|>/*
gl/src/gl_wrapper/buffer.rs, 2017-07-19
Copyright (c) 2017 Juuso Tuononen
This file is licensed under
Apache License, Version 2.0
or
MIT License
*/
//! Send data to GPU.
use super::gl_raw;
use self::gl_raw::types::*;
use std::mem::size_of;
use std::os::raw::c_void;
use std::ptr;
/// Send static data to GPU with Vertex Buffer Object
struct VertexBufferStatic {
id: GLuint,
attribute_component_count: GLint,
}
impl VertexBufferStatic {
/// Sends static data to GPU.
///
/// # Arguments
/// * `data` - Float data which is sent to GPU.
/// * `attribute_component_count` - Number of floats in one vertex attribute.
///
/// # Safety
/// This function does not check if data length and `attribute_component_count` match.
unsafe fn new(data: &[f32], attribute_component_count: GLint) -> VertexBufferStatic {
let mut id: GLuint = 0;
gl_raw::GenBuffers(1, &mut id);
gl_raw::BindBuffer(gl_raw::ARRAY_BUFFER, id);
let size: GLsizeiptr = (size_of::<f32>() * data.len()) as GLsizeiptr;
let data_ptr = data.as_ptr() as *const c_void;
gl_raw::BufferData(gl_raw::ARRAY_BUFFER, size, data_ptr, gl_raw::STATIC_DRAW);
VertexBufferStatic {id, attribute_component_count}
}
/// Set vertex attribute to match buffer data.
///
/// # Arguments
/// * `attribute_index` - Index of vertex attribute.
fn set_vertex_attributes(&mut self, attribute_index: GLuint) {
unsafe {
gl_raw::BindBuffer(gl_raw::ARRAY_BUFFER, self.id);
let stride = (self.attribute_component_count * size_of::<f32>() as GLint) as GLsizei;
gl_raw::VertexAttribPointer(attribute_index, self.attribute_component_count, gl_raw::FLOAT, gl_raw::FALSE, stride, ptr::null());
gl_raw::EnableVertexAttribArray(attribute_index);
}
}
}
impl Drop for VertexBufferStatic {
/// Deletes OpenGL's buffer object.
fn drop(&mut self) {
unsafe {
gl_raw::DeleteBuffers(1, &self.id);
}
}
}
/// Send multiple buffers of data to GPU.
///
/// OpenGL 3.3 version of this struct is implemented
/// with OpenGL's Vertex Array Object.
///
/// OpenGL ES 2.0 does not support Vertex Array Objects, so vertex
/// attributes are set for every buffer
/// when `draw` method is called if using OpenGL ES version of this struct.
#[cfg(not(feature = "gles"))]
pub struct VertexArray {
id: GLuint,
vertex_buffers: Vec<VertexBufferStatic>,
vertex_count: GLsizei,<|fim▁hole|>#[cfg(feature = "gles")]
pub struct VertexArray {
vertex_buffers: Vec<(VertexBufferStatic, GLuint)>,
vertex_count: GLsizei,
}
impl VertexArray {
/// Creates new Vertex Array Object
#[cfg(not(feature = "gles"))]
pub fn new(vertex_count: GLsizei) -> VertexArray {
let mut id: GLuint = 0;
let vertex_buffers = vec![];
unsafe {
gl_raw::GenVertexArrays(1, &mut id);
VertexArray {id, vertex_buffers, vertex_count}
}
}
#[cfg(feature = "gles")]
pub fn new(vertex_count: GLsizei) -> VertexArray {
let vertex_buffers = vec![];
VertexArray {vertex_buffers, vertex_count}
}
/// Adds new buffer to Vertex Array Object
///
/// # Arguments
/// * `data` - Float data to send to the GPU.
/// * `attribute_component_count` - Number of floats in one attribute.
/// * `attribute_index` - Index of vertex attribute.
///
/// # Panics
/// * If buffer length doesn't match with `VertexArray`'s vertex count.
/// * If buffer length doesn't match with attribute_component_count.
pub fn add_static_buffer(&mut self, data: &[f32], attribute_component_count: GLint, attribute_index: GLuint) {
if data.len() / attribute_component_count as usize != self.vertex_count as usize {
panic!("buffer length doesn't match with VertexArray's vertex count");
}
if data.len() % attribute_component_count as usize != 0 {
panic!("buffer length doesn't match with attribute_component_count");
}
#[cfg(not(feature = "gles"))]
{
let mut buffer;
unsafe {
buffer = VertexBufferStatic::new(data, attribute_component_count);
}
self.bind();
buffer.set_vertex_attributes(attribute_index);
self.vertex_buffers.push(buffer);
}
#[cfg(feature = "gles")]
{
let buffer;
unsafe {
buffer = VertexBufferStatic::new(data, attribute_component_count);
}
self.vertex_buffers.push((buffer, attribute_index));
}
}
/// Bind OpenGL's Vertex Array Object. This method
/// only exists for OpenGL 3.3 version of `VertexArray` struct.
#[cfg(not(feature = "gles"))]
fn bind(&self) {
unsafe {
gl_raw::BindVertexArray(self.id);
}
}
/// Draw with the buffers currently existing in `VertexArray`. Remember to enable the
/// correct shader `Program` with its `use_program` method before calling this method.
pub fn draw(&mut self) {
#[cfg(not(feature = "gles"))]
{
self.bind();
}
#[cfg(feature = "gles")]
{
for &mut (ref mut buffer, attribute_index) in &mut self.vertex_buffers {
buffer.set_vertex_attributes(attribute_index);
}
}
unsafe {
gl_raw::DrawArrays(gl_raw::TRIANGLES, 0, self.vertex_count);
}
}
}
#[cfg(not(feature = "gles"))]
impl Drop for VertexArray {
/// Delete OpenGL's Vertex Array Object. This implementation of Drop trait
/// only exists for OpenGL 3.3 version of `VertexArray` struct.
fn drop(&mut self) {
unsafe {
gl_raw::DeleteVertexArrays(1, &self.id);
}
}
}<|fim▁end|>
|
}
|
<|file_name|>f20.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn expr_index_20() {
let v = [2us, 0us, 20us];
v[20us];
}<|fim▁end|>
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
<|file_name|>check_cfc.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""Check CFC - Check Compile Flow Consistency
This is a compiler wrapper for testing that code generation is consistent with
different compilation processes. It checks that code is not unduly affected by
compiler options or other changes which should not have side effects.
To use:
-Ensure that the compiler under test (i.e. clang, clang++) is on the PATH
-On Linux copy this script to the name of the compiler
e.g. cp check_cfc.py clang && cp check_cfc.py clang++
-On Windows use setup.py to generate check_cfc.exe and copy that to clang.exe
and clang++.exe
-Enable the desired checks in check_cfc.cfg (in the same directory as the
wrapper)
e.g.
[Checks]
dash_g_no_change = true
dash_s_no_change = false
-The wrapper can be run using its absolute path or added to PATH before the
compiler under test
e.g. export PATH=<path to check_cfc>:$PATH
-Compile as normal. The wrapper intercepts normal -c compiles and will return
non-zero if the check fails.
e.g.
$ clang -c test.cpp
Code difference detected with -g
--- /tmp/tmp5nv893.o
+++ /tmp/tmp6Vwjnc.o
@@ -1 +1 @@
- 0: 48 8b 05 51 0b 20 00 mov 0x200b51(%rip),%rax
+ 0: 48 39 3d 51 0b 20 00 cmp %rdi,0x200b51(%rip)
-To run LNT with Check CFC specify the absolute path to the wrapper to the --cc
and --cxx options
e.g.
lnt runtest nt --cc <path to check_cfc>/clang \\
--cxx <path to check_cfc>/clang++ ...
To add a new check:
-Create a new subclass of WrapperCheck
-Implement the perform_check() method. This should perform the alternate compile
and do the comparison.
-Add the new check to check_cfc.cfg. The check has the same name as the
subclass.
"""
from __future__ import absolute_import, division, print_function
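# Illustrative sketch only (not one of the shipped checks below): a new check
# is a WrapperCheck subclass whose perform_check() runs an alternate compile
# into a temporary object file and compares it against self._output_file_a,
# e.g. a hypothetical check that -w (suppress warnings) does not change code:
#
#   class dash_w_no_change(WrapperCheck):
#       def perform_check(self, arguments, my_env):
#           output_file_b = get_temp_file_name('.o')
#           alternate_command = set_output_file(arguments + ['-w'], output_file_b)
#           run_step(alternate_command, my_env, "Error compiling with -w")
#           difference = obj_diff.compare_object_files(self._output_file_a,
#                                                      output_file_b)
#           if difference:
#               raise WrapperCheckException(
#                   "Code difference detected with -w\n{}".format(difference))
#           os.remove(output_file_b)
#
# It would then be enabled with "dash_w_no_change = true" under [Checks] in
# check_cfc.cfg, exactly like the checks defined further down.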
import imp
import os
import platform
import shutil
import subprocess
import sys
import tempfile
try:
import configparser
except ImportError:
import ConfigParser as configparser
import io
import obj_diff
def is_windows():
"""Returns True if running on Windows."""
return platform.system() == 'Windows'
class WrapperStepException(Exception):
"""Exception type to be used when a step other than the original compile
fails."""
def __init__(self, msg, stdout, stderr):
self.msg = msg
self.stdout = stdout
self.stderr = stderr
class WrapperCheckException(Exception):
"""Exception type to be used when a comparison check fails."""
def __init__(self, msg):
self.msg = msg
def main_is_frozen():
"""Returns True when running as a py2exe executable."""
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") or # old py2exe
imp.is_frozen("__main__")) # tools/freeze
def get_main_dir():
"""Get the directory that the script or executable is located in."""
if main_is_frozen():
return os.path.dirname(sys.executable)
return os.path.dirname(sys.argv[0])
def remove_dir_from_path(path_var, directory):
"""Remove the specified directory from path_var, a string representing
PATH"""
pathlist = path_var.split(os.pathsep)
norm_directory = os.path.normpath(os.path.normcase(directory))
pathlist = [x for x in pathlist if os.path.normpath(
os.path.normcase(x)) != norm_directory]
return os.pathsep.join(pathlist)
def path_without_wrapper():
"""Returns the PATH variable modified to remove the path to this program."""
scriptdir = get_main_dir()
path = os.environ['PATH']
return remove_dir_from_path(path, scriptdir)
def flip_dash_g(args):
"""Search for -g in args. If it exists then return args without. If not then
add it."""
if '-g' in args:
# Return args without any -g
return [x for x in args if x != '-g']
else:
# No -g, add one
return args + ['-g']
def derive_output_file(args):
"""Derive output file from the input file (if just one) or None
otherwise."""
infile = get_input_file(args)
if infile is None:
return None
else:
return '{}.o'.format(os.path.splitext(infile)[0])
def get_output_file(args):
"""Return the output file specified by this command or None if not
specified."""
grabnext = False
for arg in args:
if grabnext:
return arg
if arg == '-o':
# Specified as a separate arg
grabnext = True
elif arg.startswith('-o'):
# Specified conjoined with -o
return arg[2:]
assert grabnext == False
return None
def is_output_specified(args):
"""Return true if output file is specified in args."""
return get_output_file(args) is not None
def replace_output_file(args, new_name):
"""Replaces the output file name in args with the specified new name.
Assumes that the output file name is specified in the command line args."""
replaceidx = None
attached = False
for idx, val in enumerate(args):
if val == '-o':
replaceidx = idx + 1
attached = False
elif val.startswith('-o'):
replaceidx = idx
attached = True
if replaceidx is None:
raise Exception
replacement = new_name
if attached == True:
replacement = '-o' + new_name
args[replaceidx] = replacement
return args
def add_output_file(args, output_file):
"""Append an output file to args, presuming not already specified."""
return args + ['-o', output_file]
def set_output_file(args, output_file):
"""Set the output file within the arguments. Appends or replaces as
appropriate."""
if is_output_specified(args):
args = replace_output_file(args, output_file)
else:
args = add_output_file(args, output_file)
return args
gSrcFileSuffixes = ('.c', '.cpp', '.cxx', '.c++', '.cp', '.cc')
def get_input_file(args):
"""Return the input file string if it can be found (and there is only
one)."""
inputFiles = list()
for arg in args:
testarg = arg
quotes = ('"', "'")
while testarg.endswith(quotes):
testarg = testarg[:-1]
testarg = os.path.normcase(testarg)
# Test if it is a source file
if testarg.endswith(gSrcFileSuffixes):
inputFiles.append(arg)
if len(inputFiles) == 1:
return inputFiles[0]
else:
return None
def set_input_file(args, input_file):
"""Replaces the input file with that specified."""
infile = get_input_file(args)
if infile:
infile_idx = args.index(infile)
args[infile_idx] = input_file
return args
else:
# Could not find input file
assert False
def is_normal_compile(args):
"""Check if this is a normal compile which will output an object file rather
than a preprocess or link. args is a list of command line arguments."""
compile_step = '-c' in args
# Bitcode cannot be disassembled in the same way
bitcode = '-flto' in args or '-emit-llvm' in args
# Version and help are queries of the compiler and override -c if specified
query = '--version' in args or '--help' in args
# Options to output dependency files for make
dependency = '-M' in args or '-MM' in args
# Check if the input is recognised as a source file (this may be too
# strong a restriction)
input_is_valid = bool(get_input_file(args))
return compile_step and not bitcode and not query and not dependency and input_is_valid
def run_step(command, my_env, error_on_failure):
"""Runs a step of the compilation. Reports failure as exception."""
# Need to use shell=True on Windows as Popen won't use PATH otherwise.
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=my_env, shell=is_windows())
(stdout, stderr) = p.communicate()
if p.returncode != 0:
raise WrapperStepException(error_on_failure, stdout, stderr)
def get_temp_file_name(suffix):
"""Get a temporary file name with a particular suffix. Let the caller be
responsible for deleting it."""
tf = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
tf.close()
return tf.name
class WrapperCheck(object):
"""Base class for a check. Subclass this to add a check."""
def __init__(self, output_file_a):
"""Record the base output file that will be compared against."""
self._output_file_a = output_file_a
def perform_check(self, arguments, my_env):
"""Override this to perform the modified compilation and required
checks."""
raise NotImplementedError("Please Implement this method")
class dash_g_no_change(WrapperCheck):
def perform_check(self, arguments, my_env):
"""Check if different code is generated with/without the -g flag."""
output_file_b = get_temp_file_name('.o')
alternate_command = list(arguments)
alternate_command = flip_dash_g(alternate_command)
alternate_command = set_output_file(alternate_command, output_file_b)
run_step(alternate_command, my_env, "Error compiling with -g")
# Compare disassembly (returns first diff if differs)
difference = obj_diff.compare_object_files(self._output_file_a,
output_file_b)
if difference:
raise WrapperCheckException(
"Code difference detected with -g\n{}".format(difference))
# Clean up temp file if comparison okay
os.remove(output_file_b)
class dash_s_no_change(WrapperCheck):
def perform_check(self, arguments, my_env):
"""Check if compiling to asm then assembling in separate steps results
in different code than compiling to object directly."""
output_file_b = get_temp_file_name('.o')
alternate_command = arguments + ['-via-file-asm']
alternate_command = set_output_file(alternate_command, output_file_b)
run_step(alternate_command, my_env,
"Error compiling with -via-file-asm")
# Compare if object files are exactly the same
exactly_equal = obj_diff.compare_exact(self._output_file_a, output_file_b)
if not exactly_equal:
# Compare disassembly (returns first diff if differs)
difference = obj_diff.compare_object_files(self._output_file_a,
output_file_b)
if difference:
raise WrapperCheckException(
"Code difference detected with -S\n{}".format(difference))
# Code is identical, compare debug info
dbgdifference = obj_diff.compare_debug_info(self._output_file_a,
output_file_b)
if dbgdifference:
raise WrapperCheckException(
"Debug info difference detected with -S\n{}".format(dbgdifference))
raise WrapperCheckException("Object files not identical with -S\n")
# Clean up temp file if comparison okay
os.remove(output_file_b)
if __name__ == '__main__':
# Create configuration defaults from list of checks
default_config = """
[Checks]
"""
# Find all subclasses of WrapperCheck
checks = [cls.__name__ for cls in vars()['WrapperCheck'].__subclasses__()]
for c in checks:
default_config += "{} = false\n".format(c)
config = configparser.RawConfigParser()
config.readfp(io.BytesIO(default_config))
scriptdir = get_main_dir()
config_path = os.path.join(scriptdir, 'check_cfc.cfg')
try:
config.read(os.path.join(config_path))
except:
print("Could not read config from {}, "
"using defaults.".format(config_path))
my_env = os.environ.copy()
my_env['PATH'] = path_without_wrapper()
arguments_a = list(sys.argv)
# Prevent infinite loop if called with absolute path.
arguments_a[0] = os.path.basename(arguments_a[0])
# Sanity check
enabled_checks = [check_name
for check_name in checks
if config.getboolean('Checks', check_name)]
checks_comma_separated = ', '.join(enabled_checks)
print("Check CFC, checking: {}".format(checks_comma_separated))
# A - original compilation
output_file_orig = get_output_file(arguments_a)
if output_file_orig is None:
output_file_orig = derive_output_file(arguments_a)
p = subprocess.Popen(arguments_a, env=my_env, shell=is_windows())
p.communicate()
if p.returncode != 0:
sys.exit(p.returncode)
if not is_normal_compile(arguments_a) or output_file_orig is None:
# Bail out here if we can't apply checks in this case.
# Does not indicate an error.
# Maybe not straight compilation (e.g. -S or --version or -flto)
# or maybe > 1 input files.
sys.exit(0)
# Sometimes we generate files which have very long names which can't be
# read/disassembled. This will exit early if we can't find the file we
# expected to be output.
if not os.path.isfile(output_file_orig):
sys.exit(0)
# Copy output file to a temp file
temp_output_file_orig = get_temp_file_name('.o')
shutil.copyfile(output_file_orig, temp_output_file_orig)
# Run checks, if they are enabled in config and if they are appropriate for
# this command line.
current_module = sys.modules[__name__]
for check_name in checks:
if config.getboolean('Checks', check_name):
class_ = getattr(current_module, check_name)
checker = class_(temp_output_file_orig)
try:
checker.perform_check(arguments_a, my_env)
except WrapperCheckException as e:
# Check failure<|fim▁hole|> # output file if failed)
os.remove(output_file_orig)
sys.exit(1)
except WrapperStepException as e:
# Compile step failure
print(e.msg, file=sys.stderr)
print("*** stdout ***", file=sys.stderr)
print(e.stdout, file=sys.stderr)
print("*** stderr ***", file=sys.stderr)
print(e.stderr, file=sys.stderr)
# Remove file to comply with build system expectations (no
# output file if failed)
os.remove(output_file_orig)
sys.exit(1)<|fim▁end|>
|
print("{} {}".format(get_input_file(arguments_a), e.msg), file=sys.stderr)
# Remove file to comply with build system expectations (no
|
<|file_name|>jquery.form.js<|end_file_name|><|fim▁begin|>/*!
* jQuery Form Plugin
* version: 3.33.0-2013.05.02
* @requires jQuery v1.5 or later
* Copyright (c) 2013 M. Alsup
* Examples and documentation at: http://malsup.com/jquery/form/
* Project repository: https://github.com/malsup/form
* Dual licensed under the MIT and GPL licenses.
* https://github.com/malsup/form#copyright-and-license
*/
/*global ActiveXObject */
;(function($) {
"use strict";
/*
Usage Note:
-----------
Do not use both ajaxSubmit and ajaxForm on the same form. These
functions are mutually exclusive. Use ajaxSubmit if you want
to bind your own submit handler to the form. For example,
$(document).ready(function() {
$('#myForm').on('submit', function(e) {
e.preventDefault(); // <-- important
$(this).ajaxSubmit({
target: '#output'
});
});
});
Use ajaxForm when you want the plugin to manage all the event binding
for you. For example,
$(document).ready(function() {
$('#myForm').ajaxForm({
target: '#output'
});
});
You can also use ajaxForm with delegation (requires jQuery v1.7+), so the
form does not have to exist when you invoke ajaxForm:
$('#myForm').ajaxForm({
delegation: true,
target: '#output'
});
When using ajaxForm, the ajaxSubmit function will be invoked for you
at the appropriate time.
*/
/**
* Feature detection
*/
var feature = {};
feature.fileapi = $("<input type='file'/>").get(0).files !== undefined;
feature.formdata = window.FormData !== undefined;
var hasProp = !!$.fn.prop;
// attr2 uses prop when it can but checks the return type for
// an expected string. this accounts for the case where a form
// contains inputs with names like "action" or "method"; in those
// cases "prop" returns the element
$.fn.attr2 = function() {
if ( ! hasProp )
return this.attr.apply(this, arguments);
var val = this.prop.apply(this, arguments);
if ( ( val && val.jquery ) || typeof val === 'string' )
return val;
return this.attr.apply(this, arguments);
};
/**
* ajaxSubmit() provides a mechanism for immediately submitting
* an HTML form using AJAX.
*/
$.fn.ajaxSubmit = function(options) {
/*jshint scripturl:true */
// fast fail if nothing selected (http://dev.jquery.com/ticket/2752)
if (!this.length) {
log('ajaxSubmit: skipping submit process - no element selected');
return this;
}
var method, action, url, $form = this;
if (typeof options == 'function') {
options = { success: options };
}
method = this.attr2('method');
action = this.attr2('action');
url = (typeof action === 'string') ? $.trim(action) : '';
url = url || window.location.href || '';
if (url) {
// clean url (don't include hash value)
url = (url.match(/^([^#]+)/)||[])[1];
}
options = $.extend(true, {
url: url,
success: $.ajaxSettings.success,
type: method || 'GET',
iframeSrc: /^https/i.test(window.location.href || '') ? 'javascript:false' : 'about:blank'
}, options);
// hook for manipulating the form data before it is extracted;
// convenient for use with rich editors like tinyMCE or FCKEditor
var veto = {};
this.trigger('form-pre-serialize', [this, options, veto]);
if (veto.veto) {
log('ajaxSubmit: submit vetoed via form-pre-serialize trigger');
return this;
}
// provide opportunity to alter form data before it is serialized
if (options.beforeSerialize && options.beforeSerialize(this, options) === false) {
log('ajaxSubmit: submit aborted via beforeSerialize callback');
return this;
}
var traditional = options.traditional;
if ( traditional === undefined ) {
traditional = $.ajaxSettings.traditional;
}
var elements = [];
var qx, a = this.formToArray(options.semantic, elements);
if (options.data) {
options.extraData = options.data;
qx = $.param(options.data, traditional);
}
// give pre-submit callback an opportunity to abort the submit
if (options.beforeSubmit && options.beforeSubmit(a, this, options) === false) {
log('ajaxSubmit: submit aborted via beforeSubmit callback');
return this;
}
// fire vetoable 'validate' event
this.trigger('form-submit-validate', [a, this, options, veto]);
if (veto.veto) {
log('ajaxSubmit: submit vetoed via form-submit-validate trigger');
return this;
}
var q = $.param(a, traditional);
if (qx) {
q = ( q ? (q + '&' + qx) : qx );
}
if (options.type.toUpperCase() == 'GET') {
options.url += (options.url.indexOf('?') >= 0 ? '&' : '?') + q;
options.data = null; // data is null for 'get'
}
else {
options.data = q; // data is the query string for 'post'
}
var callbacks = [];
if (options.resetForm) {
callbacks.push(function() { $form.resetForm(); });
}
if (options.clearForm) {
callbacks.push(function() { $form.clearForm(options.includeHidden); });
}
// perform a load on the target only if dataType is not provided
if (!options.dataType && options.target) {
var oldSuccess = options.success || function(){};
callbacks.push(function(data) {
var fn = options.replaceTarget ? 'replaceWith' : 'html';
$(options.target)[fn](data).each(oldSuccess, arguments);
});
}
else if (options.success) {
callbacks.push(options.success);
}
options.success = function(data, status, xhr) { // jQuery 1.4+ passes xhr as 3rd arg
var context = options.context || this ; // jQuery 1.4+ supports scope context
for (var i=0, max=callbacks.length; i < max; i++) {
callbacks[i].apply(context, [data, status, xhr || $form, $form]);
}
};
if (options.error) {
var oldError = options.error;
options.error = function(xhr, status, error) {
var context = options.context || this;
oldError.apply(context, [xhr, status, error, $form]);
};
}
if (options.complete) {
var oldComplete = options.complete;
options.complete = function(xhr, status) {
var context = options.context || this;
oldComplete.apply(context, [xhr, status, $form]);
};
}
// are there files to upload?
// [value] (issue #113), also see comment:
// https://github.com/malsup/form/commit/588306aedba1de01388032d5f42a60159eea9228#commitcomment-2180219
var fileInputs = $('input[type=file]:enabled[value!=""]', this);
var hasFileInputs = fileInputs.length > 0;
var mp = 'multipart/form-data';
var multipart = ($form.attr('enctype') == mp || $form.attr('encoding') == mp);
var fileAPI = feature.fileapi && feature.formdata;
log("fileAPI :" + fileAPI);
var shouldUseFrame = (hasFileInputs || multipart) && !fileAPI;
var jqxhr;
// options.iframe allows user to force iframe mode
// 06-NOV-09: now defaulting to iframe mode if file input is detected
if (options.iframe !== false && (options.iframe || shouldUseFrame)) {
// hack to fix Safari hang (thanks to Tim Molendijk for this)
// see: http://groups.google.com/group/jquery-dev/browse_thread/thread/36395b7ab510dd5d
if (options.closeKeepAlive) {
$.get(options.closeKeepAlive, function() {
jqxhr = fileUploadIframe(a);
});
}
else {
jqxhr = fileUploadIframe(a);
}
}
else if ((hasFileInputs || multipart) && fileAPI) {
jqxhr = fileUploadXhr(a);
}
else {
jqxhr = $.ajax(options);
}
$form.removeData('jqxhr').data('jqxhr', jqxhr);
// clear element array
for (var k=0; k < elements.length; k++)
elements[k] = null;
// fire 'notify' event
this.trigger('form-submit-notify', [this, options]);
return this;
// utility fn for deep serialization
function deepSerialize(extraData){
var serialized = $.param(extraData).split('&');
var len = serialized.length;
var result = [];
var i, part;
for (i=0; i < len; i++) {
// #252; undo param space replacement
serialized[i] = serialized[i].replace(/\+/g,' ');
part = serialized[i].split('=');
// #278; use array instead of object storage, favoring array serializations
result.push([decodeURIComponent(part[0]), decodeURIComponent(part[1])]);
}
return result;
}
// XMLHttpRequest Level 2 file uploads (big hat tip to francois2metz)
function fileUploadXhr(a) {
var formdata = new FormData();
for (var i=0; i < a.length; i++) {
formdata.append(a[i].name, a[i].value);
}
if (options.extraData) {
var serializedData = deepSerialize(options.extraData);
for (i=0; i < serializedData.length; i++)
if (serializedData[i])
formdata.append(serializedData[i][0], serializedData[i][1]);
}
options.data = null;
var s = $.extend(true, {}, $.ajaxSettings, options, {
contentType: false,
processData: false,
cache: false,
type: method || 'POST'
});
if (options.uploadProgress) {
// workaround because jqXHR does not expose upload property
s.xhr = function() {
var xhr = jQuery.ajaxSettings.xhr();
if (xhr.upload) {
xhr.upload.addEventListener('progress', function(event) {
var percent = 0;
var position = event.loaded || event.position; /*event.position is deprecated*/
var total = event.total;
if (event.lengthComputable) {
percent = Math.ceil(position / total * 100);
}
options.uploadProgress(event, position, total, percent);
}, false);
}
return xhr;
};
}
s.data = null;
var beforeSend = s.beforeSend;
s.beforeSend = function(xhr, o) {
o.data = formdata;
if(beforeSend)
beforeSend.call(this, xhr, o);
};
return $.ajax(s);
}
// private function for handling file uploads (hat tip to YAHOO!)
function fileUploadIframe(a) {
var form = $form[0], el, i, s, g, id, $io, io, xhr, sub, n, timedOut, timeoutHandle;
var deferred = $.Deferred();
if (a) {
// ensure that every serialized input is still enabled
for (i=0; i < elements.length; i++) {
el = $(elements[i]);
if ( hasProp )
el.prop('disabled', false);
else
el.removeAttr('disabled');
}
}
s = $.extend(true, {}, $.ajaxSettings, options);
s.context = s.context || s;
id = 'jqFormIO' + (new Date().getTime());
if (s.iframeTarget) {
$io = $(s.iframeTarget);
n = $io.attr2('name');
if (!n)
$io.attr2('name', id);
else
id = n;
}
else {
$io = $('<iframe name="' + id + '" src="'+ s.iframeSrc +'" />');
$io.css({ position: 'absolute', top: '-1000px', left: '-1000px' });
}
io = $io[0];
<|fim▁hole|> xhr = { // mock object
aborted: 0,
responseText: null,
responseXML: null,
status: 0,
statusText: 'n/a',
getAllResponseHeaders: function() {},
getResponseHeader: function() {},
setRequestHeader: function() {},
abort: function(status) {
var e = (status === 'timeout' ? 'timeout' : 'aborted');
log('aborting upload... ' + e);
this.aborted = 1;
try { // #214, #257
if (io.contentWindow.document.execCommand) {
io.contentWindow.document.execCommand('Stop');
}
}
catch(ignore) {}
$io.attr('src', s.iframeSrc); // abort op in progress
xhr.error = e;
if (s.error)
s.error.call(s.context, xhr, e, status);
if (g)
$.event.trigger("ajaxError", [xhr, s, e]);
if (s.complete)
s.complete.call(s.context, xhr, e);
}
};
g = s.global;
// trigger ajax global events so that activity/block indicators work like normal
if (g && 0 === $.active++) {
$.event.trigger("ajaxStart");
}
if (g) {
$.event.trigger("ajaxSend", [xhr, s]);
}
if (s.beforeSend && s.beforeSend.call(s.context, xhr, s) === false) {
if (s.global) {
$.active--;
}
deferred.reject();
return deferred;
}
if (xhr.aborted) {
deferred.reject();
return deferred;
}
// add submitting element to data if we know it
sub = form.clk;
if (sub) {
n = sub.name;
if (n && !sub.disabled) {
s.extraData = s.extraData || {};
s.extraData[n] = sub.value;
if (sub.type == "image") {
s.extraData[n+'.x'] = form.clk_x;
s.extraData[n+'.y'] = form.clk_y;
}
}
}
var CLIENT_TIMEOUT_ABORT = 1;
var SERVER_ABORT = 2;
function getDoc(frame) {
/* it looks like contentWindow or contentDocument do not
* carry the protocol property in ie8, when running under ssl
* frame.document is the only valid response document, since
* the protocol is known but not on the other two objects. strange?
* "Same origin policy" http://en.wikipedia.org/wiki/Same_origin_policy
*/
var doc = null;
// IE8 cascading access check
try {
if (frame.contentWindow) {
doc = frame.contentWindow.document;
}
} catch(err) {
// IE8 access denied under ssl & missing protocol
log('cannot get iframe.contentWindow document: ' + err);
}
if (doc) { // successful getting content
return doc;
}
try { // simply checking may throw in ie8 under ssl or mismatched protocol
doc = frame.contentDocument ? frame.contentDocument : frame.document;
} catch(err) {
// last attempt
log('cannot get iframe.contentDocument: ' + err);
doc = frame.document;
}
return doc;
}
// Rails CSRF hack (thanks to Yvan Barthelemy)
var csrf_token = $('meta[name=csrf-token]').attr('content');
var csrf_param = $('meta[name=csrf-param]').attr('content');
if (csrf_param && csrf_token) {
s.extraData = s.extraData || {};
s.extraData[csrf_param] = csrf_token;
}
// take a breath so that pending repaints get some cpu time before the upload starts
function doSubmit() {
// make sure form attrs are set
var t = $form.attr2('target'), a = $form.attr2('action');
// update form attrs in IE friendly way
form.setAttribute('target',id);
if (!method) {
form.setAttribute('method', 'POST');
}
if (a != s.url) {
form.setAttribute('action', s.url);
}
// ie borks in some cases when setting encoding
if (! s.skipEncodingOverride && (!method || /post/i.test(method))) {
$form.attr({
encoding: 'multipart/form-data',
enctype: 'multipart/form-data'
});
}
// support timeout
if (s.timeout) {
timeoutHandle = setTimeout(function() { timedOut = true; cb(CLIENT_TIMEOUT_ABORT); }, s.timeout);
}
// look for server aborts
function checkState() {
try {
var state = getDoc(io).readyState;
log('state = ' + state);
if (state && state.toLowerCase() == 'uninitialized')
setTimeout(checkState,50);
}
catch(e) {
log('Server abort: ' , e, ' (', e.name, ')');
cb(SERVER_ABORT);
if (timeoutHandle)
clearTimeout(timeoutHandle);
timeoutHandle = undefined;
}
}
// add "extra" data to form if provided in options
var extraInputs = [];
try {
if (s.extraData) {
for (var n in s.extraData) {
if (s.extraData.hasOwnProperty(n)) {
// if using the $.param format that allows for multiple values with the same name
if($.isPlainObject(s.extraData[n]) && s.extraData[n].hasOwnProperty('name') && s.extraData[n].hasOwnProperty('value')) {
extraInputs.push(
$('<input type="hidden" name="'+s.extraData[n].name+'">').val(s.extraData[n].value)
.appendTo(form)[0]);
} else {
extraInputs.push(
$('<input type="hidden" name="'+n+'">').val(s.extraData[n])
.appendTo(form)[0]);
}
}
}
}
if (!s.iframeTarget) {
// add iframe to doc and submit the form
$io.appendTo('body');
if (io.attachEvent)
io.attachEvent('onload', cb);
else
io.addEventListener('load', cb, false);
}
setTimeout(checkState,15);
try {
form.submit();
} catch(err) {
// just in case form has element with name/id of 'submit'
var submitFn = document.createElement('form').submit;
submitFn.apply(form);
}
}
finally {
// reset attrs and remove "extra" input elements
form.setAttribute('action',a);
if(t) {
form.setAttribute('target', t);
} else {
$form.removeAttr('target');
}
$(extraInputs).remove();
}
}
if (s.forceSync) {
doSubmit();
}
else {
setTimeout(doSubmit, 10); // this lets dom updates render
}
var data, doc, domCheckCount = 50, callbackProcessed;
function cb(e) {
if (xhr.aborted || callbackProcessed) {
return;
}
doc = getDoc(io);
if(!doc) {
log('cannot access response document');
e = SERVER_ABORT;
}
if (e === CLIENT_TIMEOUT_ABORT && xhr) {
xhr.abort('timeout');
deferred.reject(xhr, 'timeout');
return;
}
else if (e == SERVER_ABORT && xhr) {
xhr.abort('server abort');
deferred.reject(xhr, 'error', 'server abort');
return;
}
if (!doc || doc.location.href == s.iframeSrc) {
// response not received yet
if (!timedOut)
return;
}
if (io.detachEvent)
io.detachEvent('onload', cb);
else
io.removeEventListener('load', cb, false);
var status = 'success', errMsg;
try {
if (timedOut) {
throw 'timeout';
}
var isXml = s.dataType == 'xml' || doc.XMLDocument || $.isXMLDoc(doc);
log('isXml='+isXml);
if (!isXml && window.opera && (doc.body === null || !doc.body.innerHTML)) {
if (--domCheckCount) {
// in some browsers (Opera) the iframe DOM is not always traversable when
// the onload callback fires, so we loop a bit to accommodate
log('requeing onLoad callback, DOM not available');
setTimeout(cb, 250);
return;
}
// let this fall through because server response could be an empty document
//log('Could not access iframe DOM after multiple tries.');
//throw 'DOMException: not available';
}
//log('response detected');
var docRoot = doc.body ? doc.body : doc.documentElement;
xhr.responseText = docRoot ? docRoot.innerHTML : null;
xhr.responseXML = doc.XMLDocument ? doc.XMLDocument : doc;
if (isXml)
s.dataType = 'xml';
xhr.getResponseHeader = function(header){
var headers = {'content-type': s.dataType};
return headers[header];
};
// support for XHR 'status' & 'statusText' emulation :
if (docRoot) {
xhr.status = Number( docRoot.getAttribute('status') ) || xhr.status;
xhr.statusText = docRoot.getAttribute('statusText') || xhr.statusText;
}
var dt = (s.dataType || '').toLowerCase();
var scr = /(json|script|text)/.test(dt);
if (scr || s.textarea) {
// see if user embedded response in textarea
var ta = doc.getElementsByTagName('textarea')[0];
if (ta) {
xhr.responseText = ta.value;
// support for XHR 'status' & 'statusText' emulation :
xhr.status = Number( ta.getAttribute('status') ) || xhr.status;
xhr.statusText = ta.getAttribute('statusText') || xhr.statusText;
}
else if (scr) {
// account for browsers injecting pre around json response
var pre = doc.getElementsByTagName('pre')[0];
var b = doc.getElementsByTagName('body')[0];
if (pre) {
xhr.responseText = pre.textContent ? pre.textContent : pre.innerText;
}
else if (b) {
xhr.responseText = b.textContent ? b.textContent : b.innerText;
}
}
}
else if (dt == 'xml' && !xhr.responseXML && xhr.responseText) {
xhr.responseXML = toXml(xhr.responseText);
}
try {
data = httpData(xhr, dt, s);
}
catch (err) {
status = 'parsererror';
xhr.error = errMsg = (err || status);
}
}
catch (err) {
log('error caught: ',err);
status = 'error';
xhr.error = errMsg = (err || status);
}
if (xhr.aborted) {
log('upload aborted');
status = null;
}
if (xhr.status) { // we've set xhr.status
status = (xhr.status >= 200 && xhr.status < 300 || xhr.status === 304) ? 'success' : 'error';
}
// ordering of these callbacks/triggers is odd, but that's how $.ajax does it
if (status === 'success') {
if (s.success)
s.success.call(s.context, data, 'success', xhr);
deferred.resolve(xhr.responseText, 'success', xhr);
if (g)
$.event.trigger("ajaxSuccess", [xhr, s]);
}
else if (status) {
if (errMsg === undefined)
errMsg = xhr.statusText;
if (s.error)
s.error.call(s.context, xhr, status, errMsg);
deferred.reject(xhr, 'error', errMsg);
if (g)
$.event.trigger("ajaxError", [xhr, s, errMsg]);
}
if (g)
$.event.trigger("ajaxComplete", [xhr, s]);
if (g && ! --$.active) {
$.event.trigger("ajaxStop");
}
if (s.complete)
s.complete.call(s.context, xhr, status);
callbackProcessed = true;
if (s.timeout)
clearTimeout(timeoutHandle);
// clean up
setTimeout(function() {
if (!s.iframeTarget)
$io.remove();
xhr.responseXML = null;
}, 100);
}
var toXml = $.parseXML || function(s, doc) { // use parseXML if available (jQuery 1.5+)
if (window.ActiveXObject) {
doc = new ActiveXObject('Microsoft.XMLDOM');
doc.async = 'false';
doc.loadXML(s);
}
else {
doc = (new DOMParser()).parseFromString(s, 'text/xml');
}
return (doc && doc.documentElement && doc.documentElement.nodeName != 'parsererror') ? doc : null;
};
var parseJSON = $.parseJSON || function(s) {
/*jslint evil:true */
return window['eval']('(' + s + ')');
};
var httpData = function( xhr, type, s ) { // mostly lifted from jq1.4.4
var ct = xhr.getResponseHeader('content-type') || '',
xml = type === 'xml' || !type && ct.indexOf('xml') >= 0,
data = xml ? xhr.responseXML : xhr.responseText;
if (xml && data.documentElement.nodeName === 'parsererror') {
if ($.error)
$.error('parsererror');
}
if (s && s.dataFilter) {
data = s.dataFilter(data, type);
}
if (typeof data === 'string') {
if (type === 'json' || !type && ct.indexOf('json') >= 0) {
data = parseJSON(data);
} else if (type === "script" || !type && ct.indexOf("javascript") >= 0) {
$.globalEval(data);
}
}
return data;
};
return deferred;
}
};
/**
* ajaxForm() provides a mechanism for fully automating form submission.
*
* The advantages of using this method instead of ajaxSubmit() are:
*
* 1. This method will include coordinates for <input type="image" /> elements (if the element
* is used to submit the form).
* 2. This method will include the submit element's name/value data (for the element that was
* used to submit the form).
* 3. This method binds the submit() method to the form for you.
*
* The options argument for ajaxForm works exactly as it does for ajaxSubmit. ajaxForm merely
* passes the options argument along after properly binding events for submit elements and
* the form itself.
*/
$.fn.ajaxForm = function(options) {
options = options || {};
options.delegation = options.delegation && $.isFunction($.fn.on);
// in jQuery 1.3+ we can fix mistakes with the ready state
if (!options.delegation && this.length === 0) {
var o = { s: this.selector, c: this.context };
if (!$.isReady && o.s) {
log('DOM not ready, queuing ajaxForm');
$(function() {
$(o.s,o.c).ajaxForm(options);
});
return this;
}
// is your DOM ready? http://docs.jquery.com/Tutorials:Introducing_$(document).ready()
log('terminating; zero elements found by selector' + ($.isReady ? '' : ' (DOM not ready)'));
return this;
}
if ( options.delegation ) {
$(document)
.off('submit.form-plugin', this.selector, doAjaxSubmit)
.off('click.form-plugin', this.selector, captureSubmittingElement)
.on('submit.form-plugin', this.selector, options, doAjaxSubmit)
.on('click.form-plugin', this.selector, options, captureSubmittingElement);
return this;
}
return this.ajaxFormUnbind()
.bind('submit.form-plugin', options, doAjaxSubmit)
.bind('click.form-plugin', options, captureSubmittingElement);
};
// private event handlers
function doAjaxSubmit(e) {
/*jshint validthis:true */
var options = e.data;
if (!e.isDefaultPrevented()) { // if event has been canceled, don't proceed
e.preventDefault();
$(this).ajaxSubmit(options);
}
}
function captureSubmittingElement(e) {
/*jshint validthis:true */
var target = e.target;
var $el = $(target);
if (!($el.is("[type=submit],[type=image]"))) {
// is this a child element of the submit el? (ex: a span within a button)
var t = $el.closest('[type=submit]');
if (t.length === 0) {
return;
}
target = t[0];
}
var form = this;
form.clk = target;
if (target.type == 'image') {
if (e.offsetX !== undefined) {
form.clk_x = e.offsetX;
form.clk_y = e.offsetY;
} else if (typeof $.fn.offset == 'function') {
var offset = $el.offset();
form.clk_x = e.pageX - offset.left;
form.clk_y = e.pageY - offset.top;
} else {
form.clk_x = e.pageX - target.offsetLeft;
form.clk_y = e.pageY - target.offsetTop;
}
}
// clear form vars
setTimeout(function() { form.clk = form.clk_x = form.clk_y = null; }, 100);
}
// ajaxFormUnbind unbinds the event handlers that were bound by ajaxForm
$.fn.ajaxFormUnbind = function() {
return this.unbind('submit.form-plugin click.form-plugin');
};
/**
* formToArray() gathers form element data into an array of objects that can
* be passed to any of the following ajax functions: $.get, $.post, or load.
* Each object in the array has both a 'name' and 'value' property. An example of
* an array for a simple login form might be:
*
* [ { name: 'username', value: 'jresig' }, { name: 'password', value: 'secret' } ]
*
* It is this array that is passed to pre-submit callback functions provided to the
* ajaxSubmit() and ajaxForm() methods.
*/
$.fn.formToArray = function(semantic, elements) {
var a = [];
if (this.length === 0) {
return a;
}
var form = this[0];
var els = semantic ? form.getElementsByTagName('*') : form.elements;
if (!els) {
return a;
}
var i,j,n,v,el,max,jmax;
for(i=0, max=els.length; i < max; i++) {
el = els[i];
n = el.name;
if (!n || el.disabled) {
continue;
}
if (semantic && form.clk && el.type == "image") {
// handle image inputs on the fly when semantic == true
if(form.clk == el) {
a.push({name: n, value: $(el).val(), type: el.type });
a.push({name: n+'.x', value: form.clk_x}, {name: n+'.y', value: form.clk_y});
}
continue;
}
v = $.fieldValue(el, true);
if (v && v.constructor == Array) {
if (elements)
elements.push(el);
for(j=0, jmax=v.length; j < jmax; j++) {
a.push({name: n, value: v[j]});
}
}
else if (feature.fileapi && el.type == 'file') {
if (elements)
elements.push(el);
var files = el.files;
if (files.length) {
for (j=0; j < files.length; j++) {
a.push({name: n, value: files[j], type: el.type});
}
}
else {
// #180
a.push({ name: n, value: '', type: el.type });
}
}
else if (v !== null && typeof v != 'undefined') {
if (elements)
elements.push(el);
a.push({name: n, value: v, type: el.type, required: el.required});
}
}
if (!semantic && form.clk) {
// input type=='image' are not found in elements array! handle it here
var $input = $(form.clk), input = $input[0];
n = input.name;
if (n && !input.disabled && input.type == 'image') {
a.push({name: n, value: $input.val()});
a.push({name: n+'.x', value: form.clk_x}, {name: n+'.y', value: form.clk_y});
}
}
return a;
};
/**
* Serializes form data into a 'submittable' string. This method will return a string
* in the format: name1=value1&name2=value2
*/
$.fn.formSerialize = function(semantic) {
//hand off to jQuery.param for proper encoding
return $.param(this.formToArray(semantic));
};
/**
* Serializes all field elements in the jQuery object into a query string.
* This method will return a string in the format: name1=value1&name2=value2
*/
$.fn.fieldSerialize = function(successful) {
var a = [];
this.each(function() {
var n = this.name;
if (!n) {
return;
}
var v = $.fieldValue(this, successful);
if (v && v.constructor == Array) {
for (var i=0,max=v.length; i < max; i++) {
a.push({name: n, value: v[i]});
}
}
else if (v !== null && typeof v != 'undefined') {
a.push({name: this.name, value: v});
}
});
//hand off to jQuery.param for proper encoding
return $.param(a);
};
/**
* Returns the value(s) of the element in the matched set. For example, consider the following form:
*
* <form><fieldset>
* <input name="A" type="text" />
* <input name="A" type="text" />
* <input name="B" type="checkbox" value="B1" />
* <input name="B" type="checkbox" value="B2"/>
* <input name="C" type="radio" value="C1" />
* <input name="C" type="radio" value="C2" />
* </fieldset></form>
*
* var v = $('input[type=text]').fieldValue();
* // if no values are entered into the text inputs
* v == ['','']
* // if values entered into the text inputs are 'foo' and 'bar'
* v == ['foo','bar']
*
* var v = $('input[type=checkbox]').fieldValue();
* // if neither checkbox is checked
* v === undefined
* // if both checkboxes are checked
* v == ['B1', 'B2']
*
* var v = $('input[type=radio]').fieldValue();
* // if neither radio is checked
* v === undefined
* // if first radio is checked
* v == ['C1']
*
* The successful argument controls whether or not the field element must be 'successful'
* (per http://www.w3.org/TR/html4/interact/forms.html#successful-controls).
* The default value of the successful argument is true. If this value is false the value(s)
* for each element is returned.
*
* Note: This method *always* returns an array. If no valid value can be determined the
* array will be empty, otherwise it will contain one or more values.
*/
$.fn.fieldValue = function(successful) {
for (var val=[], i=0, max=this.length; i < max; i++) {
var el = this[i];
var v = $.fieldValue(el, successful);
if (v === null || typeof v == 'undefined' || (v.constructor == Array && !v.length)) {
continue;
}
if (v.constructor == Array)
$.merge(val, v);
else
val.push(v);
}
return val;
};
/**
* Returns the value of the field element.
*/
$.fieldValue = function(el, successful) {
var n = el.name, t = el.type, tag = el.tagName.toLowerCase();
if (successful === undefined) {
successful = true;
}
if (successful && (!n || el.disabled || t == 'reset' || t == 'button' ||
(t == 'checkbox' || t == 'radio') && !el.checked ||
(t == 'submit' || t == 'image') && el.form && el.form.clk != el ||
tag == 'select' && el.selectedIndex == -1)) {
return null;
}
if (tag == 'select') {
var index = el.selectedIndex;
if (index < 0) {
return null;
}
var a = [], ops = el.options;
var one = (t == 'select-one');
var max = (one ? index+1 : ops.length);
for(var i=(one ? index : 0); i < max; i++) {
var op = ops[i];
if (op.selected) {
var v = op.value;
if (!v) { // extra pain for IE...
v = (op.attributes && op.attributes['value'] && !(op.attributes['value'].specified)) ? op.text : op.value;
}
if (one) {
return v;
}
a.push(v);
}
}
return a;
}
return $(el).val();
};
/**
* Clears the form data. Takes the following actions on the form's input fields:
* - input text fields will have their 'value' property set to the empty string
* - select elements will have their 'selectedIndex' property set to -1
* - checkbox and radio inputs will have their 'checked' property set to false
* - inputs of type submit, button, reset, and hidden will *not* be affected
* - button elements will *not* be affected
*/
$.fn.clearForm = function(includeHidden) {
return this.each(function() {
$('input,select,textarea', this).clearFields(includeHidden);
});
};
/**
* Clears the selected form elements.
*/
$.fn.clearFields = $.fn.clearInputs = function(includeHidden) {
var re = /^(?:color|date|datetime|email|month|number|password|range|search|tel|text|time|url|week)$/i; // 'hidden' is not in this list
return this.each(function() {
var t = this.type, tag = this.tagName.toLowerCase();
if (re.test(t) || tag == 'textarea') {
this.value = '';
}
else if (t == 'checkbox' || t == 'radio') {
this.checked = false;
}
else if (tag == 'select') {
this.selectedIndex = -1;
}
else if (t == "file") {
if (/MSIE/.test(navigator.userAgent)) {
$(this).replaceWith($(this).clone(true));
} else {
$(this).val('');
}
}
else if (includeHidden) {
// includeHidden can be the value true, or it can be a selector string
// indicating a special test; for example:
// $('#myForm').clearForm('.special:hidden')
// the above would clean hidden inputs that have the class of 'special'
if ( (includeHidden === true && /hidden/.test(t)) ||
(typeof includeHidden == 'string' && $(this).is(includeHidden)) )
this.value = '';
}
});
};
/**
* Resets the form data. Causes all form elements to be reset to their original value.
*/
$.fn.resetForm = function() {
return this.each(function() {
// guard against an input with the name of 'reset'
// note that IE reports the reset function as an 'object'
if (typeof this.reset == 'function' || (typeof this.reset == 'object' && !this.reset.nodeType)) {
this.reset();
}
});
};
/**
* Enables or disables any matching elements.
*/
$.fn.enable = function(b) {
if (b === undefined) {
b = true;
}
return this.each(function() {
this.disabled = !b;
});
};
/**
* Checks/unchecks any matching checkboxes or radio buttons and
* selects/deselects any matching option elements.
*/
$.fn.selected = function(select) {
if (select === undefined) {
select = true;
}
return this.each(function() {
var t = this.type;
if (t == 'checkbox' || t == 'radio') {
this.checked = select;
}
else if (this.tagName.toLowerCase() == 'option') {
var $sel = $(this).parent('select');
if (select && $sel[0] && $sel[0].type == 'select-one') {
// deselect all other options
$sel.find('option').selected(false);
}
this.selected = select;
}
});
};
// expose debug var
$.fn.ajaxSubmit.debug = false;
// helper fn for console logging
function log() {
if (!$.fn.ajaxSubmit.debug)
return;
var msg = '[jquery.form] ' + Array.prototype.join.call(arguments,'');
if (window.console && window.console.log) {
window.console.log(msg);
}
else if (window.opera && window.opera.postError) {
window.opera.postError(msg);
}
}
})(jQuery);<|fim▁end|>
| |
<|file_name|>SQLiteClient.java<|end_file_name|><|fim▁begin|>package ca.qc.bergeron.marcantoine.crammeur.android.repository.crud.sqlite;
import android.content.ContentValues;
import android.content.Context;
import android.database.Cursor;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import ca.qc.bergeron.marcantoine.crammeur.librairy.annotations.Entity;
import ca.qc.bergeron.marcantoine.crammeur.librairy.exceptions.KeyException;
import ca.qc.bergeron.marcantoine.crammeur.android.models.Client;
import ca.qc.bergeron.marcantoine.crammeur.android.repository.crud.SQLiteTemplate;
import ca.qc.bergeron.marcantoine.crammeur.librairy.repository.i.Repository;
/**
* Created by Marc-Antoine on 2017-01-11.
*/
public final class SQLiteClient extends SQLiteTemplate<Client,Integer> implements ca.qc.bergeron.marcantoine.crammeur.android.repository.crud.sqlite.i.SQLiteClient {
public SQLiteClient(Repository pRepository, Context context) {
super(Client.class,Integer.class,pRepository, context);
}
@Override
protected Client convertCursorToEntity(@NonNull Cursor pCursor) {
Client o = new Client();
o.Id = pCursor.getInt(pCursor.getColumnIndex(mId.getAnnotation(Entity.Id.class).name()));
o.Name = pCursor.getString(pCursor.getColumnIndex(F_CLIENT_NAME));
o.EMail = pCursor.getString(pCursor.getColumnIndex(F_CLIENT_EMAIL));
return o;
}
@Override
protected Integer convertCursorToId(@NonNull Cursor pCursor) {
Integer result;
result = pCursor.getInt(pCursor.getColumnIndex(mId.getAnnotation(Entity.Id.class).name()));
return result;
}
@Override
public void create() {
mDB.execSQL(CREATE_TABLE_CLIENTS);
}
@NonNull
@Override
public Integer save(@NonNull Client pData) throws KeyException {
ContentValues values = new ContentValues();
try {
if (pData.Id == null) {
pData.Id = this.getKey(pData);
}
values.put(mId.getAnnotation(Entity.Id.class).name(), mKey.cast(mId.get(pData)));
values.put(F_CLIENT_NAME, pData.Name);
values.put(F_CLIENT_EMAIL, pData.EMail);
if (mId.get(pData) == null || !this.contains(mKey.cast(mId.get(pData)))) {
mId.set(pData, (int) mDB.insert(T_CLIENTS, null, values));
} else {
mDB.update(T_CLIENTS, values, mId.getAnnotation(Entity.Id.class).name() + "=?", new String[]{String.valueOf(pData.Id)});
}
return pData.Id;
} catch (IllegalAccessException e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
@Nullable
@Override
public Integer getKey(@NonNull Client pEntity) {
Integer result = null;<|fim▁hole|> try {
if (mId.get(pEntity) != null) return (Integer) mId.get(pEntity);
String[] columns = new String[] {F_ID};
String where = "LOWER(" + F_CLIENT_NAME + ")=LOWER(?) AND LOWER(" + F_CLIENT_EMAIL + ")=LOWER(?)";
String[] whereArgs = new String[] {pEntity.Name,pEntity.EMail};
// limit to 1 row
Cursor cursor = mDB.query(T_CLIENTS, columns, where, whereArgs, null, null, null, "1");
if (cursor.moveToFirst()) {
result = cursor.getInt(cursor.getColumnIndex(F_ID));
}
} catch (IllegalAccessException e) {
e.printStackTrace();
throw new RuntimeException(e);
}
return result;
}
}<|fim▁end|>
| |
<|file_name|>create.go<|end_file_name|><|fim▁begin|>package pg
import "strings"
const (
createTmpl = `
// Create%[1]s inserts an entry into DB
func Create%[1]s(db cruderQueryRower, x %[2]s) (*%[2]s, error) {
var y %[2]s
err := db.QueryRow(
` + "`" + `INSERT INTO %s (%s) VALUES (%s)
RETURNING %s` + "`" + `,
%s,
).Scan(%s)
return &y, err
}
`
)
// GenerateCreate generates the Create method for the struct
func (g *PG) GenerateCreate() {
g.GenerateType(typeQueryRowerInterface)
var suffix string
if !g.SkipSuffix {
suffix = g.structModel
}
g.Printf(createTmpl,
suffix,
g.structModel,
g.TableName,<|fim▁hole|> strings.Join(g.placeholderStrings(len(g.writeFieldDBNames(""))), ", "),
strings.Join(g.readFieldDBNames(""), ", "),
strings.Join(g.writeFieldNames("x."), ", "),
strings.Join(g.readFieldNames("&y."), ", "),
)
}<|fim▁end|>
|
strings.Join(g.writeFieldDBNames(""), ", "),
|
<|file_name|>glyph.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use geom::point::Point2D;
use std::cmp::{Ordering, PartialOrd};
use std::iter::repeat;
use std::mem;
use std::num::{ToPrimitive, NumCast};
use std::ops::{Add, Sub, Mul, Neg, Div, Rem, BitAnd, BitOr, BitXor, Shl, Shr, Not};
use std::u16;
use std::vec::Vec;
use util::geometry::Au;
use util::range::{self, Range, RangeIndex, EachIndex};
use util::vec::*;
/// GlyphEntry is a port of Gecko's CompressedGlyph scheme for storing glyph data compactly.
///
/// In the common case (reasonable glyph advances, no offsets from the font em-box, and one glyph
/// per character), we pack glyph advance, glyph id, and some flags into a single u32.
///
/// In the uncommon case (multiple glyphs per unicode character, large glyph index/advance, or
/// glyph offsets), we pack the glyph count into GlyphEntry, and store the other glyph information
/// in DetailedGlyphStore.
#[derive(Clone, Debug, Copy)]
struct GlyphEntry {
value: u32,
}
impl GlyphEntry {
fn new(value: u32) -> GlyphEntry {
GlyphEntry {
value: value,
}
}
fn initial() -> GlyphEntry {
GlyphEntry::new(0)
}
// Creates a GlyphEntry for the common case
fn simple(id: GlyphId, advance: Au) -> GlyphEntry {
assert!(is_simple_glyph_id(id));
assert!(is_simple_advance(advance));
let id_mask = id as u32;
let Au(advance) = advance;
let advance_mask = (advance as u32) << GLYPH_ADVANCE_SHIFT as uint;
GlyphEntry::new(id_mask | advance_mask | FLAG_IS_SIMPLE_GLYPH)
}
// Create a GlyphEntry for uncommon case; should be accompanied by
// initialization of the actual DetailedGlyph data in DetailedGlyphStore
fn complex(starts_cluster: bool, starts_ligature: bool, glyph_count: int) -> GlyphEntry {
assert!(glyph_count <= u16::MAX as int);
debug!("creating complex glyph entry: starts_cluster={}, starts_ligature={}, \
glyph_count={}",
starts_cluster,
starts_ligature,
glyph_count);
let mut val = FLAG_NOT_MISSING;
if !starts_cluster {
val |= FLAG_NOT_CLUSTER_START;
}
if !starts_ligature {
val |= FLAG_NOT_LIGATURE_GROUP_START;
}
val |= (glyph_count as u32) << GLYPH_COUNT_SHIFT as uint;
GlyphEntry::new(val)
}
/// Create a GlyphEntry for the case where glyphs couldn't be found for the specified
/// character.
fn missing(glyph_count: int) -> GlyphEntry {
assert!(glyph_count <= u16::MAX as int);
GlyphEntry::new((glyph_count as u32) << GLYPH_COUNT_SHIFT as uint)
}
}
/// The id of a particular glyph within a font
pub type GlyphId = u32;
// TODO: unify with bit flags?
#[derive(PartialEq, Copy)]
pub enum BreakType {
None,
Normal,
Hyphen,
}
static BREAK_TYPE_NONE: u8 = 0x0;
static BREAK_TYPE_NORMAL: u8 = 0x1;
static BREAK_TYPE_HYPHEN: u8 = 0x2;
fn break_flag_to_enum(flag: u8) -> BreakType {
if (flag & BREAK_TYPE_NORMAL) != 0 {
BreakType::Normal
} else if (flag & BREAK_TYPE_HYPHEN) != 0 {
BreakType::Hyphen
} else {
BreakType::None
}
}
fn break_enum_to_flag(e: BreakType) -> u8 {
match e {
BreakType::None => BREAK_TYPE_NONE,
BreakType::Normal => BREAK_TYPE_NORMAL,
BreakType::Hyphen => BREAK_TYPE_HYPHEN,
}
}
// TODO: make this more type-safe.
static FLAG_CHAR_IS_SPACE: u32 = 0x10000000;
// These two bits store some BREAK_TYPE_* flags
static FLAG_CAN_BREAK_MASK: u32 = 0x60000000;
static FLAG_CAN_BREAK_SHIFT: u32 = 29;
static FLAG_IS_SIMPLE_GLYPH: u32 = 0x80000000;
// glyph advance; in Au's.
static GLYPH_ADVANCE_MASK: u32 = 0x0FFF0000;
static GLYPH_ADVANCE_SHIFT: u32 = 16;
static GLYPH_ID_MASK: u32 = 0x0000FFFF;
// Non-simple glyphs (more than one glyph per char; missing glyph,
// newline, tab, large advance, or nonzero x/y offsets) may have one
// or more detailed glyphs associated with them. They are stored in a
// side array so that there is a 1:1 mapping of GlyphEntry to
// unicode char.
// The number of detailed glyphs for this char. If the char couldn't
// be mapped to a glyph (!FLAG_NOT_MISSING), then this actually holds
// the UTF8 code point instead.
static GLYPH_COUNT_MASK: u32 = 0x00FFFF00;
static GLYPH_COUNT_SHIFT: u32 = 8;
// N.B. following Gecko, these are all inverted so that a lot of
// missing chars can be memset with zeros in one fell swoop.
static FLAG_NOT_MISSING: u32 = 0x00000001;
static FLAG_NOT_CLUSTER_START: u32 = 0x00000002;
static FLAG_NOT_LIGATURE_GROUP_START: u32 = 0x00000004;
static FLAG_CHAR_IS_TAB: u32 = 0x00000008;
static FLAG_CHAR_IS_NEWLINE: u32 = 0x00000010;
//static FLAG_CHAR_IS_LOW_SURROGATE: u32 = 0x00000020;
//static CHAR_IDENTITY_FLAGS_MASK: u32 = 0x00000038;
fn is_simple_glyph_id(id: GlyphId) -> bool {
((id as u32) & GLYPH_ID_MASK) == id
}
fn is_simple_advance(advance: Au) -> bool {
let unsigned_au = advance.to_u32().unwrap();
(unsigned_au & (GLYPH_ADVANCE_MASK >> GLYPH_ADVANCE_SHIFT as uint)) == unsigned_au
}
type DetailedGlyphCount = u16;
// Getters and setters for GlyphEntry. Setter methods are functional,
// because GlyphEntry is immutable and only a u32 in size.
impl GlyphEntry {
// getter methods
#[inline(always)]
fn advance(&self) -> Au {
NumCast::from((self.value & GLYPH_ADVANCE_MASK) >> GLYPH_ADVANCE_SHIFT as uint).unwrap()
}
fn id(&self) -> GlyphId {
self.value & GLYPH_ID_MASK
}<|fim▁hole|> }
fn is_cluster_start(&self) -> bool {
self.has_flag(!FLAG_NOT_CLUSTER_START)
}
// True if original char was normal (U+0020) space. Other chars may
// map to space glyph, but this does not account for them.
fn char_is_space(&self) -> bool {
self.has_flag(FLAG_CHAR_IS_SPACE)
}
fn char_is_tab(&self) -> bool {
!self.is_simple() && self.has_flag(FLAG_CHAR_IS_TAB)
}
fn char_is_newline(&self) -> bool {
!self.is_simple() && self.has_flag(FLAG_CHAR_IS_NEWLINE)
}
fn can_break_before(&self) -> BreakType {
let flag = ((self.value & FLAG_CAN_BREAK_MASK) >> FLAG_CAN_BREAK_SHIFT as uint) as u8;
break_flag_to_enum(flag)
}
// setter methods
#[inline(always)]
fn set_char_is_space(&self) -> GlyphEntry {
GlyphEntry::new(self.value | FLAG_CHAR_IS_SPACE)
}
#[inline(always)]
fn set_char_is_tab(&self) -> GlyphEntry {
assert!(!self.is_simple());
GlyphEntry::new(self.value | FLAG_CHAR_IS_TAB)
}
#[inline(always)]
fn set_char_is_newline(&self) -> GlyphEntry {
assert!(!self.is_simple());
GlyphEntry::new(self.value | FLAG_CHAR_IS_NEWLINE)
}
#[inline(always)]
fn set_can_break_before(&self, e: BreakType) -> GlyphEntry {
let flag = (break_enum_to_flag(e) as u32) << FLAG_CAN_BREAK_SHIFT as uint;
GlyphEntry::new(self.value | flag)
}
// helper methods
fn glyph_count(&self) -> u16 {
assert!(!self.is_simple());
((self.value & GLYPH_COUNT_MASK) >> GLYPH_COUNT_SHIFT as uint) as u16
}
#[inline(always)]
fn is_simple(&self) -> bool {
self.has_flag(FLAG_IS_SIMPLE_GLYPH)
}
#[inline(always)]
fn has_flag(&self, flag: u32) -> bool {
(self.value & flag) != 0
}
#[inline(always)]
fn adapt_character_flags_of_entry(&self, other: GlyphEntry) -> GlyphEntry {
GlyphEntry { value: self.value | other.value }
}
}
// Stores data for a detailed glyph, in the case that several glyphs
// correspond to one character, or the glyph's data couldn't be packed.
#[derive(Clone, Debug, Copy)]
struct DetailedGlyph {
id: GlyphId,
// glyph's advance, in the text's direction (LTR or RTL)
advance: Au,
// glyph's offset from the font's em-box (from top-left)
offset: Point2D<Au>,
}
impl DetailedGlyph {
fn new(id: GlyphId, advance: Au, offset: Point2D<Au>) -> DetailedGlyph {
DetailedGlyph {
id: id,
advance: advance,
offset: offset,
}
}
}
#[derive(PartialEq, Clone, Eq, Debug, Copy)]
struct DetailedGlyphRecord {
// source string offset/GlyphEntry offset in the TextRun
entry_offset: CharIndex,
// offset into the detailed glyphs buffer
detail_offset: int,
}
impl PartialOrd for DetailedGlyphRecord {
fn partial_cmp(&self, other: &DetailedGlyphRecord) -> Option<Ordering> {
self.entry_offset.partial_cmp(&other.entry_offset)
}
}
impl Ord for DetailedGlyphRecord {
fn cmp(&self, other: &DetailedGlyphRecord) -> Ordering {
self.entry_offset.cmp(&other.entry_offset)
}
}
// Manages the lookup table for detailed glyphs. Sorting is deferred
// until a lookup is actually performed; this matches the expected
// usage pattern of setting/appending all the detailed glyphs, and
// then querying without setting.
#[derive(Clone)]
struct DetailedGlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_buffer: Vec<DetailedGlyph>,
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_lookup: Vec<DetailedGlyphRecord>,
lookup_is_sorted: bool,
}
impl<'a> DetailedGlyphStore {
fn new() -> DetailedGlyphStore {
DetailedGlyphStore {
detail_buffer: vec!(), // TODO: default size?
detail_lookup: vec!(),
lookup_is_sorted: false,
}
}
fn add_detailed_glyphs_for_entry(&mut self, entry_offset: CharIndex, glyphs: &[DetailedGlyph]) {
let entry = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: self.detail_buffer.len() as int,
};
debug!("Adding entry[off={:?}] for detailed glyphs: {:?}", entry_offset, glyphs);
/* TODO: don't actually assert this until asserts are compiled
in/out based on severity, debug/release, etc. This assertion
would wreck the complexity of the lookup.
See Rust Issue #3647, #2228, #3627 for related information.
do self.detail_lookup.borrow |arr| {
assert !arr.contains(entry)
}
*/
self.detail_lookup.push(entry);
self.detail_buffer.push_all(glyphs);
self.lookup_is_sorted = false;
}
fn get_detailed_glyphs_for_entry(&'a self, entry_offset: CharIndex, count: u16)
-> &'a [DetailedGlyph] {
debug!("Requesting detailed glyphs[n={}] for entry[off={:?}]", count, entry_offset);
// FIXME: Is this right? --pcwalton
// TODO: should fix this somewhere else
if count == 0 {
return &self.detail_buffer[0..0];
}
assert!((count as uint) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = (&*self.detail_lookup).binary_search_index(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (count as uint) <= self.detail_buffer.len());
// return a slice into the buffer
&self.detail_buffer[i .. i + count as uint]
}
fn get_detailed_glyph_with_index(&'a self,
entry_offset: CharIndex,
detail_offset: u16)
-> &'a DetailedGlyph {
assert!((detail_offset as uint) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.as_slice().binary_search_index(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (detail_offset as uint) < self.detail_buffer.len());
&self.detail_buffer[i + (detail_offset as uint)]
}
fn ensure_sorted(&mut self) {
if self.lookup_is_sorted {
return;
}
// Sorting a unique vector is surprisingly hard. The following
// code is a good argument for using DVecs, but they require
// immutable locations and thus don't play well with freezing.
// Thar be dragons here. You have been warned. (Tips accepted.)
let mut unsorted_records: Vec<DetailedGlyphRecord> = vec!();
mem::swap(&mut self.detail_lookup, &mut unsorted_records);
let mut mut_records : Vec<DetailedGlyphRecord> = unsorted_records;
mut_records.sort_by(|a, b| {
if a < b {
Ordering::Less
} else {
Ordering::Greater
}
});
let mut sorted_records = mut_records;
mem::swap(&mut self.detail_lookup, &mut sorted_records);
self.lookup_is_sorted = true;
}
}
// This struct is used by GlyphStore clients to provide new glyph data.
// It should be allocated on the stack and passed by reference to GlyphStore.
#[derive(Copy)]
pub struct GlyphData {
id: GlyphId,
advance: Au,
offset: Point2D<Au>,
is_missing: bool,
cluster_start: bool,
ligature_start: bool,
}
impl GlyphData {
/// Creates a new entry for one glyph.
pub fn new(id: GlyphId,
advance: Au,
offset: Option<Point2D<Au>>,
is_missing: bool,
cluster_start: bool,
ligature_start: bool)
-> GlyphData {
GlyphData {
id: id,
advance: advance,
offset: offset.unwrap_or(Point2D::zero()),
is_missing: is_missing,
cluster_start: cluster_start,
ligature_start: ligature_start,
}
}
}
// This enum is a proxy that's provided to GlyphStore clients when iterating
// through glyphs (either for a particular TextRun offset, or all glyphs).
// Rather than eagerly assembling and copying glyph data, it only retrieves
// values as they are needed from the GlyphStore, using provided offsets.
#[derive(Copy)]
pub enum GlyphInfo<'a> {
Simple(&'a GlyphStore, CharIndex),
Detail(&'a GlyphStore, CharIndex, u16),
}
impl<'a> GlyphInfo<'a> {
pub fn id(self) -> GlyphId {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_uint()].id(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).id
}
}
}
#[inline(always)]
// FIXME: Resolution conflicts with IteratorUtil trait so adding trailing _
pub fn advance(self) -> Au {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_uint()].advance(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).advance
}
}
}
pub fn offset(self) -> Option<Point2D<Au>> {
match self {
GlyphInfo::Simple(_, _) => None,
GlyphInfo::Detail(store, entry_i, detail_j) => {
Some(store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).offset)
}
}
}
}
/// Stores the glyph data belonging to a text run.
///
/// Simple glyphs are stored inline in the `entry_buffer`, detailed glyphs are
/// stored as pointers into the `detail_store`.
///
/// ~~~ignore
/// +- GlyphStore --------------------------------+
/// | +---+---+---+---+---+---+---+ |
/// | entry_buffer: | | s | | s | | s | s | | d = detailed
/// | +-|-+---+-|-+---+-|-+---+---+ | s = simple
/// | | | | |
/// | | +---+-------+ |
/// | | | |
/// | +-V-+-V-+ |
/// | detail_store: | d | d | |
/// | +---+---+ |
/// +---------------------------------------------+
/// ~~~
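///
/// A minimal usage sketch (illustrative only; the id and advance values are
/// made up, not taken from the original code):
///
/// ~~~ignore
/// let mut store = GlyphStore::new(1, false);
/// let data = GlyphData::new(42, Au(160), None, false, true, true);
/// store.add_glyph_for_char_index(CharIndex(0), Some('a'), &data);
/// store.finalize_changes();
/// ~~~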
#[derive(Clone)]
pub struct GlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
/// A buffer of glyphs within the text run, in the order in which they
/// appear in the input text
entry_buffer: Vec<GlyphEntry>,
/// A store of the detailed glyph data. Detailed glyphs contained in the
/// `entry_buffer` point to locations in this data structure.
detail_store: DetailedGlyphStore,
is_whitespace: bool,
}
int_range_index! {
#[derive(RustcEncodable)]
#[doc = "An index that refers to a character in a text run. This could \
point to the middle of a glyph."]
struct CharIndex(int)
}
impl<'a> GlyphStore {
// Initializes the glyph store, but doesn't actually shape anything.
// Use the set_glyph, set_glyphs() methods to store glyph data.
pub fn new(length: int, is_whitespace: bool) -> GlyphStore {
assert!(length > 0);
GlyphStore {
entry_buffer: repeat(GlyphEntry::initial()).take(length as uint)
.collect(),
detail_store: DetailedGlyphStore::new(),
is_whitespace: is_whitespace,
}
}
pub fn char_len(&self) -> CharIndex {
CharIndex(self.entry_buffer.len() as int)
}
pub fn is_whitespace(&self) -> bool {
self.is_whitespace
}
pub fn finalize_changes(&mut self) {
self.detail_store.ensure_sorted();
}
/// Adds a single glyph. If `character` is present, this represents a single character;
/// otherwise, this glyph represents multiple characters.
pub fn add_glyph_for_char_index(&mut self,
i: CharIndex,
character: Option<char>,
data: &GlyphData) {
fn glyph_is_compressible(data: &GlyphData) -> bool {
is_simple_glyph_id(data.id)
&& is_simple_advance(data.advance)
&& data.offset == Point2D::zero()
&& data.cluster_start // others are stored in detail buffer
}
debug_assert!(data.ligature_start); // can't compress ligature continuation glyphs.
debug_assert!(i < self.char_len());
let mut entry = match (data.is_missing, glyph_is_compressible(data)) {
(true, _) => GlyphEntry::missing(1),
(false, true) => GlyphEntry::simple(data.id, data.advance),
(false, false) => {
let glyph = &[DetailedGlyph::new(data.id, data.advance, data.offset)];
self.detail_store.add_detailed_glyphs_for_entry(i, glyph);
GlyphEntry::complex(data.cluster_start, data.ligature_start, 1)
}
};
// FIXME(pcwalton): Is this necessary? I think it's a no-op.
entry = entry.adapt_character_flags_of_entry(self.entry_buffer[i.to_uint()]);
if character == Some(' ') {
entry = entry.set_char_is_space()
}
self.entry_buffer[i.to_uint()] = entry;
}
pub fn add_glyphs_for_char_index(&mut self, i: CharIndex, data_for_glyphs: &[GlyphData]) {
assert!(i < self.char_len());
assert!(data_for_glyphs.len() > 0);
let glyph_count = data_for_glyphs.len() as int;
let first_glyph_data = data_for_glyphs[0];
let entry = match first_glyph_data.is_missing {
true => GlyphEntry::missing(glyph_count),
false => {
let glyphs_vec: Vec<DetailedGlyph> = (0..glyph_count as uint).map(|i| {
DetailedGlyph::new(data_for_glyphs[i].id,
data_for_glyphs[i].advance,
data_for_glyphs[i].offset)
}).collect();
self.detail_store.add_detailed_glyphs_for_entry(i, &glyphs_vec);
GlyphEntry::complex(first_glyph_data.cluster_start,
first_glyph_data.ligature_start,
glyph_count)
}
}.adapt_character_flags_of_entry(self.entry_buffer[i.to_uint()]);
debug!("Adding multiple glyphs[idx={:?}, count={}]: {:?}", i, glyph_count, entry);
self.entry_buffer[i.to_uint()] = entry;
}
// used when a character index has no associated glyph---for example, a ligature continuation.
pub fn add_nonglyph_for_char_index(&mut self, i: CharIndex, cluster_start: bool, ligature_start: bool) {
assert!(i < self.char_len());
let entry = GlyphEntry::complex(cluster_start, ligature_start, 0);
debug!("adding spacer for chracter without associated glyph[idx={:?}]", i);
self.entry_buffer[i.to_uint()] = entry;
}
pub fn iter_glyphs_for_char_index(&'a self, i: CharIndex) -> GlyphIterator<'a> {
self.iter_glyphs_for_char_range(&Range::new(i, CharIndex(1)))
}
#[inline]
pub fn iter_glyphs_for_char_range(&'a self, rang: &Range<CharIndex>) -> GlyphIterator<'a> {
if rang.begin() >= self.char_len() {
panic!("iter_glyphs_for_range: range.begin beyond length!");
}
if rang.end() > self.char_len() {
panic!("iter_glyphs_for_range: range.end beyond length!");
}
GlyphIterator {
store: self,
char_index: rang.begin(),
char_range: rang.each_index(),
glyph_range: None,
}
}
#[inline]
pub fn advance_for_char_range(&self, rang: &Range<CharIndex>) -> Au {
self.iter_glyphs_for_char_range(rang)
.fold(Au(0), |advance, (_, glyph)| advance + glyph.advance())
}
// getter methods
pub fn char_is_space(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
self.entry_buffer[i.to_uint()].char_is_space()
}
pub fn char_is_tab(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
self.entry_buffer[i.to_uint()].char_is_tab()
}
pub fn char_is_newline(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
self.entry_buffer[i.to_uint()].char_is_newline()
}
pub fn is_ligature_start(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
self.entry_buffer[i.to_uint()].is_ligature_start()
}
pub fn is_cluster_start(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
self.entry_buffer[i.to_uint()].is_cluster_start()
}
pub fn can_break_before(&self, i: CharIndex) -> BreakType {
assert!(i < self.char_len());
self.entry_buffer[i.to_uint()].can_break_before()
}
// setter methods
pub fn set_char_is_space(&mut self, i: CharIndex) {
assert!(i < self.char_len());
let entry = self.entry_buffer[i.to_uint()];
self.entry_buffer[i.to_uint()] = entry.set_char_is_space();
}
pub fn set_char_is_tab(&mut self, i: CharIndex) {
assert!(i < self.char_len());
let entry = self.entry_buffer[i.to_uint()];
self.entry_buffer[i.to_uint()] = entry.set_char_is_tab();
}
pub fn set_char_is_newline(&mut self, i: CharIndex) {
assert!(i < self.char_len());
let entry = self.entry_buffer[i.to_uint()];
self.entry_buffer[i.to_uint()] = entry.set_char_is_newline();
}
pub fn set_can_break_before(&mut self, i: CharIndex, t: BreakType) {
assert!(i < self.char_len());
let entry = self.entry_buffer[i.to_uint()];
self.entry_buffer[i.to_uint()] = entry.set_can_break_before(t);
}
pub fn space_count_in_range(&self, range: &Range<CharIndex>) -> u32 {
let mut spaces = 0;
for index in range.each_index() {
if self.char_is_space(index) {
spaces += 1
}
}
spaces
}
pub fn distribute_extra_space_in_range(&mut self, range: &Range<CharIndex>, space: f64) {
debug_assert!(space >= 0.0);
if range.is_empty() {
return
}
for index in range.each_index() {
// TODO(pcwalton): Handle spaces that are detailed glyphs -- these are uncommon but
// possible.
let entry = &mut self.entry_buffer[index.to_uint()];
if entry.is_simple() && entry.char_is_space() {
// FIXME(pcwalton): This can overflow for very large font-sizes.
let advance =
((entry.value & GLYPH_ADVANCE_MASK) >> (GLYPH_ADVANCE_SHIFT as uint)) +
Au::from_frac_px(space).to_u32().unwrap();
entry.value = (entry.value & !GLYPH_ADVANCE_MASK) |
(advance << (GLYPH_ADVANCE_SHIFT as uint));
}
}
}
}
/// An iterator over the glyphs in a character range in a `GlyphStore`.
pub struct GlyphIterator<'a> {
store: &'a GlyphStore,
char_index: CharIndex,
char_range: EachIndex<int, CharIndex>,
glyph_range: Option<EachIndex<int, CharIndex>>,
}
impl<'a> GlyphIterator<'a> {
// Slow path when there is a glyph range.
#[inline(never)]
fn next_glyph_range(&mut self) -> Option<(CharIndex, GlyphInfo<'a>)> {
match self.glyph_range.as_mut().unwrap().next() {
Some(j) => Some((self.char_index,
GlyphInfo::Detail(self.store, self.char_index, j.get() as u16 /* ??? */))),
None => {
// No more glyphs for current character. Try to get another.
self.glyph_range = None;
self.next()
}
}
}
// Slow path when there is a complex glyph.
#[inline(never)]
fn next_complex_glyph(&mut self, entry: &GlyphEntry, i: CharIndex)
-> Option<(CharIndex, GlyphInfo<'a>)> {
let glyphs = self.store.detail_store.get_detailed_glyphs_for_entry(i, entry.glyph_count());
self.glyph_range = Some(range::each_index(CharIndex(0), CharIndex(glyphs.len() as int)));
self.next()
}
}
impl<'a> Iterator for GlyphIterator<'a> {
type Item = (CharIndex, GlyphInfo<'a>);
// I tried to start with something simpler and apply FlatMap, but the
// inability to store free variables in the FlatMap struct was problematic.
//
// This function consists of the fast path and is designed to be inlined into its caller. The
// slow paths, which should not be inlined, are `next_glyph_range()` and
// `next_complex_glyph()`.
#[inline(always)]
fn next(&mut self) -> Option<(CharIndex, GlyphInfo<'a>)> {
// Would use 'match' here but it borrows contents in a way that
// interferes with mutation.
if self.glyph_range.is_some() {
self.next_glyph_range()
} else {
// No glyph range. Look at next character.
self.char_range.next().and_then(|i| {
self.char_index = i;
assert!(i < self.store.char_len());
let entry = self.store.entry_buffer[i.to_uint()];
if entry.is_simple() {
Some((self.char_index, GlyphInfo::Simple(self.store, i)))
} else {
// Fall back to the slow path.
self.next_complex_glyph(&entry, i)
}
})
}
}
}<|fim▁end|>
|
fn is_ligature_start(&self) -> bool {
self.has_flag(!FLAG_NOT_LIGATURE_GROUP_START)
|
<|file_name|>Word.java<|end_file_name|><|fim▁begin|>package eu.fbk.fcw.utils.corpus;
import java.io.Serializable;
/**
* Created by alessio on 12/11/15.
*/
public class Word implements Serializable {
private int id;
private String form;
private String lemma;
private String pos;
private int depParent;
private String depLabel;
private int begin;
private int end;
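// Note (added for clarity, inferred from the parsing below rather than any
// original documentation): the column indices used here (0=id, 1=form,
// 2=lemma, 4=pos, 8=dependency head, 10=dependency label) correspond to a
// CoNLL-2009-style token row.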
public static Word readFromArray(String[] parts) {
//todo: better management of possibilities
if (parts.length >= 12) {
return new Word(
Integer.parseInt(parts[0]),
parts[1],
parts[2],
parts[4],
Integer.parseInt(parts[8]),
parts[10]
);
}
return new Word(
Integer.parseInt(parts[0]),
parts[1],
parts[2],
parts[4]
);
}
public Word(int id, String form, String lemma, String pos) {
this.id = id;
this.form = form;
this.lemma = lemma;
this.pos = pos;
}
public Word(int id, String form, String lemma, String pos, int depParent, String depLabel) {
this.id = id;
this.form = form;
this.lemma = lemma;
this.pos = pos;<|fim▁hole|> public String getForm() {
return form;
}
public void setForm(String form) {
this.form = form;
}
public String getLemma() {
return lemma;
}
public void setLemma(String lemma) {
this.lemma = lemma;
}
public String getPos() {
return pos;
}
public void setPos(String pos) {
this.pos = pos;
}
public int getDepParent() {
return depParent;
}
public void setDepParent(int depParent) {
this.depParent = depParent;
}
public String getDepLabel() {
return depLabel;
}
public void setDepLabel(String depLabel) {
this.depLabel = depLabel;
}
public int getId() {
return id;
}
public int getBegin() {
return begin;
}
public void setBegin(int begin) {
this.begin = begin;
}
public int getEnd() {
return end;
}
public void setEnd(int end) {
this.end = end;
}
@Override public String toString() {
return "Word{" +
"id=" + id +
", form='" + form + '\'' +
", lemma='" + lemma + '\'' +
", pos='" + pos + '\'' +
", depParent=" + depParent +
", depLabel='" + depLabel + '\'' +
'}';
}
@Override public boolean equals(Object obj) {
if (obj instanceof Word) {
return ((Word) obj).getId() == id;
}
return super.equals(obj);
}
}<|fim▁end|>
|
this.depParent = depParent;
this.depLabel = depLabel;
}
|
<|file_name|>flatxml2po.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Copyright 2018 BhaaL
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert flat XML files to Gettext PO localization files.<|fim▁hole|>
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/flatxml2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import flatxml, po
class flatxml2po:
"""Convert a single XML file to a single PO file."""
SourceStoreClass = flatxml.FlatXMLFile
TargetStoreClass = po.pofile
TargetUnitClass = po.pounit
def __init__(self, inputfile, outputfile, templatefile=None,
root="root", value="str", key="key", ns=None):
"""Initialize the converter."""
self.inputfile = inputfile
self.outputfile = outputfile
self.source_store = self.SourceStoreClass(inputfile,
root_name=root,
value_name=value,
key_name=key,
namespace=ns)
self.target_store = self.TargetStoreClass()
def convert_unit(self, unit):
"""Convert a source format unit to a target format unit."""
target_unit = self.TargetUnitClass.buildfromunit(unit)
return target_unit
def convert_store(self):
"""Convert a single source file to a target format file."""
for source_unit in self.source_store.units:
self.target_store.addunit(self.convert_unit(source_unit))
def run(self):
"""Run the converter."""
self.convert_store()
if self.target_store.isempty():
return 0
self.target_store.serialize(self.outputfile)
return 1
def run_converter(inputfile, outputfile, templatefile=None,
root="root", value="str", key="key", ns=None):
"""Wrapper around the converter."""
return flatxml2po(inputfile, outputfile, templatefile,
root, value, key, ns).run()
formats = {
"xml": ("po", run_converter),
}
def main(argv=None):
parser = convert.ConvertOptionParser(formats,
description=__doc__)
parser.add_option("-r", "--root", action="store", dest="root",
default="root",
help='name of the XML root element (default: "root")')
parser.add_option("-v", "--value", action="store", dest="value",
default="str",
help='name of the XML value element (default: "str")')
parser.add_option("-k", "--key", action="store", dest="key",
default="key",
help='name of the XML key attribute (default: "key")')
parser.add_option("-n", "--namespace", action="store", dest="ns",
default=None,
help="XML namespace uri (default: None)")
parser.passthrough.append("root")
parser.passthrough.append("value")
parser.passthrough.append("key")
parser.passthrough.append("ns")
parser.run(argv)
if __name__ == "__main__":
main()<|fim▁end|>
| |
<|file_name|>helpers.py<|end_file_name|><|fim▁begin|>"""Helper functions
Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to templates as 'h'.
"""
from routes import url_for
from webhelpers.html import literal
from webhelpers.html.secure_form import secure_form
from webhelpers.html.tags import *
from webhelpers.html.tools import auto_link, mail_to
from webhelpers.text import truncate, chop_at, plural
from webob.exc import strip_tags
from wurdig.lib import auth
from wurdig.lib.comment import *
from wurdig.lib.cookie import *
from wurdig.lib.conf_helper import *
from wurdig.lib.widgets import *
from wurdig.lib.html import *
from wurdig.lib.mdown import *
from wurdig.lib.tag import cloud, post_tags
from wurdig.lib.tidy_helper import *<|fim▁hole|> import os
path = os.path.join(pylons.config['pylons.paths']['static_files'], 'css', '%s')
f = open(path % csslist,'r')
stylesheets = f.read()
f.close()
return ['/css/%s.css?%s' % (f, mtime('/css/%s.css' % f)) for f in stylesheets.split()]<|fim▁end|>
|
from wurdig.lib.utils_helper import *
def load_stylesheet_assets(csslist='FCSSLIST'):
import pylons
|
<|file_name|>product.js<|end_file_name|><|fim▁begin|>export default (sequelize, DataTypes) => {
const Product = sequelize.define('Product', {
name: DataTypes.STRING,
description: DataTypes.TEXT,
price: DataTypes.FLOAT,
releasedate: DataTypes.DATE
}, {<|fim▁hole|> Product.belongsToMany(models.Platform, {through: 'ProductPlatform'});
Product.belongsTo(models.SpecialEdition);
}
}
});
return Product;
};<|fim▁end|>
|
classMethods: {
associate: models => {
Product.belongsToMany(models.Cart, {through: 'ProductCart'});
|
<|file_name|>issue-11925.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() {
let r = {<|fim▁hole|>
drop(r);
}<|fim▁end|>
|
let x = ~42;
let f = proc() &x; //~ ERROR: `x` does not live long enough
f()
};
|
<|file_name|>issue-48414.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:issue-48414.rs
// ICE when resolving paths for a trait that linked to another trait, when both were in an external
// crate
#![crate_name = "base"]
<|fim▁hole|>extern crate issue_48414;
#[doc(inline)]
pub use issue_48414::{SomeTrait, OtherTrait};<|fim▁end|>
| |
<|file_name|>molecular_weights.py<|end_file_name|><|fim▁begin|>from sofia.step import Step
class MolecularWeights(Step):
"""
Reads in a set of molecular weights. The file of molecular weights can be obtained from
http://emboss.sourceforge.net/.
"""
<|fim▁hole|> OUT = ['molecular_weight_set']
def run(self, molecular_weight_file):
infile = open(molecular_weight_file[0], encoding='utf-8')
data = infile.read()
infile.close()
interface = {}
for line in data.split('\n'):
if line.strip() == '' or line[0] == '#' or line.startswith('Mol'):
continue
parts = line.split()
interface[parts[0]] = {'avg': float(parts[1]), 'mono': float(parts[2])}
yield interface<|fim▁end|>
|
IN = ['molecular_weight_file']
|
<|file_name|>runner.py<|end_file_name|><|fim▁begin|># docker-pipeline
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dan Leehr
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from docker.client import Client
from docker.utils import kwargs_from_env
class Runner():
def __init__(self, pipeline=None):
self.pipeline = pipeline
self.client = Runner.get_client()
self.remove_containers = False
self.result = None
@classmethod
def get_client(cls):
# Using boot2docker instructions for now
# http://docker-py.readthedocs.org/en/latest/boot2docker/
# Workaround for requests.exceptions.SSLError: hostname '192.168.59.103' doesn't match 'boot2docker'
client = Client(version='auto', **kwargs_from_env(assert_hostname=False))
return client
def run(self):
if self.pipeline.debug:
print "Running pipeline: {}".format(self)
for each_step in self.pipeline.steps:
if self.pipeline.pull_images:
self.pull_image(each_step)
container = self.create_container(each_step)
self.start_container(container, each_step)
self.result = self.get_result(container, each_step)
self.finish_container(container, each_step)
if self.result['code'] != 0:
# Container exited with nonzero status code
print "Error: step exited with code {}".format(self.result['code'])
# Pipeline breaks if nonzero result is encountered
break
if self.pipeline.debug:
print 'Result: {}'.format(self.result)
def pull_image(self, step):
if self.pipeline.debug:
print 'Pulling image for step: {}'.format(step)
image_result = self.client.pull(step.image)
if self.pipeline.debug:
print image_result
def create_container(self, step):
if self.pipeline.debug:
print 'Creating container for step: {}'.format(step)
print 'Image: {}'.format(step.image)
print 'Volumes: {}'.format(step.get_volumes())
print 'Environment: {}'.format(step.environment)
container = self.client.create_container(step.image,
command=step.command,
environment=step.environment,
volumes=step.get_volumes())
return container
def start_container(self, container, step):
if self.pipeline.debug:
print 'Running container for step {}'.format(step)
print 'Binds: {}'.format(step.binds)
# client.start does not return anything
self.client.start(container, binds=step.binds)
def get_result(self, container, step):
logs = self.client.attach(container, stream=True, logs=True)<|fim▁hole|> all_logs = all_logs + log
print log,
# Store the return value
code = self.client.wait(container)
result['code'] = code
return result
def finish_container(self, container, step):
if self.pipeline.debug:
print 'Cleaning up container for step {}'.format(step)
if self.remove_containers:
self.client.remove_container(container)<|fim▁end|>
|
result = {'image': step.image}
print 'step: {}\nimage: {}\n==============='.format(step.name, step.image)
all_logs = str()
for log in logs:
|
<|file_name|>en.js<|end_file_name|><|fim▁begin|><|fim▁hole|>/*
Copyright (c) 2003-2015, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang( 'justify', 'en', {
block: 'Justify',
center: 'Center',
left: 'Align Left',
right: 'Align Right'
} );<|fim▁end|>
| |
<|file_name|>GeoNetworkRecordSpec.js<|end_file_name|><|fim▁begin|>/*
* Copyright 2013 IMOS
*<|fim▁hole|>
var record;
beforeEach(function() {
record = new Portal.data.GeoNetworkRecord({
abstract: 'the abstract',
links: [
{
href: 'http://geoserver.imos.org.au/geoserver/wms',
name: 'imos:radar_stations',
protocol: 'OGC:WMS-1.1.1-http-get-map',
title: 'ACORN Radar Stations',
type: 'application/vnd.ogc.wms_xml'
},
{
href: 'http://geonetwork.imos.org.au/1234',
name: 'imos:radar_stations',
protocol: 'WWW:LINK-1.0-http--metadata-URL',
title: 'ACORN Radar Stations',
type: 'text/html'
}
],
title: 'the layer title',
wmsLayer: {
server: {
uri: "server_url"
},
params: {
LAYERS: 'layer name',
CQL_FILTER: 'cql_filter'
},
someUnusedField: 'la la la'
}
});
});
describe('wms link', function() {
it('has wms link', function() {
record.get('links')[0].protocol = 'OGC:WMS-1.1.1-http-get-map';
expect(record.hasWmsLink()).toBeTruthy();
});
it('does not have wms link', function() {
record.get('links')[0].protocol = 'some protocol';
expect(record.hasWmsLink()).toBeFalsy();
});
it('does not have any links', function() {
record.set('links', undefined);
expect(record.hasWmsLink()).toBeFalsy();
});
it('get first wms link', function() {
record.get('links')[0].protocol = 'OGC:WMS-1.1.1-http-get-map';
var link = record.getFirstWmsLink();
expect(link.server.uri).toBe('http://geoserver.imos.org.au/geoserver/wms');
expect(link.protocol).toBe('OGC:WMS-1.1.1-http-get-map');
});
});
});<|fim▁end|>
|
* The AODN/IMOS Portal is distributed under the terms of the GNU General Public License
*
*/
describe('Portal.data.GeoNetworkRecord', function() {
|
<|file_name|>generator.py<|end_file_name|><|fim▁begin|># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from datetime import date
from ggrc import db
from ggrc import builder
from ggrc_workflows.models import (Workflow, TaskGroup, TaskGroupTask,
TaskGroupObject, Cycle)
from tests.ggrc.generator import Generator
import random
import copy
class WorkflowsGenerator(Generator):
def generate_workflow(self, data={}):
""" create a workflow with dict data
return: wf if it was created, or response otherwise
"""
obj_name = "workflow"
data = copy.deepcopy(data)
tgs = data.pop("task_groups", [])
wf = Workflow(title="wf " + self.random_str())
obj_dict = self.obj_to_dict(wf, obj_name)
obj_dict[obj_name].update(data)
response, workflow = self.generate(Workflow, obj_name, obj_dict)
for tg in tgs:
self.generate_task_group(workflow, tg)
return response, workflow
def generate_task_group(self, workflow=None, data={}):
if not workflow:
_, workflow = self.generate_workflow()
data = copy.deepcopy(data)
tgts = data.pop("task_group_tasks", [])<|fim▁hole|> workflow = self._session_add(workflow)
tg = TaskGroup(
title="tg " + self.random_str(),
workflow_id=workflow.id,
context_id=workflow.context.id,
contact_id=1
)
obj_dict = self.obj_to_dict(tg, obj_name)
obj_dict[obj_name].update(data)
response, task_group = self.generate(TaskGroup, obj_name, obj_dict)
for tgt in tgts:
self.generate_task_group_task(task_group, tgt)
for tgo in tgos:
self.generate_task_group_object(task_group, tgo)
return response, task_group
def generate_task_group_task(self, task_group=None, data={}):
if not task_group:
_, task_group = self.generate_task_group()
task_group = self._session_add(task_group)
default_start = self.random_date()
default_end = self.random_date(default_start, date.today())
day_range = 5 if task_group.workflow.frequency == "weekly" else 31
obj_name = "task_group_task"
tgt = TaskGroupTask(
task_group_id=task_group.id,
context_id=task_group.context.id,
title="tgt " + self.random_str(),
start_date=default_start,
end_date=default_end,
relative_start_day=random.randrange(1, day_range),
relative_start_month=random.randrange(1, 12),
relative_end_day=random.randrange(1, day_range),
relative_end_month=random.randrange(1, 12),
contact_id=1
)
obj_dict = self.obj_to_dict(tgt, obj_name)
obj_dict[obj_name].update(data)
return self.generate(TaskGroupTask, obj_name, obj_dict)
def generate_task_group_object(self, task_group=None, obj=None):
if not task_group:
_, task_group = self.generate_task_group()
task_group = self._session_add(task_group)
obj = self._session_add(obj)
obj_name = "task_group_object"
tgo = TaskGroupObject(
object_id=obj.id,
object=obj,
task_group_id=task_group.id,
context_id=task_group.context.id
)
obj_dict = self.obj_to_dict(tgo, obj_name)
return self.generate(TaskGroupObject, obj_name, obj_dict)
def generate_cycle(self, workflow=None):
if not workflow:
_, workflow = self.generate_workflow()
workflow = self._session_add(workflow) # this should be nicer
obj_name = "cycle"
obj_dict = {
obj_name: {
"workflow": {
"id": workflow.id,
"type": workflow.__class__.__name__,
"href": "/api/workflows/%d" % workflow.id
},
"context": {
"id": workflow.context.id,
"type": workflow.context.__class__.__name__,
"href": "/api/workflows/%d" % workflow.context.id
},
"autogenerate": "true"
}
}
return self.generate(Cycle, obj_name, obj_dict)
def activate_workflow(self, workflow):
workflow = self._session_add(workflow)
return self.modify_workflow(workflow, {
"status": "Active",
"recurrences": workflow.frequency != "one_time"
})
def modify_workflow(self, wf=None, data={}):
if not wf:
_, wf = self.generate_workflow()
wf = self._session_add(wf)
obj_name = "workflow"
obj_dict = builder.json.publish(wf)
builder.json.publish_representation(obj_dict)
obj_dict.update(data)
default = {obj_name: obj_dict}
response, workflow = self.modify(wf, obj_name, default)
return response, workflow
def modify_object(self, obj, data={}):
obj = self._session_add(obj)
obj_name = obj._inflector.table_singular
obj_dict = builder.json.publish(obj)
builder.json.publish_representation(obj_dict)
obj_dict.update(data)
obj_data = {obj_name: obj_dict}
response, generated_object = self.modify(obj, obj_name, obj_data)
return response, generated_object
def _session_add(self, obj):
""" Sometimes tests throw conflicting state present error."""
try:
db.session.add(obj)
return obj
except:
return obj.__class__.query.get(obj.id)<|fim▁end|>
|
tgos = data.pop("task_group_objects", [])
obj_name = "task_group"
|
<|file_name|>publishconf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
DEVELOP = False
SITEURL = 'http://blog.feltnerm.com'
FEED_DOMAIN = SITEURL
FEED_ATOM = 'feeds/atom.xml'
FEED_RSS = 'feeds/rss.xml'
FEED_ALL_ATOM = 'feeds/all.atom.xml'
FEED_ALL_RSS = 'feeds/all.rss.xml'
RELATIVE_URLS = False
DELETE_OUTPUT_DIRECTORY = True<|fim▁hole|># Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""<|fim▁end|>
| |
<|file_name|>WaveRefTest.java<|end_file_name|><|fim▁begin|>/**
* Copyright 2010 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.waveprotocol.wave.model.waveref;
import junit.framework.TestCase;
import org.waveprotocol.wave.model.id.WaveId;
import org.waveprotocol.wave.model.id.WaveletId;
/**
* Unit tests for {@link WaveRef}
*
* @author [email protected] <Edwina Mead>
*/
public class WaveRefTest extends TestCase {
public void testBasicEquals() {
WaveRef first = WaveRef.of(WaveId.of("example.com", "w+1234abcd"));
WaveRef second = WaveRef.of(WaveId.of("example.com", "w+1234abcd"));
WaveRef different = WaveRef.of(WaveId.of("test.com", "w+1234abcd"));
assertFalse(first.equals(null));
assertTrue(first.equals(first));
assertTrue(first.equals(second));
assertFalse(first.equals(different));
}
<|fim▁hole|> WaveletId.of("example.com", "conv+root"));
WaveRef third = WaveRef.of(WaveId.of("example.com", "w+1234abcd"),
WaveletId.of("example.com", "conv+root"),
"b+12345");
assertTrue(second.equals(second));
assertTrue(third.equals(third));
assertFalse(first.equals(second));
assertFalse(first.equals(third));
assertFalse(second.equals(third));
}
public void testEqualsWithDifferentWaveIdSameOtherFields() {
WaveRef first = WaveRef.of(WaveId.of("test.com", "w+1234"),
WaveletId.of("example.com", "conv+root"),
"b+12345");
WaveRef second = WaveRef.of(WaveId.of("example.com", "w+1234"),
WaveletId.of("example.com", "conv+root"),
"b+12345");
assertFalse(first.equals(second));
}
public void testHashCode() {
WaveRef first = WaveRef.of(WaveId.of("example.com", "w+1234"));
WaveRef second = WaveRef.of(WaveId.of("example.com", "w+1234"),
WaveletId.of("example.com", "conv+root"));
WaveRef third = WaveRef.of(WaveId.of("example.com", "w+1234"),
WaveletId.of("example.com", "conv+root"), "b+12345");
WaveRef sameAsFirst = WaveRef.of(WaveId.of("example.com", "w+1234"));
WaveRef sameAsThird = WaveRef.of(WaveId.of("example.com", "w+1234"),
WaveletId.of("example.com", "conv+root"), "b+12345");
assertEquals(first.hashCode(), sameAsFirst.hashCode());
assertEquals(third.hashCode(), sameAsThird.hashCode());
assertFalse(first.hashCode() == second.hashCode());
assertFalse(first.hashCode() == third.hashCode());
assertFalse(second.hashCode() == third.hashCode());
}
}<|fim▁end|>
|
public void testEqualsWithSameWaveIdDifferentOtherFields() {
WaveRef first = WaveRef.of(WaveId.of("example.com", "w+1234abcd"));
WaveRef second = WaveRef.of(WaveId.of("example.com", "w+1234abcd"),
|
<|file_name|>patterngenerator.py<|end_file_name|><|fim▁begin|>"""
PatternGenerator abstract class, basic example concrete class, and
multichannel support.
PatternGenerators support both single-channel patterns, i.e. bare
arrays, and multiple channels, such as for color images. See
``PatternGenerator.__call__`` and ``PatternGenerator.channels`` for
more information.
"""
import numpy as np
from numpy import pi
import collections
import param
from param.parameterized import ParamOverrides
from holoviews import HoloMap, Image, RGB, Dimension
from holoviews.core import BoundingBox, BoundingRegionParameter, SheetCoordinateSystem
from .transferfn import TransferFn
# CEBALERT: PatternGenerator has become a bit of a monster abstract
# class. Can it be split into the minimum required to specify the
# interface, with a subclass implementing the rest (this subclass
# still being above the rest of the PatternGenerators)? We want to
# make it easy to add new types of PatternGenerator that don't match
# the assumptions of the current ones (OneDPowerSpectrum is an example
# of a PG that doesn't match the current assumptions), but still lets
# them be used like the current ones.
# (PatternGenerator-->TwoDPatternGenerator?)
# JLALERT: PatternGenerator should have
# override_plasticity_state/restore_plasticity_state functions which
# can override the plasticity of any output_fn that has state, in case
# anyone ever uses such an object in a PatternGenerator. Will also
# need to support Composite patterns.
class PatternGenerator(param.Parameterized):
"""
A class hierarchy for callable objects that can generate 2D patterns.
Once initialized, PatternGenerators can be called to generate a
value or a matrix of values from a 2D function, typically
accepting at least x and y.
A PatternGenerator's Parameters can make use of Parameter's
precedence attribute to specify the order in which they should
appear, e.g. in a GUI. The precedence attribute has a nominal
range of 0.0 to 1.0, with ordering going from 0.0 (first) to 1.0
(last), but any value is allowed.
The orientation and layout of the pattern matrices is defined by
the SheetCoordinateSystem class, which see.
Note that not every parameter defined for a PatternGenerator will
be used by every subclass. For instance, a Constant pattern will
ignore the x, y, orientation, and size parameters, because the
pattern does not vary with any of those parameters. However,
those parameters are still defined for all PatternGenerators, even
Constant patterns, to allow PatternGenerators to be scaled, rotated,
translated, etc. uniformly.
"""
__abstract = True
bounds = BoundingRegionParameter(
default=BoundingBox(points=((-0.5,-0.5), (0.5,0.5))),precedence=-1,
doc="BoundingBox of the area in which the pattern is generated.")
xdensity = param.Number(default=256,bounds=(0,None),precedence=-1,doc="""
Density (number of samples per 1.0 length) in the x direction.""")
ydensity = param.Number(default=256,bounds=(0,None),precedence=-1,doc="""
Density (number of samples per 1.0 length) in the y direction.
Typically the same as the xdensity.""")
x = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=0.20,doc="""
X-coordinate location of pattern center.""")
y = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=0.21,doc="""
Y-coordinate location of pattern center.""")
z = param.ClassSelector(default=None, precedence=-1, class_=Dimension, doc="""
The Dimension object associated with the z-values generated by
the PatternGenerator . If None, uses the default set by
HoloViews.Image.""")
group = param.String(default='Pattern', precedence=-1, doc="""
The group name assigned to the returned HoloViews object.""")
position = param.Composite(attribs=['x','y'],precedence=-1,doc="""
Coordinates of location of pattern center.
Provides a convenient way to set the x and y parameters together
as a tuple (x,y), but shares the same actual storage as x and y
(and thus only position OR x and y need to be specified).""")
orientation = param.Number(default=0.0,softbounds=(0.0,2*pi),precedence=0.40,doc="""
Polar angle of pattern, i.e., the orientation in the Cartesian coordinate
system, with zero at 3 o'clock and increasing counterclockwise.""")
size = param.Number(default=1.0,bounds=(0.0,None),softbounds=(0.0,6.0),
precedence=0.30,doc="""Determines the overall size of the pattern.""")
scale = param.Number(default=1.0,softbounds=(0.0,2.0),precedence=0.10,doc="""
Multiplicative strength of input pattern, defaulting to 1.0""")
offset = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=0.11,doc="""
Additive offset to input pattern, defaulting to 0.0""")
mask = param.Parameter(default=None,precedence=-1,doc="""
Optional object (expected to be an array) with which to multiply the
pattern array after it has been created, before any output_fns are
applied. This can be used to shape the pattern.""")
# Note that the class type is overridden to PatternGenerator below
mask_shape = param.ClassSelector(param.Parameterized,default=None,precedence=0.06,doc="""
Optional PatternGenerator used to construct a mask to be applied to
the pattern.""")
output_fns = param.HookList(default=[], precedence=0.08,doc="""
Optional function(s) to apply to the pattern array after it has been created.
Can be used for normalization, thresholding, etc.""")
def __init__(self,**params):
super(PatternGenerator, self).__init__(**params)
self.set_matrix_dimensions(self.bounds, self.xdensity, self.ydensity)
def __call__(self,**params_to_override):
"""
Call the subclass's 'function' method on a rotated and scaled
coordinate system.
Creates and fills an array with the requested pattern. If
called without any params, uses the values for the Parameters
as currently set on the object. Otherwise, any params
specified override those currently set on the object.
"""
if 'output_fns' in params_to_override:
self.warning("Output functions specified through the call method will be ignored.")
p=ParamOverrides(self,params_to_override)
<|fim▁hole|> # position=params_to_override.get('position',None) if position
# is not None: x,y = position
self._setup_xy(p.bounds,p.xdensity,p.ydensity,p.x,p.y,p.orientation)
fn_result = self.function(p)
self._apply_mask(p,fn_result)
if p.scale != 1.0:
result = p.scale * fn_result
else:
result = fn_result
if p.offset != 0.0:
result += p.offset
for of in p.output_fns:
of(result)
return result
def __getitem__(self, coords):
value_dims = {}
if self.num_channels() in [0, 1]:
raster, data = Image, self()
value_dims = {'value_dimensions':[self.z]} if self.z else value_dims
elif self.num_channels() in [3,4]:
raster = RGB
data = np.dstack(self.channels().values()[1:])
image = raster(data, bounds=self.bounds,
**dict(group=self.group,
label=self.__class__.__name__, **value_dims))
# Works round a bug fixed shortly after HoloViews 1.0.0 release
return image if isinstance(coords, slice) else image.__getitem__(coords)
def channels(self, use_cached=False, **params_to_override):
"""
Channels() adds a shared interface for single channel and
multichannel structures. It will always return an ordered
dict: its first element is the single channel of the pattern
(if single-channel) or the channel average (if multichannel);
the successive elements are the individual channels' arrays
(key: 0,1,..N-1).
"""
return collections.OrderedDict({ 'default':self.__call__(**params_to_override) })
def num_channels(self):
"""
Query the number of channels implemented by the
PatternGenerator. In case of single-channel generators this
will return 1; in case of multichannel, it will return the
number of channels (eg, in the case of RGB images it would
return '3', Red-Green-Blue, even though the OrderedDict
returned by channels() will have 4 elements -- the 3 channels
+ their average).
"""
return 1
def _setup_xy(self,bounds,xdensity,ydensity,x,y,orientation):
"""
Produce pattern coordinate matrices from the bounds and
density (or rows and cols), and transforms them according to
x, y, and orientation.
"""
self.debug("bounds=%s, xdensity=%s, ydensity=%s, x=%s, y=%s, orientation=%s",bounds,xdensity,ydensity,x,y,orientation)
# Generate vectors representing coordinates at which the pattern
# will be sampled.
# CB: note to myself - use slice_._scs if supplied?
x_points,y_points = SheetCoordinateSystem(bounds,xdensity,ydensity).sheetcoordinates_of_matrixidx()
# Generate matrices of x and y sheet coordinates at which to
# sample pattern, at the correct orientation
self.pattern_x, self.pattern_y = self._create_and_rotate_coordinate_arrays(x_points-x,y_points-y,orientation)
def function(self,p):
"""
Function to draw a pattern that will then be scaled and rotated.
Instead of implementing __call__ directly, PatternGenerator
subclasses will typically implement this helper function used
by __call__, because that way they can let __call__ handle the
scaling and rotation for them. Alternatively, __call__ itself
can be reimplemented entirely by a subclass (e.g. if it does
not need to do any scaling or rotation), in which case this
function will be ignored.
"""
raise NotImplementedError
def _create_and_rotate_coordinate_arrays(self, x, y, orientation):
"""
Create pattern matrices from x and y vectors, and rotate them
to the specified orientation.
"""
# Using this two-liner requires that x increase from left to
# right and y decrease from left to right; I don't think it
# can be rewritten in so little code otherwise - but please
# prove me wrong.
pattern_y = np.subtract.outer(np.cos(orientation)*y, np.sin(orientation)*x)
pattern_x = np.add.outer(np.sin(orientation)*y, np.cos(orientation)*x)
return pattern_x, pattern_y
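# Worked example of the rotation above (illustrative): with orientation o,
#   pattern_x = sin(o)*y + cos(o)*x
#   pattern_y = cos(o)*y - sin(o)*x
# so for o = 0 the meshgrid is unchanged, while for o = pi/2 a sheet point
# (x, y) is sampled at (y, -x), i.e. the pattern appears rotated by 90 degrees.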
def _apply_mask(self,p,mat):
"""Create (if necessary) and apply the mask to the given matrix mat."""
mask = p.mask
ms=p.mask_shape
if ms is not None:
mask = ms(x=p.x+p.size*(ms.x*np.cos(p.orientation)-ms.y*np.sin(p.orientation)),
y=p.y+p.size*(ms.x*np.sin(p.orientation)+ms.y*np.cos(p.orientation)),
orientation=ms.orientation+p.orientation,size=ms.size*p.size,
bounds=p.bounds,ydensity=p.ydensity,xdensity=p.xdensity)
if mask is not None:
mat*=mask
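# Hedged example of the mask_shape mechanism above (Disk is an assumed
# PatternGenerator subclass, not defined here):
#   gen.mask_shape = Disk(size=0.5)
# builds a Disk mask translated and rotated along with the parent pattern and
# multiplies it into the generated array; a raw array passed as mask is applied as-is.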
def set_matrix_dimensions(self, bounds, xdensity, ydensity):
"""
Change the dimensions of the matrix into which the pattern
will be drawn. Users of this class should call this method
rather than changing the bounds, xdensity, and ydensity
parameters directly. Subclasses can override this method to
update any internal data structures that may depend on the
matrix dimensions.
"""
self.bounds = bounds
self.xdensity = xdensity
self.ydensity = ydensity
scs = SheetCoordinateSystem(bounds, xdensity, ydensity)
for of in self.output_fns:
if isinstance(of, TransferFn):
of.initialize(SCS=scs, shape=scs.shape)
def state_push(self):
"Save the state of the output functions, to be restored with state_pop."
for of in self.output_fns:
if hasattr(of,'state_push'):
of.state_push()
super(PatternGenerator, self).state_push()
def state_pop(self):
"Restore the state of the output functions saved by state_push."
for of in self.output_fns:
if hasattr(of,'state_pop'):
of.state_pop()
super(PatternGenerator, self).state_pop()
def anim(self, duration, offset=0, timestep=1,
label=None, unit=None,
time_fn=param.Dynamic.time_fn):
"""
duration: The temporal duration to animate in the units
defined on the global time function.
offset: The temporal offset from which the animation is
generated given the supplied pattern
timestep: The time interval between successive frames. The
duration must be an exact multiple of the timestep.
label: A label string to override the label of the global time
function (if not None).
unit: The unit string to override the unit value of the global
time function (if not None).
time_fn: The global time function object that is shared across
the time-varying objects that are being sampled.
Note that the offset, timestep and time_fn only affect
patterns parameterized by time-dependent number
generators. Otherwise, the frames are generated by successive
calls to the pattern, which may or may not be varying (e.g. to
view the patterns contained within a Selector).
"""
frames = (duration // timestep) + 1
if duration % timestep != 0:
raise ValueError("The duration value must be an exact multiple of the timestep.")
if label is None:
label = time_fn.label if hasattr(time_fn, 'label') else 'Time'
unit = time_fn.unit if (not unit and hasattr(time_fn, 'unit')) else unit
vmap = HoloMap(kdims=[Dimension(label, unit=unit if unit else '')])
self.state_push()
with time_fn as t:
t(offset)
for i in range(frames):
vmap[t()] = self[:]
t += timestep
self.state_pop()
return vmap
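# Usage sketch for anim() (values assumed): gen.anim(duration=10, timestep=2)
# yields 6 frames at t = 0, 2, ..., 10 and returns a HoloMap keyed by the global
# time value; state_push/state_pop ensure stateful output_fns are restored afterwards.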
## Support for compositional expressions of PatternGenerator objects
def _promote(self,other):
if not isinstance(other,PatternGenerator):
other = Constant(scale=other,offset=0)
return [self,other]
def _rpromote(self,other):
if not isinstance(other,PatternGenerator):
other = Constant(scale=other,offset=0)
return [other,self]
# Could define any of Python's operators here, esp. if they have operator or ufunc equivalents
def __add__ (self,other): return Composite(generators=self._promote(other),operator=np.add)
def __sub__ (self,other): return Composite(generators=self._promote(other),operator=np.subtract)
def __mul__ (self,other): return Composite(generators=self._promote(other),operator=np.multiply)
def __mod__ (self,other): return Composite(generators=self._promote(other),operator=np.mod)
def __pow__ (self,other): return Composite(generators=self._promote(other),operator=np.power)
def __div__ (self,other): return Composite(generators=self._promote(other),operator=np.divide)
def __and__ (self,other): return Composite(generators=self._promote(other),operator=np.minimum)
def __or__ (self,other): return Composite(generators=self._promote(other),operator=np.maximum)
def __radd__ (self,other): return Composite(generators=self._rpromote(other),operator=np.add)
def __rsub__ (self,other): return Composite(generators=self._rpromote(other),operator=np.subtract)
def __rmul__ (self,other): return Composite(generators=self._rpromote(other),operator=np.multiply)
def __rmod__ (self,other): return Composite(generators=self._rpromote(other),operator=np.mod)
def __rpow__ (self,other): return Composite(generators=self._rpromote(other),operator=np.power)
def __rdiv__ (self,other): return Composite(generators=self._rpromote(other),operator=np.divide)
def __rand__ (self,other): return Composite(generators=self._rpromote(other),operator=np.minimum)
def __ror__ (self,other): return Composite(generators=self._rpromote(other),operator=np.maximum)
def __neg__ (self): return Composite(generators=[Constant(scale=0),self],operator=np.subtract)
class abs_first(object):
@staticmethod
def reduce(x): return np.abs(x[0])
def __abs__ (self): return Composite(generators=[self],operator=self.abs_first)
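# Sketch of the compositional interface above (Gaussian and Disk are assumed
# subclasses defined elsewhere):
#   combined = Gaussian() + 0.5*Disk()
# builds a Composite using np.add; the scalar 0.5 is promoted to a Constant
# pattern, so generators and numbers can be mixed freely in expressions.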
def pil(self, **params_to_override):
"""Returns a PIL image for this pattern, overriding parameters if provided."""
from PIL.Image import fromarray
nchans = self.num_channels()
if nchans in [0, 1]:
mode, arr = None, self(**params_to_override)
arr = (255.0 / arr.max() * (arr - arr.min())).astype(np.uint8)
elif nchans in [3,4]:
mode = 'RGB' if nchans==3 else 'RGBA'
arr = np.dstack(list(self.channels(**params_to_override).values())[1:])
arr = (255.0*arr).astype(np.uint8)
else:
raise ValueError("Unsupported number of channels")
return fromarray(arr, mode)
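# Hedged usage sketch for pil() (file name assumed):
#   img = gen.pil()          # grayscale, RGB or RGBA PIL image scaled to 0-255
#   img.save('pattern.png')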
# Override class type; must be set here rather than when mask_shape is declared,
# to avoid referring to class not yet constructed
PatternGenerator.params('mask_shape').class_=PatternGenerator
# Trivial example of a PatternGenerator, provided for when a default is
# needed. The other concrete PatternGenerator classes are stored
# elsewhere, to be imported as needed.
class Constant(PatternGenerator):
"""Constant pattern generator, i.e., a solid, uniform field of the same value."""
# The orientation is ignored, so we don't show it in
# auto-generated lists of parameters (e.g. in the GUI)
orientation = param.Number(precedence=-1)
# Optimization: We use a simpler __call__ method here to skip the
# coordinate transformations (which would have no effect anyway)
def __call__(self,**params_to_override):
p = ParamOverrides(self,params_to_override)
shape = SheetCoordinateSystem(p.bounds,p.xdensity,p.ydensity).shape
result = p.scale*np.ones(shape, float)+p.offset
self._apply_mask(p,result)
for of in p.output_fns:
of(result)
return result
class CompositeBase(PatternGenerator):
"""
PatternGenerator that combines or selects from a list of other
PatternGenerators.
"""
__abstract=True
generators = param.List(class_=PatternGenerator,default=[Constant(scale=0.0)],
bounds=(1,None),precedence=0.97, doc="""
List of patterns to combine or select from. The default pattern is a blank pattern,
and thus should be overridden for any useful work.""")
size = param.Number(default=1.0,doc="""Scaling factor applied to all sub-patterns.""")
class Composite(CompositeBase):
"""
PatternGenerator that accepts a list of other PatternGenerators.
To create a new pattern, asks each of the PatternGenerators in the
list to create a pattern, then it combines the patterns to create
a single pattern that it returns.
"""
# The Accum_Replace operator from LISSOM is not yet supported,
# but it should be added once PatternGenerator bounding boxes
# are respected and/or GenericImage patterns support transparency.
operator = param.Parameter(np.maximum,precedence=0.98,doc="""
Binary Numpy function used to combine the individual patterns.
Any binary Numpy array "ufunc" returning the same
type of array as the operands and supporting the reduce
operator is allowed here. Supported ufuncs include::
add
subtract
multiply
divide
maximum
minimum
remainder
power
The most useful ones are probably add and maximum, but there
are uses for at least some of the others as well (e.g. to
remove pieces of other patterns).
You can also write your own operators, by making a class that
has a static method named "reduce" that returns an array of the
same size and type as the arrays in the list. For example::
class return_first(object):
@staticmethod
def reduce(x):
return x[0]
""")
def _advance_pattern_generators(self,p):
"""
Subclasses can override this method to provide constraints on
the values of generators' parameters and/or eliminate
generators from this list if necessary.
"""
return p.generators
def state_push(self):
"""
Push the state of all generators
"""
super(Composite,self).state_push()
for gen in self.generators:
gen.state_push()
def state_pop(self):
"""
Pop the state of all generators
"""
super(Composite,self).state_pop()
for gen in self.generators:
gen.state_pop()
# JABALERT: To support large numbers of patterns on a large input region,
# should be changed to evaluate each pattern in a small box, and then
# combine them at the full Composite Bounding box size.
def function(self,p):
"""Constructs combined pattern out of the individual ones."""
generators = self._advance_pattern_generators(p)
assert hasattr(p.operator,'reduce'),repr(p.operator)+" does not support 'reduce'."
# CEBALERT: mask gets applied by all PGs including the Composite itself
# (leads to redundant calculations in current lissom_oo_or usage, but
# will lead to problems/limitations in the future).
patterns = [pg(xdensity=p.xdensity,ydensity=p.ydensity,
bounds=p.bounds,mask=p.mask,
x=p.x+p.size*(pg.x*np.cos(p.orientation)- pg.y*np.sin(p.orientation)),
y=p.y+p.size*(pg.x*np.sin(p.orientation)+ pg.y*np.cos(p.orientation)),
orientation=pg.orientation+p.orientation,
size=pg.size*p.size)
for pg in generators]
image_array = p.operator.reduce(patterns)
return image_array
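# Illustrative explicit use of Composite, equivalent to the operator overloads
# above (Gaussian and Disk are assumed subclasses):
#   comp = Composite(generators=[Gaussian(), Disk()], operator=np.maximum)
#   arr = comp(xdensity=50, ydensity=50)
# Each sub-generator is drawn at its own offset/orientation relative to the
# Composite, and the resulting arrays are reduced with the chosen ufunc.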
class ChannelTransform(param.Parameterized):
"""
A ChannelTransform is a callable object that takes channels as
input (an ordered dictionary of arrays) and transforms their
contents in some way before returning them.
"""
__abstract = True
def __call__(self, channels):
raise NotImplementedError
# Example of a ChannelTransform
class CorrelateChannels(ChannelTransform):
"""
Correlate channels by mixing a fraction of one channel into another.
"""
from_channel = param.Number(default=1, doc="""
Name of the channel to take data from.""")
to_channel = param.Number(default=2, doc="""
Name of the channel to change data of.""")
strength = param.Number(default=0, doc="""
Strength of the correlation to add, with 0 being no change,
and 1.0 overwriting to_channel with from_channel.""")
def __call__(self, channel_data):
channel_data[self.to_channel] = \
self.strength*channel_data[self.from_channel] + \
(1-self.strength)*channel_data[self.to_channel]
return channel_data
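# Hedged example (values assumed): mixing 30% of channel 1 into channel 2 of a
# multichannel generator:
#   gen.channel_transforms = [CorrelateChannels(from_channel=1, to_channel=2, strength=0.3)]
# After the transform, channel 2 holds 0.3*channel_1 + 0.7*channel_2.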
class ChannelGenerator(PatternGenerator):
"""
Abstract base class for patterns supporting multiple channels natively.
"""
__abstract = True
channel_transforms = param.HookList(class_=ChannelTransform,default=[],doc="""
Optional functions to apply post processing to the set of channels.""")
def __init__(self, **params):
self._original_channel_data = [] # channel data before processing
self._channel_data = [] # channel data after processing
super(ChannelGenerator, self).__init__(**params)
def channels(self, use_cached=False, **params_to_override):
res = collections.OrderedDict()
if not use_cached:
default = self(**params_to_override)
res['default'] = default
else:
res['default'] = None
for i in range(len(self._channel_data)):
res[i] = self._channel_data[i]
return res
def num_channels(self):
return len(self._channel_data)
class ComposeChannels(ChannelGenerator):
"""
Create a multi-channel PatternGenerator from a list of
PatternGenerators, with the specified channel_transforms applied.
"""
generators = param.List(class_=PatternGenerator,default=[Constant(scale=0.0)],
bounds=(1,None), doc="""
List of patterns to use for each channel. Generators which already have more than one
channel will only contribute to a single channel of ComposeChannels.""")
def __init__(self,**params):
super(ComposeChannels,self).__init__(**params)
for i in range(len(self.generators)):
self._channel_data.append( None )
def __call__(self,**params):
# Generates all channels, then returns the default channel
p = param.ParamOverrides(self,params)
params['xdensity']=p.xdensity
params['ydensity']=p.ydensity
params['bounds']=p.bounds
# (not **p)
for i in range(len(p.generators)):
self._channel_data[i] = p.generators[i]( **params )
for c in self.channel_transforms:
self._channel_data = c(self._channel_data)
return sum(act for act in self._channel_data)/len(self._channel_data)<|fim▁end|>
|
# CEBERRORALERT: position parameter is not currently
# supported. We should delete the position parameter or fix
# this.
#
|
<|file_name|>IniWriter.java<|end_file_name|><|fim▁begin|>/*
* #%L
* OME SCIFIO package for reading and converting scientific file formats.
* %%
* Copyright (C) 2005 - 2012 Open Microscopy Environment:
* - Board of Regents of the University of Wisconsin-Madison
* - Glencoe Software, Inc.
* - University of Dundee
* %%
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of any organization.
* #L%<|fim▁hole|>
package loci.common;
import java.io.IOException;
/**
* A legacy delegator class for ome.scifio.common.IniWriter.
*
* <dl><dt><b>Source code:</b></dt>
* <dd><a href="http://trac.openmicroscopy.org.uk/ome/browser/bioformats.git/components/common/src/loci/common/IniWriter.java">Trac</a>,
* <a href="http://git.openmicroscopy.org/?p=bioformats.git;a=blob;f=components/common/src/loci/common/IniWriter.java;hb=HEAD">Gitweb</a></dd></dl>
*
* @author Melissa Linkert melissa at glencoesoftware.com
*/
public class IniWriter {
// -- Fields --
private ome.scifio.common.IniWriter writer;
// -- Constructor --
public IniWriter() {
writer = new ome.scifio.common.IniWriter();
}
// -- IniWriter API methods --
/**
* Saves the given IniList to the given file.
* If the given file already exists, then the IniList will be appended.
*/
public void saveINI(IniList ini, String path) throws IOException {
writer.saveINI(ini.list, path);
}
/** Saves the given IniList to the given file, appending or overwriting per the append flag. */
public void saveINI(IniList ini, String path, boolean append)
throws IOException
{
writer.saveINI(ini.list, path, append);
}
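// Usage sketch (assumes an IniList named "ini" populated elsewhere):
//   IniWriter writer = new IniWriter();
//   writer.saveINI(ini, "/tmp/settings.ini");         // appends if the file exists
//   writer.saveINI(ini, "/tmp/settings.ini", false);  // pass append=false to overwrite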
// -- Object delegators --
@Override
public boolean equals(Object obj) {
return writer.equals(obj);
}
@Override
public int hashCode() {
return writer.hashCode();
}
@Override
public String toString() {
return writer.toString();
}
}<|fim▁end|>
|
*/
|
<|file_name|>test_netapp_e_auditlog.py<|end_file_name|><|fim▁begin|># (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.modules.storage.netapp.netapp_e_auditlog import AuditLog
from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
__metaclass__ = type
from units.compat import mock
class AuditLogTests(ModuleTestCase):
REQUIRED_PARAMS = {'api_username': 'rw',
'api_password': 'password',
'api_url': 'http://localhost',
'ssid': '1'}
REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_auditlog.request'
MAX_RECORDS_MAXIMUM = 50000
MAX_RECORDS_MINIMUM = 100
def _set_args(self, **kwargs):
module_args = self.REQUIRED_PARAMS.copy()
if kwargs is not None:
module_args.update(kwargs)
set_module_args(module_args)
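# The tests below share one pattern (sketch; values are illustrative): build the
# module arguments with _set_args(), then patch the REST helper so AuditLog()
# never contacts a real array:
#   self._set_args(max_records=1000, log_level="writeOnly",
#                  full_policy="overWrite", threshold=90)
#   with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
#       audit_log = AuditLog()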
def test_max_records_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM, 25000, self.MAX_RECORDS_MAXIMUM)
for max_records in max_records_set:
initial["max_records"] = max_records
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.max_records == max_records)
def test_max_records_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM - 1, self.MAX_RECORDS_MAXIMUM + 1)
for max_records in max_records_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log max_records count must be between 100 and 50000"):
initial["max_records"] = max_records
self._set_args(**initial)
AuditLog()
def test_threshold_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (60, 75, 90)
for threshold in threshold_set:
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.threshold == threshold)
def test_threshold_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (59, 91)
for threshold in threshold_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log percent threshold must be between 60 and 90"):
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
AuditLog()
def test_is_proxy_pass(self):
"""Verify that True is returned when proxy is used to communicate with storage."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"api_url": "https://10.1.1.10/devmgr/v2"}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
self.assertTrue(audit_log.is_proxy())
def test_is_proxy_fail(self):
"""Verify that AnsibleJsonFail exception is thrown when exception occurs."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the webservices about information"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.is_proxy()
def test_get_configuration_pass(self):
"""Validate get configuration does not throw exception when normal request is returned."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
expected = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, expected)):
body = audit_log.get_configuration()
self.assertTrue(body == expected)
def test_get_configuration_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the audit-log configuration!"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.get_configuration()
def test_build_configuration_pass(self):
"""Validate configuration changes will force an update."""
response = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
changes = [{"max_records": 50000},
{"log_level": "all"},
{"full_policy": "preventSystemAccess"},
{"threshold": 75}]
for change in changes:
initial_with_changes = initial.copy()
initial_with_changes.update(change)
self._set_args(**initial_with_changes)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, response)):
update = audit_log.build_configuration()
self.assertTrue(update)
def test_delete_log_messages_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}<|fim▁hole|> audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete audit-log messages!"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.delete_log_messages()
def test_update_configuration_delete_pass(self):
"""Verify 422 and force successfully returns True."""
body = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 2000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"force": True}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, side_effect=[(200, body),
(422, {u"invalidFieldsIfKnown": None,
u"errorMessage": u"Configuration change...",
u"localizedMessage": u"Configuration change...",
u"retcode": u"auditLogImmediateFullCondition",
u"codeType": u"devicemgrerror"}),
(200, None),
(200, None)]):
self.assertTrue(audit_log.update_configuration())
def test_update_configuration_delete_skip_fail(self):
"""Verify 422 and no force results in AnsibleJsonFail exception."""
body = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 2000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"force": False}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to update audit-log configuration!"):
with mock.patch(self.REQ_FUNC, side_effect=[(200, body), Exception(422, {"errorMessage": "error"}),
(200, None), (200, None)]):
audit_log.update_configuration()<|fim▁end|>
|
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
|
<|file_name|>test_indicatortools_Tempo__to_markup.py<|end_file_name|><|fim▁begin|># -*- encoding: utf -*-
from abjad import *
def test_indicatortools_Tempo__to_markup_01():
tempo = Tempo(Duration(1, 4), 60)
markup = tempo._to_markup()<|fim▁hole|> markup,
r'''
\markup {
\fontsize
#-6
\general-align
#Y
#DOWN
\note-by-number
#2
#0
#1
\upright
{
=
60
}
}
'''
), format(markup)
def test_indicatortools_Tempo__to_markup_02():
tempo = Tempo(Duration(3, 8), 60)
markup = tempo._to_markup()
assert systemtools.TestManager.compare(
markup,
r'''
\markup {
\fontsize
#-6
\general-align
#Y
#DOWN
\note-by-number
#3
#1
#1
\upright
{
=
60
}
}
'''
), format(markup)<|fim▁end|>
|
assert systemtools.TestManager.compare(
|
<|file_name|>Gruntfile.js<|end_file_name|><|fim▁begin|>Famono.scope('famous/surfaces/Gruntfile', ["load-grunt-tasks","time-grunt"], function(require, define) {
define(function() {
/*global module:false*/
/*Generated initially from grunt-init, heavily inspired by yo webapp*/
module.exports = function(grunt) {
'use strict';
// Load grunt tasks automatically
require('load-grunt-tasks')(grunt);
// Time how long tasks take. Can help when optimizing build times
require('time-grunt')(grunt);
// Project configuration.
grunt.initConfig({
eslint: {
options: {
config: '.eslintrc'
},
target: ['*.js']
},
jscs: {
src: ['*.js'],
options: {
config: '.jscsrc'
}
}
});
grunt.registerTask('test', [
'jscs',
'eslint'
]);
grunt.registerTask('default', [
'test'
]);
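// Assumed CLI usage: "grunt" runs the default task, which runs "test"
// (JSCS + ESLint against the .jscsrc/.eslintrc configs above).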
};
});<|fim▁hole|><|fim▁end|>
|
});
|
<|file_name|>blog_tags.py<|end_file_name|><|fim▁begin|>from ..models import Post, Category, Tag
from django.db.models.aggregates import Count
from django import template
register = template.Library()
# Recent posts
@register.simple_tag
def get_recent_posts(num=9):
return Post.objects.all().order_by('-modified_time')[:num]
# Monthly archives
@register.simple_tag
def archives():
return Post.objects.dates('created_time', 'month', order='DESC')
# Category archives
@register.simple_tag
def get_categories():
return Category.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
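# Template usage sketch (template names assumed, not part of this module):
#   {% load blog_tags %}
#   {% get_recent_posts 5 as recent_post_list %}
#   {% get_categories as category_list %}
# Each simple_tag returns a queryset for the template to iterate over; on older
# Django versions the "as" form required assignment_tag instead of simple_tag.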
<|fim▁hole|># Tag cloud
@register.simple_tag
def get_tags():
return Tag.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)<|fim▁end|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.