file_name | prefix | suffix | middle
---|---|---|---|
dbstore_test.go | // Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state
import (
"bytes"
"errors"
"io/ioutil"
"os"
"strings"
"testing"
)
var ErrInvalidArraySize = errors.New("invalid byte array size")
var ErrInvalidValuePersisted = errors.New("invalid value was persisted to the db")
type SerializingType struct {
key string
value string
}
func (st *SerializingType) MarshalBinary() (data []byte, err error) {
d := []byte(strings.Join([]string{st.key, st.value}, ";"))
return d, nil
}
func (st *SerializingType) UnmarshalBinary(data []byte) (err error) {
d := bytes.Split(data, []byte(";"))
l := len(d)
if l == 0 {
return ErrInvalidArraySize
}
if l == 2 {
keyLen := len(d[0]) | valLen := len(d[1])
st.value = string(d[1][:valLen])
}
return nil
}
// TestDBStore tests basic functionality of DBStore.
func TestDBStore(t *testing.T) {
dir, err := ioutil.TempDir("", "db_store_test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
store, err := NewDBStore(dir)
if err != nil {
t.Fatal(err)
}
testStore(t, store)
store.Close()
persistedStore, err := NewDBStore(dir)
if err != nil {
t.Fatal(err)
}
defer persistedStore.Close()
testPersistedStore(t, persistedStore)
}
func testStore(t *testing.T, store Store) {
ser := &SerializingType{key: "key1", value: "value1"}
jsonify := []string{"a", "b", "c"}
err := store.Put(ser.key, ser)
if err != nil {
t.Fatal(err)
}
err = store.Put("key2", jsonify)
if err != nil {
t.Fatal(err)
}
}
func testPersistedStore(t *testing.T, store Store) {
ser := &SerializingType{}
err := store.Get("key1", ser)
if err != nil {
t.Fatal(err)
}
if ser.key != "key1" || ser.value != "value1" {
t.Fatal(ErrInvalidValuePersisted)
}
as := []string{}
err = store.Get("key2", &as)
if err != nil {
t.Fatal(err)
}
if len(as) != 3 {
t.Fatalf("serialized array did not match expectation")
}
if as[0] != "a" || as[1] != "b" || as[2] != "c" {
t.Fatalf("elements serialized did not match expected values")
}
} | st.key = string(d[0][:keyLen])
|
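The SerializingType above round-trips through a ';'-joined byte encoding. Below is a minimal sketch of that round trip, assuming "fmt" is added to the file's imports; the example function name is illustrative and not part of the original file.

// A minimal round-trip sketch for the SerializingType defined above.
// Assumes "fmt" is imported; the function name is illustrative only.
func ExampleSerializingTypeRoundTrip() {
	original := &SerializingType{key: "key1", value: "value1"}
	data, _ := original.MarshalBinary() // encodes to []byte("key1;value1")

	restored := &SerializingType{}
	if err := restored.UnmarshalBinary(data); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Printf("%s=%s\n", restored.key, restored.value)
	// Output: key1=value1
}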
tasks.py | from __future__ import absolute_import
import json
import logging
from datetime import datetime
from threading import Thread
from tornado import web
from tornado import gen
from tornado.escape import json_decode
from tornado.web import HTTPError
from celery import states
from celery.result import AsyncResult
from celery.contrib.abortable import AbortableAsyncResult
from celery.backends.base import DisabledBackend
from ..utils import tasks
from ..views import BaseHandler
from ..utils.broker import Broker
from ..api.control import ControlHandler
logger = logging.getLogger(__name__)
class BaseTaskHandler(BaseHandler):
def get_task_args(self):
try:
body = self.request.body
options = json_decode(body) if body else {}
except ValueError as e:
raise HTTPError(400, str(e))
args = options.pop('args', [])
kwargs = options.pop('kwargs', {})
if not isinstance(args, (list, tuple)):
raise HTTPError(400, 'args must be an array')
return args, kwargs, options
@staticmethod
def backend_configured(result):
return not isinstance(result.backend, DisabledBackend)
def write_error(self, status_code, **kwargs):
self.set_status(status_code)
def update_response_result(self, response, result):
if result.state == states.FAILURE:
response.update({'result': self.safe_result(result.result),
'traceback': result.traceback})
else:
response.update({'result': self.safe_result(result.result)})
def normalize_options(self, options):
if 'eta' in options:
options['eta'] = datetime.strptime(options['eta'],
self.DATE_FORMAT)
if 'countdown' in options:
options['countdown'] = float(options['countdown'])
if 'expires' in options:
expires = options['expires']
try:
expires = float(expires)
except ValueError:
expires = datetime.strptime(expires, self.DATE_FORMAT)
options['expires'] = expires
def safe_result(self, result):
"returns json encodable result"
try:
json.dumps(result)
except TypeError:
return repr(result)
else:
return result
class TaskApply(BaseTaskHandler):
@web.authenticated
@web.asynchronous
def post(self, taskname):
"""
Execute a task by name and wait for results
**Example request**:
.. sourcecode:: http
POST /api/task/apply/tasks.add HTTP/1.1
Accept: application/json
Accept-Encoding: gzip, deflate, compress
Content-Length: 16
Content-Type: application/json; charset=utf-8
Host: localhost:5555
{
"args": [1, 2]
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Length: 71
Content-Type: application/json; charset=UTF-8
{
"state": "SUCCESS",
"task-id": "c60be250-fe52-48df-befb-ac66174076e6",
"result": 3
}
:query args: a list of arguments
:query kwargs: a dictionary of arguments
:reqheader Authorization: optional OAuth token to authenticate
:statuscode 200: no error
:statuscode 401: unauthorized request
:statuscode 404: unknown task
"""
args, kwargs, options = self.get_task_args()
logger.debug("Invoking a task '%s' with '%s' and '%s'",
taskname, args, kwargs)
try:
task = self.capp.tasks[taskname]
except KeyError:
raise HTTPError(404, "Unknown task '%s'" % taskname)
try:
self.normalize_options(options)
except ValueError:
raise HTTPError(400, 'Invalid option')
result = task.apply_async(args=args, kwargs=kwargs, **options)
response = {'task-id': result.task_id}
# In Tornado, to avoid blocking the event loop, we must return results
# from another thread via self.finish()
th = Thread(target=self.wait_results, args=(result, response, ))
th.start()
# So just exit
def wait_results(self, result, response):
# Wait until the task finishes and do not raise anything
result.get(propagate=False)
# Write results and finish async function
self.update_response_result(response, result)
if self.backend_configured(result):
response.update(state=result.state)
self.finish(response)
class TaskAsyncApply(BaseTaskHandler):
DATE_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
@web.authenticated
def post(self, taskname):
"""
Execute a task
**Example request**:
.. sourcecode:: http
POST /api/task/async-apply/tasks.add HTTP/1.1
Accept: application/json
Accept-Encoding: gzip, deflate, compress
Content-Length: 16
Content-Type: application/json; charset=utf-8
Host: localhost:5555
{
"args": [1, 2]
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Length: 71
Content-Type: application/json; charset=UTF-8
Date: Sun, 13 Apr 2014 15:55:00 GMT
{
"state": "PENDING",
"task-id": "abc300c7-2922-4069-97b6-a635cc2ac47c"
}
:query args: a list of arguments
:query kwargs: a dictionary of arguments
:query options: a dictionary of `apply_async` keyword arguments
:reqheader Authorization: optional OAuth token to authenticate
:statuscode 200: no error
:statuscode 401: unauthorized request
:statuscode 404: unknown task
"""
args, kwargs, options = self.get_task_args()
logger.debug("Invoking a task '%s' with '%s' and '%s'",
taskname, args, kwargs)
try:
task = self.capp.tasks[taskname]
except KeyError:
raise HTTPError(404, "Unknown task '%s'" % taskname)
try:
self.normalize_options(options)
except ValueError:
raise HTTPError(400, 'Invalid option')
result = task.apply_async(args=args, kwargs=kwargs, **options)
response = {'task-id': result.task_id}
if self.backend_configured(result):
response.update(state=result.state)
self.write(response)
class TaskSend(BaseTaskHandler):
@web.authenticated
def post(self, taskname):
"""
Execute a task by name (doesn't require task sources)
**Example request**:
.. sourcecode:: http
POST /api/task/send-task/tasks.add HTTP/1.1
Accept: application/json
Accept-Encoding: gzip, deflate, compress
Content-Length: 16
Content-Type: application/json; charset=utf-8
Host: localhost:5555
{
"args": [1, 2]
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Length: 71
Content-Type: application/json; charset=UTF-8
{
"state": "SUCCESS",
"task-id": "c60be250-fe52-48df-befb-ac66174076e6"
}
:query args: a list of arguments
:query kwargs: a dictionary of arguments
:reqheader Authorization: optional OAuth token to authenticate
:statuscode 200: no error
:statuscode 401: unauthorized request
:statuscode 404: unknown task
"""
args, kwargs, options = self.get_task_args()
logger.debug("Invoking task '%s' with '%s' and '%s'",
taskname, args, kwargs)
result = self.capp.send_task(
taskname, args=args, kwargs=kwargs, **options)
response = {'task-id': result.task_id}
if self.backend_configured(result):
response.update(state=result.state)
self.write(response)
class TaskResult(BaseTaskHandler):
@web.authenticated
def get(self, taskid):
"""
Get a task result
**Example request**:
.. sourcecode:: http
GET /api/task/result/c60be250-fe52-48df-befb-ac66174076e6 HTTP/1.1
Host: localhost:5555
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Length: 84
Content-Type: application/json; charset=UTF-8
{
"result": 3,
"state": "SUCCESS",
"task-id": "c60be250-fe52-48df-befb-ac66174076e6"
}
:query timeout: how long to wait, in seconds, before the operation times out
:reqheader Authorization: optional OAuth token to authenticate
:statuscode 200: no error
:statuscode 401: unauthorized request
:statuscode 503: result backend is not configured
"""
timeout = self.get_argument('timeout', None)
timeout = float(timeout) if timeout is not None else None
result = AsyncResult(taskid)
if not self.backend_configured(result):
raise HTTPError(503)
response = {'task-id': taskid, 'state': result.state}
if timeout:
result.get(timeout=timeout, propagate=False)
self.update_response_result(response, result)
elif result.ready():
self.update_response_result(response, result)
self.write(response)
class TaskAbort(BaseTaskHandler):
@web.authenticated
def post(self, taskid):
"""
Abort a running task
**Example request**:
.. sourcecode:: http
POST /api/task/abort/c60be250-fe52-48df-befb-ac66174076e6 HTTP/1.1
Host: localhost:5555
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Length: 61
Content-Type: application/json; charset=UTF-8
{
"message": "Aborted '1480b55c-b8b2-462c-985e-24af3e9158f9'"
}
:reqheader Authorization: optional OAuth token to authenticate | logger.info("Aborting task '%s'", taskid)
result = AbortableAsyncResult(taskid)
if not self.backend_configured(result):
raise HTTPError(503)
result.abort()
self.write(dict(message="Aborted '%s'" % taskid))
class GetQueueLengths(BaseTaskHandler):
@web.authenticated
@gen.coroutine
def get(self):
app = self.application
broker_options = self.capp.conf.BROKER_TRANSPORT_OPTIONS
http_api = None
if app.transport == 'amqp' and app.options.broker_api:
http_api = app.options.broker_api
broker = Broker(app.capp.connection().as_uri(include_password=True),
http_api=http_api, broker_options=broker_options)
queue_names = ControlHandler.get_active_queue_names()
if not queue_names:
queue_names = set([self.capp.conf.CELERY_DEFAULT_QUEUE])
queues = yield broker.queues(sorted(queue_names))
self.write({'active_queues': queues})
class ListTasks(BaseTaskHandler):
@web.authenticated
def get(self):
"""
List tasks
**Example request**:
.. sourcecode:: http
GET /api/tasks HTTP/1.1
Host: localhost:5555
User-Agent: HTTPie/0.8.0
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Length: 1109
Content-Type: application/json; charset=UTF-8
Etag: "b2478118015c8b825f7b88ce6b660e5449746c37"
Server: TornadoServer/3.1.1
{
"e42ceb2d-8730-47b5-8b4d-8e0d2a1ef7c9": {
"args": "[3, 4]",
"client": null,
"clock": 1079,
"eta": null,
"exception": null,
"exchange": null,
"expires": null,
"failed": null,
"kwargs": "{}",
"name": "tasks.add",
"received": 1398505411.107885,
"result": "'7'",
"retried": null,
"retries": 0,
"revoked": null,
"routing_key": null,
"runtime": 0.01610181899741292,
"sent": null,
"started": 1398505411.108985,
"state": "SUCCESS",
"succeeded": 1398505411.124802,
"timestamp": 1398505411.124802,
"traceback": null,
"uuid": "e42ceb2d-8730-47b5-8b4d-8e0d2a1ef7c9"
},
"f67ea225-ae9e-42a8-90b0-5de0b24507e0": {
"args": "[1, 2]",
"client": null,
"clock": 1042,
"eta": null,
"exception": null,
"exchange": null,
"expires": null,
"failed": null,
"kwargs": "{}",
"name": "tasks.add",
"received": 1398505395.327208,
"result": "'3'",
"retried": null,
"retries": 0,
"revoked": null,
"routing_key": null,
"runtime": 0.012884548006695695,
"sent": null,
"started": 1398505395.3289,
"state": "SUCCESS",
"succeeded": 1398505395.341089,
"timestamp": 1398505395.341089,
"traceback": null,
"uuid": "f67ea225-ae9e-42a8-90b0-5de0b24507e0"
}
}
:query limit: maximum number of tasks
:query workername: filter task by workername
:query taskname: filter tasks by taskname
:query state: filter tasks by state
:reqheader Authorization: optional OAuth token to authenticate
:statuscode 200: no error
:statuscode 401: unauthorized request
"""
app = self.application
limit = self.get_argument('limit', None)
worker = self.get_argument('workername', None)
type = self.get_argument('taskname', None)
state = self.get_argument('state', None)
limit = limit and int(limit)
worker = worker if worker != 'All' else None
type = type if type != 'All' else None
state = state if state != 'All' else None
result = []
for task_id, task in tasks.iter_tasks(
app.events, limit=limit, type=type,
worker=worker, state=state):
task = tasks.as_dict(task)
task.pop('worker', None)
result.append((task_id, task))
self.write(dict(result))
class ListTaskTypes(BaseTaskHandler):
@web.authenticated
def get(self):
"""
List (seen) task types
**Example request**:
.. sourcecode:: http
GET /api/task/types HTTP/1.1
Host: localhost:5555
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Length: 44
Content-Type: application/json; charset=UTF-8
{
"task-types": [
"tasks.add",
"tasks.sleep"
]
}
:reqheader Authorization: optional OAuth token to authenticate
:statuscode 200: no error
:statuscode 401: unauthorized request
"""
seen_task_types = self.application.events.state.task_types()
response = {}
response['task-types'] = seen_task_types
self.write(response)
class TaskInfo(BaseTaskHandler):
@web.authenticated
def get(self, taskid):
"""
Get task info
**Example request**:
.. sourcecode:: http
GET /api/task/info/91396550-c228-4111-9da4-9d88cfd5ddc6 HTTP/1.1
Accept: */*
Accept-Encoding: gzip, deflate, compress
Host: localhost:5555
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Length: 575
Content-Type: application/json; charset=UTF-8
{
"args": "[2, 2]",
"client": null,
"clock": 25,
"eta": null,
"exception": null,
"exchange": null,
"expires": null,
"failed": null,
"kwargs": "{}",
"name": "tasks.add",
"received": 1400806241.970742,
"result": "'4'",
"retried": null,
"retries": null,
"revoked": null,
"routing_key": null,
"runtime": 2.0037889280356467,
"sent": null,
"started": 1400806241.972624,
"state": "SUCCESS",
"succeeded": 1400806243.975336,
"task-id": "91396550-c228-4111-9da4-9d88cfd5ddc6",
"timestamp": 1400806243.975336,
"traceback": null,
"worker": "celery@worker1"
}
:reqheader Authorization: optional OAuth token to authenticate
:statuscode 200: no error
:statuscode 401: unauthorized request
:statuscode 404: unknown task
"""
task = tasks.get_task_by_id(self.application.events, taskid)
if not task:
raise HTTPError(404, "Unknown task '%s'" % taskid)
response = {}
for name in task._fields:
if name not in ['uuid', 'worker']:
response[name] = getattr(task, name, None)
response['task-id'] = task.uuid
if task.worker is not None:
response['worker'] = task.worker.hostname
self.write(response) | :statuscode 200: no error
:statuscode 401: unauthorized request
:statuscode 503: result backend is not configured
""" |
sigfig.test.ts | import * as fc from 'fast-check';
import { expect, test } from 'vitest';
import sigfig from '~/index.js';
test('rounds correctly', () => { | const num1 = '3.10194';
const num1Expected = [
[1, '3'],
[2, '3.1'],
[3, '3.10'],
[4, '3.102'],
[5, '3.1019'],
[6, '3.10194'],
[7, '3.101940'],
[8, '3.1019400'],
] as const;
for (const [numSigfigs, result] of num1Expected) {
expect(sigfig(num1, numSigfigs)).toEqual(result);
expect(sigfig(result)).toEqual(numSigfigs);
}
const num2 = '.1509';
const num2Expected = [
[1, '0.2'],
[2, '0.15'],
[3, '0.151'],
[4, '0.1509'],
[5, '0.15090'],
[6, '0.150900'],
] as const;
for (const [numSigfigs, result] of num2Expected) {
expect(sigfig(num2, numSigfigs)).toEqual(result);
expect(sigfig(result)).toEqual(numSigfigs);
}
for (const num of ['429', '429.']) {
expect(sigfig(num)).toEqual(3);
expect(sigfig(num, 1)).toEqual('400');
expect(sigfig(num, 2)).toEqual('430');
expect(sigfig(num, 3)).toEqual('429');
expect(sigfig(num, 4)).toEqual('429.0');
expect(sigfig(num, 5)).toEqual('429.00');
}
expect(sigfig('0')).toEqual(1);
expect(sigfig('0.')).toEqual(1);
expect(sigfig('.0')).toEqual(1);
expect(sigfig('0.0')).toEqual(1);
expect(sigfig('0.00')).toEqual(2);
expect(sigfig('0', 1)).toEqual('0');
expect(sigfig('0', 2)).toEqual('0.00');
expect(sigfig('0.00000000001')).toEqual(1);
expect(sigfig('0.09500000000000008', 1)).toEqual('0.1');
expect(sigfig('0.000109', 2)).toEqual('0.00011');
expect(sigfig('1.0430812624750985e-7', 15)).toEqual(
'0.000000104308126247510'
);
expect(sigfig('0.04760919500000005', 7)).toEqual('0.04760920');
expect(sigfig('0.9500000029802322', 1)).toEqual('1');
expect(sigfig('9.9', 1)).toEqual('10');
expect(() => sigfig(Number.NaN, 1)).toThrow();
expect(() => sigfig(1, Number.NaN)).toThrow();
expect(() => sigfig(Number.NaN, Number.NaN)).toThrow();
expect(() => sigfig('not a number', 1)).toThrow();
expect(() => sigfig('42', 0)).toThrow();
expect(() => sigfig('not a number', 0)).toThrow();
expect(() => sigfig('42', 1.3)).toThrow();
});
test('fast check', () => {
fc.assert(
fc.property(
fc.double(),
fc.integer().filter((n) => n > 0 && n < 100_000),
(number, numSigfigs) => {
expect(sigfig(sigfig(number, numSigfigs))).toEqual(numSigfigs);
}
)
);
});
test('fast check 2', () => {
// Correctly rounds positive integers
fc.assert(
fc.property(
fc.integer().filter((n) => n >= 1),
fc.integer().filter((n) => n > 0 && n < 100),
(integer, numSigfigs) => {
if (numSigfigs > String(integer).length) {
expect(sigfig(integer, numSigfigs).length).toEqual(
numSigfigs + 1 // +1 accounts for decimal place
);
} else {
expect(sigfig(integer, numSigfigs).length).toEqual(
String(integer).length
);
}
}
)
);
}); | |
main.rs | //! In persuit of a quicker release, the current plan is to only support turtle documents stored on
//! ipfs. That obvates this program.
extern crate alloc;
extern crate core;
mod ipfs_client_resolver;
mod parse;
mod resolve;
mod rm_to_om;
use ipfs_api::IpfsClient;
use resolve::Resolve;
use structopt::StructOpt;
#[derive(StructOpt)]
struct | {
#[structopt(required = true)]
iri: Vec<String>,
}
#[tokio::main]
async fn main() {
let mut resolver = IpfsClient::default();
for iri in Args::from_args().iri {
let doc = resolver.lookup(&iri).await.unwrap();
let graph = parse::into_rdf(&doc, "text/turtle; charset=utf-8").unwrap();
dbg!(graph);
}
}
| Args |
py_serde.rs | use num_traits::cast::ToPrimitive;
use num_traits::sign::Signed;
use serde::de::{DeserializeSeed, Visitor};
use serde::ser::{Serialize, SerializeMap, SerializeSeq};
use crate::builtins::{dict::PyDictRef, float, int, list::PyList, pybool, tuple::PyTuple, PyStr};
use crate::{PyObject, PyObjectRef, TypeProtocol, VirtualMachine};
#[inline]
pub fn serialize<S>(
vm: &VirtualMachine,
pyobject: &PyObject,
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
PyObjectSerializer { pyobject, vm }.serialize(serializer)
}
#[inline]
pub fn deserialize<'de, D>(
vm: &'de VirtualMachine,
deserializer: D,
) -> Result<<PyObjectDeserializer as DeserializeSeed>::Value, D::Error>
where
D: serde::Deserializer<'de>,
{
PyObjectDeserializer { vm }.deserialize(deserializer)
}
// We need to have a VM available to serialise a PyObject based on its subclass, so we implement
// PyObject serialisation via a proxy object which holds a reference to a VM
pub struct PyObjectSerializer<'s> {
pyobject: &'s PyObject,
vm: &'s VirtualMachine,
}
impl<'s> PyObjectSerializer<'s> {
pub fn new(vm: &'s VirtualMachine, pyobject: &'s PyObjectRef) -> Self {
PyObjectSerializer { pyobject, vm }
}
fn clone_with_object(&self, pyobject: &'s PyObjectRef) -> PyObjectSerializer {
PyObjectSerializer {
pyobject,
vm: self.vm,
}
}
}
impl<'s> serde::Serialize for PyObjectSerializer<'s> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let serialize_seq_elements =
|serializer: S, elements: &[PyObjectRef]| -> Result<S::Ok, S::Error> {
let mut seq = serializer.serialize_seq(Some(elements.len()))?;
for e in elements.iter() {
seq.serialize_element(&self.clone_with_object(e))?;
}
seq.end()
};
if let Some(s) = self.pyobject.payload::<PyStr>() {
serializer.serialize_str(s.as_ref())
} else if self.pyobject.isinstance(&self.vm.ctx.types.float_type) {
serializer.serialize_f64(float::get_value(self.pyobject))
} else if self.pyobject.isinstance(&self.vm.ctx.types.bool_type) {
serializer.serialize_bool(pybool::get_value(self.pyobject))
} else if self.pyobject.isinstance(&self.vm.ctx.types.int_type) {
let v = int::get_value(self.pyobject);
let int_too_large = || serde::ser::Error::custom("int too large to serialize");
// TODO: serialize BigInt when it does not fit into i64
// BigInt implements serialization to a tuple of sign and a list of u32s,
// eg. -1 is [-1, [1]], 0 is [0, []], 12345678900000654321 is [1, [2710766577,2874452364]]
// CPython serializes big ints as long decimal integer literals
if v.is_positive() {
serializer.serialize_u64(v.to_u64().ok_or_else(int_too_large)?)
} else {
serializer.serialize_i64(v.to_i64().ok_or_else(int_too_large)?)
}
} else if let Some(list) = self.pyobject.payload_if_subclass::<PyList>(self.vm) {
serialize_seq_elements(serializer, &list.borrow_vec())
} else if let Some(tuple) = self.pyobject.payload_if_subclass::<PyTuple>(self.vm) {
serialize_seq_elements(serializer, tuple.as_slice())
} else if self.pyobject.isinstance(&self.vm.ctx.types.dict_type) {
let dict: PyDictRef = self.pyobject.to_owned().downcast().unwrap();
let pairs: Vec<_> = dict.into_iter().collect();
let mut map = serializer.serialize_map(Some(pairs.len()))?;
for (key, e) in pairs.iter() {
map.serialize_entry(&self.clone_with_object(key), &self.clone_with_object(e))?;
}
map.end()
} else if self.vm.is_none(self.pyobject) {
serializer.serialize_none()
} else {
Err(serde::ser::Error::custom(format!(
"Object of type '{}' is not serializable",
self.pyobject.class()
)))
}
}
}
// This object is used as the seed for deserialization so we have access to the PyContext for type
// creation
#[derive(Clone)]
pub struct PyObjectDeserializer<'c> {
vm: &'c VirtualMachine,
}
impl<'c> PyObjectDeserializer<'c> {
pub fn new(vm: &'c VirtualMachine) -> Self {
PyObjectDeserializer { vm }
}
}
impl<'de> DeserializeSeed<'de> for PyObjectDeserializer<'de> {
type Value = PyObjectRef;
fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
where
D: serde::Deserializer<'de>,
{
deserializer.deserialize_any(self.clone())
}
}
impl<'de> Visitor<'de> for PyObjectDeserializer<'de> {
type Value = PyObjectRef;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a type that can be deserialised in Python")
}
fn visit_bool<E>(self, value: bool) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(self.vm.ctx.new_bool(value).into())
}
// Other signed integers delegate to this method by default, it’s the only one needed
fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(self.vm.ctx.new_int(value).into())
}
// Other unsigned integers delegate to this method by default, it’s the only one needed
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(self.vm.ctx.new_int(value).into())
}
fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(self.vm.ctx.new_float(value).into())
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
// Owned value needed anyway, delegate to visit_string
self.visit_string(value.to_owned())
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(self.vm.ctx.new_str(value).into())
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(self.vm.ctx.none())
}
fn visi | self, mut access: A) -> Result<Self::Value, A::Error>
where
A: serde::de::SeqAccess<'de>,
{
let mut seq = Vec::with_capacity(access.size_hint().unwrap_or(0));
while let Some(value) = access.next_element_seed(self.clone())? {
seq.push(value);
}
Ok(self.vm.ctx.new_list(seq).into())
}
fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
where
M: serde::de::MapAccess<'de>,
{
let dict = self.vm.ctx.new_dict();
// Although JSON keys must be strings, this implementation accepts any keys
// and can be reused by other deserializers without such a limit
while let Some((key_obj, value)) = access.next_entry_seed(self.clone(), self.clone())? {
dict.set_item(key_obj, value, self.vm).unwrap();
}
Ok(dict.into())
}
}
| t_seq<A>( |
enterprisePolicy.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package powerplatform
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Definition of the EnterprisePolicy.
// API Version: 2020-10-30-preview.
type EnterprisePolicy struct {
pulumi.CustomResourceState
// The encryption settings for a configuration store.
Encryption PropertiesResponseEncryptionPtrOutput `pulumi:"encryption"`
// The identity of the EnterprisePolicy.
Identity EnterprisePolicyIdentityResponsePtrOutput `pulumi:"identity"`
// The geo-location where the resource lives
Location pulumi.StringOutput `pulumi:"location"`
// Settings concerning lockbox.
Lockbox PropertiesResponseLockboxPtrOutput `pulumi:"lockbox"`
// The name of the resource
Name pulumi.StringOutput `pulumi:"name"`
// Settings concerning network injection.
NetworkInjection PropertiesResponseNetworkInjectionPtrOutput `pulumi:"networkInjection"`
// Metadata pertaining to creation and last modification of the resource.
SystemData SystemDataResponseOutput `pulumi:"systemData"`
// Resource tags.
Tags pulumi.StringMapOutput `pulumi:"tags"`
// The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
Type pulumi.StringOutput `pulumi:"type"`
}
// NewEnterprisePolicy registers a new resource with the given unique name, arguments, and options.
func NewEnterprisePolicy(ctx *pulumi.Context,
name string, args *EnterprisePolicyArgs, opts ...pulumi.ResourceOption) (*EnterprisePolicy, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.ResourceGroupName == nil {
return nil, errors.New("invalid value for required argument 'ResourceGroupName'")
}
aliases := pulumi.Aliases([]pulumi.Alias{
{
Type: pulumi.String("azure-nextgen:powerplatform:EnterprisePolicy"),
},
{
Type: pulumi.String("azure-native:powerplatform/v20201030preview:EnterprisePolicy"),
},
{
Type: pulumi.String("azure-nextgen:powerplatform/v20201030preview:EnterprisePolicy"),
},
})
opts = append(opts, aliases)
var resource EnterprisePolicy
err := ctx.RegisterResource("azure-native:powerplatform:EnterprisePolicy", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetEnterprisePolicy gets an existing EnterprisePolicy resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetEnterprisePolicy(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *EnterprisePolicyState, opts ...pulumi.ResourceOption) (*EnterprisePolicy, error) |
// Input properties used for looking up and filtering EnterprisePolicy resources.
type enterprisePolicyState struct {
// The encryption settings for a configuration store.
Encryption *PropertiesResponseEncryption `pulumi:"encryption"`
// The identity of the EnterprisePolicy.
Identity *EnterprisePolicyIdentityResponse `pulumi:"identity"`
// The geo-location where the resource lives
Location *string `pulumi:"location"`
// Settings concerning lockbox.
Lockbox *PropertiesResponseLockbox `pulumi:"lockbox"`
// The name of the resource
Name *string `pulumi:"name"`
// Settings concerning network injection.
NetworkInjection *PropertiesResponseNetworkInjection `pulumi:"networkInjection"`
// Metadata pertaining to creation and last modification of the resource.
SystemData *SystemDataResponse `pulumi:"systemData"`
// Resource tags.
Tags map[string]string `pulumi:"tags"`
// The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
Type *string `pulumi:"type"`
}
type EnterprisePolicyState struct {
// The encryption settings for a configuration store.
Encryption PropertiesResponseEncryptionPtrInput
// The identity of the EnterprisePolicy.
Identity EnterprisePolicyIdentityResponsePtrInput
// The geo-location where the resource lives
Location pulumi.StringPtrInput
// Settings concerning lockbox.
Lockbox PropertiesResponseLockboxPtrInput
// The name of the resource
Name pulumi.StringPtrInput
// Settings concerning network injection.
NetworkInjection PropertiesResponseNetworkInjectionPtrInput
// Metadata pertaining to creation and last modification of the resource.
SystemData SystemDataResponsePtrInput
// Resource tags.
Tags pulumi.StringMapInput
// The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
Type pulumi.StringPtrInput
}
func (EnterprisePolicyState) ElementType() reflect.Type {
return reflect.TypeOf((*enterprisePolicyState)(nil)).Elem()
}
type enterprisePolicyArgs struct {
// The encryption settings for a configuration store.
Encryption *PropertiesEncryption `pulumi:"encryption"`
// Name of the EnterprisePolicy.
EnterprisePolicyName *string `pulumi:"enterprisePolicyName"`
// The identity of the EnterprisePolicy.
Identity *EnterprisePolicyIdentity `pulumi:"identity"`
// The geo-location where the resource lives
Location *string `pulumi:"location"`
// Settings concerning lockbox.
Lockbox *PropertiesLockbox `pulumi:"lockbox"`
// Settings concerning network injection.
NetworkInjection *PropertiesNetworkInjection `pulumi:"networkInjection"`
// The name of the resource group. The name is case insensitive.
ResourceGroupName string `pulumi:"resourceGroupName"`
// Resource tags.
Tags map[string]string `pulumi:"tags"`
}
// The set of arguments for constructing a EnterprisePolicy resource.
type EnterprisePolicyArgs struct {
// The encryption settings for a configuration store.
Encryption PropertiesEncryptionPtrInput
// Name of the EnterprisePolicy.
EnterprisePolicyName pulumi.StringPtrInput
// The identity of the EnterprisePolicy.
Identity EnterprisePolicyIdentityPtrInput
// The geo-location where the resource lives
Location pulumi.StringPtrInput
// Settings concerning lockbox.
Lockbox PropertiesLockboxPtrInput
// Settings concerning network injection.
NetworkInjection PropertiesNetworkInjectionPtrInput
// The name of the resource group. The name is case insensitive.
ResourceGroupName pulumi.StringInput
// Resource tags.
Tags pulumi.StringMapInput
}
func (EnterprisePolicyArgs) ElementType() reflect.Type {
return reflect.TypeOf((*enterprisePolicyArgs)(nil)).Elem()
}
type EnterprisePolicyInput interface {
pulumi.Input
ToEnterprisePolicyOutput() EnterprisePolicyOutput
ToEnterprisePolicyOutputWithContext(ctx context.Context) EnterprisePolicyOutput
}
func (*EnterprisePolicy) ElementType() reflect.Type {
return reflect.TypeOf((*EnterprisePolicy)(nil))
}
func (i *EnterprisePolicy) ToEnterprisePolicyOutput() EnterprisePolicyOutput {
return i.ToEnterprisePolicyOutputWithContext(context.Background())
}
func (i *EnterprisePolicy) ToEnterprisePolicyOutputWithContext(ctx context.Context) EnterprisePolicyOutput {
return pulumi.ToOutputWithContext(ctx, i).(EnterprisePolicyOutput)
}
type EnterprisePolicyOutput struct {
*pulumi.OutputState
}
func (EnterprisePolicyOutput) ElementType() reflect.Type {
return reflect.TypeOf((*EnterprisePolicy)(nil))
}
func (o EnterprisePolicyOutput) ToEnterprisePolicyOutput() EnterprisePolicyOutput {
return o
}
func (o EnterprisePolicyOutput) ToEnterprisePolicyOutputWithContext(ctx context.Context) EnterprisePolicyOutput {
return o
}
func init() {
pulumi.RegisterOutputType(EnterprisePolicyOutput{})
}
| {
var resource EnterprisePolicy
err := ctx.ReadResource("azure-native:powerplatform:EnterprisePolicy", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
} |
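A minimal Pulumi program sketch that registers an EnterprisePolicy with the constructor above. The SDK import path, resource name, resource group, and location are assumptions; only the required ResourceGroupName argument plus an optional Location are set.

package main

import (
	powerplatform "github.com/pulumi/pulumi-azure-native/sdk/go/azure/powerplatform" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Register the policy; ResourceGroupName is the only required argument.
		_, err := powerplatform.NewEnterprisePolicy(ctx, "example-policy", &powerplatform.EnterprisePolicyArgs{
			ResourceGroupName: pulumi.String("example-rg"),
			Location:          pulumi.String("eastus"),
		})
		return err
	})
}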
index.js | import React, { PureComponent } from 'react';
import {
Animated,
View,
PanResponder,
StyleSheet,
} from 'react-native';
import PropTypes from 'prop-types';
import Color from 'color';
import styled from '@ui/styled'; | marginHorizontal: theme.sizing.baseUnit,
overflow: 'visible',
paddingHorizontal: theme.sizing.baseUnit / 2,
paddingVertical: (theme.sizing.baseUnit / 2) - (theme.sizing.borderRadius / 2),
}), 'Seeker.Container')(View);
const Track = styled(({ theme }) => ({
backgroundColor: Color(theme.colors.text.primary).fade(theme.alpha.medium).string(),
height: theme.sizing.borderRadius,
borderRadius: theme.sizing.borderRadius,
overflow: 'hidden',
}), 'Seeker.Track')(View);
const ProgressBar = styled(({ theme }) => ({
backgroundColor: theme.colors.text.primary,
...StyleSheet.absoluteFillObject,
}), 'Seeker.ProgressBar')(View);
const Knob = styled(({ theme }) => ({
backgroundColor: theme.colors.text.primary,
borderRadius: theme.sizing.baseUnit,
position: 'absolute',
top: 0,
right: 0,
height: theme.sizing.baseUnit,
width: theme.sizing.baseUnit,
elevation: 2,
zIndex: 100,
}), 'Seeker.Knob')(View);
export class Seeker extends PureComponent {
static propTypes = {
progress: PropTypes.object, // eslint-disable-line
onSeek: PropTypes.func,
onSeeking: PropTypes.func,
};
static defaultProps = {
progress: new Animated.Value(0),
onSeek() {},
onSeeking() {},
};
state = {
width: 0,
}
componentWillMount() {
this.listen(this.props.progress);
}
componentWillReceiveProps({ progress }) {
if (progress !== this.props.progress) {
this.listen(progress);
}
}
componentWillUnmount() {
if (this.listener) this.props.progress.removeListener(this.listener);
}
get trackBarOffset() {
const progressInvert = Animated.add(1, Animated.multiply(this.props.progress, -1));
const position = Animated.multiply(progressInvert, -this.state.width);
return Animated.add(position, this.offsetDriver);
}
get knobStyles() {
return ({
position: 'absolute',
right: 0,
top: 0,
bottom: 0,
width: '100%',
overflow: 'visible',
transform: [
{ translateX: this.trackBarOffset },
],
});
}
get progressBarStyles() {
return ([StyleSheet.absoluteFill, {
transform: [{ translateX: this.trackBarOffset }],
}]);
}
listen = (progress) => {
if (this.listener) this.props.progress.removeListener(this.listener);
this.listener = progress.addListener(({ value }) => {
this.lastProgressValue = value;
this.lastPosition = value * this.state.width;
});
}
offsetDriver = new Animated.Value(0);
panResponder = PanResponder.create({
onStartShouldSetPanResponder: () => !!this.props.onSeeking,
onPanResponderMove: (e, { dx }) => {
let offset = dx;
offset = Math.min(this.state.width - this.lastPosition, offset);
offset = Math.max(-this.lastPosition, offset);
this.offsetDriver.setValue(offset || 0);
const { onSeeking } = this.props;
onSeeking((this.lastPosition + dx) / this.state.width);
},
onPanResponderRelease: (e, { dx }) => {
this.offsetDriver.setValue(0);
const { onSeek } = this.props;
if (onSeek) onSeek(((this.lastPosition || 0) + dx) / this.state.width);
},
});
handleOnLayout = ({ nativeEvent: { layout: { width } } }) => {
this.setState({
width,
});
};
render() {
return (
<Container>
<Track onLayout={this.handleOnLayout}>
<Animated.View
style={this.progressBarStyles}
>
<ProgressBar />
</Animated.View>
</Track>
<Animated.View
style={this.knobStyles}
>
<Knob {...this.panResponder.panHandlers} />
</Animated.View>
</Container>
);
}
}
export default withTheme(({ theme, ...otherProps } = {}) => ({
progressColor: theme.colors.primary,
knobColor: theme.colors.secondary,
trackColor: theme.colors.darkPrimary,
...otherProps,
}))(Seeker); | import { withTheme } from '@ui/theme';
const Container = styled(({ theme }) => ({
minWidth: '65%', |
widget-open-handler.js | "use strict";
/********************************************************************************
* Copyright (C) 2018 TypeFox and others.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v. 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* This Source Code may also be made available under the following Secondary
* Licenses when the conditions for such availability set forth in the Eclipse
* Public License v. 2.0 are satisfied: GNU General Public License, version 2
* with the GNU Classpath Exception which is available at
* https://www.gnu.org/software/classpath/license.html.
*
* SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0
********************************************************************************/
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.WidgetOpenHandler = void 0;
var inversify_1 = require("inversify");
var common_1 = require("../common");
var shell_1 = require("./shell");
var widget_manager_1 = require("./widget-manager");
/**
* Generic base class for {@link OpenHandler}s that are opening a widget for a given {@link URI}.
*/
var WidgetOpenHandler = /** @class */ (function () {
function WidgetOpenHandler() {
this.onCreatedEmitter = new common_1.Emitter();
/**
* Emit when a new widget is created.
*/
this.onCreated = this.onCreatedEmitter.event;
}
WidgetOpenHandler.prototype.init = function () {
var _this = this;
this.widgetManager.onDidCreateWidget(function (_a) {
var factoryId = _a.factoryId, widget = _a.widget;
if (factoryId === _this.id) {
_this.onCreatedEmitter.fire(widget);
}
});
};
/**
* Open a widget for the given uri and options.
* Reject if the given options are not widget options or a widget cannot be opened.
* @param uri the uri of the resource that should be opened.
* @param options the widget opener options.
*
* @returns promise of the widget that resolves when the widget has been opened.
*/
WidgetOpenHandler.prototype.open = function (uri, options) {
return __awaiter(this, void 0, void 0, function () {
var widget;
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this.getOrCreateWidget(uri, options)];
case 1:
widget = _a.sent();
return [4 /*yield*/, this.doOpen(widget, options)];
case 2:
_a.sent();
return [2 /*return*/, widget];
}
});
});
};
WidgetOpenHandler.prototype.doOpen = function (widget, options) {
return __awaiter(this, void 0, void 0, function () {
var op;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
op = __assign({ mode: 'activate' }, options);
if (!widget.isAttached) {
this.shell.addWidget(widget, op.widgetOptions || { area: 'main' });
}
if (!(op.mode === 'activate')) return [3 /*break*/, 2];
return [4 /*yield*/, this.shell.activateWidget(widget.id)];
case 1:
_a.sent();
return [3 /*break*/, 4];
case 2:
if (!(op.mode === 'reveal')) return [3 /*break*/, 4];
return [4 /*yield*/, this.shell.revealWidget(widget.id)];
case 3:
_a.sent();
_a.label = 4;
case 4: return [2 /*return*/];
}
});
});
};
/**
* Tries to get an existing widget for the given uri.
* @param uri the uri of the widget.
*
* @returns a promise that resolves to the existing widget or `undefined` if no widget for the given uri exists.
*/
WidgetOpenHandler.prototype.getByUri = function (uri) {
return this.getWidget(uri);
};
/**
* Return an existing widget for the given uri or creates a new one.
*
* It does not open a widget, use {@link WidgetOpenHandler#open} instead. | * @param uri uri of the widget.
*
* @returns a promise of the existing or newly created widget.
*/
WidgetOpenHandler.prototype.getOrCreateByUri = function (uri) {
return this.getOrCreateWidget(uri);
};
Object.defineProperty(WidgetOpenHandler.prototype, "all", {
/**
* Retrieves all open widgets that have been opened by this handler.
*
* @returns all open widgets for this open handler.
*/
get: function () {
return this.widgetManager.getWidgets(this.id);
},
enumerable: false,
configurable: true
});
WidgetOpenHandler.prototype.getWidget = function (uri, options) {
var widgetOptions = this.createWidgetOptions(uri, options);
return this.widgetManager.getWidget(this.id, widgetOptions);
};
WidgetOpenHandler.prototype.getOrCreateWidget = function (uri, options) {
var widgetOptions = this.createWidgetOptions(uri, options);
return this.widgetManager.getOrCreateWidget(this.id, widgetOptions);
};
/**
* Closes all widgets that have been opened by this open handler.
* @param options the close options that should be applied to all widgets.
*
* @returns a promise of all closed widgets that resolves after they have been closed.
*/
WidgetOpenHandler.prototype.closeAll = function (options) {
return __awaiter(this, void 0, void 0, function () {
var closed;
var _this = this;
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, Promise.all(this.all.map(function (widget) { return _this.shell.closeWidget(widget.id, options); }))];
case 1:
closed = _a.sent();
return [2 /*return*/, closed.filter(function (widget) { return !!widget; })];
}
});
});
};
__decorate([
inversify_1.inject(shell_1.ApplicationShell),
__metadata("design:type", shell_1.ApplicationShell)
], WidgetOpenHandler.prototype, "shell", void 0);
__decorate([
inversify_1.inject(widget_manager_1.WidgetManager),
__metadata("design:type", widget_manager_1.WidgetManager)
], WidgetOpenHandler.prototype, "widgetManager", void 0);
__decorate([
inversify_1.postConstruct(),
__metadata("design:type", Function),
__metadata("design:paramtypes", []),
__metadata("design:returntype", void 0)
], WidgetOpenHandler.prototype, "init", null);
WidgetOpenHandler = __decorate([
inversify_1.injectable()
], WidgetOpenHandler);
return WidgetOpenHandler;
}());
exports.WidgetOpenHandler = WidgetOpenHandler;
//# sourceMappingURL=widget-open-handler.js.map | |
txtfile.py | """
MIT License
Copyright (c) 2017 Roni Eliezer
"""
import logger
import json
import yaml
class File(object):
"""
Implement text file methods,
e.g. read, write, etc.
Args:
file_path: full path to file
"""
#==================================================================
def __init__(self, file_path):
""" Constructor """
self.file = file_path
#==================================================================
def read(self):
|
#==================================================================
#==================================================================
#==================================================================
class Json(File):
'''
Implement JSON file methods
'''
#==================================================================
def loads(self):
return self.load()
#==================================================================
def load(self):
"""
Return a dictionary representing the Json file content
"""
return json.loads(self.read())
#==================================================================
def dump(self, json_dictionary):
"""
Dump the given json_dictionary to self.file
"""
with open(self.file, 'w') as f:
json.dump(json_dictionary, f, indent=4)
#==================================================================
#==================================================================
#==================================================================
class Yaml(File):
""" Implement YAML file format """
#==================================================================
def load(self):
"""
Return a dictionary representing the Yaml file content
"""
return yaml.safe_load(self.read())
#==================================================================
def dump(self, dictionary):
"""
Dump the given dictionary to self.file
"""
with open(self.file, 'w') as f:
yaml.dump(dictionary, f, indent=4)
| """ Open the file and return its content """
try:
with open(self.file, 'r') as f:
try:
return f.read()
except Exception as e:
logger.out.exception("Exception while trying to read: '{}', {}"\
.format(self.file, e))
except Exception as e:
logger.out.exception("Exception while trying to open: '{}', {}"\
.format(self.file, e))
return None |
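A short usage sketch of the classes above; the module name `txtfile` and the file paths are assumptions for illustration.

from txtfile import Json, Yaml

settings = Json("/tmp/settings.json")
settings.dump({"retries": 3, "debug": True})  # write the dict as JSON
print(settings.load())                        # -> {'retries': 3, 'debug': True}

config = Yaml("/tmp/config.yaml")
config.dump({"name": "demo"})
print(config.load())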
edit.rs | //! Services to support meeting edits.
use crate::api::rcos::meetings::creation::create::normalize_url;
use crate::api::rcos::meetings::edit::EditHostSelection;
use crate::api::rcos::meetings::ALL_MEETING_TYPES;
use crate::api::rcos::meetings::{
authorization_for::{AuthorizationFor, UserMeetingAuthorization},
creation::context::CreationContext,
edit,
get_by_id::{meeting::MeetingMeeting, Meeting},
};
use crate::error::TelescopeError;
use crate::templates::forms::FormTemplate;
use crate::templates::Template;
use crate::web::services::auth::identity::AuthenticationCookie;
use crate::web::services::meetings::create::{get_semester_bounds, FinishForm};
use actix_web::http::header::LOCATION;
use actix_web::web::Form;
use actix_web::{
web::{Path, Query, ServiceConfig},
HttpRequest, HttpResponse,
};
use chrono::{DateTime, Local, NaiveDateTime, NaiveTime, TimeZone, Utc};
use serde_json::Value;
use uuid::Uuid;
/// The Handlebars file for the meeting edit form.
const MEETING_EDIT_FORM: &'static str = "meetings/edit/form";
/// The Handlebars file for the host selection page.
const HOST_SELECTION_TEMPLATE: &'static str = "meetings/edit/host_selection";
/// Register the meeting edit services.
pub fn register(config: &mut ServiceConfig) {
config
.service(edit_page)
.service(submit_meeting_edits)
.service(host_selection);
}
/// Structure for query which can optionally be passed to the edit page to set a new host.
#[derive(Clone, Debug, Serialize, Deserialize)]
struct | {
/// The new host for the meeting. Nil UUID for no host.
set_host: Uuid,
}
/// Get meeting data or return a resource not found error.
async fn get_meeting_data(meeting_id: i64) -> Result<MeetingMeeting, TelescopeError> {
// Get the meeting data to check that it exists.
Meeting::get(meeting_id)
.await?
.ok_or(TelescopeError::resource_not_found(
"Meeting Not Found",
"Could not find a meeting for this ID.",
))
}
/// Get a user's meeting authorization object from their authentication cookie.
async fn authorization_for_viewer(
auth: &AuthenticationCookie,
) -> Result<UserMeetingAuthorization, TelescopeError> {
// Get user ID from cookie.
let viewer = auth.get_user_id_or_error().await?;
// Query API for auth object.
return AuthorizationFor::get(Some(viewer)).await;
}
/// Get meeting data and error if the authenticated user cannot edit the meeting.
async fn meeting_data_checked(
auth: &AuthenticationCookie,
meeting_id: i64,
) -> Result<MeetingMeeting, TelescopeError> {
// Get meeting data. Extract host's user ID.
let meeting_data = get_meeting_data(meeting_id).await?;
let meeting_host: Option<_> = meeting_data.host.as_ref().map(|host| host.id);
// Get user's authorization object.
let authorization = authorization_for_viewer(auth).await?;
// Check edit access.
if !authorization.can_edit(meeting_host) {
return Err(TelescopeError::Forbidden);
} else {
return Ok(meeting_data);
}
}
/// Resolve the desired host user ID from the set host query parameter or the existing meeting
/// host.
fn resolve_host_user_id(
meeting_data: &MeetingMeeting,
set_host: Option<Query<HostQuery>>,
) -> Option<Uuid> {
// The current host ID from the meeting data.
let existing_host: Option<Uuid> = meeting_data.host.as_ref().map(|h| h.id);
// The new host's user ID if not set to nil.
let new_host: Option<Uuid> = set_host.and_then(|q| {
let set_host = q.set_host;
// Check for nil UUID here.
(!set_host.is_nil()).then(|| set_host)
});
return new_host.or(existing_host);
}
/// Resolve the meeting title value. This is the supplied title or a combination of the meeting
/// type and date.
fn resolve_meeting_title(meeting_data: &MeetingMeeting) -> String {
meeting_data.title()
}
/// Create the form template for meeting edits.
fn make_form(meeting_data: &MeetingMeeting) -> FormTemplate {
// Resolve the meeting title.
let meeting_title: String = resolve_meeting_title(&meeting_data);
// Create the template.
return FormTemplate::new(MEETING_EDIT_FORM, format!("Edit {}", meeting_title));
}
/// Service to display meeting edit form to users who can edit the meeting.
#[get("/meeting/{meeting_id}/edit")]
async fn edit_page(
Path(meeting_id): Path<i64>,
auth: AuthenticationCookie,
set_host: Option<Query<HostQuery>>,
) -> Result<FormTemplate, TelescopeError> {
// Get the meeting data. Error on meeting not found or permissions failure.
let meeting_data = meeting_data_checked(&auth, meeting_id).await?;
// Resolve the desired host user ID.
let host: Option<Uuid> = resolve_host_user_id(&meeting_data, set_host);
// Get the creation context (based on the resolved host)
// so we know what semesters are available.
let context =
CreationContext::execute(host, vec![meeting_data.semester.semester_id.clone()]).await?;
// Create the meeting template.
let mut form: FormTemplate = make_form(&meeting_data);
// Instantiate form with meeting data, context, and meeting types.
form.template = json!({
"data": &meeting_data,
"meeting_types": ALL_MEETING_TYPES,
"context": context
});
// Add fields to the template converting the timestamps in the meeting data to the HTML versions.
let meeting_start: &DateTime<Utc> = &meeting_data.start_date_time;
let meeting_start_local: DateTime<Local> = meeting_start.with_timezone(&Local);
form.template["data"]["start_date"] = json!(meeting_start_local.format("%Y-%m-%d").to_string());
form.template["data"]["start_time"] = json!(meeting_start_local.format("%H:%M").to_string());
let meeting_end: &DateTime<Utc> = &meeting_data.end_date_time;
let meeting_end_local: DateTime<Local> = meeting_end.with_timezone(&Local);
form.template["data"]["end_date"] = json!(meeting_end_local.format("%Y-%m-%d").to_string());
form.template["data"]["end_time"] = json!(meeting_end_local.format("%H:%M").to_string());
return Ok(form);
}
#[post("/meeting/{meeting_id}/edit")]
async fn submit_meeting_edits(
Path(meeting_id): Path<i64>,
auth: AuthenticationCookie,
set_host: Option<Query<HostQuery>>,
// Use the same structure as is used for creation since the
// form data submitted should be the same.
Form(form_data): Form<FinishForm>,
) -> Result<HttpResponse, TelescopeError> {
// Get meeting data. Error if there is no such meeting or the user cannot access it
let meeting_data = meeting_data_checked(&auth, meeting_id).await?;
// Resolve the desired host user ID.
let host: Option<Uuid> = resolve_host_user_id(&meeting_data, set_host);
// Get the creation context (based on the resolved host)
// so we know what semesters are available.
let context =
CreationContext::execute(host, vec![meeting_data.semester.semester_id.clone()]).await?;
// Create the meeting template.
let mut form: FormTemplate = make_form(&meeting_data);
// Instantiate form with meeting types, context and data.
form.template = json!({
"meeting_types": ALL_MEETING_TYPES,
"context": &context,
"data": &meeting_data
});
// Destructure the submitted form.
let FinishForm {
start_time,
start_date,
end_time,
end_date,
description,
external_slides_url,
is_remote,
is_draft,
semester,
recording_url,
meeting_url,
location,
kind,
title,
} = form_data;
// Like the creation system, semester ID, meeting kind, and host ID are not validated.
// Add submitted data to return form.
form.template["data"]["semester"] = json!({ "semester_id": &semester });
form.template["data"]["type"] = json!(kind);
form.template["data"]["description"] = json!(&description);
form.template["data"]["start_date"] = json!(&start_date);
form.template["data"]["end_date"] = json!(&end_date);
form.template["data"]["start_time"] = json!(&start_time);
form.template["data"]["end_time"] = json!(&end_time);
// Handle meeting title -- just whitespace and default to None if empty.
let title: Option<String> = (!title.trim().is_empty()).then(|| title.trim().to_string());
form.template["data"]["title"] = json!(&title);
// Same with location.
let location: Option<String> =
location.and_then(|string| (!string.trim().is_empty()).then(|| string.trim().to_string()));
form.template["data"]["location"] = json!(&location);
// Trim description.
let description: String = description.trim().to_string();
form.template["data"]["description"] = json!(&description);
// Don't bother trimming URLs, since the GraphQL mutation will normalize them.
form.template["data"]["meeting_url"] = json!(&meeting_url);
form.template["data"]["recording_url"] = json!(&recording_url);
form.template["data"]["external_presentation_url"] = json!(&external_slides_url);
// Handle flags.
let is_remote: bool = is_remote.unwrap_or(false);
let is_draft: bool = is_draft.unwrap_or(false);
form.template["data"]["is_remote"] = json!(is_remote);
form.template["data"]["is_draft"] = json!(is_draft);
// Validate dates and set an issue in the form if there is one.
// Get the selected semester info from the context object.
let selected_semester: &Value = form.template["context"]["available_semesters"]
.as_array()
.expect("There should be an available semesters array in the meeting context.")
.iter()
.find(|available_semester| available_semester["semester_id"] == semester.as_str())
.ok_or(TelescopeError::BadRequest {
header: "Malformed Meeting Edit Form".into(),
            message: "Selected semester not found in available semester list.".into(),
show_status_code: false,
})?;
// Get the semester bounds.
let (semester_start, semester_end) = get_semester_bounds(selected_semester);
if end_date < start_date {
form.template["issues"]["end_date"] = json!("End date is before start date.");
} else if start_date > semester_end {
form.template["issues"]["start_date"] = json!("Start date is after end of semester.");
} else if end_date > semester_end {
form.template["issues"]["end_date"] = json!("End date is after end of semester.");
} else if start_date < semester_start {
form.template["issues"]["start_date"] = json!("Start date is before semester starts.");
} else if end_date < semester_start {
form.template["issues"]["end_date"] = json!("End date is before semester starts.");
}
// If there was an issue, return the form as invalid.
if form.template["issues"] != json!(null) {
return Err(TelescopeError::invalid_form(&form));
}
// Parse times
let time_parse = |time: String| format!("{}:00", time).parse::<NaiveTime>();
let start_time: NaiveTime = time_parse(start_time).map_err(|e| TelescopeError::BadRequest {
header: "Malformed Start Time".into(),
message: format!("Could not parse start time. Internal error: {}", e),
show_status_code: false,
})?;
let end_time: NaiveTime = time_parse(end_time).map_err(|e| TelescopeError::BadRequest {
header: "Malformed End Time".into(),
message: format!("Could not parse end time. Internal error: {}", e),
show_status_code: false,
})?;
// Add times to dates.
let start: NaiveDateTime = start_date.and_time(start_time);
let end: NaiveDateTime = end_date.and_time(end_time);
// Make sure meeting starts before it ends.
if start > end {
form.template["issues"]["end_time"] = json!("End time is before start time.");
return Err(TelescopeError::invalid_form(&form));
}
// Add timestamps.
let timezone_adder = |timestamp: &NaiveDateTime| Local.from_local_datetime(timestamp).single();
let start: DateTime<Local> = timezone_adder(&start).ok_or(TelescopeError::BadRequest {
header: "Malformed Start Time".into(),
message: "Could not ascribe local timezone to start timestamp.".into(),
show_status_code: false,
})?;
let end: DateTime<Local> = timezone_adder(&end).ok_or(TelescopeError::BadRequest {
header: "Malformed End Time".into(),
message: "Could not ascribe local timezone to end timestamp.".into(),
show_status_code: false,
})?;
// Create variables for mutation.
let edit_mutation_variables = edit::edit_meeting::Variables {
meeting_id,
title,
start: start.with_timezone(&Utc),
end: end.with_timezone(&Utc),
semester_id: semester,
kind,
description,
is_remote,
is_draft,
meeting_url: normalize_url(meeting_url),
location,
external_slides_url: normalize_url(external_slides_url),
recording_url: normalize_url(recording_url),
// Extract the host from context object.
host: form.template["context"]["host"][0]["id"]
.as_str()
.and_then(|host_id| host_id.parse::<Uuid>().ok()),
};
// The returned meeting ID should match the existing one but we don't check.
let meeting_id: i64 = edit::EditMeeting::execute(edit_mutation_variables)
.await?
.unwrap_or(meeting_id);
// Redirect the user back to the meeting they edited.
return Ok(HttpResponse::Found()
.header(LOCATION, format!("/meeting/{}", meeting_id))
.finish());
}
/// Host selection page.
#[get("/meeting/{meeting_id}/edit/select_host")]
async fn host_selection(
Path(meeting_id): Path<i64>,
auth: AuthenticationCookie,
req: HttpRequest,
) -> Result<Template, TelescopeError> {
// Check that the user can edit this meeting.
let viewer = auth.get_user_id_or_error().await?;
if !AuthorizationFor::get(Some(viewer))
.await?
.can_edit_by_id(meeting_id)
.await?
{
return Err(TelescopeError::Forbidden);
}
// Get host selection.
let data = EditHostSelection::get(meeting_id).await?;
// Create host selection page template.
let mut template: Template = Template::new(HOST_SELECTION_TEMPLATE);
template.set_field("data", data);
return template.render_into_page(&req, "Select Host").await;
}
| HostQuery |
index.js | "use strict";
import { NativeModules, Platform } from "react-native";
const RNUpdateAPK = NativeModules.RNUpdateAPK;
let jobId = -1;
export class UpdateAPK {
constructor(options) {
this.options = options;
}
get = (url, success, error, options = {}) => {
fetch(url, options)
.then(response => response.json())
.then(json => {
success && success(json);
})
.catch(err => {
error && error(err);
});
};
getApkVersion = () => {
if (jobId !== -1) {
return;
}
if (!this.options.apkVersionUrl) {
console.log("RNUpdateAPK::getApkVersion - apkVersionUrl doesn't exist.");
return;
}
this.get(
this.options.apkVersionUrl,
this.getApkVersionSuccess.bind(this),
this.getVersionError.bind(this),
this.options.apkVersionOptions
);
};
getApkVersionSuccess = remote => {
console.log("getApkVersionSuccess", remote);
// TODO switch this to versionCode
let outdated = false;
if (remote.versionCode && (remote.versionCode > RNUpdateAPK.versionCode)) {
console.log('RNUpdateAPK::getApkVersionSuccess - outdated based on code, local/remote: ' + RNUpdateAPK.versionCode + "/" + remote.versionCode);
outdated = true;
}
if (!remote.versionCode && (RNUpdateAPK.versionName !== remote.versionName)) {
console.log('RNUpdateAPK::getApkVersionSuccess - APK outdated based on version name, local/remote: ' + RNUpdateAPK.versionName + "/" + remote.versionName);
outdated = true;
}
if (outdated) {
if (remote.forceUpdate) {
if (this.options.forceUpdateApp) {
this.options.forceUpdateApp();
}
this.downloadApk(remote);
} else if (this.options.needUpdateApp) {
this.options.needUpdateApp(isUpdate => {
if (isUpdate) {
this.downloadApk(remote);
}
}, remote.whatsNew);
}
} else if (this.options.notNeedUpdateApp) {
this.options.notNeedUpdateApp();
}
};
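// Illustrative only: the version document fetched from apkVersionUrl is assumed to
// look roughly like the following, based on the fields read above (versionName,
// versionCode, forceUpdate, whatsNew) and in downloadApk below (apkUrl):
// {
//   "versionName": "1.2.3",
//   "versionCode": 10203,
//   "apkUrl": "https://example.com/NewApp.apk",
//   "forceUpdate": false,
//   "whatsNew": "Bug fixes"
// }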
downloadApk = remote => {
const RNFS = require("react-native-fs");
const progress = data => {
const percentage = ((100 * data.bytesWritten) / data.contentLength) | 0;
this.options.downloadApkProgress &&
this.options.downloadApkProgress(percentage, data.contentLength, data.bytesWritten);
};
const begin = res => {
console.log("RNUpdateAPK::downloadApk - downloadApkStart");
this.options.downloadApkStart && this.options.downloadApkStart();
};
const progressDivider = 1;
// You must be sure filepaths.xml exposes this path or you will have a FileProvider error API24+
// You might check {totalSpace, freeSpace} = await RNFS.getFSInfo() to make sure there is room
const downloadDestPath = `${RNFS.CachesDirectoryPath}/NewApp.apk`;
let options = this.options.apkOptions ? this.options.apkOptions : {};
const ret = RNFS.downloadFile(
Object.assign(
{
fromUrl: remote.apkUrl,
toFile: downloadDestPath,
begin,
progress,
background: true,
progressDivider
},
options
)
);
jobId = ret.jobId;
ret.promise
.then(res => {
if (res['statusCode'] >= 400 && res['statusCode'] <= 599){
throw "Failed to Download APK. Server returned with " + res['statusCode'] + " statusCode";
}
console.log("RNUpdateAPK::downloadApk - downloadApkEnd");
this.options.downloadApkEnd && this.options.downloadApkEnd();
RNUpdateAPK.getApkInfo(downloadDestPath)
.then(res => {
console.log(
"RNUpdateAPK::downloadApk - Old Cert SHA-256: " + RNUpdateAPK.signatures[0].thumbprint
);
console.log("RNUpdateAPK::downloadApk - New Cert SHA-256: " + res.signatures[0].thumbprint);
if (
res.signatures[0].thumbprint !==
RNUpdateAPK.signatures[0].thumbprint
) {
// FIXME should add extra callback for this
console.log(
"The signature thumbprints seem unequal. Install will fail"
);
}
})
.catch(rej => {
console.log("RNUpdateAPK::downloadApk - apk info error: ", rej);
this.options.onError && this.options.onError("Failed to get Downloaded APK Info");
// re-throw so we don't attempt to install the APK, this will call the downloadApkError handler
throw rej;
});
RNUpdateAPK.installApk(
downloadDestPath,
this.options.fileProviderAuthority
);
jobId = -1;
})
.catch(err => {
this.downloadApkError(err);
jobId = -1;
});
};
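// Rough sketch (not taken from this project) of a FileProvider paths XML file that
// would expose the cache directory used above; the actual file name and authority
// depend on your AndroidManifest and the fileProviderAuthority option passed to
// installApk:
// <paths>
//   <cache-path name="cache" path="." />
// </paths>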
getAppStoreVersion = () => {
if (!this.options.iosAppId) {
console.log("RNUpdateAPK::getAppStoreVersion - iosAppId doesn't exist.");
return;
}
const URL = "https://itunes.apple.com/lookup?id=" + this.options.iosAppId + "&time=" +(new Date());
console.log("RNUpdateAPK::getAppStoreVersion - attempting to fetch " + URL);
this.get(
URL,
this.getAppStoreVersionSuccess.bind(this),
this.getVersionError.bind(this)
);
};
getAppStoreVersionSuccess = data => {
if (data.resultCount < 1) {
console.log("RNUpdateAPK::getAppStoreVersionSuccess - iosAppId is wrong.");
return;
}
const result = data.results[0];
const version = result.version;
const trackViewUrl = result.trackViewUrl;
let majorVersionCode = version.split(/[.]/)[0]
let appMajorVersionCode = RNUpdateAPK.versionName.split(/[.]/)[0]
if (majorVersionCode !== appMajorVersionCode) {
if (this.options.needUpdateApp) {
this.options.needUpdateApp(isUpdate => {
if (isUpdate) {
RNUpdateAPK.installFromAppStore(trackViewUrl);
}
});
}
}
};
getVersionError = err => {
console.log("RNUpdateAPK::getVersionError - getVersionError", err);
this.options.onError && this.options.onError(err);
};
downloadApkError = err => {
console.log("RNUpdateAPK::downloadApkError - downloadApkError", err);
this.options.onError && this.options.onError(err);
};
checkUpdate = () => {
if (Platform.OS === "android") {
this.getApkVersion();
} else {
this.getAppStoreVersion();
}
};
}
// Returns a Promise with either boolean true for success, or the Exception on error
export function patchSSLProvider(force = false, dialogIfRepairable = false) {
if (Platform.OS !== "android") {
return Promise.resolve(true);
}
console.log("Attempting to patch SSL Provider");
return RNUpdateAPK.patchSSLProvider(force, dialogIfRepairable);
}
export function | () {
return RNUpdateAPK.versionName;
}
export function getInstalledVersionCode() {
return RNUpdateAPK.versionCode;
}
export function getInstalledPackageName() {
return RNUpdateAPK.packageName;
}
export function getInstalledFirstInstallTime() {
return RNUpdateAPK.firstInstallTime;
}
export function getInstalledLastUpdateTime() {
return RNUpdateAPK.lastUpdateTime;
}
export function getInstalledPackageInstaller() {
return RNUpdateAPK.packageInstaller;
}
export function getInstalledSigningInfo() {
return RNUpdateAPK.signatures;
}
export async function getApps() {
if (Platform.OS === "android") {
return RNUpdateAPK.getApps();
} else {
return Promise.resolve([]);
}
}
export async function getNonSystemApps() {
if (Platform.OS === "android") {
return RNUpdateAPK.getNonSystemApps();
} else {
return Promise.resolve([]);
}
}
| getInstalledVersionName |
loader.min.js | window.kudosAppLoader=window.kudosAppLoader||{},function(){var e={};function n(e){window.document.title=e||""}function o(e){document.write(e)}function a(e){return e?e.charAt(0).toUpperCase()+e.slice(1):null}function t(){var e=location.href.replace(location.origin,"/..");window.location.href="/blogs/roller-ui/login-redirect.jsp?redirect="+encodeURIComponent(e)}window.kudosAppLoader.go=function(){var t,i=window.kudosAppLoader.appLoaderConfig,r=window.kudosAppLoader.contextPath,d=r.replace(/^\//,"");if(i[d])L=d,t=r;else{var c=window.location.pathname.replace(r,"").split("/");L=c.length>1?c[1]:null,t=r+"/"+L}document.body.classList.add(L);var l=L?i[L]:null,p=document.getElementById("app-frame");o(e.body);var s=$(document.body).height();if(p&&s&&s<100&&(p.style.paddingTop=s+"px"),l){var u=new URL(l);if(n(a(L)),window.addEventListener("message",function(e){if(!e||!e.data||e.origin!==u.origin)return;var o=e.data;"String"==typeof o&&(o={command:o});const{command:a,route:i,title:r,icon:d}=o||{};if("appReady"===a||"applicationReady"===a){const{currentLogin:e={},origin:n}=window;p&&p.contentWindow.postMessage({source:{resourceType:"header-frame"},context:{origin:n},user:e},"*")}var c,l;i&&history.replaceState(null,null,t+i),r&&n(r),d&&(c=d,(l=document.querySelector("link[rel*='icon']"))||(l=document.createElement("link"),document.getElementsByTagName("head")[0].appendChild(l)),l.type="image/x-icon",l.rel="shortcut icon",l.href=c)},!1),p){var w=u.origin,h="/"===u.pathname?"":u.pathname,f=window.location.pathname.replace(t,""),m=[];window.location.search&&m.push(window.location.search.replace("?","")),u.search&&m.push(u.search.replace("?",""));var g=m.length?"?"+m.join("&"):"",v=window.location.hash||u.hash;p.src=w+(f||h)+g+v}}else{p&&(p.style.display="none");var y=$('<div class="app-links" ></div>');for(var L in $(document.body).append(y),i){var b=$('<span class="app-link"><a href="'+r+"/"+L+'">'+a(L)+"</a></span>");y.append(b)}}};var i="/blogs/roller-ui/about.do";$.ajax({url:i,type:"GET",dataType:"text",async:!1,cache:!1}).done(function(n,o,a){function t(e,n){var o=e.toLowerCase(),a=n.toLowerCase(),t=o.indexOf("<"+a);if(t>-1){t=o.indexOf(">",t)+1;var i=o.indexOf("</"+a,t);if(i>t)return e.substring(t,i)}return null}var i=t(n,"head");i=(i=(i=i.replace(/<title>[^<]*<\/title>/,"")).replace(/<meta[^>]*>/g,"")).replace(/document.title[\s]*=[^;]*;/g,""),e.head=i;var r=t(n,"body");r=r.replace(/document.title[\s]*=[^;]*;/g,""),e.body=r}).fail(function(e,n,o){console.error("Connections integration load failure",arguments),t()}),o(e.head),window.appName||t()}(); |
||
repeat_vec.rs | // Copyright (c) The XPeer Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::pick_slice_idxs;
use proptest::sample::Index;
/// An efficient representation of a vector with repeated elements inserted.
///
/// Internally, this data structure stores one copy of each inserted element, along with data about
/// how many times each element is repeated.
///
/// This data structure does not do any sort of deduplication, so it isn't any sort of set (or
/// multiset). | ///
/// This is useful for presenting a large logical vector for picking `proptest` indexes from.
///
/// # Examples
///
/// ```
/// use proptest_helpers::RepeatVec;
///
/// let mut repeat_vec = RepeatVec::new();
/// repeat_vec.extend("a", 10); // logically, insert "a" 10 times
/// repeat_vec.extend("b", 20); // logically, insert "b" 20 times
/// assert_eq!(repeat_vec.get(0), Some((&"a", 0))); // returns the "a" at logical position 0
/// assert_eq!(repeat_vec.get(5), Some((&"a", 5))); // returns the "a" at logical position 5
/// assert_eq!(repeat_vec.get(10), Some((&"b", 0))); // returns the "b" (offset 0) at logical position 10
/// assert_eq!(repeat_vec.get(20), Some((&"b", 10))); // returns the "b" (offset 10) at logical position 20
/// assert_eq!(repeat_vec.get(30), None); // past the end of the logical array
/// ```
///
/// The data structure doesn't care about whether the inserted items are equal or not.
///
/// ```
/// use proptest_helpers::RepeatVec;
///
/// let mut repeat_vec = RepeatVec::new();
/// repeat_vec.extend("a", 10); // logically, insert "a" 10 times
/// repeat_vec.extend("a", 20); // logically, insert "a" 20 times
/// assert_eq!(repeat_vec.get(0), Some((&"a", 0)));
/// assert_eq!(repeat_vec.get(5), Some((&"a", 5)));
/// assert_eq!(repeat_vec.get(10), Some((&"a", 0))); // This refers to the second "a".
/// ```
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)]
pub struct RepeatVec<T> {
// The first element of each tuple is the starting position for this item.
items: Vec<(usize, T)>,
len: usize,
}
impl<T> RepeatVec<T> {
/// Creates a new, empty `RepeatVec`.
pub fn new() -> Self {
Self {
items: vec![],
len: 0,
}
}
/// Returns the *logical* number of elements in this `RepeatVec`.
#[inline]
pub fn len(&self) -> usize {
self.len
}
/// Returns `true` if this `RepeatVec` has no *logical* elements.
///
/// # Examples
///
/// ```
/// use proptest_helpers::RepeatVec;
///
/// let mut repeat_vec = RepeatVec::new();
///
/// // There are no elements in this RepeatVec.
/// assert!(repeat_vec.is_empty());
///
/// // Adding 0 logical copies of an element still means it's empty.
/// repeat_vec.extend("a", 0);
/// assert!(repeat_vec.is_empty());
///
/// // Adding non-zero logical copies makes this vector not empty.
/// repeat_vec.extend("b", 1);
/// assert!(!repeat_vec.is_empty());
/// ```
#[inline]
pub fn is_empty(&self) -> bool {
self.len == 0
}
/// Extends this `RepeatVec` by logically adding `size` copies of `item` to the end of it.
pub fn extend(&mut self, item: T, size: usize) {
self.items.push((self.len, item));
self.len += size;
}
/// Returns the item at location `at`. The return value is a reference to the stored item, plus
/// the offset from the start (logically, which copy of the item is being returned).
pub fn get(&self, at: usize) -> Option<(&T, usize)> {
if at >= self.len {
return None;
}
match self.items.binary_search_by_key(&at, |(start, _)| *start) {
Ok(exact_idx) => Some((&self.items[exact_idx].1, 0)),
Err(start_idx) => {
// start_idx can never be 0 because usize starts from 0 and items[0].0 is always 0.
// So start_idx is always at least 1.
let start_val = &self.items[start_idx - 1];
let offset = at - start_val.0;
Some((&start_val.1, offset))
}
}
}
/// Picks out elements uniformly randomly from this `RepeatVec`, using the provided
/// [`Index`](proptest::sample::Index) instances as sources of randomness.
pub fn pick_uniform(&self, indexes: &[impl AsRef<Index>]) -> Vec<(&T, usize)> {
pick_slice_idxs(self.len(), indexes)
.into_iter()
.map(|idx| {
self.get(idx)
.expect("indexes picked should always be in range")
})
.collect()
}
}
// Note that RepeatVec cannot implement `std::ops::Index<usize>` because the return type of Index
// has to be a reference (in this case it would be &(T, usize)). But RepeatVec computes the result
// of get() (as (&T, usize)) instead of merely returning a reference. This is a subtle but
// important point. | |
example_test.go | // Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package bech32_test
import (
"encoding/hex"
"fmt"
"github.com/ltcsuite/ltcutil/bech32"
)
// This example demonstrates how to decode a bech32 encoded string.
func ExampleDecode() {
encoded := "bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx"
hrp, decoded, err := bech32.Decode(encoded)
if err != nil {
fmt.Println("Error:", err)
}
// Show the decoded data.
fmt.Println("Decoded human-readable part:", hrp)
fmt.Println("Decoded Data:", hex.EncodeToString(decoded))
// Output:
// Decoded human-readable part: bc | }
// This example demonstrates how to encode data into a bech32 string.
func ExampleEncode() {
data := []byte("Test data")
// Convert test data to base32:
conv, err := bech32.ConvertBits(data, 8, 5, true)
if err != nil {
fmt.Println("Error:", err)
}
encoded, err := bech32.Encode("customHrp!11111q", conv)
if err != nil {
fmt.Println("Error:", err)
}
// Show the encoded data.
fmt.Println("Encoded Data:", encoded)
// Output:
// Encoded Data: customHrp!11111q123jhxapqv3shgcgumastr
} | // Decoded Data: 010e140f070d1a001912060b0d081504140311021d030c1d03040f1814060e1e160e140f070d1a001912060b0d081504140311021d030c1d03040f1814060e1e16 |
effects.ts | import { call, put, select, takeEvery } from 'redux-saga/effects';
import { format } from '@waldur/core/ErrorMessageFormatter';
import { Action } from '@waldur/core/reducerActions';
import * as api from '@waldur/customer/payments/api';
import { updatePaymentsList } from '@waldur/customer/payments/utils';
import { translate } from '@waldur/i18n';
import { closeModalDialog } from '@waldur/modal/actions';
import { showError, showSuccess } from '@waldur/store/coreSaga';
import { getCustomer } from '@waldur/workspace/selectors';
import * as constants from '../constants';
function* createPayment(action) {
try { | });
yield put(showSuccess(translate('Payment has been created.')));
yield put(closeModalDialog());
const customer = yield select(getCustomer);
yield put(updatePaymentsList(customer));
} catch (error) {
const errorMessage = `${translate('Unable to create payment.')} ${format(
error,
)}`;
yield put(showError(errorMessage));
}
}
function* updatePayment(action) {
try {
yield call(api.updatePayment, action.payload);
yield put(showSuccess(translate('Payment has been updated.')));
yield put(closeModalDialog());
const customer = yield select(getCustomer);
yield put(updatePaymentsList(customer));
} catch (error) {
const errorMessage = `${translate('Unable to update payment.')} ${format(
error,
)}`;
yield put(showError(errorMessage));
}
}
function* deletePayment(action: Action<any>) {
try {
yield call(api.deletePayment, action.payload);
yield put(showSuccess(translate('Payment has been deleted.')));
yield put(closeModalDialog());
const customer = yield select(getCustomer);
yield put(updatePaymentsList(customer));
} catch (error) {
const errorMessage = `${translate('Unable to delete payment.')} ${format(
error,
)}`;
yield put(showError(errorMessage));
}
}
function* linkInvoice(action: Action<any>) {
try {
yield call(api.linkInvoice, action.payload);
yield put(
showSuccess(
translate('Invoice has been successfully linked to payment.'),
),
);
const customer = yield select(getCustomer);
yield put(updatePaymentsList(customer));
} catch (error) {
const errorMessage = `${translate(
'Unable to link invoice to the payment.',
)} ${format(error)}`;
yield put(showError(errorMessage));
}
}
function* unlinkInvoice(action: Action<any>) {
try {
yield call(api.unlinkInvoice, action.payload);
yield put(
showSuccess(
translate('Invoice has been successfully unlinked from payment.'),
),
);
const customer = yield select(getCustomer);
yield put(updatePaymentsList(customer));
} catch (error) {
const errorMessage = `${translate(
'Unable to unlink invoice from the payment.',
)} ${format(error)}`;
yield put(showError(errorMessage));
}
}
export default function* () {
yield takeEvery(constants.CREATE_PAYMENT, createPayment);
yield takeEvery(constants.UPDATE_PAYMENT, updatePayment);
yield takeEvery(constants.DELETE_PAYMENT, deletePayment);
yield takeEvery(constants.LINK_INVOICE, linkInvoice);
yield takeEvery(constants.UNLINK_INVOICE, unlinkInvoice);
} | yield call(api.createPayment, {
...action.payload, |
project_badge.rs | use gtk::prelude::*;
use projectpadsql::models::Project;
use relm::Widget;
use relm_derive::{widget, Msg};
use std::cell::{Cell, RefCell};
use std::f64::consts::PI;
use std::io::Cursor;
use std::rc::Rc;
pub const OUTER_SIZE: i32 = 60;
const PADDING: i32 = 5;
#[derive(Msg, Debug)]
pub enum Msg {
Click,
Activate(Project),
ActiveProjectChanged(i32),
MouseEnter,
MouseLeave,
MouseEnterProject(i32),
MouseLeaveProject(i32),
DarkThemeToggled,
ScaleFactorChange,
}
pub struct Model {
relm: relm::Relm<ProjectBadge>,
project: Project,
font_size_for_width: Rc<RefCell<Option<(i32, f64)>>>, // cache the computed font size
backing_buffer: Rc<RefCell<Option<cairo::ImageSurface>>>,
is_active: Rc<Cell<bool>>,
}
#[widget]
impl Widget for ProjectBadge {
fn init_view(&mut self) {
self.widgets
.drawing_area
.set_size_request(OUTER_SIZE, OUTER_SIZE);
self.widgets.drawing_area.add_events(
gdk::EventMask::BUTTON_PRESS_MASK
| gdk::EventMask::ENTER_NOTIFY_MASK
| gdk::EventMask::LEAVE_NOTIFY_MASK,
);
let buf = self.model.backing_buffer.clone();
let fsw = self.model.font_size_for_width.clone();
let is_a = self.model.is_active.clone();
let icon = self.model.project.icon.clone();
let name = self.model.project.name.clone();
self.widgets.drawing_area.connect_draw(move |da, context| {
let allocation = da.get_allocation();
let b0 = buf.borrow();
let is_buffer_good = Some((allocation.width, allocation.height))
== b0.as_ref().map(|b| (b.get_width(), b.get_height()));
let surface_ref = if is_buffer_good {
b0
} else {
drop(b0);
// need to set up the backing buffer
buf.replace(Some(Self::prepare_backing_buffer(
da,
&fsw,
is_a.get(),
&icon,
&name,
allocation.width,
allocation.height,
)));
buf.borrow()
};
// paint the backing buffer -
let s = surface_ref.as_ref().unwrap();
let output_scale = da.get_scale_factor();
s.set_device_scale(output_scale as f64, output_scale as f64);
context.set_source_surface(s, 0.0, 0.0);
context.paint();
Inhibit(false)
});
}
fn model(relm: &relm::Relm<Self>, project: Project) -> Model {
Model {
relm: relm.clone(),
project,
font_size_for_width: Rc::new(RefCell::new(None)),
backing_buffer: Rc::new(RefCell::new(None)),
is_active: Rc::new(Cell::new(false)),
}
}
fn prepare_backing_buffer(
drawing_area: >k::DrawingArea,
font_size_for_width: &RefCell<Option<(i32, f64)>>,
is_active: bool,
icon: &Option<Vec<u8>>,
name: &str,
allocation_width_: i32,
allocation_height_: i32,
) -> cairo::ImageSurface {
let output_scale = drawing_area.get_scale_factor();
let allocation_width = allocation_width_ * output_scale;
let allocation_height = allocation_height_ * output_scale;
let buf =
cairo::ImageSurface::create(cairo::Format::ARgb32, allocation_width, allocation_height)
.expect("cairo backing buffer");
let context = cairo::Context::new(&buf);
// code to make the badge text bold, but i feel it doesn't work out
// if let Some(family) = context.get_font_face().toy_get_family() {
// context.select_font_face(&family, cairo::FontSlant::Normal, cairo::FontWeight::Bold);
// }
// println!("drawing badge, allocation: {:?}", allocation);
let new_fsw = match *font_size_for_width.borrow() {
Some((w, font_size)) if w == allocation_width => {
context.set_font_size(font_size);
None
}
_ => Some((
allocation_width,
Self::compute_font_size(&context, (allocation_width - PADDING * 2) as f64 * 0.75),
)),
};
if let Some(fsw) = new_fsw {
font_size_for_width.replace(Some(fsw));
}
context.set_antialias(cairo::Antialias::Best);
let style_context = drawing_area.get_style_context();
gtk::render_background(
&style_context,
&context,
0.0,
0.0,
allocation_width.into(),
allocation_height.into(),
);
gtk::render_frame(
&style_context,
&context,
0.0,
0.0,
allocation_width.into(),
allocation_height.into(),
);
let fg_color = style_context.lookup_color("theme_fg_color").unwrap();
context.set_source_rgb(fg_color.red, fg_color.green, fg_color.blue);
if is_active {
context.set_line_width(6.0 * output_scale as f64);
context.set_line_cap(cairo::LineCap::Round);
context.move_to(10.0, allocation_height as f64 - 5.0 * output_scale as f64);
context.line_to(
allocation_width as f64 - 10.0 * output_scale as f64,
allocation_height as f64 - 5.0 * output_scale as f64,
);
context.stroke();
} else {
context.set_line_width(2.0 * output_scale as f64);
}
context.arc(
(allocation_width / 2).into(),
(allocation_width / 2).into(),
(allocation_width / 2 - PADDING).into(),
0.0,
2.0 * PI,
);
context.stroke_preserve();
let bg_color = style_context.lookup_color("theme_bg_color").unwrap();
// so the goal here is to push the contrast. if the background color
// is darker (<0.5) we go for pure black; if it's brighter, we go
// for pure white.
let bg_base = if bg_color.red < 0.5 { 0.0 } else { 1.0 };
context.set_source_rgb(bg_base, bg_base, bg_base);
context.fill();
context.set_source_rgb(fg_color.red, fg_color.green, fg_color.blue);
match icon {
// the 'if' works around an issue reading from SQL. should be None if it's empty!!
Some(icon) if !icon.is_empty() => Self::draw_icon(&context, allocation_width, icon),
_ => Self::draw_label(&context, allocation_width, &name[..2]),
}
buf
}
fn compute_font_size(context: &cairo::Context, width: f64) -> f64 {
let mut size = 5.0;
context.set_font_size(size);
while context.text_extents("HU").width < width * 0.8 {
context.set_font_size(size);
size += 1.0;
}
size
}
pub fn draw_icon(context: &cairo::Context, allocation_width: i32, icon: &[u8]) {
context.save();
match cairo::ImageSurface::create_from_png(&mut Cursor::new(icon)).ok() {
Some(surface) => {
let p = PADDING as f64;
let aw = (allocation_width - PADDING * 2) as f64;
let w = surface.get_width() as f64;
let h = surface.get_height() as f64;
let scale_ratio = f64::min(aw / w, aw / h);
context.scale(scale_ratio, scale_ratio);
let (offsetx, offsety) = if w > h {
(p, p + (aw - aw * h / w) / 2.0)
} else {
(p + (aw - aw * w / h) / 2.0, p)
};
context.set_source_surface(&surface, offsetx / scale_ratio, offsety / scale_ratio);
context.paint();
}
_ => {
eprintln!("failed reading png {}", icon.len());
}
}
context.restore();
}
fn | (context: &cairo::Context, allocation_width: i32, contents: &str) {
// context.set_source_rgb(1.0, 1.0, 1.0);
let text_extents = context.text_extents(contents);
context.move_to(
(allocation_width / 2) as f64 - text_extents.width / 2.0 - text_extents.x_bearing,
(allocation_width / 2) as f64 - text_extents.y_bearing - text_extents.height / 2.0,
);
context.text_path(contents);
context.fill();
}
fn update(&mut self, event: Msg) {
match event {
Msg::Click => {
self.model
.relm
.stream()
.emit(Msg::Activate(self.model.project.clone()));
}
Msg::Activate(_) => {
// meant for my parent, not me
}
Msg::ActiveProjectChanged(pid) => {
let new_active = pid == self.model.project.id;
if new_active != self.model.is_active.get() {
self.model.is_active.set(new_active);
// force a recompute of the display
self.model.backing_buffer.replace(None);
self.widgets.drawing_area.queue_draw();
}
}
Msg::MouseEnter => {
self.model
.relm
.stream()
.emit(Msg::MouseEnterProject(self.model.project.id));
}
Msg::MouseLeave => {
self.model
.relm
.stream()
.emit(Msg::MouseLeaveProject(self.model.project.id));
}
Msg::DarkThemeToggled | Msg::ScaleFactorChange => {
// force a recompute of the display
self.model.backing_buffer.replace(None);
self.widgets.drawing_area.queue_draw();
}
Msg::MouseEnterProject(_) => {}
Msg::MouseLeaveProject(_) => {}
}
}
view! {
#[name="drawing_area"]
gtk::DrawingArea {
button_press_event(_, _) => (Msg::Click, Inhibit(false)),
enter_notify_event(_, _) => (Msg::MouseEnter, Inhibit(false)),
leave_notify_event(_, _) => (Msg::MouseLeave, Inhibit(false)),
property_scale_factor_notify => Msg::ScaleFactorChange,
}
}
}
| draw_label |
metrics.go | package metrics
var latencyBuckets = [23]float64{
1.00,
1.50,
2.25,
3.38,
5.06,
7.59,
11.39,
17.09,
25.63,
38.44,
57.67,
86.50,
129.75,
194.62,
291.93,
437.89,
656.84,
985.26,
1477.89,
2216.84,
3325.26,
4987.89,
7481.83,
}
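// Bucket upper bounds grow by roughly 1.5x per step (1ms, 1.5ms, 2.25ms, ... ~7.5s).
// Illustrative mapping for Bucket below, assuming latencies are passed in
// microseconds (the division by 1000 converts them to milliseconds):
//   Bucket(500)      -> 0  (0.5ms falls in the first, 1.00ms bucket)
//   Bucket(2000)     -> 2  (2ms falls in the 2.25ms bucket)
//   Bucket(10000000) -> 22 (10s is clamped to the last bucket)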
// Bucket returns the bucket where the received latency falls
func Bucket(latency int64) int | {
floatLatency := float64(latency) / 1000 // Convert to milliseconds
index := 0
for index < len(latencyBuckets) && floatLatency > latencyBuckets[index] {
index++
}
if index == len(latencyBuckets) {
return index - 1
}
return index
} |
|
0001_initial.py | # Generated by Django 3.2.9 on 2021-11-03 13:00
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class | (migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| Migration |
pty_freebsd.go | package termios
import (
"fmt"
"syscall"
"unsafe"
)
func posix_openpt(oflag int) (fd uintptr, err error) {
// Copied from debian-golang-pty/pty_freebsd.go.
r0, _, e1 := syscall.Syscall(syscall.SYS_POSIX_OPENPT, uintptr(oflag), 0, 0)
fd = uintptr(r0)
if e1 != 0 |
return
}
func open_pty_master() (uintptr, error) {
return posix_openpt(syscall.O_NOCTTY | syscall.O_RDWR | syscall.O_CLOEXEC)
}
func Ptsname(fd uintptr) (string, error) {
var n uintptr
err := ioctl(fd, syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n)))
if err != nil {
return "", err
}
return fmt.Sprintf("/dev/pts/%d", n), nil
}
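// Rough usage sketch (error handling elided) of how these helpers combine to
// allocate a pty pair on FreeBSD:
//   master, _ := open_pty_master()
//   _ = grantpt(master)
//   _ = unlockpt(master)
//   slaveName, _ := Ptsname(master) // e.g. "/dev/pts/3"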
func grantpt(fd uintptr) error {
var n uintptr
return ioctl(fd, syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n)))
}
func unlockpt(fd uintptr) error {
return nil
}
| {
err = e1
} |
fill.rs | use common_exception::Result;
use crate::arrays::builders::Utf8ArrayBuilder;
use crate::arrays::DataArray;
use crate::series::Series;
use crate::utils::NoNull;
use crate::DFBooleanArray;
use crate::DFListArray;
use crate::DFPrimitiveType;
use crate::DFUtf8Array;
// Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
#[derive(Copy, Clone, Debug)]
pub enum FillNoneStrategy {
/// previous value in array
Backward,
/// next value in array
Forward,
/// mean value of array
Mean,
/// minimal value in array
Min,
/// maximum value in array
Max,
/// replace with the value zero
Zero,
/// replace with the value one
One,
/// replace with the maximum value of that data type
MaxBound,
/// replace with the minimal value of that data type
MinBound,
}
/// Replace None values with various strategies
pub trait ArrayFillNone {
/// Replace None values with one of the following strategies:
/// * Forward fill (replace None with the previous value)
/// * Backward fill (replace None with the next value)
/// * Mean fill (replace None with the mean of the whole array)
/// * Min fill (replace None with the minimum of the whole array)
/// * Max fill (replace None with the maximum of the whole array)
fn fill_none(&self, strategy: FillNoneStrategy) -> Result<Self>
where Self: Sized;
}
/// Replace None values with a value
pub trait ArrayFillNoneValue<T> {
/// Replace None values with a give value `T`.
fn fill_none_with_value(&self, value: T) -> Result<Self>
where Self: Sized;
}
/// Fill a DataArray with one value.
pub trait ArrayFull<T> {
/// Create a DataArray with a single value.
fn full(value: T, length: usize) -> Self
where Self: std::marker::Sized;
}
pub trait ArrayFullNull {
fn full_null(_length: usize) -> Self
where Self: std::marker::Sized;
}
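// Illustrative use of the two traits through one of the concrete impls below
// (a sketch, not taken from the crate's own docs):
//   let trues = DFBooleanArray::full(true, 3);  // three `true` values
//   let nulls = DFBooleanArray::full_null(3);   // three null values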
impl<T> ArrayFull<T::Native> for DataArray<T>
where T: DFPrimitiveType
{
fn full(value: T::Native, length: usize) -> Self
where T::Native: Copy {
(0..length)
.map(|_| value)
.collect::<NoNull<DataArray<T>>>()
.into_inner()
}
}
impl<T> ArrayFullNull for DataArray<T>
where T: DFPrimitiveType
{
fn full_null(length: usize) -> Self {
(0..length).map(|_| None).collect::<Self>()
}
}
impl ArrayFull<bool> for DFBooleanArray {
fn full(value: bool, length: usize) -> Self {
(0..length).map(|_| value).collect::<DFBooleanArray>()
}
}
impl ArrayFullNull for DFBooleanArray {
fn full_null(length: usize) -> Self {
(0..length).map(|_| None).collect::<Self>()
}
}
impl<'a> ArrayFull<&'a str> for DFUtf8Array {
fn full(value: &'a str, length: usize) -> Self {
let mut builder = Utf8ArrayBuilder::new(length, length * value.len());
for _ in 0..length {
builder.append_value(value);
}
builder.finish()
}
}
impl ArrayFullNull for DFUtf8Array {
fn full_null(length: usize) -> Self |
}
impl ArrayFull<&Series> for DFListArray {
fn full(_value: &Series, _length: usize) -> DFListArray {
todo!()
}
}
impl ArrayFullNull for DFListArray {
fn full_null(_length: usize) -> DFListArray {
todo!()
}
}
| {
(0..length)
.map::<Option<String>, _>(|_| None)
.collect::<Self>()
} |
test_process_voluntary_exit.py | from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases
from eth2spec.test.helpers.keys import pubkey_to_privkey
from eth2spec.test.helpers.voluntary_exits import sign_voluntary_exit
def run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=True):
"""
Run ``process_voluntary_exit``, yielding:
- pre-state ('pre')
- voluntary_exit ('voluntary_exit')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
validator_index = signed_voluntary_exit.message.validator_index
yield 'pre', state
yield 'voluntary_exit', signed_voluntary_exit
if not valid:
expect_assertion_error(lambda: spec.process_voluntary_exit(state, signed_voluntary_exit))
yield 'post', None
return
pre_exit_epoch = state.validators[validator_index].exit_epoch
spec.process_voluntary_exit(state, signed_voluntary_exit)
yield 'post', state
assert pre_exit_epoch == spec.FAR_FUTURE_EPOCH
assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
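# Callers either forward the yielded artifacts to the test generator with
# `yield from run_voluntary_exit_processing(...)`, or, when only the resulting
# state transition matters, drain the generator as test_success_exit_queue does:
#   for _ in run_voluntary_exit_processing(spec, state, signed_voluntary_exit):
#       continue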
@with_all_phases
@spec_state_test
def test_success(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit)
assert state.validators[validator_index].exit_epoch == spec.compute_activation_exit_epoch(current_epoch)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_signature(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
voluntary_exit = spec.VoluntaryExit(
epoch=current_epoch,
validator_index=validator_index,
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, 12345)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
@with_all_phases
@spec_state_test
def test_success_exit_queue(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
# exit `MAX_EXITS_PER_EPOCH`
initial_indices = spec.get_active_validator_indices(state, current_epoch)[:spec.get_validator_churn_limit(state)]
# Prepare a bunch of exits, based on the current state
exit_queue = []
for index in initial_indices:
privkey = pubkey_to_privkey[state.validators[index].pubkey]
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=index), privkey)
exit_queue.append(signed_voluntary_exit)
# Now run all the exits
for voluntary_exit in exit_queue:
# the function yields data, but we are just interested in running it here, ignore yields.
for _ in run_voluntary_exit_processing(spec, state, voluntary_exit):
continue
# exit an additional validator
validator_index = spec.get_active_validator_indices(state, current_epoch)[-1]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
# This is the interesting part of the test: on a pre-state with a full exit queue,
# when processing an additional exit, it results in an exit in a later epoch
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit)
assert (
state.validators[validator_index].exit_epoch ==
state.validators[initial_indices[0]].exit_epoch + 1
)
@with_all_phases
@spec_state_test
def test_default_exit_epoch_subsequent_exit(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
# Exit one validator prior to this new one
exited_index = spec.get_active_validator_indices(state, current_epoch)[-1]
state.validators[exited_index].exit_epoch = current_epoch - 1
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit)
assert state.validators[validator_index].exit_epoch == spec.compute_activation_exit_epoch(current_epoch)
@with_all_phases
@spec_state_test
def test_validator_exit_in_future(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
voluntary_exit = spec.VoluntaryExit(
epoch=current_epoch + 1,
validator_index=validator_index,
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
@with_all_phases
@spec_state_test
def test_validator_invalid_validator_index(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
voluntary_exit = spec.VoluntaryExit(
epoch=current_epoch,
validator_index=len(state.validators),
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
@with_all_phases
@spec_state_test
def test_validator_not_active(spec, state):
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
state.validators[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
@with_all_phases
@spec_state_test
def | (spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow the validator to exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
# but validator already has exited
state.validators[validator_index].exit_epoch = current_epoch + 2
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
@with_all_phases
@spec_state_test
def test_validator_not_active_long_enough(spec, state):
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
assert (
current_epoch - state.validators[validator_index].activation_epoch <
spec.config.SHARD_COMMITTEE_PERIOD
)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
| test_validator_already_exited |
CodeTools.py |
# from JumpScale.baselib.codeexecutor.CodeExecutor import CodeExecutor
import inspect
from JumpScale import j
from ClassBase import ClassBase, JSModelBase, JSRootModelBase
from TemplateEngineWrapper import TemplateEngineWrapper
from JumpScale.data.regex.RegexTools import RegexTools
from TextFileEditor import TextFileEditor
from WordReplacer import WordReplacer
# ujson.dumps does not support some arguments like separators, indent ...etc
def isPrimAttribute(obj, key):
if key[-1] == "s":
funcprop = "new_%s" % key[:-1]
else:
funcprop = "new_%s" % key
isprimtype = not hasattr(obj, funcprop)
return isprimtype, funcprop
class Struct:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class CodeTools:
def __init__(self):
self.__jslocation__ = "j.tools.code"
self._templateengine = None
# self.executor = CodeExecutor()
self._regex = None
self._wordreplacer = None
self._codemanager = None
self._texteditor = None
@property
def codemanager(self):
if self._codemanager is None:
from CodeManager import CodeManager
self._codemanager = CodeManager()
return self._codemanager
@property
def regex(self):
if self._regex is None:
self._regex = RegexTools()
return self._regex
@property
def templateengine(self):
if self._templateengine is None:
self._templateengine = TemplateEngineWrapper()
return self._templateengine
@property
def texteditor(self):
if self._texteditor is None:
self._texteditor = TextFileEditor()
return self._texteditor
@property
def wordreplacer(self):
if self._wordreplacer is None:
self._wordreplacer = WordReplacer()
return self._wordreplacer
def textToTitle(self, text, maxnrchars=60):
"""
try to create a title out of the text: ignore irrelevant words, make it lower case and
remove characters that are not needed
"""
ignore = "for in yes no after up down the"
ignoreitems = ignore.split(" ")
keepchars = "abcdefghijklmnopqrstuvwxyz1234567890 "
out = ""
text = text.lower().strip()
for char in text:
if char in keepchars:
out += char
text = out
text = text.replace("  ", " ")
text = text.replace("  ", " ")
out = ""
nr = 0
for item in text.split(" "):
if item not in ignoreitems:
nr += len(item)
if nr < maxnrchars:
out += item + " "
if len(text.split(" ")) > 0:
text = out.strip()
if len(text) > maxnrchars:
text = text[:maxnrchars]
return text
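# Illustrative only (with the space-collapsing above):
#   textToTitle("The Quick Brown Fox!") -> "quick brown fox"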
def classInfoPrint(self, classs):
"""
print info like source code of class
"""
filepath, linenr, sourcecode = self.classInfoGet(classs)
print(("line:%s in path:%s" % (linenr, filepath)))
print(sourcecode)
def classInfoGet(self, classs):
"""
returns filepath,linenr,sourcecode
"""
code, nr = inspect.getsourcelines(classs.__class__)
code = "".join(code)
path = inspect.getsourcefile(classs.__class__)
return path, nr, code
def classEditGeany(self, classs):
"""
look for editor (uses geany) and then edit the file
"""
filepath, linenr, sourcecode = self.classInfoGet(classs)
j.sal.process.executeWithoutPipe("geany %s" % filepath)
def classGetBase(self):
return ClassBase
# def classGetAppserver6GreenletSchedule(self):
# return Appserver6GreenletScheduleBase
# def classGetAppserver6Greenlet(self):
# return Appserver6GreenletBase
# def classGetAppserver6GreenletTasklets(self):
# return Appserver6GreenletTaskletsBase
def dict2object(self, obj, data):
if obj is None:
return Struct(**data)
if hasattr(obj, "_dict2obj"):
return obj._dict2obj(data)
if isinstance(data, dict):
for key, value in list(data.items()):
# is for new obj functionname
objpropname = "%s" % key
if isinstance(value, dict) and isinstance(obj.__dict__[objpropname], dict):
# is a real dict (not a dict as representation of an object)
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
raise j.exceptions.RuntimeError("not supported")
else:
for valkey, valval in list(value.items()):
attr = getattr(obj, key)
attr[valkey] = valval
elif isinstance(data[key], list):
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
method = getattr(obj, funcprop)
for valval in value:
newobj = method()
self.dict2object(newobj, valval)
else:
for valval in value:
attr = getattr(obj, key)
attr.append(valval)
elif isinstance(value, dict) and not isinstance(obj.__dict__[objpropname], dict):
# is a dict which represents another object
raise j.exceptions.RuntimeError("not supported, only 1 level deep objects")
else:
obj.__dict__[objpropname] = value
return obj
else:
return data
def dict2JSModelobject(self, obj, data):
if isinstance(data, dict):
for key, value in list(data.items()):
# is for new obj functionname
objpropname = "_P_%s" % key if not key.startswith('_P_') else key
if isinstance(value, dict) and isinstance(obj.__dict__[objpropname], dict):
# is a real dict (not a dict as representation of an object)
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
method = getattr(obj, funcprop)
for valkey, valval in list(value.items()):
newobj = method(valkey)
self.dict2JSModelobject(newobj, valval)
else:
for valkey, valval in list(value.items()):
attr = getattr(obj, key)
attr[valkey] = valval
elif isinstance(value, list):
if key == '_meta':
# we do not duplicate meta
continue
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
method = getattr(obj, funcprop)
for valval in value:
newobj = method()
self.dict2JSModelobject(newobj, valval)
else:
for valval in value:
attr = getattr(obj, key)
attr.append(valval)
elif isinstance(value, dict) and not isinstance(obj.__dict__[objpropname], dict):
# is a dict which represents another object
obj.__dict__[objpropname] = self.dict2JSModelobject(obj.__dict__[objpropname], value)
else:
obj.__dict__[objpropname] = value
return obj
else:
return data
# def dict2object2(self,d):
# if isinstance(d, dict):
#n = {}
# for item in d:
# if isinstance(d[item], dict):
#n[item] = dict2obj(d[item])
# elif isinstance(d[item], (list, tuple)):
#n[item] = [dict2obj(elem) for elem in d[item]]
# else:
#n[item] = d[item]
# return type('obj_from_dict', (object,), n)
# else:
# return d
def object2dict4index(self, obj):
"""
convert object to a dict
only properties on first level are considered
and properties of basic types like int,str,float,bool,dict,list
ideal to index the basics of an object
"""
result = {}
def toStr(obj, possibleList=True):
if isinstance(obj, (str, int, float, bool)) or obj is None:
return str(obj)
elif possibleList == True and j.data.types.list.check(obj):
r = ""
for item in obj:
rr = toStr(item, possibleList=False)
if rr != "":
r += "%s," % rr
r = r.rstrip(",")
return r
return ""
if isinstance(obj, ClassBase):
for key, value in list(obj.__dict__.items()):
if key[0:3] == "_P_":
key = key[3:]
elif key[0] == "_":
continue
if j.data.types.dict.check(value):
for key2 in list(value.keys()):
r = toStr(value[key2])
if r != "":
result["%s.%s" % (key, key2)] = r
else:
r = toStr(value)
if r != "":
result[key] = r
return result
def object2dict(self, obj, dieOnUnknown=False, ignoreKeys=[], ignoreUnderscoreKeys=False):
if j.data.types.dict.check(obj):
return obj
data = {}
def todict(obj, data, ignoreKeys):
if isinstance(obj, dict):
value = {}
for key in list(obj.keys()):
if key in ignoreKeys:
continue
if ignoreUnderscoreKeys and key and key[0] == "_":
continue
value[key] = todict(obj[key], {}, ignoreKeys)
return value
elif isinstance(obj, (tuple, list)):
value = []
for item in obj:
value.append(todict(item, {}, ignoreKeys))
return value
elif isinstance(obj, str):
return obj.encode('utf8')
elif isinstance(obj, (int, str, float, bool)) or obj is None:
return obj
elif isinstance(obj, bytes) or obj is None:
return obj.decode('utf-8', 'ignore')
elif isinstance(obj, ClassBase):
if hasattr(obj, "_obj2dict"):
return obj._obj2dict()
else:
for key, value in list(obj.__dict__.items()):
if key[0:3] == "_P_":
key = key[3:]
if key in ignoreKeys:
continue
elif ignoreUnderscoreKeys and key[0] == "_":
continue
data[key] = todict(value, {}, ignoreKeys)
return data
else:
#from JumpScale.core.Shell import ipshellDebug,ipshell
# print "DEBUG NOW Can only convert object to dict with properties basic types or inherited of ClassBase"
# ipshell()
if dieOnUnknown:
raise j.exceptions.RuntimeError(
"Can only convert object to dict with properties basic types or inherited of ClassBase")
try:
val = str(value)
except:
val = "__UNKNOWN__"
return val
out = todict(obj, data, ignoreKeys)
# print out
return out
def object2yaml(self, obj):
|
def object2json(self, obj, pretty=False, skiperrors=False, ignoreKeys=[], ignoreUnderscoreKeys=False):
obj = self.object2dict(obj, dieOnUnknown=not skiperrors, ignoreKeys=ignoreKeys,
ignoreUnderscoreKeys=ignoreUnderscoreKeys)
if pretty:
return j.data.serializer.json.dumps(obj, indent=2, sort_keys=True)
else:
return j.data.serializer.json.dumps(obj)
def pprint(self, obj):
result = self.object2yaml(obj)
result = result.replace("!!python/unicode", "")
print(result)
def deIndent(self, content, level=1):
for i in range(0, level):
content = self._deIndent(content)
return content
def indent(self, content, level=1):
if not content:
return content
if content[-1] == "\n":
content = content[:-1]
lines = list()
for line in content.splitlines():
indent = " " * 4 * level
lines.append("%s%s\n" % (indent, line))
return "".join(lines)
def _deIndent(self, content):
# remove garbage & fix indentation
content2 = ""
for line in content.split("\n"):
if line.strip() == "":
content2 += "\n"
else:
if line.find("    ") != 0:
raise j.exceptions.RuntimeError("indentation error for %s." % content)
content2 += "%s\n" % line[4:]
return content2
| return j.data.serializer.yaml.dumps(self.object2dict(obj)) |
parser.rs | use crc16;
use std::cmp::Ordering;
use std::default::Default;
use std::io::{Read, Write};
use xml::reader::{EventReader, XmlEvent};
use quote::{Ident, Tokens};
#[derive(Debug, PartialEq, Clone, Default)]
pub struct MavProfile {
pub includes: Vec<String>,
pub messages: Vec<MavMessage>,
pub enums: Vec<MavEnum>,
}
impl MavProfile {
/// Go over all fields in the messages; whenever a field references an enum,
/// update that enum with information about whether it is a bitmask and, if so,
/// what the desired bit width is.
fn update_enums(mut self) -> Self {
for msg in &self.messages {
for field in &msg.fields {
if let Some(ref enum_name) = field.enumtype {
// it is an enum
if let Some(ref dsp) = field.display {
// it is a bitmask
if dsp == "bitmask" {
// find the corresponding enum
for mut enm in &mut self.enums {
if enm.name == *enum_name {
// this is the right enum
enm.bitfield = Some(field.mavtype.rust_type());
}
}
}
}
}
}
}
self
}
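// Illustrative only: a message field in the MAVLink XML roughly of the form
//   <field type="uint8_t" name="base_mode" enum="MAV_MODE_FLAG" display="bitmask">...</field>
// is what populates `enumtype` and `display`, and therefore marks that enum as a
// bitflags-style enum whose width is the field's Rust type.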
//TODO verify this is no longer necessary since we're supporting both mavlink1 and mavlink2
// ///If we are not using Mavlink v2, remove messages with id's > 254
// fn update_messages(mut self) -> Self {
// //println!("Updating messages");
// let msgs = self.messages.into_iter().filter(
// |x| x.id <= 254).collect::<Vec<MavMessage>>();
// self.messages = msgs;
// self
// }
/// Simple header comment
fn emit_comments(&self) -> Ident {
Ident::from(format!(
"// This file was automatically generated, do not edit \n"
))
}
/// Emit rust messages
fn emit_msgs(&self) -> Vec<Tokens> {
self.messages
.iter()
.map(|d| d.emit_rust())
.collect::<Vec<Tokens>>()
}
/// Emit rust enums
fn emit_enums(&self) -> Vec<Tokens> {
self.enums
.iter()
.map(|d| {
d.emit_rust()
})
.collect::<Vec<Tokens>>()
}
/// Get list of original message names
fn emit_enum_names(&self) -> Vec<Tokens> {
self.messages
.iter()
.map(|msg| {
let name = Ident::from(msg.name.clone());
quote!(#name)
})
.collect::<Vec<Tokens>>()
}
/// Emit message names with "_DATA" at the end
fn emit_struct_names(&self) -> Vec<Tokens> {
self.messages
.iter()
.map(|msg| msg.emit_struct_name())
.collect::<Vec<Tokens>>()
}
/// A list of message IDs
fn emit_msg_ids(&self) -> Vec<Tokens> {
self.messages
.iter()
.map(|msg| {
let id = Ident::from(msg.id.to_string());
quote!(#id)
})
.collect::<Vec<Tokens>>()
}
/// CRC values needed for mavlink parsing
fn emit_msg_crc(&self) -> Vec<Tokens> {
self.messages
.iter()
.map(|msg| {
let crc = Ident::from(extra_crc(&msg).to_string());
quote!(#crc)
})
.collect::<Vec<Tokens>>()
}
fn emit_rust(&self) -> Tokens {
let comment = self.emit_comments();
let msgs = self.emit_msgs();
let enum_names = self.emit_enum_names();
let struct_names = self.emit_struct_names();
let enums = self.emit_enums();
let msg_ids = self.emit_msg_ids();
let msg_crc = self.emit_msg_crc();
let mav_message = self.emit_mav_message(enum_names.clone(), struct_names.clone());
let mav_message_parse =
self.emit_mav_message_parse(enum_names.clone(), struct_names.clone(), msg_ids.clone());
let mav_message_id = self.emit_mav_message_id(enum_names.clone(), msg_ids.clone());
let mav_message_serialize = self.emit_mav_message_serialize(enum_names);
//use id_width of u32 on both v1 and v2, and encode/decode appropriately,
//limiting to u8 on mavlink v1
quote!{
#comment
use bytes::{Buf, BufMut, Bytes, IntoBuf};
use num_derive::FromPrimitive;
use num_traits::FromPrimitive;
use bitflags::bitflags;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
#(#enums)*
#(#msgs)*
#[derive(Clone, PartialEq, Debug)]
#mav_message
impl MavMessage {
#mav_message_parse
#mav_message_id
#mav_message_serialize
pub fn extra_crc(id: u32) -> u8 {
match id {
#(#msg_ids => #msg_crc,)*
_ => 0,
}
}
}
}
}
fn emit_mav_message(&self, enums: Vec<Tokens>, structs: Vec<Tokens>) -> Tokens {
quote!{
pub enum MavMessage {
#(#enums(#structs)),*
}
}
}
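// Illustrative note (sketch only, message names are examples): for messages named
// HEARTBEAT and PING the quote! above expands to roughly
//   pub enum MavMessage { HEARTBEAT(HEARTBEAT_DATA), PING(PING_DATA) }
// i.e. one variant per message, each wrapping its generated *_DATA struct.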
fn emit_mav_message_parse(
&self,
enums: Vec<Tokens>,
structs: Vec<Tokens>,
ids: Vec<Tokens>,
) -> Tokens {
let id_width = Ident::from("u32");
quote!{
pub fn parse(version: MavlinkVersion, id: #id_width, payload: &[u8]) -> Option<MavMessage> {
match id {
#(#ids => Some(MavMessage::#enums(#structs::deser(version, payload).unwrap())),)*
_ => None,
}
}
}
}
fn emit_mav_message_id(&self, enums: Vec<Tokens>, ids: Vec<Tokens>) -> Tokens {
let id_width = Ident::from("u32");
quote!{
pub fn message_id(&self) -> #id_width {
match self {
#(MavMessage::#enums(..) => #ids,)*
}
}
}
}
fn emit_mav_message_serialize(&self, enums: Vec<Tokens>) -> Tokens {
quote!{
pub fn ser(&self) -> Vec<u8> {
match self {
#(&MavMessage::#enums(ref body) => body.ser(),)*
}
}
}
}
}
#[derive(Debug, PartialEq, Clone, Default)]
pub struct MavEnum {
pub name: String,
pub description: Option<String>,
pub entries: Vec<MavEnumEntry>,
/// If this contains Some, the string represents the type width for bitflags
pub bitfield: Option<String>,
}
impl MavEnum {
fn has_enum_values(&self) -> bool {
self.entries.iter().all(|x| x.value.is_some())
}
fn emit_defs(&self) -> Vec<Tokens> {
let mut cnt = 0;
self.entries
.iter()
.map(|enum_entry| {
let name = Ident::from(enum_entry.name.clone());
let value;
if !self.has_enum_values() {
value = Ident::from(cnt.to_string());
cnt += 1;
} else {
value = Ident::from(enum_entry.value.unwrap().to_string());
};
if self.bitfield.is_some() {
quote!(const #name = #value;)
} else {
quote!(#name = #value,)
}
})
.collect::<Vec<Tokens>>()
}
fn emit_name(&self) -> Tokens {
let name = Ident::from(self.name.clone());
quote!(#name)
}
fn emit_rust(&self) -> Tokens {
let defs = self.emit_defs();
let default = Ident::from(self.entries[0].name.clone());
let enum_name = self.emit_name();
let enum_def;
if let Some(width) = self.bitfield.clone() {
let width = Ident::from(width);
enum_def = quote!{
bitflags!{
pub struct #enum_name: #width {
#(#defs)*
}
}
};
} else {
enum_def = quote!{
#[derive(Debug, Copy, Clone, PartialEq, FromPrimitive)]
pub enum #enum_name {
#(#defs)*
}
};
}
quote!{
#enum_def
impl Default for #enum_name {
fn default() -> Self {
#enum_name::#default
}
}
}
}
}
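// Illustrative note (sketch of the emitted shape, names are made up): for an enum
// with bitfield = Some("u16") the template above expands to roughly
//   bitflags! { pub struct SomeFlags: u16 { const FLAG_A = 1; ... } }
// while a plain enum becomes
//   #[derive(Debug, Copy, Clone, PartialEq, FromPrimitive)]
//   pub enum SomeEnum { ENTRY_A = 0, ... }
// and both get a Default impl that selects the first entry.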
#[derive(Debug, PartialEq, Clone, Default)]
pub struct MavEnumEntry {
pub value: Option<i32>,
pub name: String,
pub description: Option<String>,
pub params: Option<Vec<String>>,
}
#[derive(Debug, PartialEq, Clone, Default)]
pub struct MavMessage {
pub id: u32,
pub name: String,
pub description: Option<String>,
pub fields: Vec<MavField>,
}
impl MavMessage {
/// Return Token of "MESSAGE_NAME_DATA
/// for mavlink struct data
fn emit_struct_name(&self) -> Tokens {
let name = Ident::from(format!("{}_DATA", self.name));
quote!(#name)
}
fn emit_name_types(&self) -> (Vec<Tokens>, usize) {
let mut encoded_payload_len: usize = 0;
let field_toks = self.fields
.iter()
.map(|field| {
let nametype = field.emit_name_type();
encoded_payload_len += field.mavtype.len();
#[cfg(feature = "emit-description")]
let description = self.emit_description();
#[cfg(not(feature = "emit-description"))]
let description = Ident::from("");
quote!{
#description
#nametype
}
})
.collect::<Vec<Tokens>>();
(field_toks, encoded_payload_len)
}
/// Generate description for the given message
#[cfg(feature = "emit-description")]
fn emit_description(&self) -> Tokens {
let mut desc = String::from(format!("\n/// id: {}\n", self.id));
if let Some(val) = self.description.clone() {
desc = desc + &format!("/// {}.\n",val);
}
let desc = Ident::from(desc);
quote!(#desc)
}
fn emit_serialize_vars(&self) -> Tokens {
let ser_vars = self.fields.iter()
.map(|f| {
f.rust_writer()
}).collect::<Vec<Tokens>>();
quote!{
let mut _tmp = Vec::new();
#(#ser_vars)*
_tmp
}
}
fn emit_deserialize_vars(&self) -> Tokens {
let deser_vars = self.fields.iter()
.map(|f| {
f.rust_reader()
}).collect::<Vec<Tokens>>();
let encoded_len_name = Ident::from(format!("{}_DATA::ENCODED_LEN", self.name));
if deser_vars.is_empty() {
// struct has no fields
quote!{
Some(Self::default())
}
} else {
quote!{
let avail_len = _input.len();
//fast zero copy
let mut buf = Bytes::from(_input).into_buf();
// handle payload length truncation due to empty fields
if avail_len < #encoded_len_name {
//copy available bytes into an oversized buffer filled with zeros
let mut payload_buf = [0; #encoded_len_name];
payload_buf[0..avail_len].copy_from_slice(_input);
buf = Bytes::from(&payload_buf[..]).into_buf();
}
let mut _struct = Self::default();
#(#deser_vars)*
Some(_struct)
}
}
}
fn emit_rust(&self) -> Tokens {
let msg_name = self.emit_struct_name();
let (name_types, msg_encoded_len) = self.emit_name_types();
let deser_vars = self.emit_deserialize_vars();
let serialize_vars = self.emit_serialize_vars();
#[cfg(feature = "emit-description")]
let description = self.emit_description();
#[cfg(not(feature = "emit-description"))]
let description = Ident::from("");
quote!{
#description
#[derive(Debug, Clone, PartialEq, Default)]
pub struct #msg_name {
#(#name_types)*
}
impl #msg_name {
pub const ENCODED_LEN: usize = #msg_encoded_len;
pub fn deser(version: MavlinkVersion, _input: &[u8]) -> Option<Self> {
#deser_vars
}
pub fn ser(&self) -> Vec<u8> {
#serialize_vars
}
}
}
}
}
#[derive(Debug, PartialEq, Clone, Default)]
pub struct MavField {
pub mavtype: MavType,
pub name: String,
pub description: Option<String>,
pub enumtype: Option<String>,
pub display: Option<String>,
}
impl MavField {
/// Emit rust name of a given field
fn emit_name(&self) -> Tokens {
let name = Ident::from(self.name.clone());
quote!(#name)
}
/// Emit rust type of the field
fn emit_type(&self) -> Tokens {
let mavtype;
if let Some(ref enumname) = self.enumtype {
mavtype = Ident::from(enumname.clone());
} else {
mavtype = Ident::from(self.mavtype.rust_type());
}
quote!(#mavtype)
}
/// Generate description for the given field
#[cfg(feature = "emit-description")]
fn emit_description(&self) -> Tokens {
let mut desc = Vec::new();
if let Some(val) = self.description.clone() {
desc.push(format!("\n/// {}.",val));
}
desc.push("\n".to_string());
let desc: String = desc.iter().map(|s| s.to_string()).collect();
let desc = Ident::from(desc);
quote!(#desc)
}
/// Combine rust name and type of a given field
fn emit_name_type(&self) -> Tokens {
let name = self.emit_name();
let fieldtype = self.emit_type();
quote!(pub #name: #fieldtype,)
}
/// Emit writer
fn rust_writer(&self) -> Tokens {
let mut name = "self.".to_string() + &self.name.clone();
if let Some(_) = &self.enumtype {
if let Some(dsp) = &self.display {
// potentially a bitflag
if dsp == "bitmask" {
// it is a bitflag
name += ".bits()";
} else {
panic!("Display option not implemented");
}
} else {
// an enum, have to use "*foo as u8" cast
name += " as ";
name += &self.mavtype.rust_type();
}
}
let name = Ident::from(name);
let buf = Ident::from("_tmp");
self.mavtype.rust_writer(name, buf)
}
/// Emit reader
fn rust_reader(&self) -> Tokens {
let name = Ident::from("_struct.".to_string() + &self.name.clone());
let buf = Ident::from("buf");
if let Some(enum_name) = &self.enumtype {
if let Some(dsp) = &self.display {
if dsp == "bitmask" {
// bitflags
let tmp = self.mavtype.rust_reader(Ident::from("let tmp"), buf.clone());
let enum_name = Ident::from(enum_name.clone());
quote!{
#tmp
#name = #enum_name::from_bits(tmp).expect("Unexpected enum value.");
}
} else {
panic!("Display option not implemented");
}
} else {
// handle enum by FromPrimitive
let tmp = self.mavtype.rust_reader(Ident::from("let tmp"), buf.clone());
let val = Ident::from("from_".to_string() + &self.mavtype.rust_type());
quote!(
#tmp
#name = FromPrimitive::#val(tmp).expect(&format!("Unexpected enum value {}.",tmp));
)
}
} else {
self.mavtype.rust_reader(name, buf)
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum MavType {
UInt8MavlinkVersion,
UInt8,
UInt16,
UInt32,
UInt64,
Int8,
Int16,
Int32,
Int64,
Char,
Float,
Double,
Array(Box<MavType>, usize),
}
impl Default for MavType {
fn | () -> MavType {
MavType::UInt8
}
}
impl MavType {
fn parse_type(s: &str) -> Option<MavType> {
use self::MavType::*;
match s {
"uint8_t_mavlink_version" => Some(UInt8MavlinkVersion),
"uint8_t" => Some(UInt8),
"uint16_t" => Some(UInt16),
"uint32_t" => Some(UInt32),
"uint64_t" => Some(UInt64),
"int8_t" => Some(Int8),
"int16_t" => Some(Int16),
"int32_t" => Some(Int32),
"int64_t" => Some(Int64),
"char" => Some(Char),
"float" => Some(Float),
"Double" => Some(Double),
_ => {
if s.ends_with("]") {
let start = s.find("[").unwrap();
let size = s[start + 1..(s.len() - 1)].parse::<usize>().unwrap();
let mtype = MavType::parse_type(&s[0..start]).unwrap();
Some(Array(Box::new(mtype), size))
} else {
panic!("UNHANDLED {:?}", s);
}
}
}
}
/// Emit reader of a given type
pub fn rust_reader(&self, val: Ident, buf: Ident) -> Tokens {
use self::MavType::*;
match self.clone() {
Char => quote!{#val = #buf.get_u8() as char;},
UInt8 => quote!{#val = #buf.get_u8();},
UInt16 => quote!{#val = #buf.get_u16_le();},
UInt32 => quote!{#val = #buf.get_u32_le();},
UInt64 => quote!{#val = #buf.get_u64_le();},
UInt8MavlinkVersion => quote!{#val = #buf.get_u8();},
Int8 => quote!{#val = #buf.get_i8();},
Int16 => quote!{#val = #buf.get_i16_le();},
Int32 => quote!{#val = #buf.get_i32_le();},
Int64 => quote!{#val = #buf.get_i64_le();},
Float => quote!{#val = #buf.get_f32_le();},
Double => quote!{#val = #buf.get_f64_le();},
Array(t, size) => {
if size > 32 {
// it is a vector
let r = t.rust_reader(Ident::from("let val"), buf.clone());
quote!{
for _ in 0..#size {
#r
#val.push(val);
}
}
} else {
// handle as a slice
let r = t.rust_reader(Ident::from("let val"), buf.clone());
quote!{
for idx in 0..#val.len() {
#r
#val[idx] = val;
}
}
}
}
}
}
/// Emit writer of a given type
pub fn rust_writer(&self, val: Ident, buf: Ident) -> Tokens {
use self::MavType::*;
match self.clone() {
UInt8MavlinkVersion => quote!{#buf.put_u8(#val);},
UInt8 => quote!{#buf.put_u8(#val);},
Char => quote!{#buf.put_u8(#val as u8);},
UInt16 => quote!{#buf.put_u16_le(#val);},
UInt32 => quote!{#buf.put_u32_le(#val);},
Int8 => quote!{#buf.put_i8(#val);},
Int16 => quote!{#buf.put_i16_le(#val);},
Int32 => quote!{#buf.put_i32_le(#val);},
Float => quote!{#buf.put_f32_le(#val);},
UInt64 => quote!{#buf.put_u64_le(#val);},
Int64 => quote!{#buf.put_i64_le(#val);},
Double => quote!{#buf.put_f64_le(#val);},
Array(t,_size) => {
let w = t.rust_writer(Ident::from("*val"), buf.clone());
quote!{
#buf.put_u8(#val.len() as u8);
for val in &#val {
#w
}
}
},
}
}
/// Size of a given Mavtype
fn len(&self) -> usize {
use self::MavType::*;
match self.clone() {
UInt8MavlinkVersion | UInt8 | Int8 | Char => 1,
UInt16 | Int16 => 2,
UInt32 | Int32 | Float => 4,
UInt64 | Int64 | Double => 8,
Array(t, size) => t.len() * size,
}
}
/// Used for ordering of types
fn order_len(&self) -> usize {
use self::MavType::*;
match self.clone() {
UInt8MavlinkVersion | UInt8 | Int8 | Char => 1,
UInt16 | Int16 => 2,
UInt32 | Int32 | Float => 4,
UInt64 | Int64 | Double => 8,
Array(t, _) => t.len(),
}
}
/// Used for crc calculation
pub fn primitive_type(&self) -> String {
use self::MavType::*;
match self.clone() {
UInt8MavlinkVersion => "uint8_t".into(),
UInt8 => "uint8_t".into(),
Int8 => "int8_t".into(),
Char => "char".into(),
UInt16 => "uint16_t".into(),
Int16 => "int16_t".into(),
UInt32 => "uint32_t".into(),
Int32 => "int32_t".into(),
Float => "float".into(),
UInt64 => "uint64_t".into(),
Int64 => "int64_t".into(),
Double => "double".into(),
Array(t, _) => t.primitive_type(),
}
}
/// Return rust equivalent of a given Mavtype
/// Used for generating struct fields.
pub fn rust_type(&self) -> String {
use self::MavType::*;
match self.clone() {
UInt8 | UInt8MavlinkVersion => "u8".into(),
Int8 => "i8".into(),
Char => "char".into(),
UInt16 => "u16".into(),
Int16 => "i16".into(),
UInt32 => "u32".into(),
Int32 => "i32".into(),
Float => "f32".into(),
UInt64 => "u64".into(),
Int64 => "i64".into(),
Double => "f64".into(),
Array(t, size) => {
if size > 32 {
// we have to use a vector to make our lives easier
format!("Vec<{}> /* {} elements */", t.rust_type(), size)
} else {
// we can use a fixed-size array, as Rust derives a lot of things for arrays of <= 32 elements
format!("[{};{}]", t.rust_type(), size)
}
},
}
}
/// Compare two MavTypes
pub fn compare(&self, other: &Self) -> Ordering {
let len = self.order_len();
(-(len as isize)).cmp(&(-(other.order_len() as isize)))
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum MavXmlElement {
Version,
Mavlink,
Include,
Enums,
Enum,
Entry,
Description,
Param,
Messages,
Message,
Field,
Deprecated,
Wip,
Extensions,
}
fn identify_element(s: &str) -> Option<MavXmlElement> {
use self::MavXmlElement::*;
match s {
"version" => Some(Version),
"mavlink" => Some(Mavlink),
"include" => Some(Include),
"enums" => Some(Enums),
"enum" => Some(Enum),
"entry" => Some(Entry),
"description" => Some(Description),
"param" => Some(Param),
"messages" => Some(Messages),
"message" => Some(Message),
"field" => Some(Field),
"deprecated" => Some(Deprecated),
"wip" => Some(Wip),
"extensions" => Some(Extensions),
_ => None,
}
}
fn is_valid_parent(p: Option<MavXmlElement>, s: MavXmlElement) -> bool {
use self::MavXmlElement::*;
match s {
Version => p == Some(Mavlink),
Mavlink => p == None,
Include => p == Some(Mavlink),
Enums => p == Some(Mavlink),
Enum => p == Some(Enums),
Entry => p == Some(Enum),
Description => p == Some(Entry) || p == Some(Message) || p == Some(Enum),
Param => p == Some(Entry),
Messages => p == Some(Mavlink),
Message => p == Some(Messages),
Field => p == Some(Message),
Deprecated => p == Some(Entry) || p == Some(Message) || p == Some(Enum),
Wip => p == Some(Entry) || p == Some(Message) || p == Some(Enum),
Extensions => p == Some(Message),
}
}
pub fn parse_profile(file: &mut Read) -> MavProfile {
let mut stack: Vec<MavXmlElement> = vec![];
let mut profile = MavProfile {
includes: vec![],
messages: vec![],
enums: vec![],
};
let mut field = MavField::default();
let mut message = MavMessage::default();
let mut mavenum = MavEnum::default();
let mut entry = MavEnumEntry::default();
let mut paramid: Option<usize> = None;
let parser = EventReader::new(file);
for e in parser {
match e {
Ok(XmlEvent::StartElement {
name,
attributes: attrs,
..
}) => {
let id = match identify_element(&name.to_string()) {
None => {
panic!("unexpected element {:?}", name);
}
Some(kind) => kind,
};
if !is_valid_parent(
match stack.last().clone() {
Some(arg) => Some(arg.clone()),
None => None,
},
id.clone(),
) {
panic!("not valid parent {:?} of {:?}", stack.last(), id);
}
match id {
MavXmlElement::Message => {
message = Default::default();
}
MavXmlElement::Field => {
field = Default::default();
}
MavXmlElement::Enum => {
mavenum = Default::default();
}
MavXmlElement::Entry => {
entry = Default::default();
}
MavXmlElement::Param => {
paramid = None;
}
_ => (),
}
stack.push(id);
for attr in attrs {
match stack.last() {
Some(&MavXmlElement::Enum) => match attr.name.local_name.clone().as_ref() {
"name" => {
mavenum.name =
attr.value
.clone()
.split("_")
.map(|x| x.to_lowercase())
.map(|x| {
let mut v: Vec<char> = x.chars().collect();
v[0] = v[0].to_uppercase().nth(0).unwrap();
v.into_iter().collect()
})
.collect::<Vec<String>>()
.join("");
//mavenum.name = attr.value.clone();
}
_ => (),
},
Some(&MavXmlElement::Entry) => {
match attr.name.local_name.clone().as_ref() {
"name" => {
entry.name = attr.value.clone();
}
"value" => {
entry.value = Some(attr.value.parse::<i32>().unwrap());
}
_ => (),
}
}
Some(&MavXmlElement::Message) => {
match attr.name.local_name.clone().as_ref() {
"name" => {
/*message.name = attr
.value
.clone()
.split("_")
.map(|x| x.to_lowercase())
.map(|x| {
let mut v: Vec<char> = x.chars().collect();
v[0] = v[0].to_uppercase().nth(0).unwrap();
v.into_iter().collect()
})
.collect::<Vec<String>>()
.join("");
*/
message.name = attr.value.clone();
}
"id" => {
//message.id = attr.value.parse::<u8>().unwrap();
message.id = attr.value.parse::<u32>().unwrap();
}
_ => (),
}
}
Some(&MavXmlElement::Field) => {
match attr.name.local_name.clone().as_ref() {
"name" => {
field.name = attr.value.clone();
if field.name == "type" {
field.name = "mavtype".to_string();
}
}
"type" => {
field.mavtype = MavType::parse_type(&attr.value).unwrap();
}
"enum" => {
field.enumtype = Some(
attr.value
.clone()
.split("_")
.map(|x| x.to_lowercase())
.map(|x| {
let mut v: Vec<char> = x.chars().collect();
v[0] = v[0].to_uppercase().nth(0).unwrap();
v.into_iter().collect()
})
.collect::<Vec<String>>()
.join(""),
);
//field.enumtype = Some(attr.value.clone());
}
"display" => {
field.display = Some(attr.value);
}
_ => (),
}
}
Some(&MavXmlElement::Param) => {
if let None = entry.params {
entry.params = Some(vec![]);
}
match attr.name.local_name.clone().as_ref() {
"index" => {
paramid = Some(attr.value.parse::<usize>().unwrap());
}
_ => (),
}
}
_ => (),
}
}
}
Ok(XmlEvent::Characters(s)) => {
use self::MavXmlElement::*;
match (stack.last(), stack.get(stack.len() - 2)) {
(Some(&Description), Some(&Message)) => {
message.description = Some(s.replace("\n", " "));
}
(Some(&Field), Some(&Message)) => {
field.description = Some(s.replace("\n", " "));
}
(Some(&Description), Some(&Enum)) => {
mavenum.description = Some(s.replace("\n", " "));
}
(Some(&Description), Some(&Entry)) => {
entry.description = Some(s.replace("\n", " "));
}
(Some(&Param), Some(&Entry)) => {
if let Some(ref mut params) = entry.params {
params.insert(paramid.unwrap() - 1, s);
}
}
(Some(&Include), Some(&Mavlink)) => {
println!("TODO: include {:?}", s);
}
(Some(&Version), Some(&Mavlink)) => {
println!("TODO: version {:?}", s);
}
(Some(Deprecated), _) => {
println!("TODO: deprecated {:?}", s);
}
data => {
panic!("unexpected text data {:?} reading {:?}", data, s);
}
}
}
Ok(XmlEvent::EndElement { .. }) => {
match stack.last() {
Some(&MavXmlElement::Field) => message.fields.push(field.clone()),
Some(&MavXmlElement::Entry) => {
mavenum.entries.push(entry.clone());
}
Some(&MavXmlElement::Message) => {
// println!("message: {:?}", message);
let mut msg = message.clone();
msg.fields.sort_by(|a, b| a.mavtype.compare(&b.mavtype));
profile.messages.push(msg);
}
Some(&MavXmlElement::Enum) => {
profile.enums.push(mavenum.clone());
}
_ => (),
}
stack.pop();
// println!("{}-{}", indent(depth), name);
}
Err(e) => {
println!("Error: {}", e);
break;
}
_ => {}
}
}
//let profile = profile.update_messages(); //TODO verify no longer needed
profile.update_enums()
}
/// Generate rust representation of mavlink message set with appropriate conversion methods
pub fn generate<R: Read, W: Write>(input: &mut R, output_rust: &mut W) {
let profile = parse_profile(input);
// rust file
let rust_tokens = profile.emit_rust();
//writeln!(output_rust, "{}", rust_tokens).unwrap();
let rust_src = rust_tokens.into_string();
let mut cfg = rustfmt::config::Config::default();
cfg.set().write_mode(rustfmt::config::WriteMode::Display);
rustfmt::format_input(rustfmt::Input::Text(rust_src), &cfg, Some(output_rust)).unwrap();
}
/// CRC operates over names of the message and names of its fields
/// Hence we have to preserve the original uppercase names delimited with an underscore
/// For field names, we replace "type" with "mavtype" to make it rust compatible (this is
/// needed for generating sensible rust code), but for calculating the crc we have to
/// use the original name "type"
pub fn extra_crc(msg: &MavMessage) -> u8 {
// calculate an 8-bit checksum of the key fields of a message, so we
// can detect incompatible XML changes
let mut crc = crc16::State::<crc16::MCRF4XX>::new();
crc.update(msg.name.as_bytes());
crc.update(" ".as_bytes());
let mut f = msg.fields.clone();
f.sort_by(|a, b| a.mavtype.compare(&b.mavtype));
for field in &f {
crc.update(field.mavtype.primitive_type().as_bytes());
crc.update(" ".as_bytes());
if field.name == "mavtype" {
crc.update("type".as_bytes());
} else {
crc.update(field.name.as_bytes());
}
crc.update(" ".as_bytes());
if let MavType::Array(_, size) = field.mavtype {
crc.update(&[size as u8]);
}
}
let crcval = crc.get();
((crcval & 0xFF) ^ (crcval >> 8)) as u8
}
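// Illustrative note (worked example, the value is made up): the fold above collapses
// the 16-bit MCRF4XX CRC into the single "extra CRC" byte used by MAVLink, e.g.
// crcval = 0xABCD  ->  (0xCD ^ 0xAB) as u8 == 0x66.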
| default |
prism-javastacktrace.min.js | /// BareSpecifier=prismjs/components/prism-javastacktrace.min | Prism.languages.javastacktrace = { summary: { pattern: /^[\t ]*(?:(?:Caused by:|Suppressed:|Exception in thread "[^"]*")[\t ]+)?[\w$.]+(?:\:.*)?$/m, inside: { keyword: { pattern: /^(\s*)(?:(?:Caused by|Suppressed)(?=:)|Exception in thread)/m, lookbehind: !0 }, string: { pattern: /^(\s*)"[^"]*"/, lookbehind: !0 }, exceptions: { pattern: /^(:?\s*)[\w$.]+(?=:|$)/, lookbehind: !0, inside: { "class-name": /[\w$]+(?=$|:)/, namespace: /[a-z]\w*/, punctuation: /[.:]/ } }, message: { pattern: /(:\s*)\S.*/, lookbehind: !0, alias: "string" }, punctuation: /[:]/ } }, "stack-frame": { pattern: /^[\t ]*at (?:[\w$./]|@[\w$.+-]*\/)+(?:<init>)?\([^()]*\)/m, inside: { keyword: { pattern: /^(\s*)at(?= )/, lookbehind: !0 }, source: [{ pattern: /(\()\w+\.\w+:\d+(?=\))/, lookbehind: !0, inside: { file: /^\w+\.\w+/, punctuation: /:/, "line-number": { pattern: /\d+/, alias: "number" } } }, { pattern: /(\()[^()]*(?=\))/, lookbehind: !0, inside: { keyword: /^(?:Unknown Source|Native Method)$/ } }], "class-name": /[\w$]+(?=\.(?:<init>|[\w$]+)\()/, function: /(?:<init>|[\w$]+)(?=\()/, "class-loader": { pattern: /(\s)[a-z]\w*(?:\.[a-z]\w*)*(?=\/[\w@$.]*\/)/, lookbehind: !0, alias: "namespace", inside: { punctuation: /\./ } }, module: { pattern: /([\s/])[a-z]\w*(?:\.[a-z]\w*)*(?:@[\w$.+-]*)?(?=\/)/, lookbehind: !0, inside: { version: { pattern: /(@)[\s\S]+/, lookbehind: !0, alias: "number" }, punctuation: /[@.]/ } }, namespace: { pattern: /(?:[a-z]\w*\.)+/, inside: { punctuation: /\./ } }, punctuation: /[()/.]/ } }, more: { pattern: /^[\t ]*\.{3} \d+ [a-z]+(?: [a-z]+)*/m, inside: { punctuation: /\.{3}/, number: /\d+/, keyword: /\b[a-z]+(?: [a-z]+)*\b/ } } }; |
|
searchJeuxDeMots.py | #!/usr/sfw/bin/python
# -*- coding: utf-8 -*-
#C:\python27\python.exe C:\Dropbox\Work\2012ExpressionsComposees\CreateGraph.py
import sys, os, re, string, time
from math import *
#------------------------------
# Loading the parameters
#------------------------------
args={}
i=1;
selectedRelations = {}
selectedRelations[6] = "r_isa"
selectedRelations[9] = "r_has_part"
selectedRelations[16] = "r_instr"
selectedRelations[17] = "r_carac"
selectedRelations[23] = "r_carac-1"
selectedRelations[15] = "r_lieu"
selectedRelations[24] = "r_agent-1"
selectedRelations[26] = "r_patient-1"
selectedRelations[41] = "r_conseq"
selectedRelations[53] = "r_make"
inputFolder = os.path.abspath(os.path.dirname(sys.argv[0]))
# Address of the tagged text containing (almost) all text files of the Hackathon:
inputTaggedTexts = inputFolder + "\\tagged.txt"
# Address of the JeuxDeMots data file
# huge one :
#inputJeuxDeMots = inputFolder + "\\09032017-LEXICALNET-JEUXDEMOTS-FR-NOHTML.txt";
# big one :
#inputJeuxDeMots = inputFolder + "\\06252017-LEXICALNET-JEUXDEMOTS-FR-NOHTML.txt";
# small one :
inputJeuxDeMots = inputFolder + "\\08152011-LEXICALNET-JEUXDEMOTS-FR-NOHTML.txt";
letters = {}
letters["a"] = 1
letters["b"] = 1
letters["c"] = 1
letters["d"] = 1
letters["e"] = 1
letters["f"] = 1
letters["g"] = 1
letters["h"] = 1
letters["i"] = 1
letters["j"] = 1
letters["k"] = 1
letters["l"] = 1
letters["m"] = 1
letters["n"] = 1
letters["o"] = 1
letters["p"] = 1
letters["q"] = 1
letters["r"] = 1
letters["s"] = 1
letters["t"] = 1
letters["u"] = 1
letters["v"] = 1
letters["w"] = 1
letters["x"] = 1
letters["y"] = 1
letters["z"] = 1
replacements = {}
replacements["æ"] = "ae"
replacements["à"] = "a"
replacements["á"] = "a"
replacements["á"] = "a"
replacements["ã"] = "a"
replacements["ä"] = "a"
replacements["â"] = "a"
replacements["ç"] = "c"
replacements["é"] = "e"
replacements["è"] = "e"
replacements["ë"] = "e"
replacements["ê"] = "e"
replacements["ï"] = "i"
replacements["î"] = "i"
replacements["ì"] = "i"
replacements["ñ"] = "n"
replacements["ô"] = "o"
replacements["ö"] = "o"
replacements["ó"] = "o"
replacements["œ"] = "oe"
replacements["ü"] = "u"
replacements["ù"] = "u"
replacements["ú"] = "u"
def removeAccent(word, replacements):
for letter in replacements:
word = word.replace(letter, replacements[letter])
return word
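# Illustrative examples (using the replacement table above):
#   removeAccent("élève", replacements) -> "eleve"
#   removeAccent("cœur", replacements) -> "coeur"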
def readFile(inputJeuxDeMots, inputFolder, inputTaggedTexts, replacements, letters):
allWords = {}
i = 0
# Associate all word indices with words in a dictionary
try :
for line in open(inputJeuxDeMots,"r"):
if i % 1000 == 0:
print("ligne "+str(i))
i+=1
# only take words with t=1 (real words)
res = re.search("eid=([0-9]*).n=.(.+)..t=1.w=([0-9]*).*",line)
if res:
id = res.group(1)
word = res.group(2)
# only take words whose first character is a letter
firstLetter = word[0].lower()
weight = int(res.group(3))
if firstLetter in letters or firstLetter in replacements:
allWords[id] = word
except ValueError as e:
print(str(e))
pass
# Create a dictionary of the neighborhoods of all words according to the relations in selectedRelations
if 0 == 0:
i = 0
nbRelations = 0
neighbors = {}
for line in open(inputJeuxDeMots,"r"):
if i % 1000 == 0:
print("ligne "+str(i))
i+=1
# extract the edges of the graph, including type and weight
res = re.search("rid=([0-9]*).n1=([0-9]*).n2=([0-9]*).t=([0-9]*).w=([0-9]+).*",line)
if res:
try :
id1 = res.group(2)
id2 = res.group(3)
type = int(res.group(4))
weight = int(res.group(5))
edgeInfo = []
edgeInfo.append(type)
edgeInfo.append(weight)
# if the relation has positive weight, is of one of the expected types
# and links two indexed words, we memorize it by saving its weight and type in a dict of dict
if (weight>0) and (type in selectedRelations) and (id1 in allWords) and (id2 in allWords):
firstWord = allWords[id1]
secondWord = allWords[id2]
if firstWord not in neighbors:
neighbors[firstWord] = {}
neighbors[firstWord][secondWord] = edgeInfo
nbRelations += 1
#print(str(nbRelations) + "relations")
except ValueError as e:
print(str(e) + line)
pass
print(str(nbRelations) + "relations")
# Extract all sentences of the tagged text, then check which words are indexed (themselves or their lemma) in JeuxDeMots
# and are in relation in JeuxDeMots
sentence = []
results = []
sentenceString = ""
for line in open(inputTaggedTexts,"r"):
res = re.search("([^;]+);([^;]+);([^;]+)",line)
if res:
token = res.group(1)
lemma = res.group(2)
pos = res.group(3)
position = []
position.append(token)
position.append(lemma)
# if the sentence is finished:
if token[0] == token[0].upper():
# check for each pair of tokens whether it is in the dict of relations of JeuxDeMots
for loc1 in sentence:
for loc2 in sentence:
if not (loc1 == loc2):
word1 = ""
word2 = ""
if (loc1[0] in neighbors and loc2[0] in neighbors[loc1[0]]):
word1 = loc1[0]
word2 = loc2[0]
if (loc1[1] in neighbors and loc2[0] in neighbors[loc1[1]]):
word1 = loc1[1]
word2 = loc2[0]
if (loc1[0] in neighbors and loc2[1] in neighbors[loc1[0]]):
word1 = loc1[0]
word2 = loc2[1]
if (loc1[1] in neighbors and loc2[1] in neighbors[loc1[1]]):
word1 = loc1[1]
word2 = loc2[1]
if len(word1) > 0:
result = []
#print(word1+" found! ")
result.append(word1)
result.append(word2)
result.append(selectedRelations[neighbors[word1][word2][0]])
result.append(sentenceString) | sentence = []
sentenceString = ""
if position[0] in neighbors or position[1] in neighbors :
sentence.append(position)
sentenceString += token+" "
outputFile = open(inputTaggedTexts+".output.txt","w")
for result in results:
for element in result:
outputFile.writelines(element+";")
outputFile.writelines("\n")
outputFile.close()
readFile(inputJeuxDeMots, inputFolder, inputTaggedTexts, replacements, letters) | results.append(result) |
test_utils.py | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Various small unit tests
import io
import json
import xml.etree.ElementTree
from youtube_dlc.utils import (
age_restricted,
args_to_str,
encode_base_n,
caesar,
clean_html,
clean_podcast_url,
date_from_str,
DateRange,
detect_exe_version,
determine_ext,
dict_get,
encode_compat_str,
encodeFilename,
escape_rfc3986,
escape_url,
extract_attributes,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
get_element_by_class,
get_element_by_attribute,
get_elements_by_class,
get_elements_by_attribute,
InAdvancePagedList,
int_or_none,
intlist_to_bytes,
is_html,
js_to_json,
limit_length,
merge_dicts,
mimetype2ext,
month_by_name,
multipart_encode,
ohdave_rsa_encrypt,
OnDemandPagedList,
orderedSet,
parse_age_limit,
parse_duration,
parse_filesize,
parse_count,
parse_iso8601,
parse_resolution,
parse_bitrate,
pkcs1pad,
read_batch_urls,
sanitize_filename,
sanitize_path,
sanitize_url,
expand_path,
prepend_extension,
replace_extension,
remove_start,
remove_end,
remove_quotes,
rot47,
shell_quote,
smuggle_url,
str_to_int,
strip_jsonp,
strip_or_none,
subtitles_filename,
timeconvert,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
uppercase_escape,
lowercase_escape,
url_basename,
url_or_none,
base_url,
urljoin,
urlencode_postdata,
urshift,
update_url_query,
version_tuple,
xpath_with_ns,
xpath_element,
xpath_text,
xpath_attr,
render_table,
match_str,
parse_dfxp_time_expr,
dfxp2srt,
cli_option,
cli_valueless_option,
cli_bool_option,
parse_codecs,
)
from youtube_dlc.compat import (
compat_chr,
compat_etree_fromstring,
compat_getenv,
compat_os_name,
compat_setenv,
compat_urlparse,
compat_parse_qs,
)
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
self.assertTrue(timeconvert('') is None)
self.assertTrue(timeconvert('bougrg') is None)
def test_sanitize_filename(self):
self.assertEqual(sanitize_filename('abc'), 'abc')
self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
self.assertEqual(sanitize_filename('123'), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de'))
self.assertFalse('/' in sanitize_filename('abc/de///'))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
self.assertEqual('yes no', sanitize_filename('yes? no'))
self.assertEqual('this - that', sanitize_filename('this: that'))
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
aumlaut = 'ä'
self.assertEqual(sanitize_filename(aumlaut), aumlaut)
tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
self.assertEqual(sanitize_filename(tests), tests)
self.assertEqual(
sanitize_filename('New World record at 0:12:34'),
'New World record at 0_12_34')
self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf')
self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf')
self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
forbidden = '"\0\\/'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
self.assertEqual(sanitize_filename('123', restricted=True), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
tests = 'aäb\u4e2d\u56fd\u7684c'
self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c')
self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
# Handle a common case more neatly
self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
# .. but make sure the file name is never empty
self.assertTrue(sanitize_filename('-', restricted=True) != '')
self.assertTrue(sanitize_filename(':', restricted=True) != '')
self.assertEqual(sanitize_filename(
'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True),
'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYTHssaaaaaaaeceeeeiiiionooooooooeuuuuuythy')
def test_sanitize_ids(self):
self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
def test_sanitize_path(self):
if sys.platform != 'win32':
return
self.assertEqual(sanitize_path('abc'), 'abc')
self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
self.assertEqual(sanitize_path('abc|def'), 'abc#def')
self.assertEqual(sanitize_path('<>:"|?*'), '#######')
self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')
self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(
sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')
self.assertEqual(
sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
self.assertEqual(sanitize_path('../abc'), '..\\abc')
self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
self.assertEqual(sanitize_path('./abc'), 'abc')
self.assertEqual(sanitize_path('./../abc'), '..\\abc')
def test_sanitize_url(self):
self.assertEqual(sanitize_url('//foo.bar'), 'http://foo.bar')
self.assertEqual(sanitize_url('httpss://foo.bar'), 'https://foo.bar')
self.assertEqual(sanitize_url('rmtps://foo.bar'), 'rtmps://foo.bar')
self.assertEqual(sanitize_url('https://foo.bar'), 'https://foo.bar')
def test_expand_path(self):
def env(var):
return '%{0}%'.format(var) if sys.platform == 'win32' else '${0}'.format(var)
compat_setenv('youtube_dlc_EXPATH_PATH', 'expanded')
self.assertEqual(expand_path(env('youtube_dlc_EXPATH_PATH')), 'expanded')
self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME'))
self.assertEqual(expand_path('~'), compat_getenv('HOME'))
self.assertEqual(
expand_path('~/%s' % env('youtube_dlc_EXPATH_PATH')),
'%s/expanded' % compat_getenv('HOME'))
def test_prepend_extension(self):
self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext')
self.assertEqual(prepend_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
self.assertEqual(prepend_extension('abc', 'temp'), 'abc.temp')
self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext')
def test_replace_extension(self):
self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp')
self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp')
self.assertEqual(replace_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
self.assertEqual(replace_extension('abc', 'temp'), 'abc.temp')
self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')
def test_subtitles_filename(self):
self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt'), 'abc.en.vtt')
self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt', 'ext'), 'abc.en.vtt')
self.assertEqual(subtitles_filename('abc.unexpected_ext', 'en', 'vtt', 'ext'), 'abc.unexpected_ext.en.vtt')
def test_remove_start(self):
self.assertEqual(remove_start(None, 'A - '), None)
self.assertEqual(remove_start('A - B', 'A - '), 'B')
self.assertEqual(remove_start('B - A', 'A - '), 'B - A')
def test_remove_end(self):
self.assertEqual(remove_end(None, ' - B'), None)
self.assertEqual(remove_end('A - B', ' - B'), 'A')
self.assertEqual(remove_end('B - A', ' - B'), 'B - A')
def test_remove_quotes(self):
self.assertEqual(remove_quotes(None), None)
self.assertEqual(remove_quotes('"'), '"')
self.assertEqual(remove_quotes("'"), "'")
self.assertEqual(remove_quotes(';'), ';')
self.assertEqual(remove_quotes('";'), '";')
self.assertEqual(remove_quotes('""'), '')
self.assertEqual(remove_quotes('";"'), ';')
def test_ordered_set(self):
self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(orderedSet([]), [])
self.assertEqual(orderedSet([1]), [1])
# keep the list ordered
self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
def test_unescape_html(self):
self.assertEqual(unescapeHTML('%20;'), '%20;')
self.assertEqual(unescapeHTML('/'), '/')
self.assertEqual(unescapeHTML('/'), '/')
self.assertEqual(unescapeHTML('é'), 'é')
self.assertEqual(unescapeHTML('�'), '�')
self.assertEqual(unescapeHTML('&a"'), '&a"')
# HTML5 entities
self.assertEqual(unescapeHTML('.''), '.\'')
def test_date_from_str(self):
self.assertEqual(date_from_str('yesterday'), date_from_str('now-1day'))
self.assertEqual(date_from_str('now+7day'), date_from_str('now+1week'))
self.assertEqual(date_from_str('now+14day'), date_from_str('now+2week'))
self.assertEqual(date_from_str('now+365day'), date_from_str('now+1year'))
self.assertEqual(date_from_str('now+30day'), date_from_str('now+1month'))
def test_daterange(self):
_20century = DateRange("19000101", "20000101")
self.assertFalse("17890714" in _20century)
_ac = DateRange("00010101")
self.assertTrue("19690721" in _ac)
_firstmilenium = DateRange(end="10000101")
self.assertTrue("07110427" in _firstmilenium)
def test_unified_dates(self):
self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
self.assertEqual(unified_strdate('8/7/2009'), '20090708')
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
self.assertEqual(unified_strdate('1968 12 10'), '19681210')
self.assertEqual(unified_strdate('1968-12-10'), '19681210')
self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
self.assertEqual(
unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
'20141126')
self.assertEqual(
unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
'20150202')
self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
self.assertEqual(unified_strdate('25-09-2014'), '20140925')
self.assertEqual(unified_strdate('27.02.2016 17:30'), '20160227')
self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
self.assertEqual(unified_strdate('Feb 7, 2016 at 6:35 pm'), '20160207')
self.assertEqual(unified_strdate('July 15th, 2013'), '20130715')
self.assertEqual(unified_strdate('September 1st, 2013'), '20130901')
self.assertEqual(unified_strdate('Sep 2nd, 2013'), '20130902')
self.assertEqual(unified_strdate('November 3rd, 2019'), '20191103')
self.assertEqual(unified_strdate('October 23rd, 2005'), '20051023')
def test_unified_timestamps(self):
self.assertEqual(unified_timestamp('December 21, 2010'), 1292889600)
self.assertEqual(unified_timestamp('8/7/2009'), 1247011200)
self.assertEqual(unified_timestamp('Dec 14, 2012'), 1355443200)
self.assertEqual(unified_timestamp('2012/10/11 01:56:38 +0000'), 1349920598)
self.assertEqual(unified_timestamp('1968 12 10'), -33436800)
self.assertEqual(unified_timestamp('1968-12-10'), -33436800)
self.assertEqual(unified_timestamp('28/01/2014 21:00:00 +0100'), 1390939200)
self.assertEqual(
unified_timestamp('11/26/2014 11:30:00 AM PST', day_first=False),
1417001400)
self.assertEqual(
unified_timestamp('2/2/2015 6:47:40 PM', day_first=False),
1422902860)
self.assertEqual(unified_timestamp('Feb 14th 2016 5:45PM'), 1455471900)
self.assertEqual(unified_timestamp('25-09-2014'), 1411603200)
self.assertEqual(unified_timestamp('27.02.2016 17:30'), 1456594200)
self.assertEqual(unified_timestamp('UNKNOWN DATE FORMAT'), None)
self.assertEqual(unified_timestamp('May 16, 2016 11:15 PM'), 1463440500)
self.assertEqual(unified_timestamp('Feb 7, 2016 at 6:35 pm'), 1454870100)
self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361)
self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540)
self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140)
self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363)
def test_determine_ext(self):
self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None)
self.assertEqual(determine_ext('http://example.com/foo/bar.nonext/?download', None), None)
self.assertEqual(determine_ext('http://example.com/foo/bar/mp4?download', None), None)
self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8')
self.assertEqual(determine_ext('foobar', None), None)
def test_find_xpath_attr(self):
testxml = '''<root>
<node/>
<node x="a"/>
<node x="a" y="c" />
<node x="b" y="d" />
<node x="" />
</root>'''
doc = compat_etree_fromstring(testxml)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])
def test_xpath_with_ns(self):
testxml = '''<root xmlns:media="http://example.com/">
<media:song>
<media:author>The Author</media:author>
<url>http://server.com/download.mp3</url>
</media:song>
</root>'''
doc = compat_etree_fromstring(testxml)
find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
self.assertTrue(find('media:song') is not None)
self.assertEqual(find('media:song/media:author').text, 'The Author')
self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_xpath_element(self):
doc = xml.etree.ElementTree.Element('root')
div = xml.etree.ElementTree.SubElement(doc, 'div')
p = xml.etree.ElementTree.SubElement(div, 'p')
p.text = 'Foo'
self.assertEqual(xpath_element(doc, 'div/p'), p)
self.assertEqual(xpath_element(doc, ['div/p']), p)
self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p)
self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default')
self.assertTrue(xpath_element(doc, 'div/bar') is None)
self.assertTrue(xpath_element(doc, ['div/bar']) is None)
self.assertTrue(xpath_element(doc, ['div/bar'], 'div/baz') is None)
self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True)
self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True)
def test_xpath_text(self):
testxml = '''<root>
<div>
<p>Foo</p>
</div>
</root>'''
doc = compat_etree_fromstring(testxml)
self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
self.assertTrue(xpath_text(doc, 'div/bar') is None)
self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
def test_xpath_attr(self):
testxml = '''<root>
<div>
<p x="a">Foo</p>
</div>
</root>'''
doc = compat_etree_fromstring(testxml)
self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
def test_smuggle_url(self):
data = {"ö": "ö", "abc": [3]}
url = 'https://foo.bar/baz?x=y#a'
smug_url = smuggle_url(url, data)
unsmug_url, unsmug_data = unsmuggle_url(smug_url)
self.assertEqual(url, unsmug_url)
self.assertEqual(data, unsmug_data)
res_url, res_data = unsmuggle_url(url)
self.assertEqual(res_url, url)
self.assertEqual(res_data, None)
smug_url = smuggle_url(url, {'a': 'b'})
smug_smug_url = smuggle_url(smug_url, {'c': 'd'})
res_url, res_data = unsmuggle_url(smug_smug_url)
self.assertEqual(res_url, url)
self.assertEqual(res_data, {'a': 'b', 'c': 'd'})
def test_shell_quote(self):
args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
self.assertEqual(
shell_quote(args),
"""ffmpeg -i 'ñ€ß'"'"'.mp4'""" if compat_os_name != 'nt' else '''ffmpeg -i "ñ€ß'.mp4"''')
def test_float_or_none(self):
self.assertEqual(float_or_none('42.42'), 42.42)
self.assertEqual(float_or_none('42'), 42.0)
self.assertEqual(float_or_none(''), None)
self.assertEqual(float_or_none(None), None)
self.assertEqual(float_or_none([]), None)
self.assertEqual(float_or_none(set()), None)
def test_int_or_none(self):
self.assertEqual(int_or_none('42'), 42)
self.assertEqual(int_or_none(''), Non | 56)
self.assertEqual(str_to_int('123.456'), 123456)
self.assertEqual(str_to_int(523), 523)
# Python 3 has no long
if sys.version_info < (3, 0):
eval('self.assertEqual(str_to_int(123456L), 123456)')
self.assertEqual(str_to_int('noninteger'), None)
self.assertEqual(str_to_int([]), None)
def test_url_basename(self):
self.assertEqual(url_basename('http://foo.de/'), '')
self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
self.assertEqual(
url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
'trailer.mp4')
def test_base_url(self):
self.assertEqual(base_url('http://foo.de/'), 'http://foo.de/')
self.assertEqual(base_url('http://foo.de/bar'), 'http://foo.de/')
self.assertEqual(base_url('http://foo.de/bar/'), 'http://foo.de/bar/')
self.assertEqual(base_url('http://foo.de/bar/baz'), 'http://foo.de/bar/')
self.assertEqual(base_url('http://foo.de/bar/baz?x=z/x/c'), 'http://foo.de/bar/')
def test_urljoin(self):
self.assertEqual(urljoin('http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(b'http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(b'http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('//foo.de/', '/a/b/c.txt'), '//foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
self.assertEqual(urljoin(None, 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(None, '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
self.assertEqual(urljoin('', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(['foobar'], 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', None), None)
self.assertEqual(urljoin('http://foo.de/', ''), None)
self.assertEqual(urljoin('http://foo.de/', ['foobar']), None)
self.assertEqual(urljoin('http://foo.de/a/b/c.txt', '.././../d.txt'), 'http://foo.de/d.txt')
self.assertEqual(urljoin('http://foo.de/a/b/c.txt', 'rtmp://foo.de'), 'rtmp://foo.de')
self.assertEqual(urljoin(None, 'rtmp://foo.de'), 'rtmp://foo.de')
def test_url_or_none(self):
self.assertEqual(url_or_none(None), None)
self.assertEqual(url_or_none(''), None)
self.assertEqual(url_or_none('foo'), None)
self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
self.assertEqual(url_or_none('https://foo.de'), 'https://foo.de')
self.assertEqual(url_or_none('http$://foo.de'), None)
self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
self.assertEqual(url_or_none('//foo.de'), '//foo.de')
self.assertEqual(url_or_none('s3://foo.de'), None)
self.assertEqual(url_or_none('rtmpte://foo.de'), 'rtmpte://foo.de')
self.assertEqual(url_or_none('mms://foo.de'), 'mms://foo.de')
self.assertEqual(url_or_none('rtspu://foo.de'), 'rtspu://foo.de')
self.assertEqual(url_or_none('ftps://foo.de'), 'ftps://foo.de')
def test_parse_age_limit(self):
self.assertEqual(parse_age_limit(None), None)
self.assertEqual(parse_age_limit(False), None)
self.assertEqual(parse_age_limit('invalid'), None)
self.assertEqual(parse_age_limit(0), 0)
self.assertEqual(parse_age_limit(18), 18)
self.assertEqual(parse_age_limit(21), 21)
self.assertEqual(parse_age_limit(22), None)
self.assertEqual(parse_age_limit('18'), 18)
self.assertEqual(parse_age_limit('18+'), 18)
self.assertEqual(parse_age_limit('PG-13'), 13)
self.assertEqual(parse_age_limit('TV-14'), 14)
self.assertEqual(parse_age_limit('TV-MA'), 17)
self.assertEqual(parse_age_limit('TV14'), 14)
self.assertEqual(parse_age_limit('TV_G'), 0)
def test_parse_duration(self):
self.assertEqual(parse_duration(None), None)
self.assertEqual(parse_duration(False), None)
self.assertEqual(parse_duration('invalid'), None)
self.assertEqual(parse_duration('1'), 1)
self.assertEqual(parse_duration('1337:12'), 80232)
self.assertEqual(parse_duration('9:12:43'), 33163)
self.assertEqual(parse_duration('12:00'), 720)
self.assertEqual(parse_duration('00:01:01'), 61)
self.assertEqual(parse_duration('x:y'), None)
self.assertEqual(parse_duration('3h11m53s'), 11513)
self.assertEqual(parse_duration('3h 11m 53s'), 11513)
self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
self.assertEqual(parse_duration('62m45s'), 3765)
self.assertEqual(parse_duration('6m59s'), 419)
self.assertEqual(parse_duration('49s'), 49)
self.assertEqual(parse_duration('0h0m0s'), 0)
self.assertEqual(parse_duration('0m0s'), 0)
self.assertEqual(parse_duration('0s'), 0)
self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
self.assertEqual(parse_duration('T30M38S'), 1838)
self.assertEqual(parse_duration('5 s'), 5)
self.assertEqual(parse_duration('3 min'), 180)
self.assertEqual(parse_duration('2.5 hours'), 9000)
self.assertEqual(parse_duration('02:03:04'), 7384)
self.assertEqual(parse_duration('01:02:03:04'), 93784)
self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
self.assertEqual(parse_duration('87 Min.'), 5220)
self.assertEqual(parse_duration('PT1H0.040S'), 3600.04)
self.assertEqual(parse_duration('PT00H03M30SZ'), 210)
self.assertEqual(parse_duration('P0Y0M0DT0H4M20.880S'), 260.88)
def test_fix_xml_ampersands(self):
self.assertEqual(
fix_xml_ampersands('"&x=y&z=a'), '"&x=y&z=a')
self.assertEqual(
fix_xml_ampersands('"&x=y&wrong;&z=a'),
'"&x=y&wrong;&z=a')
self.assertEqual(
fix_xml_ampersands('&'><"'),
'&'><"')
self.assertEqual(
fix_xml_ampersands('Ӓ᪼'), 'Ӓ᪼')
self.assertEqual(fix_xml_ampersands('&#&#'), '&#&#')
def test_paged_list(self):
def testPL(size, pagesize, sliceargs, expected):
def get_page(pagenum):
firstid = pagenum * pagesize
upto = min(size, pagenum * pagesize + pagesize)
for i in range(firstid, upto):
yield i
pl = OnDemandPagedList(get_page, pagesize)
got = pl.getslice(*sliceargs)
self.assertEqual(got, expected)
iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
got = iapl.getslice(*sliceargs)
self.assertEqual(got, expected)
testPL(5, 2, (), [0, 1, 2, 3, 4])
testPL(5, 2, (1,), [1, 2, 3, 4])
testPL(5, 2, (2,), [2, 3, 4])
testPL(5, 2, (4,), [4])
testPL(5, 2, (0, 3), [0, 1, 2])
testPL(5, 2, (1, 4), [1, 2, 3])
testPL(5, 2, (2, 99), [2, 3, 4])
testPL(5, 2, (20, 99), [])
def test_read_batch_urls(self):
f = io.StringIO('''\xef\xbb\xbf foo
bar\r
baz
# More after this line\r
; or after this
bam''')
self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
data = urlencode_postdata({'username': '[email protected]', 'password': '1234'})
self.assertTrue(isinstance(data, bytes))
def test_update_url_query(self):
def query_dict(url):
return compat_parse_qs(compat_urlparse.urlparse(url).query)
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
query_dict('http://example.com/path?quality=HD&format=mp4'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'fields': 'id,formats,subtitles'})),
query_dict('http://example.com/path?fields=id,formats,subtitles'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path?manifest=f4m', {'manifest': []})),
query_dict('http://example.com/path'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
query_dict('http://example.com/path?system=LINUX'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'fields': b'id,formats,subtitles'})),
query_dict('http://example.com/path?fields=id,formats,subtitles'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'width': 1080, 'height': 720})),
query_dict('http://example.com/path?width=1080&height=720'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'bitrate': 5020.43})),
query_dict('http://example.com/path?bitrate=5020.43'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'test': '第二行тест'})),
query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
def test_multipart_encode(self):
self.assertEqual(
multipart_encode({b'field': b'value'}, boundary='AAAAAA')[0],
b'--AAAAAA\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--AAAAAA--\r\n')
self.assertEqual(
multipart_encode({'欄位'.encode('utf-8'): '值'.encode('utf-8')}, boundary='AAAAAA')[0],
b'--AAAAAA\r\nContent-Disposition: form-data; name="\xe6\xac\x84\xe4\xbd\x8d"\r\n\r\n\xe5\x80\xbc\r\n--AAAAAA--\r\n')
self.assertRaises(
ValueError, multipart_encode, {b'field': b'value'}, boundary='value')
def test_dict_get(self):
FALSE_VALUES = {
'none': None,
'false': False,
'zero': 0,
'empty_string': '',
'empty_list': [],
}
d = FALSE_VALUES.copy()
d['a'] = 42
self.assertEqual(dict_get(d, 'a'), 42)
self.assertEqual(dict_get(d, 'b'), None)
self.assertEqual(dict_get(d, 'b', 42), 42)
self.assertEqual(dict_get(d, ('a', )), 42)
self.assertEqual(dict_get(d, ('b', 'a', )), 42)
self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
self.assertEqual(dict_get(d, ('b', 'c', )), None)
self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
for key, false_value in FALSE_VALUES.items():
self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
def test_merge_dicts(self):
self.assertEqual(merge_dicts({'a': 1}, {'b': 2}), {'a': 1, 'b': 2})
self.assertEqual(merge_dicts({'a': 1}, {'a': 2}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {'a': None}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {'a': ''}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {}), {'a': 1})
self.assertEqual(merge_dicts({'a': None}, {'a': 1}), {'a': 1})
self.assertEqual(merge_dicts({'a': ''}, {'a': 1}), {'a': ''})
self.assertEqual(merge_dicts({'a': ''}, {'a': 'abc'}), {'a': 'abc'})
self.assertEqual(merge_dicts({'a': None}, {'a': ''}, {'a': 'abc'}), {'a': 'abc'})
def test_encode_compat_str(self):
self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест')
self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест')
def test_parse_iso8601(self):
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
self.assertEqual(parse_iso8601('2015-09-29T08:27:31.727'), 1443515251)
self.assertEqual(parse_iso8601('2015-09-29T08-27-31.727'), None)
def test_strip_jsonp(self):
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
d = json.loads(stripped)
self.assertEqual(d, [{"id": "532cb", "x": 3}])
stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
d = json.loads(stripped)
self.assertEqual(d, {'STATUS': 'OK'})
stripped = strip_jsonp('ps.embedHandler({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('window.cb && window.cb({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('window.cb && cb({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
def test_strip_or_none(self):
self.assertEqual(strip_or_none(' abc'), 'abc')
self.assertEqual(strip_or_none('abc '), 'abc')
self.assertEqual(strip_or_none(' abc '), 'abc')
self.assertEqual(strip_or_none('\tabc\t'), 'abc')
self.assertEqual(strip_or_none('\n\tabc\n\t'), 'abc')
self.assertEqual(strip_or_none('abc'), 'abc')
self.assertEqual(strip_or_none(''), '')
self.assertEqual(strip_or_none(None), None)
self.assertEqual(strip_or_none(42), None)
self.assertEqual(strip_or_none([]), None)
def test_uppercase_escape(self):
self.assertEqual(uppercase_escape('aä'), 'aä')
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
def test_lowercase_escape(self):
self.assertEqual(lowercase_escape('aä'), 'aä')
self.assertEqual(lowercase_escape('\\u0026'), '&')
def test_limit_length(self):
self.assertEqual(limit_length(None, 12), None)
self.assertEqual(limit_length('foo', 12), 'foo')
self.assertTrue(
limit_length('foo bar baz asd', 12).startswith('foo bar'))
self.assertTrue('...' in limit_length('foo bar baz asd', 12))
def test_mimetype2ext(self):
self.assertEqual(mimetype2ext(None), None)
self.assertEqual(mimetype2ext('video/x-flv'), 'flv')
self.assertEqual(mimetype2ext('application/x-mpegURL'), 'm3u8')
self.assertEqual(mimetype2ext('text/vtt'), 'vtt')
self.assertEqual(mimetype2ext('text/vtt;charset=utf-8'), 'vtt')
self.assertEqual(mimetype2ext('text/html; charset=utf-8'), 'html')
self.assertEqual(mimetype2ext('audio/x-wav'), 'wav')
self.assertEqual(mimetype2ext('audio/x-wav;codec=pcm'), 'wav')
def test_month_by_name(self):
self.assertEqual(month_by_name(None), None)
self.assertEqual(month_by_name('December', 'en'), 12)
self.assertEqual(month_by_name('décembre', 'fr'), 12)
self.assertEqual(month_by_name('December'), 12)
self.assertEqual(month_by_name('décembre'), None)
self.assertEqual(month_by_name('Unknown', 'unknown'), None)
def test_parse_codecs(self):
self.assertEqual(parse_codecs(''), {})
self.assertEqual(parse_codecs('avc1.77.30, mp4a.40.2'), {
'vcodec': 'avc1.77.30',
'acodec': 'mp4a.40.2',
})
self.assertEqual(parse_codecs('mp4a.40.2'), {
'vcodec': 'none',
'acodec': 'mp4a.40.2',
})
self.assertEqual(parse_codecs('mp4a.40.5,avc1.42001e'), {
'vcodec': 'avc1.42001e',
'acodec': 'mp4a.40.5',
})
self.assertEqual(parse_codecs('avc3.640028'), {
'vcodec': 'avc3.640028',
'acodec': 'none',
})
self.assertEqual(parse_codecs(', h264,,newcodec,aac'), {
'vcodec': 'h264',
'acodec': 'aac',
})
self.assertEqual(parse_codecs('av01.0.05M.08'), {
'vcodec': 'av01.0.05M.08',
'acodec': 'none',
})
self.assertEqual(parse_codecs('theora, vorbis'), {
'vcodec': 'theora',
'acodec': 'vorbis',
})
self.assertEqual(parse_codecs('unknownvcodec, unknownacodec'), {
'vcodec': 'unknownvcodec',
'acodec': 'unknownacodec',
})
self.assertEqual(parse_codecs('unknown'), {})
def test_escape_rfc3986(self):
reserved = "!*'();:@&=+$,/?#[]"
unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
self.assertEqual(escape_rfc3986(reserved), reserved)
self.assertEqual(escape_rfc3986(unreserved), unreserved)
self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
def test_escape_url(self):
self.assertEqual(
escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
)
self.assertEqual(
escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
)
self.assertEqual(
escape_url('http://тест.рф/фрагмент'),
'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
)
self.assertEqual(
escape_url('http://тест.рф/абв?абв=абв#абв'),
'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
)
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
def test_js_to_json_realworld(self):
inp = '''{
'clip':{'provider':'pseudo'}
}'''
self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
json.loads(js_to_json(inp))
inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"'''
self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''')
inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
json_code = js_to_json(inp)
self.assertEqual(json.loads(json_code), json.loads(inp))
inp = '''{
0:{src:'skipped', type: 'application/dash+xml'},
1:{src:'skipped', type: 'application/vnd.apple.mpegURL'},
}'''
self.assertEqual(js_to_json(inp), '''{
"0":{"src":"skipped", "type": "application/dash+xml"},
"1":{"src":"skipped", "type": "application/vnd.apple.mpegURL"}
}''')
inp = '''{"foo":101}'''
self.assertEqual(js_to_json(inp), '''{"foo":101}''')
inp = '''{"duration": "00:01:07"}'''
self.assertEqual(js_to_json(inp), '''{"duration": "00:01:07"}''')
inp = '''{segments: [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}'''
self.assertEqual(js_to_json(inp), '''{"segments": [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}''')
def test_js_to_json_edgecases(self):
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
on = js_to_json('{"abc": true}')
self.assertEqual(json.loads(on), {'abc': True})
# Ignore JavaScript code as well
on = js_to_json('''{
"x": 1,
y: "a",
z: some.code
}''')
d = json.loads(on)
self.assertEqual(d['x'], 1)
self.assertEqual(d['y'], 'a')
# Just drop ! prefix for now though this results in a wrong value
on = js_to_json('''{
a: !0,
b: !1,
c: !!0,
d: !!42.42,
e: !!![],
f: !"abc",
g: !"",
!42: 42
}''')
self.assertEqual(json.loads(on), {
'a': 0,
'b': 1,
'c': 0,
'd': 42.42,
'e': [],
'f': "abc",
'g': "",
'42': 42
})
on = js_to_json('["abc", "def",]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('[/*comment\n*/"abc"/*comment\n*/,/*comment\n*/"def",/*comment\n*/]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('[//comment\n"abc" //comment\n,//comment\n"def",//comment\n]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('{"abc": "def",}')
self.assertEqual(json.loads(on), {'abc': 'def'})
on = js_to_json('{/*comment\n*/"abc"/*comment\n*/:/*comment\n*/"def"/*comment\n*/,/*comment\n*/}')
self.assertEqual(json.loads(on), {'abc': 'def'})
on = js_to_json('{ 0: /* " \n */ ",]" , }')
self.assertEqual(json.loads(on), {'0': ',]'})
on = js_to_json('{ /*comment\n*/0/*comment\n*/: /* " \n */ ",]" , }')
self.assertEqual(json.loads(on), {'0': ',]'})
on = js_to_json('{ 0: // comment\n1 }')
self.assertEqual(json.loads(on), {'0': 1})
on = js_to_json(r'["<p>x<\/p>"]')
self.assertEqual(json.loads(on), ['<p>x</p>'])
on = js_to_json(r'["\xaa"]')
self.assertEqual(json.loads(on), ['\u00aa'])
on = js_to_json("['a\\\nb']")
self.assertEqual(json.loads(on), ['ab'])
on = js_to_json("/*comment\n*/[/*comment\n*/'a\\\nb'/*comment\n*/]/*comment\n*/")
self.assertEqual(json.loads(on), ['ab'])
on = js_to_json('{0xff:0xff}')
self.assertEqual(json.loads(on), {'255': 255})
on = js_to_json('{/*comment\n*/0xff/*comment\n*/:/*comment\n*/0xff/*comment\n*/}')
self.assertEqual(json.loads(on), {'255': 255})
on = js_to_json('{077:077}')
self.assertEqual(json.loads(on), {'63': 63})
on = js_to_json('{/*comment\n*/077/*comment\n*/:/*comment\n*/077/*comment\n*/}')
self.assertEqual(json.loads(on), {'63': 63})
on = js_to_json('{42:42}')
self.assertEqual(json.loads(on), {'42': 42})
on = js_to_json('{/*comment\n*/42/*comment\n*/:/*comment\n*/42/*comment\n*/}')
self.assertEqual(json.loads(on), {'42': 42})
on = js_to_json('{42:4.2e1}')
self.assertEqual(json.loads(on), {'42': 42.0})
on = js_to_json('{ "0x40": "0x40" }')
self.assertEqual(json.loads(on), {'0x40': '0x40'})
on = js_to_json('{ "040": "040" }')
self.assertEqual(json.loads(on), {'040': '040'})
def test_js_to_json_malformed(self):
self.assertEqual(js_to_json('42a1'), '42"a1"')
self.assertEqual(js_to_json('42a-1'), '42"a"-1')
def test_extract_attributes(self):
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
self.assertEqual(extract_attributes('<e x=y>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="a \'b\' c">'), {'x': "a 'b' c"})
self.assertEqual(extract_attributes('<e x=\'a "b" c\'>'), {'x': 'a "b" c'})
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="&">'), {'x': '&'}) # XML
self.assertEqual(extract_attributes('<e x=""">'), {'x': '"'})
self.assertEqual(extract_attributes('<e x="£">'), {'x': '£'}) # HTML 3.2
self.assertEqual(extract_attributes('<e x="λ">'), {'x': 'λ'}) # HTML 4.0
self.assertEqual(extract_attributes('<e x="&foo">'), {'x': '&foo'})
self.assertEqual(extract_attributes('<e x="\'">'), {'x': "'"})
self.assertEqual(extract_attributes('<e x=\'"\'>'), {'x': '"'})
self.assertEqual(extract_attributes('<e x >'), {'x': None})
self.assertEqual(extract_attributes('<e x=y a>'), {'x': 'y', 'a': None})
self.assertEqual(extract_attributes('<e x= y>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x=1 y=2 x=3>'), {'y': '2', 'x': '3'})
self.assertEqual(extract_attributes('<e \nx=\ny\n>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e \nx=\n"y"\n>'), {'x': 'y'})
self.assertEqual(extract_attributes("<e \nx=\n'y'\n>"), {'x': 'y'})
self.assertEqual(extract_attributes('<e \nx="\ny\n">'), {'x': '\ny\n'})
self.assertEqual(extract_attributes('<e CAPS=x>'), {'caps': 'x'}) # Names lowercased
self.assertEqual(extract_attributes('<e x=1 X=2>'), {'x': '2'})
self.assertEqual(extract_attributes('<e X=1 x=2>'), {'x': '2'})
self.assertEqual(extract_attributes('<e _:funny-name1=1>'), {'_:funny-name1': '1'})
self.assertEqual(extract_attributes('<e x="Fáilte 世界 \U0001f600">'), {'x': 'Fáilte 世界 \U0001f600'})
self.assertEqual(extract_attributes('<e x="décomposé">'), {'x': 'décompose\u0301'})
# "Narrow" Python builds don't support unicode code points outside BMP.
try:
compat_chr(0x10000)
supports_outside_bmp = True
except ValueError:
supports_outside_bmp = False
if supports_outside_bmp:
self.assertEqual(extract_attributes('<e x="Smile 😀!">'), {'x': 'Smile \U0001f600!'})
# Malformed HTML should not break attributes extraction on older Python
self.assertEqual(extract_attributes('<mal"formed/>'), {})
def test_clean_html(self):
self.assertEqual(clean_html('a:\nb'), 'a: b')
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
self.assertEqual(clean_html('a<br>\xa0b'), 'a\nb')
def test_intlist_to_bytes(self):
self.assertEqual(
intlist_to_bytes([0, 1, 127, 128, 255]),
b'\x00\x01\x7f\x80\xff')
def test_args_to_str(self):
self.assertEqual(
args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""'
)
def test_parse_filesize(self):
self.assertEqual(parse_filesize(None), None)
self.assertEqual(parse_filesize(''), None)
self.assertEqual(parse_filesize('91 B'), 91)
self.assertEqual(parse_filesize('foobar'), None)
self.assertEqual(parse_filesize('2 MiB'), 2097152)
self.assertEqual(parse_filesize('5 GB'), 5000000000)
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
self.assertEqual(parse_filesize('1.2tb'), 1200000000000)
self.assertEqual(parse_filesize('1,24 KB'), 1240)
self.assertEqual(parse_filesize('1,24 kb'), 1240)
self.assertEqual(parse_filesize('8.5 megabytes'), 8500000)
def test_parse_count(self):
self.assertEqual(parse_count(None), None)
self.assertEqual(parse_count(''), None)
self.assertEqual(parse_count('0'), 0)
self.assertEqual(parse_count('1000'), 1000)
self.assertEqual(parse_count('1.000'), 1000)
self.assertEqual(parse_count('1.1k'), 1100)
self.assertEqual(parse_count('1.1kk'), 1100000)
self.assertEqual(parse_count('1.1kk '), 1100000)
self.assertEqual(parse_count('1.1kk views'), 1100000)
def test_parse_resolution(self):
self.assertEqual(parse_resolution(None), {})
self.assertEqual(parse_resolution(''), {})
self.assertEqual(parse_resolution('1920x1080'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('1920×1080'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('1920 x 1080'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('720p'), {'height': 720})
self.assertEqual(parse_resolution('4k'), {'height': 2160})
self.assertEqual(parse_resolution('8K'), {'height': 4320})
def test_parse_bitrate(self):
self.assertEqual(parse_bitrate(None), None)
self.assertEqual(parse_bitrate(''), None)
self.assertEqual(parse_bitrate('300kbps'), 300)
self.assertEqual(parse_bitrate('1500kbps'), 1500)
self.assertEqual(parse_bitrate('300 kbps'), 300)
def test_version_tuple(self):
self.assertEqual(version_tuple('1'), (1,))
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
self.assertEqual(version_tuple('10.1-6'), (10, 1, 6)) # avconv style
def test_detect_exe_version(self):
self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
configuration: --prefix=/usr --extra-'''), '1.2.1')
self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
Trying to open render node...
Success at /dev/dri/renderD128.
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
def test_age_restricted(self):
self.assertFalse(age_restricted(None, 10)) # unrestricted content
self.assertFalse(age_restricted(1, None)) # unrestricted policy
self.assertFalse(age_restricted(8, 10))
self.assertTrue(age_restricted(18, 14))
self.assertFalse(age_restricted(18, 18))
def test_is_html(self):
self.assertFalse(is_html(b'\x49\x44\x43<html'))
self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-8 with BOM
b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-16-LE
b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
))
self.assertTrue(is_html( # UTF-16-BE
b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
))
self.assertTrue(is_html( # UTF-32-BE
b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
self.assertTrue(is_html( # UTF-32-LE
b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))
def test_render_table(self):
self.assertEqual(
render_table(
['a', 'bcd'],
[[123, 4], [9999, 51]]),
'a bcd\n'
'123 4\n'
'9999 51')
def test_match_str(self):
self.assertRaises(ValueError, match_str, 'xy>foobar', {})
self.assertFalse(match_str('xy', {'x': 1200}))
self.assertTrue(match_str('!xy', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 1200}))
self.assertFalse(match_str('!x', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 0}))
self.assertFalse(match_str('x>0', {'x': 0}))
self.assertFalse(match_str('x>0', {}))
self.assertTrue(match_str('x>?0', {}))
self.assertTrue(match_str('x>1K', {'x': 1200}))
self.assertFalse(match_str('x>2K', {'x': 1200}))
self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 90, 'description': 'foo'}))
self.assertTrue(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 10}))
self.assertTrue(match_str('is_live', {'is_live': True}))
self.assertFalse(match_str('is_live', {'is_live': False}))
self.assertFalse(match_str('is_live', {'is_live': None}))
self.assertFalse(match_str('is_live', {}))
self.assertFalse(match_str('!is_live', {'is_live': True}))
self.assertTrue(match_str('!is_live', {'is_live': False}))
self.assertTrue(match_str('!is_live', {'is_live': None}))
self.assertTrue(match_str('!is_live', {}))
self.assertTrue(match_str('title', {'title': 'abc'}))
self.assertTrue(match_str('title', {'title': ''}))
self.assertFalse(match_str('!title', {'title': 'abc'}))
self.assertFalse(match_str('!title', {'title': ''}))
def test_parse_dfxp_time_expr(self):
self.assertEqual(parse_dfxp_time_expr(None), None)
self.assertEqual(parse_dfxp_time_expr(''), None)
self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1)
self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0)
self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01:100'), 1.1)
def test_dfxp2srt(self):
dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
<p begin="3" end="-1">Lines with invalid timestamps are ignored</p>
<p begin="-1" end="-1">Ignore, two</p>
<p begin="3" dur="-1">Ignored, three</p>
</div>
</body>
</tt>'''.encode('utf-8')
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The following line contains Chinese characters and special symbols
2
00:00:01,000 --> 00:00:02,000
第二行
♪♪
3
00:00:02,000 --> 00:00:03,000
Third
Line
'''
self.assertEqual(dfxp2srt(dfxp_data), srt_data)
dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The first line</p>
</div>
</body>
</tt>'''.encode('utf-8')
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line
'''
self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
dfxp_data_with_style = '''<?xml version="1.0" encoding="utf-8"?>
<tt xmlns="http://www.w3.org/2006/10/ttaf1" xmlns:ttp="http://www.w3.org/2006/10/ttaf1#parameter" ttp:timeBase="media" xmlns:tts="http://www.w3.org/2006/10/ttaf1#style" xml:lang="en" xmlns:ttm="http://www.w3.org/2006/10/ttaf1#metadata">
<head>
<styling>
<style id="s2" style="s0" tts:color="cyan" tts:fontWeight="bold" />
<style id="s1" style="s0" tts:color="yellow" tts:fontStyle="italic" />
<style id="s3" style="s0" tts:color="lime" tts:textDecoration="underline" />
<style id="s0" tts:backgroundColor="black" tts:fontStyle="normal" tts:fontSize="16" tts:fontFamily="sansSerif" tts:color="white" />
</styling>
</head>
<body tts:textAlign="center" style="s0">
<div>
<p begin="00:00:02.08" id="p0" end="00:00:05.84">default style<span tts:color="red">custom style</span></p>
<p style="s2" begin="00:00:02.08" id="p0" end="00:00:05.84"><span tts:color="lime">part 1<br /></span><span tts:color="cyan">part 2</span></p>
<p style="s3" begin="00:00:05.84" id="p1" end="00:00:09.56">line 3<br />part 3</p>
<p style="s1" tts:textDecoration="underline" begin="00:00:09.56" id="p2" end="00:00:12.36"><span style="s2" tts:color="lime">inner<br /> </span>style</p>
</div>
</body>
</tt>'''.encode('utf-8')
srt_data = '''1
00:00:02,080 --> 00:00:05,839
<font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>
2
00:00:02,080 --> 00:00:05,839
<b><font color="cyan" face="sansSerif" size="16"><font color="lime">part 1
</font>part 2</font></b>
3
00:00:05,839 --> 00:00:09,560
<u><font color="lime">line 3
part 3</font></u>
4
00:00:09,560 --> 00:00:12,359
<i><u><font color="yellow"><font color="lime">inner
</font>style</font></u></i>
'''
self.assertEqual(dfxp2srt(dfxp_data_with_style), srt_data)
dfxp_data_non_utf8 = '''<?xml version="1.0" encoding="UTF-16"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">Line 1</p>
<p begin="1" end="2">第二行</p>
</div>
</body>
</tt>'''.encode('utf-16')
srt_data = '''1
00:00:00,000 --> 00:00:01,000
Line 1
2
00:00:01,000 --> 00:00:02,000
第二行
'''
self.assertEqual(dfxp2srt(dfxp_data_non_utf8), srt_data)
def test_cli_option(self):
self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
self.assertEqual(cli_option({}, '--proxy', 'proxy'), [])
self.assertEqual(cli_option({'retries': 10}, '--retries', 'retries'), ['--retries', '10'])
def test_cli_valueless_option(self):
self.assertEqual(cli_valueless_option(
{'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader'])
self.assertEqual(cli_valueless_option(
{'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), [])
self.assertEqual(cli_valueless_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate'])
self.assertEqual(cli_valueless_option(
{'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), [])
self.assertEqual(cli_valueless_option(
{'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), [])
self.assertEqual(cli_valueless_option(
{'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate'])
def test_cli_bool_option(self):
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
['--no-check-certificate', 'true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
['--no-check-certificate=true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
['--check-certificate', 'false'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
['--check-certificate=false'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
['--check-certificate', 'true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
['--check-certificate=true'])
self.assertEqual(
cli_bool_option(
{}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
[])
def test_ohdave_rsa_encrypt(self):
N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
e = 65537
self.assertEqual(
ohdave_rsa_encrypt(b'aa111222', e, N),
'726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881')
def test_pkcs1pad(self):
data = [1, 2, 3]
padded_data = pkcs1pad(data, 32)
self.assertEqual(padded_data[:2], [0, 2])
self.assertEqual(padded_data[28:], [0, 1, 2, 3])
self.assertRaises(ValueError, pkcs1pad, data, 8)
def test_encode_base_n(self):
self.assertEqual(encode_base_n(0, 30), '0')
self.assertEqual(encode_base_n(80, 30), '2k')
custom_table = '9876543210ZYXWVUTSRQPONMLKJIHGFEDCBA'
self.assertEqual(encode_base_n(0, 30, custom_table), '9')
self.assertEqual(encode_base_n(80, 30, custom_table), '7P')
self.assertRaises(ValueError, encode_base_n, 0, 70)
self.assertRaises(ValueError, encode_base_n, 0, 60, custom_table)
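    # caesar(s, alphabet, shift) rotates each character of s by `shift` positions within the
    # given alphabet; characters outside the alphabet pass through unchanged (see cases below).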
def test_caesar(self):
self.assertEqual(caesar('ace', 'abcdef', 2), 'cea')
self.assertEqual(caesar('cea', 'abcdef', -2), 'ace')
self.assertEqual(caesar('ace', 'abcdef', -2), 'eac')
self.assertEqual(caesar('eac', 'abcdef', 2), 'ace')
self.assertEqual(caesar('ace', 'abcdef', 0), 'ace')
self.assertEqual(caesar('xyz', 'abcdef', 2), 'xyz')
self.assertEqual(caesar('abc', 'acegik', 2), 'ebg')
self.assertEqual(caesar('ebg', 'acegik', -2), 'abc')
def test_rot47(self):
self.assertEqual(rot47('youtube-dlc'), r'J@FEF36\5=4')
self.assertEqual(rot47('YOUTUBE-DLC'), r'*~&%&qt\s{r')
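    # urshift emulates JavaScript's unsigned right shift (>>>) for 32-bit integers.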
def test_urshift(self):
self.assertEqual(urshift(3, 1), 1)
self.assertEqual(urshift(-3, 1), 2147483646)
def test_get_element_by_class(self):
html = '''
<span class="foo bar">nice</span>
'''
self.assertEqual(get_element_by_class('foo', html), 'nice')
self.assertEqual(get_element_by_class('no-such-class', html), None)
def test_get_element_by_attribute(self):
html = '''
<span class="foo bar">nice</span>
'''
self.assertEqual(get_element_by_attribute('class', 'foo bar', html), 'nice')
self.assertEqual(get_element_by_attribute('class', 'foo', html), None)
self.assertEqual(get_element_by_attribute('class', 'no-such-foo', html), None)
html = '''
<div itemprop="author" itemscope>foo</div>
'''
self.assertEqual(get_element_by_attribute('itemprop', 'author', html), 'foo')
def test_get_elements_by_class(self):
html = '''
<span class="foo bar">nice</span><span class="foo bar">also nice</span>
'''
self.assertEqual(get_elements_by_class('foo', html), ['nice', 'also nice'])
self.assertEqual(get_elements_by_class('no-such-class', html), [])
def test_get_elements_by_attribute(self):
html = '''
<span class="foo bar">nice</span><span class="foo bar">also nice</span>
'''
self.assertEqual(get_elements_by_attribute('class', 'foo bar', html), ['nice', 'also nice'])
self.assertEqual(get_elements_by_attribute('class', 'foo', html), [])
self.assertEqual(get_elements_by_attribute('class', 'no-such-foo', html), [])
def test_clean_podcast_url(self):
self.assertEqual(clean_podcast_url('https://www.podtrac.com/pts/redirect.mp3/chtbl.com/track/5899E/traffic.megaphone.fm/HSW7835899191.mp3'), 'https://traffic.megaphone.fm/HSW7835899191.mp3')
self.assertEqual(clean_podcast_url('https://play.podtrac.com/npr-344098539/edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3'), 'https://edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3')
if __name__ == '__main__':
unittest.main()
| e)
self.assertEqual(int_or_none(None), None)
self.assertEqual(int_or_none([]), None)
self.assertEqual(int_or_none(set()), None)
def test_str_to_int(self):
self.assertEqual(str_to_int('123,456'), 1234 |
lib.rs | //! A library provides an interface for a unix [PTY/TTY](https://en.wikipedia.org/wiki/Pseudoterminal).
//!
//! It aims to work on all major Unix variants.
//!
//! The library was developed as a backend for https://github.com/zhiburt/expectrl.
//! If you're interested in higher-level operations, you may be better off taking a look at `zhiburt/expectrl`.
//!
//! ## Usage
//!
//! ```rust
//! use ptyprocess::PtyProcess;
//! use std::process::Command;
//! use std::io::{BufRead, Write, BufReader};
//!
//! // spawn a cat process
//! let mut process = PtyProcess::spawn(Command::new("cat")).expect("failed to spawn a process");
//!
//! // create a communication stream
//! let mut stream = process.get_raw_handle().expect("failed to create a stream");
//!
//! // send a message to process
//! writeln!(stream, "Hello cat").expect("failed to write to a stream");
//!
//! // read a line from the stream
//! let mut reader = BufReader::new(stream);
//! let mut buf = String::new();
//! reader.read_line(&mut buf).expect("failed to read a process output");
//!
//! println!("line={}", buf);
//!
//! // stop the process
//! assert!(process.exit(true).expect("failed to stop the process"))
//! ```
pub mod stream;
pub use nix::sys::signal::Signal;
pub use nix::sys::wait::WaitStatus;
pub use nix::Error;
#[cfg(feature = "async")]
use futures_lite::AsyncWriteExt;
use nix::errno::{self, Errno};
use nix::fcntl::{fcntl, open, FcntlArg, FdFlag, OFlag};
use nix::libc::{self, winsize, STDERR_FILENO, STDIN_FILENO, STDOUT_FILENO};
use nix::pty::PtyMaster;
use nix::pty::{grantpt, posix_openpt, unlockpt};
use nix::sys::stat::Mode;
use nix::sys::wait::{self, waitpid};
use nix::sys::{signal, termios};
use nix::unistd::{
self, close, dup, dup2, fork, isatty, pipe, setsid, sysconf, write, ForkResult, Pid, SysconfVar,
};
use nix::{ioctl_write_ptr_bad, Result};
use signal::Signal::SIGKILL;
use std::fs::File;
use std::os::unix::prelude::{AsRawFd, CommandExt, FromRawFd, RawFd};
use std::process::{self, Command};
use std::thread;
use std::time::{self, Duration};
use stream::Stream;
use termios::SpecialCharacterIndices;
const DEFAULT_TERM_COLS: u16 = 80;
const DEFAULT_TERM_ROWS: u16 = 24;
const DEFAULT_VEOF_CHAR: u8 = 0x4; // ^D
const DEFAULT_INTR_CHAR: u8 = 0x3; // ^C
const DEFAULT_TERMINATE_DELAY: Duration = Duration::from_millis(100);
/// PtyProcess controls a spawned process and communication with it.
///
/// It implements [std::io::Read] and [std::io::Write] to communicate with
/// a child.
///
/// ```no_run,ignore
/// use ptyprocess::PtyProcess;
/// use std::io::Write;
/// use std::process::Command;
///
/// let mut process = PtyProcess::spawn(Command::new("cat")).unwrap();
/// process.write_all(b"Hello World").unwrap();
/// process.flush().unwrap();
/// ```
#[derive(Debug)]
pub struct PtyProcess {
master: Master,
child_pid: Pid,
eof_char: u8,
intr_char: u8,
terminate_delay: Duration,
}
impl PtyProcess {
    /// Spawns a child process and creates a [PtyProcess].
///
/// ```no_run
/// # use std::process::Command;
/// # use ptyprocess::PtyProcess;
/// let proc = PtyProcess::spawn(Command::new("bash"));
/// ```
pub fn spawn(mut command: Command) -> Result<Self> {
let master = Master::open()?;
master.grant_slave_access()?;
master.unlock_slave()?;
// handle errors in child executions by pipe
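        // The write end is marked FD_CLOEXEC in the child, so a successful exec closes it
        // without writing anything (the parent then reads a zero code), while a failed exec
        // writes the errno value into the pipe before the child exits.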
let (exec_err_pipe_r, exec_err_pipe_w) = pipe()?;
let fork = unsafe { fork()? };
match fork {
ForkResult::Child => {
let err = || -> Result<()> {
make_controlling_tty(&master)?;
let slave_fd = master.get_slave_fd()?;
redirect_std_streams(slave_fd)?;
set_echo(STDIN_FILENO, false)?;
set_term_size(STDIN_FILENO, DEFAULT_TERM_COLS, DEFAULT_TERM_ROWS)?;
// Do not allow child to inherit open file descriptors from parent
close_all_descriptors(&[
0,
1,
2,
slave_fd,
exec_err_pipe_w,
exec_err_pipe_r,
master.as_raw_fd(),
])?;
close(slave_fd)?;
close(exec_err_pipe_r)?;
drop(master);
                    // close the pipe on successful exec
fcntl(exec_err_pipe_w, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))?;
let _ = command.exec();
Err(Error::last())
}()
.unwrap_err();
let code = err.as_errno().map_or(-1, |e| e as i32);
// Intentionally ignoring errors to exit the process properly
let _ = write(exec_err_pipe_w, &code.to_be_bytes());
let _ = close(exec_err_pipe_w);
process::exit(code);
}
ForkResult::Parent { child } => {
close(exec_err_pipe_w)?;
let mut pipe_buf = [0u8; 4];
unistd::read(exec_err_pipe_r, &mut pipe_buf)?;
close(exec_err_pipe_r)?;
let code = i32::from_be_bytes(pipe_buf);
if code != 0 {
return Err(Error::from_errno(errno::from_i32(code)));
}
                // Some systems may also require the terminal size to be set
                // from the parent side, so set it here as well.
set_term_size(master.as_raw_fd(), DEFAULT_TERM_COLS, DEFAULT_TERM_ROWS)?;
let eof_char = get_eof_char();
let intr_char = get_intr_char();
Ok(Self {
master,
child_pid: child,
eof_char,
intr_char,
terminate_delay: DEFAULT_TERMINATE_DELAY,
})
}
}
}
    /// Returns the pid of the child process.
pub fn pid(&self) -> Pid {
self.child_pid
}
/// Returns a file representation of a PTY, which can be used
/// to communicate with a spawned process.
///
    /// The file behavior is platform dependent.
///
/// # Safety
///
    /// Be careful changing the descriptor's inner state (e.g. via `fcntl`),
    /// because it affects all structures which use it.
    ///
    /// Be careful using this method in async mode:
    /// the descriptor is set to non-blocking mode, which affects all duplicated descriptors,
    /// and that may be unexpected.
///
/// # Example
///
/// ```no_run
/// use ptyprocess::PtyProcess;
/// use std::{process::Command, io::{BufReader, LineWriter}};
///
/// let mut process = PtyProcess::spawn(Command::new("cat")).unwrap();
/// let pty = process.get_raw_handle().unwrap();
/// let mut writer = LineWriter::new(&pty);
/// let mut reader = BufReader::new(&pty);
/// ```
pub fn get_raw_handle(&self) -> Result<File> {
self.master.get_file_handle()
}
    /// Returns a stream representation of a PTY,
    /// which can be used to communicate with a spawned process.
///
/// It differs from [Self::get_raw_handle] because it is
/// platform independent.
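    ///
    /// A minimal sketch (assuming the sync feature, where `Stream` implements the std `Read`/`Write` traits):
    ///
    /// ```no_run,ignore
    /// use ptyprocess::PtyProcess;
    /// use std::io::{BufRead, BufReader, Write};
    /// use std::process::Command;
    ///
    /// let process = PtyProcess::spawn(Command::new("cat")).unwrap();
    /// let mut stream = process.get_pty_stream().unwrap();
    /// writeln!(stream, "Hello").unwrap();
    /// let mut line = String::new();
    /// BufReader::new(stream).read_line(&mut line).unwrap();
    /// ```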
pub fn get_pty_stream(&self) -> Result<Stream> {
self.get_raw_handle().map(Stream::new)
}
    /// Get the end-of-file character if set, or a default.
pub fn get_eof_char(&self) -> u8 {
self.eof_char
}
    /// Get the interrupt character if set, or a default.
pub fn get_intr_char(&self) -> u8 {
self.intr_char
}
/// Get window size of a terminal.
///
/// Default size is 80x24.
pub fn get_window_size(&self) -> Result<(u16, u16)> {
get_term_size(self.master.as_raw_fd())
}
/// Sets a terminal size.
pub fn set_window_size(&mut self, cols: u16, rows: u16) -> Result<()> {
set_term_size(self.master.as_raw_fd(), cols, rows)
}
    /// The function returns true if the echo setting is turned on.
pub fn get_echo(&self) -> Result<bool> {
termios::tcgetattr(self.master.as_raw_fd())
.map(|flags| flags.local_flags.contains(termios::LocalFlags::ECHO))
}
    /// Sets the echo setting for the terminal.
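    ///
    /// A small usage sketch (the one-second timeout is an arbitrary choice, not a library default):
    ///
    /// ```no_run,ignore
    /// use ptyprocess::PtyProcess;
    /// use std::{process::Command, time::Duration};
    ///
    /// let mut process = PtyProcess::spawn(Command::new("cat")).unwrap();
    /// let applied = process.set_echo(false, Some(Duration::from_secs(1))).unwrap();
    /// assert!(applied);
    /// ```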
pub fn set_echo(&mut self, on: bool, timeout: Option<Duration>) -> Result<bool> {
set_echo(self.master.as_raw_fd(), on)?;
self.wait_echo(on, timeout)
}
    /// Returns true if the underlying `fd` is connected to a TTY.
pub fn isatty(&self) -> Result<bool> {
isatty(self.master.as_raw_fd())
}
    /// Sets the delay waited after each termination signal before checking whether the child exited (see [Self::exit]).
pub fn set_terminate_delay(&mut self, terminate_approach_delay: Duration) {
self.terminate_delay = terminate_approach_delay;
}
    /// Status returns the status of the child process.
pub fn status(&self) -> Result<WaitStatus> {
waitpid(self.child_pid, Some(wait::WaitPidFlag::WNOHANG))
}
/// Kill sends a signal to a child process.
///
/// The operation is non-blocking.
pub fn kill(&mut self, signal: signal::Signal) -> Result<()> {
signal::kill(self.child_pid, signal)
}
/// Signal is an alias to [PtyProcess::kill].
///
/// [PtyProcess::kill]: struct.PtyProcess.html#method.kill
pub fn signal(&mut self, signal: signal::Signal) -> Result<()> {
self.kill(signal)
}
/// Wait blocks until a child process exits.
///
    /// It returns an error if the child was already dead or did not exist
    /// at the time of the call.
///
    /// If you need to verify that a process is dead in a non-blocking way you can use
    /// the [is_alive] method.
///
/// [is_alive]: struct.PtyProcess.html#method.is_alive
pub fn wait(&self) -> Result<WaitStatus> {
waitpid(self.child_pid, None)
}
    /// Checks if the process still exists.
    ///
    /// It's a non-blocking operation.
    ///
    /// Keep in mind that after calling this method the process might be marked as DEAD by the kernel,
    /// because checking its status may reap it.
    /// Therefore a second call to [Self::status] or [Self::is_alive] might return a different status.
pub fn is_alive(&self) -> Result<bool> {
let status = self.status();
match status {
Ok(status) if status == WaitStatus::StillAlive => Ok(true),
Ok(_) | Err(Error::Sys(Errno::ECHILD)) | Err(Error::Sys(Errno::ESRCH)) => Ok(false),
Err(err) => Err(err),
}
}
/// Try to force a child to terminate.
///
    /// This returns true if the child was terminated, and false if the
    /// child could not be terminated.
///
    /// It makes 4 attempts, each more forceful than the last:
///
/// 1. SIGHUP
/// 2. SIGCONT
/// 3. SIGINT
/// 4. SIGTERM
///
    /// If `force` is `true`, it then moves on to SIGKILL.
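    ///
    /// A hedged usage sketch (the `sleep` command is only an illustration):
    ///
    /// ```no_run,ignore
    /// use ptyprocess::PtyProcess;
    /// use std::process::Command;
    ///
    /// let mut cmd = Command::new("sleep");
    /// cmd.arg("60");
    /// let mut process = PtyProcess::spawn(cmd).unwrap();
    /// // graceful signals are tried first; `force = true` falls back to SIGKILL
    /// assert!(process.exit(true).unwrap());
    /// ```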
pub fn exit(&mut self, force: bool) -> Result<bool> {
if !self.is_alive()? {
return Ok(true);
}
for &signal in &[
signal::SIGHUP,
signal::SIGCONT,
signal::SIGINT,
signal::SIGTERM,
] {
if self.try_to_terminate(signal)? {
return Ok(true);
}
}
if !force {
return Ok(false);
}
self.try_to_terminate(SIGKILL)
}
fn try_to_terminate(&mut self, signal: signal::Signal) -> Result<bool> {
self.kill(signal)?;
thread::sleep(self.terminate_delay);
self.is_alive().map(|is_alive| !is_alive)
}
fn wait_echo(&self, on: bool, timeout: Option<Duration>) -> Result<bool> {
let now = time::Instant::now();
while timeout.is_none() || now.elapsed() < timeout.unwrap() {
if on == self.get_echo()? {
return Ok(true);
}
thread::sleep(Duration::from_millis(100));
}
Ok(false)
}
}
impl Drop for PtyProcess {
fn | (&mut self) {
if let Ok(WaitStatus::StillAlive) = self.status() {
self.exit(true).unwrap();
}
}
}
fn set_term_size(fd: i32, cols: u16, rows: u16) -> Result<()> {
ioctl_write_ptr_bad!(_set_window_size, libc::TIOCSWINSZ, winsize);
let size = winsize {
ws_row: rows,
ws_col: cols,
ws_xpixel: 0,
ws_ypixel: 0,
};
let _ = unsafe { _set_window_size(fd, &size) }?;
Ok(())
}
fn get_term_size(fd: i32) -> Result<(u16, u16)> {
nix::ioctl_read_bad!(_get_window_size, libc::TIOCGWINSZ, winsize);
let mut size = winsize {
ws_col: 0,
ws_row: 0,
ws_xpixel: 0,
ws_ypixel: 0,
};
let _ = unsafe { _get_window_size(fd, &mut size) }?;
Ok((size.ws_col, size.ws_row))
}
#[derive(Debug)]
struct Master {
fd: PtyMaster,
}
impl Master {
fn open() -> Result<Self> {
let master_fd = posix_openpt(OFlag::O_RDWR)?;
Ok(Self { fd: master_fd })
}
fn grant_slave_access(&self) -> Result<()> {
grantpt(&self.fd)
}
fn unlock_slave(&self) -> Result<()> {
unlockpt(&self.fd)
}
fn get_slave_name(&self) -> Result<String> {
get_slave_name(&self.fd)
}
#[cfg(not(target_os = "freebsd"))]
fn get_slave_fd(&self) -> Result<RawFd> {
let slave_name = self.get_slave_name()?;
let slave_fd = open(
slave_name.as_str(),
OFlag::O_RDWR | OFlag::O_NOCTTY,
Mode::empty(),
)?;
Ok(slave_fd)
}
#[cfg(target_os = "freebsd")]
fn get_slave_fd(&self) -> Result<RawFd> {
let slave_name = self.get_slave_name()?;
let slave_fd = open(
format!("/dev/{}", slave_name.as_str()).as_str(),
OFlag::O_RDWR | OFlag::O_NOCTTY,
Mode::empty(),
)?;
Ok(slave_fd)
}
fn get_file_handle(&self) -> Result<File> {
let fd = dup(self.as_raw_fd())?;
let file = unsafe { File::from_raw_fd(fd) };
Ok(file)
}
}
impl AsRawFd for Master {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
}
#[cfg(target_os = "linux")]
fn get_slave_name(fd: &PtyMaster) -> Result<String> {
nix::pty::ptsname_r(fd)
}
#[cfg(target_os = "freebsd")]
fn get_slave_name(fd: &PtyMaster) -> Result<String> {
use std::ffi::CStr;
use std::os::raw::c_char;
use std::os::unix::prelude::AsRawFd;
let fd = fd.as_raw_fd();
if !isptmaster(fd)? {
        // never reached according to the current implementation of isptmaster
return Err(nix::Error::Sys(Errno::EINVAL));
}
    // todo: Need to determine the correct size via some constant like SPECNAMELEN in <sys/filio.h>
let mut buf: [c_char; 128] = [0; 128];
let _ = fdevname_r(fd, &mut buf)?;
    // todo: determine how CStr::from_ptr handles a non-NUL-terminated string.
let string = unsafe { CStr::from_ptr(buf.as_ptr()) }
.to_string_lossy()
.into_owned();
return Ok(string);
}
// https://github.com/freebsd/freebsd-src/blob/main/lib/libc/stdlib/ptsname.c#L52
#[cfg(target_os = "freebsd")]
fn isptmaster(fd: RawFd) -> Result<bool> {
use nix::libc::ioctl;
use nix::libc::TIOCPTMASTER;
match unsafe { ioctl(fd, TIOCPTMASTER as u64, 0) } {
0 => Ok(true),
_ => Err(Error::last()),
}
}
/* automatically generated by rust-bindgen 0.59.1 */
// bindgen filio.h --allowlist-type fiodgname_arg -o bindings.rs
// it may be worth using a build.rs if we need more FFI structures.
#[cfg(target_os = "freebsd")]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct fiodgname_arg {
pub len: ::std::os::raw::c_int,
pub buf: *mut ::std::os::raw::c_void,
}
// https://github.com/freebsd/freebsd-src/blob/6ae38ab45396edaea26b4725e0c7db8cffa5f208/lib/libc/gen/fdevname.c#L39
#[cfg(target_os = "freebsd")]
fn fdevname_r(fd: RawFd, buf: &mut [std::os::raw::c_char]) -> Result<()> {
use nix::libc::{ioctl, FIODGNAME};
nix::ioctl_read_bad!(_ioctl_fiodgname, FIODGNAME, fiodgname_arg);
let mut fgn = fiodgname_arg {
len: buf.len() as i32,
buf: buf.as_mut_ptr() as *mut ::std::os::raw::c_void,
};
let _ = unsafe { _ioctl_fiodgname(fd, &mut fgn) }?;
Ok(())
}
/// Gets a slave name on the Darwin platform
/// https://blog.tarq.io/ptsname-on-osx-with-rust/
#[cfg(target_os = "macos")]
fn get_slave_name(fd: &PtyMaster) -> Result<String> {
use nix::libc::ioctl;
use nix::libc::TIOCPTYGNAME;
use std::ffi::CStr;
use std::os::raw::c_char;
use std::os::unix::prelude::AsRawFd;
// ptsname_r is a linux extension but ptsname isn't thread-safe
// we could use a static mutex but instead we re-implemented ptsname_r with a syscall
// ioctl(fd, TIOCPTYGNAME, buf) manually
// the buffer size on OSX is 128, defined by sys/ttycom.h
let mut buf: [c_char; 128] = [0; 128];
let fd = fd.as_raw_fd();
match unsafe { ioctl(fd, TIOCPTYGNAME as u64, &mut buf) } {
0 => {
let string = unsafe { CStr::from_ptr(buf.as_ptr()) }
.to_string_lossy()
.into_owned();
return Ok(string);
}
_ => Err(Error::last()),
}
}
fn redirect_std_streams(fd: RawFd) -> Result<()> {
// If fildes2 is already a valid open file descriptor, it shall be closed first
close(STDIN_FILENO)?;
close(STDOUT_FILENO)?;
close(STDERR_FILENO)?;
// use slave fd as std[in/out/err]
dup2(fd, STDIN_FILENO)?;
dup2(fd, STDOUT_FILENO)?;
dup2(fd, STDERR_FILENO)?;
Ok(())
}
fn set_echo(fd: RawFd, on: bool) -> Result<()> {
    // Set echo off (or on).
    // Even then there may be something left behind in the buffer: https://stackoverflow.com/a/59034084
let mut flags = termios::tcgetattr(fd)?;
match on {
true => flags.local_flags |= termios::LocalFlags::ECHO,
false => flags.local_flags &= !termios::LocalFlags::ECHO,
}
termios::tcsetattr(fd, termios::SetArg::TCSANOW, &flags)?;
Ok(())
}
pub fn set_raw(fd: RawFd) -> Result<()> {
let mut flags = termios::tcgetattr(fd)?;
#[cfg(not(target_os = "macos"))]
{
termios::cfmakeraw(&mut flags);
}
#[cfg(target_os = "macos")]
{
// implementation is taken from https://github.com/python/cpython/blob/3.9/Lib/tty.py
use nix::libc::{VMIN, VTIME};
use termios::ControlFlags;
use termios::InputFlags;
use termios::LocalFlags;
use termios::OutputFlags;
flags.input_flags &= !(InputFlags::BRKINT
| InputFlags::ICRNL
| InputFlags::INPCK
| InputFlags::ISTRIP
| InputFlags::IXON);
flags.output_flags &= !OutputFlags::OPOST;
flags.control_flags &= !(ControlFlags::CSIZE | ControlFlags::PARENB);
flags.control_flags |= ControlFlags::CS8;
flags.local_flags &=
!(LocalFlags::ECHO | LocalFlags::ICANON | LocalFlags::IEXTEN | LocalFlags::ISIG);
flags.control_chars[VMIN] = 1;
flags.control_chars[VTIME] = 0;
}
termios::tcsetattr(fd, termios::SetArg::TCSANOW, &flags)?;
Ok(())
}
fn get_this_term_char(char: SpecialCharacterIndices) -> Option<u8> {
for &fd in &[STDIN_FILENO, STDOUT_FILENO] {
if let Ok(char) = get_term_char(fd, char) {
return Some(char);
}
}
None
}
fn get_intr_char() -> u8 {
get_this_term_char(SpecialCharacterIndices::VINTR).unwrap_or(DEFAULT_INTR_CHAR)
}
fn get_eof_char() -> u8 {
get_this_term_char(SpecialCharacterIndices::VEOF).unwrap_or(DEFAULT_VEOF_CHAR)
}
fn get_term_char(fd: RawFd, char: SpecialCharacterIndices) -> Result<u8> {
let flags = termios::tcgetattr(fd)?;
let b = flags.control_chars[char as usize];
Ok(b)
}
fn make_controlling_tty(ptm: &Master) -> Result<()> {
#[cfg(not(any(target_os = "freebsd", target_os = "macos")))]
{
let pts_name = ptm.get_slave_name()?;
// https://github.com/pexpect/ptyprocess/blob/c69450d50fbd7e8270785a0552484182f486092f/ptyprocess/_fork_pty.py
// Disconnect from controlling tty, if any
//
        // it may be similar to an ioctl TIOCNOTTY call
// https://man7.org/linux/man-pages/man4/tty_ioctl.4.html
let fd = open("/dev/tty", OFlag::O_RDWR | OFlag::O_NOCTTY, Mode::empty());
match fd {
Ok(fd) => {
close(fd)?;
}
Err(Error::Sys(Errno::ENXIO)) => {
// Sometimes we get ENXIO right here which 'probably' means
                // that we have already been disconnected from the controlling tty.
// Specifically it was discovered on ubuntu-latest Github CI platform.
}
Err(err) => return Err(err),
}
// setsid() will remove the controlling tty. Also the ioctl TIOCNOTTY does this.
// https://www.win.tue.nl/~aeb/linux/lk/lk-10.html
setsid()?;
// Verify we are disconnected from controlling tty by attempting to open
        // it again. We expect an ENXIO error to always be raised.
let fd = open("/dev/tty", OFlag::O_RDWR | OFlag::O_NOCTTY, Mode::empty());
match fd {
Err(Error::Sys(Errno::ENXIO)) => {} // ok
Ok(fd) => {
close(fd)?;
return Err(Error::UnsupportedOperation);
}
Err(_) => return Err(Error::UnsupportedOperation),
}
// Verify we can open child pty.
let fd = open(pts_name.as_str(), OFlag::O_RDWR, Mode::empty())?;
close(fd)?;
// Verify we now have a controlling tty.
let fd = open("/dev/tty", OFlag::O_WRONLY, Mode::empty())?;
close(fd)?;
}
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
{
let pts_fd = ptm.get_slave_fd()?;
// https://docs.freebsd.org/44doc/smm/01.setup/paper-3.html
setsid()?;
use nix::libc::ioctl;
use nix::libc::TIOCSCTTY;
match unsafe { ioctl(pts_fd, TIOCSCTTY as u64, 0) } {
0 => {}
_ => return Err(Error::last()),
}
}
Ok(())
}
// Descriptors listed in `except` are kept open, e.g. to avoid closing (and later double-closing) fds that are still in use
fn close_all_descriptors(except: &[RawFd]) -> Result<()> {
    // On Linux, the getrlimit(RLIMIT_NOFILE, rlim) interface could be used instead
let max_open_fds = sysconf(SysconfVar::OPEN_MAX)?.unwrap() as i32;
(0..max_open_fds)
.filter(|fd| !except.contains(fd))
.for_each(|fd| {
// We don't handle errors intentionally,
// because it will be hard to determine which descriptors closed already.
let _ = close(fd);
});
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn create_pty() -> Result<()> {
let master = Master::open()?;
master.grant_slave_access()?;
master.unlock_slave()?;
let slavename = master.get_slave_name()?;
let expected_path = if cfg!(target_os = "freebsd") {
"pts/"
} else if cfg!(target_os = "macos") {
"/dev/ttys"
} else {
"/dev/pts/"
};
assert!(
slavename.starts_with(expected_path),
"pty_path=={}",
slavename
);
Ok(())
}
#[test]
    #[ignore = "The test should be run in a single thread mode --jobs 1 or --test-threads 1"]
fn release_pty_master() -> Result<()> {
let master = Master::open()?;
let old_master_fd = master.fd.as_raw_fd();
drop(master);
let master = Master::open()?;
assert!(master.fd.as_raw_fd() == old_master_fd);
Ok(())
}
}
| drop |
image.py | """
.. module:: Katna.image
:platform: OS X
:synopsis: This module has functions related to smart cropping
"""
import os
import cv2
import numpy as np
from Katna.decorators import FileDecorators
from Katna.feature_list import FeatureList
from Katna.filter_list import FilterList
from Katna.crop_extractor import CropExtractor
from Katna.crop_selector import CropSelector
import Katna.config as config
from Katna.decorators import DebugDecorators
class UserFiltersEnum:
"""Enum class for filters"""
text = "TextDetector"
class Image(object):
"""Class for all image cropping operations
:param object: base class inheritance
:type object: class:`Object`
"""
def __init__(self, disable_text=True):
"""Constructor for image files"""
featureList = FeatureList()
filterList = FilterList()
self.user_filters_enum = UserFiltersEnum()
self.crop_extractor = CropExtractor()
self.crop_selector = CropSelector()
self.features = featureList.get_features()
self.definedFilters = filterList.get_filters()
def _get_crop_specs(
self, image_height, image_width, ratio_height, ratio_width, is_height_small=True
):
"""Internal function to create the crop specs for a given aspect ratio
:param image_height: height of image
:type image_height: int, required
:param image_width: width of image
:type image_width: int, required
:param ratio_height: aspect ratio height (eg. 3 from 4:3)
:type ratio_height: int, required
:param ratio_width: aspect ratio width (eg. 4 from 4:3)
:type ratio_width: int, required
:param is_height_small: parameter to check if crop dimension should be reduced wrt height[default=True]
:type is_height_small: boolean, required
:return: list of crop height and crop width
:rtype:list of tuples
"""
# multiplication factor by which height/width of crop should be decreased to get crop specs
multiply_by = 1
crop_list_tuple = []
# Calculating the height and width ratio wrt aspect ratio
hr, wr = image_height / ratio_height, image_width / ratio_width
# print("hr, wr",hr, wr)
        # Check if height is smaller than the width. If yes, interchange height and width.
if not is_height_small:
image_height, image_width = image_width, image_height
hr, wr = wr, hr
crop_height, crop_width = image_height, hr * ratio_width
        # Decrease the crop height and width, stopping once either falls below 1/min_image_to_crop_factor of the image height/width
while True:
if not (
(crop_height >= (image_height // config.Image.min_image_to_crop_factor))
and (
crop_width >= (image_width // config.Image.min_image_to_crop_factor)
)
):
break
crop_height, crop_width = (
int(crop_height),
int((ratio_width / ratio_height) * crop_height),
)
crop_list_tuple.append((crop_height, crop_width))
crop_height /= multiply_by
crop_height, crop_width = (
int(crop_height),
int((ratio_width / ratio_height) * crop_height),
)
multiply_by += config.Image.crop_height_reduction_factor_in_each_iteration
return crop_list_tuple
# Apply optional Debug mode decorator , If config=DEBUG is true this decorator
# will populate internal variables of Image module.debug_images with debug images
# Which you can see by opencv Imshow to check if every feature is working as expected
@DebugDecorators.add_optional_debug_images_for_image_module
def crop_image_from_cvimage(
self,
input_image,
crop_width,
crop_height,
num_of_crops,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops the imaged based on the specification - width and height
:param input_image: Input image
:type input_image: numpy array, required
:param crop_width: output crop width
:type crop_width: int
        :param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
        :param filters: filters to be applied for cropping (only returns crops containing English text where the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
        :param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to speed up the process)
:type down_sample_factor: int [default=8]
:return: crop list
:rtype: list of structure crop_rect
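
        Example (a hedged sketch; the file name below is a placeholder, not part of the original docs)::

            import cv2
            from Katna.image import Image

            img_module = Image()
            crops = img_module.crop_image_from_cvimage(
                input_image=cv2.imread("sample.jpg"),
                crop_width=300,
                crop_height=400,
                num_of_crops=3,
            )
            # `crops` is a list of crop_rect objects describing the selected regions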
"""
self.crop_extractor.down_sample_factor = down_sample_factor
if (
input_image.shape[0] + 5 <= crop_height
or input_image.shape[1] + 5 <= crop_width
):
# print(
# "Error: crop width or crop height larger than Image",
# "input_image.shape",
# input_image.shape,
# "crop_width",
# crop_width,
# "crop_height",
# crop_height,
# )
return []
extracted_candidate_crops = self.crop_extractor.extract_candidate_crops(
input_image, crop_width, crop_height, self.features
)
# print(extracted_candidate_crops)
# text: TextDetector
# dummy: DummyDetector
self.filters = []
for x in filters:
try:
self.filters.append(eval("self.user_filters_enum." + x))
except AttributeError as e:
print(str(e))
# self.filters = [eval("user_filters_enum."+x) for x in filters]
crops_list = self.crop_selector.select_candidate_crops(
input_image,
num_of_crops,
extracted_candidate_crops,
self.definedFilters,
self.filters,
)
return crops_list
def _extract_crop_for_files_iterator(
self,
list_of_files,
crop_width,
crop_height,
num_of_crops,
filters,
down_sample_factor,
):
"""Generator which yields crop data / error for filepaths in a list
:param list_of_files: list of files to process for crop
:type list_of_files: list, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param filters: filters to be applied for cropping(checks if image contains english text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
        :param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to speed up the process)
:type down_sample_factor: int [default=8]
        :yield: dict containing error (if any), data, and filepath of image processed
:rtype: dict
"""
for filepath in list_of_files:
print("Running for : ", filepath)
try:
crop_list = self._crop_image(
filepath,
crop_width,
crop_height,
num_of_crops,
filters, | down_sample_factor,
)
                yield {"crops": crop_list, "error": None, "filepath": filepath}
            except Exception as e:
                # Cropping failed for this file, so there are no crops to report
                yield {"crops": None, "error": e, "filepath": filepath}
@FileDecorators.validate_dir_path
def crop_image_from_dir(
self,
dir_path,
crop_width,
crop_height,
num_of_crops,
writer,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops all the images (inside a directory) based on the specification - width and height
:param dir_path: Input Directory path
:type dir_path: str, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
        :param writer: writer object used to persist/process the generated crops
        :type writer: Writer, required
:param filters: filters to be applied for cropping(checks if image contains english text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
        :return: None; for each file the generated crops are handed to the writer object
"""
valid_files = []
all_crops = {}
for path, subdirs, files in os.walk(dir_path):
for filename in files:
filepath = os.path.join(path, filename)
if self._check_if_valid_image(filepath):
valid_files.append(filepath)
if len(valid_files) > 0:
generator = self._extract_crop_for_files_iterator(
valid_files,
crop_width,
crop_height,
num_of_crops,
filters,
down_sample_factor
)
for data in generator:
file_path = data["filepath"]
file_crops = data["crops"]
error = data["error"]
if error is None:
writer.write(file_path, file_crops)
print("Completed processing for : ", file_path)
else:
print("Error processing file : ", file_path)
print(error)
else:
print("All the files in directory %s are invalid video files" % dir_path)
def _crop_image(
self,
file_path,
crop_width,
crop_height,
num_of_crops,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops the imaged based on the specification - width and height
:param file_path: Input file path
:type file_path: str, required
:param crop_width: output crop width
:type crop_width: int
        :param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param filters: filters to be applied for cropping(checks if image contains english text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: crop list
:rtype: list of structure crop_rect
"""
imgFile = cv2.imread(file_path)
crop_list = self.crop_image_from_cvimage(
input_image=imgFile,
crop_width=crop_width,
crop_height=crop_height,
num_of_crops=num_of_crops,
filters=filters,
down_sample_factor=down_sample_factor,
)
return crop_list
@FileDecorators.validate_file_path
def crop_image(
self,
file_path,
crop_width,
crop_height,
num_of_crops,
writer,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops the imaged based on the specification - width and height
:param file_path: Input file path
:type file_path: str, required
:param crop_width: output crop width
:type crop_width: int
        :param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param writer: writer object to process data
:type writer: Writer, required
:param filters: filters to be applied for cropping(checks if image contains english text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
        :return: None; the generated crops are handed to the writer object
"""
crop_list = self._crop_image(
file_path,
crop_width,
crop_height,
num_of_crops,
            filters=filters,
            down_sample_factor=down_sample_factor
)
writer.write(file_path, crop_list)
@FileDecorators.validate_file_path
def crop_image_with_aspect(
self,
file_path,
crop_aspect_ratio,
num_of_crops,
writer,
filters=[],
down_sample_factor=8
):
"""smartly crops the imaged based on the aspect ratio and returns number of specified crops for each crop spec found in the image with
the specified aspect ratio
:param file_path: Input file path
:type file_path: str, required
:param crop_aspect_ratio: output crop ratio
:type crop_aspect_ratio: str (eg. '4:3')
:param num_of_crops: number of crops required
:type num_of_crops: int
:param filters: filters to be applied for cropping(checks if image contains english text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:param writer: writer to process the image
        :type writer: Writer, required
        :return: None; the selected crops are handed to the writer object
"""
imgFile = cv2.imread(file_path)
image_height, image_width, _ = imgFile.shape
ratio_width, ratio_height = map(int, crop_aspect_ratio.split(":"))
crop_list = self._generate_crop_options_given_for_given_aspect_ratio(
imgFile,
image_width,
image_height,
ratio_width,
ratio_height,
num_of_crops=num_of_crops,
filters=filters,
down_sample_factor=down_sample_factor,
)
sorted_list = sorted(crop_list, key=lambda x: float(x.score), reverse=True)
crop_list = sorted_list[:num_of_crops]
writer.write(file_path, crop_list)
#
@FileDecorators.validate_file_path
def save_crop_to_disk(self, crop_rect, frame, file_path, file_name, file_ext, rescale=False):
"""saves an in-memory crop on drive.
:param crop_rect: In-memory crop_rect.
:type crop_rect: crop_rect, required
:param frame: In-memory input image.
:type frame: numpy.ndarray, required
:param file_name: name of the image.
:type file_name: str, required
:param file_path: Folder location where files needs to be saved
:type file_path: str, required
:param file_ext: File extension indicating the file type for example - '.jpg'
:type file_ext: str, required
:return: None
"""
cropped_img = crop_rect.get_image_crop(frame)
file_full_path = os.path.join(file_path, file_name + file_ext)
cv2.imwrite(file_full_path, cropped_img)
@FileDecorators.validate_file_path
def resize_image(
self,
file_path,
target_width,
target_height,
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly resizes the image based on the specification - width and height
:param file_path: Input file path
:type file_path: str, required
:param target_width: output image width
:type target_width: int
:param target_height: output image height
:type target_height: int
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: resized image
:rtype: cv_image
"""
if not self._check_if_valid_image(file_path):
print("Error: Invalid Image, check image path: ", file_path)
return
imgFile = cv2.imread(file_path)
input_image_height, input_image_width, _ = imgFile.shape
target_image_aspect_ratio = target_width / target_height
input_image_aspect_ratio = input_image_width / input_image_height
if input_image_aspect_ratio == target_image_aspect_ratio:
target_image = cv2.resize(imgFile, (target_width, target_height))
return target_image
else:
crop_list = self._generate_crop_options_given_for_given_aspect_ratio(
imgFile,
input_image_width,
input_image_height,
target_width,
target_height,
num_of_crops=1,
filters=[],
down_sample_factor=down_sample_factor,
)
# From list of crop options sort and get best crop using crop score variables in each
# crop option
sorted_list = sorted(crop_list, key=lambda x: float(x.score), reverse=True)
# Get top crop image
resized_image = sorted_list[0].get_image_crop(imgFile)
target_image = cv2.resize(resized_image, (target_width, target_height))
return target_image
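    # Usage sketch (illustrative; the instance and file names are assumptions):
    #   resized = smart_image.resize_image("photo.jpg", target_width=500, target_height=500)
    #   if resized is not None:
    #       cv2.imwrite("photo_500x500.jpg", resized)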
def resize_from_cvimage(
self,
cv_image,
target_width,
target_height,
down_sample_factor=config.Image.down_sample_factor
):
"""smartly resizes a cv image based on the specification - width and height
:param cv_image: Input cv_image
:type cv_image: numpy.ndarray object , required
:param target_width: output image width
:type target_width: int
:param target_height: output image height
:type target_height: int
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: resized image
:rtype: cv_image
"""
input_image_height, input_image_width, _ = cv_image.shape
target_image_aspect_ratio = target_width / target_height
input_image_aspect_ratio = input_image_width / input_image_height
if input_image_aspect_ratio == target_image_aspect_ratio:
target_image = cv2.resize(cv_image, (target_width, target_height))
return target_image
else:
crop_list = self._generate_crop_options_given_for_given_aspect_ratio(
cv_image,
input_image_width,
input_image_height,
target_width,
target_height,
num_of_crops=1,
filters=[],
down_sample_factor=down_sample_factor,
)
sorted_list = sorted(crop_list, key=lambda x: float(x.score), reverse=True)
resized_image = sorted_list[0].get_image_crop(cv_image)
target_image = cv2.resize(resized_image, (target_width, target_height))
return target_image
def _generate_crop_options_given_for_given_aspect_ratio(
self,
imgFile,
input_image_width,
input_image_height,
target_width,
target_height,
num_of_crops,
filters,
down_sample_factor,
):
""" Internal function to which for given aspect ratio (target_width/target_height)
Generates ,scores and returns list of image crops
:param imgFile: Input image
:type imgFile: opencv image
:param input_image_width: input image width
:type input_image_width: int
:param input_image_height: input image height
:type input_image_height: int
:param target_width: target aspect ratio width
:type target_width: int
:param target_height: target aspect ratio height
:type target_height: int
:param num_of_crops: number of crop needed in the end
:type num_of_crops: int
:param filters: filters
:type filters: list of filters
:param down_sample_factor: image down sample factor for optimizing processing time
:type down_sample_factor: int
:return: list of candidate crop rectangles as per input aspect ratio
:rtype: list of CropRect
"""
crop_list_tuple, crop_list = [], []
# Calculate height ratio and width ratio of input and target image
height_ratio, width_ratio = (
input_image_height / target_height,
input_image_width / target_width,
)
        # Generate candidate crops; the _get_crop_specs function changes its behavior based
        # on whether height_ratio is greater or smaller than width_ratio.
if height_ratio <= width_ratio:
crop_list_tuple += self._get_crop_specs(
input_image_height,
input_image_width,
target_height,
target_width,
is_height_small=True,
)
else: # elif width_ratio < height_ratio:
crop_list_tuple += self._get_crop_specs(
input_image_height,
input_image_width,
target_height,
target_width,
is_height_small=False,
)
# For each of crop_specifications generated by _get_crop_spec() function
# generate actual crop as well as give score to each of these crop
for crop_height, crop_width in crop_list_tuple:
crop_list += self.crop_image_from_cvimage(
input_image=imgFile,
crop_width=crop_width,
crop_height=crop_height,
num_of_crops=num_of_crops,
filters=filters,
down_sample_factor=down_sample_factor,
)
return crop_list
@FileDecorators.validate_dir_path
def resize_image_from_dir(
self,
dir_path,
target_width,
target_height,
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly resizes all the images (inside a directory) based on the specification - width and height
:param dir_path: Input Directory path
:type dir_path: str, required
:param target_width: output width
:type target_width: int
:param target_height: output height
:type target_height: int
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: dict with key as filepath and resized image as in opencv format as value
:rtype: dict
"""
all_resized_images = {}
for path, subdirs, files in os.walk(dir_path):
for filename in files:
filepath = os.path.join(path, filename)
image_file_path = os.path.join(path, filename)
if self._check_if_valid_image(image_file_path):
resized_image = self.resize_image(
image_file_path, target_width, target_height, down_sample_factor
)
all_resized_images[filepath] = resized_image
else:
print("Error: Not a valid image file:", image_file_path)
return all_resized_images
@FileDecorators.validate_file_path
def save_image_to_disk(self, image, file_path, file_name, file_ext):
"""saves an in-memory image obtained from image resize on drive.
:param image: In-memory input image.
:type image: numpy.ndarray, required
:param file_name: name of the image.
:type file_name: str, required
:param file_path: Folder location where files needs to be saved
:type file_path: str, required
:param file_ext: File extension indicating the file type for example - '.jpg'
:type file_ext: str, required
:return: None
"""
file_full_path = os.path.join(file_path, file_name + file_ext)
cv2.imwrite(file_full_path, image)
@FileDecorators.validate_file_path
def _check_if_valid_image(self, file_path):
"""Function to check if given image file is a valid image compatible with
opencv
:param file_path: image filename
:type file_path: str
:return: Return True if valid image file else False
:rtype: bool
"""
try:
frame = cv2.imread(file_path)
            # Making sure the loaded image is not empty
if frame is not None:
return True
else:
return False
except cv2.error as e:
print("cv2.error:", e)
return False
except Exception as e:
print("Exception:", e)
return False | |
index.ts | export * from './AnimatedBorder'
export * from './AnimatedSwitch'
export * from './Button'
export * from './Checkbox'
export * from './DeviceTypes'
export * from './elements/InfoButton/InfoButton'
export * from './elements/lists/ExternalLinkListUnstyled'
export * from './elements/lists/LinkListUnstyled'
export * from './elements/lists/ListUnstyled'
export * from './elements/Slider/Slider'
export * from './elements/StatElement/StatElement'
export * from './elements/ToggleSwitch/ToggleSwitch'
export * from './Fade' | export * from './Head'
export * from './LoadingPage'
export * from './Modal'
export * from './ModalPage'
export * from './OnboardingPage'
export * from './Overlay'
export * from './Portal'
export * from './primitives/content/ErrorText'
export * from './primitives/content/Li'
export * from './primitives/content/Paragraph'
export * from './primitives/headers/CondensedHeader'
export * from './primitives/layout/Divider'
export * from './primitives/percentages/Percentage'
export * from './primitives/titles/ComputerName'
export * from './primitives/titles/HeroTitle'
export * from './primitives/titles/MenuTitle'
export * from './primitives/titles/SectionHeader'
export * from './primitives/titles/Username'
export * from './ProgressBar'
export * from './Scrollbar'
export * from './SearchBar'
export * from './SettingsPage'
export * from './SmartLink'
export * from './TextField'
export * from './ToggleSetting'
export * from './Tooltip'
export * from './VerticalProgress' | |
platform.rs | use std::convert::TryFrom;
/// Error when a u32 cannot be converted into a platform.
#[derive(Debug)]
pub struct InvalidPlatformCode(pub u32);
impl std::error::Error for InvalidPlatformCode {}
impl std::fmt::Display for InvalidPlatformCode {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "The code {} is not a valid platform", self.0)
}
}
/// The representation of a platform
#[derive(Debug, Clone, Copy, Eq, PartialEq, serde::Deserialize, serde::Serialize)]
#[serde(try_from = "u32")]
#[serde(into = "u32")]
pub enum Platform {
Pc,
Xbox,
Ps4,
}
impl Platform {
/// Converts a platform into its code
pub fn as_u32(self) -> u32 {
match self { | Platform::Xbox => 1,
Platform::Ps4 => 2,
}
}
/// Tries to convert a u32 into a Platform
pub fn from_u32(n: u32) -> Result<Self, InvalidPlatformCode> {
match n {
4 => Ok(Platform::Pc),
1 => Ok(Platform::Xbox),
2 => Ok(Platform::Ps4),
n => Err(InvalidPlatformCode(n)),
}
}
}
impl TryFrom<u32> for Platform {
type Error = InvalidPlatformCode;
fn try_from(n: u32) -> Result<Self, Self::Error> {
Self::from_u32(n)
}
}
impl From<Platform> for u32 {
fn from(platform: Platform) -> Self {
platform.as_u32()
}
} | Platform::Pc => 4, |
analyzer.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var util_1 = require("./parser/util");
var isExportArray = function (e) {
return e.startsWith('[') && e.endsWith(']');
};
var parseExportArray = function (e) {
return e
.replace('[', '')
.replace(']', '')
.split(',')
.map(function (e) { return e.trim(); });
};
var getFileExports = function (file) {
var exports = {};
file.exports.forEach(function (e, index) {
var addExport = function (exportName) {
exports[exportName] = {
usageCount: 0,
location: file.exportLocations[index],
};
};
if (isExportArray(e)) {
var exportArray = parseExportArray(e);
exportArray.forEach(addExport);
}
else {
addExport(e);
}
});
return { exports: exports, path: file.fullPath };
};
var getExportMap = function (files) {
var map = {};
files.forEach(function (file) {
map[file.path] = getFileExports(file);
});
return map;
};
var processImports = function (file, exportMap) {
Object.keys(file.imports).forEach(function (key) {
var _a, _b;
var ex = (_a = exportMap[key]) === null || _a === void 0 ? void 0 : _a.exports;
// Handle imports from an index file
if (!ex && key === '.') {
var indexCandidates = ['index', 'index.ts', 'index.tsx'];
for (var c = 0; c < indexCandidates.length; c++) {
var indexKey = indexCandidates[c];
ex = ((_b = exportMap[indexKey]) === null || _b === void 0 ? void 0 : _b.exports) || undefined;
if (ex)
break;
}
}
if (!ex)
return;
var addUsage = function (imp) {
if (!ex[imp]) {
// The imported symbol we are checking was not found in the imported
// file. For example:
// `a.ts` import { b } from './b';
// `b.ts` does not export a `b` symbol
// In here `imp` is `b`, `imports` represents `a.ts` and `ex.exports`
// are the symbols exported by `b.ts`
ex[imp] = {
usageCount: 0,
location: {
line: 1, | ex[imp].usageCount++;
};
file.imports[key].forEach(function (imp) {
return imp === '*'
? Object.keys(ex)
.filter(function (e) { return e != 'default'; })
.forEach(addUsage)
: addUsage(imp);
});
});
};
var expandExportFromStarOrStarAsForFile = function (file, exportMap, prefix, isWithAlias) {
var fileExports = exportMap[file.path];
file.exports
.filter(function (ex) { return ex.startsWith(prefix); })
.forEach(function (ex) {
var _a;
delete fileExports.exports[ex];
var exports = (_a = exportMap[util_1.removeExportStarPrefix(ex)]) === null || _a === void 0 ? void 0 : _a.exports;
if (exports) {
Object.keys(exports)
.filter(function (e) { return e != 'default'; })
.forEach(function (key) {
if (!isWithAlias) {
// Copy the exports from the imported file:
if (!fileExports.exports[key]) {
var export1 = exports[key];
fileExports.exports[key] = {
usageCount: 0,
location: export1.location,
};
}
fileExports.exports[key].usageCount = 0;
}
// else is export-as: so this file exports a new namespace.
// Mark the items as imported, for the imported file:
var importedFileExports = exportMap[util_1.removeExportStarPrefix(ex)];
if (importedFileExports) {
importedFileExports.exports[key].usageCount++;
}
});
}
});
};
// export * from 'a' (no 'alias')
var expandExportFromStarForFile = function (file, exportMap) {
expandExportFromStarOrStarAsForFile(file, exportMap, '*:', false);
};
// export * as X from 'a' (has 'alias')
var expandExportFromStarAsForFile = function (file, exportMap) {
expandExportFromStarOrStarAsForFile(file, exportMap, '*as:', true);
};
var expandExportFromStar = function (files, exportMap) {
files.forEach(function (file) {
expandExportFromStarForFile(file, exportMap);
expandExportFromStarAsForFile(file, exportMap);
});
};
// Allow disabling of *results*, by path from command line (useful for large projects)
var shouldPathBeExcludedFromResults = function (path, extraOptions) {
if (!extraOptions || !extraOptions.pathsToExcludeFromReport) {
return false;
}
return extraOptions.pathsToExcludeFromReport.some(function (ignore) {
return path.includes(ignore);
});
};
var filterFiles = function (files, extraOptions) {
var _a;
if (!(extraOptions === null || extraOptions === void 0 ? void 0 : extraOptions.ignoreFilesRegex)) {
return files;
}
var regexes = (_a = extraOptions.ignoreFilesRegex) === null || _a === void 0 ? void 0 : _a.map(function (rex) { return new RegExp(rex); });
var shouldIgnoreFile = function (fileName) {
return regexes.some(function (reg) {
return reg.test(fileName);
});
};
return files.filter(function (f) { return !shouldIgnoreFile(f.path); });
};
exports.default = (function (files, extraOptions) {
var filteredFiles = filterFiles(files, extraOptions);
var exportMap = getExportMap(filteredFiles);
expandExportFromStar(filteredFiles, exportMap);
filteredFiles.forEach(function (file) { return processImports(file, exportMap); });
var analysis = {};
Object.keys(exportMap).forEach(function (file) {
var expItem = exportMap[file];
var exports = expItem.exports, path = expItem.path;
if (shouldPathBeExcludedFromResults(path, extraOptions))
return;
var unusedExports = Object.keys(exports).filter(function (k) { return exports[k].usageCount === 0; });
if (unusedExports.length === 0) {
return;
}
analysis[path] = [];
unusedExports.forEach(function (e) {
analysis[path].push({
exportName: e,
location: exports[e].location,
});
});
});
return analysis;
});
//# sourceMappingURL=analyzer.js.map | character: 1,
},
};
} |
main.py | #!/usr/bin/python3
import requests
import re
import datetime
import html
import json
from epg_sources.tele import tele
from epg_sources.teleboy import teleboy
from icon_sources.tele import tele as teleicon
from icon_sources.teleboy import teleboy as teleboyicon
class channel_item:
id: str
lang: str
display_name: str
class programm_item:
start: datetime
stop: datetime
channel: str
icon: str
title: str
country: str
desc: str
sub_title: str
credits: dict
category: str
episode_num: str
date: int
length: int
def __main__():
print("[*] Getting/parsing Init7 tvchannels.m3u playlist")
channels = get_channel_list()
channels = prepare_channel_list(channels)
print("[*] Getting EPG and icons data from teleboy.ch")
teleboy_raw = teleboy.get_epg_by_duration(7*24*60)
teleboy_icons = teleboyicon.get_images(teleboy_raw)
teleboy_icons_matched = match_icons(
channels, teleboy_icons, './mappings/teleboy.json')
teleboy_epg = match_teleboy_epg(channels, teleboy_raw)
print("[✓] Matched " +
str(len(teleboy_icons_matched)) + " teleboy.ch icons")
print("[*] Getting icons data from tele.ch")
tele_icons = teleicon.get_images()
tele_icons_matched = match_icons(
channels, tele_icons, './mappings/tele.json')
print("[✓] Matched " + str(len(tele_icons_matched)) + " tele.ch icons")
print("[*] Getting EPG data from tele.ch")
tele_raw = tele.get_epg_by_duration(7*24*60)
tele_epg = match_tele_epg(channels, tele_raw)
# generate the xml for the channels
all_icons = {**tele_icons_matched, **teleboy_icons_matched}
print("[✓] Total " + str(len(all_icons)) + " icons")
channels_xmltv = channels_to_xmltv(channels, all_icons)
# generate tv7_teleboy_epg.xml
with open('tv7_teleboy_epg.xml', 'w+') as w:
w.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?><tv>" +
channels_xmltv + programms_to_xmltv(teleboy_epg) + "</tv>")
# generate tv7_tele_epg.xml
with open('tv7_tele_epg.xml', 'w+') as w:
w.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?><tv>" +
channels_xmltv + programms_to_xmltv(tele_epg) + "</tv>")
# generate tv7_epg.xml
full_epg = []
full_epg.extend(tele_epg)
full_epg.extend(teleboy_epg)
programms_xmltv = programms_to_xmltv(full_epg)
with open('tv7_epg.xml', 'w+') as w:
w.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?><tv>" +
channels_xmltv + programms_xmltv + "</tv>")
def get_channel_list():
tv7channel_list = requests.get("https://api.init7.net/tvchannels.m3u").text
tv7channel_list = re.sub(r"udp:\/\/.+", "", tv7channel_list)
tv7channel_list = tv7channel_list.replace("\n", "")
tv7channel_list = tv7channel_list.replace("#EXTM3U", "")
tv7channel_list = tv7channel_list.split("#EXTINF:-1,")
return tv7channel_list
def prepare_channel_list(channel_list):
    prepared_list = []
    for channel in channel_list:
        prepared_list.append({
            "display_name": channel,
            "id": gen_channel_id_from_name(channel),
            "lang": "de"
        })
    return prepared_list
def gen_channel_id_from_name(channel_name):
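    # e.g. "SRF 1 HD (Schweiz)" -> "srf1"; the same normalisation is applied to the channel
    # names coming from tele.ch / teleboy.ch so both sides can be matched on this id.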
return channel_name.lower().replace("hd", "").replace("schweiz", "").replace("ch", "").replace("(", "").replace(")", "").replace(" ", "")
def find_channel_by_id(id, channel_list):
for channel in channel_list:
if id == channel["id"]:
return True
return False
def match_ | el_list, tele_epg):
print("[*] Matching tele.ch EPG data (" + str(len(tele_epg)) +
" programms to " + str(len(channel_list)) + " channels)")
mapping = json.loads(open('./mappings/tele.json', 'r').read())
programms = []
matched_channels = set()
for programm in tele_epg:
channel_id = gen_channel_id_from_name(programm["channelLong"])
if channel_id in mapping:
channel_id = mapping[channel_id]
if find_channel_by_id(channel_id, channel_list):
matched_channels.add(channel_id)
programm_matched = {
"start": programm["availabilityStartTime"],
"stop": programm["availabilityEndTime"],
"channel": channel_id,
"icon": programm["image"],
"title": programm["title"],
}
if "subtitle" in programm and programm["subtitle"]:
programm_matched["sub_title"] = programm["subtitle"]
if "productionCountry" in programm and programm["productionCountry"]:
programm_matched["country"] = programm["productionCountry"]
if "synopsis" in programm and programm["synopsis"]:
programm_matched["desc"] = programm["synopsis"]
if "persons" in programm and programm["persons"]:
programm_matched["credits"] = programm["persons"]
if "cast" in programm["persons"] and programm["persons"]["cast"]:
programm_matched["credits"]["actors"] = programm["persons"]["cast"]
del programm_matched["credits"]["cast"]
if "category" in programm and programm["category"]:
programm_matched["category"] = programm["category"]
if "episode" in programm and "season" in programm and programm["episode"] and programm["season"]:
programm_matched["episode_num"] = "S" + \
str(programm["season"]) + " E" + str(programm["episode"])
elif "episode" in programm and programm["episode"]:
programm_matched["episode_num"] = programm["episode"]
if "productionYearFirst" in programm and programm["productionYearFirst"]:
programm_matched["date"] = programm["productionYearFirst"]
programms.append(programm_matched)
print("[✓] Matched " + str(len(matched_channels)) + " tele.ch channels")
return programms
def match_icons(channel_list, icons, mapping):
print("[*] Matching channel icons (" + str(len(icons)) +
" icons to " + str(len(channel_list)) + " channels)")
mapping = json.loads(open(mapping, 'r').read())
icons_matched = {}
for icon in icons:
channel_id = gen_channel_id_from_name(icon['name'])
if channel_id in mapping:
channel_id = mapping[channel_id]
if find_channel_by_id(channel_id, channel_list):
icons_matched[channel_id] = icon['src']
return icons_matched
def match_teleboy_epg(channel_list, teleboy_epg):
print("[*] Matching teleboy.ch EPG data (" + str(len(teleboy_epg)) +
" programms to " + str(len(channel_list)) + " channels)")
mapping = json.loads(open('./mappings/teleboy.json', 'r').read())
programms = []
matched_channels = set()
for programm in teleboy_epg:
channel_id = gen_channel_id_from_name(programm["station"])
if channel_id in mapping:
channel_id = mapping[channel_id]
if find_channel_by_id(channel_id, channel_list):
matched_channels.add(channel_id)
programm_matched = {
"start": programm["begin"],
"stop": programm["end"],
"channel": channel_id,
"icon": programm["image"],
"title": programm["title"],
}
if "subtitle" in programm and programm["subtitle"]:
programm_matched["sub_title"] = programm["subtitle"]
if "country" in programm and programm["country"]:
programm_matched["country"] = programm["country"]
if "desc" in programm and programm["desc"]:
programm_matched["desc"] = programm["desc"]
if "episode_num" in programm and "season_num" in programm and programm["episode_num"] and programm["season_num"]:
programm_matched["episode_num"] = "S" + \
str(programm["season_num"]) + " E" + \
str(programm["episode_num"])
elif "episode_num" in programm and programm["episode_num"]:
programm_matched["episode_num"] = programm["episode_num"]
if "year" in programm and programm["year"]:
programm_matched["date"] = programm["year"]
programms.append(programm_matched)
print("[✓] Matched " + str(len(matched_channels)) + " teleboy.ch channels")
return programms
def programms_to_xmltv(programms):
print("[*] Generating XML for " + str(len(programms)) + " programms")
programms_xml = ""
for programm in programms:
programm_xml = ""
programm_xml = programm_xml + "<programme start=\""+programm["start"].strftime(
"%Y%m%d%H%M%S %z")+"\" stop=\""+programm["stop"].strftime("%Y%m%d%H%M%S %z")+"\" channel=\""+programm["channel"]+"\">"
programm_xml = programm_xml + "<icon src=\""+programm["icon"]+"\" />"
programm_xml = programm_xml + "<title>" + \
html.escape(programm["title"] or "")+"</title>"
if "sub_title" in programm:
programm_xml = programm_xml + "<sub-title>" + \
html.escape(programm["sub_title"] or "")+"</sub-title>"
if "country" in programm:
programm_xml = programm_xml + "<country>" + \
html.escape(programm["country"] or "")+"</country>"
if "category" in programm:
programm_xml = programm_xml + "<category lang=\"de\">" + \
html.escape(programm["category"] or "")+"</category>"
if "desc" in programm:
programm_xml = programm_xml + "<desc lang=\"de\">" + \
html.escape(programm["desc"] or "")+"</desc>"
if "persons" in programm:
programm_xml = programm_xml + "<credits>"
for attrib in programm["persons"]:
if attrib == "actors":
for actor in programm["persons"]["actors"]:
programm_xml = programm_xml + "<actor>" + actor + "</actor>"
else:
programm_xml = programm_xml + "<"+attrib+">" + \
programm["persons"][attrib] + "</"+attrib+">"
programm_xml = programm_xml + "</credits>"
if "episode-num" in programm:
programm_xml = programm_xml + "<episode-num>" + \
programm["episode_num"]+"</episode-num>"
if "date" in programm:
programm_xml = programm_xml + "<date>" + \
str(programm["date"])+"</date>"
if "durationSeconds" in programm:
programm_xml = programm_xml + "<length>" + \
str(programm["duration"])+"</length>"
programm_xml = programm_xml + "</programme>"
programms_xml = programms_xml + programm_xml
return programms_xml
def channels_to_xmltv(channel_list, icons):
print("[*] Generating XML for " + str(len(channel_list)) + " channels")
channels_xml = ""
for channel in channel_list:
channel_xml = "<channel id=\"" + channel["id"] + "\">"
channel_xml = channel_xml + "<display-name lang=\"de\">" + \
channel["display_name"] + "</display-name>"
channel_xml = channel_xml + "<display-name lang=\"fr\">" + \
channel["display_name"] + "</display-name>"
channel_xml = channel_xml + "<display-name lang=\"it\">" + \
channel["display_name"] + "</display-name>"
if channel['id'] in icons:
channel_xml = channel_xml + "<icon src=\"" + \
icons[channel['id']] + "\" />"
channel_xml = channel_xml + "</channel>"
channels_xml = channels_xml + channel_xml
return channels_xml
__main__()
# programm.availabilityStartTime.strftime("%Y%m%d%H%M%S %z"),
| tele_epg(chann |
user.go | // Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package externalaccount
import (
"strings"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/models/login"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/structs"
"github.com/markbates/goth"
)
// LinkAccountToUser link the gothUser to the user
func LinkAccountToUser(user *user_model.User, gothUser goth.User) error | {
loginSource, err := login.GetActiveOAuth2LoginSourceByName(gothUser.Provider)
if err != nil {
return err
}
externalLoginUser := &user_model.ExternalLoginUser{
ExternalID: gothUser.UserID,
UserID: user.ID,
LoginSourceID: loginSource.ID,
RawData: gothUser.RawData,
Provider: gothUser.Provider,
Email: gothUser.Email,
Name: gothUser.Name,
FirstName: gothUser.FirstName,
LastName: gothUser.LastName,
NickName: gothUser.NickName,
Description: gothUser.Description,
AvatarURL: gothUser.AvatarURL,
Location: gothUser.Location,
AccessToken: gothUser.AccessToken,
AccessTokenSecret: gothUser.AccessTokenSecret,
RefreshToken: gothUser.RefreshToken,
ExpiresAt: gothUser.ExpiresAt,
}
if err := user_model.LinkExternalToUser(user, externalLoginUser); err != nil {
return err
}
externalID := externalLoginUser.ExternalID
var tp structs.GitServiceType
for _, s := range structs.SupportedFullGitService {
if strings.EqualFold(s.Name(), gothUser.Provider) {
tp = s
break
}
}
if tp.Name() != "" {
return models.UpdateMigrationsByType(tp, externalID, user.ID)
}
return nil
} |
|
smd.py | #
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import os
import sys
import logging
import requests
import tarfile
import numpy as np
import pandas as pd
from pathlib import Path
from ts_datasets.anomaly.base import TSADBaseDataset
| _handler.setLevel(logging.DEBUG)
_logger.addHandler(_handler)
class SMD(TSADBaseDataset):
"""
The Server Machine Dataset (SMD) is a new 5-week-long dataset from
a large Internet company collected and made publicly available.
It contains data from 28 server machines and each machine is monitored by 33 metrics.
SMD is divided into training set and testing set of equal size.
- source: https://github.com/NetManAIOps/OmniAnomaly
"""
filename = "ServerMachineDataset"
url = "https://www.dropbox.com/s/x53ph5cru62kv0f/ServerMachineDataset.tar.gz?dl=1"
valid_subsets = (
[f"machine-1-{i}" for i in range(1, 9)]
+ [f"machine-2-{i}" for i in range(1, 10)]
+ [f"machine-3-{i}" for i in range(1, 12)]
)
def __init__(self, subset="all", rootdir=None):
super().__init__()
if subset == "all":
subset = self.valid_subsets
elif type(subset) == str:
assert subset in self.valid_subsets, f"subset should be in {self.valid_subsets}, but got {subset}"
subset = [subset]
if rootdir is None:
fdir = os.path.dirname(os.path.abspath(__file__))
merlion_root = os.path.abspath(os.path.join(fdir, "..", "..", ".."))
rootdir = os.path.join(merlion_root, "data", "smd")
# Download the SMD dataset if it doesn't exist
download(_logger, rootdir, SMD.url, SMD.filename)
for s in subset:
# Load training/test datasets
df, metadata = combine_train_test_datasets(
*SMD._load_data(directory=os.path.join(rootdir, SMD.filename), sequence_name=s)
)
self.time_series.append(df)
self.metadata.append(metadata)
@staticmethod
def _load_data(directory, sequence_name):
with open(os.path.join(directory, "test", f"{sequence_name}.txt"), "r") as f:
test_data = np.genfromtxt(f, dtype=np.float32, delimiter=",")
with open(os.path.join(directory, "test_label", f"{sequence_name}.txt"), "r") as f:
test_labels = np.genfromtxt(f, dtype=np.float32, delimiter=",")
with open(os.path.join(directory, "train", f"{sequence_name}.txt"), "r") as f:
train_data = np.genfromtxt(f, dtype=np.float32, delimiter=",")
return (pd.DataFrame(train_data), pd.DataFrame(test_data), test_labels.astype(int))
def combine_train_test_datasets(train_df, test_df, test_labels):
train_df.columns = [str(c) for c in train_df.columns]
test_df.columns = [str(c) for c in test_df.columns]
df = pd.concat([train_df, test_df]).reset_index()
if "index" in df:
df.drop(columns=["index"], inplace=True)
df.index = pd.to_datetime(df.index * 60, unit="s")
df.index.rename("timestamp", inplace=True)
# There are no labels for training examples, so the training labels are set to 0 by default
# The dataset is only for unsupervised time series anomaly detection
metadata = pd.DataFrame(
{
"trainval": df.index < df.index[train_df.shape[0]],
"anomaly": np.concatenate([np.zeros(train_df.shape[0], dtype=int), test_labels]),
},
index=df.index,
)
return df, metadata
def download(logger, datapath, url, filename):
os.makedirs(datapath, exist_ok=True)
compressed_file = os.path.join(datapath, f"{filename}.tar.gz")
# Download the compressed dataset
if not os.path.exists(compressed_file):
logger.info("Downloading " + url)
with requests.get(url, stream=True) as r:
with open(compressed_file, "wb") as f:
for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
# Uncompress the downloaded tar file
if not os.path.exists(os.path.join(datapath, "_SUCCESS")):
logger.info(f"Uncompressing {compressed_file}")
tar = tarfile.open(compressed_file, "r:gz")
tar.extractall(path=datapath)
tar.close()
Path(os.path.join(datapath, "_SUCCESS")).touch() | _logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
_handler = logging.StreamHandler(sys.stdout) |
DescribeSMBFileSharesCommand.ts | import { ServiceInputTypes, ServiceOutputTypes, StorageGatewayClientResolvedConfig } from "../StorageGatewayClient";
import { DescribeSMBFileSharesInput, DescribeSMBFileSharesOutput } from "../models/models_0";
import {
deserializeAws_json1_1DescribeSMBFileSharesCommand,
serializeAws_json1_1DescribeSMBFileSharesCommand,
} from "../protocols/Aws_json1_1";
import { getSerdePlugin } from "@aws-sdk/middleware-serde";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
import { Command as $Command } from "@aws-sdk/smithy-client";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
MiddlewareStack,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
SerdeContext as __SerdeContext,
} from "@aws-sdk/types";
export interface DescribeSMBFileSharesCommandInput extends DescribeSMBFileSharesInput {}
export interface DescribeSMBFileSharesCommandOutput extends DescribeSMBFileSharesOutput, __MetadataBearer {}
/**
* <p>Gets a description for one or more Server Message Block (SMB) file shares from a file
* gateway. This operation is only supported for file gateways.</p>
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
* import { StorageGatewayClient, DescribeSMBFileSharesCommand } from "@aws-sdk/client-storage-gateway"; // ES Modules import
* // const { StorageGatewayClient, DescribeSMBFileSharesCommand } = require("@aws-sdk/client-storage-gateway"); // CommonJS import
* const client = new StorageGatewayClient(config);
* const command = new DescribeSMBFileSharesCommand(input);
* const response = await client.send(command);
* ```
*
* @see {@link DescribeSMBFileSharesCommandInput} for command's `input` shape.
* @see {@link DescribeSMBFileSharesCommandOutput} for command's `response` shape.
* @see {@link StorageGatewayClientResolvedConfig | config} for command's `input` shape.
*
*/
export class DescribeSMBFileSharesCommand extends $Command<
DescribeSMBFileSharesCommandInput,
DescribeSMBFileSharesCommandOutput,
StorageGatewayClientResolvedConfig
> {
// Start section: command_properties
// End section: command_properties
constructor(readonly input: DescribeSMBFileSharesCommandInput) {
// Start section: command_constructor
super();
// End section: command_constructor
}
/**
* @internal
*/
resolveMiddleware(
clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
configuration: StorageGatewayClientResolvedConfig,
options?: __HttpHandlerOptions
): Handler<DescribeSMBFileSharesCommandInput, DescribeSMBFileSharesCommandOutput> {
this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));
const stack = clientStack.concat(this.middlewareStack);
const { logger } = configuration; | logger,
clientName,
commandName,
inputFilterSensitiveLog: DescribeSMBFileSharesInput.filterSensitiveLog,
outputFilterSensitiveLog: DescribeSMBFileSharesOutput.filterSensitiveLog,
};
const { requestHandler } = configuration;
return stack.resolve(
(request: FinalizeHandlerArguments<any>) =>
requestHandler.handle(request.request as __HttpRequest, options || {}),
handlerExecutionContext
);
}
private serialize(input: DescribeSMBFileSharesCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
return serializeAws_json1_1DescribeSMBFileSharesCommand(input, context);
}
private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<DescribeSMBFileSharesCommandOutput> {
return deserializeAws_json1_1DescribeSMBFileSharesCommand(output, context);
}
// Start section: command_body_extra
// End section: command_body_extra
} | const clientName = "StorageGatewayClient";
const commandName = "DescribeSMBFileSharesCommand";
const handlerExecutionContext: HandlerExecutionContext = { |
pb.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pb reflects over protocol buffer descriptors to generate objects
// that simplify type, enum, and field lookup.
package pb
import (
"bytes"
"compress/gzip"
"fmt"
"github.com/golang/protobuf/descriptor"
"github.com/golang/protobuf/proto"
descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
"io/ioutil"
"reflect"
)
// DescribeEnum takes a qualified enum name and returns an EnumDescription.
func | (enumName string) (*EnumDescription, error) {
enumName = sanitizeProtoName(enumName)
if fd, found := revFileDescriptorMap[enumName]; found {
return fd.GetEnumDescription(enumName)
}
return nil, fmt.Errorf("unrecognized enum '%s'", enumName)
}
// DescribeFile takes a protocol buffer message and indexes all of the message
// types and enum values contained within the message's file descriptor.
func DescribeFile(message proto.Message) (*FileDescription, error) {
if fd, found := revFileDescriptorMap[proto.MessageName(message)]; found {
return fd, nil
}
fileDesc, _ := descriptor.ForMessage(message.(descriptor.Message))
fd, err := describeFileInternal(fileDesc)
if err != nil {
return nil, err
}
pkg := fd.Package()
fd.indexTypes(pkg, fileDesc.MessageType)
fd.indexEnums(pkg, fileDesc.EnumType)
return fd, nil
}
// DescribeType provides a TypeDescription given a qualified type name.
func DescribeType(typeName string) (*TypeDescription, error) {
typeName = sanitizeProtoName(typeName)
if fd, found := revFileDescriptorMap[typeName]; found {
return fd.GetTypeDescription(typeName)
}
return nil, fmt.Errorf("unrecognized type '%s'", typeName)
}
// DescribeValue takes an instance of a protocol buffer message and returns
// the associated TypeDescription.
func DescribeValue(value proto.Message) (*TypeDescription, error) {
fd, err := DescribeFile(value)
if err != nil {
return nil, err
}
typeName := proto.MessageName(value)
return fd.GetTypeDescription(typeName)
}
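// Usage sketch (illustrative; "google.protobuf.Duration" is only an example name and the
// type must already have been indexed via DescribeFile/DescribeValue for the lookup to succeed):
//
//	if td, err := DescribeType("google.protobuf.Duration"); err == nil {
//		fmt.Println(td) // inspect the returned *TypeDescription
//	}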
var (
// map from file / message / enum name to file description.
fileDescriptorMap = make(map[string]*FileDescription)
revFileDescriptorMap = make(map[string]*FileDescription)
// map from reflected type to type description.
descriptorMap = make(map[reflect.Type]*TypeDescription)
)
func describeFileInternal(fileDesc *descpb.FileDescriptorProto) (*FileDescription, error) {
fd := &FileDescription{
desc: fileDesc,
types: make(map[string]*TypeDescription),
enums: make(map[string]*EnumDescription)}
fileDescriptorMap[fileDesc.GetName()] = fd
for _, dep := range fileDesc.Dependency {
if _, found := fileDescriptorMap[dep]; !found {
nestedDesc, err := fileDescriptor(dep)
if err != nil {
				return nil, err
}
describeFileInternal(nestedDesc)
}
}
return fd, nil
}
func fileDescriptor(protoFileName string) (*descpb.FileDescriptorProto, error) {
gzipped := proto.FileDescriptor(protoFileName)
r, err := gzip.NewReader(bytes.NewReader(gzipped))
if err != nil {
return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
}
unzipped, err := ioutil.ReadAll(r)
if err != nil {
return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
}
fd := &descpb.FileDescriptorProto{}
if err := proto.Unmarshal(unzipped, fd); err != nil {
return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
}
return fd, nil
}
| DescribeEnum |
dog.spec.ts | import { Test, TestingModule } from '@nestjs/testing';
import { DogProvider } from './dog';
describe('Dog', () => {
let provider: DogProvider;
| providers: [DogProvider],
}).compile();
    provider = module.get<DogProvider>(DogProvider);
});
it('should be defined', () => {
expect(provider).toBeDefined();
});
}); | beforeEach(async () => {
const module: TestingModule = await Test.createTestingModule({ |
main.go | // Package main is a tracing example with Jaeger
package main
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"strconv"
"strings"
"time"
"github.com/deixis/spine"
"github.com/deixis/spine/log"
"github.com/deixis/spine/net/http"
"github.com/deixis/spine/stats"
)
type AppConfig struct {
Foo string `json:"foo"`
}
var (
seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))
)
func | () {
// Create spine
config := &AppConfig{}
app, err := spine.New("api", config)
if err != nil {
fmt.Println("Problem initialising spine", err)
os.Exit(1)
}
// Register HTTP handler
h := handler{}
s := http.NewServer()
s.HandleFunc("/test", http.GET, h.Test)
app.RegisterServer("127.0.0.1:3003", s)
// Start serving requests
err = app.Serve()
if err != nil {
fmt.Println("Problem serving requests", err)
os.Exit(1)
}
}
type handler struct {
}
// Cache handler example
func (h *handler) Test(ctx context.Context, w http.ResponseWriter, r *http.Request) {
// Stats
startTime := time.Now()
stats := stats.FromContext(ctx)
tags := map[string]string{
"method": r.HTTP.Method,
"path": r.HTTP.URL.Path,
}
stats.Inc("http.conc", tags)
n := seededRand.Intn(2000)
log.FromContext(ctx).Trace("http.stats.test", "Test request",
log.Int("wait_ms", n),
)
time.Sleep(time.Duration(n) * time.Millisecond)
w.Data(http.StatusOK, "text/plain", ioutil.NopCloser(strings.NewReader("OK")))
// Stats
tags["status"] = strconv.Itoa(w.Code())
stats.Histogram("http.call", 1, tags)
stats.Timing("http.time", time.Since(startTime), tags)
stats.Dec("http.conc", tags)
}
| main |
prepare_data.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
if __name__ == '__main__':
import autopath
import argparse
import glob
import os
import xml.dom.minidom
import random
import sys
import multiprocessing
import alex.utils.various as various
from alex.utils.config import as_project_path
from alex.corpustools.text_norm_cs import normalise_text, exclude_slu
from alex.corpustools.wavaskey import save_wavaskey
from alex.components.asr.common import asr_factory
from alex.components.asr.utterance import Utterance, UtteranceNBList
from alex.components.slu.base import CategoryLabelDatabase
from alex.applications.PublicTransportInfoCS.preprocessing import PTICSSLUPreprocessing
from alex.applications.PublicTransportInfoCS.hdc_slu import PTICSHDCSLU
from alex.utils.config import Config
""" The script has commands:
--asr-log it uses the asr hypotheses from call logs
"""
asr_log = 0
num_workers = 1
cldb = CategoryLabelDatabase('../data/database.py')
preprocessing = PTICSSLUPreprocessing(cldb)
slu = PTICSHDCSLU(preprocessing, cfg = {'SLU': {PTICSHDCSLU: {'utt2da': as_project_path("applications/PublicTransportInfoCS/data/utt2da_dict.txt")}}})
cfg = Config.load_configs(['../kaldi.cfg',], use_default=True)
asr_rec = asr_factory(cfg)
def normalise_semi_words(txt):
# normalise these semi-words
if txt == '__other__':
txt = '_other_'
elif txt == '__silence__':
txt = '_other_'
elif not txt:
txt = '_other_'
return txt
def | (fn):
name = multiprocessing.current_process().name
asr = []
nbl = []
sem = []
trn = []
trn_hdc_sem = []
fcount = 0
tcount = 0
f_dir = os.path.dirname(fn)
print "Process name:", name
print "File #", fcount
fcount += 1
print "Processing:", fn
doc = xml.dom.minidom.parse(fn)
turns = doc.getElementsByTagName("turn")
for i, turn in enumerate(turns):
if turn.getAttribute('speaker') != 'user':
continue
recs = turn.getElementsByTagName("rec")
trans = turn.getElementsByTagName("asr_transcription")
asrs = turn.getElementsByTagName("asr")
if len(recs) != 1:
print "Skipping a turn {turn} in file: {fn} - recs: {recs}".format(turn=i, fn=fn, recs=len(recs))
continue
if len(asrs) == 0 and (i + 1) < len(turns):
next_asrs = turns[i + 1].getElementsByTagName("asr")
if len(next_asrs) != 2:
print "Skipping a turn {turn} in file: {fn} - asrs: {asrs} - next_asrs: {next_asrs}".format(turn=i,
fn=fn,
asrs=len(
asrs),
next_asrs=len(
next_asrs))
continue
print "Recovered from missing ASR output by using a delayed ASR output from the following turn of turn {turn}. File: {fn} - next_asrs: {asrs}".format(
turn=i, fn=fn, asrs=len(next_asrs))
hyps = next_asrs[0].getElementsByTagName("hypothesis")
elif len(asrs) == 1:
hyps = asrs[0].getElementsByTagName("hypothesis")
elif len(asrs) == 2:
print "Recovered from EXTRA ASR outputs by using a the last ASR output from the turn. File: {fn} - asrs: {asrs}".format(
fn=fn, asrs=len(asrs))
hyps = asrs[-1].getElementsByTagName("hypothesis")
else:
print "Skipping a turn {turn} in file {fn} - asrs: {asrs}".format(turn=i, fn=fn, asrs=len(asrs))
continue
if len(trans) == 0:
print "Skipping a turn in {fn} - trans: {trans}".format(fn=fn, trans=len(trans))
continue
wav_key = recs[0].getAttribute('fname')
wav_path = os.path.join(f_dir, wav_key)
# FIXME: Check whether the last transcription is really the best! FJ
t = various.get_text_from_xml_node(trans[-1])
t = normalise_text(t)
if '--asr-log' not in sys.argv:
asr_rec_nbl = asr_rec.rec_wav_file(wav_path)
a = unicode(asr_rec_nbl.get_best())
else:
a = various.get_text_from_xml_node(hyps[0])
a = normalise_semi_words(a)
if exclude_slu(t) or 'DOM Element:' in a:
print "Skipping transcription:", unicode(t)
print "Skipping ASR output: ", unicode(a)
continue
# The silence does not have a label in the language model.
t = t.replace('_SIL_', '')
trn.append((wav_key, t))
print
print "Transcritpiton #", tcount
tcount += 1
print "Parsing transcription:", unicode(t)
print " ASR:", unicode(a)
# HDC SLU on transcription
s = slu.parse_1_best({'utt': Utterance(t)}).get_best_da()
trn_hdc_sem.append((wav_key, s))
# 1 best ASR
asr.append((wav_key, a))
# N best ASR
n = UtteranceNBList()
if '--asr-log' not in sys.argv:
n = asr_rec_nbl
print 'ASR RECOGNITION NBLIST\n', unicode(n)
else:
for h in hyps:
txt = various.get_text_from_xml_node(h)
txt = normalise_semi_words(txt)
n.add(abs(float(h.getAttribute('p'))), Utterance(txt))
n.merge()
n.normalise()
nbl.append((wav_key, n.serialise()))
# there is no manual semantics in the transcriptions yet
sem.append((wav_key, None))
return asr, nbl, sem, trn, trn_hdc_sem, fcount, tcount
def main():
global asr_log
global num_workers
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""This program prepares data for training Alex PTIcs SLU.
""")
parser.add_argument('--num_workers', action="store", default=num_workers, type=int,
help='number of workers used for ASR: default %d' % num_workers)
parser.add_argument('--asr_log', action="store", default=asr_log, type=int,
help='use ASR results from logs: default %d' % asr_log)
args = parser.parse_args()
asr_log = args.asr_log
num_workers = args.num_workers
fn_uniq_trn = 'uniq.trn'
fn_uniq_trn_hdc_sem = 'uniq.trn.hdc.sem'
fn_uniq_trn_sem = 'uniq.trn.sem'
fn_all_sem = 'all.sem'
fn_all_trn = 'all.trn'
fn_all_trn_hdc_sem = 'all.trn.hdc.sem'
fn_all_asr = 'all.asr'
fn_all_nbl = 'all.nbl'
fn_train_sem = 'train.sem'
fn_train_trn = 'train.trn'
fn_train_trn_hdc_sem = 'train.trn.hdc.sem'
fn_train_asr = 'train.asr'
fn_train_nbl = 'train.nbl'
fn_dev_sem = 'dev.sem'
fn_dev_trn = 'dev.trn'
fn_dev_trn_hdc_sem = 'dev.trn.hdc.sem'
fn_dev_asr = 'dev.asr'
fn_dev_nbl = 'dev.nbl'
fn_test_sem = 'test.sem'
fn_test_trn = 'test.trn'
fn_test_trn_hdc_sem = 'test.trn.hdc.sem'
fn_test_asr = 'test.asr'
fn_test_nbl = 'test.nbl'
indomain_data_dir = "indomain_data"
print "Generating the SLU train and test data"
print "-"*120
###############################################################################################
files = []
files.append(glob.glob(os.path.join(indomain_data_dir, 'asr_transcribed.xml')))
files.append(glob.glob(os.path.join(indomain_data_dir, '*', 'asr_transcribed.xml')))
files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', 'asr_transcribed.xml')))
files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', '*', 'asr_transcribed.xml')))
files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', '*', '*', 'asr_transcribed.xml')))
files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', '*', '*', '*', 'asr_transcribed.xml')))
files = various.flatten(files)
files = files[:100000]
asr = []
nbl = []
sem = []
trn = []
trn_hdc_sem = []
p_process_call_logs = multiprocessing.Pool(num_workers)
processed_cls = p_process_call_logs.imap_unordered(process_call_log, files)
count = 0
for pcl in processed_cls:
count += 1
#process_call_log(fn) # uniq utterances
#print pcl
print "="*80
print "Processed files ", count, "/", len(files)
print "="*80
asr.extend(pcl[0])
nbl.extend(pcl[1])
sem.extend(pcl[2])
trn.extend(pcl[3])
trn_hdc_sem.extend(pcl[4])
uniq_trn = {}
uniq_trn_hdc_sem = {}
uniq_trn_sem = {}
trn_set = set()
sem = dict(trn_hdc_sem)
for k, v in trn:
if not v in trn_set:
trn_set.add(v)
uniq_trn[k] = v
uniq_trn_hdc_sem[k] = sem[k]
uniq_trn_sem[k] = v + " <=> " + unicode(sem[k])
save_wavaskey(fn_uniq_trn, uniq_trn)
save_wavaskey(fn_uniq_trn_hdc_sem, uniq_trn_hdc_sem, trans = lambda da: '&'.join(sorted(unicode(da).split('&'))))
save_wavaskey(fn_uniq_trn_sem, uniq_trn_sem)
# all
save_wavaskey(fn_all_trn, dict(trn))
save_wavaskey(fn_all_trn_hdc_sem, dict(trn_hdc_sem), trans = lambda da: '&'.join(sorted(unicode(da).split('&'))))
save_wavaskey(fn_all_asr, dict(asr))
save_wavaskey(fn_all_nbl, dict(nbl))
seed_value = 10
random.seed(seed_value)
random.shuffle(trn)
random.seed(seed_value)
random.shuffle(trn_hdc_sem)
random.seed(seed_value)
random.shuffle(asr)
random.seed(seed_value)
random.shuffle(nbl)
# trn
train_trn = trn[:int(0.8*len(trn))]
dev_trn = trn[int(0.8*len(trn)):int(0.9*len(trn))]
test_trn = trn[int(0.9*len(trn)):]
save_wavaskey(fn_train_trn, dict(train_trn))
save_wavaskey(fn_dev_trn, dict(dev_trn))
save_wavaskey(fn_test_trn, dict(test_trn))
# trn_hdc_sem
train_trn_hdc_sem = trn_hdc_sem[:int(0.8*len(trn_hdc_sem))]
dev_trn_hdc_sem = trn_hdc_sem[int(0.8*len(trn_hdc_sem)):int(0.9*len(trn_hdc_sem))]
test_trn_hdc_sem = trn_hdc_sem[int(0.9*len(trn_hdc_sem)):]
save_wavaskey(fn_train_trn_hdc_sem, dict(train_trn_hdc_sem), trans = lambda da: '&'.join(sorted(unicode(da).split('&'))))
save_wavaskey(fn_dev_trn_hdc_sem, dict(dev_trn_hdc_sem), trans = lambda da: '&'.join(sorted(unicode(da).split('&'))))
save_wavaskey(fn_test_trn_hdc_sem, dict(test_trn_hdc_sem), trans = lambda da: '&'.join(sorted(unicode(da).split('&'))))
# asr
train_asr = asr[:int(0.8*len(asr))]
dev_asr = asr[int(0.8*len(asr)):int(0.9*len(asr))]
test_asr = asr[int(0.9*len(asr)):]
save_wavaskey(fn_train_asr, dict(train_asr))
save_wavaskey(fn_dev_asr, dict(dev_asr))
save_wavaskey(fn_test_asr, dict(test_asr))
# n-best lists
train_nbl = nbl[:int(0.8*len(nbl))]
dev_nbl = nbl[int(0.8*len(nbl)):int(0.9*len(nbl))]
test_nbl = nbl[int(0.9*len(nbl)):]
save_wavaskey(fn_train_nbl, dict(train_nbl))
save_wavaskey(fn_dev_nbl, dict(dev_nbl))
save_wavaskey(fn_test_nbl, dict(test_nbl))
if __name__ == '__main__':
main()
| process_call_log |
const_test.go | package core
import (
"context"
"gotest.tools/assert"
"testing" | assert.DeepEqual(t, perms, []Permission{PermAdmin, PermSign, PermWrite, PermRead})
}
func TestWithPerm(t *testing.T) {
ctx := WithPerm(context.Background(), PermAdmin)
callerPerms, ok := ctx.Value(PermCtxKey).([]Permission)
if !ok {
t.Fatal()
}
t.Log(callerPerms)
} | )
func TestAdaptOldStrategy(t *testing.T) {
perms := AdaptOldStrategy(PermAdmin) |
istio_config.go | package business
import (
"fmt"
"sync"
"github.com/kiali/kiali/kubernetes"
"github.com/kiali/kiali/services/models"
)
type IstioConfigService struct {
k8s kubernetes.IstioClientInterface
}
type IstioConfigCriteria struct {
Namespace string
IncludeGateways bool
IncludeVirtualServices bool
IncludeDestinationRules bool
IncludeServiceEntries bool
IncludeRules bool
IncludeQuotaSpecs bool
IncludeQuotaSpecBindings bool
}
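// Example criteria (sketch): fetch only the routing objects for one namespace.
//
//	criteria := IstioConfigCriteria{
//		Namespace:               "bookinfo",
//		IncludeVirtualServices:  true,
//		IncludeDestinationRules: true,
//	}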
// GetIstioConfig returns a list of Istio routing objects
// and Mixer Rules per a given Namespace.
func (in *IstioConfigService) GetIstioConfig(criteria IstioConfigCriteria) (models.IstioConfigList, error) {
if criteria.Namespace == "" {
return models.IstioConfigList{}, fmt.Errorf("GetIstioConfig needs a non null Namespace")
}
istioConfigList := models.IstioConfigList{
Namespace: models.Namespace{Name: criteria.Namespace},
Gateways: models.Gateways{},
VirtualServices: models.VirtualServices{},
DestinationRules: models.DestinationRules{},
ServiceEntries: models.ServiceEntries{},
Rules: models.IstioRules{},
QuotaSpecs: models.QuotaSpecs{},
QuotaSpecBindings: models.QuotaSpecBindings{},
}
var gg, vs, dr, se, qs, qb []kubernetes.IstioObject
var mr *kubernetes.IstioRules
var ggErr, vsErr, drErr, seErr, mrErr, qsErr, qbErr error
var wg sync.WaitGroup
wg.Add(7)
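// Fetch the seven Istio object types concurrently; wg.Add(7) must match the number
// of goroutines launched below.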
go func() {
defer wg.Done()
if criteria.IncludeGateways {
if gg, ggErr = in.k8s.GetGateways(criteria.Namespace); ggErr == nil {
(&istioConfigList.Gateways).Parse(gg)
}
}
}()
go func() {
defer wg.Done()
if criteria.IncludeVirtualServices {
if vs, vsErr = in.k8s.GetVirtualServices(criteria.Namespace, ""); vsErr == nil {
(&istioConfigList.VirtualServices).Parse(vs)
}
}
}()
go func() {
defer wg.Done()
if criteria.IncludeDestinationRules {
if dr, drErr = in.k8s.GetDestinationRules(criteria.Namespace, ""); drErr == nil {
(&istioConfigList.DestinationRules).Parse(dr)
}
}
}()
go func() {
defer wg.Done()
if criteria.IncludeServiceEntries {
if se, seErr = in.k8s.GetServiceEntries(criteria.Namespace); seErr == nil {
(&istioConfigList.ServiceEntries).Parse(se)
}
}
}()
go func() {
defer wg.Done()
if criteria.IncludeRules {
if mr, mrErr = in.k8s.GetIstioRules(criteria.Namespace); mrErr == nil {
istioConfigList.Rules = models.CastIstioRulesCollection(mr)
}
}
}()
go func() {
defer wg.Done()
if criteria.IncludeQuotaSpecs {
if qs, qsErr = in.k8s.GetQuotaSpecs(criteria.Namespace); qsErr == nil {
(&istioConfigList.QuotaSpecs).Parse(qs)
}
}
}()
go func() {
defer wg.Done()
if criteria.IncludeQuotaSpecBindings {
if qb, qbErr = in.k8s.GetQuotaSpecBindings(criteria.Namespace); qbErr == nil {
(&istioConfigList.QuotaSpecBindings).Parse(qb)
}
}
}()
wg.Wait()
for _, genErr := range []error{ggErr, vsErr, drErr, seErr, mrErr, qsErr, qbErr} {
if genErr != nil {
return models.IstioConfigList{}, genErr
}
}
return istioConfigList, nil
}
func (in *IstioConfigService) GetIstioConfigDetails(namespace string, objectType string, object string) (models.IstioConfigDetails, error) {
istioConfigDetail := models.IstioConfigDetails{}
istioConfigDetail.Namespace = models.Namespace{Name: namespace}
istioConfigDetail.ObjectType = objectType
var gw, vs, dr, se, qs, qb kubernetes.IstioObject
var r *kubernetes.IstioRuleDetails
var err error
switch objectType {
case "gateways":
if gw, err = in.k8s.GetGateway(namespace, object); err == nil {
istioConfigDetail.Gateway = &models.Gateway{}
istioConfigDetail.Gateway.Parse(gw)
}
case "virtualservices": | if vs, err = in.k8s.GetVirtualService(namespace, object); err == nil {
istioConfigDetail.VirtualService = &models.VirtualService{}
istioConfigDetail.VirtualService.Parse(vs)
}
case "destinationrules":
if dr, err = in.k8s.GetDestinationRule(namespace, object); err == nil {
istioConfigDetail.DestinationRule = &models.DestinationRule{}
istioConfigDetail.DestinationRule.Parse(dr)
}
case "serviceentries":
if se, err = in.k8s.GetServiceEntry(namespace, object); err == nil {
istioConfigDetail.ServiceEntry = &models.ServiceEntry{}
istioConfigDetail.ServiceEntry.Parse(se)
}
case "rules":
if r, err = in.k8s.GetIstioRuleDetails(namespace, object); err == nil {
istioConfigDetail.ObjectType = "rules"
istioConfigDetail.Rule = models.CastIstioRuleDetails(r)
}
case "quotaspecs":
if qs, err = in.k8s.GetQuotaSpec(namespace, object); err == nil {
istioConfigDetail.ObjectType = "quotaspecs"
istioConfigDetail.QuotaSpec = &models.QuotaSpec{}
istioConfigDetail.QuotaSpec.Parse(qs)
}
case "quotaspecbindings":
if qb, err = in.k8s.GetQuotaSpecBinding(namespace, object); err == nil {
istioConfigDetail.ObjectType = "quotaspecbindings"
istioConfigDetail.QuotaSpecBinding = &models.QuotaSpecBinding{}
istioConfigDetail.QuotaSpecBinding.Parse(qb)
}
default:
err = fmt.Errorf("Object type not found: %v", objectType)
}
return istioConfigDetail, err
} | |
handler_test.go | package updatecheck
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/types"
)
func TestLatestDockerVersionPushed(t *testing.T) {
if testing.Short() {
t.Skip("Skipping due to network request against dockerhub")
}
url := fmt.Sprintf("https://index.docker.io/v1/repositories/sourcegraph/server/tags/%s", latestReleaseDockerServerImageBuild.Version)
resp, err := http.Get(url)
if err != nil |
if resp.StatusCode == 404 {
t.Fatalf("sourcegraph/server:%s does not exist on dockerhub. %s", latestReleaseDockerServerImageBuild.Version, url)
}
if resp.StatusCode != 200 {
t.Skip("unexpected response from dockerhub", resp.StatusCode)
}
}
func TestLatestKubernetesVersionPushed(t *testing.T) {
if testing.Short() {
t.Skip("Skipping due to network request")
}
url := fmt.Sprintf("https://github.com/sourcegraph/deploy-sourcegraph/releases/tag/v%v", latestReleaseKubernetesBuild.Version)
resp, err := http.Head(url)
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != 200 {
t.Errorf("Could not find Kubernetes release %s on GitHub. Response code %s from %s, err: %v", latestReleaseKubernetesBuild.Version, resp.Status, url, err)
}
}
func TestLatestDockerComposeOrPureDockerVersionPushed(t *testing.T) {
if testing.Short() {
t.Skip("Skipping due to network request")
}
url := fmt.Sprintf("https://github.com/sourcegraph/deploy-sourcegraph-docker/releases/tag/v%v", latestReleaseDockerComposeOrPureDocker.Version)
resp, err := http.Head(url)
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != 200 {
t.Errorf("Could not find Docker Compose or Pure Docker release %s on GitHub. Response code %s from %s, err: %v", latestReleaseDockerComposeOrPureDocker.Version, resp.Status, url, err)
}
}
func TestCanUpdate(t *testing.T) {
tests := []struct {
name string
now time.Time
clientVersionString string
latestReleaseBuild build
hasUpdate bool
err error
}{
{
name: "no version update",
clientVersionString: "v1.2.3",
latestReleaseBuild: newBuild("1.2.3"),
hasUpdate: false,
},
{
name: "version update",
clientVersionString: "v1.2.3",
latestReleaseBuild: newBuild("1.2.4"),
hasUpdate: true,
},
{
name: "no date update clock skew",
now: time.Date(2018, time.August, 1, 0, 0, 0, 0, time.UTC),
clientVersionString: "19272_2018-08-02_f7dec47",
latestReleaseBuild: newBuild("1.2.3"),
hasUpdate: false,
},
{
name: "no date update",
now: time.Date(2018, time.September, 1, 0, 0, 0, 0, time.UTC),
clientVersionString: "19272_2018-08-01_f7dec47",
latestReleaseBuild: newBuild("1.2.3"),
hasUpdate: false,
},
{
name: "date update",
now: time.Date(2018, time.August, 42, 0, 0, 0, 0, time.UTC),
clientVersionString: "19272_2018-08-01_f7dec47",
latestReleaseBuild: newBuild("1.2.3"),
hasUpdate: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// Mock the current time for this test.
timeNow = func() time.Time {
return test.now
}
// Restore the real time after this test is done.
defer func() {
timeNow = time.Now
}()
hasUpdate, err := canUpdate(test.clientVersionString, test.latestReleaseBuild)
if err != test.err {
t.Fatalf("expected error %s; got %s", test.err, err)
}
if hasUpdate != test.hasUpdate {
t.Fatalf("expected hasUpdate=%t; got hasUpdate=%t", test.hasUpdate, hasUpdate)
}
})
}
}
func TestSerializeBasic(t *testing.T) {
pr := &pingRequest{
ClientSiteID: "0101-0101",
LicenseKey: "mylicense",
DeployType: "server",
ClientVersionString: "3.12.6",
AuthProviders: []string{"foo", "bar"},
ExternalServices: []string{extsvc.KindGitHub, extsvc.KindGitLab},
CodeHostVersions: nil,
BuiltinSignupAllowed: true,
HasExtURL: false,
UniqueUsers: 123,
Activity: json.RawMessage([]byte(`{"foo":"bar"}`)),
BatchChangesUsage: nil,
CodeIntelUsage: nil,
CodeMonitoringUsage: nil,
CodeHostIntegrationUsage: nil,
IDEExtensionsUsage: nil,
SearchUsage: nil,
GrowthStatistics: nil,
SavedSearches: nil,
HomepagePanels: nil,
SearchOnboarding: nil,
InitialAdminEmail: "[email protected]",
TotalUsers: 234,
HasRepos: true,
EverSearched: false,
EverFindRefs: true,
RetentionStatistics: nil,
}
now := time.Now()
payload, err := marshalPing(pr, true, "127.0.0.1", now)
if err != nil {
t.Fatalf("unexpected error %s", err)
}
compareJSON(t, payload, `{
"remote_ip": "127.0.0.1",
"remote_site_version": "3.12.6",
"remote_site_id": "0101-0101",
"license_key": "mylicense",
"has_update": "true",
"unique_users_today": "123",
"site_activity": {"foo":"bar"},
"batch_changes_usage": null,
"code_intel_usage": null,
"new_code_intel_usage": null,
"dependency_versions": null,
"extensions_usage": null,
"code_insights_usage": null,
"code_insights_critical_telemetry": null,
"code_monitoring_usage": null,
"code_host_integration_usage": null,
"ide_extensions_usage": null,
"cta_usage": null,
"search_usage": null,
"growth_statistics": null,
"saved_searches": null,
"search_onboarding": null,
"homepage_panels": null,
"repositories": null,
"retention_statistics": null,
"installer_email": "[email protected]",
"auth_providers": "foo,bar",
"ext_services": "GITHUB,GITLAB",
"code_host_versions": null,
"builtin_signup_allowed": "true",
"deploy_type": "server",
"total_user_accounts": "234",
"has_external_url": "false",
"has_repos": "true",
"ever_searched": "false",
"ever_find_refs": "true",
"timestamp": "`+now.UTC().Format(time.RFC3339)+`"
}`)
}
func TestSerializeFromQuery(t *testing.T) {
pr, err := readPingRequestFromQuery(url.Values{
"site": []string{"0101-0101"},
"deployType": []string{"server"},
"version": []string{"3.12.6"},
"auth": []string{"foo,bar"},
"extsvcs": []string{"GITHUB,GITLAB"},
"signup": []string{"true"},
"hasExtURL": []string{"false"},
"u": []string{"123"},
"act": []string{`{"foo": "bar"}`},
"initAdmin": []string{"[email protected]"},
"totalUsers": []string{"234"},
"repos": []string{"true"},
"searched": []string{"false"},
"refs": []string{"true"},
})
if err != nil {
t.Fatalf("unexpected error %s", err)
}
now := time.Now()
payload, err := marshalPing(pr, true, "127.0.0.1", now)
if err != nil {
t.Fatalf("unexpected error %s", err)
}
compareJSON(t, payload, `{
"remote_ip": "127.0.0.1",
"remote_site_version": "3.12.6",
"remote_site_id": "0101-0101",
"license_key": "",
"has_update": "true",
"unique_users_today": "123",
"site_activity": {"foo":"bar"},
"batch_changes_usage": null,
"code_intel_usage": null,
"new_code_intel_usage": null,
"dependency_versions": null,
"extensions_usage": null,
"code_insights_usage": null,
"code_insights_critical_telemetry": null,
"code_monitoring_usage": null,
"code_host_integration_usage": null,
"ide_extensions_usage": null,
"cta_usage": null,
"search_usage": null,
"growth_statistics": null,
"saved_searches": null,
"homepage_panels": null,
"search_onboarding": null,
"repositories": null,
"retention_statistics": null,
"installer_email": "[email protected]",
"auth_providers": "foo,bar",
"ext_services": "GITHUB,GITLAB",
"code_host_versions": null,
"builtin_signup_allowed": "true",
"deploy_type": "server",
"total_user_accounts": "234",
"has_external_url": "false",
"has_repos": "true",
"ever_searched": "false",
"ever_find_refs": "true",
"timestamp": "`+now.UTC().Format(time.RFC3339)+`"
}`)
}
func TestSerializeBatchChangesUsage(t *testing.T) {
pr := &pingRequest{
ClientSiteID: "0101-0101",
DeployType: "server",
ClientVersionString: "3.12.6",
AuthProviders: []string{"foo", "bar"},
ExternalServices: []string{extsvc.KindGitHub, extsvc.KindGitLab},
CodeHostVersions: nil,
BuiltinSignupAllowed: true,
HasExtURL: false,
UniqueUsers: 123,
Activity: json.RawMessage([]byte(`{"foo":"bar"}`)),
BatchChangesUsage: json.RawMessage([]byte(`{"baz":"bonk"}`)),
CodeIntelUsage: nil,
CodeMonitoringUsage: nil,
CodeHostIntegrationUsage: nil,
IDEExtensionsUsage: nil,
NewCodeIntelUsage: nil,
SearchUsage: nil,
GrowthStatistics: nil,
SavedSearches: nil,
HomepagePanels: nil,
SearchOnboarding: nil,
InitialAdminEmail: "[email protected]",
TotalUsers: 234,
HasRepos: true,
EverSearched: false,
EverFindRefs: true,
RetentionStatistics: nil,
}
now := time.Now()
payload, err := marshalPing(pr, true, "127.0.0.1", now)
if err != nil {
t.Fatalf("unexpected error %s", err)
}
compareJSON(t, payload, `{
"remote_ip": "127.0.0.1",
"remote_site_version": "3.12.6",
"remote_site_id": "0101-0101",
"license_key": "",
"has_update": "true",
"unique_users_today": "123",
"site_activity": {"foo":"bar"},
"batch_changes_usage": {"baz":"bonk"},
"code_intel_usage": null,
"new_code_intel_usage": null,
"dependency_versions": null,
"extensions_usage": null,
"code_insights_usage": null,
"code_insights_critical_telemetry": null,
"code_monitoring_usage": null,
"code_host_integration_usage": null,
"ide_extensions_usage": null,
"cta_usage": null,
"search_usage": null,
"growth_statistics": null,
"saved_searches": null,
"homepage_panels": null,
"search_onboarding": null,
"repositories": null,
"retention_statistics": null,
"installer_email": "[email protected]",
"auth_providers": "foo,bar",
"ext_services": "GITHUB,GITLAB",
"code_host_versions": null,
"builtin_signup_allowed": "true",
"deploy_type": "server",
"total_user_accounts": "234",
"has_external_url": "false",
"has_repos": "true",
"ever_searched": "false",
"ever_find_refs": "true",
"timestamp": "`+now.UTC().Format(time.RFC3339)+`"
}`)
}
func TestSerializeCodeIntelUsage(t *testing.T) {
now := time.Unix(1587396557, 0).UTC()
testUsage, err := json.Marshal(types.NewCodeIntelUsageStatistics{
StartOfWeek: now,
WAUs: int32Ptr(25),
SearchBasedWAUs: int32Ptr(10),
PreciseCrossRepositoryWAUs: int32Ptr(40),
EventSummaries: []types.CodeIntelEventSummary{
{
Action: types.HoverAction,
Source: types.PreciseSource,
LanguageID: "go",
CrossRepository: false,
WAUs: 1,
TotalActions: 1,
},
{
Action: types.HoverAction,
Source: types.SearchSource,
LanguageID: "",
CrossRepository: true,
WAUs: 2,
TotalActions: 2,
},
{
Action: types.DefinitionsAction,
Source: types.PreciseSource,
LanguageID: "go",
CrossRepository: true,
WAUs: 3,
TotalActions: 3,
},
{
Action: types.DefinitionsAction,
Source: types.SearchSource,
LanguageID: "go",
CrossRepository: false,
WAUs: 4,
TotalActions: 4,
},
{
Action: types.ReferencesAction,
Source: types.PreciseSource,
LanguageID: "",
CrossRepository: false,
WAUs: 5,
TotalActions: 1,
},
{
Action: types.ReferencesAction,
Source: types.SearchSource,
LanguageID: "typescript",
CrossRepository: false,
WAUs: 6,
TotalActions: 3,
},
},
NumRepositories: int32Ptr(50 + 85),
NumRepositoriesWithUploadRecords: int32Ptr(50),
NumRepositoriesWithFreshUploadRecords: int32Ptr(40),
NumRepositoriesWithIndexRecords: int32Ptr(30),
NumRepositoriesWithFreshIndexRecords: int32Ptr(20),
NumRepositoriesWithAutoIndexConfigurationRecords: int32Ptr(7),
CountsByLanguage: map[string]types.CodeIntelRepositoryCountsByLanguage{
"go": {
NumRepositoriesWithUploadRecords: int32Ptr(10),
NumRepositoriesWithFreshUploadRecords: int32Ptr(20),
NumRepositoriesWithIndexRecords: int32Ptr(30),
NumRepositoriesWithFreshIndexRecords: int32Ptr(40),
},
"typescript": {
NumRepositoriesWithUploadRecords: int32Ptr(15),
NumRepositoriesWithFreshUploadRecords: int32Ptr(25),
NumRepositoriesWithIndexRecords: int32Ptr(35),
NumRepositoriesWithFreshIndexRecords: int32Ptr(45),
},
},
SettingsPageViewCount: int32Ptr(1489),
LanguageRequests: []types.LanguageRequest{
{
LanguageID: "frob",
NumRequests: 123,
},
{
LanguageID: "borf",
NumRequests: 321,
},
},
InvestigationEvents: []types.CodeIntelInvestigationEvent{
{
Type: types.CodeIntelUploadErrorInvestigationType,
WAUs: 25,
Total: 42,
},
},
})
if err != nil {
t.Fatalf("unexpected error %s", err)
}
pr := &pingRequest{
ClientSiteID: "0101-0101",
DeployType: "server",
ClientVersionString: "3.12.6",
AuthProviders: []string{"foo", "bar"},
ExternalServices: []string{extsvc.KindGitHub, extsvc.KindGitLab},
CodeHostVersions: nil,
BuiltinSignupAllowed: true,
HasExtURL: false,
UniqueUsers: 123,
Activity: json.RawMessage([]byte(`{"foo":"bar"}`)),
BatchChangesUsage: nil,
CodeIntelUsage: nil,
CodeMonitoringUsage: nil,
CodeHostIntegrationUsage: nil,
IDEExtensionsUsage: nil,
NewCodeIntelUsage: testUsage,
SearchUsage: nil,
GrowthStatistics: nil,
SavedSearches: nil,
HomepagePanels: nil,
SearchOnboarding: nil,
InitialAdminEmail: "[email protected]",
TotalUsers: 234,
HasRepos: true,
EverSearched: false,
EverFindRefs: true,
RetentionStatistics: nil,
}
payload, err := marshalPing(pr, true, "127.0.0.1", now)
if err != nil {
t.Fatalf("unexpected error %s", err)
}
compareJSON(t, payload, `{
"remote_ip": "127.0.0.1",
"remote_site_version": "3.12.6",
"remote_site_id": "0101-0101",
"license_key": "",
"has_update": "true",
"unique_users_today": "123",
"site_activity": {"foo":"bar"},
"batch_changes_usage": null,
"code_intel_usage": null,
"new_code_intel_usage": {
"start_time": "2020-04-20T15:29:17Z",
"waus": 25,
"precise_waus": null,
"search_waus": 10,
"xrepo_waus": null,
"precise_xrepo_waus": 40,
"search_xrepo_waus": null,
"event_summaries": [
{
"action": "hover",
"source": "precise",
"language_id": "go",
"cross_repository": false,
"waus": 1,
"total_actions": 1
},
{
"action": "hover",
"source": "search",
"language_id": "",
"cross_repository": true,
"waus": 2,
"total_actions": 2
},
{
"action": "definitions",
"source": "precise",
"language_id": "go",
"cross_repository": true,
"waus": 3,
"total_actions": 3
},
{
"action": "definitions",
"source": "search",
"language_id": "go",
"cross_repository": false,
"waus": 4,
"total_actions": 4
},
{
"action": "references",
"source": "precise",
"language_id": "",
"cross_repository": false,
"waus": 5,
"total_actions": 1
},
{
"action": "references",
"source": "search",
"language_id": "typescript",
"cross_repository": false,
"waus": 6,
"total_actions": 3
}
],
"num_repositories": 135,
"num_repositories_with_upload_records": 50,
"num_repositories_without_upload_records": 85,
"num_repositories_with_fresh_upload_records": 40,
"num_repositories_with_index_records": 30,
"num_repositories_with_fresh_index_records": 20,
"num_repositories_with_index_configuration_records": 7,
"counts_by_language": [
{
"language_id": "go",
"num_repositories_with_upload_records": 10,
"num_repositories_with_fresh_upload_records": 20,
"num_repositories_with_index_records": 30,
"num_repositories_with_fresh_index_records": 40
},
{
"language_id": "typescript",
"num_repositories_with_upload_records": 15,
"num_repositories_with_fresh_upload_records": 25,
"num_repositories_with_index_records": 35,
"num_repositories_with_fresh_index_records": 45
}
],
"settings_page_view_count": 1489,
"language_requests": [
{
"language_id": "frob",
"num_requests": 123
},
{
"language_id": "borf",
"num_requests": 321
}
],
"investigation_events": [
{
"type": "CodeIntelligenceUploadErrorInvestigated",
"waus": 25,
"total": 42
}
]
},
"code_monitoring_usage": null,
"code_host_integration_usage": null,
"ide_extensions_usage": null,
"cta_usage": null,
"dependency_versions": null,
"extensions_usage": null,
"code_insights_usage": null,
"code_insights_critical_telemetry": null,
"search_usage": null,
"growth_statistics": null,
"saved_searches": null,
"homepage_panels": null,
"search_onboarding": null,
"repositories": null,
"retention_statistics": null,
"installer_email": "[email protected]",
"auth_providers": "foo,bar",
"ext_services": "GITHUB,GITLAB",
"code_host_versions": null,
"builtin_signup_allowed": "true",
"deploy_type": "server",
"total_user_accounts": "234",
"has_external_url": "false",
"has_repos": "true",
"ever_searched": "false",
"ever_find_refs": "true",
"timestamp": "`+now.UTC().Format(time.RFC3339)+`"
}`)
}
func TestSerializeOldCodeIntelUsage(t *testing.T) {
now := time.Unix(1587396557, 0).UTC()
testPeriod, err := json.Marshal(&types.OldCodeIntelUsagePeriod{
StartTime: now,
Hover: &types.OldCodeIntelEventCategoryStatistics{
LSIF: &types.OldCodeIntelEventStatistics{UsersCount: 1, EventsCount: int32Ptr(1)},
Search: &types.OldCodeIntelEventStatistics{UsersCount: 2, EventsCount: int32Ptr(2)},
},
Definitions: &types.OldCodeIntelEventCategoryStatistics{
LSIF: &types.OldCodeIntelEventStatistics{UsersCount: 3, EventsCount: int32Ptr(3)},
Search: &types.OldCodeIntelEventStatistics{UsersCount: 4, EventsCount: int32Ptr(4)},
},
References: &types.OldCodeIntelEventCategoryStatistics{
LSIF: &types.OldCodeIntelEventStatistics{UsersCount: 5, EventsCount: int32Ptr(1)},
Search: &types.OldCodeIntelEventStatistics{UsersCount: 6, EventsCount: int32Ptr(3)},
},
})
if err != nil {
t.Fatalf("unexpected error %s", err)
}
period := string(testPeriod)
pr := &pingRequest{
ClientSiteID: "0101-0101",
DeployType: "server",
ClientVersionString: "3.12.6",
AuthProviders: []string{"foo", "bar"},
ExternalServices: []string{extsvc.KindGitHub, extsvc.KindGitLab},
CodeHostVersions: nil,
BuiltinSignupAllowed: true,
HasExtURL: false,
UniqueUsers: 123,
Activity: json.RawMessage([]byte(`{"foo":"bar"}`)),
BatchChangesUsage: nil,
CodeIntelUsage: json.RawMessage([]byte(`{"Weekly": [` + period + `]}`)),
CodeMonitoringUsage: nil,
CodeHostIntegrationUsage: nil,
IDEExtensionsUsage: nil,
NewCodeIntelUsage: nil,
SearchUsage: nil,
GrowthStatistics: nil,
SavedSearches: nil,
HomepagePanels: nil,
SearchOnboarding: nil,
InitialAdminEmail: "[email protected]",
TotalUsers: 234,
HasRepos: true,
EverSearched: false,
EverFindRefs: true,
RetentionStatistics: nil,
}
payload, err := marshalPing(pr, true, "127.0.0.1", now)
if err != nil {
t.Fatalf("unexpected error %s", err)
}
compareJSON(t, payload, `{
"remote_ip": "127.0.0.1",
"remote_site_version": "3.12.6",
"remote_site_id": "0101-0101",
"license_key": "",
"has_update": "true",
"unique_users_today": "123",
"site_activity": {"foo":"bar"},
"batch_changes_usage": null,
"code_intel_usage": null,
"new_code_intel_usage": {
"start_time": "2020-04-20T15:29:17Z",
"waus": null,
"precise_waus": null,
"search_waus": null,
"xrepo_waus": null,
"precise_xrepo_waus": null,
"search_xrepo_waus": null,
"event_summaries": [
{
"action": "hover",
"source": "precise",
"language_id": "",
"cross_repository": false,
"waus": 1,
"total_actions": 1
},
{
"action": "hover",
"source": "search",
"language_id": "",
"cross_repository": false,
"waus": 2,
"total_actions": 2
},
{
"action": "definitions",
"source": "precise",
"language_id": "",
"cross_repository": false,
"waus": 3,
"total_actions": 3
},
{
"action": "definitions",
"source": "search",
"language_id": "",
"cross_repository": false,
"waus": 4,
"total_actions": 4
},
{
"action": "references",
"source": "precise",
"language_id": "",
"cross_repository": false,
"waus": 5,
"total_actions": 1
},
{
"action": "references",
"source": "search",
"language_id": "",
"cross_repository": false,
"waus": 6,
"total_actions": 3
}
],
"num_repositories": null,
"num_repositories_with_upload_records": null,
"num_repositories_without_upload_records": null,
"num_repositories_with_fresh_upload_records": null,
"num_repositories_with_index_records": null,
"num_repositories_with_fresh_index_records": null,
"num_repositories_with_index_configuration_records": null,
"counts_by_language": null,
"settings_page_view_count": null,
"language_requests": null,
"investigation_events": null
},
"code_monitoring_usage": null,
"code_host_integration_usage": null,
"ide_extensions_usage": null,
"cta_usage": null,
"dependency_versions": null,
"extensions_usage": null,
"code_insights_usage": null,
"code_insights_critical_telemetry": null,
"search_usage": null,
"growth_statistics": null,
"saved_searches": null,
"homepage_panels": null,
"search_onboarding": null,
"repositories": null,
"retention_statistics": null,
"installer_email": "[email protected]",
"auth_providers": "foo,bar",
"ext_services": "GITHUB,GITLAB",
"code_host_versions": null,
"builtin_signup_allowed": "true",
"deploy_type": "server",
"total_user_accounts": "234",
"has_external_url": "false",
"has_repos": "true",
"ever_searched": "false",
"ever_find_refs": "true",
"timestamp": "`+now.UTC().Format(time.RFC3339)+`"
}`)
}
func TestSerializeCodeHostVersions(t *testing.T) {
pr := &pingRequest{
ClientSiteID: "0101-0101",
DeployType: "server",
ClientVersionString: "3.12.6",
AuthProviders: []string{"foo", "bar"},
ExternalServices: []string{extsvc.KindGitHub, extsvc.KindGitLab},
CodeHostVersions: json.RawMessage([]byte(`[{"external_service_kind":"GITHUB","version":"1.2.3.4"}]`)),
BuiltinSignupAllowed: true,
HasExtURL: false,
UniqueUsers: 123,
Activity: nil,
BatchChangesUsage: nil,
CodeIntelUsage: nil,
CodeMonitoringUsage: nil,
CodeHostIntegrationUsage: nil,
IDEExtensionsUsage: nil,
NewCodeIntelUsage: nil,
SearchUsage: nil,
GrowthStatistics: nil,
SavedSearches: nil,
HomepagePanels: nil,
SearchOnboarding: nil,
InitialAdminEmail: "[email protected]",
TotalUsers: 234,
HasRepos: true,
EverSearched: false,
EverFindRefs: true,
RetentionStatistics: nil,
}
now := time.Now()
payload, err := marshalPing(pr, true, "127.0.0.1", now)
if err != nil {
t.Fatalf("unexpected error %s", err)
}
compareJSON(t, payload, `{
"remote_ip": "127.0.0.1",
"remote_site_version": "3.12.6",
"remote_site_id": "0101-0101",
"license_key": "",
"has_update": "true",
"unique_users_today": "123",
"site_activity": null,
"batch_changes_usage": null,
"code_intel_usage": null,
"new_code_intel_usage": null,
"dependency_versions": null,
"extensions_usage": null,
"code_insights_usage": null,
"code_insights_critical_telemetry": null,
"code_monitoring_usage": null,
"code_host_integration_usage": null,
"ide_extensions_usage": null,
"cta_usage": null,
"search_usage": null,
"growth_statistics": null,
"saved_searches": null,
"homepage_panels": null,
"search_onboarding": null,
"repositories": null,
"retention_statistics": null,
"installer_email": "[email protected]",
"auth_providers": "foo,bar",
"ext_services": "GITHUB,GITLAB",
"code_host_versions": [{"external_service_kind":"GITHUB","version":"1.2.3.4"}],
"builtin_signup_allowed": "true",
"deploy_type": "server",
"total_user_accounts": "234",
"has_external_url": "false",
"has_repos": "true",
"ever_searched": "false",
"ever_find_refs": "true",
"timestamp": "`+now.UTC().Format(time.RFC3339)+`"
}`)
}
func compareJSON(t *testing.T, actual []byte, expected string) {
var o1 any
if err := json.Unmarshal(actual, &o1); err != nil {
t.Fatalf("unexpected error %s", err)
}
var o2 any
if err := json.Unmarshal([]byte(expected), &o2); err != nil {
t.Fatalf("unexpected error %s", err)
}
if diff := cmp.Diff(o2, o1); diff != "" {
t.Fatalf("mismatch (-want +got):\n%s", diff)
}
}
func int32Ptr(v int32) *int32 {
return &v
}
| {
t.Skip("Failed to contact dockerhub", err)
} |
check_if_favorited_filter.py | from django import template
register = template.Library()
@register.filter
def | (document, user):
"""Returns boolean of whether or not a user favorited this document"""
return document.is_favorited(user)
| check_if_favorited |
wallet.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde_json as json;
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
/// Wallet commands processing
use std::process::exit;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
use clap::ArgMatches;
use config::GlobalWalletConfig;
use core::{core, global};
use grin_wallet::{self, controller, display, libwallet};
use grin_wallet::{HTTPWalletClient, LMDBBackend, WalletConfig, WalletInst, WalletSeed};
use keychain;
use servers::start_webwallet_server;
use util::LOGGER;
pub fn | (wallet_config: WalletConfig) {
if let Err(_) = WalletSeed::from_file(&wallet_config) {
WalletSeed::init_file(&wallet_config).expect("Failed to create wallet seed file.");
};
}
pub fn seed_exists(wallet_config: WalletConfig) -> bool {
let mut data_file_dir = PathBuf::new();
data_file_dir.push(wallet_config.data_file_dir);
data_file_dir.push(grin_wallet::SEED_FILE);
if data_file_dir.exists() {
true
} else {
false
}
}
pub fn instantiate_wallet(
wallet_config: WalletConfig,
passphrase: &str,
) -> Box<WalletInst<HTTPWalletClient, keychain::ExtKeychain>> {
if grin_wallet::needs_migrate(&wallet_config.data_file_dir) {
// Migrate wallet automatically
warn!(LOGGER, "Migrating legacy File-Based wallet to LMDB Format");
if let Err(e) = grin_wallet::migrate(&wallet_config.data_file_dir, passphrase) {
error!(LOGGER, "Error while trying to migrate wallet: {:?}", e);
error!(LOGGER, "Please ensure your file wallet files exist and are not corrupted, and that your password is correct");
panic!();
} else {
warn!(LOGGER, "Migration successful. Using LMDB Wallet backend");
}
warn!(LOGGER, "Please check the results of the migration process using `grin wallet info` and `grin wallet outputs`");
warn!(LOGGER, "If anything went wrong, you can try again by deleting the `db` directory and running a wallet command");
warn!(LOGGER, "If all is okay, you can move/backup/delete all files in the wallet directory EXCEPT FOR wallet.seed");
}
let client = HTTPWalletClient::new(&wallet_config.check_node_api_http_addr);
let db_wallet = LMDBBackend::new(wallet_config.clone(), "", client).unwrap_or_else(|e| {
panic!(
"Error creating DB wallet: {} Config: {:?}",
e, wallet_config
);
});
info!(LOGGER, "Using LMDB Backend for wallet");
Box::new(db_wallet)
}
pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
// just get defaults from the global config
let mut wallet_config = config.members.unwrap().wallet;
if let Some(t) = wallet_config.chain_type.clone() {
global::set_mining_mode(t);
}
if wallet_args.is_present("external") {
wallet_config.api_listen_interface = "0.0.0.0".to_string();
}
if let Some(dir) = wallet_args.value_of("dir") {
wallet_config.data_file_dir = dir.to_string().clone();
}
if let Some(sa) = wallet_args.value_of("api_server_address") {
wallet_config.check_node_api_http_addr = sa.to_string().clone();
}
let mut show_spent = false;
if wallet_args.is_present("show_spent") {
show_spent = true;
}
// Derive the keychain based on seed from seed file and specified passphrase.
// Generate the initial wallet seed if we are running "wallet init".
if let ("init", Some(_)) = wallet_args.subcommand() {
WalletSeed::init_file(&wallet_config).expect("Failed to init wallet seed file.");
info!(LOGGER, "Wallet seed file created");
let client = HTTPWalletClient::new(&wallet_config.check_node_api_http_addr);
let _: LMDBBackend<HTTPWalletClient, keychain::ExtKeychain> =
LMDBBackend::new(wallet_config.clone(), "", client).unwrap_or_else(|e| {
panic!(
"Error creating DB for wallet: {} Config: {:?}",
e, wallet_config
);
});
info!(LOGGER, "Wallet database backend created");
// give logging thread a moment to catch up
thread::sleep(Duration::from_millis(200));
// we are done here with creating the wallet, so just return
return;
}
let passphrase = wallet_args
.value_of("pass")
.expect("Failed to read passphrase.");
// Handle listener startup commands
{
let wallet = instantiate_wallet(wallet_config.clone(), passphrase);
match wallet_args.subcommand() {
("listen", Some(listen_args)) => {
if let Some(port) = listen_args.value_of("port") {
wallet_config.api_listen_port = port.parse().unwrap();
}
controller::foreign_listener(wallet, &wallet_config.api_listen_addr())
.unwrap_or_else(|e| {
panic!(
"Error creating wallet listener: {:?} Config: {:?}",
e, wallet_config
)
});
}
("owner_api", Some(_api_args)) => {
controller::owner_listener(wallet, "127.0.0.1:13420").unwrap_or_else(|e| {
panic!(
"Error creating wallet api listener: {:?} Config: {:?}",
e, wallet_config
)
});
}
("web", Some(_api_args)) => {
// start owner listener and run static file server
start_webwallet_server();
controller::owner_listener(wallet, "127.0.0.1:13420").unwrap_or_else(|e| {
panic!(
"Error creating wallet api listener: {:?} Config: {:?}",
e, wallet_config
)
});
}
_ => {}
};
}
// Handle single-use (command line) owner commands
let wallet = Arc::new(Mutex::new(instantiate_wallet(
wallet_config.clone(),
passphrase,
)));
let res = controller::owner_single_use(wallet.clone(), |api| {
match wallet_args.subcommand() {
("send", Some(send_args)) => {
let amount = send_args
.value_of("amount")
.expect("Amount to send required");
let amount = core::amount_from_hr_string(amount)
.expect("Could not parse amount as a number with optional decimal point.");
let minimum_confirmations: u64 = send_args
.value_of("minimum_confirmations")
.unwrap()
.parse()
.expect("Could not parse minimum_confirmations as a whole number.");
let selection_strategy = send_args
.value_of("selection_strategy")
.expect("Selection strategy required");
let method = send_args
.value_of("method")
.expect("Payment method required");
let dest = send_args
.value_of("dest")
.expect("Destination wallet address required");
let change_outputs = send_args
.value_of("change_outputs")
.unwrap()
.parse()
.expect("Failed to parse number of change outputs.");
let fluff = send_args.is_present("fluff");
let max_outputs = 500;
if method == "http" {
if dest.starts_with("http://") {
let result = api.issue_send_tx(
amount,
minimum_confirmations,
dest,
max_outputs,
change_outputs,
selection_strategy == "all",
);
let slate = match result {
Ok(s) => {
info!(
LOGGER,
"Tx created: {} grin to {} (strategy '{}')",
core::amount_to_hr_string(amount, false),
dest,
selection_strategy,
);
s
}
Err(e) => {
error!(LOGGER, "Tx not created: {:?}", e);
match e.kind() {
// user errors, don't backtrace
libwallet::ErrorKind::NotEnoughFunds { .. } => {}
libwallet::ErrorKind::FeeDispute { .. } => {}
libwallet::ErrorKind::FeeExceedsAmount { .. } => {}
_ => {
// otherwise give full dump
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap());
}
};
panic!();
}
};
let result = api.post_tx(&slate, fluff);
match result {
Ok(_) => {
info!(LOGGER, "Tx sent",);
Ok(())
}
Err(e) => {
error!(LOGGER, "Tx not sent: {:?}", e);
Err(e)
}
}
} else {
error!(
LOGGER,
"HTTP Destination should start with http://: {}", dest
);
panic!();
}
} else if method == "file" {
api.send_tx(
true,
amount,
minimum_confirmations,
dest,
max_outputs,
change_outputs,
selection_strategy == "all",
).expect("Send failed");
Ok(())
} else {
error!(LOGGER, "unsupported payment method: {}", method);
panic!();
}
}
("receive", Some(send_args)) => {
let mut receive_result: Result<(), grin_wallet::libwallet::Error> = Ok(());
let res = controller::foreign_single_use(wallet, |api| {
let tx_file = send_args
.value_of("input")
.expect("Transaction file required");
receive_result = api.file_receive_tx(tx_file);
Ok(())
});
if res.is_err() {
exit(1);
}
receive_result
}
("finalize", Some(send_args)) => {
let fluff = send_args.is_present("fluff");
let tx_file = send_args
.value_of("input")
.expect("Receiver's transaction file required");
let mut pub_tx_f = File::open(tx_file)?;
let mut content = String::new();
pub_tx_f.read_to_string(&mut content)?;
let mut slate: grin_wallet::libtx::slate::Slate = json::from_str(&content)
.map_err(|_| grin_wallet::libwallet::ErrorKind::Format)?;
let _ = api.finalize_tx(&mut slate).expect("Finalize failed");
let result = api.post_tx(&slate, fluff);
match result {
Ok(_) => {
info!(LOGGER, "Tx sent");
Ok(())
}
Err(e) => {
error!(LOGGER, "Tx not sent: {:?}", e);
Err(e)
}
}
}
("burn", Some(send_args)) => {
let amount = send_args
.value_of("amount")
.expect("Amount to burn required");
let amount = core::amount_from_hr_string(amount)
.expect("Could not parse amount as number with optional decimal point.");
let minimum_confirmations: u64 = send_args
.value_of("minimum_confirmations")
.unwrap()
.parse()
.expect("Could not parse minimum_confirmations as a whole number.");
let max_outputs = 500;
api.issue_burn_tx(amount, minimum_confirmations, max_outputs)
.unwrap_or_else(|e| {
panic!("Error burning tx: {:?} Config: {:?}", e, wallet_config)
});
Ok(())
}
("info", Some(_)) => {
let (validated, wallet_info) =
api.retrieve_summary_info(true).unwrap_or_else(|e| {
panic!(
"Error getting wallet info: {:?} Config: {:?}",
e, wallet_config
)
});
display::info(&wallet_info, validated);
Ok(())
}
("outputs", Some(_)) => {
let (height, _) = api.node_height()?;
let (validated, outputs) = api.retrieve_outputs(show_spent, true, None)?;
let _res = display::outputs(height, validated, outputs).unwrap_or_else(|e| {
panic!(
"Error getting wallet outputs: {:?} Config: {:?}",
e, wallet_config
)
});
Ok(())
}
("txs", Some(txs_args)) => {
let tx_id = match txs_args.value_of("id") {
None => None,
Some(tx) => match tx.parse() {
Ok(t) => Some(t),
Err(_) => panic!("Unable to parse argument 'id' as a number"),
},
};
let (height, _) = api.node_height()?;
let (validated, txs) = api.retrieve_txs(true, tx_id)?;
let include_status = !tx_id.is_some();
let _res =
display::txs(height, validated, txs, include_status).unwrap_or_else(|e| {
panic!(
"Error getting wallet outputs: {} Config: {:?}",
e, wallet_config
)
});
// if given a particular transaction id, also get and display associated
// inputs/outputs
if tx_id.is_some() {
let (_, outputs) = api.retrieve_outputs(true, false, tx_id)?;
let _res = display::outputs(height, validated, outputs).unwrap_or_else(|e| {
panic!(
"Error getting wallet outputs: {} Config: {:?}",
e, wallet_config
)
});
};
Ok(())
}
("repost", Some(repost_args)) => {
let tx_id: u32 = match repost_args.value_of("id") {
None => {
error!(LOGGER, "Transaction of a completed but unconfirmed transaction required (specify with --id=[id])");
panic!();
}
Some(tx) => match tx.parse() {
Ok(t) => t,
Err(_) => {
panic!("Unable to parse argument 'id' as a number");
}
},
};
let dump_file = repost_args.value_of("dumpfile");
let fluff = repost_args.is_present("fluff");
match dump_file {
None => {
let result = api.post_stored_tx(tx_id, fluff);
match result {
Ok(_) => {
info!(LOGGER, "Reposted transaction at {}", tx_id);
Ok(())
}
Err(e) => {
error!(LOGGER, "Transaction reposting failed: {}", e);
Err(e)
}
}
}
Some(f) => {
let result = api.dump_stored_tx(tx_id, true, f);
match result {
Ok(_) => {
warn!(LOGGER, "Dumped transaction data for tx {} to {}", tx_id, f);
Ok(())
}
Err(e) => {
error!(LOGGER, "Transaction reposting failed: {}", e);
Err(e)
}
}
}
}
}
("cancel", Some(tx_args)) => {
let tx_id = tx_args
.value_of("id")
.expect("'id' argument (-i) is required.");
let tx_id = tx_id.parse().expect("Could not parse id parameter.");
let result = api.cancel_tx(tx_id);
match result {
Ok(_) => {
info!(LOGGER, "Transaction {} Cancelled", tx_id);
Ok(())
}
Err(e) => {
error!(LOGGER, "TX Cancellation failed: {}", e);
Err(e)
}
}
}
("restore", Some(_)) => {
let result = api.restore();
match result {
Ok(_) => {
info!(LOGGER, "Wallet restore complete",);
Ok(())
}
Err(e) => {
error!(LOGGER, "Wallet restore failed: {:?}", e);
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap());
Err(e)
}
}
}
_ => panic!("Unknown wallet command, use 'grin help wallet' for details"),
}
});
// we need to give log output a chance to catch up before exiting
thread::sleep(Duration::from_millis(100));
if res.is_err() {
exit(1);
}
}
| _init_wallet_seed |
fanbeams.py | from astropy.io import fits
from coords import nsew_of_constant_dec,Molonglo,hadec_to_nsew
from scipy.ndimage import filters
import ephem as e
import numpy as np
import copy
SEC_TO_SID = 0.9972685185185185 # convert seconds to sidereal "seconds"
EW_R = np.radians(2.0) # EW beam HWHM
NS_R = np.radians(1.0) # NS beam HWHM
class FanBeamTimeMap(object):
def __init__(self,data,lsts):
"""
Base class for fanbeams.
data: numpy array containing data with axes [nbeams,nsamps]
lsts: the lsts for each set of beams
"""
self.data = data
self.nsamps = data.shape[1]
self.nbeams = data.shape[0]
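# The first row of the map holds the total-power series; keep a copy and overwrite
# it with beam 1 so the total-power row is not treated as an ordinary fanbeam.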
self.total_power = np.copy(self.data[0])
self.data[0] = self.data[1]
self.lsts = lsts
self.mask = np.ones(data.shape,dtype='bool')
self._cache = {}
self._xcoords = np.arange(self.nsamps)
def copy(self):
fb = copy.deepcopy(self)
fb._cache = self._cache
return fb
def append(self,other):
self.data = np.hstack((self.data,other.data))
self.nsamps += other.nsamps
self.total_power = np.hstack((self.total_power,other.total_power))
self.lsts = np.hstack((self.lsts,other.lsts))
self.mask = np.hstack((self.mask,other.mask))
self._cache = {}
self._xcoords = np.arange(self.nsamps)
def radec_to_track(self,ra,dec):
"""
Convert a single ra and dec to a track through the fanbeams.
ra: ra in radians
dec: dec in radians
Note: does not cache track
Returns fanbeam indexes and NS offsets
"""
ns,ew = nsew_of_constant_dec(self.lsts-ra,dec)
ew_offset = ew-self.ew
ns_offset = ns-self.ns
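# Map the EW offset, in units of the EW beam half-width self.ew_r, linearly onto
# fanbeam indices; offsets beyond +/- one half-width give indices outside
# 0..nbeams, which extract() masks out.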
idxs = (self.nbeams * (ew_offset/self.ew_r + 1)/2).astype("int")
return idxs, ns_offset
def radecs_to_tracks(self,ras,decs):
"""
Convert an array of ra and dec coords to a set of tracks and offsets.
ras: ra array in radians
decs: dec array in radians
Note: Caches based on hash of inputs
Returns fanbeam indexes and NS offsets
"""
key = (hash(ras.tobytes()),hash(decs.tobytes()))
if key in self._cache.keys():
return self._cache[key]
tracks = np.empty([decs.size,ras.size,self.nsamps],dtype='int32')
offsets = np.empty([decs.size,ras.size,self.nsamps],dtype='float32')
for ii,ra in enumerate(ras):
#print ii,"/",ras.size,"\r",
for jj,dec in enumerate(decs):
idxs,ns_offset = self.radec_to_track(ra,dec)
tracks[jj,ii] = idxs
offsets[jj,ii] = ns_offset
#print
self._cache[key] = (tracks,offsets)
return tracks,offsets
def extract(self,idxs,xcoords):
"""
Extract a trail through fanbeam space.
idxs: array of fambeam indexes (can be any value)
Note: only valid data are returned
"""
mask = (idxs > 0) & (idxs < self.nbeams)
pixel_mask = self.mask[(idxs[mask],xcoords[mask])]
x = xcoords[mask][pixel_mask]
return self.data[(idxs[mask][pixel_mask],x)],x
class TransitFanBeamTimeMap(FanBeamTimeMap):
def __init__(self,data,ns,ew,lsts):
"""
Fanbeam container for transit observations
data: numpy array containing data with axes [nbeams,nsamps]
lsts: the lsts for each set of beams
ns: ns coordinate of central fanbeam (radians)
ew: ew coordinate of central fanbeam (radians)
"""
super(TransitFanBeamTimeMap,self).__init__(data,lsts)
self.ns = ns
self.ew = ew
self.ew_r = EW_R/np.cos(self.ew)
class TrackedFanBeamTimeMap(FanBeamTimeMap):
def __init__(self,data,ra,dec,lsts):
"""
Fanbeam container for tracked observations
data: numpy array containing data with axes [nbeams,nsamps]
lsts: the lsts for each set of beams
ra: ra coordinate of central fanbeam (radians)
dec: dec coordinate of central fanbeam (radians)
"""
super(TrackedFanBeamTimeMap,self).__init__(data,lsts)
self.ra = float(ra)
self.dec = float(dec)
self.hour_angles = self.lsts-self.ra
ns,ew = nsew_of_constant_dec(self.hour_angles,self.dec)
self.ns = ns
self.ew = ew
self.ew_r = EW_R/np.cos(self.ew)
def append(self,other):
self.ns = np.hstack((self.ns,other.ns))
self.ew = np.hstack((self.ew,other.ew))
self.ew_r = np.hstack((self.ew_r,other.ew_r))
self.hour_angles = np.hstack((self.hour_angles,other.hour_angles))
super(TrackedFanBeamTimeMap,self).append(other)
def subtract_background(fanbeams,background,thresh=None):
"""
Subtract a background array and normalise the fanbeams.
fanbeams: An object of basetype FanBeamTimeMap
background: A FanBeamTimeMap containing a background
thresh: Sigma threshold for clipping
"""
clean = fanbeams.copy()
data = clean.data
data-=background.data
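# 1.4826 * MAD and 1.253314 * mean absolute deviation are both robust estimators of
# the standard deviation; fall back to the latter wherever the MAD is zero.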
mad = 1.4826 * np.median(abs(data-np.median(data,axis=0)),axis=0)
muad = 1.253314 * np.mean(abs(data-np.mean(data,axis=0)),axis=0)
mad[mad==0] = muad[mad==0]
data/=mad
if thresh is not None:
data = data.clip(max=thresh)
clean.data = data
return clean
def median_filter(fanbeams,window,mode='nearest'):
"""
Apply a median filter to a set of fanbeams.
fanbeams: An object of basetype FanBeamTimeMap
window: size of the filter window.
mode: filter mode for edges
"""
background = fanbeams.copy()
background.data = filters.median_filter(fanbeams.data,size=[window,1],mode=mode)
return background
def poly_filter(fanbeams,deg):
"""
Apply a polynomial filter to a set of fanbeams.
fanbeams: An object of basetype FanBeamTimeMap
deg: degree of polynomial
"""
background = fanbeams.copy()
data = background.data
x = np.arange(background.nbeams)
background.data = np.array([np.polyval(j,x) for j in np.polyfit(x,data,deg).T]).T
return background
def _load_fanbeams(fname,utc_start,tsamp):
"""Helper function"""
obs = Molonglo(date=utc_start)
lst = obs.sidereal_time()
hdu = fits.open(fname)[0]
dlst = 2*np.pi*tsamp/86400.0 * SEC_TO_SID
lsts = np.arange(hdu.data.shape[1])*dlst + lst
return hdu.data,lsts,obs
def load_tracked_fanbeams(fname,utc_start,ra,dec,tsamp):
"""Load a tracked fanbeam observation"""
data,lsts,obs = _load_fanbeams(fname,utc_start,tsamp)
body = e.FixedBody()
body._ra = ra
body._dec = dec
body.compute(obs)
return TrackedFanBeamTimeMap(data,body.ra,body.dec,lsts)
def load_transit_fanbeams(fname,utc_start,ha,dec,tsamp):
"""Load a transit fanbeam observation"""
data,lsts,obs = _load_fanbeams(fname,utc_start,tsamp)
ns,ew = hadec_to_nsew(ha,dec)
return TransitFanBeamTimeMap(data,ns,ew,lsts)
##### testing: IGNORE #####
def test():
ra,dec = "07:16:35","-19:00:40"
eq = e.Equatorial(ra,dec)
ra = eq.ra
dec = eq.dec
lst = float(e.hours("03:56"))
dlst = float(e.hours("00:00:20.0001"))*SEC_TO_SID
data = read("molonglo_fb_map.fits")
lsts = np.arange(data.shape[1])*dlst + lst
return FanBeamTimeMap(data,ra,dec,lst,dlst)
def | (fname="molonglo_fb_map.fits"):
ra,dec = "07:16:35","-19:00:40"
eq = e.Equatorial(ra,dec)
ra = eq.ra
dec = eq.dec
lst = float(e.hours("03:56"))
dlst = float(e.hours("00:00:20.0016"))*SEC_TO_SID
data = read(fname)
lsts = np.arange(data.shape[1])*dlst + lst
return TrackedFanBeamTimeMap(data,ra,dec,lsts)
def track_map(fname,utc_start,ra,dec):
obs = Molonglo(date=utc_start)
body = e.FixedBody()
body._ra = ra
body._dec = dec
body.compute(obs)
data = read(fname)
lst = obs.sidereal_time()
dlst = float(e.hours("00:00:20.0016"))*SEC_TO_SID
lsts = np.arange(data.shape[1])*dlst + lst
return TrackedFanBeamTimeMap(data,body.ra,body.dec,lsts)
def test_transit_map():
obs = Molonglo("2015/07/28 22:34:23")
lst = obs.sidereal_time()
dlst = float(e.hours("00:00:01"))*SEC_TO_SID
data = read("SN1987A.2015-07-28-22_34_23.fixed.fits")
lsts = np.arange(data.shape[1])*dlst + lst
ns,ew = hadec_to_nsew(0.0,np.radians(-70.0))
return TransitFanBeamTimeMap(data,ns,ew,lsts)
| test_track_map |
common.go | package token
import (
"github.com/pinke/muses/pkg/cache/redis"
"github.com/pinke/muses/pkg/database/mysql"
"github.com/pinke/muses/pkg/logger"
)
type Cfg struct {
Muses struct {
Token map[string]CallerCfg `toml:"token"`
} `toml:"muses"`
}
type MysqlCallerCfg mysql.CallerCfg
type RedisCallerCfg redis.CallerCfg
type LoggerCallerCfg logger.CallerCfg
// CallerCfg is the configuration for token.
// Note that the XXXRef fields name a Caller that is already defined in the configuration file.
// For example, if you have already set up a mysql database myDB for storing data and you also
// want that database to hold the token data, you only need to set MysqlRef to myDB.
// If no Ref is given, token creates a new Caller from this configuration during initialization.
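// A minimal illustrative sketch of such a configuration (the section name "default",
// the key casing and the value "myDB" are hypothetical; only the "muses"/"token" path and
// the logger/mysql/redis table names come from the struct tags in this file):
//
//   [muses.token.default]
//   mode = "redis"
//   mysqlRef = "myDB"
//   [muses.token.default.redis]
//   # redis connection settings as defined by redis.CallerCfg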
type CallerCfg struct {
Mode string
LoggerRef string
Logger LoggerCallerCfg `toml:"logger"`
MysqlRef string | Mysql MysqlCallerCfg `toml:"mysql"`
RedisRef string
Redis RedisCallerCfg `toml:"redis"`
} | |
julia.rs | use wasm_bindgen::prelude::*;
//use complex::Cplx;
use num_complex::Complex as Cplx;
use crate::utils;
use crate::ZPlane;
// need to copy this from rand because the crate links to C++ static libs
pub struct LCG {
/// The seed
r: u32
}
impl LCG {
const A: u64 = 48271;
const M: u64 = std::i32::MAX as u64;
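// 48271 with modulus 2^31 - 1 is the "minimal standard" Park-Miller/Lehmer generator.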
pub fn new(seed: u32) -> LCG {
assert_ne!(seed, 0);
LCG{r: seed}
}
fn next_1(&mut self) -> u32 {
self.r = ((self.r as u64 * LCG::A) % LCG::M) as u32;
self.r
}
}
#[wasm_bindgen]
pub struct Julia {
z: ZPlane<u8>,
c: Cplx<f64>, // as in z <-> z*z + c
a: Cplx<f64>, // attrction point that c moves to
rng: LCG
}
const MAX_DEPTH: u8 = 14;
// speed at which c is pulled to a
const SPEED: f64 = 0.01;
#[wasm_bindgen]
impl Julia {
pub fn new(cr: f64, ci: f64, scale: f64, width: u32, height: u32) -> Julia {
utils::set_panic_hook();
let mut julia = Julia {
z: ZPlane::<u8>::new(Cplx::new(-scale, -scale), Cplx::new(scale, scale), width, height),
c: Cplx::new(cr, ci),
a: Cplx::new(0.0, 0.0),
rng: LCG::new(19937)
};
julia.draw();
julia
}
pub fn cells(&self) -> *const u8 {
self.z.cells.as_ptr()
}
pub fn locus_r(&self) -> u32 {
((self.c.re - self.z.zmin.re) * self.z.rscale) as u32
}
pub fn locus_i(&self) -> u32 {
((self.c.im - self.z.zmin.im) * self.z.iscale) as u32
}
pub fn set_attract(&mut self, row: u32, col: u32) {
let (c, _) = self.z.get_point(row, col);
self.a = c;
}
pub fn tick(&mut self) {
//let theta = self.c.arg();
//let dr = 0.0001 * (1.0 - (self.rng.next_1() as f64 / std::i32::MAX as f64));
//self.c = Cplx::from_polar(&(self.c.norm() * (1.0 + dr)), &(theta + 0.01));
self.c += Cplx::new((self.a.re - self.c.re) * SPEED, (self.a.im - self.c.im) * SPEED);
if self.c.re > self.z.zmax.re { self.c.re = self.z.zmax.re; }
if self.c.re < self.z.zmin.re { self.c.re = self.z.zmin.re; }
if self.c.im > self.z.zmax.im { self.c.im = self.z.zmax.im; }
if self.c.im < self.z.zmin.im { self.c.im = self.z.zmin.im; }
self.draw();
}
// Uses the MIIM algorithm
fn draw(&mut self) {
//let mut next = self.cells.clone();
let mut next = vec![0u8; (self.z.width * self.z.height) as usize];
//let mut rng = LCG::new(19937);
let mut z = Cplx::new(0.0, 0.0);
let mut sign = 1.0;
// warmup
for _ in 0..25 {
if self.rng.next_1() % 2 == 1 |
z = (z - self.c).sqrt() * sign;
}
self.draw_impl(z, &mut next, 0);
self.z.cells = next;
}
fn draw_impl(&mut self, z: Cplx<f64>, cells: &mut Vec<u8>, depth: u8) {
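// Inverse iteration: every point has two preimages under z -> z*z + c, namely
// +/-sqrt(z - c); mark both pixels and recurse on each until MAX_DEPTH.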
let z = (z - self.c).sqrt();
let idx = self.z.get_index(&z);
cells[idx] += 1;
let idx = self.z.get_index(&-z);
cells[idx] += 1;
if depth >= MAX_DEPTH { return; }
self.draw_impl(z, cells, depth+1);
self.draw_impl(-z, cells, depth+1);
}
}
| { sign *= -1.0; } |
machine.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8s
import (
"yunion.io/x/onecloud/pkg/mcclient/modules"
)
var (
KubeMachines *ResourceManager
)
func init() | {
KubeMachines = NewResourceManager("kubemachine", "kubemachines",
NewResourceCols("role", "first_node", "cluster", "provider", "resource_type", "status", "address", "hypervisor"),
NewColumns())
modules.Register(KubeMachines)
} |
|
endpoint_resolver_rules.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from aliyunsdkcore.endpoint.local_config_regional_endpoint_resolver \
import LocalConfigRegionalEndpointResolver
class EndpointResolverRules(LocalConfigRegionalEndpointResolver):
def __init__(self, *args, **kwargs):
LocalConfigRegionalEndpointResolver.__init__(self)
self.product_code_valid = False
self.region_id_valid = False
self.endpoint_map = None
self.endpoint_regional = None
self.request_network = 'public'
self.product_suffix = ''
def resolve(self, request):
if request.endpoint_map is None or request.endpoint_regional is None:
return None
request_network = "public" if not request.request_network else request.request_network
endpoint_regional = request.endpoint_regional
endpoint = ""
if request_network == "public":
endpoint = request.endpoint_map.get(request.region_id, "")
if endpoint == "":
if endpoint_regional == "regional":
if not self.verify_region_id(request.region_id.lower()):
return
endpoint_domain = ".{region_id}.aliyuncs.com".format(
region_id=request.region_id.lower())
elif endpoint_regional == "central":
endpoint_domain = ".aliyuncs.com"
else: | endpoint_param_list = [request.product_code_lower, suffix, network, endpoint_domain]
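# Joining the non-empty parts gives endpoints of the form
# "<product>[-<suffix>][-<network>]<domain>"; e.g. a hypothetical product "ecs" over the
# "vpc" network with a regional endpoint would resolve to "ecs-vpc.cn-hangzhou.aliyuncs.com".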
endpoint = "".join(list(filter(lambda x: x, endpoint_param_list)))
return endpoint
def is_product_code_valid(self, request):
return self.product_code_valid
def is_region_id_valid(self, request):
return self.region_id_valid
@classmethod
def get_valid_region_ids_by_product(cls, product_code):
return None | return None
network = "" if request_network == "public" else "-" + request_network
suffix = "-" + request.product_suffix if request.product_suffix else "" |
localServer.js | import http from 'http';
import connect from 'connect';
import findPort from 'find-port';
import enableDestroy from 'server-destroy';
import url from 'url';
import path from 'path';
import fs from 'fs-plus';
import send from 'send';
import mime from 'mime';
const oneYearInSeconds = 60 * 60 * 24 * 365;
// This is a stream protocol response object that is compatible in a very limited way with
// node's http.ServerResponse.
// https://github.com/electron/electron/blob/master/docs/api/structures/stream-protocol-response.md
class StreamProtocolResponse {
constructor() {
this._headers = {}; // eslint-disable-line
this.statusCode = 200;
this.data = null;
this.headers = {};
}
setHeader(key, value) {
this._headers[key] = value; // eslint-disable-line
}
setStream(stream) {
this.data = stream;
}
setStatusCode(code) {
this.statusCode = code;
}
finalize() {
this.headers = this._headers; // eslint-disable-line
}
}
function* iterate(array) {
for (const entry of array) { // eslint-disable-line
yield entry;
}
}
/**
* Creates stream protocol response for a given file acting like it would come
* from a real HTTP server.
*
* @param {string} filePath - path to the file being sent
* @param {StreamProtocolResponse} res - the response object
* @param {Function} beforeFinalize - function to be run before finalizing the response object
*/
function createStreamProtocolResponse(filePath, res, beforeFinalize) {
if (!fs.existsSync(filePath)) {
return;
}
// Setting file size.
const stat = fs.statSync(filePath);
res.setHeader('Content-Length', stat.size);
// Setting last modified date.
const modified = stat.mtime.toUTCString();
res.setHeader('Last-Modified', modified);
// Determining mime type.
const type = mime.getType(filePath);
if (type) {
const charset = mime.getExtension(type);
res.setHeader('Content-Type', type + (charset ? `; charset=${charset}` : ''));
} else {
res.setHeader('Content-Type', 'application/octet-stream');
}
res.setHeader('Connection', 'close');
res.setStream(fs.createReadStream(filePath));
res.setStatusCode(200);
beforeFinalize();
res.finalize();
}
/**
* Simple local HTTP server tailored for meteor app bundle.
* Additionally it supports a local mode that creates a StreamProtocolResponse objects
* for a given path.
*
* @param {Object} log - Logger instance
* @param {Object} settings
* @param {Object} skeletonApp
*
* @property {Array} errors
* @constructor
*/
export default class LocalServer {
constructor({ log, settings = { localFilesystem: false, allowOriginLocalServer: false }, skeletonApp }) {
this.log = log;
this.httpServerInstance = null;
this.server = null;
this.retries = 0;
this.maxRetries = 3;
this.serverPath = '';
this.parentServerPath = '';
this.portRange = [57200, 57400];
this.portSearchStep = 20;
this.assetBundle = null;
this.errors = [];
this.errors[0] = 'Could not find free port.';
this.errors[1] = 'Could not start http server.';
this.localFilesystemUrl = '/local-filesystem/';
this.desktopAssetsUrl = '/___desktop/';
this.settings = settings;
this.portFilePath = path.join(skeletonApp.userDataDir, 'port.cfg');
this.lastUsedPort = this.loadPort();
this.handlers = [];
}
/**
* Registers a handler for the local mode.
* @param {Function} handler
*/
use(handler) {
this.handlers.push(handler);
}
/**
* Returns a HTTP 500 response.
* @returns {StreamProtocolResponse}
*/
static getServerErrorResponse() {
const res = new StreamProtocolResponse();
res.setStatusCode(500);
return res;
}
/**
* Processes a request url and responds with StreamProtocolResponse
* @param {string} requestUrl
* @returns {Promise<any>}
*/
getStreamProtocolResponse(requestUrl) {
const res = new StreamProtocolResponse();
const req = { url: requestUrl };
const it = iterate(this.handlers);
const next = () => {
const handler = it.next();
if (handler.done) {
res.setStatusCode(404); // no handler matched; this path is only used for local streams
} else {
handler.value(
req,
res,
next,
true
);
}
};
return new Promise((resolve, reject) => {
try {
next();
resolve(res);
} catch (e) {
reject(e);
}
});
}
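// Illustration of how the method above is meant to be called once handlers have
// been registered through `use()` (the instance name and url are hypothetical):
//
//   localServer.getStreamProtocolResponse('/index.html')
//     .then(res => console.log(res.statusCode, Object.keys(res.headers)))
//     .catch(err => console.error(err));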
/**
* Sets refs for the callbacks.
*
* @param {function} onStartupFailed
* @param {function} onServerReady
* @param {function} onServerRestarted
*/
setCallbacks(onStartupFailed, onServerReady, onServerRestarted) {
this.onStartupFailed = onStartupFailed;
this.onServerReady = onServerReady;
this.onServerRestarted = onServerRestarted;
}
/**
* Initializes the module. Configures `connect` and searches for free port.
*
* @param {AssetBundle} assetBundle - asset bundle from the autoupdate
* @param {string} desktopPath - path to desktop.asar
* @param {boolean} restart - are we restarting the server?
*/
init(assetBundle, desktopPath, restart) {
const self = this;
/**
* Responds with HTTP status code and a message.
*
* @param {Object} res - response object
* @param {number} code - http response code
* @param {string} message - message
*/
function respondWithCode(res, code, message) {
/* eslint-disable */
res._headers = {};
res._headerNames = {};
res.statusCode = code;
/* eslint-enable */
res.setHeader('Content-Type', 'text/plain; charset=UTF-8');
res.setHeader('Content-Length', Buffer.byteLength(message));
res.setHeader('X-Content-Type-Options', 'nosniff');
res.end(message);
}
/**
* If there is a path for a source map - adds a X-SourceMap header pointing to it.
*
* @param {Asset} asset - currently sent asset
* @param {Object} res - response object
*/
function addSourceMapHeader(asset, res) {
if (asset.sourceMapUrlPath) {
res.setHeader('X-SourceMap', asset.sourceMapUrlPath);
}
}
/**
* If there is a hash, adds an ETag header with it.
*
* @param {Asset} asset - currently sent asset
* @param {Object} res - response object
*/
function addETagHeader(asset, res) {
if (asset.hash) {
res.setHeader('ETag', asset.hash);
}
}
/**
* If the manifest defines the file as cacheable and query has a cache buster (i.e.
* hash added to it after ?) adds a Cache-Control header letting know Chrome that this
* file can be cached. If that is not the case, no-cache is passed.
*
* @param {Asset} asset - currently sent asset
* @param {Object} res - response object
* @param {string} fullUrl - url
*/
function addCacheHeader(asset, res, fullUrl) {
const shouldCache = asset.cacheable && (/[0-9a-z]{40}/).test(fullUrl);
res.setHeader('Cache-Control', shouldCache ? `max-age=${oneYearInSeconds}` : 'no-cache');
}
/**
* Provides assets defined in the manifest.
*
* @param {Object} req - request object
* @param {Object} res - response object
* @param {Function} next - called on handler miss
* @param {boolean} local - local mode
*/
function AssetHandler(req, res, next, local = false) {
const parsedUrl = url.parse(req.url);
// Check if we have an asset for that url defined.
/** @type {Asset} */
const asset = self.assetBundle.assetForUrlPath(parsedUrl.pathname);
if (!asset) return next();
const processors = () => (
addSourceMapHeader(asset, res),
addETagHeader(asset, res),
addCacheHeader(asset, res, req.url)
);
if (local) {
return createStreamProtocolResponse(asset.getFile(), res, processors);
}
return send(
req,
encodeURIComponent(asset.getFile()),
{ etag: false, cacheControl: false }
)
.on('file', processors)
.pipe(res);
}
/**
 * Right now this is only used to serve cordova.js and it might seem like overkill, but
 * it will be used later for serving desktop-specific files bundled into the meteor bundle.
*
* @param {Object} req - request object
* @param {Object} res - response object
* @param {Function} next - called on handler miss
* @param {boolean} local - local mode
*/
function WwwHandler(req, res, next, local = false) {
const parsedUrl = url.parse(req.url);
if (parsedUrl.pathname !== '/cordova.js') {
return next();
}
const parentAssetBundle = self.assetBundle.getParentAssetBundle();
// We need to obtain a path for the initial asset bundle which usually is the parent
// asset bundle, but if there were no HCPs yet, the main asset bundle is the
// initial one.
const initialAssetBundlePath =
parentAssetBundle ?
parentAssetBundle.getDirectoryUri() : self.assetBundle.getDirectoryUri();
const filePath = path.join(initialAssetBundlePath, parsedUrl.pathname);
if (fs.existsSync(filePath)) {
return local ?
createStreamProtocolResponse(filePath, res, () => {}) :
send(req, encodeURIComponent(filePath)).pipe(res);
}
return next();
}
/**
* Provides files from the filesystem on a specified url alias.
*
* @param {Object} req - request object
* @param {Object} res - response object
* @param {Function} next - called on handler miss
* @param {string} urlAlias - url alias on which to serve the files
* @param {string=} localPath - serve files only from this path
* @param {boolean} local - local mode
*/
function FilesystemHandler(req, res, next, urlAlias, localPath, local = false) {
const parsedUrl = url.parse(req.url);
if (!parsedUrl.pathname.startsWith(urlAlias)) {
return next();
}
if (self.settings.allowOriginLocalServer) {
res.setHeader('Access-Control-Allow-Origin', '*');
}
const bareUrl = parsedUrl.pathname.substr(urlAlias.length);
let filePath;
if (localPath) {
filePath = path.join(localPath, decodeURIComponent(bareUrl));
if (filePath.toLowerCase().lastIndexOf(localPath.toLowerCase(), 0) !== 0) {
return respondWithCode(res, 400, 'Wrong path.');
}
} else {
filePath = decodeURIComponent(bareUrl);
}
if (fs.existsSync(filePath)) {
return local ?
createStreamProtocolResponse(filePath, res, () => {}) :
send(req, encodeURIComponent(filePath)).pipe(res);
}
return local ? res.setStatusCode(404) : respondWithCode(res, 404, 'File does not exist.');
}
/**
* Serves files from the entire filesystem if enabled in settings.
*
* @param {Object} req - request object
* @param {Object} res - response object
* @param {Function} next - called on handler miss
* @param {boolean} local - local mode
*/
function LocalFilesystemHandler(req, res, next, local = false) {
if (!self.settings.localFilesystem) {
return next();
}
return FilesystemHandler(req, res, next, self.localFilesystemUrl, undefined, local);
}
/**
* Serves files from the assets directory.
*
* @param {Object} req - request object
* @param {Object} res - response object
* @param {Function} next - called on handler miss
* @param {boolean} local - local mode
*/
function DesktopAssetsHandler(req, res, next, local = false) {
return FilesystemHandler(req, res, next, self.desktopAssetsUrl, path.join(desktopPath, 'assets'), local);
}
/**
* Serves index.html as the last resort.
*
* @param {Object} req - request object
* @param {Object} res - response object
* @param {Function} next - called on handler miss
* @param {boolean} local - local mode
*/
function IndexHandler(req, res, next, local = false) {
const parsedUrl = url.parse(req.url);
if (!parsedUrl.pathname.startsWith(self.localFilesystemUrl) &&
parsedUrl.pathname !== '/favicon.ico'
) {
/** @type {Asset} */
const indexFile = self.assetBundle.getIndexFile();
if (local) {
createStreamProtocolResponse(indexFile.getFile(), res, () => {
});
} else {
send(req, encodeURIComponent(indexFile.getFile())).pipe(res);
}
} else {
next();
}
}
if (this.assetBundle === null) {
// `connect` will do the job!
const server = connect();
if (restart) {
if (this.httpServerInstance) {
this.httpServerInstance.destroy();
}
}
this.log.info('will serve from: ', assetBundle.getDirectoryUri());
server.use(AssetHandler);
server.use(WwwHandler);
server.use(LocalFilesystemHandler);
server.use(DesktopAssetsHandler);
server.use(IndexHandler);
this.use(AssetHandler);
this.use(WwwHandler);
this.use(LocalFilesystemHandler);
this.use(DesktopAssetsHandler);
this.use(IndexHandler);
this.server = server;
}
this.assetBundle = assetBundle;
this.findPort()
.then(() => {
this.startHttpServer(restart);
})
.catch(() => {
this.log.error('could not find free port');
this.onStartupFailed(0);
});
}
/**
* Checks for a free port in a given port range.
* @param {number} startPort - port range start
* @param {number} stopPort - port range end
* @returns {Promise}
*/
static findFreePortInRange(startPort, stopPort) {
return new Promise((resolve, reject) => {
findPort(
'127.0.0.1',
startPort,
stopPort,
(ports) => {
if (ports.length === 0) {
reject();
} else {
const port = ports[Math.floor(Math.random() * (ports.length - 1))];
resolve(port);
}
}
);
});
}
/**
* Looks for a free port to reserve for the local server.
* @returns {Promise}
*/
findPort() {
const self = this;
let startPort;
let endPort;
if (this.lastUsedPort !== null) {
startPort = this.lastUsedPort;
endPort = this.lastUsedPort;
} else {
([startPort] = this.portRange);
endPort = this.portRange[0] + this.portSearchStep;
}
return new Promise((resolve, reject) => {
function success(port) {
self.port = port;
self.log.info(`assigned port ${self.port}`);
resolve();
}
function fail() {
if (startPort === self.lastUsedPort && endPort === startPort) {
([startPort] = self.portRange);
endPort = self.portRange[0] + self.portSearchStep;
} else {
startPort += self.portSearchStep;
endPort += self.portSearchStep;
}
if (startPort === self.portRange[1]) {
reject();
} else {
find(); // eslint-disable-line no-use-before-define
}
}
function find() {
LocalServer.findFreePortInRange(startPort, endPort)
.then(success)
.catch(fail);
}
find();
});
}
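// To make the search above concrete (values come from the constructor defaults,
// not from configuration): with no previously saved port the attempts look like
//
//   attempt 1: findFreePortInRange(57200, 57220)
//   attempt 2: findFreePortInRange(57220, 57240)   // window slides by portSearchStep on failure
//   ...
//   reject once the window start reaches 57400, after which init() reports errors[0]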
/**
* Loads the last used port number.
* @returns {null|number}
*/
loadPort() {
let port = null;
try {
port = parseInt(fs.readFileSync(this.portFilePath, this.port), 10);
} catch (e) { | }
this.log.info(`last used port is ${port}`);
return port;
}
/**
* Save the currently used port so that it will be reused on the next start.
*/
savePort() {
try {
fs.writeFileSync(this.portFilePath, this.port);
} catch (e) {
// No harm in that.
}
}
/**
* Tries to start the http server.
* @param {boolean} restart - is this restart
*/
startHttpServer(restart) {
try {
this.httpServerInstance = http.createServer(this.server);
this.httpServerInstance.on('error', (e) => {
this.log.error(e);
this.retries += 1;
if (this.retries < this.maxRetries) {
this.init(this.serverPath, this.parentServerPath, true);
} else {
this.onStartupFailed(1);
}
});
this.httpServerInstance.on('listening', () => {
this.retries = 0;
this.savePort();
if (restart) {
this.onServerRestarted(this.port);
} else {
this.onServerReady(this.port);
}
});
this.httpServerInstance.listen(this.port, '127.0.0.1');
enableDestroy(this.httpServerInstance);
} catch (e) {
this.log.error(e);
this.onStartupFailed(1);
}
}
} | // No harm in that.
}
if (port < this.portRange[0] || port > this.portRange[1]) {
return null; |
lib.rs | //! An extremely fast, lookup table based, ECMAScript lexer which yields SyntaxKind tokens used by the rslint_parse parser.
//! For the purposes of error recovery, tokens may have an error attached to them, which is reflected in the Iterator Item.
//! The lexer will also yield `COMMENT` and `WHITESPACE` tokens.
//!
//! The lexer operates on raw bytes to take full advantage of lookup table optimizations; these bytes **must** be valid utf8.
//! Therefore, making a lexer from a `&[u8]` is unsafe, since you must make sure the bytes are valid utf8.
//! Do not use this to learn how to lex JavaScript; this is just needlessly fast and demonic because I can't control myself :)
//!
//! basic ANSI syntax highlighting is also offered through the `highlight` feature.
//!
//! # Warning ⚠️
//!
//! `>>` and `>>>` are not emitted as single tokens, they are emitted as multiple `>` tokens. This is because of
//! TypeScript parsing and productions such as `T<U<N>>`
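//!
//! # Example
//!
//! A minimal sketch of driving the lexer (the crate path is omitted and the
//! assertions are only illustrative, which is why the block is marked `ignore`):
//!
//! ```ignore
//! let lexer = Lexer::from_str("let a = 5;", 0);
//! for (token, diagnostic) in lexer {
//!     // every yielded item is a `LexerReturn`, i.e. a `(Token, Option<Diagnostic>)` pair
//!     assert!(diagnostic.is_none());
//!     println!("{:?}", token.kind);
//! }
//! ```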
#![allow(clippy::or_fun_call)]
#[macro_use]
mod token;
mod state;
mod tests;
#[rustfmt::skip]
mod tables;
pub use token::Token;
#[cfg(feature = "highlight")]
mod highlight;
#[cfg(feature = "highlight")]
pub use highlight::*;
use rslint_errors::Diagnostic;
use state::LexerState;
use tables::derived_property::*;
pub use rslint_syntax::*;
pub type LexerReturn = (Token, Option<Diagnostic>);
// Simple macro for unwinding a loop
macro_rules! unwind_loop {
($($iter:tt)*) => {
$($iter)*
$($iter)*
$($iter)*
$($iter)*
$($iter)*
loop {
$($iter)*
$($iter)*
$($iter)*
$($iter)*
$($iter)*
}
};
}
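// For a single statement `s`, `unwind_loop! { s }` expands (roughly) to:
//
//     s s s s s
//     loop { s s s s s }
//
// i.e. the body is repeated five times before and inside an infinite loop, so the hot
// lexing loops below take the backwards branch far less often. The body must therefore
// `return` or `break` on its own; the macro never terminates by itself.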
// The first utf8 byte of every valid unicode whitespace char, used for short circuiting whitespace checks
const UNICODE_WHITESPACE_STARTS: [u8; 5] = [
0xC2, // NBSP
0xEF, // BOM
0xE1, // Ogham space mark
0xE2, // En quad .. Hair space, narrow no break space, mathematical space
0xE3, // Ideographic space
];
// Unicode spaces, designated by the `Zs` unicode property
const UNICODE_SPACES: [char; 19] = [
'\u{0020}', '\u{00A0}', '\u{1680}', '\u{2000}', '\u{2001}', '\u{2002}', '\u{2003}', '\u{2004}',
'\u{2005}', '\u{2006}', '\u{2007}', '\u{2008}', '\u{2009}', '\u{200A}', '\u{200B}', '\u{202F}',
'\u{205F}', '\u{3000}', '\u{FEFF}',
];
fn is_id_start(c: char) -> bool {
c == '_' || c == '$' || ID_Start(c)
}
fn is_id_continue(c: char) -> bool {
c == '$' || c == '\u{200d}' || c == '\u{200c}' || ID_Continue(c)
}
/// An extremely fast, lookup table based, lossless ECMAScript lexer
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Lexer<'src> {
bytes: &'src [u8],
cur: usize,
state: LexerState,
pub file_id: usize,
returned_eof: bool,
}
impl<'src> Lexer<'src> {
/// Make a new lexer from raw bytes.
///
/// # Safety
/// You must make sure the bytes are valid utf8, failure to do so is undefined behavior.
pub unsafe fn from_bytes(bytes: &'src [u8], file_id: usize) -> Self {
Self {
bytes,
cur: 0,
file_id,
state: LexerState::new(),
returned_eof: false,
}
}
/// Make a new lexer from a str, this is safe because strs are valid utf8
pub fn from_str(string: &'src str, file_id: usize) -> Self {
Self {
bytes: string.as_bytes(),
cur: 0,
file_id,
state: LexerState::new(),
returned_eof: false,
}
}
// Bump the lexer and return the given token
fn eat(&mut self, tok: LexerReturn) -> LexerReturn {
self.next();
tok
}
// Consume all whitespace starting from the current byte
fn consume_whitespace(&mut self) {
unwind_loop! {
if let Some(byte) = self.next().copied() {
// This is the most likely scenario, unicode spaces are very uncommon
if DISPATCHER[byte as usize] != Dispatch::WHS {
// try to short circuit the branch by checking the first byte of the potential unicode space
if byte > 0xC1 && UNICODE_WHITESPACE_STARTS.contains(&byte) {
let chr = self.get_unicode_char();
if is_linebreak(chr) {
self.state.had_linebreak = true;
}
if !UNICODE_SPACES.contains(&chr) {
return;
}
self.cur += chr.len_utf8() - 1;
} else {
return;
}
}
if is_linebreak(byte as char) {
self.state.had_linebreak = true;
}
} else {
return;
}
}
}
// Get the unicode char which starts at the current byte; the caller is responsible for advancing the cursor
fn get_unicode_char(&self) -> char {
// This is unreachable for all intents and purposes, but this is just a precautionary measure
debug_assert!(self.cur < self.bytes.len());
// Safety: We know this is safe because we require the input to the lexer to be valid utf8 and we always call this when we are at a char
let string =
unsafe { std::str::from_utf8_unchecked(&self.bytes.get_unchecked(self.cur..)) };
let chr = if let Some(chr) = string.chars().next() {
chr
} else {
// Safety: we always call this when we are at a valid char, so this branch is completely unreachable
unsafe {
core::hint::unreachable_unchecked();
}
};
chr
}
// Get the next byte and advance the index
#[inline]
fn next(&mut self) -> Option<&u8> {
self.cur += 1;
self.bytes.get(self.cur)
}
// Get the next byte but only advance the index if there is a next byte
// This is really just a hack for certain methods like escapes
#[inline]
fn next_bounded(&mut self) -> Option<&u8> {
| fn advance(&mut self, amount: usize) {
self.cur += amount;
}
fn lookup(byte: u8) -> Dispatch {
// Safety: our lookup table maps all values of u8, so its impossible for a u8 to be out of bounds
unsafe { *DISPATCHER.get_unchecked(byte as usize) }
}
// Read a `\u{000...}` escape sequence, this expects the cur char to be the `{`
fn read_codepoint_escape(&mut self) -> Result<char, Diagnostic> {
let start = self.cur + 1;
self.read_hexnumber();
if self.bytes.get(self.cur) != Some(&b'}') {
// We should not yield a diagnostic that splits a unicode char in half. That won't make codespan panic,
// but it may cause a panic for other crates which just consume the diagnostics
let invalid = self.get_unicode_char();
let err = Diagnostic::error(self.file_id, "", "expected hex digits for a unicode code point escape, but encountered an invalid character")
.primary(self.cur..self.cur + invalid.len_utf8(), "");
self.cur -= 1;
return Err(err);
}
// Safety: We know for a fact this is in bounds because we must be on the possible char after the } at this point
// which means its impossible for the range of the digits to be out of bounds.
// We also know we can't possibly be indexing a unicode char boundary because a unicode char (which can't be a hexdigit)
// would have triggered the if statement above. We also know this must be valid utf8, both because of read_hexnumber's behavior
// and because input to the lexer must be valid utf8
let digits_str = unsafe {
debug_assert!(self.bytes.get(start..self.cur).is_some());
debug_assert!(std::str::from_utf8(self.bytes.get_unchecked(start..self.cur)).is_ok());
std::str::from_utf8_unchecked(self.bytes.get_unchecked(start..self.cur))
};
match u32::from_str_radix(digits_str, 16) {
Ok(digits) if digits <= 0x10FFFF => {
let res = std::char::from_u32(digits);
if let Some(chr) = res {
Ok(chr)
} else {
let err =
Diagnostic::error(self.file_id, "", "invalid codepoint for unicode escape")
.primary(start..self.cur, "");
Err(err)
}
}
_ => {
let err = Diagnostic::error(
self.file_id,
"",
"out of bounds codepoint for unicode codepoint escape sequence",
)
.primary(start..self.cur, "")
.footer_note("Codepoints range from 0 to 0x10FFFF (1114111)");
Err(err)
}
}
}
// Read a `\u0000` escape sequence, this expects the current char to be the `u`, it also does not skip over the escape sequence
// The pos after this method is the last hex digit
fn read_unicode_escape(&mut self, advance: bool) -> Result<char, Diagnostic> {
debug_assert_eq!(self.bytes[self.cur], b'u');
let diagnostic = Diagnostic::error(
self.file_id,
"",
"invalid digits after unicode escape sequence",
)
.primary(
self.cur - 1..self.cur + 1,
"expected 4 hex digits following this",
);
for idx in 0..4 {
match self.next_bounded() {
None => {
if !advance {
self.cur -= idx + 1;
}
return Err(diagnostic);
}
Some(b) if !b.is_ascii_hexdigit() => {
if !advance {
self.cur -= idx + 1;
}
return Err(diagnostic);
}
_ => {}
}
}
unsafe {
// Safety: input to the lexer is guaranteed to be valid utf8 and so is the range since we return if there is a wrong amount of digits beforehand
let digits_str = std::str::from_utf8_unchecked(
self.bytes.get_unchecked((self.cur - 3)..(self.cur + 1)),
);
if let Ok(digits) = u32::from_str_radix(digits_str, 16) {
if !advance {
self.cur -= 4;
}
// Safety: we make sure the 4 chars are hex digits beforehand, and 4 hex digits cannot make an invalid char
Ok(std::char::from_u32_unchecked(digits))
} else {
// Safety: we know this is unreachable because 4 hexdigits cannot make an out of bounds char,
// and we make sure that the chars are actually hex digits
core::hint::unreachable_unchecked();
}
}
}
// Validate a `\x00` escape sequence; this expects the current char to be the `x`. It does not skip over the escape sequence
// The pos after this method is the last hex digit
fn validate_hex_escape(&mut self) -> Option<Diagnostic> {
debug_assert_eq!(self.bytes[self.cur], b'x');
let diagnostic =
Diagnostic::error(self.file_id, "", "invalid digits after hex escape sequence")
.primary(
(self.cur - 1)..(self.cur + 1),
"Expected 2 hex digits following this",
);
for _ in 0..2 {
match self.next_bounded() {
None => return Some(diagnostic),
Some(b) if !(*b as u8).is_ascii_hexdigit() => return Some(diagnostic),
_ => {}
}
}
None
}
// Validate a `\..` escape sequence and advance the lexer based on it
fn validate_escape_sequence(&mut self) -> Option<Diagnostic> {
let cur = self.cur;
if let Some(escape) = self.bytes.get(self.cur + 1) {
match escape {
b'u' if self.bytes.get(self.cur + 2) == Some(&b'{') => {
self.advance(2);
self.read_codepoint_escape().err()
}
b'u' => {
self.next();
self.read_unicode_escape(true).err()
}
b'x' => {
self.next();
self.validate_hex_escape()
}
_ => {
// We use get_unicode_char to account for escaped source characters which are unicode
let chr = self.get_unicode_char();
self.cur += chr.len_utf8();
None
}
}
} else {
Some(Diagnostic::error(self.file_id, "", "").primary(
cur..cur + 1,
"expected an escape sequence following a backslash, but found none",
))
}
}
// Consume an identifier by recursively consuming IDENTIFIER_PART kind chars
#[inline]
fn consume_ident(&mut self) {
unwind_loop! {
if self.next_bounded().is_some() {
if self.cur_ident_part().is_none() {
return;
}
} else {
return;
}
}
}
/// Consumes the identifier at the current position, and fills the given buf with the UTF-8
/// encoded identifier that got consumed.
///
/// Returns the number of bytes written into the buffer.
/// This method will stop writing into the buffer if the buffer is too small to
/// fit the whole identifier.
#[inline]
fn consume_and_get_ident(&mut self, buf: &mut [u8]) -> usize {
let mut idx = 0;
unwind_loop! {
if self.next_bounded().is_some() {
if let Some(c) = self.cur_ident_part() {
if let Some(buf) = buf.get_mut(idx..idx + 4) {
let res = c.encode_utf8(buf);
idx += res.len();
}
} else {
return idx;
}
} else {
return idx;
}
}
}
// Consume a string literal and advance the lexer, returning a diagnostic if an error occurred while reading the string.
// This could include an unterminated string or an invalid escape sequence
fn read_str_literal(&mut self) -> Option<Diagnostic> {
// Safety: this is only ever called from lex_token, which is guaranteed to be called on a char position
let quote = unsafe { *self.bytes.get_unchecked(self.cur) };
let start = self.cur;
let mut diagnostic = None;
while let Some(byte) = self.next_bounded() {
match *byte {
b'\\' => {
diagnostic = self.validate_escape_sequence();
}
b if b == quote => {
self.next();
return diagnostic;
}
_ => {}
}
}
let unterminated = Diagnostic::error(self.file_id, "", "unterminated string literal")
.primary(self.cur..self.cur, "input ends here")
.secondary(start..start + 1, "string literal starts here");
Some(unterminated)
}
/// Returns `Some(x)` if the current position is an identifier, with the character at
/// the position.
///
/// The character may be a char that was generated from a unicode escape sequence,
/// e.g. `t` is returned, the actual source code is `\u{74}`
#[inline]
fn cur_ident_part(&mut self) -> Option<char> {
debug_assert!(self.cur < self.bytes.len());
// Safety: we always call this method on a char
let b = unsafe { self.bytes.get_unchecked(self.cur) };
match Self::lookup(*b) {
IDT | DIG | ZER => Some(*b as char),
// FIXME: This should use ID_Continue, not XID_Continue
UNI => {
let chr = self.get_unicode_char();
let res = is_id_continue(chr);
if res {
self.cur += chr.len_utf8() - 1;
Some(chr)
} else {
None
}
}
BSL if self.bytes.get(self.cur + 1) == Some(&b'u') => {
let start = self.cur;
self.next();
let res = if self.bytes.get(self.cur + 1).copied() == Some(b'{') {
self.next();
self.read_codepoint_escape()
} else {
self.read_unicode_escape(true)
};
if let Ok(c) = res {
if is_id_continue(c) {
Some(c)
} else {
self.cur -= 1;
None
}
} else {
self.cur = start;
None
}
}
_ => None,
}
}
// check if the current char is an identifier start, this implicitly advances if the char being matched
// is a `\uxxxx` sequence which is an identifier start, or if the char is a unicode char which is an identifier start
#[inline]
fn cur_is_ident_start(&mut self) -> bool {
debug_assert!(self.cur < self.bytes.len());
// Safety: we always call this method on a char
let b = unsafe { self.bytes.get_unchecked(self.cur) };
match Self::lookup(*b) {
BSL if self.bytes.get(self.cur + 1) == Some(&b'u') => {
self.next();
if let Ok(chr) = self.read_unicode_escape(false) {
if is_id_start(chr) {
self.advance(5);
return true;
}
}
self.cur -= 1;
false
}
UNI => {
let chr = self.get_unicode_char();
if is_id_start(chr) {
self.cur += chr.len_utf8() - 1;
true
} else {
false
}
}
IDT => true,
_ => false,
}
}
/// Returns the identifier token at the current position, or the keyword token if
/// the identifier is a keyword.
///
/// `first` is a pair of a character that was already consumed,
/// but is still part of the identifier, and the character's position.
#[inline]
fn resolve_identifier(&mut self, first: (char, usize)) -> LexerReturn {
use SyntaxKind::*;
// Note to keep the buffer large enough to fit every possible keyword that
// the lexer can return
let mut buf = [0u8; 16];
let (len, start) = (first.0.encode_utf8(&mut buf).len(), first.1);
let count = self.consume_and_get_ident(&mut buf[len..]);
let kind = match &buf[..count + len] {
b"await" => Some(AWAIT_KW),
b"break" => Some(BREAK_KW),
b"case" => Some(CASE_KW),
b"catch" => Some(CATCH_KW),
b"class" => Some(CLASS_KW),
b"const" => Some(CONST_KW),
b"continue" => Some(CONTINUE_KW),
b"debugger" => Some(DEBUGGER_KW),
b"default" => Some(DEFAULT_KW),
b"delete" => Some(DELETE_KW),
b"do" => Some(DO_KW),
b"else" => Some(ELSE_KW),
b"enum" => Some(ENUM_KW),
b"export" => Some(EXPORT_KW),
b"extends" => Some(EXTENDS_KW),
b"false" => Some(FALSE_KW),
b"finally" => Some(FINALLY_KW),
b"for" => Some(FOR_KW),
b"function" => Some(FUNCTION_KW),
b"if" => Some(IF_KW),
b"in" => Some(IN_KW),
b"import" => Some(IMPORT_KW),
b"instanceof" => Some(INSTANCEOF_KW),
b"new" => Some(NEW_KW),
b"null" => Some(NULL_KW),
b"return" => Some(RETURN_KW),
b"super" => Some(SUPER_KW),
b"switch" => Some(SWITCH_KW),
b"this" => Some(THIS_KW),
b"throw" => Some(THROW_KW),
b"try" => Some(TRY_KW),
b"true" => Some(TRUE_KW),
b"typeof" => Some(TYPEOF_KW),
b"var" => Some(VAR_KW),
b"void" => Some(VOID_KW),
b"while" => Some(WHILE_KW),
b"with" => Some(WITH_KW),
b"yield" => Some(YIELD_KW),
_ => None,
};
if let Some(kind) = kind {
(Token::new(kind, self.cur - start), None)
} else {
(Token::new(T![ident], self.cur - start), None)
}
}
#[inline]
fn special_number_start<F: Fn(char) -> bool>(&mut self, func: F) -> bool {
if self
.bytes
.get(self.cur + 2)
.map(|b| func(*b as char))
.unwrap_or(false)
{
self.cur += 1;
true
} else {
false
}
}
#[inline]
fn maybe_bigint(&mut self) {
if let Some(b'n') = self.bytes.get(self.cur) {
self.next();
}
}
#[inline]
fn read_zero(&mut self) -> Option<Diagnostic> {
// TODO: Octal literals
match self.bytes.get(self.cur + 1) {
Some(b'x') | Some(b'X') => {
if self.special_number_start(|c| c.is_ascii_hexdigit()) {
let diag = self.read_hexnumber();
self.maybe_bigint();
diag
} else {
self.next();
None
}
}
Some(b'b') | Some(b'B') => {
if self.special_number_start(|c| c == '0' || c == '1') {
let diag = self.read_bindigits();
self.maybe_bigint();
diag
} else {
self.next();
None
}
}
Some(b'o') | Some(b'O') => {
if self.special_number_start(|c| ('0'..='7').contains(&c)) {
let diag = self.read_octaldigits();
self.maybe_bigint();
diag
} else {
self.next();
None
}
}
Some(b'n') => {
self.cur += 2;
None
}
Some(b'.') => {
self.cur += 1;
self.read_float()
}
Some(b'e') | Some(b'E') => {
// At least one digit is required
match self.bytes.get(self.cur + 2) {
Some(b'-') | Some(b'+') => {
if let Some(b'0'..=b'9') = self.bytes.get(self.cur + 3) {
self.next();
self.read_exponent()
} else {
None
}
}
Some(b'0'..=b'9') => self.read_exponent(),
_ => {
self.next();
None
}
}
}
// FIXME: many engines actually allow things like `09`, but by the spec, this is not allowed
// maybe we should not allow it if we want to go fully by the spec
_ => self.read_number(),
}
}
#[inline]
fn read_hexnumber(&mut self) -> Option<Diagnostic> {
let mut diag = None;
unwind_loop! {
match self.next() {
Some(b'_') => diag = diag.or(self.handle_numeric_separator(16)),
Some(b) if char::from(*b).is_ascii_hexdigit() => {},
_ => return diag,
}
}
}
#[inline]
fn handle_numeric_separator(&mut self, radix: u8) -> Option<Diagnostic> {
debug_assert_eq!(self.bytes[self.cur], b'_');
let err_diag = Diagnostic::error(
self.file_id,
"",
"numeric separators are only allowed between two digits",
)
.primary(self.cur..self.cur + 1, "");
let peeked = self.bytes.get(self.cur + 1).copied();
if peeked.is_none() || !char::from(peeked.unwrap()).is_digit(radix as u32) {
return Some(err_diag);
}
let forbidden = |c: Option<u8>| {
if c.is_none() {
return true;
}
let c = c.unwrap();
if radix == 16 {
matches!(c, b'.' | b'X' | b'_' | b'x')
} else {
matches!(c, b'.' | b'B' | b'E' | b'O' | b'_' | b'b' | b'e' | b'o')
}
};
let prev = self.bytes.get(self.cur - 1).copied();
if forbidden(prev) || forbidden(peeked) {
return Some(err_diag);
}
self.next_bounded();
None
}
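// A few concrete cases for the checks above (hypothetical inputs, decimal unless noted):
//
//     1_000   -> accepted: both neighbours of the `_` are digits
//     1__000  -> rejected: the byte after the separator is another `_`, not a digit
//     1_      -> rejected: nothing follows the separator
//     0x_1    -> rejected (radix 16): the previous byte `x` is in the forbidden set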
// Read a number which does not start with 0, since that can be more things and is handled
// by another function
#[inline]
fn read_number(&mut self) -> Option<Diagnostic> {
let mut diag = None;
unwind_loop! {
match self.next_bounded() {
Some(b'_') => diag = diag.or(self.handle_numeric_separator(10)),
Some(b'0'..=b'9') => {},
Some(b'.') => {
return self.read_float();
},
// TODO: merge this and read_float's implementation into one so we don't duplicate exponent code
Some(b'e') | Some(b'E') => {
// At least one digit is required
match self.bytes.get(self.cur + 1) {
Some(b'-') | Some(b'+') => {
if let Some(b'0'..=b'9') = self.bytes.get(self.cur + 2) {
self.next();
return self.read_exponent();
} else {
return diag;
}
},
Some(b'0'..=b'9') => return self.read_exponent(),
_ => return diag,
}
},
Some(b'n') => {
self.next();
return diag;
}
_ => return diag,
}
}
}
#[inline]
fn read_float(&mut self) -> Option<Diagnostic> {
let mut diag = None;
unwind_loop! {
match self.next_bounded() {
Some(b'_') => diag = diag.or(self.handle_numeric_separator(16)),
// LLVM has a hard time optimizing inclusive patterns, perhaps we should check if it makes llvm sad,
// and optimize this into a lookup table
Some(b'0'..=b'9') => {},
Some(b'e') | Some(b'E') => {
// At least one digit is required
match self.bytes.get(self.cur + 1) {
Some(b'-') | Some(b'+') => {
if let Some(b'0'..=b'9') = self.bytes.get(self.cur + 2) {
self.next();
return self.read_exponent().or(diag);
} else {
return diag;
}
},
Some(b'0'..=b'9') => return self.read_exponent().or(diag),
_ => return diag,
}
},
_ => return diag,
}
}
}
#[inline]
fn read_exponent(&mut self) -> Option<Diagnostic> {
if let Some(b'-') | Some(b'+') = self.bytes.get(self.cur + 1) {
self.next();
}
let mut diag = None;
unwind_loop! {
match self.next() {
Some(b'_') => diag = diag.or(self.handle_numeric_separator(10)),
Some(b'0'..=b'9') => {},
_ => return diag,
}
}
}
#[inline]
fn read_bindigits(&mut self) -> Option<Diagnostic> {
let mut diag = None;
unwind_loop! {
match self.next() {
Some(b'_') => diag = diag.or(self.handle_numeric_separator(2)),
Some(b'0') | Some(b'1') => {},
_ => return diag,
}
}
}
#[inline]
fn read_octaldigits(&mut self) -> Option<Diagnostic> {
let mut diag = None;
unwind_loop! {
match self.next() {
Some(b'_') => diag = diag.or(self.handle_numeric_separator(8)),
Some(b'0'..=b'7') => {},
_ => return diag,
}
}
}
#[inline]
fn verify_number_end(&mut self, start: usize) -> LexerReturn {
let err_start = self.cur;
if self.cur < self.bytes.len() && self.cur_is_ident_start() {
self.consume_ident();
let err = Diagnostic::error(
self.file_id,
"",
"numbers cannot be followed by identifiers directly after",
)
.primary(err_start..self.cur, "an identifier cannot appear here");
(
Token::new(SyntaxKind::ERROR_TOKEN, self.cur - start),
Some(err),
)
} else {
tok!(NUMBER, self.cur - start)
}
}
#[inline]
fn read_shebang(&mut self) -> LexerReturn {
let start = self.cur;
self.next();
if start != 0 {
return (Token::new(T![#], 1), None);
}
if let Some(b'!') = self.bytes.get(1) {
while self.next().is_some() {
let chr = self.get_unicode_char();
if is_linebreak(chr) {
return tok!(SHEBANG, self.cur);
}
self.cur += chr.len_utf8() - 1;
}
tok!(SHEBANG, self.cur)
} else {
let err = Diagnostic::error(
self.file_id,
"",
"expected `!` following a `#`, but found none",
)
.primary(0usize..1usize, "");
(Token::new(SyntaxKind::ERROR_TOKEN, 1), Some(err))
}
}
#[inline]
fn read_slash(&mut self) -> LexerReturn {
let start = self.cur;
match self.bytes.get(self.cur + 1) {
Some(b'*') => {
self.next();
while let Some(b) = self.next().copied() {
match b {
b'*' if self.bytes.get(self.cur + 1) == Some(&b'/') => {
self.advance(2);
return tok!(COMMENT, self.cur - start);
}
_ => {}
}
}
let err = Diagnostic::error(self.file_id, "", "unterminated block comment")
.primary(self.cur..self.cur + 1, "... but the file ends here")
.secondary(start..start + 2, "A block comment starts here");
(Token::new(SyntaxKind::COMMENT, self.cur - start), Some(err))
}
Some(b'/') => {
self.next();
while self.next().is_some() {
let chr = self.get_unicode_char();
if is_linebreak(chr) {
return tok!(COMMENT, self.cur - start);
}
self.cur += chr.len_utf8() - 1;
}
tok!(COMMENT, self.cur - start)
}
_ if self.state.expr_allowed => self.read_regex(),
Some(b'=') => {
self.advance(2);
tok!(SLASHEQ, self.cur - start)
}
_ => self.eat(tok![/]),
}
}
#[inline]
fn flag_err(&self, flag: char) -> Diagnostic {
Diagnostic::error(self.file_id, "", format!("duplicate flag `{}`", flag))
.primary(self.cur..self.cur + 1, "this flag was already used")
}
// TODO: Due to our return of (Token, Option<Error>) we can't issue more than one regex error
// This is not a huge issue but it would be helpful to users
#[inline]
#[allow(clippy::many_single_char_names)]
fn read_regex(&mut self) -> LexerReturn {
let start = self.cur;
let mut in_class = false;
let mut diagnostic = None;
unwind_loop! {
match self.next() {
Some(b'[') => in_class = true,
Some(b']') => in_class = false,
Some(b'/') => {
if !in_class {
let (mut g, mut i, mut m, mut s, mut u, mut y) = (false, false, false, false, false, false);
unwind_loop! {
let next = self.next_bounded().copied();
let chr_start = self.cur;
match next {
Some(b'g') => {
if g && diagnostic.is_none() {
diagnostic = Some(self.flag_err('g'))
}
g = true;
},
Some(b'i') => {
if i && diagnostic.is_none() {
diagnostic = Some(self.flag_err('i'))
}
i = true;
},
Some(b'm') => {
if m && diagnostic.is_none() {
diagnostic = Some(self.flag_err('m'))
}
m = true;
},
Some(b's') => {
if s && diagnostic.is_none() {
diagnostic = Some(self.flag_err('s'))
}
s = true;
},
Some(b'u') => {
if u && diagnostic.is_none() {
diagnostic = Some(self.flag_err('u'))
}
u = true;
},
Some(b'y') => {
if y && diagnostic.is_none() {
diagnostic = Some(self.flag_err('y'))
}
y = true;
},
Some(_) if self.cur_ident_part().is_some() => {
if diagnostic.is_none() {
diagnostic = Some(Diagnostic::error(self.file_id, "", "invalid regex flag")
.primary(chr_start .. self.cur + 1, "this is not a valid regex flag"));
}
},
_ => {
return (Token::new(SyntaxKind::REGEX, self.cur - start), diagnostic)
}
}
}
}
},
Some(b'\\') => {
if self.next_bounded().is_none() {
let err = Diagnostic::error(self.file_id, "", "expected a character after a regex escape, but found none")
.primary(self.cur..self.cur + 1, "expected a character following this");
return (Token::new(SyntaxKind::REGEX, self.cur - start), Some(err));
}
},
None => {
let err = Diagnostic::error(self.file_id, "", "unterminated regex literal")
.primary(self.cur..self.cur, "...but the file ends here")
.secondary(start..start + 1, "a regex literal starts there...");
return (Token::new(SyntaxKind::REGEX, self.cur - start), Some(err));
},
_ => {},
}
}
}
#[inline]
fn bin_or_assign(&mut self, bin: SyntaxKind, assign: SyntaxKind) -> LexerReturn {
if let Some(b'=') = self.next() {
self.next();
(Token::new(assign, 2), None)
} else {
(Token::new(bin, 1), None)
}
}
#[inline]
fn resolve_bang(&mut self) -> LexerReturn {
match self.next() {
Some(b'=') => {
if let Some(b'=') = self.next() {
self.next();
tok!(NEQ2, 3)
} else {
tok!(NEQ, 2)
}
}
_ => tok!(!),
}
}
#[inline]
fn resolve_amp(&mut self) -> LexerReturn {
match self.next() {
Some(b'&') => {
if let Some(b'=') = self.next() {
self.next();
tok!(AMP2EQ, 3)
} else {
tok!(AMP2, 2)
}
}
Some(b'=') => {
self.next();
tok!(AMPEQ, 2)
}
_ => tok!(&),
}
}
#[inline]
fn resolve_plus(&mut self) -> LexerReturn {
match self.next() {
Some(b'+') => {
self.next();
tok!(PLUS2, 2)
}
Some(b'=') => {
self.next();
tok!(PLUSEQ, 2)
}
_ => tok!(+),
}
}
#[inline]
fn resolve_minus(&mut self) -> LexerReturn {
match self.next() {
Some(b'-') => {
self.next();
tok!(MINUS2, 2)
}
Some(b'=') => {
self.next();
tok!(MINUSEQ, 2)
}
_ => tok!(-),
}
}
#[inline]
fn resolve_less_than(&mut self) -> LexerReturn {
match self.next() {
Some(b'<') => {
if let Some(b'=') = self.next() {
self.next();
tok!(SHLEQ, 3)
} else {
tok!(SHL, 2)
}
}
Some(b'=') => {
self.next();
tok!(LTEQ, 2)
}
_ => tok!(<),
}
}
#[inline]
fn resolve_greater_than(&mut self) -> LexerReturn {
match self.next() {
Some(b'>') => {
if let Some(b'>') = self.bytes.get(self.cur + 1).copied() {
if let Some(b'=') = self.bytes.get(self.cur + 2).copied() {
self.advance(3);
tok!(USHREQ, 4)
} else {
tok!(>)
}
} else if self.bytes.get(self.cur + 1).copied() == Some(b'=') {
self.advance(2);
tok!(SHREQ, 3)
} else {
tok!(>)
}
}
Some(b'=') => {
self.next();
tok!(GTEQ, 2)
}
_ => tok!(>),
}
}
#[inline]
fn resolve_eq(&mut self) -> LexerReturn {
match self.next() {
Some(b'=') => {
if let Some(b'=') = self.next() {
self.next();
tok!(EQ3, 3)
} else {
tok!(EQ2, 2)
}
}
Some(b'>') => {
self.next();
tok!(FAT_ARROW, 2)
}
_ => tok!(=),
}
}
#[inline]
fn resolve_pipe(&mut self) -> LexerReturn {
match self.next() {
Some(b'|') => {
if let Some(b'=') = self.next() {
self.next();
tok!(PIPE2EQ, 3)
} else {
tok!(PIPE2, 2)
}
}
Some(b'=') => {
self.next();
tok!(PIPEEQ, 2)
}
_ => tok!(|),
}
}
// Don't ask it to resolve the question of life's meaning because you'll be disappointed
#[inline]
fn resolve_question(&mut self) -> LexerReturn {
match self.next() {
Some(b'?') => {
if let Some(b'=') = self.next() {
self.next();
tok!(QUESTION2EQ, 3)
} else {
tok!(QUESTION2, 2)
}
}
Some(b'.') => {
// 11.7 Optional chaining punctuator
if let Some(b'0'..=b'9') = self.bytes.get(self.cur + 1) {
tok!(?)
} else {
self.next();
tok!(QUESTIONDOT, 2)
}
}
_ => tok!(?),
}
}
#[inline]
fn resolve_star(&mut self) -> LexerReturn {
match self.next() {
Some(b'*') => {
if let Some(b'=') = self.next() {
self.next();
tok!(STAR2EQ, 3)
} else {
tok!(STAR2, 2)
}
}
Some(b'=') => {
self.next();
tok!(STAREQ, 2)
}
_ => tok!(*),
}
}
/// Lex the next token
fn lex_token(&mut self) -> LexerReturn {
// Safety: we always call lex_token when we are at a valid char
let byte = unsafe { *self.bytes.get_unchecked(self.cur) };
let start = self.cur;
// A lookup table of `byte -> fn(l: &mut Lexer) -> Token` is exponentially slower than this approach.
// The speed difference comes from the difference in table size: a 2kb table easily fits into cpu cache,
// while a 16kb table will be ejected from cache very often, leading to slowdowns. This approach also allows LLVM
// to do more aggressive optimizations on the match regarding how to map it to instructions
let dispatched = Self::lookup(byte);
match dispatched {
WHS => {
self.consume_whitespace();
tok!(WHITESPACE, self.cur - start)
}
EXL => self.resolve_bang(),
HAS => self.read_shebang(),
PRC => self.bin_or_assign(T![%], T![%=]),
AMP => self.resolve_amp(),
PNO => self.eat(tok!(L_PAREN, 1)),
PNC => self.eat(tok!(R_PAREN, 1)),
MUL => self.resolve_star(),
PLS => self.resolve_plus(),
COM => self.eat(tok![,]),
MIN => self.resolve_minus(),
SLH => self.read_slash(),
// This simply changes state on the start
TPL => self.eat(tok!(BACKTICK, 1)),
ZER => {
let diag = self.read_zero();
let (token, err) = self.verify_number_end(start);
(token, err.or(diag))
}
PRD => {
if let Some(b"..") = self.bytes.get(self.cur + 1..self.cur + 3) {
self.cur += 3;
return tok!(DOT2, 3);
}
if let Some(b'0'..=b'9') = self.bytes.get(self.cur + 1) {
let diag = self.read_float();
let (token, err) = self.verify_number_end(start);
(token, err.or(diag))
} else {
self.eat(tok![.])
}
}
BSL => {
if self.bytes.get(self.cur + 1) == Some(&b'u') {
self.next();
let res = if self.bytes.get(self.cur + 1).copied() == Some(b'{') {
self.next();
self.read_codepoint_escape()
} else {
self.read_unicode_escape(true)
};
match res {
Ok(chr) => {
if is_id_start(chr) {
self.resolve_identifier((chr, start))
} else {
let err = Diagnostic::error(self.file_id, "", "unexpected unicode escape")
.primary(start..self.cur, "this escape is unexpected, as it does not designate the start of an identifier");
self.next();
(
Token::new(SyntaxKind::ERROR_TOKEN, self.cur - start),
Some(err),
)
}
}
Err(err) => (
Token::new(SyntaxKind::ERROR_TOKEN, self.cur - start),
Some(err),
),
}
} else {
let err = Diagnostic::error(
self.file_id,
"",
format!("unexpected token `{}`", byte as char),
)
.primary(start..self.cur + 1, "");
self.next();
(Token::new(SyntaxKind::ERROR_TOKEN, 1), Some(err))
}
}
QOT => {
if let Some(err) = self.read_str_literal() {
(
Token::new(SyntaxKind::ERROR_TOKEN, self.cur - start),
Some(err),
)
} else {
tok!(STRING, self.cur - start)
}
}
IDT => self.resolve_identifier((byte as char, start)),
DIG => {
let diag = self.read_number();
let (token, err) = self.verify_number_end(start);
(token, err.or(diag))
}
COL => self.eat(tok![:]),
SEM => self.eat(tok![;]),
LSS => self.resolve_less_than(),
EQL => self.resolve_eq(),
MOR => self.resolve_greater_than(),
QST => self.resolve_question(),
BTO => self.eat(tok!(L_BRACK, 1)),
BTC => self.eat(tok![R_BRACK, 1]),
CRT => self.bin_or_assign(T![^], T![^=]),
BEO => self.eat(tok![L_CURLY, 1]),
BEC => self.eat(tok![R_CURLY, 1]),
PIP => self.resolve_pipe(),
TLD => self.eat(tok![~]),
UNI => {
let chr = self.get_unicode_char();
if UNICODE_WHITESPACE_STARTS.contains(&byte)
&& (is_linebreak(chr) || UNICODE_SPACES.contains(&chr))
{
if is_linebreak(chr) {
self.state.had_linebreak = true;
}
self.cur += chr.len_utf8() - 1;
self.consume_whitespace();
tok!(WHITESPACE, self.cur - start)
} else {
self.cur += chr.len_utf8() - 1;
if is_id_start(chr) {
self.resolve_identifier((chr, start))
} else {
let err = Diagnostic::error(
self.file_id,
"",
format!("Unexpected token `{}`", chr as char),
)
.primary(start..self.cur + 1, "");
self.next();
(
Token::new(SyntaxKind::ERROR_TOKEN, self.cur - start),
Some(err),
)
}
}
}
AT_ => self.eat(tok![@]),
_ => {
let err = Diagnostic::error(
self.file_id,
"",
format!("unexpected token `{}`", byte as char),
)
.primary(start..self.cur + 1, "");
self.next();
(Token::new(SyntaxKind::ERROR_TOKEN, 1), Some(err))
}
}
}
fn lex_template(&mut self) -> LexerReturn {
let start = self.cur;
let mut diagnostic = None;
while let Some(b) = self.bytes.get(self.cur) {
match *b as char {
'`' if self.cur == start => {
self.next();
return tok!(BACKTICK, 1);
}
'`' => {
return (
Token::new(SyntaxKind::TEMPLATE_CHUNK, self.cur - start),
diagnostic,
);
}
'\\' => {
if let Some(err) = self.validate_escape_sequence() {
diagnostic = Some(err);
}
self.next_bounded();
}
'$' if self.bytes.get(self.cur + 1) == Some(&b'{') && self.cur == start => {
self.advance(2);
return (Token::new(SyntaxKind::DOLLARCURLY, 2), diagnostic);
}
'$' if self.bytes.get(self.cur + 1) == Some(&b'{') => {
return (
Token::new(SyntaxKind::TEMPLATE_CHUNK, self.cur - start),
diagnostic,
)
}
_ => {
let _ = self.next();
}
}
}
let err = Diagnostic::error(self.file_id, "", "unterminated template literal")
.primary(self.cur..self.cur + 1, "");
(
Token::new(SyntaxKind::TEMPLATE_CHUNK, self.cur - start),
Some(err),
)
}
}
/// Check if a char is a JS linebreak
pub fn is_linebreak(chr: char) -> bool {
['\n', '\r', '\u{2028}', '\u{2029}'].contains(&chr)
}
impl Iterator for Lexer<'_> {
type Item = LexerReturn;
fn next(&mut self) -> Option<Self::Item> {
if self.cur >= self.bytes.len() {
if !self.returned_eof {
self.returned_eof = true;
return Some(tok!(EOF, 0));
}
return None;
}
let token = if self.state.is_in_template() {
self.lex_template()
} else {
self.lex_token()
};
if ![
SyntaxKind::COMMENT,
SyntaxKind::WHITESPACE,
SyntaxKind::TEMPLATE_CHUNK,
]
.contains(&token.0.kind)
{
self.state.update(token.0.kind);
}
Some(token)
}
}
// Every handler that an incoming byte could be mapped to
#[allow(non_camel_case_types, clippy::upper_case_acronyms)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(u8)]
enum Dispatch {
ERR,
WHS,
EXL,
QOT,
IDT,
HAS,
PRC,
AMP,
PNO,
PNC,
MUL,
PLS,
COM,
MIN,
PRD,
SLH,
ZER,
DIG,
COL,
SEM,
LSS,
EQL,
MOR,
QST,
AT_,
BTO,
BSL,
BTC,
CRT,
TPL,
BEO,
PIP,
BEC,
TLD,
UNI,
}
use Dispatch::*;
// A lookup table mapping any incoming byte to a handler function
// This is taken from the ratel project lexer and modified
// FIXME: Should we ignore the first ascii control chars which are nearly never seen instead of returning Err?
static DISPATCHER: [Dispatch; 256] = [
// 0 1 2 3 4 5 6 7 8 9 A B C D E F //
ERR, ERR, ERR, ERR, ERR, ERR, ERR, ERR, ERR, WHS, WHS, WHS, WHS, WHS, ERR, ERR, // 0
ERR, ERR, ERR, ERR, ERR, ERR, ERR, ERR, ERR, ERR, ERR, ERR, ERR, ERR, ERR, ERR, // 1
WHS, EXL, QOT, HAS, IDT, PRC, AMP, QOT, PNO, PNC, MUL, PLS, COM, MIN, PRD, SLH, // 2
ZER, DIG, DIG, DIG, DIG, DIG, DIG, DIG, DIG, DIG, COL, SEM, LSS, EQL, MOR, QST, // 3
AT_, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, // 4
IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, BTO, BSL, BTC, CRT, IDT, // 5
TPL, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, // 6
IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, IDT, BEO, PIP, BEC, TLD, ERR, // 7
UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, // 8
UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, // 9
UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, // A
UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, // B
UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, // C
UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, // D
UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, // E
UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, UNI, // F
];
| if let Some(b) = self.bytes.get(self.cur + 1) {
self.cur += 1;
Some(b)
} else {
if self.cur != self.bytes.len() {
self.cur += 1;
}
None
}
}
|
cloudevent_test.go | package step
import (
"fmt"
"regexp"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
rtesting "knative.dev/pkg/reconciler/testing"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
tektoncloudevent "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
)
func TestEmit(t *testing.T) |
func eventFromChannel(c chan string, testName string, wantEvent string) error {
timer := time.NewTimer(1 * time.Second)
select {
case event := <-c:
if wantEvent == "" {
return fmt.Errorf("received event \"%s\" for %s but none expected", event, testName)
}
matching, err := regexp.MatchString(wantEvent, event)
if err == nil {
if !matching {
return fmt.Errorf("expected event \"%s\" but got \"%s\" instead for %s", wantEvent, event, testName)
}
}
case <-timer.C:
if wantEvent != "" {
return fmt.Errorf("received no events for %s but %s expected", testName, wantEvent)
}
}
return nil
}
func checkCloudEvents(t *testing.T, fce *tektoncloudevent.FakeClient, testName string, wantEvent string) error {
t.Helper()
return eventFromChannel(fce.Events, testName, wantEvent)
}
| {
eventData := TektonStepCloudEvent{
PodRef: &corev1.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Name: "test-name",
Namespace: "test-namespace",
},
Log: "this is log",
Step: &v1beta1.Step{},
StepState: &v1beta1.StepState{},
}
eventTypes := []TektonPluginEventType{
CloudEventTypeStepStarted,
CloudEventTypeStepFailed,
CloudEventTypeStepSucceeded,
CloudEventTypeStepSkipped,
}
testcases := []struct {
name string
data map[string]string
wantEvent string
wantCloudEvent bool
}{{
name: "without sink",
data: map[string]string{},
wantEvent: "",
wantCloudEvent: false,
}, {
name: "with empty string sink",
data: map[string]string{"default-cloud-events-sink": ""},
wantEvent: "",
wantCloudEvent: false,
}, {
name: "with sink",
data: map[string]string{"default-cloud-events-sink": "http://mysink"},
wantEvent: "Normal Started",
wantCloudEvent: true,
}}
for _, et := range eventTypes {
for _, tc := range testcases {
t.Run(tc.name+"/"+et.String(), func(t *testing.T) {
// Setup the context and seed test data
ctx, _ := rtesting.SetupFakeContext(t)
ctx = tektoncloudevent.WithClient(ctx, &tektoncloudevent.FakeClientBehaviour{SendSuccessfully: true})
fakeClient := tektoncloudevent.Get(ctx).(tektoncloudevent.FakeClient)
// Setup the config and add it to the context
defaults, _ := config.NewDefaultsFromMap(tc.data)
featureFlags, _ := config.NewFeatureFlagsFromMap(map[string]string{})
cfg := &config.Config{
Defaults: defaults,
FeatureFlags: featureFlags,
}
ctx = config.ToContext(ctx, cfg)
go eventData.Emit(ctx, et, t.Name())
if tc.wantCloudEvent {
if err := checkCloudEvents(t, &fakeClient, t.Name(), `(s?)`+et.String()); err != nil {
t.Fatal(err)
}
}
})
}
}
} |
queries.rs | // Copyright 2021 - Nym Technologies SA <[email protected]>
// SPDX-License-Identifier: Apache-2.0
use cosmwasm_std::{Deps, Order, StdResult};
use crate::storage::payments_read;
use bandwidth_claim_contract::keys::PublicKey;
use bandwidth_claim_contract::payment::{PagedPaymentResponse, Payment};
const PAYMENT_PAGE_MAX_LIMIT: u32 = 100;
const PAYMENT_PAGE_DEFAULT_LIMIT: u32 = 50;
/// Adds a 0 byte to terminate the `start_after` value given. This allows CosmWasm
/// to get the succeeding key as the start of the next page.
fn calculate_start_value<B: AsRef<[u8]>>(start_after: Option<B>) -> Option<Vec<u8>> {
start_after.as_ref().map(|identity| {
identity
.as_ref()
.iter()
.cloned()
.chain(std::iter::once(0))
.collect()
})
}
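// For illustration (a hypothetical key, not one used by the tests below):
//
//     calculate_start_value(Some(b"abc"))  == Some(vec![b'a', b'b', b'c', 0])
//     calculate_start_value::<&[u8]>(None) == None
//
// so the range scan in `query_payments_paged` resumes strictly after `start_after`.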
pub fn query_payments_paged(
deps: Deps,
start_after: Option<PublicKey>,
limit: Option<u32>,
) -> StdResult<PagedPaymentResponse> {
let limit = limit
.unwrap_or(PAYMENT_PAGE_DEFAULT_LIMIT)
.min(PAYMENT_PAGE_MAX_LIMIT) as usize;
let start = calculate_start_value(start_after);
let payments = payments_read(deps.storage)
.range(start.as_deref(), None, Order::Ascending)
.take(limit)
.map(|res| res.map(|item| item.1))
.collect::<StdResult<Vec<Payment>>>()?;
let start_next_after = payments.last().map(|payment| payment.verification_key());
Ok(PagedPaymentResponse::new(payments, limit, start_next_after))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::storage::payments;
use crate::support::tests::helpers;
use std::convert::TryInto;
#[test]
fn payments_empty_on_init() {
let deps = helpers::init_contract();
let response = query_payments_paged(deps.as_ref(), None, Option::from(2)).unwrap();
assert_eq!(0, response.payments.len());
}
#[test]
fn payments_paged_retrieval_obeys_limits() {
let mut deps = helpers::init_contract();
let storage = deps.as_mut().storage;
let limit = 2;
for n in 0u32..10000 {
let bytes: Vec<u8> = std::iter::repeat(n.to_be_bytes())
.take(8)
.flatten()
.collect();
let verification_key = PublicKey::new(bytes.try_into().unwrap());
let payment = helpers::payment_fixture();
payments(storage)
.save(&verification_key.to_bytes(), &payment)
.unwrap();
}
let page1 = query_payments_paged(deps.as_ref(), None, Option::from(limit)).unwrap();
assert_eq!(limit, page1.payments.len() as u32);
}
#[test]
fn payments_paged_retrieval_has_default_limit() {
let mut deps = helpers::init_contract();
let storage = deps.as_mut().storage;
for n in 0u32..100 {
let bytes: Vec<u8> = std::iter::repeat(n.to_be_bytes())
.take(8)
.flatten()
.collect();
let verification_key = PublicKey::new(bytes.try_into().unwrap());
let payment = helpers::payment_fixture();
payments(storage)
.save(&verification_key.to_bytes(), &payment)
.unwrap();
}
// query without explicitly setting a limit
let page1 = query_payments_paged(deps.as_ref(), None, None).unwrap();
assert_eq!(PAYMENT_PAGE_DEFAULT_LIMIT, page1.payments.len() as u32);
}
#[test]
fn payments_paged_retrieval_has_max_limit() {
let mut deps = helpers::init_contract();
let storage = deps.as_mut().storage;
for n in 0u32..10000 {
let bytes: Vec<u8> = std::iter::repeat(n.to_be_bytes())
.take(8)
.flatten()
.collect();
let verification_key = PublicKey::new(bytes.try_into().unwrap());
let payment = helpers::payment_fixture();
payments(storage)
.save(&verification_key.to_bytes(), &payment)
.unwrap();
}
// query with a crazily high limit in an attempt to use too many resources
let crazy_limit = 1000;
let page1 = query_payments_paged(deps.as_ref(), None, Option::from(crazy_limit)).unwrap();
// we default to a decent sized upper bound instead
assert_eq!(PAYMENT_PAGE_MAX_LIMIT, page1.payments.len() as u32);
}
#[test]
fn | () {
let key1 = PublicKey::new([1; 32]);
let key2 = PublicKey::new([2; 32]);
let key3 = PublicKey::new([3; 32]);
let key4 = PublicKey::new([4; 32]);
let mut deps = helpers::init_contract();
let payment = helpers::payment_fixture();
payments(&mut deps.storage)
.save(&key1.to_bytes(), &payment)
.unwrap();
let per_page = 2;
let page1 = query_payments_paged(deps.as_ref(), None, Option::from(per_page)).unwrap();
// page should have 1 result on it
assert_eq!(1, page1.payments.len());
// save another
payments(&mut deps.storage)
.save(&key2.to_bytes(), &payment)
.unwrap();
// page1 should have 2 results on it
let page1 = query_payments_paged(deps.as_ref(), None, Option::from(per_page)).unwrap();
assert_eq!(2, page1.payments.len());
payments(&mut deps.storage)
.save(&key3.to_bytes(), &payment)
.unwrap();
// page1 still has 2 results
let page1 = query_payments_paged(deps.as_ref(), None, Option::from(per_page)).unwrap();
assert_eq!(2, page1.payments.len());
// retrieving the next page should start after the last key on this page
let start_after = key2;
let page2 = query_payments_paged(
deps.as_ref(),
Option::from(start_after),
Option::from(per_page),
)
.unwrap();
assert_eq!(1, page2.payments.len());
// save another one
payments(&mut deps.storage)
.save(&key4.to_bytes(), &payment)
.unwrap();
let start_after = key2;
let page2 = query_payments_paged(
deps.as_ref(),
Option::from(start_after),
Option::from(per_page),
)
.unwrap();
// now we have 2 pages, with 2 results on the second page
assert_eq!(2, page2.payments.len());
}
}
| payments_pagination_works |
pretty.rs | //! Utilities for printing nice human-readable output.
use chrono::offset::Utc;
use console::style;
use expjobserver::{human_ts, protocol};
use prettytable::{cell, row, Table};
use super::{JobInfo, JobOrMatrixInfo, MachineInfo, MatrixInfo, Status};
pub(crate) fn print_response(resp: protocol::response::ResponseType) {
use protocol::{response::ResponseType::*, *};
match resp {
Okresp(_) => println!("OK"),
Mresp(machine_resp) => println!("{:#?}", machine_resp),
Jresp(jobs_resp) => println!("{:#?}", jobs_resp),
Vresp(VarsResp { vars }) => println!("{:#?}", vars),
Jiresp(JobIdResp { jid }) => println!("OK: {}", jid),
Miresp(MatrixIdResp { id }) => println!("OK: {}", id),
Jsresp(job_status) => println!("{:#?}", job_status),
Msresp(matrix_status) => println!("{:#?}", matrix_status),
Nsmresp(_) => println!("No such machine."),
Nsjresp(_) => println!("No such job."),
Nsmatresp(_) => println!("No such matrix."),
Nwresp(_) => println!("Task is not waiting."),
Ierr(_) => println!("Internal error."),
};
}
macro_rules! style {
($status:ident, $fmt:literal, $($args:expr),+ $(; $($style:ident),+)?) => {{
$status += &format!("{}",
style(format!($fmt, $($args),+))
$($(. $style () )+)?
);
}}
}
/// Compute and print some summary stats.
fn print_summary(items: &[JobOrMatrixInfo]) {
let mut total_jobs = 0;
let mut running_jobs = 0;
let mut failed_jobs = 0;
let mut done_jobs = 0;
let mut waiting_jobs = 0;
let mut held_jobs = 0;
let mut canceled_jobs = 0;
let mut unknown_jobs = 0;
let mut count_task = |task: &JobInfo| {
total_jobs += 1;
match task.status {
Status::Running { .. } | Status::CopyResults { .. } => running_jobs += 1,
Status::Unknown { .. } => unknown_jobs += 1,
Status::Canceled { .. } => canceled_jobs += 1,
Status::Waiting => waiting_jobs += 1,
Status::Held => held_jobs += 1,
Status::Done { .. } => done_jobs += 1,
Status::Failed { .. } => failed_jobs += 1,
}
};
for item in items.iter() {
match item {
JobOrMatrixInfo::Job(job_info) => count_task(job_info),
JobOrMatrixInfo::Matrix(matrix_info) => {
for job in matrix_info.jobs.iter() {
count_task(job);
}
}
}
}
let mut summary = format!("{} jobs: ", total_jobs);
style!(summary, "{} waiting", waiting_jobs; blue, bright);
summary += ", ";
style!(summary, "{} held", held_jobs; blue, bright);
summary += ", ";
style!(summary, "{} running", running_jobs; yellow);
summary += ", ";
style!(summary, "{} done", done_jobs; green);
summary += ", ";
style!(summary, "{} failed", failed_jobs; red, underlined);
summary += ", ";
style!(summary, "{} cancelled", canceled_jobs; red);
summary += ", ";
style!(summary, "{} unknown", unknown_jobs; black, bright);
println!("{}\n", summary);
}
/// Compute the width with which to truncate the cmd in a row, if needed.
const fn row_cmd_width(term_width: u16) -> usize {
const JID_WIDTH: usize = 15;
const STATUS_WIDTH: usize = 20;
const CLASS_WIDTH: usize = 10;
const MACHINE_WIDTH: usize = 35;
const OUTPUT_WIDTH: usize = 5;
const ELLIPSIS_WIDTH: usize = 3;
const PADDING_WIDTH: usize = 2 * 6;
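// `usize::MAX` acts as "no limit": when the terminal is too narrow to fit every column,
// the command is not truncated at all (see below).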
const ALTERNATE: usize = usize::MAX;
const TOTAL: usize = JID_WIDTH
+ STATUS_WIDTH
+ CLASS_WIDTH
+ MACHINE_WIDTH
+ OUTPUT_WIDTH
+ ELLIPSIS_WIDTH
+ PADDING_WIDTH;
let term_width = term_width as usize;
// If the terminal is not wide enough, then we will have wrap-around anyway. Don't
// bother trying to make everything fit on one line. Instead, try to display everything.
if term_width > TOTAL {
term_width - TOTAL
} else {
ALTERNATE
}
}
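/// Truncate `cmd` to the width computed by `row_cmd_width`, appending an ellipsis if
/// anything was cut off.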
fn truncate_cmd(cmd: &str, term_width: u16) -> String {
let cmd_width = row_cmd_width(term_width);
let mut cmd_trunc = cmd.to_owned();
cmd_trunc.truncate(cmd_width);
if cmd_trunc.len() < cmd.len() {
cmd_trunc.push_str("...");
}
cmd_trunc
}
/// Add a row to the table for a task.
fn add_task_row(table: &mut Table, job: JobInfo, term_width: u16) {
let jid = if let Some(matrix) = job.matrix {
format!("{}:{}", matrix, job.jid)
} else {
format!("{}", job.jid)
};
match job {
JobInfo {
cmd,
class,
status: Status::Unknown { machine },
..
} => {
let machine = if let Some(machine) = machine {
machine
} else {
"".into() | };
let cmd = truncate_cmd(&cmd, term_width);
table.add_row(row![b->jid, FDi->"Unknown", class, cmd, machine, ""]);
}
JobInfo {
cmd,
class,
status: Status::Canceled { machine },
variables: _variables,
..
} => {
let machine = if let Some(machine) = machine {
machine
} else {
"".into()
};
let cmd = truncate_cmd(&cmd, term_width);
table.add_row(row![b->jid, Fri->"Canceled", class, cmd, machine, ""]);
}
JobInfo {
cmd,
class,
status: Status::Waiting,
variables: _variables,
timestamp,
..
} => {
let status = format!("Waiting ({})", human_ts(Utc::now() - timestamp));
let cmd = truncate_cmd(&cmd, term_width);
table.add_row(row![b->jid, FB->status, class, cmd, "", ""]);
}
JobInfo {
cmd,
class,
status: Status::Held,
variables: _variables,
timestamp,
..
} => {
let status = format!("Held ({})", human_ts(Utc::now() - timestamp));
let cmd = truncate_cmd(&cmd, term_width);
table.add_row(row![b->jid, FB->status, class, cmd, "", ""]);
}
JobInfo {
cmd,
class,
status:
Status::Done {
machine,
output: None,
},
variables: _variables,
timestamp,
done_timestamp,
..
} => {
let status = format!(
"Done ({})",
human_ts(done_timestamp.unwrap_or_else(|| timestamp) - timestamp)
);
let cmd = truncate_cmd(&cmd, term_width);
table.add_row(row![b->jid, Fm->status, class, cmd, machine, ""]);
}
JobInfo {
cmd,
class,
status:
Status::Done {
machine,
output: Some(_),
},
variables: _variables,
timestamp,
done_timestamp,
..
} => {
let status = format!(
"Done ({})",
human_ts(done_timestamp.unwrap_or_else(|| timestamp) - timestamp)
);
let cmd = truncate_cmd(&cmd, term_width);
table.add_row(row![b->jid, Fg->status, class, cmd, machine, Fg->"Ready"]);
}
JobInfo {
cmd,
class,
status: Status::Failed { error, machine },
variables: _variables,
timestamp,
done_timestamp,
..
} => {
let status = format!(
"Failed ({})",
human_ts(done_timestamp.unwrap_or_else(|| timestamp) - timestamp)
);
let cmd = truncate_cmd(&cmd, term_width);
table.add_row(row![b->jid, Frbu->status, class, cmd,
if let Some(machine) = machine { machine } else {"".into()}, error]);
}
JobInfo {
cmd,
class,
status: Status::Running { machine },
variables: _variables,
timestamp,
..
} => {
let status = format!("Running ({})", human_ts(Utc::now() - timestamp));
let cmd = truncate_cmd(&cmd, term_width);
table.add_row(row![b->jid, Fy->status, class, cmd, machine, ""]);
}
JobInfo {
cmd,
class,
status: Status::CopyResults { machine },
variables: _variables,
timestamp,
..
} => {
let status = format!("Copy Results ({})", human_ts(Utc::now() - timestamp));
let cmd = truncate_cmd(&cmd, term_width);
table.add_row(row![b->jid, Fy->status, class, cmd, machine, ""]);
}
}
}
/// Add a row to the table representing a whole matrix.
fn add_matrix_row(table: &mut Table, matrix: MatrixInfo, term_width: u16) {
let (running, waiting, held, done, failed, cancelled, unknown) = {
let mut running = 0;
let mut waiting = 0;
let mut held = 0;
let mut done = 0;
let mut failed = 0;
let mut cancelled = 0;
let mut unknown = 0;
for j in matrix.jobs.iter() {
match j.status {
Status::Running { .. } | Status::CopyResults { .. } => running += 1,
Status::Waiting => waiting += 1,
Status::Held => held += 1,
Status::Done { .. } => done += 1,
Status::Failed { .. } => failed += 1,
Status::Canceled { .. } => cancelled += 1,
Status::Unknown { .. } => unknown += 1,
}
}
(running, waiting, held, done, failed, cancelled, unknown)
};
let id = format!("{} (matrix)", matrix.id);
let status = {
let mut status = String::new();
if running > 0 {
style!(status, "{}R", running; yellow);
}
if waiting > 0 {
if !status.is_empty() {
status.push_str(" ");
}
style!(status, "{}W", waiting; blue, bright);
}
if held > 0 {
if !status.is_empty() {
status.push_str(" ");
}
style!(status, "{}H", held; blue, bright);
}
if done > 0 {
if !status.is_empty() {
status.push_str(" ");
}
style!(status, "{}D", done; green);
}
if failed > 0 {
if !status.is_empty() {
status.push_str(" ");
}
style!(status, "{}F", failed; red, underlined);
}
if cancelled > 0 {
if !status.is_empty() {
status.push_str(" ");
}
style!(status, "{}C", cancelled; red);
}
if unknown > 0 {
if !status.is_empty() {
status.push_str(" ");
}
style!(status, "{}U", unknown; black, bright);
}
status
};
let cmd = truncate_cmd(&matrix.cmd, term_width);
table.add_row(row![b->id, status, matrix.class, cmd, "", ""]);
}
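/// Print a summary line followed by a table with one row per job; matrices are collapsed
/// into a single aggregate row when `collapse_matrices` is set, and expanded otherwise.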
pub(crate) fn print_jobs(items: Vec<JobOrMatrixInfo>, collapse_matrices: bool) {
// Print the summary.
print_summary(&items);
// Print a nice human-readable table.
let term_width = console::Term::stdout().size().1;
let mut table = Table::new();
table.set_format(*prettytable::format::consts::FORMAT_CLEAN);
table.set_titles(row![ Fwbu =>
"Job", "Status", "Class", "Command", "Machine", "Output"
]);
for item in items.into_iter() {
match item {
JobOrMatrixInfo::Job(job_info) => add_task_row(&mut table, job_info, term_width),
JobOrMatrixInfo::Matrix(matrix_info) => {
if collapse_matrices {
add_matrix_row(&mut table, matrix_info, term_width);
} else {
for job_info in matrix_info.jobs.into_iter() {
add_task_row(&mut table, job_info, term_width);
}
}
}
}
}
table.printstd();
}
pub(crate) fn print_avail(machines: Vec<MachineInfo>) {
// Print a nice human-readable table
let mut table = Table::new();
table.set_format(*prettytable::format::consts::FORMAT_CLEAN);
table.set_titles(row![ Fwbu =>
"Machine", "Class", "Running"
]);
// One row per machine; machines that are currently running a job are highlighted.
for machine in machines.iter() {
match machine {
MachineInfo {
addr,
class,
running: Some(running),
} => {
table.add_row(row![ Fy =>
addr,
class,
format!("{}", running)
]);
}
MachineInfo {
addr,
class,
running: None,
} => {
table.add_row(row![addr, class, ""]);
}
}
}
table.printstd();
} | |
lib.rs | #![allow(non_snake_case)]
#[macro_use]
extern crate log;
#[macro_use]
extern crate lazy_static;
#[cfg(target_os = "macos")]
#[macro_use]
extern crate objc;
use ash::extensions::{
self,
ext::{DebugReport, DebugUtils},
};
use ash::version::{DeviceV1_0, EntryV1_0, InstanceV1_0};
use ash::vk;
#[cfg(not(feature = "use-rtld-next"))]
use ash::{Entry, LoadingError};
use hal::{
adapter,
device::{CreationError as DeviceCreationError, DeviceLost, OutOfMemory, SurfaceLost},
format,
image,
memory,
pso::{PatchSize, PipelineStage},
queue,
window::{PresentError, Suboptimal, SwapImageIndex},
Features,
Hints,
Limits,
};
use std::borrow::{Borrow, Cow};
use std::ffi::{CStr, CString};
use std::sync::Arc;
use std::{fmt, mem, ptr, slice};
#[cfg(feature = "use-rtld-next")]
use ash::{EntryCustom, LoadingError};
#[cfg(feature = "use-rtld-next")]
use shared_library::dynamic_library::{DynamicLibrary, SpecialHandles};
mod command;
mod conv;
mod device;
mod info;
mod native;
mod pool;
mod window;
// `CStr`s cannot be constructed in a const context yet; until const fn lands we use lazy_static.
lazy_static! {
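// Validation layers and debug-reporting extensions are only requested in debug builds.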
static ref LAYERS: Vec<&'static CStr> = if cfg!(debug_assertions) {
vec![CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0").unwrap()]
} else {
vec![]
};
static ref EXTENSIONS: Vec<&'static CStr> = if cfg!(debug_assertions) {
vec![
DebugUtils::name(),
DebugReport::name(),
]
} else {
vec![]
};
static ref DEVICE_EXTENSIONS: Vec<&'static CStr> = vec![extensions::khr::Swapchain::name()];
static ref SURFACE_EXTENSIONS: Vec<&'static CStr> = vec![
extensions::khr::Surface::name(),
// Platform-specific WSI extensions
#[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
extensions::khr::XlibSurface::name(),
#[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
extensions::khr::XcbSurface::name(),
#[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
extensions::khr::WaylandSurface::name(),
#[cfg(target_os = "android")]
extensions::khr::AndroidSurface::name(),
#[cfg(target_os = "windows")]
extensions::khr::Win32Surface::name(),
#[cfg(target_os = "macos")]
extensions::mvk::MacOSSurface::name(),
];
static ref AMD_NEGATIVE_VIEWPORT_HEIGHT: &'static CStr =
CStr::from_bytes_with_nul(b"VK_AMD_negative_viewport_height\0").unwrap();
static ref KHR_MAINTENANCE1: &'static CStr =
CStr::from_bytes_with_nul(b"VK_KHR_maintenance1\0").unwrap();
static ref KHR_SAMPLER_MIRROR_MIRROR_CLAMP_TO_EDGE : &'static CStr =
CStr::from_bytes_with_nul(b"VK_KHR_sampler_mirror_clamp_to_edge\0").unwrap();
}
#[cfg(not(feature = "use-rtld-next"))]
lazy_static! {
// Entry function pointers
pub static ref VK_ENTRY: Result<Entry, LoadingError> = Entry::new();
}
#[cfg(feature = "use-rtld-next")]
lazy_static! {
// Entry function pointers
pub static ref VK_ENTRY: Result<EntryCustom<V1_0, ()>, LoadingError>
= EntryCustom::new_custom(
|| Ok(()),
|_, name| unsafe {
DynamicLibrary::symbol_special(SpecialHandles::Next, &*name.to_string_lossy())
.unwrap_or(ptr::null_mut())
}
);
}
pub struct RawInstance(ash::Instance, Option<DebugMessenger>);
pub enum DebugMessenger {
Utils(DebugUtils, vk::DebugUtilsMessengerEXT),
Report(DebugReport, vk::DebugReportCallbackEXT),
}
impl Drop for RawInstance {
fn drop(&mut self) {
unsafe {
#[cfg(debug_assertions)]
{
match self.1 {
Some(DebugMessenger::Utils(ref ext, callback)) => {
ext.destroy_debug_utils_messenger(callback, None)
}
Some(DebugMessenger::Report(ref ext, callback)) => {
ext.destroy_debug_report_callback(callback, None)
}
None => {}
}
}
self.0.destroy_instance(None);
}
}
}
pub struct Instance {
pub raw: Arc<RawInstance>,
/// Supported extensions of this instance.
pub extensions: Vec<&'static CStr>,
}
impl fmt::Debug for Instance {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("Instance")
}
}
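/// Map Vulkan queue flags to the closest gfx-hal queue type; queues that expose both
/// GRAPHICS and COMPUTE are reported as `General`.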
fn map_queue_type(flags: vk::QueueFlags) -> queue::QueueType {
if flags.contains(vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE) {
// TRANSFER_BIT optional
queue::QueueType::General
} else if flags.contains(vk::QueueFlags::GRAPHICS) {
// TRANSFER_BIT optional
queue::QueueType::Graphics
} else if flags.contains(vk::QueueFlags::COMPUTE) {
// TRANSFER_BIT optional
queue::QueueType::Compute
} else if flags.contains(vk::QueueFlags::TRANSFER) {
queue::QueueType::Transfer
} else {
// TODO: present-only queues?
unimplemented!()
}
}
unsafe fn display_debug_utils_label_ext(
label_structs: *mut vk::DebugUtilsLabelEXT,
count: usize,
) -> Option<String> {
if count == 0 {
return None;
}
Some(
slice::from_raw_parts::<vk::DebugUtilsLabelEXT>(label_structs, count)
.iter()
.flat_map(|dul_obj| {
dul_obj
.p_label_name
.as_ref()
.map(|lbl| CStr::from_ptr(lbl).to_string_lossy().into_owned())
})
.collect::<Vec<String>>()
.join(", "),
)
}
unsafe fn display_debug_utils_object_name_info_ext(
info_structs: *mut vk::DebugUtilsObjectNameInfoEXT,
count: usize,
) -> Option<String> {
if count == 0 {
return None;
}
// TODO: use the color field of vk::DebugUtilsLabelEXT in a meaningful way?
Some(
slice::from_raw_parts::<vk::DebugUtilsObjectNameInfoEXT>(info_structs, count)
.iter()
.map(|obj_info| {
let object_name = obj_info
.p_object_name
.as_ref()
.map(|name| CStr::from_ptr(name).to_string_lossy().into_owned());
match object_name {
Some(name) => format!(
"(type: {:?}, hndl: {}, name: {})",
obj_info.object_type,
&obj_info.object_handle.to_string(),
name
),
None => format!(
"(type: {:?}, hndl: {})",
obj_info.object_type,
&obj_info.object_handle.to_string()
),
}
})
.collect::<Vec<String>>()
.join(", "),
)
}
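/// Callback for `VK_EXT_debug_utils`: maps the Vulkan severity to a `log` level and
/// appends queue, command-buffer and object annotations to the message when present.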
unsafe extern "system" fn debug_utils_messenger_callback(
message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
message_type: vk::DebugUtilsMessageTypeFlagsEXT,
p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT,
_user_data: *mut std::os::raw::c_void,
) -> vk::Bool32 {
let callback_data = *p_callback_data;
let message_severity = match message_severity {
vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => log::Level::Error,
vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => log::Level::Warn,
vk::DebugUtilsMessageSeverityFlagsEXT::INFO => log::Level::Info,
vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => log::Level::Trace,
_ => log::Level::Warn,
};
let message_type = &format!("{:?}", message_type);
let message_id_number: i32 = callback_data.message_id_number as i32;
let message_id_name = if callback_data.p_message_id_name.is_null() {
Cow::from("")
} else {
CStr::from_ptr(callback_data.p_message_id_name).to_string_lossy()
};
let message = if callback_data.p_message.is_null() {
Cow::from("")
} else {
CStr::from_ptr(callback_data.p_message).to_string_lossy()
};
let additional_info: [(&str, Option<String>); 3] = [
(
"queue info",
display_debug_utils_label_ext(
callback_data.p_queue_labels as *mut _,
callback_data.queue_label_count as usize,
),
),
(
"cmd buf info",
display_debug_utils_label_ext(
callback_data.p_cmd_buf_labels as *mut _,
callback_data.cmd_buf_label_count as usize,
),
),
(
"object info",
display_debug_utils_object_name_info_ext(
callback_data.p_objects as *mut _,
callback_data.object_count as usize,
),
),
];
log!(message_severity, "{}\n", {
let mut msg = format!(
"\n{} [{} ({})] : {}",
message_type,
message_id_name,
&message_id_number.to_string(),
message
);
#[allow(array_into_iter)]
for (info_label, info) in additional_info.into_iter() {
match info {
Some(data) => {
msg = format!("{}\n{}: {}", msg, info_label, data);
}
None => {}
}
}
msg
});
vk::FALSE
}
unsafe extern "system" fn debug_report_callback(
type_: vk::DebugReportFlagsEXT,
_: vk::DebugReportObjectTypeEXT,
_object: u64,
_location: usize,
_msg_code: i32,
layer_prefix: *const std::os::raw::c_char,
description: *const std::os::raw::c_char,
_user_data: *mut std::os::raw::c_void,
) -> vk::Bool32 {
let level = match type_ {
vk::DebugReportFlagsEXT::ERROR => log::Level::Error,
vk::DebugReportFlagsEXT::WARNING => log::Level::Warn,
vk::DebugReportFlagsEXT::INFORMATION => log::Level::Info,
vk::DebugReportFlagsEXT::DEBUG => log::Level::Debug,
_ => log::Level::Warn,
};
let layer_prefix = CStr::from_ptr(layer_prefix).to_str().unwrap();
let description = CStr::from_ptr(description).to_str().unwrap();
log!(level, "[{}] {}", layer_prefix, description);
vk::FALSE
}
impl hal::Instance<Backend> for Instance {
fn create(name: &str, version: u32) -> Result<Self, hal::UnsupportedBackend> {
// TODO: return errors instead of panic
let entry = VK_ENTRY.as_ref().map_err(|e| {
info!("Missing Vulkan entry points: {:?}", e);
hal::UnsupportedBackend
})?;
let app_name = CString::new(name).unwrap();
let app_info = vk::ApplicationInfo {
s_type: vk::StructureType::APPLICATION_INFO,
p_next: ptr::null(),
p_application_name: app_name.as_ptr(),
application_version: version,
p_engine_name: b"gfx-rs\0".as_ptr() as *const _,
engine_version: 1,
api_version: vk::make_version(1, 0, 0),
};
let instance_extensions = entry
.enumerate_instance_extension_properties()
.map_err(|e| {
info!("Unable to enumerate instance extensions: {:?}", e);
hal::UnsupportedBackend
})?;
let instance_layers = entry
.enumerate_instance_layer_properties()
.map_err(|e| {
info!("Unable to enumerate instance layers: {:?}", e);
hal::UnsupportedBackend
})?;
// Check our extensions against the available extensions
let extensions = SURFACE_EXTENSIONS
.iter()
.chain(EXTENSIONS.iter())
.filter_map(|&ext| {
instance_extensions
.iter()
.find(|inst_ext| unsafe {
CStr::from_ptr(inst_ext.extension_name.as_ptr()) == ext
})
.map(|_| ext)
.or_else(|| {
info!("Unable to find extension: {}", ext.to_string_lossy());
None
})
})
.collect::<Vec<&CStr>>();
// Check requested layers against the available layers
let layers = LAYERS
.iter()
.filter_map(|&layer| {
instance_layers
.iter()
.find(|inst_layer| unsafe {
CStr::from_ptr(inst_layer.layer_name.as_ptr()) == layer
})
.map(|_| layer)
.or_else(|| {
warn!("Unable to find layer: {}", layer.to_string_lossy());
None
})
})
.collect::<Vec<&CStr>>();
let instance = {
let cstrings = layers
.iter()
.chain(extensions.iter())
.map(|&s| CString::from(s))
.collect::<Vec<_>>();
let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::<Vec<_>>();
let create_info = vk::InstanceCreateInfo {
s_type: vk::StructureType::INSTANCE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::InstanceCreateFlags::empty(),
p_application_info: &app_info,
enabled_layer_count: layers.len() as _,
pp_enabled_layer_names: str_pointers.as_ptr(),
enabled_extension_count: extensions.len() as _,
pp_enabled_extension_names: str_pointers[layers.len() ..].as_ptr(),
};
unsafe { entry.create_instance(&create_info, None) }.map_err(|e| {
warn!("Unable to create Vulkan instance: {:?}", e);
hal::UnsupportedBackend
})?
};
#[cfg(debug_assertions)]
let debug_messenger = {
// make sure VK_EXT_debug_utils is available
if instance_extensions.iter().any(|props| unsafe {
CStr::from_ptr(props.extension_name.as_ptr()) == DebugUtils::name()
}) {
let ext = DebugUtils::new(entry, &instance);
let info = vk::DebugUtilsMessengerCreateInfoEXT {
s_type: vk::StructureType::DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
p_next: ptr::null(),
flags: vk::DebugUtilsMessengerCreateFlagsEXT::empty(),
message_severity: vk::DebugUtilsMessageSeverityFlagsEXT::all(),
message_type: vk::DebugUtilsMessageTypeFlagsEXT::all(),
pfn_user_callback: Some(debug_utils_messenger_callback),
p_user_data: ptr::null_mut(),
};
let handle = unsafe { ext.create_debug_utils_messenger(&info, None) }.unwrap();
Some(DebugMessenger::Utils(ext, handle))
} else if instance_extensions.iter().any(|props| unsafe {
CStr::from_ptr(props.extension_name.as_ptr()) == DebugReport::name()
}) {
let ext = DebugReport::new(entry, &instance);
let info = vk::DebugReportCallbackCreateInfoEXT {
s_type: vk::StructureType::DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
p_next: ptr::null(),
flags: vk::DebugReportFlagsEXT::all(),
pfn_callback: Some(debug_report_callback),
p_user_data: ptr::null_mut(),
};
let handle = unsafe { ext.create_debug_report_callback(&info, None) }.unwrap();
Some(DebugMessenger::Report(ext, handle))
} else {
None
}
};
#[cfg(not(debug_assertions))]
let debug_messenger = None;
Ok(Instance {
raw: Arc::new(RawInstance(instance, debug_messenger)),
extensions,
})
}
fn enumerate_adapters(&self) -> Vec<adapter::Adapter<Backend>> {
let devices = match unsafe { self.raw.0.enumerate_physical_devices() } {
Ok(devices) => devices,
Err(err) => {
error!("Could not enumerate physical devices! {}", err);
vec![]
}
};
devices
.into_iter()
.map(|device| {
let extensions =
unsafe { self.raw.0.enumerate_device_extension_properties(device) }.unwrap();
let properties = unsafe { self.raw.0.get_physical_device_properties(device) };
let info = adapter::AdapterInfo {
name: unsafe {
CStr::from_ptr(properties.device_name.as_ptr()) | .to_owned()
},
vendor: properties.vendor_id as usize,
device: properties.device_id as usize,
device_type: match properties.device_type {
ash::vk::PhysicalDeviceType::OTHER => adapter::DeviceType::Other,
ash::vk::PhysicalDeviceType::INTEGRATED_GPU => {
adapter::DeviceType::IntegratedGpu
}
ash::vk::PhysicalDeviceType::DISCRETE_GPU => {
adapter::DeviceType::DiscreteGpu
}
ash::vk::PhysicalDeviceType::VIRTUAL_GPU => adapter::DeviceType::VirtualGpu,
ash::vk::PhysicalDeviceType::CPU => adapter::DeviceType::Cpu,
_ => adapter::DeviceType::Other,
},
};
let physical_device = PhysicalDevice {
instance: self.raw.clone(),
handle: device,
extensions,
properties,
};
let queue_families = unsafe {
self.raw
.0
.get_physical_device_queue_family_properties(device)
.into_iter()
.enumerate()
.map(|(i, properties)| QueueFamily {
properties,
device,
index: i as u32,
})
.collect()
};
adapter::Adapter {
info,
physical_device,
queue_families,
}
})
.collect()
}
unsafe fn create_surface(
&self,
has_handle: &impl raw_window_handle::HasRawWindowHandle,
) -> Result<window::Surface, hal::window::InitError> {
use raw_window_handle::RawWindowHandle;
match has_handle.raw_window_handle() {
#[cfg(all(
unix,
not(target_os = "android"),
not(target_os = "macos"),
not(target_os = "solaris")
))]
RawWindowHandle::Wayland(handle)
if self
.extensions
.contains(&extensions::khr::WaylandSurface::name()) =>
{
Ok(self.create_surface_from_wayland(handle.display, handle.surface))
}
#[cfg(all(
feature = "x11",
unix,
not(target_os = "android"),
not(target_os = "macos"),
not(target_os = "solaris")
))]
RawWindowHandle::Xlib(handle)
if self
.extensions
.contains(&extensions::khr::XlibSurface::name()) =>
{
Ok(self.create_surface_from_xlib(handle.display as *mut _, handle.window))
}
#[cfg(all(
feature = "xcb",
unix,
not(target_os = "android"),
not(target_os = "macos"),
not(target_os = "ios")
))]
RawWindowHandle::Xcb(handle)
if self
.extensions
.contains(&extensions::khr::XcbSurface::name()) =>
{
Ok(self.create_surface_from_xcb(handle.connection as *mut _, handle.window))
}
#[cfg(target_os = "android")]
RawWindowHandle::Android(handle) => {
Ok(self.create_surface_android(handle.a_native_window))
}
#[cfg(windows)]
RawWindowHandle::Windows(handle) => {
use winapi::um::libloaderapi::GetModuleHandleW;
let hinstance = GetModuleHandleW(ptr::null());
Ok(self.create_surface_from_hwnd(hinstance as *mut _, handle.hwnd))
}
#[cfg(target_os = "macos")]
RawWindowHandle::MacOS(handle) => Ok(self.create_surface_from_ns_view(handle.ns_view)),
_ => Err(hal::window::InitError::UnsupportedWindowHandle),
}
}
unsafe fn destroy_surface(&self, surface: window::Surface) {
surface
.raw
.functor
.destroy_surface(surface.raw.handle, None);
}
}
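// A rough usage sketch (illustrative only, names chosen for the example): typical
// backend bring-up is
//     let instance = Instance::create("example", 1).expect("unsupported backend");
//     let adapters = instance.enumerate_adapters();
// followed by opening a `Device` from one of the adapters' queue families.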
#[derive(Debug, Clone)]
pub struct QueueFamily {
properties: vk::QueueFamilyProperties,
device: vk::PhysicalDevice,
index: u32,
}
impl queue::QueueFamily for QueueFamily {
fn queue_type(&self) -> queue::QueueType {
map_queue_type(self.properties.queue_flags)
}
fn max_queues(&self) -> usize {
self.properties.queue_count as _
}
fn id(&self) -> queue::QueueFamilyId {
queue::QueueFamilyId(self.index as _)
}
}
pub struct PhysicalDevice {
instance: Arc<RawInstance>,
handle: vk::PhysicalDevice,
extensions: Vec<vk::ExtensionProperties>,
properties: vk::PhysicalDeviceProperties,
}
impl PhysicalDevice {
fn supports_extension(&self, extension: &CStr) -> bool {
self.extensions
.iter()
.any(|ep| unsafe { CStr::from_ptr(ep.extension_name.as_ptr()) } == extension)
}
}
impl fmt::Debug for PhysicalDevice {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("PhysicalDevice")
}
}
impl adapter::PhysicalDevice<Backend> for PhysicalDevice {
unsafe fn open(
&self,
families: &[(&QueueFamily, &[queue::QueuePriority])],
requested_features: Features,
) -> Result<adapter::Gpu<Backend>, DeviceCreationError> {
let family_infos = families
.iter()
.map(|&(family, priorities)| vk::DeviceQueueCreateInfo {
s_type: vk::StructureType::DEVICE_QUEUE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::DeviceQueueCreateFlags::empty(),
queue_family_index: family.index,
queue_count: priorities.len() as _,
p_queue_priorities: priorities.as_ptr(),
})
.collect::<Vec<_>>();
if !self.features().contains(requested_features) {
return Err(DeviceCreationError::MissingFeature);
}
let maintenance_level = if self.supports_extension(*KHR_MAINTENANCE1) { 1 } else { 0 };
let enabled_features = conv::map_device_features(requested_features);
let enabled_extensions = DEVICE_EXTENSIONS
.iter()
.cloned()
.chain(
if requested_features.contains(Features::NDC_Y_UP) && maintenance_level == 0 {
Some(*AMD_NEGATIVE_VIEWPORT_HEIGHT)
} else {
None
},
)
.chain(
match maintenance_level {
0 => None,
1 => Some(*KHR_MAINTENANCE1),
_ => unreachable!(),
}
);
// Create device
let device_raw = {
let cstrings = enabled_extensions.map(CString::from).collect::<Vec<_>>();
let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::<Vec<_>>();
let info = vk::DeviceCreateInfo {
s_type: vk::StructureType::DEVICE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::DeviceCreateFlags::empty(),
queue_create_info_count: family_infos.len() as u32,
p_queue_create_infos: family_infos.as_ptr(),
enabled_layer_count: 0,
pp_enabled_layer_names: ptr::null(),
enabled_extension_count: str_pointers.len() as u32,
pp_enabled_extension_names: str_pointers.as_ptr(),
p_enabled_features: &enabled_features,
};
match self.instance.0.create_device(self.handle, &info, None) {
Ok(device) => device,
Err(e) => {
return Err(match e {
vk::Result::ERROR_OUT_OF_HOST_MEMORY => {
DeviceCreationError::OutOfMemory(OutOfMemory::Host)
}
vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
DeviceCreationError::OutOfMemory(OutOfMemory::Device)
}
vk::Result::ERROR_INITIALIZATION_FAILED => {
DeviceCreationError::InitializationFailed
}
vk::Result::ERROR_DEVICE_LOST => DeviceCreationError::DeviceLost,
vk::Result::ERROR_TOO_MANY_OBJECTS => DeviceCreationError::TooManyObjects,
_ => unreachable!(),
})
}
}
};
let swapchain_fn = vk::KhrSwapchainFn::load(|name| {
mem::transmute(
self.instance
.0
.get_device_proc_addr(device_raw.handle(), name.as_ptr()),
)
});
let device = Device {
shared: Arc::new(RawDevice {
raw: device_raw,
features: requested_features,
instance: Arc::clone(&self.instance),
maintenance_level,
}),
vendor_id: self.properties.vendor_id,
};
let device_arc = Arc::clone(&device.shared);
let queue_groups = families
.into_iter()
.map(|&(family, ref priorities)| {
let mut family_raw =
queue::QueueGroup::new(queue::QueueFamilyId(family.index as usize));
for id in 0 .. priorities.len() {
let queue_raw = device_arc.raw.get_device_queue(family.index, id as _);
family_raw.add_queue(CommandQueue {
raw: Arc::new(queue_raw),
device: device_arc.clone(),
swapchain_fn: swapchain_fn.clone(),
});
}
family_raw
})
.collect();
Ok(adapter::Gpu {
device,
queue_groups,
})
}
fn format_properties(&self, format: Option<format::Format>) -> format::Properties {
let properties = unsafe {
self.instance.0.get_physical_device_format_properties(
self.handle,
format.map_or(vk::Format::UNDEFINED, conv::map_format),
)
};
format::Properties {
linear_tiling: conv::map_image_features(properties.linear_tiling_features),
optimal_tiling: conv::map_image_features(properties.optimal_tiling_features),
buffer_features: conv::map_buffer_features(properties.buffer_features),
}
}
fn image_format_properties(
&self,
format: format::Format,
dimensions: u8,
tiling: image::Tiling,
usage: image::Usage,
view_caps: image::ViewCapabilities,
) -> Option<image::FormatProperties> {
let format_properties = unsafe {
self.instance.0.get_physical_device_image_format_properties(
self.handle,
conv::map_format(format),
match dimensions {
1 => vk::ImageType::TYPE_1D,
2 => vk::ImageType::TYPE_2D,
3 => vk::ImageType::TYPE_3D,
_ => panic!("Unexpected image dimensionality: {}", dimensions),
},
conv::map_tiling(tiling),
conv::map_image_usage(usage),
conv::map_view_capabilities(view_caps),
)
};
match format_properties {
Ok(props) => Some(image::FormatProperties {
max_extent: image::Extent {
width: props.max_extent.width,
height: props.max_extent.height,
depth: props.max_extent.depth,
},
max_levels: props.max_mip_levels as _,
max_layers: props.max_array_layers as _,
sample_count_mask: props.sample_counts.as_raw() as _,
max_resource_size: props.max_resource_size as _,
}),
Err(vk::Result::ERROR_FORMAT_NOT_SUPPORTED) => None,
Err(other) => {
error!("Unexpected error in `image_format_properties`: {:?}", other);
None
}
}
}
fn memory_properties(&self) -> adapter::MemoryProperties {
let mem_properties = unsafe {
self.instance
.0
.get_physical_device_memory_properties(self.handle)
};
let memory_heaps = mem_properties.memory_heaps
[.. mem_properties.memory_heap_count as usize]
.iter()
.map(|mem| mem.size)
.collect();
let memory_types = mem_properties.memory_types
[.. mem_properties.memory_type_count as usize]
.iter()
.map(|mem| {
use crate::memory::Properties;
let mut type_flags = Properties::empty();
if mem
.property_flags
.intersects(vk::MemoryPropertyFlags::DEVICE_LOCAL)
{
type_flags |= Properties::DEVICE_LOCAL;
}
if mem
.property_flags
.intersects(vk::MemoryPropertyFlags::HOST_VISIBLE)
{
type_flags |= Properties::CPU_VISIBLE;
}
if mem
.property_flags
.intersects(vk::MemoryPropertyFlags::HOST_COHERENT)
{
type_flags |= Properties::COHERENT;
}
if mem
.property_flags
.intersects(vk::MemoryPropertyFlags::HOST_CACHED)
{
type_flags |= Properties::CPU_CACHED;
}
if mem
.property_flags
.intersects(vk::MemoryPropertyFlags::LAZILY_ALLOCATED)
{
type_flags |= Properties::LAZILY_ALLOCATED;
}
adapter::MemoryType {
properties: type_flags,
heap_index: mem.heap_index as usize,
}
})
.collect();
adapter::MemoryProperties {
memory_heaps,
memory_types,
}
}
fn features(&self) -> Features {
// see https://github.com/gfx-rs/gfx/issues/1930
let is_windows_intel_dual_src_bug = cfg!(windows)
&& self.properties.vendor_id == info::intel::VENDOR
&& (self.properties.device_id & info::intel::DEVICE_KABY_LAKE_MASK
== info::intel::DEVICE_KABY_LAKE_MASK
|| self.properties.device_id & info::intel::DEVICE_SKY_LAKE_MASK
== info::intel::DEVICE_SKY_LAKE_MASK);
let features = unsafe { self.instance.0.get_physical_device_features(self.handle) };
let mut bits = Features::empty()
| Features::TRIANGLE_FAN
| Features::SEPARATE_STENCIL_REF_VALUES
| Features::SAMPLER_MIP_LOD_BIAS;
if self.supports_extension(*AMD_NEGATIVE_VIEWPORT_HEIGHT)
|| self.supports_extension(*KHR_MAINTENANCE1)
{
bits |= Features::NDC_Y_UP;
}
if self.supports_extension(*KHR_SAMPLER_MIRROR_MIRROR_CLAMP_TO_EDGE) {
bits |= Features::SAMPLER_MIRROR_CLAMP_EDGE;
}
if features.robust_buffer_access != 0 {
bits |= Features::ROBUST_BUFFER_ACCESS;
}
if features.full_draw_index_uint32 != 0 {
bits |= Features::FULL_DRAW_INDEX_U32;
}
if features.image_cube_array != 0 {
bits |= Features::IMAGE_CUBE_ARRAY;
}
if features.independent_blend != 0 {
bits |= Features::INDEPENDENT_BLENDING;
}
if features.geometry_shader != 0 {
bits |= Features::GEOMETRY_SHADER;
}
if features.tessellation_shader != 0 {
bits |= Features::TESSELLATION_SHADER;
}
if features.sample_rate_shading != 0 {
bits |= Features::SAMPLE_RATE_SHADING;
}
if features.dual_src_blend != 0 && !is_windows_intel_dual_src_bug {
bits |= Features::DUAL_SRC_BLENDING;
}
if features.logic_op != 0 {
bits |= Features::LOGIC_OP;
}
if features.multi_draw_indirect != 0 {
bits |= Features::MULTI_DRAW_INDIRECT;
}
if features.draw_indirect_first_instance != 0 {
bits |= Features::DRAW_INDIRECT_FIRST_INSTANCE;
}
if features.depth_clamp != 0 {
bits |= Features::DEPTH_CLAMP;
}
if features.depth_bias_clamp != 0 {
bits |= Features::DEPTH_BIAS_CLAMP;
}
if features.fill_mode_non_solid != 0 {
bits |= Features::NON_FILL_POLYGON_MODE;
}
if features.depth_bounds != 0 {
bits |= Features::DEPTH_BOUNDS;
}
if features.wide_lines != 0 {
bits |= Features::LINE_WIDTH;
}
if features.large_points != 0 {
bits |= Features::POINT_SIZE;
}
if features.alpha_to_one != 0 {
bits |= Features::ALPHA_TO_ONE;
}
if features.multi_viewport != 0 {
bits |= Features::MULTI_VIEWPORTS;
}
if features.sampler_anisotropy != 0 {
bits |= Features::SAMPLER_ANISOTROPY;
}
if features.texture_compression_etc2 != 0 {
bits |= Features::FORMAT_ETC2;
}
if features.texture_compression_astc_ldr != 0 {
bits |= Features::FORMAT_ASTC_LDR;
}
if features.texture_compression_bc != 0 {
bits |= Features::FORMAT_BC;
}
if features.occlusion_query_precise != 0 {
bits |= Features::PRECISE_OCCLUSION_QUERY;
}
if features.pipeline_statistics_query != 0 {
bits |= Features::PIPELINE_STATISTICS_QUERY;
}
if features.vertex_pipeline_stores_and_atomics != 0 {
bits |= Features::VERTEX_STORES_AND_ATOMICS;
}
if features.fragment_stores_and_atomics != 0 {
bits |= Features::FRAGMENT_STORES_AND_ATOMICS;
}
if features.shader_tessellation_and_geometry_point_size != 0 {
bits |= Features::SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE;
}
if features.shader_image_gather_extended != 0 {
bits |= Features::SHADER_IMAGE_GATHER_EXTENDED;
}
if features.shader_storage_image_extended_formats != 0 {
bits |= Features::SHADER_STORAGE_IMAGE_EXTENDED_FORMATS;
}
if features.shader_storage_image_multisample != 0 {
bits |= Features::SHADER_STORAGE_IMAGE_MULTISAMPLE;
}
if features.shader_storage_image_read_without_format != 0 {
bits |= Features::SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT;
}
if features.shader_storage_image_write_without_format != 0 {
bits |= Features::SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT;
}
if features.shader_uniform_buffer_array_dynamic_indexing != 0 {
bits |= Features::SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING;
}
if features.shader_sampled_image_array_dynamic_indexing != 0 {
bits |= Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING;
}
if features.shader_storage_buffer_array_dynamic_indexing != 0 {
bits |= Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING;
}
if features.shader_storage_image_array_dynamic_indexing != 0 {
bits |= Features::SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING;
}
if features.shader_clip_distance != 0 {
bits |= Features::SHADER_CLIP_DISTANCE;
}
if features.shader_cull_distance != 0 {
bits |= Features::SHADER_CULL_DISTANCE;
}
if features.shader_float64 != 0 {
bits |= Features::SHADER_FLOAT64;
}
if features.shader_int64 != 0 {
bits |= Features::SHADER_INT64;
}
if features.shader_int16 != 0 {
bits |= Features::SHADER_INT16;
}
if features.shader_resource_residency != 0 {
bits |= Features::SHADER_RESOURCE_RESIDENCY;
}
if features.shader_resource_min_lod != 0 {
bits |= Features::SHADER_RESOURCE_MIN_LOD;
}
if features.sparse_binding != 0 {
bits |= Features::SPARSE_BINDING;
}
if features.sparse_residency_buffer != 0 {
bits |= Features::SPARSE_RESIDENCY_BUFFER;
}
if features.sparse_residency_image2_d != 0 {
bits |= Features::SPARSE_RESIDENCY_IMAGE_2D;
}
if features.sparse_residency_image3_d != 0 {
bits |= Features::SPARSE_RESIDENCY_IMAGE_3D;
}
if features.sparse_residency2_samples != 0 {
bits |= Features::SPARSE_RESIDENCY_2_SAMPLES;
}
if features.sparse_residency4_samples != 0 {
bits |= Features::SPARSE_RESIDENCY_4_SAMPLES;
}
if features.sparse_residency8_samples != 0 {
bits |= Features::SPARSE_RESIDENCY_8_SAMPLES;
}
if features.sparse_residency16_samples != 0 {
bits |= Features::SPARSE_RESIDENCY_16_SAMPLES;
}
if features.sparse_residency_aliased != 0 {
bits |= Features::SPARSE_RESIDENCY_ALIASED;
}
if features.variable_multisample_rate != 0 {
bits |= Features::VARIABLE_MULTISAMPLE_RATE;
}
if features.inherited_queries != 0 {
bits |= Features::INHERITED_QUERIES;
}
bits
}
fn hints(&self) -> Hints {
Hints::BASE_VERTEX_INSTANCE_DRAWING
}
fn limits(&self) -> Limits {
let limits = &self.properties.limits;
let max_group_count = limits.max_compute_work_group_count;
let max_group_size = limits.max_compute_work_group_size;
Limits {
max_image_1d_size: limits.max_image_dimension1_d,
max_image_2d_size: limits.max_image_dimension2_d,
max_image_3d_size: limits.max_image_dimension3_d,
max_image_cube_size: limits.max_image_dimension_cube,
max_image_array_layers: limits.max_image_array_layers as _,
max_texel_elements: limits.max_texel_buffer_elements as _,
max_patch_size: limits.max_tessellation_patch_size as PatchSize,
max_viewports: limits.max_viewports as _,
max_viewport_dimensions: limits.max_viewport_dimensions,
max_framebuffer_extent: image::Extent {
width: limits.max_framebuffer_width,
height: limits.max_framebuffer_height,
depth: limits.max_framebuffer_layers,
},
max_compute_work_group_count: [
max_group_count[0] as _,
max_group_count[1] as _,
max_group_count[2] as _,
],
max_compute_work_group_size: [
max_group_size[0] as _,
max_group_size[1] as _,
max_group_size[2] as _,
],
max_vertex_input_attributes: limits.max_vertex_input_attributes as _,
max_vertex_input_bindings: limits.max_vertex_input_bindings as _,
max_vertex_input_attribute_offset: limits.max_vertex_input_attribute_offset as _,
max_vertex_input_binding_stride: limits.max_vertex_input_binding_stride as _,
max_vertex_output_components: limits.max_vertex_output_components as _,
optimal_buffer_copy_offset_alignment: limits.optimal_buffer_copy_offset_alignment as _,
optimal_buffer_copy_pitch_alignment: limits.optimal_buffer_copy_row_pitch_alignment
as _,
min_texel_buffer_offset_alignment: limits.min_texel_buffer_offset_alignment as _,
min_uniform_buffer_offset_alignment: limits.min_uniform_buffer_offset_alignment as _,
min_storage_buffer_offset_alignment: limits.min_storage_buffer_offset_alignment as _,
framebuffer_color_sample_counts: limits.framebuffer_color_sample_counts.as_raw() as _,
framebuffer_depth_sample_counts: limits.framebuffer_depth_sample_counts.as_raw() as _,
framebuffer_stencil_sample_counts: limits.framebuffer_stencil_sample_counts.as_raw()
as _,
max_color_attachments: limits.max_color_attachments as _,
buffer_image_granularity: limits.buffer_image_granularity,
non_coherent_atom_size: limits.non_coherent_atom_size as _,
max_sampler_anisotropy: limits.max_sampler_anisotropy,
min_vertex_input_binding_stride_alignment: 1,
max_bound_descriptor_sets: limits.max_bound_descriptor_sets as _,
max_compute_shared_memory_size: limits.max_compute_shared_memory_size as _,
max_compute_work_group_invocations: limits.max_compute_work_group_invocations as _,
max_descriptor_set_input_attachments: limits.max_descriptor_set_input_attachments as _,
max_descriptor_set_sampled_images: limits.max_descriptor_set_sampled_images as _,
max_descriptor_set_samplers: limits.max_descriptor_set_samplers as _,
max_descriptor_set_storage_buffers: limits.max_descriptor_set_storage_buffers as _,
max_descriptor_set_storage_buffers_dynamic: limits
.max_descriptor_set_storage_buffers_dynamic
as _,
max_descriptor_set_storage_images: limits.max_descriptor_set_storage_images as _,
max_descriptor_set_uniform_buffers: limits.max_descriptor_set_uniform_buffers as _,
max_descriptor_set_uniform_buffers_dynamic: limits
.max_descriptor_set_uniform_buffers_dynamic
as _,
max_draw_indexed_index_value: limits.max_draw_indexed_index_value,
max_draw_indirect_count: limits.max_draw_indirect_count,
max_fragment_combined_output_resources: limits.max_fragment_combined_output_resources
as _,
max_fragment_dual_source_attachments: limits.max_fragment_dual_src_attachments as _,
max_fragment_input_components: limits.max_fragment_input_components as _,
max_fragment_output_attachments: limits.max_fragment_output_attachments as _,
max_framebuffer_layers: limits.max_framebuffer_layers as _,
max_geometry_input_components: limits.max_geometry_input_components as _,
max_geometry_output_components: limits.max_geometry_output_components as _,
max_geometry_output_vertices: limits.max_geometry_output_vertices as _,
max_geometry_shader_invocations: limits.max_geometry_shader_invocations as _,
max_geometry_total_output_components: limits.max_geometry_total_output_components as _,
max_memory_allocation_count: limits.max_memory_allocation_count as _,
max_per_stage_descriptor_input_attachments: limits
.max_per_stage_descriptor_input_attachments
as _,
max_per_stage_descriptor_sampled_images: limits.max_per_stage_descriptor_sampled_images
as _,
max_per_stage_descriptor_samplers: limits.max_per_stage_descriptor_samplers as _,
max_per_stage_descriptor_storage_buffers: limits
.max_per_stage_descriptor_storage_buffers
as _,
max_per_stage_descriptor_storage_images: limits.max_per_stage_descriptor_storage_images
as _,
max_per_stage_descriptor_uniform_buffers: limits
.max_per_stage_descriptor_uniform_buffers
as _,
max_per_stage_resources: limits.max_per_stage_resources as _,
max_push_constants_size: limits.max_push_constants_size as _,
max_sampler_allocation_count: limits.max_sampler_allocation_count as _,
max_sampler_lod_bias: limits.max_sampler_lod_bias as _,
max_storage_buffer_range: limits.max_storage_buffer_range as _,
max_uniform_buffer_range: limits.max_uniform_buffer_range as _,
min_memory_map_alignment: limits.min_memory_map_alignment,
standard_sample_locations: limits.standard_sample_locations == ash::vk::TRUE,
}
}
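/// Validate a serialized pipeline cache blob against this physical device.
///
/// A Vulkan pipeline cache starts with a header of four little-endian `u32`s (header
/// length, header version, vendor ID, device ID) followed by `VK_UUID_SIZE` bytes of
/// pipeline cache UUID; all of them must match the current adapter.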
fn is_valid_cache(&self, cache: &[u8]) -> bool {
const HEADER_SIZE: usize = 16 + vk::UUID_SIZE;
if cache.len() < HEADER_SIZE {
warn!("Bad cache data length {:?}", cache.len());
return false;
}
let header_len = u32::from_le_bytes([cache[0], cache[1], cache[2], cache[3]]);
let header_version = u32::from_le_bytes([cache[4], cache[5], cache[6], cache[7]]);
let vendor_id = u32::from_le_bytes([cache[8], cache[9], cache[10], cache[11]]);
let device_id = u32::from_le_bytes([cache[12], cache[13], cache[14], cache[15]]);
// header length
if (header_len as usize) < HEADER_SIZE {
warn!("Bad header length {:?}", header_len);
return false;
}
// cache header version
if header_version != vk::PipelineCacheHeaderVersion::ONE.as_raw() as u32 {
warn!("Unsupported cache header version: {:?}", header_version);
return false;
}
// vendor id
if vendor_id != self.properties.vendor_id {
warn!(
"Vendor ID mismatch. Device: {:?}, cache: {:?}.",
self.properties.vendor_id, vendor_id,
);
return false;
}
// device id
if device_id != self.properties.device_id {
warn!(
"Device ID mismatch. Device: {:?}, cache: {:?}.",
self.properties.device_id, device_id,
);
return false;
}
if self.properties.pipeline_cache_uuid != cache[16 .. 16 + vk::UUID_SIZE] {
warn!(
"Pipeline cache UUID mismatch. Device: {:?}, cache: {:?}.",
self.properties.pipeline_cache_uuid,
&cache[16 .. 16 + vk::UUID_SIZE],
);
return false;
}
true
}
}
#[doc(hidden)]
pub struct RawDevice {
raw: ash::Device,
features: Features,
instance: Arc<RawInstance>,
maintenance_level: u8,
}
impl fmt::Debug for RawDevice {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RawDevice") // TODO: Real Debug impl
}
}
impl Drop for RawDevice {
fn drop(&mut self) {
unsafe {
self.raw.destroy_device(None);
}
}
}
impl RawDevice {
fn debug_messenger(&self) -> Option<&DebugMessenger> {
self.instance.1.as_ref()
}
fn map_viewport(&self, rect: &hal::pso::Viewport) -> vk::Viewport {
let flip_y = self.features.contains(hal::Features::NDC_Y_UP);
let shift_y = flip_y && self.maintenance_level != 0;
conv::map_viewport(rect, flip_y, shift_y)
}
}
// Need to explicitly synchronize on submission and present.
pub type RawCommandQueue = Arc<vk::Queue>;
pub struct CommandQueue {
raw: RawCommandQueue,
device: Arc<RawDevice>,
swapchain_fn: vk::KhrSwapchainFn,
}
impl fmt::Debug for CommandQueue {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("CommandQueue")
}
}
impl queue::CommandQueue<Backend> for CommandQueue {
unsafe fn submit<'a, T, Ic, S, Iw, Is>(
&mut self,
submission: queue::Submission<Ic, Iw, Is>,
fence: Option<&native::Fence>,
) where
T: 'a + Borrow<command::CommandBuffer>,
Ic: IntoIterator<Item = &'a T>,
S: 'a + Borrow<native::Semaphore>,
Iw: IntoIterator<Item = (&'a S, PipelineStage)>,
Is: IntoIterator<Item = &'a S>,
{
//TODO: avoid heap allocations
let mut waits = Vec::new();
let mut stages = Vec::new();
let buffers = submission
.command_buffers
.into_iter()
.map(|cmd| cmd.borrow().raw)
.collect::<Vec<_>>();
for (semaphore, stage) in submission.wait_semaphores {
waits.push(semaphore.borrow().0);
stages.push(conv::map_pipeline_stage(stage));
}
let signals = submission
.signal_semaphores
.into_iter()
.map(|semaphore| semaphore.borrow().0)
.collect::<Vec<_>>();
let info = vk::SubmitInfo {
s_type: vk::StructureType::SUBMIT_INFO,
p_next: ptr::null(),
wait_semaphore_count: waits.len() as u32,
p_wait_semaphores: waits.as_ptr(),
// If count is zero, AMD driver crashes if nullptr is not set for stage masks
p_wait_dst_stage_mask: if stages.is_empty() {
ptr::null()
} else {
stages.as_ptr()
},
command_buffer_count: buffers.len() as u32,
p_command_buffers: buffers.as_ptr(),
signal_semaphore_count: signals.len() as u32,
p_signal_semaphores: signals.as_ptr(),
};
let fence_raw = fence.map(|fence| fence.0).unwrap_or(vk::Fence::null());
let result = self.device.raw.queue_submit(*self.raw, &[info], fence_raw);
assert_eq!(Ok(()), result);
}
unsafe fn present<'a, W, Is, S, Iw>(
&mut self,
swapchains: Is,
wait_semaphores: Iw,
) -> Result<Option<Suboptimal>, PresentError>
where
W: 'a + Borrow<window::Swapchain>,
Is: IntoIterator<Item = (&'a W, SwapImageIndex)>,
S: 'a + Borrow<native::Semaphore>,
Iw: IntoIterator<Item = &'a S>,
{
let semaphores = wait_semaphores
.into_iter()
.map(|sem| sem.borrow().0)
.collect::<Vec<_>>();
let mut frames = Vec::new();
let mut vk_swapchains = Vec::new();
for (swapchain, index) in swapchains {
vk_swapchains.push(swapchain.borrow().raw);
frames.push(index);
}
let info = vk::PresentInfoKHR {
s_type: vk::StructureType::PRESENT_INFO_KHR,
p_next: ptr::null(),
wait_semaphore_count: semaphores.len() as _,
p_wait_semaphores: semaphores.as_ptr(),
swapchain_count: vk_swapchains.len() as _,
p_swapchains: vk_swapchains.as_ptr(),
p_image_indices: frames.as_ptr(),
p_results: ptr::null_mut(),
};
match self.swapchain_fn.queue_present_khr(*self.raw, &info) {
vk::Result::SUCCESS => Ok(None),
vk::Result::SUBOPTIMAL_KHR => Ok(Some(Suboptimal)),
vk::Result::ERROR_OUT_OF_HOST_MEMORY => {
Err(PresentError::OutOfMemory(OutOfMemory::Host))
}
vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
Err(PresentError::OutOfMemory(OutOfMemory::Device))
}
vk::Result::ERROR_DEVICE_LOST => Err(PresentError::DeviceLost(DeviceLost)),
vk::Result::ERROR_OUT_OF_DATE_KHR => Err(PresentError::OutOfDate),
vk::Result::ERROR_SURFACE_LOST_KHR => Err(PresentError::SurfaceLost(SurfaceLost)),
_ => panic!("Failed to present frame"),
}
}
unsafe fn present_surface(
&mut self,
surface: &mut window::Surface,
image: window::SurfaceImage,
wait_semaphore: Option<&native::Semaphore>,
) -> Result<Option<Suboptimal>, PresentError> {
let ssc = surface.swapchain.as_ref().unwrap();
let p_wait_semaphores = if let Some(wait_semaphore) = wait_semaphore {
&wait_semaphore.0
} else {
let submit_info = vk::SubmitInfo {
s_type: vk::StructureType::SUBMIT_INFO,
p_next: ptr::null(),
wait_semaphore_count: 0,
p_wait_semaphores: ptr::null(),
p_wait_dst_stage_mask: &vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT,
command_buffer_count: 0,
p_command_buffers: ptr::null(),
signal_semaphore_count: 1,
p_signal_semaphores: &ssc.semaphore.0,
};
self.device
.raw
.queue_submit(*self.raw, &[submit_info], vk::Fence::null())
.unwrap();
&ssc.semaphore.0
};
let present_info = vk::PresentInfoKHR {
s_type: vk::StructureType::PRESENT_INFO_KHR,
p_next: ptr::null(),
wait_semaphore_count: 1,
p_wait_semaphores,
swapchain_count: 1,
p_swapchains: &ssc.swapchain.raw,
p_image_indices: &image.index,
p_results: ptr::null_mut(),
};
match self
.swapchain_fn
.queue_present_khr(*self.raw, &present_info)
{
vk::Result::SUCCESS => Ok(None),
vk::Result::SUBOPTIMAL_KHR => Ok(Some(Suboptimal)),
vk::Result::ERROR_OUT_OF_HOST_MEMORY => {
Err(PresentError::OutOfMemory(OutOfMemory::Host))
}
vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
Err(PresentError::OutOfMemory(OutOfMemory::Device))
}
vk::Result::ERROR_DEVICE_LOST => Err(PresentError::DeviceLost(DeviceLost)),
vk::Result::ERROR_OUT_OF_DATE_KHR => Err(PresentError::OutOfDate),
vk::Result::ERROR_SURFACE_LOST_KHR => Err(PresentError::SurfaceLost(SurfaceLost)),
_ => panic!("Failed to present frame"),
}
}
fn wait_idle(&self) -> Result<(), OutOfMemory> {
match unsafe { self.device.raw.queue_wait_idle(*self.raw) } {
Ok(()) => Ok(()),
Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(OutOfMemory::Host),
Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(OutOfMemory::Device),
Err(_) => unreachable!(),
}
}
}
#[derive(Debug)]
pub struct Device {
shared: Arc<RawDevice>,
vendor_id: u32,
}
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub enum Backend {}
impl hal::Backend for Backend {
type Instance = Instance;
type PhysicalDevice = PhysicalDevice;
type Device = Device;
type Surface = window::Surface;
type Swapchain = window::Swapchain;
type QueueFamily = QueueFamily;
type CommandQueue = CommandQueue;
type CommandBuffer = command::CommandBuffer;
type Memory = native::Memory;
type CommandPool = pool::RawCommandPool;
type ShaderModule = native::ShaderModule;
type RenderPass = native::RenderPass;
type Framebuffer = native::Framebuffer;
type Buffer = native::Buffer;
type BufferView = native::BufferView;
type Image = native::Image;
type ImageView = native::ImageView;
type Sampler = native::Sampler;
type ComputePipeline = native::ComputePipeline;
type GraphicsPipeline = native::GraphicsPipeline;
type PipelineLayout = native::PipelineLayout;
type PipelineCache = native::PipelineCache;
type DescriptorSetLayout = native::DescriptorSetLayout;
type DescriptorPool = native::DescriptorPool;
type DescriptorSet = native::DescriptorSet;
type Fence = native::Fence;
type Semaphore = native::Semaphore;
type Event = native::Event;
type QueryPool = native::QueryPool;
} | .to_str()
.unwrap_or("Unknown") |
expr.rs | //! Type inference for expressions.
use std::iter::{repeat, repeat_with};
use std::{mem, sync::Arc};
use chalk_ir::{cast::Cast, fold::Shift, Mutability, TyVariableKind};
use hir_def::{
expr::{Array, BinaryOp, Expr, ExprId, Literal, Statement, UnaryOp},
path::{GenericArg, GenericArgs},
resolver::resolver_for_expr,
AssocContainerId, FieldId, Lookup,
};
use hir_expand::name::{name, Name};
use stdx::always;
use syntax::ast::RangeOp;
use crate::{
autoderef, consteval,
lower::lower_to_chalk_mutability,
mapping::from_chalk,
method_resolution, op,
primitive::{self, UintTy},
static_lifetime, to_chalk_trait_id,
traits::FnTrait,
utils::{generics, Generics},
AdtId, Binders, CallableDefId, FnPointer, FnSig, FnSubst, InEnvironment, Interner,
ProjectionTyExt, Rawness, Scalar, Substitution, TraitRef, Ty, TyBuilder, TyExt, TyKind,
};
use super::{
find_breakable, BindingMode, BreakableContext, Diverges, Expectation, InferenceContext,
InferenceDiagnostic, TypeMismatch,
};
impl<'a> InferenceContext<'a> {
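/// Infer the type of `tgt_expr`, recording a `TypeMismatch` if the result cannot be
/// unified with the expected type.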
pub(super) fn infer_expr(&mut self, tgt_expr: ExprId, expected: &Expectation) -> Ty {
let ty = self.infer_expr_inner(tgt_expr, expected);
if self.resolve_ty_shallow(&ty).is_never() {
// Any expression that produces a value of type `!` must have diverged
self.diverges = Diverges::Always;
}
if let Some(expected_ty) = expected.only_has_type(&mut self.table) {
let could_unify = self.unify(&ty, &expected_ty);
if !could_unify {
self.result.type_mismatches.insert(
tgt_expr.into(),
TypeMismatch { expected: expected_ty.clone(), actual: ty.clone() },
);
}
}
ty
}
/// Infer type of expression with possibly implicit coerce to the expected type.
/// Return the type after possible coercion.
pub(super) fn infer_expr_coerce(&mut self, expr: ExprId, expected: &Expectation) -> Ty {
let ty = self.infer_expr_inner(expr, &expected);
let ty = if let Some(target) = expected.only_has_type(&mut self.table) {
if !self.coerce(&ty, &target) {
self.result.type_mismatches.insert(
expr.into(),
TypeMismatch { expected: target.clone(), actual: ty.clone() },
);
// Return the actual type on mismatch.
// This is needed to produce a useful diagnostic when the return type does not match.
ty
} else {
target.clone()
}
} else {
ty
};
ty
}
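/// Derive a callable signature from an `FnOnce` implementation: build a tuple of fresh
/// inference variables for the arguments, project the `Output` associated type, and
/// accept the signature only if the trait solver can prove the obligation.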
fn callable_sig_from_fn_trait(&mut self, ty: &Ty, num_args: usize) -> Option<(Vec<Ty>, Ty)> {
let krate = self.resolver.krate()?;
let fn_once_trait = FnTrait::FnOnce.get_id(self.db, krate)?;
let output_assoc_type =
self.db.trait_data(fn_once_trait).associated_type_by_name(&name![Output])?;
let mut arg_tys = vec![];
let arg_ty = TyBuilder::tuple(num_args)
.fill(repeat_with(|| {
let arg = self.table.new_type_var();
arg_tys.push(arg.clone());
arg
}))
.build();
let projection = {
let b = TyBuilder::assoc_type_projection(self.db, output_assoc_type);
if b.remaining() != 2 {
return None;
}
b.push(ty.clone()).push(arg_ty).build()
};
let trait_env = self.trait_env.env.clone();
let obligation = InEnvironment {
goal: projection.trait_ref(self.db).cast(&Interner),
environment: trait_env,
};
let canonical = self.canonicalize(obligation.clone());
if self.db.trait_solve(krate, canonical.value.cast(&Interner)).is_some() {
self.push_obligation(obligation.goal);
let return_ty = self.table.normalize_projection_ty(projection);
Some((arg_tys, return_ty))
} else {
None
}
}
pub(crate) fn callable_sig(&mut self, ty: &Ty, num_args: usize) -> Option<(Vec<Ty>, Ty)> {
match ty.callable_sig(self.db) {
Some(sig) => Some((sig.params().to_vec(), sig.ret().clone())),
None => self.callable_sig_from_fn_trait(ty, num_args),
}
}
fn infer_expr_inner(&mut self, tgt_expr: ExprId, expected: &Expectation) -> Ty |
fn infer_block(
&mut self,
statements: &[Statement],
tail: Option<ExprId>,
expected: &Expectation,
) -> Ty {
for stmt in statements {
match stmt {
Statement::Let { pat, type_ref, initializer } => {
let decl_ty =
type_ref.as_ref().map(|tr| self.make_ty(tr)).unwrap_or(self.err_ty());
// Always use the declared type when specified
let mut ty = decl_ty.clone();
if let Some(expr) = initializer {
let actual_ty =
self.infer_expr_coerce(*expr, &Expectation::has_type(decl_ty.clone()));
if decl_ty.is_unknown() {
ty = actual_ty;
}
}
self.infer_pat(*pat, &ty, BindingMode::default());
}
Statement::Expr { expr, .. } => {
self.infer_expr(*expr, &Expectation::none());
}
}
}
let ty = if let Some(expr) = tail {
self.infer_expr_coerce(expr, expected)
} else {
// Citing rustc: if there is no explicit tail expression,
// that is typically equivalent to a tail expression
// of `()` -- except if the block diverges. In that
// case, there is no value supplied from the tail
// expression (assuming there are no other breaks,
// this implies that the type of the block will be
// `!`).
if self.diverges.is_always() {
// we don't even make an attempt at coercion
self.table.new_maybe_never_var()
} else {
if let Some(t) = expected.only_has_type(&mut self.table) {
self.coerce(&TyBuilder::unit(), &t);
}
TyBuilder::unit()
}
};
ty
}
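/// Infer a method call: resolve the method on the autoderefed receiver, build the
/// substitution, autoref the receiver to match the resolved signature, then check the
/// remaining arguments against the parameter types.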
fn infer_method_call(
&mut self,
tgt_expr: ExprId,
receiver: ExprId,
args: &[ExprId],
method_name: &Name,
generic_args: Option<&GenericArgs>,
) -> Ty {
let receiver_ty = self.infer_expr(receiver, &Expectation::none());
let canonicalized_receiver = self.canonicalize(receiver_ty.clone());
let traits_in_scope = self.resolver.traits_in_scope(self.db.upcast());
let resolved = self.resolver.krate().and_then(|krate| {
method_resolution::lookup_method(
&canonicalized_receiver.value,
self.db,
self.trait_env.clone(),
krate,
&traits_in_scope,
self.resolver.module(),
method_name,
)
});
let (derefed_receiver_ty, method_ty, substs) = match resolved {
Some((ty, func)) => {
let ty = canonicalized_receiver.decanonicalize_ty(ty);
let generics = generics(self.db.upcast(), func.into());
let substs = self.substs_for_method_call(generics, generic_args, &ty);
self.write_method_resolution(tgt_expr, func, substs.clone());
(ty, self.db.value_ty(func.into()), substs)
}
None => (
receiver_ty,
Binders::empty(&Interner, self.err_ty()),
Substitution::empty(&Interner),
),
};
let method_ty = method_ty.substitute(&Interner, &substs);
self.register_obligations_for_call(&method_ty);
let (expected_receiver_ty, param_tys, ret_ty) = match method_ty.callable_sig(self.db) {
Some(sig) => {
if !sig.params().is_empty() {
(sig.params()[0].clone(), sig.params()[1..].to_vec(), sig.ret().clone())
} else {
(self.err_ty(), Vec::new(), sig.ret().clone())
}
}
None => (self.err_ty(), Vec::new(), self.err_ty()),
};
// Apply autoref so the below unification works correctly
// FIXME: return correct autorefs from lookup_method
let actual_receiver_ty = match self.resolve_ty_shallow(&expected_receiver_ty).as_reference()
{
Some((_, lifetime, mutability)) => {
TyKind::Ref(mutability, lifetime, derefed_receiver_ty).intern(&Interner)
}
_ => derefed_receiver_ty,
};
self.unify(&expected_receiver_ty, &actual_receiver_ty);
self.check_call_arguments(args, ¶m_tys);
self.normalize_associated_types_in(ret_ty)
}
fn check_call_arguments(&mut self, args: &[ExprId], param_tys: &[Ty]) {
// Quoting https://github.com/rust-lang/rust/blob/6ef275e6c3cb1384ec78128eceeb4963ff788dca/src/librustc_typeck/check/mod.rs#L3325 --
// We do this in a pretty awful way: first we type-check any arguments
// that are not closures, then we type-check the closures. This is so
// that we have more information about the types of arguments when we
// type-check the functions. This isn't really the right way to do this.
for &check_closures in &[false, true] {
let param_iter = param_tys.iter().cloned().chain(repeat(self.err_ty()));
for (&arg, param_ty) in args.iter().zip(param_iter) {
let is_closure = matches!(&self.body[arg], Expr::Lambda { .. });
if is_closure != check_closures {
continue;
}
let param_ty = self.normalize_associated_types_in(param_ty);
self.infer_expr_coerce(arg, &Expectation::has_type(param_ty.clone()));
}
}
}
fn substs_for_method_call(
&mut self,
def_generics: Generics,
generic_args: Option<&GenericArgs>,
receiver_ty: &Ty,
) -> Substitution {
let (parent_params, self_params, type_params, impl_trait_params) =
def_generics.provenance_split();
assert_eq!(self_params, 0); // method shouldn't have another Self param
let total_len = parent_params + type_params + impl_trait_params;
let mut substs = Vec::with_capacity(total_len);
// Parent arguments are unknown, except for the receiver type
for (_id, param) in def_generics.iter_parent() {
if param.provenance == hir_def::generics::TypeParamProvenance::TraitSelf {
substs.push(receiver_ty.clone());
} else {
substs.push(self.table.new_type_var());
}
}
// handle provided type arguments
if let Some(generic_args) = generic_args {
// if args are provided, it should be all of them, but we can't rely on that
for arg in generic_args
.args
.iter()
.filter(|arg| matches!(arg, GenericArg::Type(_)))
.take(type_params)
{
match arg {
GenericArg::Type(type_ref) => {
let ty = self.make_ty(type_ref);
substs.push(ty);
}
GenericArg::Lifetime(_) => {}
}
}
};
let supplied_params = substs.len();
for _ in supplied_params..total_len {
substs.push(self.table.new_type_var());
}
assert_eq!(substs.len(), total_len);
Substitution::from_iter(&Interner, substs)
}
fn register_obligations_for_call(&mut self, callable_ty: &Ty) {
let callable_ty = self.resolve_ty_shallow(&callable_ty);
if let TyKind::FnDef(fn_def, parameters) = callable_ty.kind(&Interner) {
let def: CallableDefId = from_chalk(self.db, *fn_def);
let generic_predicates = self.db.generic_predicates(def.into());
for predicate in generic_predicates.iter() {
let (predicate, binders) = predicate
.clone()
.substitute(&Interner, parameters)
.into_value_and_skipped_binders();
always!(binders.len(&Interner) == 0); // quantified where clauses not yet handled
self.push_obligation(predicate.cast(&Interner));
}
// add obligation for trait implementation, if this is a trait method
match def {
CallableDefId::FunctionId(f) => {
if let AssocContainerId::TraitId(trait_) = f.lookup(self.db.upcast()).container
{
// construct a TraitRef
let substs = crate::subst_prefix(
&*parameters,
generics(self.db.upcast(), trait_.into()).len(),
);
self.push_obligation(
TraitRef { trait_id: to_chalk_trait_id(trait_), substitution: substs }
.cast(&Interner),
);
}
}
CallableDefId::StructId(_) | CallableDefId::EnumVariantId(_) => {}
}
}
}
}
| {
self.db.check_canceled();
let body = Arc::clone(&self.body); // avoid borrow checker problem
let ty = match &body[tgt_expr] {
Expr::Missing => self.err_ty(),
Expr::If { condition, then_branch, else_branch } => {
// if let is desugared to match, so this is always simple if
self.infer_expr(
*condition,
&Expectation::has_type(TyKind::Scalar(Scalar::Bool).intern(&Interner)),
);
let condition_diverges = mem::replace(&mut self.diverges, Diverges::Maybe);
let mut both_arms_diverge = Diverges::Always;
let mut result_ty = self.table.new_type_var();
let then_ty = self.infer_expr_inner(*then_branch, &expected);
both_arms_diverge &= mem::replace(&mut self.diverges, Diverges::Maybe);
result_ty = self.coerce_merge_branch(Some(*then_branch), &result_ty, &then_ty);
let else_ty = match else_branch {
Some(else_branch) => self.infer_expr_inner(*else_branch, &expected),
None => TyBuilder::unit(),
};
both_arms_diverge &= self.diverges;
// FIXME: create a synthetic `else {}` so we have something to refer to here instead of None?
result_ty = self.coerce_merge_branch(*else_branch, &result_ty, &else_ty);
self.diverges = condition_diverges | both_arms_diverge;
result_ty
}
Expr::Block { statements, tail, label, id: _ } => {
let old_resolver = mem::replace(
&mut self.resolver,
resolver_for_expr(self.db.upcast(), self.owner, tgt_expr),
);
let ty = match label {
Some(_) => {
let break_ty = self.table.new_type_var();
self.breakables.push(BreakableContext {
may_break: false,
break_ty: break_ty.clone(),
label: label.map(|label| self.body[label].name.clone()),
});
let ty =
self.infer_block(statements, *tail, &Expectation::has_type(break_ty));
let ctxt = self.breakables.pop().expect("breakable stack broken");
if ctxt.may_break {
ctxt.break_ty
} else {
ty
}
}
None => self.infer_block(statements, *tail, expected),
};
self.resolver = old_resolver;
ty
}
Expr::Unsafe { body } | Expr::Const { body } => self.infer_expr(*body, expected),
Expr::TryBlock { body } => {
let _inner = self.infer_expr(*body, expected);
// FIXME should be std::result::Result<{inner}, _>
self.err_ty()
}
Expr::Async { body } => {
// Use the first type parameter as the output type of future.
                // existential type AsyncBlockImplTrait<InnerType>: Future<Output = InnerType>
let inner_ty = self.infer_expr(*body, &Expectation::none());
let impl_trait_id = crate::ImplTraitId::AsyncBlockTypeImplTrait(self.owner, *body);
let opaque_ty_id = self.db.intern_impl_trait_id(impl_trait_id).into();
TyKind::OpaqueType(opaque_ty_id, Substitution::from1(&Interner, inner_ty))
.intern(&Interner)
}
Expr::Loop { body, label } => {
self.breakables.push(BreakableContext {
may_break: false,
break_ty: self.table.new_type_var(),
label: label.map(|label| self.body[label].name.clone()),
});
self.infer_expr(*body, &Expectation::has_type(TyBuilder::unit()));
let ctxt = self.breakables.pop().expect("breakable stack broken");
                if ctxt.may_break {
                    self.diverges = Diverges::Maybe;
                    ctxt.break_ty
                } else {
                    TyKind::Never.intern(&Interner)
                }
}
Expr::While { condition, body, label } => {
self.breakables.push(BreakableContext {
may_break: false,
break_ty: self.err_ty(),
label: label.map(|label| self.body[label].name.clone()),
});
// while let is desugared to a match loop, so this is always simple while
self.infer_expr(
*condition,
&Expectation::has_type(TyKind::Scalar(Scalar::Bool).intern(&Interner)),
);
self.infer_expr(*body, &Expectation::has_type(TyBuilder::unit()));
let _ctxt = self.breakables.pop().expect("breakable stack broken");
// the body may not run, so it diverging doesn't mean we diverge
self.diverges = Diverges::Maybe;
TyBuilder::unit()
}
Expr::For { iterable, body, pat, label } => {
let iterable_ty = self.infer_expr(*iterable, &Expectation::none());
self.breakables.push(BreakableContext {
may_break: false,
break_ty: self.err_ty(),
label: label.map(|label| self.body[label].name.clone()),
});
let pat_ty =
self.resolve_associated_type(iterable_ty, self.resolve_into_iter_item());
self.infer_pat(*pat, &pat_ty, BindingMode::default());
self.infer_expr(*body, &Expectation::has_type(TyBuilder::unit()));
let _ctxt = self.breakables.pop().expect("breakable stack broken");
// the body may not run, so it diverging doesn't mean we diverge
self.diverges = Diverges::Maybe;
TyBuilder::unit()
}
Expr::Lambda { body, args, ret_type, arg_types } => {
assert_eq!(args.len(), arg_types.len());
let mut sig_tys = Vec::new();
// collect explicitly written argument types
for arg_type in arg_types.iter() {
let arg_ty = if let Some(type_ref) = arg_type {
self.make_ty(type_ref)
} else {
self.table.new_type_var()
};
sig_tys.push(arg_ty);
}
// add return type
let ret_ty = match ret_type {
Some(type_ref) => self.make_ty(type_ref),
None => self.table.new_type_var(),
};
sig_tys.push(ret_ty.clone());
let sig_ty = TyKind::Function(FnPointer {
num_binders: 0,
sig: FnSig { abi: (), safety: chalk_ir::Safety::Safe, variadic: false },
substitution: FnSubst(
Substitution::from_iter(&Interner, sig_tys.clone()).shifted_in(&Interner),
),
})
.intern(&Interner);
let closure_id = self.db.intern_closure((self.owner, tgt_expr)).into();
let closure_ty =
TyKind::Closure(closure_id, Substitution::from1(&Interner, sig_ty))
.intern(&Interner);
// Eagerly try to relate the closure type with the expected
// type, otherwise we often won't have enough information to
// infer the body.
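                // Illustrative example (editorial note, not from the original
                // source): in `let f: fn(u32) -> u32 = |x| x + 1;`, relating
                // the closure type to the expected fn-pointer type up front
                // is what lets `x` be inferred as `u32` before the closure
                // body is checked.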
if let Some(t) = expected.only_has_type(&mut self.table) {
self.coerce(&closure_ty, &t);
}
// Now go through the argument patterns
for (arg_pat, arg_ty) in args.iter().zip(sig_tys) {
self.infer_pat(*arg_pat, &arg_ty, BindingMode::default());
}
let prev_diverges = mem::replace(&mut self.diverges, Diverges::Maybe);
let prev_ret_ty = mem::replace(&mut self.return_ty, ret_ty.clone());
self.infer_expr_coerce(*body, &Expectation::has_type(ret_ty));
self.diverges = prev_diverges;
self.return_ty = prev_ret_ty;
closure_ty
}
Expr::Call { callee, args } => {
let callee_ty = self.infer_expr(*callee, &Expectation::none());
let canonicalized = self.canonicalize(callee_ty.clone());
let mut derefs = autoderef(
self.db,
self.resolver.krate(),
InEnvironment {
goal: canonicalized.value.clone(),
environment: self.table.trait_env.env.clone(),
},
);
let (param_tys, ret_ty): (Vec<Ty>, Ty) = derefs
.find_map(|callee_deref_ty| {
self.callable_sig(
&canonicalized.decanonicalize_ty(callee_deref_ty.value),
args.len(),
)
})
.unwrap_or((Vec::new(), self.err_ty()));
self.register_obligations_for_call(&callee_ty);
self.check_call_arguments(args, ¶m_tys);
self.normalize_associated_types_in(ret_ty)
}
Expr::MethodCall { receiver, args, method_name, generic_args } => self
.infer_method_call(
tgt_expr,
*receiver,
&args,
&method_name,
generic_args.as_deref(),
),
Expr::Match { expr, arms } => {
let input_ty = self.infer_expr(*expr, &Expectation::none());
let mut result_ty = if arms.is_empty() {
TyKind::Never.intern(&Interner)
} else {
self.table.new_type_var()
};
let matchee_diverges = self.diverges;
let mut all_arms_diverge = Diverges::Always;
for arm in arms {
self.diverges = Diverges::Maybe;
let _pat_ty = self.infer_pat(arm.pat, &input_ty, BindingMode::default());
if let Some(guard_expr) = arm.guard {
self.infer_expr(
guard_expr,
&Expectation::has_type(TyKind::Scalar(Scalar::Bool).intern(&Interner)),
);
}
let arm_ty = self.infer_expr_inner(arm.expr, &expected);
all_arms_diverge &= self.diverges;
result_ty = self.coerce_merge_branch(Some(arm.expr), &result_ty, &arm_ty);
}
self.diverges = matchee_diverges | all_arms_diverge;
result_ty
}
Expr::Path(p) => {
// FIXME this could be more efficient...
let resolver = resolver_for_expr(self.db.upcast(), self.owner, tgt_expr);
self.infer_path(&resolver, p, tgt_expr.into()).unwrap_or(self.err_ty())
}
Expr::Continue { .. } => TyKind::Never.intern(&Interner),
Expr::Break { expr, label } => {
let last_ty =
if let Some(ctxt) = find_breakable(&mut self.breakables, label.as_ref()) {
ctxt.break_ty.clone()
} else {
self.err_ty()
};
let val_ty = if let Some(expr) = expr {
self.infer_expr(*expr, &Expectation::none())
} else {
TyBuilder::unit()
};
// FIXME: create a synthetic `()` during lowering so we have something to refer to here?
let merged_type = self.coerce_merge_branch(*expr, &last_ty, &val_ty);
if let Some(ctxt) = find_breakable(&mut self.breakables, label.as_ref()) {
ctxt.break_ty = merged_type;
ctxt.may_break = true;
} else {
self.push_diagnostic(InferenceDiagnostic::BreakOutsideOfLoop {
expr: tgt_expr,
});
}
TyKind::Never.intern(&Interner)
}
Expr::Return { expr } => {
if let Some(expr) = expr {
self.infer_expr_coerce(*expr, &Expectation::has_type(self.return_ty.clone()));
} else {
let unit = TyBuilder::unit();
self.coerce(&unit, &self.return_ty.clone());
}
TyKind::Never.intern(&Interner)
}
Expr::Yield { expr } => {
// FIXME: track yield type for coercion
if let Some(expr) = expr {
self.infer_expr(*expr, &Expectation::none());
}
TyKind::Never.intern(&Interner)
}
Expr::RecordLit { path, fields, spread } => {
let (ty, def_id) = self.resolve_variant(path.as_deref());
if let Some(variant) = def_id {
self.write_variant_resolution(tgt_expr.into(), variant);
}
if let Some(t) = expected.only_has_type(&mut self.table) {
self.unify(&ty, &t);
}
let substs = ty
.as_adt()
.map(|(_, s)| s.clone())
.unwrap_or_else(|| Substitution::empty(&Interner));
let field_types = def_id.map(|it| self.db.field_types(it)).unwrap_or_default();
let variant_data = def_id.map(|it| it.variant_data(self.db.upcast()));
for field in fields.iter() {
let field_def =
variant_data.as_ref().and_then(|it| match it.field(&field.name) {
Some(local_id) => Some(FieldId { parent: def_id.unwrap(), local_id }),
None => {
self.push_diagnostic(InferenceDiagnostic::NoSuchField {
expr: field.expr,
});
None
}
});
let field_ty = field_def.map_or(self.err_ty(), |it| {
field_types[it.local_id].clone().substitute(&Interner, &substs)
});
self.infer_expr_coerce(field.expr, &Expectation::has_type(field_ty));
}
if let Some(expr) = spread {
self.infer_expr(*expr, &Expectation::has_type(ty.clone()));
}
ty
}
Expr::Field { expr, name } => {
let receiver_ty = self.infer_expr_inner(*expr, &Expectation::none());
let canonicalized = self.canonicalize(receiver_ty);
let ty = autoderef::autoderef(
self.db,
self.resolver.krate(),
InEnvironment {
goal: canonicalized.value.clone(),
environment: self.trait_env.env.clone(),
},
)
.find_map(|derefed_ty| {
let def_db = self.db.upcast();
let module = self.resolver.module();
let is_visible = |field_id: &FieldId| {
module
.map(|mod_id| {
self.db.field_visibilities(field_id.parent)[field_id.local_id]
.is_visible_from(def_db, mod_id)
})
.unwrap_or(true)
};
match canonicalized.decanonicalize_ty(derefed_ty.value).kind(&Interner) {
TyKind::Tuple(_, substs) => name.as_tuple_index().and_then(|idx| {
substs
.as_slice(&Interner)
.get(idx)
.map(|a| a.assert_ty_ref(&Interner))
.cloned()
}),
TyKind::Adt(AdtId(hir_def::AdtId::StructId(s)), parameters) => {
let local_id = self.db.struct_data(*s).variant_data.field(name)?;
let field = FieldId { parent: (*s).into(), local_id };
if is_visible(&field) {
self.write_field_resolution(tgt_expr, field);
Some(
self.db.field_types((*s).into())[field.local_id]
.clone()
.substitute(&Interner, ¶meters),
)
} else {
None
}
}
TyKind::Adt(AdtId(hir_def::AdtId::UnionId(u)), parameters) => {
let local_id = self.db.union_data(*u).variant_data.field(name)?;
let field = FieldId { parent: (*u).into(), local_id };
if is_visible(&field) {
self.write_field_resolution(tgt_expr, field);
Some(
self.db.field_types((*u).into())[field.local_id]
.clone()
.substitute(&Interner, ¶meters),
)
} else {
None
}
}
_ => None,
}
})
.unwrap_or(self.err_ty());
let ty = self.insert_type_vars(ty);
self.normalize_associated_types_in(ty)
}
Expr::Await { expr } => {
let inner_ty = self.infer_expr_inner(*expr, &Expectation::none());
self.resolve_associated_type(inner_ty, self.resolve_future_future_output())
}
Expr::Try { expr } => {
let inner_ty = self.infer_expr_inner(*expr, &Expectation::none());
self.resolve_associated_type(inner_ty, self.resolve_ops_try_ok())
}
Expr::Cast { expr, type_ref } => {
// FIXME: propagate the "castable to" expectation (and find a test case that shows this is necessary)
let _inner_ty = self.infer_expr_inner(*expr, &Expectation::none());
let cast_ty = self.make_ty(type_ref);
// FIXME check the cast...
cast_ty
}
Expr::Ref { expr, rawness, mutability } => {
let mutability = lower_to_chalk_mutability(*mutability);
let expectation = if let Some((exp_inner, exp_rawness, exp_mutability)) = expected
.only_has_type(&mut self.table)
.as_ref()
.and_then(|t| t.as_reference_or_ptr())
{
if exp_mutability == Mutability::Mut && mutability == Mutability::Not {
// FIXME: record type error - expected mut reference but found shared ref,
// which cannot be coerced
}
if exp_rawness == Rawness::Ref && *rawness == Rawness::RawPtr {
// FIXME: record type error - expected reference but found ptr,
// which cannot be coerced
}
Expectation::rvalue_hint(Ty::clone(exp_inner))
} else {
Expectation::none()
};
let inner_ty = self.infer_expr_inner(*expr, &expectation);
match rawness {
Rawness::RawPtr => TyKind::Raw(mutability, inner_ty),
Rawness::Ref => TyKind::Ref(mutability, static_lifetime(), inner_ty),
}
.intern(&Interner)
}
Expr::Box { expr } => {
let inner_ty = self.infer_expr_inner(*expr, &Expectation::none());
if let Some(box_) = self.resolve_boxed_box() {
TyBuilder::adt(self.db, box_)
.push(inner_ty)
.fill_with_defaults(self.db, || self.table.new_type_var())
.build()
} else {
self.err_ty()
}
}
Expr::UnaryOp { expr, op } => {
let inner_ty = self.infer_expr_inner(*expr, &Expectation::none());
let inner_ty = self.resolve_ty_shallow(&inner_ty);
match op {
UnaryOp::Deref => match self.resolver.krate() {
Some(krate) => {
let canonicalized = self.canonicalize(inner_ty);
match autoderef::deref(
self.db,
krate,
InEnvironment {
goal: &canonicalized.value,
environment: self.trait_env.env.clone(),
},
) {
Some(derefed_ty) => {
canonicalized.decanonicalize_ty(derefed_ty.value)
}
None => self.err_ty(),
}
}
None => self.err_ty(),
},
UnaryOp::Neg => {
match inner_ty.kind(&Interner) {
// Fast path for builtins
TyKind::Scalar(Scalar::Int(_))
| TyKind::Scalar(Scalar::Uint(_))
| TyKind::Scalar(Scalar::Float(_))
| TyKind::InferenceVar(_, TyVariableKind::Integer)
| TyKind::InferenceVar(_, TyVariableKind::Float) => inner_ty,
// Otherwise we resolve via the std::ops::Neg trait
_ => self
.resolve_associated_type(inner_ty, self.resolve_ops_neg_output()),
}
}
UnaryOp::Not => {
match inner_ty.kind(&Interner) {
// Fast path for builtins
TyKind::Scalar(Scalar::Bool)
| TyKind::Scalar(Scalar::Int(_))
| TyKind::Scalar(Scalar::Uint(_))
| TyKind::InferenceVar(_, TyVariableKind::Integer) => inner_ty,
// Otherwise we resolve via the std::ops::Not trait
_ => self
.resolve_associated_type(inner_ty, self.resolve_ops_not_output()),
}
}
}
}
Expr::BinaryOp { lhs, rhs, op } => match op {
Some(op) => {
let lhs_expectation = match op {
BinaryOp::LogicOp(..) => {
Expectation::has_type(TyKind::Scalar(Scalar::Bool).intern(&Interner))
}
_ => Expectation::none(),
};
let lhs_ty = self.infer_expr(*lhs, &lhs_expectation);
let lhs_ty = self.resolve_ty_shallow(&lhs_ty);
let rhs_expectation = op::binary_op_rhs_expectation(*op, lhs_ty.clone());
let rhs_ty = self.infer_expr(*rhs, &Expectation::has_type(rhs_expectation));
let rhs_ty = self.resolve_ty_shallow(&rhs_ty);
let ret = op::binary_op_return_ty(*op, lhs_ty.clone(), rhs_ty.clone());
if ret.is_unknown() {
cov_mark::hit!(infer_expr_inner_binary_operator_overload);
self.resolve_associated_type_with_params(
lhs_ty,
self.resolve_binary_op_output(op),
&[rhs_ty],
)
} else {
ret
}
}
_ => self.err_ty(),
},
Expr::Range { lhs, rhs, range_type } => {
let lhs_ty = lhs.map(|e| self.infer_expr_inner(e, &Expectation::none()));
let rhs_expect = lhs_ty
.as_ref()
.map_or_else(Expectation::none, |ty| Expectation::has_type(ty.clone()));
let rhs_ty = rhs.map(|e| self.infer_expr(e, &rhs_expect));
match (range_type, lhs_ty, rhs_ty) {
(RangeOp::Exclusive, None, None) => match self.resolve_range_full() {
Some(adt) => TyBuilder::adt(self.db, adt).build(),
None => self.err_ty(),
},
(RangeOp::Exclusive, None, Some(ty)) => match self.resolve_range_to() {
Some(adt) => TyBuilder::adt(self.db, adt).push(ty).build(),
None => self.err_ty(),
},
(RangeOp::Inclusive, None, Some(ty)) => {
match self.resolve_range_to_inclusive() {
Some(adt) => TyBuilder::adt(self.db, adt).push(ty).build(),
None => self.err_ty(),
}
}
(RangeOp::Exclusive, Some(_), Some(ty)) => match self.resolve_range() {
Some(adt) => TyBuilder::adt(self.db, adt).push(ty).build(),
None => self.err_ty(),
},
(RangeOp::Inclusive, Some(_), Some(ty)) => {
match self.resolve_range_inclusive() {
Some(adt) => TyBuilder::adt(self.db, adt).push(ty).build(),
None => self.err_ty(),
}
}
(RangeOp::Exclusive, Some(ty), None) => match self.resolve_range_from() {
Some(adt) => TyBuilder::adt(self.db, adt).push(ty).build(),
None => self.err_ty(),
},
(RangeOp::Inclusive, _, None) => self.err_ty(),
}
}
Expr::Index { base, index } => {
let base_ty = self.infer_expr_inner(*base, &Expectation::none());
let index_ty = self.infer_expr(*index, &Expectation::none());
if let (Some(index_trait), Some(krate)) =
(self.resolve_ops_index(), self.resolver.krate())
{
let canonicalized = self.canonicalize(base_ty);
let self_ty = method_resolution::resolve_indexing_op(
self.db,
&canonicalized.value,
self.trait_env.clone(),
krate,
index_trait,
);
let self_ty =
self_ty.map_or(self.err_ty(), |t| canonicalized.decanonicalize_ty(t.value));
self.resolve_associated_type_with_params(
self_ty,
self.resolve_ops_index_output(),
&[index_ty],
)
} else {
self.err_ty()
}
}
Expr::Tuple { exprs } => {
let mut tys = match expected
.only_has_type(&mut self.table)
.as_ref()
.map(|t| t.kind(&Interner))
{
Some(TyKind::Tuple(_, substs)) => substs
.iter(&Interner)
.map(|a| a.assert_ty_ref(&Interner).clone())
.chain(repeat_with(|| self.table.new_type_var()))
.take(exprs.len())
.collect::<Vec<_>>(),
_ => (0..exprs.len()).map(|_| self.table.new_type_var()).collect(),
};
for (expr, ty) in exprs.iter().zip(tys.iter_mut()) {
self.infer_expr_coerce(*expr, &Expectation::has_type(ty.clone()));
}
TyKind::Tuple(tys.len(), Substitution::from_iter(&Interner, tys)).intern(&Interner)
}
Expr::Array(array) => {
let elem_ty =
match expected.to_option(&mut self.table).as_ref().map(|t| t.kind(&Interner)) {
Some(TyKind::Array(st, _)) | Some(TyKind::Slice(st)) => st.clone(),
_ => self.table.new_type_var(),
};
let len = match array {
Array::ElementList(items) => {
for expr in items.iter() {
// FIXME: use CoerceMany (coerce_merge_branch)
self.infer_expr_coerce(*expr, &Expectation::has_type(elem_ty.clone()));
}
Some(items.len() as u64)
}
Array::Repeat { initializer, repeat } => {
self.infer_expr_coerce(
*initializer,
&Expectation::has_type(elem_ty.clone()),
);
self.infer_expr(
*repeat,
&Expectation::has_type(
TyKind::Scalar(Scalar::Uint(UintTy::Usize)).intern(&Interner),
),
);
let repeat_expr = &self.body.exprs[*repeat];
consteval::eval_usize(repeat_expr)
}
};
TyKind::Array(elem_ty, consteval::usize_const(len)).intern(&Interner)
}
Expr::Literal(lit) => match lit {
Literal::Bool(..) => TyKind::Scalar(Scalar::Bool).intern(&Interner),
Literal::String(..) => {
TyKind::Ref(Mutability::Not, static_lifetime(), TyKind::Str.intern(&Interner))
.intern(&Interner)
}
Literal::ByteString(bs) => {
let byte_type = TyKind::Scalar(Scalar::Uint(UintTy::U8)).intern(&Interner);
let len = consteval::usize_const(Some(bs.len() as u64));
let array_type = TyKind::Array(byte_type, len).intern(&Interner);
TyKind::Ref(Mutability::Not, static_lifetime(), array_type).intern(&Interner)
}
Literal::Char(..) => TyKind::Scalar(Scalar::Char).intern(&Interner),
Literal::Int(_v, ty) => match ty {
Some(int_ty) => {
TyKind::Scalar(Scalar::Int(primitive::int_ty_from_builtin(*int_ty)))
.intern(&Interner)
}
None => self.table.new_integer_var(),
},
Literal::Uint(_v, ty) => match ty {
Some(int_ty) => {
TyKind::Scalar(Scalar::Uint(primitive::uint_ty_from_builtin(*int_ty)))
.intern(&Interner)
}
None => self.table.new_integer_var(),
},
Literal::Float(_v, ty) => match ty {
Some(float_ty) => {
TyKind::Scalar(Scalar::Float(primitive::float_ty_from_builtin(*float_ty)))
.intern(&Interner)
}
None => self.table.new_float_var(),
},
},
Expr::MacroStmts { tail } => self.infer_expr_inner(*tail, expected),
};
// use a new type variable if we got unknown here
let ty = self.insert_type_vars_shallow(ty);
self.write_expr_ty(tgt_expr, ty.clone());
ty
} |
utils.rs | use crate::common::{FileHandle, QError};
use crate::variant::{QBNumberCast, Variant};
use std::convert::TryFrom;
pub trait VariantCasts {
fn to_file_handle(&self) -> Result<FileHandle, QError>;
fn to_record_number(&self) -> Result<usize, QError>;
fn to_non_negative_int(&self) -> Result<usize, QError>;
fn to_positive_int(&self) -> Result<usize, QError>;
fn to_positive_int_or(&self, err: QError) -> Result<usize, QError>;
}
impl VariantCasts for Variant {
fn | (&self) -> Result<FileHandle, QError> {
let i: i32 = self.try_cast()?;
FileHandle::try_from(i)
}
fn to_record_number(&self) -> Result<usize, QError> {
let record_number_as_long: i64 = self.try_cast()?;
if record_number_as_long <= 0 {
Err(QError::BadRecordNumber)
} else {
Ok(record_number_as_long as usize)
}
}
fn to_non_negative_int(&self) -> Result<usize, QError> {
let i: i32 = self.try_cast()?;
if i >= 0 {
Ok(i as usize)
} else {
Err(QError::IllegalFunctionCall)
}
}
fn to_positive_int(&self) -> Result<usize, QError> {
self.to_positive_int_or(QError::IllegalFunctionCall)
}
fn to_positive_int_or(&self, err: QError) -> Result<usize, QError> {
let i: i32 = self.try_cast()?;
if i > 0 {
Ok(i as usize)
} else {
Err(err)
}
}
}
| to_file_handle |
test_utils.py | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import collections
import contextlib
import copy
import inspect
import itertools
import json
import logging
import os
import re
import unittest
from constants import constants
from core.controllers import base
from core.domain import auth_domain
from core.domain import caching_domain
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import fs_services
from core.domain import interaction_registry
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import taskqueue_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.search import elastic_search_services
from core.platform.taskqueue import cloud_tasks_emulator
import feconf
import main
import main_mail
import main_taskqueue
from proto import text_classifier_pb2
import python_utils
import schema_utils
import utils
import contextlib2
import elasticsearch
from google.appengine.api import mail
from google.appengine.ext import deferred
from google.appengine.ext import testbed
import requests_mock
import webtest
(
auth_models, exp_models, feedback_models, question_models, skill_models,
story_models, suggestion_models, topic_models,) = (
models.Registry.import_models([
models.NAMES.auth, models.NAMES.exploration, models.NAMES.feedback,
models.NAMES.question, models.NAMES.skill, models.NAMES.story,
models.NAMES.suggestion, models.NAMES.topic]))
current_user_services = models.Registry.import_current_user_services()
datastore_services = models.Registry.import_datastore_services()
email_services = models.Registry.import_email_services()
memory_cache_services = models.Registry.import_cache_services()
platform_auth_services = models.Registry.import_auth_services()
platform_taskqueue_services = models.Registry.import_taskqueue_services()
# Prefix to append to all lines printed by tests to the console.
# We are using the b' prefix as all the stdouts are in bytes.
LOG_LINE_PREFIX = b'LOG_INFO_TEST: '
# List of model classes that don't have Wipeout- or Takeout-related class
# methods defined because they're not used directly but only as
# base classes for the other models.
BASE_MODEL_CLASSES_WITHOUT_DATA_POLICIES = (
'BaseCommitLogEntryModel',
'BaseHumanMaintainedModel',
'BaseMapReduceBatchResultsModel',
'BaseModel',
'BaseSnapshotContentModel',
'BaseSnapshotMetadataModel',
'VersionedModel',
)
def get_filepath_from_filename(filename, rootdir):
"""Returns filepath using the filename. Different files are present in
different subdirectories in the rootdir. So, we walk through the rootdir and
    match all the filenames with the given filename. When a match is found
the function returns the complete path of the filename by using
os.path.join(root, filename).
For example signup-page.mainpage.html is present in
core/templates/pages/signup-page and error-page.mainpage.html is present in
core/templates/pages/error-pages. So we walk through core/templates/pages
    and a match for signup-page.mainpage.html is found in signup-page
    subdirectory and a match for error-page.mainpage.html is found in
error-pages subdirectory.
Args:
filename: str. The name of the file.
rootdir: str. The directory to search the file in.
Returns:
str | None. The path of the file if file is found otherwise
None.
"""
# This is required since error files are served according to error status
# code. The file served is error-page.mainpage.html but it is compiled and
# stored as error-page-{status_code}.mainpage.html. So, we need to swap the
# name here to obtain the correct filepath.
if filename.startswith('error-page'):
filename = 'error-page.mainpage.html'
matches = list(itertools.chain.from_iterable(
(os.path.join(subdir, f) for f in filenames if f == filename)
for subdir, _, filenames in os.walk(rootdir)))
if len(matches) > 1:
raise Exception('Multiple files found with name: %s' % filename)
return matches[0] if matches else None
def mock_load_template(filename):
"""Mock for load_template function. This mock is required for backend tests
since we do not have webpack compilation before backend tests. The folder to
search templates is webpack_bundles which is generated after webpack
compilation. Since this folder will be missing, load_template function will
return an error. So, we use a mock for load_template which returns the html
file from the source directory instead.
Args:
filename: str. The name of the file for which template is to be
returned.
Returns:
str. The contents of the given file.
"""
filepath = get_filepath_from_filename(
filename, os.path.join('core', 'templates', 'pages'))
with python_utils.open_file(filepath, 'r') as f:
return f.read()
def check_image_png_or_webp(image_string):
"""Checks if the image is in png or webp format only.
Args:
image_string: str. Image url in base64 format.
Returns:
        bool. Returns True if the image is in PNG or WebP format.
"""
return image_string.startswith(('data:image/png', 'data:image/webp'))
def get_storage_model_module_names():
"""Get all module names in storage."""
# As models.NAMES is an enum, it cannot be iterated over. So we use the
# __dict__ property which can be iterated over.
for name in models.NAMES.__dict__:
if '__' not in name:
yield name
def get_storage_model_classes():
"""Get all model classes in storage."""
for module_name in get_storage_model_module_names():
(module,) = models.Registry.import_models([module_name])
for member_name, member_obj in inspect.getmembers(module):
if inspect.isclass(member_obj):
clazz = getattr(module, member_name)
all_base_classes = [
base_class.__name__ for base_class in inspect.getmro(
clazz)]
if 'Model' in all_base_classes:
yield clazz
class ElasticSearchStub(python_utils.OBJECT):
"""This stub class mocks the functionality of ES in
elastic_search_services.py.
IMPORTANT NOTE TO DEVELOPERS: These mock functions are NOT guaranteed to
be exact implementations of elasticsearch functionality. If the results of
this mock and the local dev elasticsearch instance differ, the mock
functions should be updated so that their behaviour matches what a local
dev instance would return. (For example, this mock always has a 'version'
of 1 in the return dict and an arbitrary '_seq_no', although the version
number increments with every PUT in the elasticsearch Python client
library and the '_seq_no' increments with every operation.)
"""
_DB = {}
def reset(self):
"""Helper method that clears the mock database."""
self._DB.clear()
def _generate_index_not_found_error(self, index_name):
"""Helper method that generates an elasticsearch 'index not found' 404
error.
Args:
index_name: str. The index that was not found.
Returns:
elasticsearch.NotFoundError. A manually-constructed error
indicating that the index was not found.
"""
raise elasticsearch.NotFoundError(
404, 'index_not_found_exception', {
'status': 404,
'error': {
'reason': 'no such index [%s]' % index_name,
'root_cause': [{
'reason': 'no such index [%s]' % index_name,
'index': index_name,
'index_uuid': '_na_',
'type': 'index_not_found_exception',
'resource.type': 'index_or_alias',
'resource.id': index_name
}],
'index': index_name,
'index_uuid': '_na_',
'type': 'index_not_found_exception',
'resource.type': 'index_or_alias',
'resource.id': index_name
}
}
)
def mock_create_index(self, index_name):
"""Creates an index with the given name.
Args:
index_name: str. The name of the index to create.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
elasticsearch.RequestError. An index with the given name already
exists.
"""
if index_name in self._DB:
raise elasticsearch.RequestError(
400, 'resource_already_exists_exception',
'index [%s/RaNdOmStRiNgOfAlPhAs] already exists' % index_name)
self._DB[index_name] = []
return {
'index': index_name,
'acknowledged': True,
'shards_acknowledged': True
}
def mock_index(self, index_name, document, id=None): # pylint: disable=redefined-builtin
"""Adds a document with the given ID to the index.
Note that, unfortunately, we have to keep the name of "id" for the
last kwarg, although it conflicts with a Python builtin. This is
because the name is an existing part of the API defined at
https://elasticsearch-py.readthedocs.io/en/v7.10.1/api.html
Args:
index_name: str. The name of the index to create.
document: dict. The document to store.
id: str. The unique identifier of the document.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
            elasticsearch.NotFoundError. The given index name was not found.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
self._DB[index_name] = [
d for d in self._DB[index_name] if d['id'] != id]
self._DB[index_name].append(document)
return {
'_index': index_name,
'_shards': {
'total': 2,
'successful': 1,
'failed': 0,
},
'_seq_no': 96,
'_primary_term': 1,
'result': 'created',
'_id': id,
'_version': 1,
'_type': '_doc',
}
def mock_exists(self, index_name, doc_id):
"""Checks whether a document with the given ID exists in the mock
database.
Args:
index_name: str. The name of the index to check.
doc_id: str. The document id to check.
Returns:
bool. Whether the document exists in the index.
Raises:
elasticsearch.NotFoundError: The given index name was not found.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
return any([d['id'] == doc_id for d in self._DB[index_name]])
def mock_delete(self, index_name, doc_id):
"""Deletes a document from an index in the mock database. Does nothing
if the document is not in the index.
Args:
index_name: str. The name of the index to delete the document from.
doc_id: str. The document id to be deleted from the index.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
Exception. The document does not exist in the index.
elasticsearch.NotFoundError. The given index name was not found, or
the given doc_id was not found in the given index.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
docs = [d for d in self._DB[index_name] if d['id'] != doc_id]
if len(self._DB[index_name]) != len(docs):
self._DB[index_name] = docs
return {
'_type': '_doc',
'_seq_no': 99,
'_shards': {
'total': 2,
'successful': 1,
'failed': 0
},
'result': 'deleted',
'_primary_term': 1,
'_index': index_name,
'_version': 4,
'_id': '0'
}
raise elasticsearch.NotFoundError(
404, {
'_index': index_name,
'_type': '_doc',
'_id': doc_id,
'_version': 1,
'result': 'not_found',
'_shards': {
'total': 2,
'successful': 1,
'failed': 0
},
'_seq_no': 103,
'_primary_term': 1
})
def mock_delete_by_query(self, index_name, query):
"""Deletes documents from an index based on the given query.
        Note that this mock only supports a specific format for the query, i.e. the
one which clears the entire index. It asserts that all calls to this
function use that query format.
Args:
index_name: str. The name of the index to delete the documents from.
query: dict. The query that defines which documents to delete.
Returns:
dict. A dict representing the ElasticSearch response.
Raises:
AssertionError. The query is not in the correct form.
elasticsearch.NotFoundError. The given index name was not found.
"""
assert query.keys() == ['query']
assert query['query'] == {
'match_all': {}
}
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
index_size = len(self._DB[index_name])
del self._DB[index_name][:]
return {
'took': 72,
'version_conflicts': 0,
'noops': 0,
'throttled_until_millis': 0,
'failures': [],
'throttled_millis': 0,
'total': index_size,
'batches': 1,
'requests_per_second': -1.0,
'retries': {u'search': 0, u'bulk': 0},
'timed_out': False,
'deleted': index_size
}
def mock_search(self, body=None, index=None, params=None):
"""Searches and returns documents that match the given query.
Args:
body: dict. A dictionary search definition that uses Query DSL.
index: str. The name of the index to search.
params: dict. A dict with two keys: `size` and `from`. The
corresponding values are ints which represent the number of
results to fetch, and the offset from which to fetch them,
respectively.
Returns:
dict. A dict representing the ElasticSearch response.
Raises:
AssertionError. The given arguments are not supported by this mock.
elasticsearch.NotFoundError. The given index name was not found.
"""
assert body is not None
# "_all" and "" are special index names that are used to search across
# all indexes. We do not allow their use.
assert index not in ['_all', '', None]
assert sorted(params.keys()) == ['from', 'size']
if index not in self._DB:
raise self._generate_index_not_found_error(index)
result_docs = []
result_doc_ids = set([])
for doc in self._DB[index]:
if not doc['id'] in result_doc_ids:
result_docs.append(doc)
result_doc_ids.add(doc['id'])
filters = body['query']['bool']['filter']
terms = body['query']['bool']['must']
for f in filters:
for k, v in f['match'].items():
result_docs = [doc for doc in result_docs if doc[k] in v]
if terms:
filtered_docs = []
for term in terms:
for _, v in term.items():
values = v['query'].split(' ')
for doc in result_docs:
strs = [val for val in doc.values() if isinstance(
val, python_utils.BASESTRING)]
words = []
for s in strs:
words += s.split(' ')
if all([value in words for value in values]):
filtered_docs.append(doc)
result_docs = filtered_docs
formatted_result_docs = [{
'_id': doc['id'],
'_score': 0.0,
'_type': '_doc',
'_index': index,
'_source': doc
} for doc in result_docs[
params['from']: params['from'] + params['size']
]]
return {
'timed_out': False,
'_shards': {
'failed': 0,
'total': 1,
'successful': 1,
'skipped': 0
},
'took': 4,
'hits': {
'hits': formatted_result_docs
},
'total': {
'value': len(formatted_result_docs),
'relation': 'eq'
},
'max_score': max(
[0.0] + [d['_score'] for d in formatted_result_docs]),
}
class AuthServicesStub(python_utils.OBJECT):
"""Test-only implementation of the public API in core.platform.auth."""
def __init__(self):
"""Initializes a new instance that emulates an empty auth server."""
self._user_id_by_auth_id = {}
self._external_user_id_associations = set()
@classmethod
def install_stub(cls, test):
"""Installs a new instance of the stub onto the given test instance.
Args:
test: GenericTestBase. The test instance to install the stub on.
Returns:
callable. A function that will uninstall the stub when called.
"""
with contextlib2.ExitStack() as stack:
stub = cls()
stack.enter_context(test.swap(
platform_auth_services, 'establish_auth_session',
stub.establish_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'destroy_auth_session',
stub.destroy_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_claims_from_request',
stub.get_auth_claims_from_request))
stack.enter_context(test.swap(
platform_auth_services, 'mark_user_for_deletion',
stub.mark_user_for_deletion))
stack.enter_context(test.swap(
platform_auth_services, 'delete_external_auth_associations',
stub.delete_external_auth_associations))
stack.enter_context(test.swap(
platform_auth_services,
'verify_external_auth_associations_are_deleted',
stub.verify_external_auth_associations_are_deleted))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_id_from_user_id',
stub.get_auth_id_from_user_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_user_id_from_auth_id',
stub.get_user_id_from_auth_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_user_ids_from_auth_ids',
stub.get_multi_user_ids_from_auth_ids))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_auth_ids_from_user_ids',
stub.get_multi_auth_ids_from_user_ids))
stack.enter_context(test.swap(
platform_auth_services, 'associate_auth_id_with_user_id',
stub.associate_auth_id_with_user_id))
stack.enter_context(test.swap(
platform_auth_services,
'associate_multi_auth_ids_with_user_ids',
stub.associate_multi_auth_ids_with_user_ids))
# Standard usage of ExitStack: enter a bunch of context managers
# from the safety of an ExitStack's context. Once they've all been
# opened, pop_all() of them off of the original context so they can
# *stay* open. Calling the function returned will exit all of them
# in reverse order.
# https://docs.python.org/3/library/contextlib.html#cleaning-up-in-an-enter-implementation
return stack.pop_all().close
@classmethod
def establish_auth_session(cls, unused_request, unused_response):
"""Sets login cookies to maintain a user's sign-in session.
Args:
unused_request: webapp2.Request. Unused because os.environ handles
sessions.
unused_response: webapp2.Response. Unused because os.environ handles
sessions.
"""
pass
@classmethod
def destroy_auth_session(cls, unused_response):
"""Clears login cookies from the given response headers.
Args:
unused_response: webapp2.Response. Unused because os.environ handles
sessions.
"""
pass
@classmethod
def get_auth_claims_from_request(cls, unused_request):
"""Authenticates the request and returns claims about its authorizer.
This stub obtains authorization information from os.environ. To make the
operation more authentic, this method also creates a new "external"
association for the user to simulate a genuine "provided" value.
Args:
unused_request: webapp2.Request. The HTTP request to authenticate.
Unused because auth-details are extracted from environment
variables.
Returns:
AuthClaims|None. Claims about the currently signed in user. If no
user is signed in, then returns None.
"""
auth_id = os.environ.get('USER_ID', '')
email = os.environ.get('USER_EMAIL', '')
role_is_super_admin = os.environ.get('USER_IS_ADMIN', '0') == '1'
if auth_id:
return auth_domain.AuthClaims(auth_id, email, role_is_super_admin)
return None
def mark_user_for_deletion(self, user_id):
"""Marks the user, and all of their auth associations, as deleted.
Since the stub does not use models, this operation actually deletes the
user's association. The "external" associations, however, are not
deleted yet.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._user_id_by_auth_id = {
a: u for a, u in self._user_id_by_auth_id.items() if u != user_id
}
def delete_external_auth_associations(self, user_id):
"""Deletes all associations that refer to the user outside of Oppia.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._external_user_id_associations.discard(user_id)
def verify_external_auth_associations_are_deleted(self, user_id):
"""Returns true if and only if we have successfully verified that all
external associations have been deleted.
Args:
user_id: str. The unique ID of the user whose associations should be
checked.
Returns:
bool. True if and only if we have successfully verified that all
external associations have been deleted.
"""
return user_id not in self._external_user_id_associations
def get_auth_id_from_user_id(self, user_id):
"""Returns the auth ID associated with the given user ID.
Args:
user_id: str. The user ID.
Returns:
str|None. The auth ID associated with the given user ID, or None if
no association exists.
"""
return python_utils.NEXT(
(a for a, u in self._user_id_by_auth_id.items() if u == user_id),
None)
def get_user_id_from_auth_id(self, auth_id):
"""Returns the user ID associated with the given auth ID.
Args:
auth_id: str. The auth ID.
Returns:
str|None. The user ID associated with the given auth ID, or None if
no association exists.
"""
return self._user_id_by_auth_id.get(auth_id, None)
def get_multi_user_ids_from_auth_ids(self, auth_ids):
"""Returns the user IDs associated with the given auth IDs.
Args:
auth_ids: list(str). The auth IDs.
Returns:
list(str|None). The user IDs associated with each of the given auth
IDs, or None for associations which don't exist.
"""
return [self._user_id_by_auth_id.get(a, None) for a in auth_ids]
def get_multi_auth_ids_from_user_ids(self, user_ids):
"""Returns the auth IDs associated with the given user IDs.
Args:
user_ids: list(str). The user IDs.
Returns:
list(str|None). The auth IDs associated with each of the given user
IDs, or None for associations which don't exist.
"""
auth_id_by_user_id = {u: a for a, u in self._user_id_by_auth_id.items()}
return [auth_id_by_user_id.get(u, None) for u in user_ids]
def associate_auth_id_with_user_id(self, auth_id_user_id_pair):
"""Commits the association between auth ID and user ID.
This method also adds the user to the "external" set of associations.
Args:
auth_id_user_id_pair: auth_domain.AuthIdUserIdPair. The association
to commit.
Raises:
Exception. The IDs are already associated with a value.
"""
auth_id, user_id = auth_id_user_id_pair
if auth_id in self._user_id_by_auth_id:
raise Exception(
'auth_id=%r is already associated with user_id=%r' % (
auth_id, self._user_id_by_auth_id[auth_id]))
auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id).put()
self._external_user_id_associations.add(user_id)
self._user_id_by_auth_id[auth_id] = user_id
def associate_multi_auth_ids_with_user_ids(self, auth_id_user_id_pairs):
"""Commits the associations between auth IDs and user IDs.
This method also adds the users to the "external" set of associations.
Args:
auth_id_user_id_pairs: list(auth_domain.AuthIdUserIdPair). The
associations to commit.
Raises:
Exception. One or more auth associations already exist.
"""
collisions = ', '.join(
'{auth_id=%r: user_id=%r}' % (a, self._user_id_by_auth_id[a])
for a, _ in auth_id_user_id_pairs if a in self._user_id_by_auth_id)
if collisions:
raise Exception('already associated: %s' % collisions)
datastore_services.put_multi(
[auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id)
for auth_id, user_id in auth_id_user_id_pairs])
        # Use update() so each user ID is added individually rather than
        # adding the generator object itself to the set.
        self._external_user_id_associations.update(
            u for _, u in auth_id_user_id_pairs)
self._user_id_by_auth_id.update(auth_id_user_id_pairs)
class TaskqueueServicesStub(python_utils.OBJECT):
"""The stub class that mocks the API functionality offered by the platform
layer, namely the platform.taskqueue taskqueue services API.
"""
def | (self, test_base):
"""Initializes a taskqueue services stub that replaces the API
functionality of core.platform.taskqueue.
Args:
test_base: GenericTestBase. The current test base.
"""
self._test_base = test_base
self._client = cloud_tasks_emulator.Emulator(
task_handler=self._task_handler, automatic_task_handling=False)
def _task_handler(self, url, payload, queue_name, task_name=None):
"""Makes a POST request to the task URL in the test app.
Args:
url: str. URL of the handler function.
payload: dict(str : *). Payload to pass to the request. Defaults
to None if no payload is required.
queue_name: str. The name of the queue to add the task to.
task_name: str|None. Optional. The name of the task.
"""
headers = {
'X-Appengine-QueueName': python_utils.convert_to_bytes(queue_name),
'X-Appengine-TaskName': (
# Maps empty strings to None so the output can become 'None'.
python_utils.convert_to_bytes(task_name or None)),
'X-AppEngine-Fake-Is-Admin': python_utils.convert_to_bytes(1),
}
csrf_token = self._test_base.get_new_csrf_token()
self._test_base.post_task(url, payload, headers, csrf_token=csrf_token)
def create_http_task(
self, queue_name, url, payload=None, scheduled_for=None,
task_name=None):
"""Creates a Task in the corresponding queue that will be executed when
the 'scheduled_for' countdown expires using the cloud tasks emulator.
Args:
queue_name: str. The name of the queue to add the task to.
url: str. URL of the handler function.
payload: dict(str : *). Payload to pass to the request. Defaults to
None if no payload is required.
scheduled_for: datetime|None. The naive datetime object for the time
to execute the task. Ignored by this stub.
task_name: str|None. Optional. The name of the task.
"""
# Causes the task to execute immediately by setting the scheduled_for
# time to 0. If we allow scheduled_for to be non-zero, then tests that
# rely on the actions made by the task will become unreliable.
scheduled_for = 0
self._client.create_task(
queue_name, url, payload, scheduled_for=scheduled_for,
task_name=task_name)
def count_jobs_in_taskqueue(self, queue_name=None):
"""Returns the total number of tasks in a single queue if a queue name
is specified or the entire taskqueue if no queue name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
int. The total number of tasks in a single queue or in the entire
taskqueue.
"""
return self._client.get_number_of_tasks(queue_name=queue_name)
def process_and_flush_tasks(self, queue_name=None):
"""Executes all of the tasks in a single queue if a queue name is
specified or all of the tasks in the taskqueue if no queue name is
specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
"""
self._client.process_and_flush_tasks(queue_name=queue_name)
def get_pending_tasks(self, queue_name=None):
"""Returns a list of the tasks in a single queue if a queue name is
specified or a list of all of the tasks in the taskqueue if no queue
name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
list(Task). List of tasks in a single queue or in the entire
taskqueue.
"""
return self._client.get_tasks(queue_name=queue_name)
class MemoryCacheServicesStub(python_utils.OBJECT):
"""The stub class that mocks the API functionality offered by the platform
layer, namely the platform.cache cache services API.
"""
_CACHE_DICT = {}
def get_memory_cache_stats(self):
"""Returns a mock profile of the cache dictionary. This mock does not
have the functionality to test for peak memory usage and total memory
usage so the values for those attributes will be 0.
Returns:
MemoryCacheStats. MemoryCacheStats object containing the total
number of keys in the cache dictionary.
"""
return caching_domain.MemoryCacheStats(0, 0, len(self._CACHE_DICT))
def flush_cache(self):
"""Wipes the cache dictionary clean."""
self._CACHE_DICT.clear()
def get_multi(self, keys):
"""Looks up a list of keys in cache dictionary.
Args:
keys: list(str). A list of keys (strings) to look up.
Returns:
list(str). A list of values in the cache dictionary corresponding to
the keys that are passed in.
"""
assert isinstance(keys, list)
return [self._CACHE_DICT.get(key, None) for key in keys]
def set_multi(self, key_value_mapping):
"""Sets multiple keys' values at once in the cache dictionary.
Args:
key_value_mapping: dict(str, str). Both the key and value are
strings. The value can either be a primitive binary-safe string
or the JSON-encoded string version of the object.
Returns:
bool. Whether the set action succeeded.
"""
assert isinstance(key_value_mapping, dict)
self._CACHE_DICT.update(key_value_mapping)
return True
def delete_multi(self, keys):
"""Deletes multiple keys in the cache dictionary.
Args:
keys: list(str). The keys to delete.
Returns:
int. Number of successfully deleted keys.
"""
assert all(isinstance(key, python_utils.BASESTRING) for key in keys)
keys_to_delete = [key for key in keys if key in self._CACHE_DICT]
for key in keys_to_delete:
del self._CACHE_DICT[key]
return len(keys_to_delete)
class TestBase(unittest.TestCase):
"""Base class for all tests."""
maxDiff = 2500
# A test unicode string.
UNICODE_TEST_STRING = 'unicode ¡马!'
def _get_unicode_test_string(self, suffix):
"""Returns a string that contains unicode characters and ends with the
given suffix. This is used to test that functions behave correctly when
handling strings with unicode characters.
Args:
suffix: str. The suffix to append to the UNICODE_TEST_STRING.
Returns:
str. A string that contains unicode characters and ends with the
given suffix.
"""
return '%s%s' % (self.UNICODE_TEST_STRING, suffix)
def _assert_validation_error(self, item, error_substring):
"""Checks that the given item passes default validation."""
with self.assertRaisesRegexp(utils.ValidationError, error_substring):
item.validate()
def log_line(self, line):
"""Print the line with a prefix that can be identified by the script
that calls the test.
"""
# We are using the b' prefix as all the stdouts are in bytes.
python_utils.PRINT(
b'%s%s' % (LOG_LINE_PREFIX, python_utils.convert_to_bytes(line)))
def shortDescription(self):
"""Additional information logged during unit test invocation."""
# Suppress default logging of docstrings.
return None
def get_updated_param_dict(
self, param_dict, param_changes, exp_param_specs):
"""Updates a param dict using the given list of param_changes.
Note that the list of parameter changes is ordered. Parameter changes
later in the list may depend on parameter changes that have been set
earlier in the same list.
"""
new_param_dict = copy.deepcopy(param_dict)
for param_change in param_changes:
try:
obj_type = exp_param_specs[param_change.name].obj_type
except:
raise Exception('Parameter %s not found' % param_change.name)
new_param_dict[param_change.name] = (
param_change.get_normalized_value(obj_type, new_param_dict))
return new_param_dict
def get_static_asset_filepath(self):
"""Returns filepath to the static files on disk ('' or 'build/')."""
return '' if constants.DEV_MODE else os.path.join('build')
def get_static_asset_url(self, asset_suffix):
"""Returns the relative path for the asset, appending it to the
corresponding cache slug. asset_suffix should have a leading slash.
"""
return '/assets%s%s' % (utils.get_asset_dir_prefix(), asset_suffix)
@contextlib.contextmanager
def capture_logging(self, min_level=logging.NOTSET):
"""Context manager that captures logs into a list.
Strips whitespace from messages for convenience.
https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging
Args:
min_level: int. The minimum logging level captured by the context
manager. By default, all logging levels are captured. Values
should be one of the following values from the logging module:
NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL.
Yields:
list(str). A live-feed of the logging messages captured so-far.
"""
captured_logs = []
class ListStream(python_utils.OBJECT):
"""Stream-like object that appends writes to the captured logs."""
def write(self, msg):
"""Appends stripped messages to captured logs."""
captured_logs.append(msg.strip())
def flush(self):
"""Does nothing."""
pass
list_stream_handler = logging.StreamHandler(stream=ListStream())
logger = logging.getLogger()
old_level = logger.level
logger.addHandler(list_stream_handler)
logger.setLevel(min_level)
try:
yield captured_logs
finally:
logger.setLevel(old_level)
logger.removeHandler(list_stream_handler)
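    # Illustrative usage sketch (editorial note, not part of the original
    # file; the log message shown is hypothetical):
    #
    #     with self.capture_logging(min_level=logging.INFO) as logs:
    #         logging.info('some message')
    #     self.assertEqual(logs, ['some message'])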
@contextlib.contextmanager
def swap(self, obj, attr, newvalue):
"""Swap an object's attribute value within the context of a 'with'
statement. The object can be anything that supports getattr and setattr,
such as class instances, modules, etc.
Example usage:
import math
with self.swap(math, 'sqrt', lambda x: 42):
print math.sqrt(16.0) # prints 42
print math.sqrt(16.0) # prints 4 as expected.
To mock class methods, pass the function to the classmethod decorator
first, for example:
import types
with self.swap(
SomePythonClass, 'some_classmethod',
classmethod(new_classmethod)):
NOTE: self.swap and other context managers that are created using
contextlib.contextmanager use generators that yield exactly once. This
means that you can only use them once after construction, otherwise,
the generator will immediately raise StopIteration, and contextlib will
raise a RuntimeError.
"""
original = getattr(obj, attr)
setattr(obj, attr, newvalue)
try:
yield
finally:
setattr(obj, attr, original)
@contextlib.contextmanager
def swap_to_always_return(self, obj, attr, value=None):
"""Swap obj.attr with a function that always returns the given value."""
def function_that_always_returns(*unused_args, **unused_kwargs):
"""Returns the input value."""
return value
with self.swap(obj, attr, function_that_always_returns):
yield
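# A minimal usage sketch (illustrative only): force math.sqrt to return a
# constant inside a test body.
#
#     import math
#     with self.swap_to_always_return(math, 'sqrt', value=42):
#         self.assertEqual(math.sqrt(16.0), 42)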
@contextlib.contextmanager
def swap_to_always_raise(self, obj, attr, error=Exception):
"""Swap obj.attr with a function that always raises the given error."""
def function_that_always_raises(*unused_args, **unused_kwargs):
"""Raises the input exception."""
raise error
with self.swap(obj, attr, function_that_always_raises):
yield
@contextlib.contextmanager
def swap_with_checks(
self, obj, attr, new_value, expected_args=None,
expected_kwargs=None, called=True):
"""Swap an object's function value within the context of a 'with'
statement. The object can be anything that supports getattr and setattr,
such as class instances, modules, etc.
Examples:
If you want to check subprocess.Popen is invoked twice like
`subprocess.Popen(['python'], shell=True)` and
`subprocess.Popen(['python2'], shell=False)`, you can first define the
mock function, then the swap, and just run the target function in
context, as follows:
def mock_popen(command, shell):
return
popen_swap = self.swap_with_checks(
subprocess, 'Popen', mock_popen,
expected_args=[(['python'],), (['python2'],)],
expected_kwargs=[{'shell': True}, {'shell': False}])
with popen_swap:
function_that_invokes_popen()
Args:
obj: *. The Python object whose attribute you want to swap.
attr: str. The name of the function to be swapped.
new_value: function. The new function you want to use.
expected_args: None|list(tuple). The expected args that you want
this function to be invoked with. When its value is None, args
are not checked. If the value is a list, each call's args are
compared against the first element of the list; once matched,
that tuple is removed from the list.
expected_kwargs: None|list(dict). The expected keyword args you want
this function to be invoked with. Similar to expected_args.
called: bool. Whether the function is expected to be invoked. This
will always be checked.
Yields:
context. The context with function replaced.
"""
original = getattr(obj, attr)
# The actual error message will also include the detailed assertion
# error message via the `self.longMessage` below.
msg = 'Expected checks failed when swapping out in %s.%s tests.' % (
obj.__name__, attr)
def wrapper(*args, **kwargs):
"""Wrapper function for the new value. This function will do the
check before the wrapped function is invoked. After the function
finished, the wrapper will update how many times this function is
invoked.
Args:
*args: list(*). The args passed into `attr` function.
**kwargs: dict. The key word args passed into `attr` function.
Returns:
*. Result of `new_value`.
"""
wrapper.called = True
if expected_args is not None:
self.assertEqual(args, expected_args[0], msg=msg)
expected_args.pop(0)
if expected_kwargs is not None:
self.assertEqual(kwargs, expected_kwargs[0], msg=msg)
expected_kwargs.pop(0)
result = new_value(*args, **kwargs)
return result
wrapper.called = False
setattr(obj, attr, wrapper)
error_occurred = False
try:
# This will show the detailed assert message.
self.longMessage = True
yield
except Exception:
error_occurred = True
# Raise issues thrown by the called function or assert error.
raise
finally:
setattr(obj, attr, original)
if not error_occurred:
self.assertEqual(wrapper.called, called, msg=msg)
self.assertFalse(expected_args, msg=msg)
self.assertFalse(expected_kwargs, msg=msg)
self.longMessage = False
def assertRaises(self, *args, **kwargs):
raise NotImplementedError(
'self.assertRaises should not be used in these tests. Please use '
'self.assertRaisesRegexp instead.')
def assertRaisesRegexp( # pylint: disable=keyword-arg-before-vararg
self, expected_exception, expected_regexp, callable_obj=None,
*args, **kwargs):
if not expected_regexp:
raise Exception(
'Please provide a sufficiently strong regexp string to '
'validate that the correct error is being raised.')
return super(TestBase, self).assertRaisesRegexp(
expected_exception, expected_regexp,
callable_obj=callable_obj, *args, **kwargs)
def assert_matches_regexps(self, items, regexps, full_match=False):
"""Asserts that each item matches the corresponding regexp.
If there are any missing or extra items that do not correspond to a
regexp element, then the assertion fails.
Args:
items: list(str). The string elements being matched.
regexps: list(str|RegexObject). The patterns that each item is
expected to match.
full_match: bool. Whether to require items to match exactly with the
corresponding pattern.
Raises:
AssertionError. At least one item does not match its corresponding
pattern, or the number of items does not match the number of
regexp patterns.
"""
get_match = re.match if full_match else re.search
differences = [
'~ [i=%d]:\t%r does not match: %r' % (i, item, regexp)
for i, (regexp, item) in enumerate(python_utils.ZIP(regexps, items))
if get_match(regexp, item, re.DOTALL) is None
]
if len(items) < len(regexps):
extra_regexps = regexps[len(items):]
differences.extend(
'- [i=%d]:\tmissing item expected to match: %r' % (i, regexp)
for i, regexp in enumerate(extra_regexps, start=len(items)))
if len(regexps) < len(items):
extra_items = items[len(regexps):]
differences.extend(
'+ [i=%d]:\textra item %r' % (i, item)
for i, item in enumerate(extra_items, start=len(regexps)))
if differences:
error_message = 'Lists differ:\n\t%s' % '\n\t'.join(differences)
raise AssertionError(error_message)
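# A minimal usage sketch (illustrative only): every item must match the
# regexp at the same index, and the two lists must be the same length.
#
#     self.assert_matches_regexps(
#         ['job started', 'job finished in 3s'],
#         [r'job started', r'job finished in \d+s'])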
class AppEngineTestBase(TestBase):
"""Minimal base class for tests that need Google App Engine functionality.
This class is primarily designed for unit tests in core.platform, where we
write adapters around Oppia's third-party dependencies. Generally, our unit
tests depend on stub implementations of these adapters to protect them from
platform-specific behavior. Such stubs are installed in the
GenericTestBase.run() method.
Most of the unit tests in our code base do, and should, inherit from
`GenericTestBase` to stay platform-agnostic. The platform layer itself,
however, can _not_ mock out platform-specific behavior. Those unit tests
need to interact with a real implementation. This base class provides the
bare-minimum functionality and stubs necessary to do so.
"""
# Environment values that our tests depend on.
AUTH_DOMAIN = 'example.com'
HTTP_HOST = 'localhost'
SERVER_NAME = 'localhost'
SERVER_PORT = '8080'
DEFAULT_VERSION_HOSTNAME = '%s:%s' % (HTTP_HOST, SERVER_PORT)
def __init__(self, *args, **kwargs):
super(AppEngineTestBase, self).__init__(*args, **kwargs)
# Defined outside of setUp() because we access it from methods, but can
# only install it during the run() method. Defining it in __init__
# satisfies pylint's attribute-defined-outside-init warning.
self._platform_taskqueue_services_stub = TaskqueueServicesStub(self)
def setUp(self):
super(AppEngineTestBase, self).setUp()
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.setup_env(
overwrite=True,
auth_domain=self.AUTH_DOMAIN, http_host=self.HTTP_HOST,
server_name=self.SERVER_NAME, server_port=self.SERVER_PORT,
default_version_hostname=self.DEFAULT_VERSION_HOSTNAME)
# Google App Engine service stubs.
self.testbed.init_app_identity_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_files_stub()
self.testbed.init_memcache_stub()
self.testbed.init_search_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_user_stub()
policy = (
datastore_services.make_instantaneous_global_consistency_policy())
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
# The root path tells the testbed where to find the queue.yaml file.
self.testbed.init_taskqueue_stub(root_path=os.getcwd())
self._testbed_taskqueue_stub = (
self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME))
# Set up apps for testing.
self.testapp = webtest.TestApp(main.app)
self.taskqueue_testapp = webtest.TestApp(main_taskqueue.app)
self.mail_testapp = webtest.TestApp(main_mail.app)
def tearDown(self):
self.testbed.deactivate()
super(AppEngineTestBase, self).tearDown()
def run(self, result=None):
"""Run the test, collecting the result into the specified TestResult.
Reference URL:
https://docs.python.org/3/library/unittest.html#unittest.TestCase.run
AppEngineTestBase's override of run() wraps super().run() in "swap"
contexts which stub out the platform taskqueue services.
Args:
result: TestResult | None. Holds onto the results of each test. If
None, a temporary result object is created (by calling the
defaultTestResult() method) and used instead.
"""
platform_taskqueue_services_swap = self.swap(
platform_taskqueue_services, 'create_http_task',
self._platform_taskqueue_services_stub.create_http_task)
with platform_taskqueue_services_swap:
super(AppEngineTestBase, self).run(result=result)
def _get_all_queue_names(self):
"""Returns a list of all queue names."""
return [q['name'] for q in self._testbed_taskqueue_stub.GetQueues()]
def count_jobs_in_taskqueue(self, queue_name):
"""Returns the total number of tasks in a single queue if a queue name
is specified or the entire taskqueue if no queue name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
int. The total number of tasks in a single queue or in the entire
taskqueue.
"""
return self._platform_taskqueue_services_stub.count_jobs_in_taskqueue(
queue_name=queue_name)
def process_and_flush_pending_tasks(self, queue_name=None):
"""Executes all of the tasks in a single queue if a queue name is
specified or all of the tasks in the taskqueue if no queue name is
specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
"""
self._platform_taskqueue_services_stub.process_and_flush_tasks(
queue_name=queue_name)
def get_pending_tasks(self, queue_name=None):
"""Returns a list of the tasks in a single queue if a queue name is
specified or a list of all of the tasks in the taskqueue if no queue
name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
list(Task). List of tasks in a single queue or in the entire
taskqueue.
"""
return self._platform_taskqueue_services_stub.get_pending_tasks(
queue_name=queue_name)
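# A minimal usage sketch (illustrative only) of the taskqueue helpers above:
# a test that has enqueued a single task can assert on the pending count and
# then run the queued work to completion.
#
#     self.assertEqual(self.count_jobs_in_taskqueue(queue_name=None), 1)
#     self.process_and_flush_pending_tasks()
#     self.assertEqual(self.count_jobs_in_taskqueue(queue_name=None), 0)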
def count_jobs_in_mapreduce_taskqueue(self, queue_name):
"""Counts the jobs in the given MapReduce taskqueue."""
return len(self.get_pending_mapreduce_tasks(queue_name=queue_name))
def get_pending_mapreduce_tasks(self, queue_name=None):
"""Returns the jobs in the given MapReduce taskqueue. If queue_name is
None, defaults to returning the jobs in all available queues.
"""
queue_names = None if queue_name is None else [queue_name]
return self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
def _execute_mapreduce_tasks(self, tasks):
"""Execute MapReduce queued tasks.
Args:
tasks: list(google.appengine.api.taskqueue.taskqueue.Task). The
queued tasks.
"""
for task in tasks:
if task.url == '/_ah/queue/deferred':
deferred.run(task.payload)
else:
# All other tasks will be for MapReduce or taskqueue.
params = task.payload or ''
headers = {
'Content-Length': python_utils.convert_to_bytes(len(params))
}
headers.update(
(key, python_utils.convert_to_bytes(val))
for key, val in task.headers.items())
app = (
self.taskqueue_testapp if task.url.startswith('/task') else
self.testapp)
response = app.post(
task.url, params=params, headers=headers,
expect_errors=True)
if response.status_code != 200:
raise RuntimeError('MapReduce task failed: %r' % task)
def process_and_flush_pending_mapreduce_tasks(self, queue_name=None):
"""Runs and flushes pending MapReduce tasks. If queue_name is None, does
so for all queues; otherwise, this only runs and flushes tasks for the
specified queue.
For more information on taskqueue_stub, see:
https://code.google.com/p/googleappengine/source/browse/trunk/python/google/appengine/api/taskqueue/taskqueue_stub.py
"""
queue_names = (
self._get_all_queue_names() if queue_name is None else [queue_name])
get_enqueued_tasks = lambda: list(
self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names))
# Loops until get_enqueued_tasks() returns an empty list.
for tasks in iter(get_enqueued_tasks, []):
for queue in queue_names:
self._testbed_taskqueue_stub.FlushQueue(queue)
self._execute_mapreduce_tasks(tasks)
def run_but_do_not_flush_pending_mapreduce_tasks(self):
""""Runs, but does not flush, the pending MapReduce tasks."""
queue_names = self._get_all_queue_names()
tasks = self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
for queue in queue_names:
self._testbed_taskqueue_stub.FlushQueue(queue)
self._execute_mapreduce_tasks(tasks)
class GenericTestBase(AppEngineTestBase):
"""Base test class with common/generic helper methods.
Unless a class is testing for "platform"-specific behavior (e.g., testing
third-party library code or database model implementations), always inherit
from this base class. Otherwise, inherit from unittest.TestCase (preferred)
or AppEngineTestBase if Google App Engine services/behavior is needed.
TODO(#12135): Split this enormous test base into smaller, focused pieces.
"""
# NOTE: For tests that do not/can not use the default super-admin, authors
# can override the following class-level constant.
AUTO_CREATE_DEFAULT_SUPERADMIN_USER = True
# This is the value that gets returned by default when
# app_identity.get_application_id() is called during tests.
EXPECTED_TEST_APP_ID = 'dummy-cloudsdk-project-id'
SUPER_ADMIN_EMAIL = '[email protected]'
SUPER_ADMIN_USERNAME = 'tmpsuperadm1n'
# Dummy strings representing user attributes. Note that it is up to the
# individual test to actually register these users as editors, admins, etc.
ADMIN_EMAIL = '[email protected]'
# Usernames containing the string 'admin' are reserved, so we use 'adm'
# instead.
ADMIN_USERNAME = 'adm'
MODERATOR_EMAIL = '[email protected]'
MODERATOR_USERNAME = 'moderator'
OWNER_EMAIL = '[email protected]'
OWNER_USERNAME = 'owner'
EDITOR_EMAIL = '[email protected]'
EDITOR_USERNAME = 'editor'
TOPIC_MANAGER_EMAIL = '[email protected]'
TOPIC_MANAGER_USERNAME = 'topicmanager'
VOICE_ARTIST_EMAIL = '[email protected]'
VOICE_ARTIST_USERNAME = 'voiceartist'
VIEWER_EMAIL = '[email protected]'
VIEWER_USERNAME = 'viewer'
NEW_USER_EMAIL = '[email protected]'
NEW_USER_USERNAME = 'newuser'
DEFAULT_END_STATE_NAME = 'End'
PSEUDONYMOUS_ID = 'pid_%s' % ('a' * 32)
VERSION_0_STATES_DICT = {
feconf.DEFAULT_INIT_STATE_NAME: {
'content': [{'type': 'text', 'value': ''}],
'param_changes': [],
'interaction': {
'customization_args': {},
'id': 'Continue',
'handlers': [{
'name': 'submit',
'rule_specs': [{
'dest': 'END',
'feedback': [],
'param_changes': [],
'definition': {'rule_type': 'default'},
}],
}],
},
},
}
VERSION_27_STATE_DICT = {
'content': {'content_id': 'content', 'html': ''},
'param_changes': [],
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
},
'interaction': {
'solution': {
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>Solution explanation</p>',
},
'answer_is_exclusive': False,
},
'answer_groups': [],
'default_outcome': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': '',
},
'dest': None,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': True,
},
'customization_args': {
'rows': {'value': 1},
'placeholder': {'value': 'Enter text here'},
},
'confirmed_unclassified_answers': [],
'id': 'TextInput',
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hint 1</p>',
},
}],
},
'classifier_model_id': None,
}
VERSION_21_STATE_DICT = {
'END': {
'classifier_model_id': None,
'content': {
'content_id': 'content',
'html': 'Congratulations, you have finished!',
},
'content_ids_to_audio_translations': {'content': {}},
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {
'recommendedExplorationIds': {'value': []},
},
'default_outcome': None,
'hints': [],
'id': 'EndExploration',
'solution': None,
},
'param_changes': [],
},
'Introduction': {
'classifier_model_id': None,
'content': {'content_id': 'content', 'html': ''},
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'feedback_1': {},
},
'interaction': {
'answer_groups': [{
'outcome': {
'dest': 'END',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Correct!</p>',
},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'rule_specs': [{
'inputs': {'x': 'InputString'},
'rule_type': 'Equals',
}],
'tagged_misconception_id': None,
'training_data': ['answer1', 'answer2', 'answer3'],
}],
'confirmed_unclassified_answers': [],
'customization_args': {
'placeholder': {'value': ''},
'rows': {'value': 1},
},
'default_outcome': {
'dest': 'Introduction',
'feedback': {'content_id': 'default_outcome', 'html': ''},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'hints': [],
'id': 'TextInput',
'solution': None,
},
'param_changes': [],
},
}
VERSION_1_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_2_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_3_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_4_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math math_content-with-value="{'
'&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, '
'&quot;svg_filename&quot;: &quot;&quot;'
'}">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_1_SUBTOPIC_DICT = {
'skill_ids': ['skill_1'],
'id': 1,
'title': 'A subtitle',
}
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with YAML generation tests. The
# indentations are also important, since they are used to define nesting
# (just like in Python).
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
SAMPLE_UNTITLED_YAML_CONTENT = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
def run(self, result=None):
"""Run the test, collecting the result into the specified TestResult.
Reference URL:
https://docs.python.org/3/library/unittest.html#unittest.TestCase.run
GenericTestBase's override of run() wraps super().run() in swap
contexts to mock out the cache and taskqueue services.
Args:
result: TestResult | None. Holds onto the results of each test. If
None, a temporary result object is created (by calling the
defaultTestResult() method) and used instead.
"""
memory_cache_services_stub = MemoryCacheServicesStub()
memory_cache_services_stub.flush_cache()
es_stub = ElasticSearchStub()
es_stub.reset()
with contextlib2.ExitStack() as stack:
stack.callback(AuthServicesStub.install_stub(self))
stack.enter_context(self.swap(
elastic_search_services.ES.indices, 'create',
es_stub.mock_create_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'index',
es_stub.mock_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'exists',
es_stub.mock_exists))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete',
es_stub.mock_delete))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete_by_query',
es_stub.mock_delete_by_query))
stack.enter_context(self.swap(
elastic_search_services.ES, 'search',
es_stub.mock_search))
stack.enter_context(self.swap(
memory_cache_services, 'flush_cache',
memory_cache_services_stub.flush_cache))
stack.enter_context(self.swap(
memory_cache_services, 'get_multi',
memory_cache_services_stub.get_multi))
stack.enter_context(self.swap(
memory_cache_services, 'set_multi',
memory_cache_services_stub.set_multi))
stack.enter_context(self.swap(
memory_cache_services, 'get_memory_cache_stats',
memory_cache_services_stub.get_memory_cache_stats))
stack.enter_context(self.swap(
memory_cache_services, 'delete_multi',
memory_cache_services_stub.delete_multi))
super(GenericTestBase, self).run(result=result)
def setUp(self):
super(GenericTestBase, self).setUp()
if self.AUTO_CREATE_DEFAULT_SUPERADMIN_USER:
self.signup_superadmin_user()
def tearDown(self):
datastore_services.delete_multi(
datastore_services.query_everything().iter(keys_only=True))
super(GenericTestBase, self).tearDown()
def login(self, email, is_super_admin=False):
"""Sets the environment variables to simulate a login.
Args:
email: str. The email of the user who is to be logged in.
is_super_admin: bool. Whether the user is a super admin.
"""
self.testbed.setup_env(
overwrite=True,
user_email=email, user_id=self.get_auth_id_from_email(email),
user_is_admin=('1' if is_super_admin else '0'))
def logout(self):
"""Simulates a logout by resetting the environment variables."""
self.testbed.setup_env(
overwrite=True, user_email='', user_id='', user_is_admin='0')
@contextlib.contextmanager
def mock_datetime_utcnow(self, mocked_datetime):
"""Mocks response from datetime.datetime.utcnow method.
Example usage:
import datetime
mocked_datetime_utcnow = (
datetime.datetime.utcnow() - datetime.timedelta(days=1))
with self.mock_datetime_utcnow(mocked_datetime_utcnow):
print datetime.datetime.utcnow() # prints time reduced by 1 day
print datetime.datetime.utcnow() # prints current time.
Args:
mocked_datetime: datetime.datetime. The datetime which will be used
instead of the current UTC datetime.
Yields:
None. Empty yield statement.
"""
with datastore_services.mock_datetime_for_datastore(mocked_datetime):
yield
@contextlib.contextmanager
def login_context(self, email, is_super_admin=False):
"""Log in with the given email under the context of a 'with' statement.
Args:
email: str. An email associated with a user account.
is_super_admin: bool. Whether the user is a super admin.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
"""
self.login(email, is_super_admin=is_super_admin)
try:
yield self.get_user_id_from_email(email)
finally:
self.logout()
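# A minimal usage sketch (illustrative only): perform a request while a user
# is logged in. '/url' is a placeholder endpoint.
#
#     with self.login_context(self.OWNER_EMAIL) as owner_id:
#         response_dict = self.get_json('/url')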
@contextlib.contextmanager
def super_admin_context(self):
"""Log in as a global admin under the context of a 'with' statement.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
"""
email = self.SUPER_ADMIN_EMAIL
with self.login_context(email, is_super_admin=True) as user_id:
yield user_id
def signup(self, email, username):
"""Complete the signup process for the user with the given username.
Args:
email: str. Email of the given user.
username: str. Username of the given user.
"""
user_services.create_new_user(self.get_auth_id_from_email(email), email)
with self.login_context(email), requests_mock.Mocker() as m:
# We mock out all HTTP requests while trying to signup to avoid
# calling out to real backend services.
m.request(requests_mock.ANY, requests_mock.ANY)
response = self.get_html_response(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
response = self.testapp.post(feconf.SIGNUP_DATA_URL, params={
'csrf_token': self.get_new_csrf_token(),
'payload': json.dumps(
{'username': username, 'agreed_to_terms': True}),
})
self.assertEqual(response.status_int, 200)
def signup_superadmin_user(self):
"""Signs up a superadmin user. Must be called at the end of setUp()."""
self.signup(self.SUPER_ADMIN_EMAIL, self.SUPER_ADMIN_USERNAME)
def set_config_property(self, config_obj, new_config_value):
"""Sets a given configuration object's value to the new value specified
using a POST request.
"""
with self.super_admin_context():
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_obj.name: new_config_value,
},
}, csrf_token=self.get_new_csrf_token())
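# A minimal usage sketch (illustrative only). 'some_config_property' stands
# in for a real config property object defined elsewhere in the codebase.
#
#     self.set_config_property(some_config_property, new_config_value=True)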
def set_user_role(self, username, user_role):
"""Sets the given role for this user.
Args:
username: str. Username of the given user.
user_role: str. Role of the given user.
"""
with self.super_admin_context():
self.post_json('/adminrolehandler', {
'username': username,
'role': user_role,
}, csrf_token=self.get_new_csrf_token())
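# A minimal usage sketch (illustrative only): register a user and grant a
# role using the helpers above.
#
#     self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
#     self.set_user_role(self.MODERATOR_USERNAME, feconf.ROLE_ID_MODERATOR)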
def set_admins(self, admin_usernames):
"""Sets role of given users as ADMIN.
Args:
admin_usernames: list(str). List of usernames.
"""
for name in admin_usernames:
self.set_user_role(name, feconf.ROLE_ID_ADMIN)
def set_topic_managers(self, topic_manager_usernames):
"""Sets role of given users as TOPIC_MANAGER.
Args:
topic_manager_usernames: list(str). List of usernames.
"""
for name in topic_manager_usernames:
self.set_user_role(name, feconf.ROLE_ID_TOPIC_MANAGER)
def set_moderators(self, moderator_usernames):
"""Sets role of given users as MODERATOR.
Args:
moderator_usernames: list(str). List of usernames.
"""
for name in moderator_usernames:
self.set_user_role(name, feconf.ROLE_ID_MODERATOR)
def set_banned_users(self, banned_usernames):
"""Sets role of given users as BANNED_USER.
Args:
banned_usernames: list(str). List of usernames.
"""
for name in banned_usernames:
self.set_user_role(name, feconf.ROLE_ID_BANNED_USER)
def set_collection_editors(self, collection_editor_usernames):
"""Sets role of given users as COLLECTION_EDITOR.
Args:
collection_editor_usernames: list(str). List of usernames.
"""
for name in collection_editor_usernames:
self.set_user_role(name, feconf.ROLE_ID_COLLECTION_EDITOR)
def get_user_id_from_email(self, email):
"""Gets the user ID corresponding to the given email.
Args:
email: str. A valid email stored in the App Engine database.
Returns:
str|None. ID of the user possessing the given email, or None if
the user does not exist.
"""
user_settings = user_services.get_user_settings_by_auth_id(
self.get_auth_id_from_email(email))
return user_settings and user_settings.user_id
@classmethod
def get_auth_id_from_email(cls, email):
"""Returns a mock auth ID corresponding to the given email.
This method can use any algorithm to produce results as long as, during
the runtime of each test case/method, it is:
1. Pure (same input always returns the same output).
2. One-to-one (no two distinct inputs return the same output).
3. An integer byte-string (integers are always valid in auth IDs).
Args:
email: str. The email address of the user.
Returns:
bytes. The mock auth ID of a user possessing the given email.
"""
# Although the hash function doesn't guarantee a one-to-one mapping, in
# practice it is sufficient for our tests. We make it a positive integer
# because those are always valid auth IDs.
return python_utils.convert_to_bytes(abs(hash(email)))
def _get_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
"""Get a response, transformed to a Python object.
Args:
url: str. The URL to fetch the response.
expected_content_type: str. The content type to expect.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will be
200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
# This swap is required to ensure that the templates are fetched from
# source directory instead of webpack_bundles since webpack_bundles is
# only produced after webpack compilation which is not performed during
# backend tests.
with self.swap(base, 'load_template', mock_load_template):
response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
if expect_errors:
self.assertTrue(response.status_int >= 400)
else:
self.assertTrue(200 <= response.status_int < 400)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(response.status_int, expected_status_int)
self.assertEqual(response.content_type, expected_content_type)
return response
def get_html_response(self, url, params=None, expected_status_int=200):
"""Get a HTML response, transformed to a Python object.
Args:
url: str. The URL to fetch the response.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will
be 200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
return self._get_response(
url, 'text/html', params=params,
expected_status_int=expected_status_int)
def get_custom_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
"""Get a response other than HTML or JSON as a Python object.
Args:
url: str. The URL to fetch the response.
expected_content_type: str. The content type to expect.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will be
200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
self.assertNotIn(
expected_content_type, ['text/html', 'application/json'])
return self._get_response(
url, expected_content_type, params=params,
expected_status_int=expected_status_int)
def get_response_without_checking_for_errors(
self, url, expected_status_int_list, params=None):
"""Get a response, transformed to a Python object and checks for a list
of status codes.
Args:
url: str. The URL to fetch the response.
expected_status_int_list: list(int). A list of integer status code
to expect.
params: dict. A dictionary that will be encoded into a query string.
Returns:
webtest.TestResponse. The test response.
"""
if params is not None:
self.assertIsInstance(
params, dict,
msg='Expected params to be a dict, received %s' % params)
# This swap is required to ensure that the templates are fetched from
# source directory instead of webpack_bundles since webpack_bundles is
# only produced after webpack compilation which is not performed during
# backend tests.
with self.swap(base, 'load_template', mock_load_template):
response = self.testapp.get(url, params=params, expect_errors=True)
self.assertIn(response.status_int, expected_status_int_list)
return response
def _parse_json_response(self, json_response, expect_errors):
"""Convert a JSON server response to an object (such as a dict)."""
if expect_errors:
self.assertTrue(json_response.status_int >= 400)
else:
self.assertTrue(200 <= json_response.status_int < 400)
self.assertEqual(json_response.content_type, 'application/json')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(self, url, params=None, expected_status_int=200):
"""Get a JSON response, transformed to a Python object."""
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
json_response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def post_json(
self, url, payload, csrf_token=None, expected_status_int=200,
upload_files=None):
"""Post an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self._send_post_request(
self.testapp, url, data, expect_errors,
expected_status_int=expected_status_int, upload_files=upload_files)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
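# A minimal usage sketch (illustrative only): POST a JSON payload with a
# fresh CSRF token. '/url' is a placeholder endpoint.
#
#     csrf_token = self.get_new_csrf_token()
#     response_dict = self.post_json(
#         '/url', {'key': 'value'}, csrf_token=csrf_token)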
def delete_json(self, url, params='', expected_status_int=200):
"""Delete object on the server using a JSON call."""
if params:
self.assertIsInstance(
params, dict,
msg='Expected params to be a dict, received %s' % params)
expect_errors = expected_status_int >= 400
json_response = self.testapp.delete(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def _send_post_request(
self, app, url, data, expect_errors, expected_status_int=200,
upload_files=None, headers=None):
"""Sends a post request with the data provided to the url specified.
Args:
app: TestApp. The WSGI application which receives the request and
produces response.
url: str. The URL to send the POST request to.
data: *. To be put in the body of the request. If params is an
iterator, it will be urlencoded. If it is a string, it will not
be encoded, but placed in the body directly. Can be a
collections.OrderedDict with webtest.forms.Upload fields
included.
expect_errors: bool. Whether errors are expected.
expected_status_int: int. The expected status code.
upload_files: list(tuple). List of
(fieldname, filename, file_content) tuples. Can also provide
just (fieldname, filename), in which case the file contents
will be read from disk.
headers: dict(str, *). Extra headers to send.
Returns:
webtest.TestResponse. The response of the POST request.
"""
# Convert the files to bytes.
if upload_files is not None:
upload_files = tuple(
tuple(python_utils.convert_to_bytes(f) for f in upload_file)
for upload_file in upload_files)
return app.post(
url, params=data, headers=headers, status=expected_status_int,
upload_files=upload_files, expect_errors=expect_errors)
def post_email(
self, recipient_email, sender_email, subject, body, html_body=None,
expect_errors=False, expected_status_int=200):
"""Post an email from the sender to the recipient.
Args:
recipient_email: str. The email of the recipient.
sender_email: str. The email of the sender.
subject: str. The subject of the email.
body: str. The body of the email.
html_body: str. The HTML body of the email.
expect_errors: bool. Whether errors are expected.
expected_status_int: int. The expected status code of the JSON
response.
Returns:
json. A JSON response generated by _send_post_request function.
"""
email = mail.EmailMessage(
sender=sender_email, to=recipient_email, subject=subject, body=body)
if html_body is not None:
email.html = html_body
mime_email = email.to_mime_message()
headers = {
'Content-Type': mime_email.get_content_type(),
}
data = mime_email.as_string()
incoming_email_url = '/_ah/mail/%s' % recipient_email
return self._send_post_request(
self.mail_testapp, incoming_email_url, data, expect_errors,
headers=headers, expected_status_int=expected_status_int)
def post_task(
self, url, payload, headers, csrf_token=None, expect_errors=False,
expected_status_int=200):
"""Posts an object to the server by JSON with the specific headers
specified; return the received object.
"""
if csrf_token:
payload['csrf_token'] = csrf_token
return self.taskqueue_testapp.post(
url, params=json.dumps(payload), headers=headers,
status=expected_status_int, expect_errors=expect_errors,
content_type='application/json')
def put_json(self, url, payload, csrf_token=None, expected_status_int=200):
"""PUT an object to the server with JSON and return the response."""
params = {'payload': json.dumps(payload)}
if csrf_token:
params['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self.testapp.put(
url, params=params, expect_errors=expect_errors)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def get_new_csrf_token(self):
"""Generates CSRF token for test."""
response = self.get_json('/csrfhandler')
return response['token']
def save_new_default_exploration(
self, exploration_id, owner_id, title='A title'):
"""Saves a new default exploration written by owner_id.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
title: str. The title of the exploration.
Returns:
Exploration. The exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category='Algebra')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def set_interaction_for_state(self, state, interaction_id):
"""Sets the interaction_id, sets the fully populated default interaction
customization arguments, and increments next_content_id_index as needed.
Args:
state: State. The state domain object to set the interaction for.
interaction_id: str. The interaction id to set. Also sets the
default customization args for the given interaction id.
"""
# We wrap next_content_id_index in a dict so that modifying it in the
# inner function modifies the value.
next_content_id_index_dict = {'value': state.next_content_id_index}
def traverse_schema_and_assign_content_ids(value, schema, contentId):
"""Generates content_id from recursively traversing the schema, and
assigning to the current value.
Args:
value: *. The current traversed value in customization
arguments.
schema: dict. The current traversed schema.
contentId: str. The content_id generated so far.
"""
is_subtitled_html_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML)
is_subtitled_unicode_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE)
if is_subtitled_html_spec or is_subtitled_unicode_spec:
value['content_id'] = '%s_%i' % (
contentId, next_content_id_index_dict['value'])
next_content_id_index_dict['value'] += 1
elif schema['type'] == schema_utils.SCHEMA_TYPE_LIST:
for x in value:
traverse_schema_and_assign_content_ids(
x, schema['items'], contentId)
elif schema['type'] == schema_utils.SCHEMA_TYPE_DICT:
for schema_property in schema['properties']:
traverse_schema_and_assign_content_ids(
value[schema_property['name']],
schema_property['schema'],
'%s_%s' % (contentId, schema_property['name']))
interaction = (
interaction_registry.Registry.get_interaction_by_id(interaction_id))
ca_specs = interaction.customization_arg_specs
customization_args = {}
for ca_spec in ca_specs:
ca_name = ca_spec.name
ca_value = ca_spec.default_value
traverse_schema_and_assign_content_ids(
ca_value, ca_spec.schema, 'ca_%s' % ca_name)
customization_args[ca_name] = {'value': ca_value}
state.update_interaction_id(interaction_id)
state.update_interaction_customization_args(customization_args)
state.update_next_content_id_index(next_content_id_index_dict['value'])
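# A minimal usage sketch (illustrative only): give the initial state of an
# exploration a TextInput interaction with fully-populated default
# customization args.
#
#     init_state = exploration.states[exploration.init_state_name]
#     self.set_interaction_for_state(init_state, 'TextInput')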
def save_new_valid_exploration(
self, exploration_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE, end_state_name=None,
interaction_id='TextInput', correctness_feedback_enabled=False):
"""Saves a new strictly-validated exploration.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
title: str. The title of the exploration.
category: str. The category this exploration belongs to.
objective: str. The objective of this exploration.
language_code: str. The language_code of this exploration.
end_state_name: str. The name of the end state for the exploration.
interaction_id: str. The id of the interaction.
correctness_feedback_enabled: bool. Whether correctness feedback is
enabled for the exploration.
Returns:
Exploration. The exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category=category,
language_code=language_code)
self.set_interaction_for_state(
exploration.states[exploration.init_state_name], interaction_id)
exploration.objective = objective
exploration.correctness_feedback_enabled = correctness_feedback_enabled
# If an end state name is provided, add terminal node with that name.
if end_state_name is not None:
exploration.add_states([end_state_name])
end_state = exploration.states[end_state_name]
self.set_interaction_for_state(end_state, 'EndExploration')
end_state.update_interaction_default_outcome(None)
# Link first state to ending state (to maintain validity).
init_state = exploration.states[exploration.init_state_name]
init_interaction = init_state.interaction
init_interaction.default_outcome.dest = end_state_name
if correctness_feedback_enabled:
init_interaction.default_outcome.labelled_as_correct = True
exp_services.save_new_exploration(owner_id, exploration)
return exploration
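# A minimal usage sketch (illustrative only): create a small two-state
# exploration owned by owner_id that ends in a terminal state.
#
#     exploration = self.save_new_valid_exploration(
#         'exp_0', owner_id, title='Fractions', end_state_name='End')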
def save_new_linear_exp_with_state_names_and_interactions(
self, exploration_id, owner_id, state_names, interaction_ids,
title='A title', category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new strictly-validated exploration with a sequence of states.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
state_names: list(str). The names of states to be linked
sequentially in the exploration. Must be a non-empty list and
contain no duplicates.
interaction_ids: list(str). The names of the interaction ids to be
assigned to each state. Values will be cycled, so it doesn't
need to be the same size as state_names, but it must be
non-empty.
title: str. The title of the exploration.
category: str. The category this exploration belongs to.
objective: str. The objective of this exploration.
language_code: str. The language_code of this exploration.
Returns:
Exploration. The exploration domain object.
"""
if not state_names:
raise ValueError('must provide at least one state name')
if not interaction_ids:
raise ValueError('must provide at least one interaction type')
interaction_ids = itertools.cycle(interaction_ids)
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, init_state_name=state_names[0],
category=category, objective=objective, language_code=language_code)
exploration.add_states(state_names[1:])
for from_state_name, dest_state_name in (
python_utils.ZIP(state_names[:-1], state_names[1:])):
from_state = exploration.states[from_state_name]
self.set_interaction_for_state(
from_state, python_utils.NEXT(interaction_ids))
from_state.interaction.default_outcome.dest = dest_state_name
end_state = exploration.states[state_names[-1]]
self.set_interaction_for_state(end_state, 'EndExploration')
end_state.update_interaction_default_outcome(None)
exp_services.save_new_exploration(owner_id, exploration)
return exploration
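# A minimal usage sketch (illustrative only): the interaction ids are cycled
# across the named states, and the last state becomes terminal.
#
#     exploration = self.save_new_linear_exp_with_state_names_and_interactions(
#         'exp_1', owner_id, ['First', 'Second', 'Last'], ['TextInput'])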
def save_new_exp_with_states_schema_v0(self, exp_id, user_id, title):
"""Saves a new default exploration with a default version 0 states dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
title: str. The title of the exploration.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title=title,
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=0,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
states=self.VERSION_0_STATES_DICT, param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'%s\'.' % title
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title=title, category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
# Create an ExplorationIssues model to match the behavior of creating
# new explorations.
stats_services.create_exp_issues_for_new_exploration(exp_id, 1)
def save_new_exp_with_custom_states_schema_version(
self, exp_id, user_id, states_dict, version):
"""Saves a new default exploration with the given version of state dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
states_dict: dict. The dict representation of all the states.
version: int. Custom states schema version.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title='title',
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=version,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME, states=states_dict,
param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'title\'.'
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title='title', category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
def save_new_exp_with_states_schema_v21(self, exp_id, user_id, title):
"""Saves a new default exploration with a default version 21 states
dictionary. Version 21 is where training data of exploration is stored
with the states dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
title: str. The title of the exploration.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title=title,
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=21,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
states=self.VERSION_21_STATE_DICT, param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'%s\'.' % title
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title=title, category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
def publish_exploration(self, owner_id, exploration_id):
"""Publish the exploration with the given exploration_id.
Args:
owner_id: str. The user_id of the owner of the exploration.
exploration_id: str. The ID of the new exploration.
"""
committer = user_services.UserActionsInfo(owner_id)
rights_manager.publish_exploration(committer, exploration_id)
def save_new_default_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default collection written by owner_id.
Args:
collection_id: str. The id of the new default collection.
owner_id: str. The user_id of the creator of the collection.
title: str. The title of the collection.
category: str. The category this collection belongs to.
objective: str. The objective of this collection.
language_code: str. The language_code of this collection.
Returns:
Collection. The collection domain object.
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
collection_services.save_new_collection(owner_id, collection)
return collection
def save_new_valid_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE,
exploration_id='an_exploration_id',
end_state_name=DEFAULT_END_STATE_NAME):
"""Creates an Oppia collection and adds a node saving the exploration
details.
Args:
collection_id: str. ID for the collection to be created.
owner_id: str. The user_id of the creator of the collection.
title: str. Title for the collection.
category: str. The category of the exploration.
objective: str. Objective for the exploration.
language_code: str. The language code for the exploration.
exploration_id: str. The exploration_id for the Oppia exploration.
end_state_name: str. The name of the end state for the exploration.
Returns:
Collection. A newly-created collection containing the corresponding
exploration details.
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
# Check whether exploration with given exploration_id exists or not.
exploration = (
exp_fetchers.get_exploration_by_id(exploration_id, strict=False))
if exploration is None:
exploration = self.save_new_valid_exploration(
exploration_id, owner_id, title=title, category=category,
objective=objective, end_state_name=end_state_name)
collection.add_node(exploration.id)
collection_services.save_new_collection(owner_id, collection)
return collection
def publish_collection(self, owner_id, collection_id):
"""Publish the collection with the given collection_id.
Args:
owner_id: str. The user_id of the owner of the collection.
collection_id: str. ID of the collection to be published.
"""
committer = user_services.UserActionsInfo(owner_id)
rights_manager.publish_collection(committer, collection_id)
def save_new_story(
self, story_id, owner_id, corresponding_topic_id,
title='Title', description='Description', notes='Notes',
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='title', meta_tag_content='story meta tag content'):
"""Creates an Oppia Story and saves it.
NOTE: Callers are responsible for ensuring that the
'corresponding_topic_id' provided is valid, unless a test explicitly
requires it to be invalid.
Args:
story_id: str. ID for the story to be created.
owner_id: str. The user_id of the creator of the story.
title: str. The title of the story.
description: str. The high level description of the story.
            notes: str. A set of notes that describe the characters,
                main storyline, and setting.
corresponding_topic_id: str. The id of the topic to which the story
belongs.
language_code: str. The ISO 639-1 code for the language this story
is written in.
url_fragment: str. The url fragment of the story.
meta_tag_content: str. The meta tag content of the story.
Returns:
Story. A newly-created story.
"""
story = story_domain.Story.create_default_story(
story_id, title, description, corresponding_topic_id, url_fragment)
story.title = title
story.description = description
story.notes = notes
story.language_code = language_code
story.url_fragment = url_fragment
story.meta_tag_content = meta_tag_content
story_services.save_new_story(owner_id, story)
return story
def save_new_story_with_story_contents_schema_v1(
self, story_id, thumbnail_filename, thumbnail_bg_color,
owner_id, title, description, notes, corresponding_topic_id,
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='story-frag',
meta_tag_content='story meta tag content'):
"""Saves a new story with a default version 1 story contents data dict.
This function should only be used for creating stories in tests
involving migration of datastore stories that use an old story contents
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating stories. This is because
the latter approach would result in a story with the *current* story
contents schema version.
Args:
story_id: str. ID for the story to be created.
thumbnail_filename: str|None. Thumbnail filename for the story.
thumbnail_bg_color: str|None. Thumbnail background color for the
story.
owner_id: str. The user_id of the creator of the story.
title: str. The title of the story.
description: str. The high level description of the story.
            notes: str. A set of notes that describe the characters, main
                storyline, and setting.
corresponding_topic_id: str. The id of the topic to which the story
belongs.
language_code: str. The ISO 639-1 code for the language this story
is written in.
url_fragment: str. The URL fragment for the story.
meta_tag_content: str. The meta tag content of the story.
"""
story_model = story_models.StoryModel(
id=story_id, thumbnail_filename=thumbnail_filename,
thumbnail_bg_color=thumbnail_bg_color, description=description,
title=title, language_code=language_code,
story_contents_schema_version=1, notes=notes,
corresponding_topic_id=corresponding_topic_id,
story_contents=self.VERSION_1_STORY_CONTENTS_DICT,
url_fragment=url_fragment, meta_tag_content=meta_tag_content)
commit_message = 'New story created with title \'%s\'.' % title
story_model.commit(
owner_id, commit_message,
[{'cmd': story_domain.CMD_CREATE_NEW, 'title': title}])
def save_new_subtopic(self, subtopic_id, owner_id, topic_id):
"""Creates an Oppia subtopic and saves it.
Args:
subtopic_id: str. ID for the subtopic to be created.
owner_id: str. The user_id of the creator of the topic.
topic_id: str. ID for the topic that the subtopic belongs to.
Returns:
SubtopicPage. A newly-created subtopic.
"""
subtopic_page = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
subtopic_id, topic_id))
subtopic_changes = [
subtopic_page_domain.SubtopicPageChange({
'cmd': subtopic_page_domain.CMD_CREATE_NEW,
'topic_id': topic_id,
'subtopic_id': subtopic_id,
})
]
subtopic_page_services.save_subtopic_page(
owner_id, subtopic_page, 'Create new subtopic', subtopic_changes)
return subtopic_page
def save_new_topic(
self, topic_id, owner_id, name='topic', abbreviated_name='topic',
url_fragment='topic',
thumbnail_filename='topic.svg',
thumbnail_bg_color=(
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0]),
description='description', canonical_story_ids=None,
additional_story_ids=None, uncategorized_skill_ids=None,
subtopics=None, next_subtopic_id=0,
language_code=constants.DEFAULT_LANGUAGE_CODE,
meta_tag_content='topic meta tag content',
practice_tab_is_displayed=False,
page_title_fragment_for_web='topic page title'):
"""Creates an Oppia Topic and saves it.
Args:
topic_id: str. ID for the topic to be created.
owner_id: str. The user_id of the creator of the topic.
name: str. The name of the topic.
abbreviated_name: str. The abbreviated name of the topic.
url_fragment: str. The url fragment of the topic.
thumbnail_filename: str|None. The thumbnail filename of the topic.
thumbnail_bg_color: str|None. The thumbnail background color of the
topic.
description: str. The description of the topic.
canonical_story_ids: list(str). The list of ids of canonical stories
that are part of the topic.
additional_story_ids: list(str). The list of ids of additional
stories that are part of the topic.
uncategorized_skill_ids: list(str). The list of ids of skills that
are not part of any subtopic.
subtopics: list(Subtopic). The different subtopics that are part of
this topic.
next_subtopic_id: int. The id for the next subtopic.
language_code: str. The ISO 639-1 code for the language this topic
is written in.
meta_tag_content: str. The meta tag content for the topic.
practice_tab_is_displayed: bool. Whether the practice tab should be
displayed.
page_title_fragment_for_web: str. The page title fragment for the
topic.
Returns:
Topic. A newly-created topic.
"""
canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (canonical_story_ids or [])
]
additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (additional_story_ids or [])
]
uncategorized_skill_ids = uncategorized_skill_ids or []
subtopics = subtopics or []
topic = topic_domain.Topic(
topic_id, name, abbreviated_name, url_fragment, thumbnail_filename,
thumbnail_bg_color, description, canonical_story_references,
additional_story_references, uncategorized_skill_ids, subtopics,
feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION, next_subtopic_id,
language_code, 0, feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION,
meta_tag_content, practice_tab_is_displayed,
page_title_fragment_for_web)
topic_services.save_new_topic(owner_id, topic)
return topic
def save_new_topic_with_subtopic_schema_v1(
self, topic_id, owner_id, name, abbreviated_name, url_fragment,
canonical_name, description, thumbnail_filename, thumbnail_bg_color,
canonical_story_references, additional_story_references,
uncategorized_skill_ids, next_subtopic_id,
language_code=constants.DEFAULT_LANGUAGE_CODE,
meta_tag_content='topic meta tag content',
practice_tab_is_displayed=False,
page_title_fragment_for_web='topic page title'):
"""Saves a new topic with a default version 1 subtopic data dict.
This function should only be used for creating topics in tests involving
migration of datastore topics that use an old subtopic schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating topics. This is because
the latter approach would result in a topic with the *current* subtopic
schema version.
Args:
topic_id: str. ID for the topic to be created.
owner_id: str. The user_id of the creator of the topic.
name: str. The name of the topic.
abbreviated_name: str. The abbreviated name of the topic.
url_fragment: str. The url fragment of the topic.
canonical_name: str. The canonical name (lowercase) of the topic.
description: str. The description of the topic.
thumbnail_filename: str. The thumbnail file name of the topic.
thumbnail_bg_color: str. The thumbnail background color of the
topic.
canonical_story_references: list(StoryReference). A set of story
reference objects representing the canonical stories that are
part of this topic.
additional_story_references: list(StoryReference). A set of story
                reference objects representing the additional stories that are
part of this topic.
uncategorized_skill_ids: list(str). The list of ids of skills that
are not part of any subtopic.
next_subtopic_id: int. The id for the next subtopic.
language_code: str. The ISO 639-1 code for the language this topic
is written in.
meta_tag_content: str. The meta tag content for the topic.
practice_tab_is_displayed: bool. Whether the practice tab should be
displayed.
page_title_fragment_for_web: str. The page title fragment for the
topic.
"""
topic_rights_model = topic_models.TopicRightsModel(
id=topic_id, manager_ids=[], topic_is_published=True)
topic_model = topic_models.TopicModel(
id=topic_id, name=name, abbreviated_name=abbreviated_name,
url_fragment=url_fragment, thumbnail_filename=thumbnail_filename,
thumbnail_bg_color=thumbnail_bg_color,
canonical_name=canonical_name, description=description,
language_code=language_code,
canonical_story_references=canonical_story_references,
additional_story_references=additional_story_references,
uncategorized_skill_ids=uncategorized_skill_ids,
subtopic_schema_version=1,
story_reference_schema_version=(
feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
next_subtopic_id=next_subtopic_id,
subtopics=[self.VERSION_1_SUBTOPIC_DICT],
meta_tag_content=meta_tag_content,
practice_tab_is_displayed=practice_tab_is_displayed,
page_title_fragment_for_web=page_title_fragment_for_web)
commit_message = 'New topic created with name \'%s\'.' % name
topic_rights_model.commit(
committer_id=owner_id,
commit_message='Created new topic rights',
commit_cmds=[{'cmd': topic_domain.CMD_CREATE_NEW}])
topic_model.commit(
owner_id, commit_message,
[{'cmd': topic_domain.CMD_CREATE_NEW, 'name': name}])
def save_new_question(
self, question_id, owner_id, question_state_data,
linked_skill_ids, inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Creates an Oppia Question and saves it.
Args:
question_id: str. ID for the question to be created.
owner_id: str. The id of the user creating the question.
question_state_data: State. The state data for the question.
linked_skill_ids: list(str). List of skill IDs linked to the
question.
inapplicable_skill_misconception_ids: list(str). List of skill
misconceptions ids that are not applicable to the question.
language_code: str. The ISO 639-1 code for the language this
question is written in.
Returns:
Question. A newly-created question.
"""
        # The `or []` below is needed because default arguments should not be
        # of a mutable type such as a list.
question = question_domain.Question(
question_id, question_state_data,
feconf.CURRENT_STATE_SCHEMA_VERSION, language_code, 0,
linked_skill_ids, inapplicable_skill_misconception_ids or [])
question_services.add_question(owner_id, question)
return question
def save_new_question_with_state_data_schema_v27(
self, question_id, owner_id, linked_skill_ids,
inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default question with a default version 27 state data
dict.
This function should only be used for creating questions in tests
involving migration of datastore questions that use an old state data
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating questions. This is because
        the latter approach would result in a question with the *current* state
data schema version.
Args:
question_id: str. ID for the question to be created.
owner_id: str. The id of the user creating the question.
linked_skill_ids: list(str). The skill IDs linked to the question.
inapplicable_skill_misconception_ids: list(str). List of skill
misconceptions ids that are not applicable to the question.
language_code: str. The ISO 639-1 code for the language this
question is written in.
"""
        # The `or []` below is needed because default arguments should not be
        # of a mutable type such as a list.
question_model = question_models.QuestionModel(
id=question_id, question_state_data=self.VERSION_27_STATE_DICT,
language_code=language_code, version=1,
question_state_data_schema_version=27,
linked_skill_ids=linked_skill_ids,
inapplicable_skill_misconception_ids=(
inapplicable_skill_misconception_ids or []))
question_model.commit(
owner_id, 'New question created',
[{'cmd': question_domain.CMD_CREATE_NEW}])
def save_new_question_suggestion_with_state_data_schema_v27(
self, author_id, skill_id, suggestion_id=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new question suggestion with a default version 27 state data
dict.
        This function should only be used for creating question suggestions in
tests involving migration of datastore question suggestions that use an
old state data schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating questions. This is because
        the latter approach would result in a question with the *current* state
data schema version.
"""
score_category = (
suggestion_models.SCORE_TYPE_QUESTION +
suggestion_models.SCORE_CATEGORY_DELIMITER + skill_id)
change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self.VERSION_27_STATE_DICT,
'question_state_data_schema_version': 27,
'language_code': language_code,
'linked_skill_ids': [skill_id],
'inapplicable_skill_misconception_ids': []
},
'skill_id': skill_id,
'skill_difficulty': 0.3
}
if suggestion_id is None:
suggestion_id = (
feedback_models.GeneralFeedbackThreadModel.
generate_new_thread_id(
feconf.ENTITY_TYPE_SKILL, skill_id))
suggestion_models.GeneralSuggestionModel.create(
feconf.SUGGESTION_TYPE_ADD_QUESTION,
feconf.ENTITY_TYPE_SKILL, skill_id, 1,
suggestion_models.STATUS_IN_REVIEW, author_id, None, change,
score_category, suggestion_id, language_code)
return suggestion_id
def save_new_skill(
self, skill_id, owner_id, description='description',
misconceptions=None, rubrics=None, skill_contents=None,
language_code=constants.DEFAULT_LANGUAGE_CODE,
prerequisite_skill_ids=None):
"""Creates an Oppia Skill and saves it.
Args:
skill_id: str. ID for the skill to be created.
owner_id: str. The user_id of the creator of the skill.
description: str. The description of the skill.
misconceptions: list(Misconception)|None. A list of Misconception
objects that contains the various misconceptions of the skill.
rubrics: list(Rubric)|None. A list of Rubric objects that contain
the rubric for each difficulty of the skill.
skill_contents: SkillContents|None. A SkillContents object
containing the explanation and examples of the skill.
language_code: str. The ISO 639-1 code for the language this skill
is written in.
prerequisite_skill_ids: list(str)|None. The prerequisite skill IDs
for the skill.
Returns:
Skill. A newly-created skill.
"""
skill = (
skill_domain.Skill.create_default_skill(skill_id, description, []))
if misconceptions is not None:
skill.misconceptions = misconceptions
skill.next_misconception_id = len(misconceptions) + 1
if skill_contents is not None:
skill.skill_contents = skill_contents
if prerequisite_skill_ids is not None:
skill.prerequisite_skill_ids = prerequisite_skill_ids
if rubrics is not None:
skill.rubrics = rubrics
else:
skill.rubrics = [
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[2], ['Explanation 3']),
]
skill.language_code = language_code
skill.version = 0
skill_services.save_new_skill(owner_id, skill)
return skill
def save_new_skill_with_defined_schema_versions(
self, skill_id, owner_id, description, next_misconception_id,
misconceptions=None, rubrics=None, skill_contents=None,
misconceptions_schema_version=1, rubric_schema_version=1,
skill_contents_schema_version=1,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default skill with the given versions for misconceptions
and skill contents.
This function should only be used for creating skills in tests involving
migration of datastore skills that use an old schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating skills. This is because
the latter approach would result in a skill with the *current* schema
version.
Args:
skill_id: str. ID for the skill to be created.
owner_id: str. The user_id of the creator of the skill.
description: str. The description of the skill.
next_misconception_id: int. The misconception id to be used by the
next misconception added.
misconceptions: list(Misconception.to_dict()). The list of
misconception dicts associated with the skill.
rubrics: list(Rubric.to_dict()). The list of rubric dicts associated
with the skill.
skill_contents: SkillContents.to_dict(). A SkillContents dict
containing the explanation and examples of the skill.
misconceptions_schema_version: int. The schema version for the
misconceptions object.
rubric_schema_version: int. The schema version for the rubric
object.
skill_contents_schema_version: int. The schema version for the
skill_contents object.
language_code: str. The ISO 639-1 code for the language this skill
is written in.
"""
skill_model = skill_models.SkillModel(
id=skill_id, description=description, language_code=language_code,
misconceptions=misconceptions, rubrics=rubrics,
skill_contents=skill_contents,
next_misconception_id=next_misconception_id,
misconceptions_schema_version=misconceptions_schema_version,
rubric_schema_version=rubric_schema_version,
skill_contents_schema_version=skill_contents_schema_version,
superseding_skill_id=None, all_questions_merged=False)
skill_model.commit(
owner_id, 'New skill created.',
[{'cmd': skill_domain.CMD_CREATE_NEW}])
def _create_valid_question_data(self, default_dest_state_name):
"""Creates a valid question_data dict.
Args:
default_dest_state_name: str. The default destination state.
Returns:
dict. The default question_data dict.
"""
state = state_domain.State.create_default_state(
default_dest_state_name, is_initial_state=True)
state.update_interaction_id('TextInput')
solution_dict = {
'answer_is_exclusive': False,
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>This is a solution.</p>',
},
}
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')),
]
solution = state_domain.Solution.from_dict(
state.interaction.id, solution_dict)
state.update_interaction_solution(solution)
state.update_interaction_hints(hints_list)
state.update_interaction_customization_args({
'placeholder': {
'value': {
'content_id': 'ca_placeholder',
'unicode_str': 'Enter text here',
},
},
'rows': {'value': 1},
})
state.update_next_content_id_index(2)
state.interaction.default_outcome.labelled_as_correct = True
state.interaction.default_outcome.dest = None
return state
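# Illustrative usage sketch (added for clarity; not part of the original
# module): a hypothetical test method, written inside a GenericTestBase
# subclass, that combines the creation helpers above. The entity ids and the
# 'owner' user id are placeholder values.
#
#     def test_create_and_publish_collection_example(self):
#         owner_id = 'owner'  # Placeholder; a real test would sign up a user.
#         self.save_new_valid_exploration('exp_id', owner_id)
#         collection = self.save_new_valid_collection(
#             'col_id', owner_id, exploration_id='exp_id')
#         self.publish_exploration(owner_id, 'exp_id')
#         self.publish_collection(owner_id, 'col_id')
#         self.assertEqual(collection.id, 'col_id')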
class LinterTestBase(GenericTestBase):
"""Base class for linter tests."""
def setUp(self):
super(LinterTestBase, self).setUp()
self.linter_stdout = []
def mock_print(*args):
"""Mock for python_utils.PRINT. Append the values to print to
linter_stdout list.
Args:
*args: list(*). Variable length argument list of values to print
in the same line of output.
"""
self.linter_stdout.append(
' '.join(python_utils.UNICODE(arg) for arg in args))
self.print_swap = self.swap(python_utils, 'PRINT', mock_print)
def assert_same_list_elements(self, phrases, stdout):
"""Checks to see if all of the phrases appear in at least one of the
stdout outputs.
Args:
phrases: list(str). A list of phrases we are trying to find in one
of the stdout outputs. For example, python linting outputs a
success string that includes data we don't have easy access to,
like how long the test took, so we may want to search for a
substring of that success string in stdout.
stdout: list(str). A list of the output results from the method's
execution.
"""
self.assertTrue(
any(all(p in output for p in phrases) for output in stdout))
def assert_failed_messages_count(self, stdout, expected_failed_count):
"""Assert number of expected failed checks to actual number of failed
checks.
Args:
stdout: list(str). A list of linter output messages.
expected_failed_count: int. Expected number of failed messages.
"""
failed_count = sum(msg.startswith('FAILED') for msg in stdout)
self.assertEqual(failed_count, expected_failed_count)
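# Illustrative usage sketch (added for clarity; not part of the original
# module): a hypothetical linter test using the helpers above.
# `run_some_lint_check` is a stand-in for whatever linter entry point a real
# test would invoke while self.print_swap is active.
#
#     def test_lint_failure_is_reported(self):
#         with self.print_swap:
#             run_some_lint_check()  # Hypothetical linter call.
#         self.assert_same_list_elements(['FAILED'], self.linter_stdout)
#         self.assert_failed_messages_count(self.linter_stdout, 1)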
class AuditJobsTestBase(GenericTestBase):
"""Base class for audit jobs tests."""
def run_job_and_check_output(
self, expected_output, sort=False, literal_eval=False):
"""Helper function to run job and compare output.
Args:
expected_output: list(*). The expected result of the job.
sort: bool. Whether to sort the outputs before comparison.
literal_eval: bool. Whether to use ast.literal_eval before
comparison.
"""
self.process_and_flush_pending_tasks()
job_id = self.job_class.create_new()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
self.job_class.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
self.process_and_flush_pending_tasks()
actual_output = self.job_class.get_output(job_id)
if literal_eval:
actual_output_dict = {}
expected_output_dict = {}
for item in (ast.literal_eval(value) for value in actual_output):
value = item[1]
if isinstance(value, list):
value = sorted(value)
actual_output_dict[item[0]] = value
for item in (ast.literal_eval(value) for value in expected_output):
value = item[1]
if isinstance(value, list):
value = sorted(value)
expected_output_dict[item[0]] = value
self.assertItemsEqual(actual_output_dict, expected_output_dict)
for key in actual_output_dict:
self.assertEqual(
actual_output_dict[key], expected_output_dict[key])
elif sort:
self.assertEqual(sorted(actual_output), sorted(expected_output))
else:
self.assertEqual(actual_output, expected_output)
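# Illustrative usage sketch (added for clarity; not part of the original
# module): a hypothetical audit-job test. `SomeAuditOneOffJob` and the
# expected output string are stand-ins; run_job_and_check_output assumes
# that self.job_class has been set by the subclass.
#
#     class SomeAuditJobTests(AuditJobsTestBase):
#         def setUp(self):
#             super(SomeAuditJobTests, self).setUp()
#             self.job_class = SomeAuditOneOffJob  # Hypothetical job class.
#
#         def test_standard_model_is_fully_validated(self):
#             expected_output = [u'[u\'fully-validated SomeModel\', 1]']
#             self.run_job_and_check_output(expected_output, sort=True)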
class EmailMessageMock(python_utils.OBJECT):
"""Mock for core.platform.models email services messages."""
def __init__(
self, sender_email, recipient_email, subject, plaintext_body,
html_body, bcc=None, reply_to=None, recipient_variables=None):
"""Inits a mock email message with all the necessary data.
Args:
sender_email: str. The email address of the sender. This should be
in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Must be utf-8.
recipient_email: str. The email address of the recipient. Must be
utf-8.
            subject: str. The subject line of the email. Must be utf-8.
plaintext_body: str. The plaintext body of the email. Must be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Emails
must be utf-8.
            reply_to: str|None. Optional argument. Reply address formatted
                like 'reply+<reply_id>@<incoming_email_domain_name>', where
                reply_id is the unique id of the sender.
recipient_variables: dict|None. Optional argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables = {
'[email protected]': {'first': 'Bob', 'id': 1},
'[email protected]': {'first': 'Alice', 'id': 2},
}
subject = 'Hey, %recipient.first%'
For more information about this format, see:
https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
"""
self.sender = sender_email
self.to = recipient_email
self.subject = subject
self.body = plaintext_body
self.html = html_body
self.bcc = bcc
self.reply_to = reply_to
self.recipient_variables = recipient_variables
class GenericEmailTestBase(GenericTestBase):
"""Base class for tests requiring email services."""
emails_dict = collections.defaultdict(list)
def run(self, result=None):
"""Adds a context swap on top of the test_utils.run() method so that
test classes extending GenericEmailTestBase will automatically have a
mailgun api key, mailgun domain name and mocked version of
send_email_to_recipients().
"""
with self.swap(
email_services, 'send_email_to_recipients',
self._send_email_to_recipients):
super(EmailTestBase, self).run(result=result)
def setUp(self):
super(GenericEmailTestBase, self).setUp()
self._wipe_emails_dict()
def _wipe_emails_dict(self):
"""Reset email dictionary for a new test."""
self.emails_dict = collections.defaultdict(list)
def _send_email_to_recipients(
self, sender_email, recipient_emails, subject, plaintext_body,
html_body, bcc=None, reply_to=None, recipient_variables=None):
"""Mocks sending an email to each email in recipient_emails.
Args:
sender_email: str. The email address of the sender. This should be
in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Must be utf-8.
recipient_emails: list(str). The email addresses of the recipients.
Must be utf-8.
            subject: str. The subject line of the email. Must be utf-8.
plaintext_body: str. The plaintext body of the email. Must be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Must be
utf-8.
            reply_to: str|None. Optional argument. Reply address formatted
                like 'reply+<reply_id>@<incoming_email_domain_name>', where
                reply_id is the unique id of the sender.
            recipient_variables: dict|None. Optional argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables = {
'[email protected]': {'first': 'Bob', 'id': 1},
'[email protected]': {'first': 'Alice', 'id': 2},
}
subject = 'Hey, %recipient.first%'
For more information about this format, see:
https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
Returns:
bool. Whether the emails are sent successfully.
"""
bcc_emails = None
if bcc:
bcc_emails = bcc[0] if len(bcc) == 1 else bcc
new_email = EmailMessageMock(
sender_email, recipient_emails, subject, plaintext_body, html_body,
bcc=bcc_emails, reply_to=(reply_to if reply_to else None),
recipient_variables=(
recipient_variables if recipient_variables else None))
for recipient_email in recipient_emails:
self.emails_dict[recipient_email].append(new_email)
return True
def _get_sent_email_messages(self, to):
"""Gets messages to a single recipient email.
Args:
to: str. The recipient email address.
Returns:
list(EmailMessageMock). The list of email messages corresponding to
that recipient email.
"""
return self.emails_dict[to] if to in self.emails_dict else []
def _get_all_sent_email_messages(self):
"""Gets the entire messages dictionary.
Returns:
dict(str, list(EmailMessageMock)). The dict keyed by recipient
email. Each value contains a list of EmailMessageMock objects
corresponding to that recipient email; in other words, all
individual emails sent to that specific recipient email.
"""
return self.emails_dict
EmailTestBase = GenericEmailTestBase
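# Illustrative usage sketch (added for clarity; not part of the original
# module): a hypothetical test built on EmailTestBase showing how recorded
# emails are inspected. The email addresses are placeholders, and the mocked
# sender is invoked directly here; in a real test it would be called
# indirectly by production email code.
#
#     class ExampleEmailTests(EmailTestBase):
#         def test_sent_email_is_recorded(self):
#             self._send_email_to_recipients(
#                 'Sender <[email protected]>', ['[email protected]'],
#                 'A subject', 'plaintext body', '<p>html body</p>')
#             messages = self._get_sent_email_messages('[email protected]')
#             self.assertEqual(len(messages), 1)
#             self.assertEqual(messages[0].subject, 'A subject')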
class ClassifierTestBase(GenericEmailTestBase):
"""Base class for classifier test classes that need common functions
for related to reading classifier data and mocking the flow of the
storing the trained models through post request.
This class is derived from GenericEmailTestBase because the
TrainedClassifierHandlerTests test suite requires email services test
functions in addition to the classifier functions defined below.
"""
def post_blob(self, url, payload, expected_status_int=200):
"""Post a BLOB object to the server; return the received object.
        Note that this method should only be used for the
        classifier.TrainedClassifierHandler handler and for no one else, since
        we don't have any general mechanism for securely transferring binary
        data. TrainedClassifierHandler implements a specific mechanism which
        is restricted to that handler.
Args:
url: str. The URL to which BLOB object in payload should be sent
through a post request.
payload: bytes. Binary data which needs to be sent.
expected_status_int: int. The status expected as a response of post
request.
Returns:
dict. Parsed JSON response received upon invoking the post request.
"""
data = payload
expect_errors = False
if expected_status_int >= 400:
expect_errors = True
response = self._send_post_request(
self.testapp, url, data,
expect_errors, expected_status_int=expected_status_int,
headers={b'content-type': b'application/octet-stream'})
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
# Reference URL:
# https://github.com/Pylons/webtest/blob/
# bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119 .
self.assertEqual(response.status_int, expected_status_int)
return self._parse_json_response(response, expect_errors)
def _get_classifier_data_from_classifier_training_job(
self, classifier_training_job):
"""Retrieves classifier training job from GCS using metadata stored in
classifier_training_job.
Args:
classifier_training_job: ClassifierTrainingJob. Domain object
containing metadata of the training job which is used to
retrieve the trained model.
Returns:
FrozenModel. Protobuf object containing classifier data.
"""
filename = classifier_training_job.classifier_data_filename
file_system_class = fs_services.get_entity_file_system_class()
fs = fs_domain.AbstractFileSystem(file_system_class(
feconf.ENTITY_TYPE_EXPLORATION, classifier_training_job.exp_id))
classifier_data = utils.decompress_from_zlib(fs.get(filename))
classifier_data_proto = text_classifier_pb2.TextClassifierFrozenModel()
classifier_data_proto.ParseFromString(classifier_data)
return classifier_data_proto
class FunctionWrapper(python_utils.OBJECT):
"""A utility for making function wrappers. Create a subclass and override
    either or both of the pre_call_hook and post_call_hook methods. See these
methods for more info.
"""
def __init__(self, func):
"""Creates a new FunctionWrapper instance.
Args:
func: a callable, or data descriptor. If it's a descriptor, then
__get__ should return a bound method. For example, func can be
a function, a method, a static or class method, but not a
@property.
"""
self._func = func
self._instance = None
def __call__(self, *args, **kwargs):
"""Overrides the call method for the function to call pre_call_hook
method which would be called before the function is executed and
post_call_hook which would be called after the function is executed.
"""
if self._instance is not None:
args = [self._instance] + list(args)
args_dict = inspect.getcallargs(self._func, *args, **kwargs)
self.pre_call_hook(args_dict)
result = self._func(*args, **kwargs)
self.post_call_hook(args_dict, result)
return result
def __get__(self, instance, owner):
# We have to implement __get__ because otherwise, we don't have a chance
# to bind to the instance self._func was bound to. See the following SO
# answer: https://stackoverflow.com/a/22555978/675311
self._instance = instance
return self
def pre_call_hook(self, args):
"""Override this to do tasks that should be executed before the actual
function call.
Args:
args: list(*). Set of arguments that the function accepts.
"""
pass
def post_call_hook(self, args, result):
"""Override this to do tasks that should be executed after the actual
function call.
Args:
args: list(*). Set of arguments that the function accepts.
result: *. Result returned from the function.
"""
pass
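# Illustrative sketch (added for clarity; not part of the original module):
# a minimal FunctionWrapper subclass that records the arguments of every
# call, relying only on the pre_call_hook contract documented above.
#
#     class ArgRecorder(FunctionWrapper):
#         """Wrapper that remembers the args dict of each call."""
#
#         def __init__(self, func):
#             super(ArgRecorder, self).__init__(func)
#             self.recorded_args = []
#
#         def pre_call_hook(self, args):
#             self.recorded_args.append(args)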
class CallCounter(FunctionWrapper):
"""A function wrapper that keeps track of how often the function is called.
Note that the counter is incremented before each call, so it is also
increased when the function raises an exception.
"""
def __init__(self, f):
"""Counts the number of times the given function has been called. See
FunctionWrapper for arguments.
"""
super(CallCounter, self).__init__(f)
self._times_called = 0
@property
def times_called(self):
"""Property that returns the number of times the wrapped function has
been called.
Returns:
int. The number of times the wrapped function has been called.
"""
return self._times_called
def pre_call_hook(self, args):
"""Method that is called before each function call to increment the
counter tracking the number of times a function is called. This will
also be called even when the function raises an exception.
Args:
args: list(*). Set of arguments that the function accepts.
"""
self._times_called += 1
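# Illustrative usage sketch (added for clarity; not part of the original
# module): counting calls to a plain callable with CallCounter. In tests,
# the counter is typically installed with self.swap() so that production
# code calls it transparently.
#
#     counter = CallCounter(lambda x: x * 2)
#     counter(3)
#     counter(4)
#     assert counter.times_called == 2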
class FailingFunction(FunctionWrapper):
"""A function wrapper that makes a function fail, raising a given exception.
It can be set to succeed after a given number of calls.
"""
INFINITY = 'infinity'
def __init__(self, f, exception, num_tries_before_success):
"""Create a new Failing function.
Args:
f: func. See FunctionWrapper.
exception: Exception. The exception to be raised.
            num_tries_before_success: int. The number of times to raise an
                exception before a call succeeds. If this is 0, all calls will
                succeed; if it is FailingFunction.INFINITY, all calls will
                fail.
"""
super(FailingFunction, self).__init__(f)
self._exception = exception
self._num_tries_before_success = num_tries_before_success
self._always_fail = (
self._num_tries_before_success == FailingFunction.INFINITY)
self._times_called = 0
if not (self._num_tries_before_success >= 0 or self._always_fail):
raise ValueError(
'num_tries_before_success should either be an '
'integer greater than or equal to 0, '
'or FailingFunction.INFINITY')
def pre_call_hook(self, args):
"""Method that is called each time before the actual function call to
check if the exception is to be raised based on the number of tries
before success.
Args:
args: list(*). Set of arguments this function accepts.
"""
self._times_called += 1
call_should_fail = (
self._num_tries_before_success >= self._times_called)
if call_should_fail or self._always_fail:
raise self._exception
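# Illustrative usage sketch (added for clarity; not part of the original
# module): a FailingFunction that raises twice before succeeding, e.g. for
# exercising retry logic.
#
#     flaky = FailingFunction(lambda: 'ok', ValueError('boom'), 2)
#     # The first two calls raise ValueError('boom'); the third returns 'ok'.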
| __init__ |
serializers.py

import pytz
import json
from unicodedata import normalize
from distutils.version import StrictVersion
from django.core.exceptions import ValidationError
from rest_framework import serializers as ser
from rest_framework import exceptions
from api.base.exceptions import Conflict, InvalidModelValueError, JSONAPIException
from api.base.serializers import is_anonymized
from api.base.utils import absolute_reverse, get_user_auth, is_truthy
from api.base.versioning import CREATE_REGISTRATION_FIELD_CHANGE_VERSION
from website.project.model import NodeUpdateError
from api.files.serializers import OsfStorageFileSerializer
from api.nodes.serializers import (
NodeSerializer,
NodeStorageProviderSerializer,
NodeLicenseRelationshipField,
NodeLinksSerializer,
update_institutions,
NodeLicenseSerializer,
NodeContributorsSerializer,
RegistrationProviderRelationshipField,
get_license_details,
)
from api.base.serializers import (
IDField, RelationshipField, LinksField, HideIfWithdrawal,
FileRelationshipField, NodeFileHyperLinkField, HideIfRegistration,
ShowIfVersion, VersionedDateTimeField, ValuesListField,
)
from framework.auth.core import Auth
from osf.exceptions import ValidationValueError, NodeStateError
from osf.models import Node, AbstractNode
from osf.utils.registrations import strip_registered_meta_comments
from framework.sentry import log_exception
class RegistrationSerializer(NodeSerializer):
admin_only_editable_fields = [
'custom_citation',
'is_pending_retraction',
'is_public',
'withdrawal_justification',
]
# Remember to add new RegistrationSerializer fields to this list
# if you don't need them to be anonymized
non_anonymized_fields = NodeSerializer.non_anonymized_fields + [
'archiving',
'article_doi',
'date_registered',
'date_withdrawn',
'embargo_end_date',
'embargoed',
'pending_embargo_approval',
'pending_embargo_termination_approval',
'pending_registration_approval',
'pending_withdrawal',
'provider',
'registered_by',
'registered_from',
'registered_meta',
'registration_responses',
'registration_schema',
'registration_supplement',
'withdrawal_justification',
'withdrawn',
]
reviews_state = ser.CharField(source='moderation_state', read_only=True)
title = ser.CharField(read_only=True)
description = ser.CharField(required=False, allow_blank=True, allow_null=True)
category_choices = NodeSerializer.category_choices
category_choices_string = NodeSerializer.category_choices_string
category = ser.ChoiceField(required=False, choices=category_choices, help_text='Choices: ' + category_choices_string)
date_modified = VersionedDateTimeField(source='last_logged', read_only=True)
fork = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_fork'))
collection = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_collection'))
access_requests_enabled = HideIfWithdrawal(ser.BooleanField(read_only=True))
node_license = HideIfWithdrawal(NodeLicenseSerializer(required=False, source='license'))
tags = HideIfWithdrawal(ValuesListField(attr_name='name', child=ser.CharField(), required=False))
article_doi = ser.CharField(required=False, allow_null=True)
public = HideIfWithdrawal(ser.BooleanField(
source='is_public', required=False,
help_text='Nodes that are made public will give read-only access '
'to everyone. Private nodes require explicit read '
'permission. Write and admin access are the same for '
'public and private nodes. Administrators on a parent '
'node have implicit read permissions for all child nodes',
))
current_user_permissions = HideIfWithdrawal(ser.SerializerMethodField(
help_text='List of strings representing the permissions '
'for the current user on this node.',
))
pending_embargo_approval = HideIfWithdrawal(ser.BooleanField(
read_only=True, source='is_pending_embargo',
help_text='The associated Embargo is awaiting approval by project admins.',
))
pending_embargo_termination_approval = HideIfWithdrawal(ser.BooleanField(
read_only=True, source='is_pending_embargo_termination',
help_text='The associated Embargo early termination is awaiting approval by project admins',
))
embargoed = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_embargoed'))
pending_registration_approval = HideIfWithdrawal(ser.BooleanField(
source='is_pending_registration', read_only=True,
help_text='The associated RegistrationApproval is awaiting approval by project admins.',
))
archiving = HideIfWithdrawal(ser.BooleanField(read_only=True))
pending_withdrawal = HideIfWithdrawal(ser.BooleanField(
source='is_pending_retraction', read_only=True,
help_text='The registration is awaiting withdrawal approval by project admins.',
))
withdrawn = ser.BooleanField(
source='is_retracted', read_only=True,
help_text='The registration has been withdrawn.',
)
has_project = ser.SerializerMethodField()
date_registered = VersionedDateTimeField(source='registered_date', read_only=True, help_text='Date time of registration.')
date_withdrawn = VersionedDateTimeField(read_only=True, help_text='Date time of when this registration was retracted.')
embargo_end_date = HideIfWithdrawal(ser.SerializerMethodField(help_text='When the embargo on this registration will be lifted.'))
custom_citation = HideIfWithdrawal(ser.CharField(allow_blank=True, required=False))
withdrawal_justification = ser.CharField(read_only=True)
template_from = HideIfWithdrawal(ser.CharField(
read_only=True, allow_blank=False, allow_null=False,
help_text='Specify a node id for a node you would like to use as a template for the '
'new node. Templating is like forking, except that you do not copy the '
'files, only the project structure. Some information is changed on the top '
'level project by submitting the appropriate fields in the request body, '
'and some information will not change. By default, the description will '
'be cleared and the project will be made private.',
))
registration_supplement = ser.SerializerMethodField()
# Will be deprecated in favor of registration_responses
registered_meta = HideIfWithdrawal(ser.SerializerMethodField(
help_text='A dictionary with supplemental registration questions and responses.',
))
registration_responses = HideIfWithdrawal(ser.SerializerMethodField(
help_text='A dictionary with supplemental registration questions and responses.',
))
registered_by = HideIfWithdrawal(RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<registered_user._id>'},
))
registered_from = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<registered_from._id>'},
)
children = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-children',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_count'},
))
comments = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<_id>'},
related_meta={
'unread': 'get_unread_comments_count',
'count': 'get_total_comments_count',
},
filter={'target': '<_id>'},
))
contributors = RelationshipField(
related_view='registrations:registration-contributors',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_contrib_count'},
)
bibliographic_contributors = RelationshipField(
related_view='registrations:registration-bibliographic-contributors',
related_view_kwargs={'node_id': '<_id>'},
)
implicit_contributors = RelationshipField(
related_view='registrations:registration-implicit-contributors',
related_view_kwargs={'node_id': '<_id>'},
help_text='This feature is experimental and being tested. It may be deprecated.',
)
files = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-storage-providers',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_files_count'},
))
wikis = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-wikis',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_wiki_page_count'},
))
forked_from = HideIfWithdrawal(RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<forked_from_id>'},
))
template_node = HideIfWithdrawal(RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<template_node._id>'},
))
license = HideIfWithdrawal(NodeLicenseRelationshipField(
related_view='licenses:license-detail',
related_view_kwargs={'license_id': '<license.node_license._id>'},
read_only=False,
))
logs = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-logs',
related_view_kwargs={'node_id': '<_id>'},
))
forks = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-forks',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_forks_count'},
))
groups = HideIfRegistration(RelationshipField(
related_view='nodes:node-groups',
related_view_kwargs={'node_id': '<_id>'},
))
node_links = ShowIfVersion(
HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-pointers',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_pointers_count'},
help_text='This feature is deprecated as of version 2.1. Use linked_nodes instead.',
)), min_version='2.0', max_version='2.0',
)
linked_by_nodes = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-linked-by-nodes',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_linked_by_nodes_count'},
))
linked_by_registrations = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-linked-by-registrations',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_linked_by_registrations_count'},
))
parent = RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node',
)
root = RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<root._id>'},
)
region = HideIfWithdrawal(RelationshipField(
related_view='regions:region-detail',
related_view_kwargs={'region_id': '<osfstorage_region._id>'},
read_only=True,
))
affiliated_institutions = RelationshipField(
related_view='registrations:registration-institutions',
related_view_kwargs={'node_id': '<_id>'},
self_view='registrations:registration-relationships-institutions',
self_view_kwargs={'node_id': '<_id>'},
read_only=False,
many=True,
required=False,
)
registration_schema = RelationshipField(
related_view='schemas:registration-schema-detail',
related_view_kwargs={'schema_id': '<registered_schema_id>'},
)
settings = HideIfRegistration(RelationshipField(
related_view='nodes:node-settings',
related_view_kwargs={'node_id': '<_id>'},
))
registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-registrations',
related_view_kwargs={'node_id': '<_id>'},
))
draft_registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-draft-registrations',
related_view_kwargs={'node_id': '<_id>'},
))
preprints = HideIfWithdrawal(HideIfRegistration(RelationshipField(
related_view='nodes:node-preprints',
related_view_kwargs={'node_id': '<_id>'},
)))
identifiers = RelationshipField(
related_view='registrations:identifier-list',
related_view_kwargs={'node_id': '<_id>'},
)
linked_nodes = HideIfWithdrawal(RelationshipField(
related_view='registrations:linked-nodes',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_links_count'},
self_view='registrations:node-pointer-relationship',
self_view_kwargs={'node_id': '<_id>'},
))
linked_registrations = HideIfWithdrawal(RelationshipField(
related_view='registrations:linked-registrations',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_registration_links_count'},
self_view='registrations:node-registration-pointer-relationship',
self_view_kwargs={'node_id': '<_id>'},
))
view_only_links = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-view-only-links',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_view_only_links_count'},
))
citation = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-citation',
related_view_kwargs={'node_id': '<_id>'},
))
provider = RegistrationProviderRelationshipField(
related_view='providers:registration-providers:registration-provider-detail',
related_view_kwargs={'provider_id': '<provider._id>'},
read_only=True,
)
review_actions = RelationshipField(
related_view='registrations:registration-actions-list',
related_view_kwargs={'node_id': '<_id>'},
)
requests = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-requests-list',
related_view_kwargs={'node_id': '<_id>'},
))
@property
def subjects_related_view(self):
# Overrides TaxonomizableSerializerMixin
return 'registrations:registration-subjects'
@property
def subjects_self_view(self):
# Overrides TaxonomizableSerializerMixin
return 'registrations:registration-relationships-subjects'
links = LinksField({'html': 'get_absolute_html_url'})
def get_has_project(self, obj):
return obj.has_project
def get_absolute_url(self, obj):
return obj.get_absolute_url()
def get_registered_meta(self, obj):
if obj.registered_meta:
meta_values = self.anonymize_registered_meta(obj)
try:
return json.loads(meta_values)
except TypeError:
return meta_values
except ValueError:
return meta_values
return None
def get_registration_responses(self, obj):
if obj.registration_responses:
return self.anonymize_registration_responses(obj)
return None
def get_embargo_end_date(self, obj):
if obj.embargo_end_date:
return obj.embargo_end_date
return None
def get_registration_supplement(self, obj):
if obj.registered_schema:
schema = obj.registered_schema.first()
if schema is None:
return None
return schema.name
return None
def get_current_user_permissions(self, obj):
return NodeSerializer.get_current_user_permissions(self, obj)
def get_view_only_links_count(self, obj):
return obj.private_links.filter(is_deleted=False).count()
def get_total_comments_count(self, obj):
return obj.comment_set.filter(page='node', is_deleted=False).count()
def get_files_count(self, obj):
return obj.files_count or 0
def anonymize_registered_meta(self, obj):
"""
        Looks at every question on every page of the schema for any questions
        that have a contributor-input block type. If present, deletes that
        question's response from meta_values.
"""
cleaned_registered_meta = strip_registered_meta_comments(list(obj.registered_meta.values())[0])
return self.anonymize_fields(obj, cleaned_registered_meta)
def anonymize_registration_responses(self, obj):
"""
For any questions that have a `contributor-input` block type, delete
that question's response from registration_responses.
We want to make sure author's names that need to be anonymized
aren't surfaced when viewed through an anonymous VOL
"""
return self.anonymize_fields(obj, obj.registration_responses)
def anonymize_fields(self, obj, data):
"""
Consolidates logic to anonymize fields with contributor information
on both registered_meta and registration_responses
"""
if is_anonymized(self.context['request']):
anonymous_registration_response_keys = obj.get_contributor_registration_response_keys()
for key in anonymous_registration_response_keys:
if key in data:
del data[key]
return data
def check_admin_perms(self, registration, user, validated_data):
"""
        While both admin and write users can make modifications to registrations,
most fields are restricted to admin-only edits. You must be an admin
contributor on the registration; you cannot have gotten your admin
permissions through group membership.
Add fields that need admin perms to admin_only_editable_fields
"""
user_is_admin = registration.is_admin_contributor(user)
for field in validated_data:
if field in self.admin_only_editable_fields and not user_is_admin:
raise exceptions.PermissionDenied()
def update_registration_tags(self, registration, validated_data, auth):
new_tags = validated_data.pop('tags', [])
try:
registration.update_tags(new_tags, auth=auth)
except NodeStateError as err:
raise Conflict(str(err))
def retract_registration(self, registration, validated_data, user):
is_pending_retraction = validated_data.pop('is_pending_retraction', None)
withdrawal_justification = validated_data.pop('withdrawal_justification', None)
if withdrawal_justification and not is_pending_retraction:
raise exceptions.ValidationError(
'You cannot provide a withdrawal_justification without a concurrent withdrawal request.',
)
if is_truthy(is_pending_retraction):
if registration.is_pending_retraction:
raise exceptions.ValidationError('This registration is already pending withdrawal.')
try:
retraction = registration.retract_registration(user, withdrawal_justification, save=True)
except NodeStateError as err:
raise exceptions.ValidationError(str(err))
retraction.ask(registration.get_active_contributors_recursive(unique_users=True))
elif is_pending_retraction is not None:
raise exceptions.ValidationError('You cannot set is_pending_withdrawal to False.')
def update(self, registration, validated_data):
user = self.context['request'].user
auth = Auth(user)
self.check_admin_perms(registration, user, validated_data)
validated_data.pop('_id', None)
if 'tags' in validated_data:
self.update_registration_tags(registration, validated_data, auth)
if 'custom_citation' in validated_data:
registration.update_custom_citation(validated_data.pop('custom_citation'), auth)
if 'license_type' in validated_data or 'license' in validated_data:
license_details = get_license_details(registration, validated_data)
validated_data['node_license'] = license_details
validated_data.pop('license_type', None)
validated_data.pop('license', None)
if 'affiliated_institutions' in validated_data:
institutions_list = validated_data.pop('affiliated_institutions')
new_institutions = [{'_id': institution} for institution in institutions_list]
update_institutions(registration, new_institutions, user)
registration.save()
if 'subjects' in validated_data:
subjects = validated_data.pop('subjects', None)
self.update_subjects(registration, subjects, auth)
if 'withdrawal_justification' in validated_data or 'is_pending_retraction' in validated_data:
self.retract_registration(registration, validated_data, user)
if 'is_public' in validated_data:
if validated_data.get('is_public') is False:
raise exceptions.ValidationError('Registrations can only be turned from private to public.')
try:
registration.update(validated_data, auth=auth)
except ValidationError as e:
raise InvalidModelValueError(detail=e.messages[0])
except NodeUpdateError as err:
raise exceptions.ValidationError(err.reason)
except NodeStateError as err:
raise exceptions.ValidationError(str(err))
return registration
class Meta:
type_ = 'registrations'
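# Illustrative sketch (added for clarity; not part of the original module):
# the effect of anonymize_fields() on an anonymized request, using
# hypothetical data. Keys returned by
# obj.get_contributor_registration_response_keys() are removed from the
# responses before they are serialized.
#
#     responses = {'q1': 'An answer', 'q2-authors': 'Jane Doe'}
#     contributor_keys = ['q2-authors']
#     for key in contributor_keys:
#         responses.pop(key, None)
#     # responses == {'q1': 'An answer'}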
class RegistrationCreateSerializer(RegistrationSerializer):
"""
    Overrides RegistrationSerializer to add draft_registration, registration_choice, and lift_embargo fields.
"""
def expect_cleaner_attributes(self, request):
return StrictVersion(getattr(request, 'version', '2.0')) >= StrictVersion(CREATE_REGISTRATION_FIELD_CHANGE_VERSION)
def __init__(self, *args, **kwargs):
super(RegistrationCreateSerializer, self).__init__(*args, **kwargs)
request = kwargs['context']['request']
# required fields defined here for the different versions
if self.expect_cleaner_attributes(request):
self.fields['draft_registration_id'] = ser.CharField(write_only=True)
else:
self.fields['draft_registration'] = ser.CharField(write_only=True)
# For newer versions
embargo_end_date = VersionedDateTimeField(write_only=True, allow_null=True, default=None)
included_node_ids = ser.ListField(write_only=True, required=False)
# For older versions
lift_embargo = VersionedDateTimeField(write_only=True, default=None, input_formats=['%Y-%m-%dT%H:%M:%S'])
children = ser.ListField(write_only=True, required=False)
registration_choice = ser.ChoiceField(write_only=True, required=False, choices=['immediate', 'embargo'])
users = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<user._id>'},
always_embed=True,
required=False, | Old API versions should pass in "immediate" or "embargo" under `registration_choice`.
New API versions should pass in an "embargo_end_date" if it should be embargoed, else it will be None
"""
if self.expect_cleaner_attributes(self.context['request']):
if validated_data.get('registration_choice'):
raise JSONAPIException(
source={'pointer': '/data/attributes/registration_choice'},
detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. Use embargo_end_date instead.',
)
return 'embargo' if validated_data.get('embargo_end_date', None) else 'immediate'
return validated_data.get('registration_choice', 'immediate')
def get_embargo_end_date_by_version(self, validated_data):
"""
Old API versions should pass in "lift_embargo".
New API versions should pass in "embargo_end_date"
"""
if self.expect_cleaner_attributes(self.context['request']):
if validated_data.get('lift_embargo'):
raise JSONAPIException(
source={'pointer': '/data/attributes/lift_embargo'},
detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. Use embargo_end_date instead.',
)
return validated_data.get('embargo_end_date', None)
return validated_data.get('lift_embargo')
def get_children_by_version(self, validated_data):
"""
Old API versions should pass in 'children'
New API versions should pass in 'included_node_ids'.
"""
if self.expect_cleaner_attributes(self.context['request']):
return validated_data.get('included_node_ids', [])
return validated_data.get('children', [])
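    # Illustrative sketch (added for clarity; not part of the original
    # module): the request attributes a client would send under each API
    # version range handled by the helpers above (hypothetical values).
    #
    #     Before CREATE_REGISTRATION_FIELD_CHANGE_VERSION:
    #         {'draft_registration': 'abc12',
    #          'registration_choice': 'embargo',
    #          'lift_embargo': '2030-01-01T00:00:00',
    #          'children': ['def34']}
    #     From CREATE_REGISTRATION_FIELD_CHANGE_VERSION onwards:
    #         {'draft_registration_id': 'abc12',
    #          'embargo_end_date': '2030-01-01T00:00:00',
    #          'included_node_ids': ['def34']}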
def create(self, validated_data):
auth = get_user_auth(self.context['request'])
draft = validated_data.pop('draft', None)
registration_choice = self.get_registration_choice_by_version(validated_data)
embargo_lifted = self.get_embargo_end_date_by_version(validated_data)
children = self.get_children_by_version(validated_data)
if children:
# First check that all children are valid
child_nodes = Node.objects.filter(guids___id__in=children)
if child_nodes.count() != len(children):
raise exceptions.ValidationError('Some child nodes could not be found.')
# Second check that metadata doesn't have files that are not in the child nodes being registered.
registering = children + [draft.branched_from._id]
orphan_files = self._find_orphan_files(registering, draft)
if orphan_files:
orphan_files_names = [file_data['selectedFileName'] for file_data in orphan_files]
raise exceptions.ValidationError('All files attached to this form must be registered to complete the process. '
'The following file(s) are attached, but are not part of a component being'
' registered: {}'.format(', '.join(orphan_files_names)))
try:
# Still validating metadata, but whether `registration_responses` or `registration_metadata` were populated
# on the draft, the other field was built and populated as well. Both should exist.
draft.validate_metadata(metadata=draft.registration_metadata, required_fields=True)
except ValidationValueError:
log_exception() # Probably indicates a bug on our end, so log to sentry
# TODO: Raise an error once our JSON schemas are updated
try:
registration = draft.register(auth, save=True, child_ids=children)
except NodeStateError as err:
raise exceptions.ValidationError(err)
if registration_choice == 'embargo':
if not embargo_lifted:
raise exceptions.ValidationError('lift_embargo must be specified.')
embargo_end_date = embargo_lifted.replace(tzinfo=pytz.utc)
try:
registration.embargo_registration(auth.user, embargo_end_date)
except ValidationError as err:
raise exceptions.ValidationError(err.message)
else:
try:
registration.require_approval(auth.user)
except NodeStateError as err:
raise exceptions.ValidationError(err)
registration.save()
return registration
def _find_orphan_files(self, registering, draft):
from website.archiver.utils import find_selected_files
files = find_selected_files(draft.registration_schema, draft.registration_metadata)
orphan_files = []
for key, value in files.items():
if 'extra' in value:
for file_metadata in value['extra']:
if not self._is_attached_file_valid(file_metadata, registering):
orphan_files.append(file_metadata)
return orphan_files
def _is_attached_file_valid(self, file_metadata, registering):
"""
Validation of file information on registration_metadata. Theoretically, the file information
on registration_responses does not have to be valid, so we enforce its accuracy here
to ensure file links load properly.
Verify that the nodeId in the file_metadata is one of the nodes we're registering, that
selectedFileName is the name of a file on that node, and that the sha256 matches
a version of that file.
:param file_metadata - under "registration_metadata"
:param registering - node ids you are registering
:return boolean
"""
node_id = file_metadata.get('nodeId')
if node_id not in registering:
return False
node = AbstractNode.load(node_id)
if not node:
# node in registration_metadata doesn't exist
return False
specified_sha = file_metadata.get('sha256', '')
file = node.files.filter(name=normalize('NFD', file_metadata.get('selectedFileName', ''))).first() or \
node.files.filter(name=normalize('NFC', file_metadata.get('selectedFileName', ''))).first()
if not file:
# file with this name does not exist on the node
return False
match = False
for version in file.versions.all():
if specified_sha == version.metadata.get('sha256'):
match = True
if not match:
# Specified sha256 does not match a version on the specified file
return False
return True
class RegistrationDetailSerializer(RegistrationSerializer):
"""
Overrides RegistrationSerializer to make _id required and other fields writeable
"""
id = IDField(source='_id', required=True)
pending_withdrawal = HideIfWithdrawal(ser.BooleanField(
source='is_pending_retraction', required=False,
help_text='The registration is awaiting withdrawal approval by project admins.',
))
withdrawal_justification = ser.CharField(required=False)
class RegistrationNodeLinksSerializer(NodeLinksSerializer):
def get_absolute_url(self, obj):
return absolute_reverse(
'registrations:registration-pointer-detail',
kwargs={
'node_link_id': obj._id,
'node_id': self.context['request'].parser_context['kwargs']['node_id'],
'version': self.context['request'].parser_context['kwargs']['version'],
},
)
class RegistrationContributorsSerializer(NodeContributorsSerializer):
def get_absolute_url(self, obj):
return absolute_reverse(
'registrations:registration-contributor-detail',
kwargs={
'user_id': obj.user._id,
'node_id': self.context['request'].parser_context['kwargs']['node_id'],
'version': self.context['request'].parser_context['kwargs']['version'],
},
)
class RegistrationFileSerializer(OsfStorageFileSerializer):
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
)
comments = FileRelationshipField(
related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<target._id>'},
related_meta={'unread': 'get_unread_comments_count'},
filter={'target': 'get_file_guid'},
)
node = RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<target._id>'},
help_text='The registration that this file belongs to',
)
class RegistrationStorageProviderSerializer(NodeStorageProviderSerializer):
"""
Overrides NodeStorageProviderSerializer to lead to correct registration file links
"""
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
never_embed=True,
) | )
def get_registration_choice_by_version(self, validated_data):
""" |
idrac_powerunit.py | #!/usr/bin/env python
import json
import SNMPUtil
import argparse
### Monitoring iDRAC Servers - Powerunit Performance
### It uses the snmpwalk command to get the hardware data from the iDRAC servers.
### SNMPUtil.py is used to get the raw SNMP data, which is parsed to produce the output JSON.
### Download and install the latest version of the Site24x7 Linux Agent. The agent will execute the plugin and push the data to the Site24x7 server.
###
### Author: Anita, Zoho Corp
### Language : Python
### Tested in Ubuntu
### Tested for snmp version 2c
### OIDS for Getting Power unit Details
OIDS = {'powerunit' : ['powerUnitTable','amperageProbeLocationName','amperageProbeReading']}
### OID Attributes
hardware = {'powerunit' : ['powerUnitStateSettings','powerUnitRedundancyStatus','powerUnitStatus','amperageProbeReading']}
### Output Keys and their units
names = {'powerunit' : ['state','redundancystatus','status', {'powerconsumption':'W'}]}
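### Illustrative shape of the plugin output (element names and values are hypothetical):
### {
###   "state_PS1Status": "1", "redundancystatus_PS1Status": "3", "status_PS1Status": "3",
###   "powerconsumption_SystemBoardPwrConsumption": 126.0,
###   "units": {"powerconsumption_SystemBoardPwrConsumption": "W"},
###   "plugin_version": "1", "heartbeat_required": "true"
### }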
class HardwareParser:
def __init__(self, hostname, snmp_version, snmp_community_str, mib_location):
self.hostname = hostname
self.snmp_version = snmp_version
self.snmp_community_str = snmp_community_str
self.mib_location = mib_location
self.hardware = ''
self.oids = ''
self.pattern = ''
def | (self):
output_data = {}
output_data['data'] = {}
output_data['units'] = {}
for _ in OIDS:
self.hardware = _
self.oids = OIDS[self.hardware]
self.keys = set()
for _ in self.oids:
try:
### SNMPUtil module is used to get the snmp output for the input OIDS
snmpdata = SNMPUtil.SNMPPARSER('snmpwalk',self.hostname, self.snmp_version, self.snmp_community_str,_, self.mib_location, hardware[self.hardware])
### get Raw SNMP Output as a dict
self.snmp_data = snmpdata.getRawData()
### Method to parse the SNMP command output data
output_data = self.parseSNMPData(output_data)
except Exception as e:
raise Exception(e)
return output_data
### Method to parse the SNMP command output data
def parseSNMPData(self,output_data):
jsondata = output_data['data']
unitdata = output_data['units']
appendkeys = False
if not jsondata: appendkeys = True
for _ in self.snmp_data:
for index, __ in enumerate(hardware[self.hardware]) :
if __ in _:
name = ''.join(_.split("::")[1:]).replace('"','').split(' ')[0].split('.')
elementname = name[len(name)-1] # Name
value = ''.join(_.split()[1:]).replace('"','') # Value
if appendkeys : self.keys.add(elementname);
if ':' in value:
val = value.split(':')[1:]
value = val[len(val)-1]
if __ == 'powerSupplyOutputWatts' : value = int(value)/float(10)
if __ == 'powerSupplyRatedInputWattage' : value = int(value)/float(10)
if __ == 'amperageProbeReading' : value = int(value)/float(10)
if __ == 'voltageProbeReading' : value = int(value)/float(1000)
elem = names[self.hardware][index]
attribute = '' # Attribute Name
unit = '' # Attribute Value
if type(elem) is str: # Attributes with no units specified
attribute = elem
elif type(elem) is dict: # Attributes with units
attribute = list(elem.keys())[0]
unit = elem[list(elem.keys())[0]]
key = (attribute +'_'+elementname).replace(' ','')
if appendkeys :
jsondata[key] = value
if unit!='': unitdata[key] = unit
elif elementname in self.keys :
jsondata[key] = value
if unit!='': unitdata[key] = unit
elif self.hardware== 'powerunit':
if 'System Board Pwr Consumption' in _: self.keys.add(elementname)
if (elementname in self.keys and 'amperageProbeReading' in _) : jsondata[key] = value
output_data['data'] = jsondata
output_data['units'] = unitdata
return output_data
if __name__ == '__main__':
result = {}
parser = argparse.ArgumentParser()
parser.add_argument('--hostname', help='hostname', nargs='?', default='localhost')
parser.add_argument('--snmp_version', help='snmp version', type=str, nargs='?', default="2c")
parser.add_argument('--snmp_community_str', help='snmp community version', nargs='?', default='public')
parser.add_argument('--idrac_mib_file_locn', help='idrac mib file location', nargs='?', default='')
parser.add_argument('--plugin_version', help='plugin template version', nargs='?', default='1')
parser.add_argument('--heartbeat_required', help='Enable heartbeat for monitoring', nargs='?', default="true")
args = parser.parse_args()
try:
parser = HardwareParser(args.hostname, args.snmp_version, args.snmp_community_str, args.idrac_mib_file_locn)
output = parser.getData()
result = output['data']
result['units'] = output['units']
except Exception as e:
result['msg'] = str(e)
result['plugin_version'] = args.plugin_version
result['heartbeat_required'] = args.heartbeat_required
print(json.dumps(result, indent=2, sort_keys=True))
| getData |
EditJob.js | import axios from "axios";
import React from "react";
import { useState, useEffect } from "react";
import { useHistory } from "react-router-dom";
import {
Col,
Row,
Button,
Form,
FormGroup,
Label,
Input,
FormText,
Table,
} from "reactstrap";
import RecruiterNavbar from "./RecruiterNavbar";
const EditJob = ({ userid, jobid, dis }) => {
let history = useHistory();
const [title, setTitle] = useState("");
const [maxappl, setMaxappl] = useState("");
const [positions, setPositions] = useState("");
const [typeofjob, setTypeofjob] = useState("Work from Home");
const [duration, setDuration] = useState("");
const [salary, setSalary] = useState("");
const [deadlineDate, setDeadlineDate] = useState("");
const [deadlineTime, setDeadlineTime] = useState("");
useEffect(() => {
const getJobData = async () => {
const request = await axios.get(`http://localhost:4000/job/${jobid}`);
const data = request.data;
setTitle(data.title);
setMaxappl(data.maxappl);
setPositions(data.positions);
setTypeofjob(data.typeofjob);
setDuration(data.duration);
setSalary(data.salary);
setDeadlineDate(data.deadline.toString().slice(0, 10));
setDeadlineTime(data.deadline.toString().slice(11, 16));
};
userid = localStorage.getItem("userid");
jobid = localStorage.getItem("jobid");
dis = true;
getJobData();
}, []);
const onSubmit = (e) => {
e.preventDefault(); // prevent the native form submission from reloading the page before the request completes
if (Number(maxappl) < Number(positions)) {
alert(
"Maximum number of applicants cannot be less than number of positions."
);
return;
}
const deadline = new Date(deadlineDate + "T" + deadlineTime + ":00+05:30");
const present = new Date();
if (deadline <= present) {
alert("Deadline cannot be in the past for present");
return;
}
axios
.put(`http://localhost:4000/job/${jobid}`, {
maxappl,
positions,
deadline,
})
.then((res) => {
console.log(res.data);
window.location.href = "/recruiter/jobs";
})
.catch((err) => {
console.log(err);
alert("Failed");
});
setTitle("");
setMaxappl("");
setPositions("");
setTypeofjob("");
setDuration("");
setSalary("");
setDeadlineDate("");
setDeadlineTime("");
localStorage.removeItem("jobid");
// history.push("/recruiter/jobs");
};
return (
<div>
<RecruiterNavbar />
<br></br>
<br></br>
<Form onSubmit={onSubmit}>
<FormText color="muted">Fields marked * are necessary to fill</FormText>
<FormGroup>
<Label for="name">Title*</Label>
<Input
type="text"
name="name"
disabled={dis}
id="exampleEmail"
required
value={title}
onChange={(e) => setTitle(e.target.value)}
/>
</FormGroup>
<FormGroup>
<Label for="exampleSelect">Type of Job*</Label>
<Input
type="select"
name="select"
required
disabled={dis}
id="exampleSelect"
value={typeofjob}
onChange={(e) => setTypeofjob(e.target.value)}
>
<option>Work from Home</option>
<option>Full-Time</option>
<option>Part-Time</option>
</Input>
</FormGroup>
<FormGroup>
<Label for="exampleEmail">Max Number of Applicants*</Label>
<Input
type="number"
name="contact"
required
id="exampleEmail"
min="1"
value={maxappl}
onChange={(e) => setMaxappl(e.target.value)}
/>
</FormGroup>
<FormGroup>
<Label for="exampleEmail">Available Positions*</Label>
<Input
type="number"
name="contact"
required
id="exampleEmail"
min="1"
value={positions}
onChange={(e) => setPositions(e.target.value)}
/>
</FormGroup>
<FormGroup>
<Label for="exampleDate">Deadline Date*</Label>
<Input
type="date"
name="date"
required
id="exampleDate"
value={deadlineDate}
onChange={(e) => setDeadlineDate(e.target.value)}
/>
</FormGroup>
<FormGroup>
<Label for="exampleTime">Deadline Time*</Label>
<Input
type="time"
required
name="time"
id="exampleTime"
value={deadlineTime}
onChange={(e) => setDeadlineTime(e.target.value)}
/>
</FormGroup>
<FormGroup>
<Label for="exampleEmail">Duration (in months)*</Label>
<Input
type="number"
name="contact"
id="exampleEmail"
required
disabled={dis}
min="0"
max="6"
value={duration}
onChange={(e) => setDuration(e.target.value)}
/>
<FormText color="muted">
Only duration of 1 to 6 months is allowed. Fill 0 for indefinite.
</FormText>
</FormGroup>
<FormGroup>
<Label for="exampleEmail">Salary*</Label>
<Input
type="number"
min="0"
required
disabled={dis}
name="contact"
id="exampleEmail"
value={salary}
onChange={(e) => setSalary(e.target.value)}
/>
</FormGroup>
<input type="submit"></input>
<br></br>
<br></br>
</Form>
</div>
);
};
EditJob.defaultProps = {
userid: localStorage.getItem("userid"), | export default EditJob; | jobid: localStorage.getItem("jobid"),
dis: true,
};
|
lol_matchmaking_gameflow_game_data.rs | /*
*
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 1.0.0
*
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LolMatchmakingGameflowGameData {
#[serde(rename = "queue", skip_serializing_if = "Option::is_none")]
pub queue: Option<crate::models::LolMatchmakingGameflowQueue>,
}
impl LolMatchmakingGameflowGameData { | pub fn new() -> LolMatchmakingGameflowGameData {
LolMatchmakingGameflowGameData {
queue: None,
}
}
} | |
sys_timerfd.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linux
import (
"gvisor.googlesource.com/gvisor/pkg/abi/linux"
"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
"gvisor.googlesource.com/gvisor/pkg/sentry/fs/timerfd"
"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs"
ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
"gvisor.googlesource.com/gvisor/pkg/syserror"
)
// TimerfdCreate implements Linux syscall timerfd_create(2).
func TimerfdCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
clockID := args[0].Int()
flags := args[1].Int()
if flags&^(linux.TFD_CLOEXEC|linux.TFD_NONBLOCK) != 0 {
return 0, nil, syserror.EINVAL
}
var c ktime.Clock
switch clockID {
case linux.CLOCK_REALTIME:
c = t.Kernel().RealtimeClock()
case linux.CLOCK_MONOTONIC:
c = t.Kernel().MonotonicClock()
default:
return 0, nil, syserror.EINVAL
}
f := timerfd.NewFile(t, c)
defer f.DecRef()
f.SetFlags(fs.SettableFileFlags{
NonBlocking: flags&linux.TFD_NONBLOCK != 0,
})
fd, err := t.FDMap().NewFDFrom(0, f, kernel.FDFlags{
CloseOnExec: flags&linux.TFD_CLOEXEC != 0,
}, t.ThreadGroup().Limits())
if err != nil {
return 0, nil, err
}
return uintptr(fd), nil, nil
}
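// Illustrative userspace sequence handled by the syscalls in this file (a sketch; values are hypothetical):
//
//	fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC|TFD_NONBLOCK)
//	timerfd_settime(fd, 0, {it_value: {tv_sec: 5}}, NULL)   // arm a one-shot 5 second timer
//	read(fd, &expirations, 8)                                // returns the expiration count (EAGAIN if not yet expired and non-blocking)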
// TimerfdSettime implements Linux syscall timerfd_settime(2).
func | (t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
fd := kdefs.FD(args[0].Int())
flags := args[1].Int()
newValAddr := args[2].Pointer()
oldValAddr := args[3].Pointer()
if flags&^(linux.TFD_TIMER_ABSTIME) != 0 {
return 0, nil, syserror.EINVAL
}
f := t.FDMap().GetFile(fd)
if f == nil {
return 0, nil, syserror.EBADF
}
defer f.DecRef()
tf, ok := f.FileOperations.(*timerfd.TimerOperations)
if !ok {
return 0, nil, syserror.EINVAL
}
var newVal linux.Itimerspec
if _, err := t.CopyIn(newValAddr, &newVal); err != nil {
return 0, nil, err
}
newS, err := ktime.SettingFromItimerspec(newVal, flags&linux.TFD_TIMER_ABSTIME != 0, tf.Clock())
if err != nil {
return 0, nil, err
}
tm, oldS := tf.SetTime(newS)
if oldValAddr != 0 {
oldVal := ktime.ItimerspecFromSetting(tm, oldS)
if _, err := t.CopyOut(oldValAddr, &oldVal); err != nil {
return 0, nil, err
}
}
return 0, nil, nil
}
// TimerfdGettime implements Linux syscall timerfd_gettime(2).
func TimerfdGettime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
fd := kdefs.FD(args[0].Int())
curValAddr := args[1].Pointer()
f := t.FDMap().GetFile(fd)
if f == nil {
return 0, nil, syserror.EBADF
}
defer f.DecRef()
tf, ok := f.FileOperations.(*timerfd.TimerOperations)
if !ok {
return 0, nil, syserror.EINVAL
}
tm, s := tf.GetTime()
curVal := ktime.ItimerspecFromSetting(tm, s)
_, err := t.CopyOut(curValAddr, &curVal)
return 0, nil, err
}
| TimerfdSettime |
tree_list_row.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
use glib::object::Cast;
use glib::object::ObjectType as ObjectType_;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::StaticType;
use glib::ToValue;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib::wrapper! {
pub struct TreeListRow(Object<ffi::GtkTreeListRow, ffi::GtkTreeListRowClass>);
match fn {
type_ => || ffi::gtk_tree_list_row_get_type(),
}
}
impl TreeListRow {
#[doc(alias = "gtk_tree_list_row_get_child_row")]
#[doc(alias = "get_child_row")]
pub fn child_row(&self, position: u32) -> Option<TreeListRow> {
unsafe {
from_glib_full(ffi::gtk_tree_list_row_get_child_row(
self.to_glib_none().0,
position,
))
}
}
#[doc(alias = "gtk_tree_list_row_get_children")]
#[doc(alias = "get_children")]
pub fn children(&self) -> Option<gio::ListModel> {
unsafe { from_glib_none(ffi::gtk_tree_list_row_get_children(self.to_glib_none().0)) }
}
#[doc(alias = "gtk_tree_list_row_get_depth")]
#[doc(alias = "get_depth")]
pub fn depth(&self) -> u32 {
unsafe { ffi::gtk_tree_list_row_get_depth(self.to_glib_none().0) }
}
#[doc(alias = "gtk_tree_list_row_get_expanded")]
#[doc(alias = "get_expanded")]
pub fn is_expanded(&self) -> bool {
unsafe { from_glib(ffi::gtk_tree_list_row_get_expanded(self.to_glib_none().0)) }
}
#[doc(alias = "gtk_tree_list_row_get_item")]
#[doc(alias = "get_item")]
pub fn item(&self) -> Option<glib::Object> {
unsafe { from_glib_full(ffi::gtk_tree_list_row_get_item(self.to_glib_none().0)) }
}
#[doc(alias = "gtk_tree_list_row_get_parent")]
#[doc(alias = "get_parent")]
pub fn parent(&self) -> Option<TreeListRow> {
unsafe { from_glib_full(ffi::gtk_tree_list_row_get_parent(self.to_glib_none().0)) }
}
#[doc(alias = "gtk_tree_list_row_get_position")]
#[doc(alias = "get_position")]
pub fn position(&self) -> u32 {
unsafe { ffi::gtk_tree_list_row_get_position(self.to_glib_none().0) }
}
#[doc(alias = "gtk_tree_list_row_is_expandable")]
pub fn is_expandable(&self) -> bool {
unsafe { from_glib(ffi::gtk_tree_list_row_is_expandable(self.to_glib_none().0)) }
}
#[doc(alias = "gtk_tree_list_row_set_expanded")]
pub fn set_expanded(&self, expanded: bool) {
unsafe {
ffi::gtk_tree_list_row_set_expanded(self.to_glib_none().0, expanded.into_glib());
}
}
#[doc(alias = "children")]
pub fn connect_children_notify<F: Fn(&TreeListRow) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_children_trampoline<F: Fn(&TreeListRow) + 'static>(
this: *mut ffi::GtkTreeListRow,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::children\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_children_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[doc(alias = "depth")]
pub fn | <F: Fn(&TreeListRow) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_depth_trampoline<F: Fn(&TreeListRow) + 'static>(
this: *mut ffi::GtkTreeListRow,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::depth\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_depth_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[doc(alias = "expandable")]
pub fn connect_expandable_notify<F: Fn(&TreeListRow) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn notify_expandable_trampoline<F: Fn(&TreeListRow) + 'static>(
this: *mut ffi::GtkTreeListRow,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::expandable\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_expandable_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[doc(alias = "expanded")]
pub fn connect_expanded_notify<F: Fn(&TreeListRow) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_expanded_trampoline<F: Fn(&TreeListRow) + 'static>(
this: *mut ffi::GtkTreeListRow,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::expanded\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_expanded_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
#[doc(alias = "item")]
pub fn connect_item_notify<F: Fn(&TreeListRow) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_item_trampoline<F: Fn(&TreeListRow) + 'static>(
this: *mut ffi::GtkTreeListRow,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::item\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_item_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
#[derive(Clone, Default)]
pub struct TreeListRowBuilder {
expanded: Option<bool>,
}
impl TreeListRowBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn build(self) -> TreeListRow {
let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
if let Some(ref expanded) = self.expanded {
properties.push(("expanded", expanded));
}
glib::Object::new::<TreeListRow>(&properties)
.expect("Failed to create an instance of TreeListRow")
}
pub fn expanded(mut self, expanded: bool) -> Self {
self.expanded = Some(expanded);
self
}
}
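// Illustrative usage of the generated API (a sketch; the surrounding tree model setup is omitted):
//
//     let row = TreeListRowBuilder::new().expanded(true).build();
//     row.connect_expanded_notify(|r| println!("expanded: {}", r.is_expanded()));
//     row.set_expanded(false);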
impl fmt::Display for TreeListRow {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("TreeListRow")
}
}
| connect_depth_notify |
resource_firestore_index.go | // ----------------------------------------------------------------------------
//
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
//
// ----------------------------------------------------------------------------
//
// This file is automatically generated by Magic Modules and manual
// changes will be clobbered when the file is regenerated.
//
// Please read more about how to change this file in
// .github/CONTRIBUTING.md.
//
// ----------------------------------------------------------------------------
package google
import (
"fmt"
"log"
"reflect"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
func resourceFirestoreIndex() *schema.Resource {
return &schema.Resource{
Create: resourceFirestoreIndexCreate,
Read: resourceFirestoreIndexRead,
Delete: resourceFirestoreIndexDelete,
Importer: &schema.ResourceImporter{
State: resourceFirestoreIndexImport,
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(10 * time.Minute),
Delete: schema.DefaultTimeout(10 * time.Minute),
},
Schema: map[string]*schema.Schema{
"collection": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: `The collection being indexed.`,
},
"fields": {
Type: schema.TypeList,
Required: true,
ForceNew: true,
Description: `The fields supported by this index. The last field entry is always for
the field path '__name__'. If, on creation, '__name__' was not
specified as the last field, it will be added automatically with the
same direction as that of the last field defined. If the final field
in a composite index is not directional, the '__name__' will be
ordered '"ASCENDING"' (unless explicitly specified otherwise).`,
MinItems: 2,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"array_config": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{"CONTAINS", ""}, false),
Description: `Indicates that this field supports operations on arrayValues. Only one of 'order' and 'arrayConfig' can
be specified.`,
},
"field_path": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Description: `Name of the field.`,
},
"order": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{"ASCENDING", "DESCENDING", ""}, false),
Description: `Indicates that this field supports ordering by the specified order or comparing using =, <, <=, >, >=.
Only one of 'order' and 'arrayConfig' can be specified.`,
},
},
},
},
"database": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Description: `The Firestore database id. Defaults to '"(default)"'.`,
Default: "(default)",
},
"query_scope": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{"COLLECTION", "COLLECTION_GROUP", ""}, false),
Description: `The scope at which a query is run. One of '"COLLECTION"' or
'"COLLECTION_GROUP"'. Defaults to '"COLLECTION"'.`,
Default: "COLLECTION",
},
"name": {
Type: schema.TypeString,
Computed: true,
Description: `A server defined name for this index. Format:
'projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes/{{server_generated_id}}'`,
},
"project": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
},
}
}
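// Illustrative Terraform configuration for this resource (a sketch; the project and field names are hypothetical):
//
//	resource "google_firestore_index" "example" {
//	  project    = "my-project"
//	  collection = "chatrooms"
//	  fields {
//	    field_path = "name"
//	    order      = "ASCENDING"
//	  }
//	  fields {
//	    field_path = "description"
//	    order      = "DESCENDING"
//	  }
//	}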
func resourceFirestoreIndexCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
obj := make(map[string]interface{})
databaseProp, err := expandFirestoreIndexDatabase(d.Get("database"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("database"); !isEmptyValue(reflect.ValueOf(databaseProp)) && (ok || !reflect.DeepEqual(v, databaseProp)) {
obj["database"] = databaseProp
}
collectionProp, err := expandFirestoreIndexCollection(d.Get("collection"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("collection"); !isEmptyValue(reflect.ValueOf(collectionProp)) && (ok || !reflect.DeepEqual(v, collectionProp)) {
obj["collection"] = collectionProp
}
queryScopeProp, err := expandFirestoreIndexQueryScope(d.Get("query_scope"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("query_scope"); !isEmptyValue(reflect.ValueOf(queryScopeProp)) && (ok || !reflect.DeepEqual(v, queryScopeProp)) {
obj["queryScope"] = queryScopeProp
}
fieldsProp, err := expandFirestoreIndexFields(d.Get("fields"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("fields"); !isEmptyValue(reflect.ValueOf(fieldsProp)) && (ok || !reflect.DeepEqual(v, fieldsProp)) {
obj["fields"] = fieldsProp
}
obj, err = resourceFirestoreIndexEncoder(d, meta, obj)
if err != nil {
return err
}
url, err := replaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes")
if err != nil {
return err
}
log.Printf("[DEBUG] Creating new Index: %#v", obj)
project, err := getProject(d, config)
if err != nil {
return err
}
res, err := sendRequestWithTimeout(config, "POST", project, url, obj, d.Timeout(schema.TimeoutCreate))
if err != nil {
return fmt.Errorf("Error creating Index: %s", err)
}
// Store the ID now
id, err := replaceVars(d, config, "{{name}}")
if err != nil {
return fmt.Errorf("Error constructing id: %s", err)
}
d.SetId(id)
err = firestoreOperationWaitTime(
config, res, project, "Creating Index",
int(d.Timeout(schema.TimeoutCreate).Minutes()))
if err != nil {
// The resource wasn't actually created
d.SetId("")
return fmt.Errorf("Error waiting to create Index: %s", err)
}
log.Printf("[DEBUG] Finished creating Index %q: %#v", d.Id(), res)
// The operation for this resource contains the generated name that we need
// in order to perform a READ.
metadata := res["metadata"].(map[string]interface{})
name := metadata["index"].(string)
log.Printf("[DEBUG] Setting Index name, id to %s", name)
d.Set("name", name)
d.SetId(name)
return resourceFirestoreIndexRead(d, meta)
}
func resourceFirestoreIndexRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}")
if err != nil {
return err
}
project, err := getProject(d, config)
if err != nil {
return err
}
res, err := sendRequest(config, "GET", project, url, nil)
if err != nil {
return handleNotFoundError(err, d, fmt.Sprintf("FirestoreIndex %q", d.Id()))
}
if err := d.Set("project", project); err != nil {
return fmt.Errorf("Error reading Index: %s", err)
}
if err := d.Set("name", flattenFirestoreIndexName(res["name"], d)); err != nil {
return fmt.Errorf("Error reading Index: %s", err)
}
if err := d.Set("query_scope", flattenFirestoreIndexQueryScope(res["queryScope"], d)); err != nil {
return fmt.Errorf("Error reading Index: %s", err)
}
if err := d.Set("fields", flattenFirestoreIndexFields(res["fields"], d)); err != nil {
return fmt.Errorf("Error reading Index: %s", err)
}
return nil
}
func resourceFirestoreIndexDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
project, err := getProject(d, config)
if err != nil {
return err
}
url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}")
if err != nil {
return err
}
var obj map[string]interface{}
log.Printf("[DEBUG] Deleting Index %q", d.Id())
res, err := sendRequestWithTimeout(config, "DELETE", project, url, obj, d.Timeout(schema.TimeoutDelete))
if err != nil {
return handleNotFoundError(err, d, "Index")
}
err = firestoreOperationWaitTime(
config, res, project, "Deleting Index",
int(d.Timeout(schema.TimeoutDelete).Minutes()))
if err != nil {
return err
}
log.Printf("[DEBUG] Finished deleting Index %q: %#v", d.Id(), res)
return nil
}
func resourceFirestoreIndexImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
config := meta.(*Config)
// current import_formats can't import fields with forward slashes in their value
if err := parseImportId([]string{"(?P<name>.+)"}, d, config); err != nil {
return nil, err
}
stringParts := strings.Split(d.Get("name").(string), "/")
if len(stringParts) != 8 {
return nil, fmt.Errorf(
"Saw %s when the name is expected to have shape %s",
d.Get("name"),
"projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes/{{server_generated_id}}",
)
}
d.Set("project", stringParts[1])
return []*schema.ResourceData{d}, nil
}
func flattenFirestoreIndexName(v interface{}, d *schema.ResourceData) interface{} {
return v
}
func flattenFirestoreIndexQueryScope(v interface{}, d *schema.ResourceData) interface{} {
return v
}
func flattenFirestoreIndexFields(v interface{}, d *schema.ResourceData) interface{} {
if v == nil {
return v
}
l := v.([]interface{})
transformed := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
if len(original) < 1 {
// Do not include empty json objects coming back from the api
continue
}
transformed = append(transformed, map[string]interface{}{
"field_path": flattenFirestoreIndexFieldsFieldPath(original["fieldPath"], d),
"order": flattenFirestoreIndexFieldsOrder(original["order"], d),
"array_config": flattenFirestoreIndexFieldsArrayConfig(original["arrayConfig"], d),
})
}
return transformed
}
func flattenFirestoreIndexFieldsFieldPath(v interface{}, d *schema.ResourceData) interface{} {
return v
}
func flattenFirestoreIndexFieldsOrder(v interface{}, d *schema.ResourceData) interface{} {
return v
}
func flattenFirestoreIndexFieldsArrayConfig(v interface{}, d *schema.ResourceData) interface{} {
return v
}
func expandFirestoreIndexDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandFirestoreIndexCollection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandFirestoreIndexQueryScope(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandFirestoreIndexFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, len(l))
for _, raw := range l {
if raw == nil {
continue
}
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedFieldPath, err := expandFirestoreIndexFieldsFieldPath(original["field_path"], d, config)
if err != nil {
return nil, err
} else if val := reflect.ValueOf(transformedFieldPath); val.IsValid() && !isEmptyValue(val) {
transformed["fieldPath"] = transformedFieldPath
}
transformedOrder, err := expandFirestoreIndexFieldsOrder(original["order"], d, config)
if err != nil {
return nil, err
} else if val := reflect.ValueOf(transformedOrder); val.IsValid() && !isEmptyValue(val) {
transformed["order"] = transformedOrder
}
transformedArrayConfig, err := expandFirestoreIndexFieldsArrayConfig(original["array_config"], d, config)
if err != nil {
return nil, err
} else if val := reflect.ValueOf(transformedArrayConfig); val.IsValid() && !isEmptyValue(val) {
transformed["arrayConfig"] = transformedArrayConfig
}
req = append(req, transformed)
}
return req, nil
}
func | (v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandFirestoreIndexFieldsOrder(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandFirestoreIndexFieldsArrayConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
return v, nil
}
func resourceFirestoreIndexEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {
// We've added project / database / collection as split fields of the name, but
// the API doesn't expect them. Make sure we remove them from any requests.
delete(obj, "project")
delete(obj, "database")
delete(obj, "collection")
return obj, nil
}
| expandFirestoreIndexFieldsFieldPath |
ifs0inv.rs | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::IFS0INV {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
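// Illustrative usage of the register accessors above (the peripheral handle name is hypothetical):
//
//     let snapshot = peripherals.IFS0INV.read();   // capture the current flag-invert bits
//     peripherals.IFS0INV.modify(|_r, w| w);       // read-modify-write; chain field setters on `w` as needed
//     peripherals.IFS0INV.reset();                 // restore the reset value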
#[doc = r" Value of the field"]
pub struct CTIFR {
bits: bool,
}
impl CTIFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct CS0IFR {
bits: bool,
}
impl CS0IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct CS1IFR {
bits: bool,
}
impl CS1IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INT0IFR {
bits: bool,
}
impl INT0IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct T1IFR {
bits: bool,
}
impl T1IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct IC1EIFR {
bits: bool,
}
impl IC1EIFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct IC1IFR {
bits: bool,
}
impl IC1IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct OC1IFR {
bits: bool,
}
impl OC1IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INT1IFR {
bits: bool,
}
impl INT1IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct T2IFR {
bits: bool,
}
impl T2IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct IC2EIFR {
bits: bool,
}
impl IC2EIFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct IC2IFR {
bits: bool,
}
impl IC2IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct OC2IFR {
bits: bool,
}
impl OC2IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INT2IFR {
bits: bool,
}
impl INT2IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct T3IFR {
bits: bool,
}
impl T3IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct IC3EIFR {
bits: bool,
}
impl IC3EIFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct IC3IFR {
bits: bool,
}
impl IC3IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct OC3IFR {
bits: bool,
}
impl OC3IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INT3IFR {
bits: bool,
}
impl INT3IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct T4IFR {
bits: bool,
}
impl T4IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct IC4EIFR {
bits: bool,
}
impl IC4EIFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct IC4IFR {
bits: bool,
}
impl IC4IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct OC4IFR {
bits: bool,
}
impl OC4IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct INT4IFR {
bits: bool,
}
impl INT4IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct T5IFR {
bits: bool,
}
impl T5IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct IC5EIFR {
bits: bool,
}
impl IC5EIFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct IC5IFR {
bits: bool,
}
impl IC5IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct OC5IFR {
bits: bool,
}
impl OC5IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct AD1IFR {
bits: bool,
}
impl AD1IFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct FSCMIFR {
bits: bool,
}
impl FSCMIFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct RTCCIFR {
bits: bool,
}
impl RTCCIFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct FCEIFR {
bits: bool,
}
impl FCEIFR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Proxy"]
pub struct _CTIFW<'a> {
w: &'a mut W,
}
impl<'a> _CTIFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CS0IFW<'a> {
w: &'a mut W,
}
impl<'a> _CS0IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 1;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CS1IFW<'a> {
w: &'a mut W,
}
impl<'a> _CS1IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 2;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct | <'a> {
w: &'a mut W,
}
impl<'a> _INT0IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _T1IFW<'a> {
w: &'a mut W,
}
impl<'a> _T1IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 4;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _IC1EIFW<'a> {
w: &'a mut W,
}
impl<'a> _IC1EIFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 5;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _IC1IFW<'a> {
w: &'a mut W,
}
impl<'a> _IC1IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 6;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _OC1IFW<'a> {
w: &'a mut W,
}
impl<'a> _OC1IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 7;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _INT1IFW<'a> {
w: &'a mut W,
}
impl<'a> _INT1IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _T2IFW<'a> {
w: &'a mut W,
}
impl<'a> _T2IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 9;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _IC2EIFW<'a> {
w: &'a mut W,
}
impl<'a> _IC2EIFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 10;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _IC2IFW<'a> {
w: &'a mut W,
}
impl<'a> _IC2IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 11;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _OC2IFW<'a> {
w: &'a mut W,
}
impl<'a> _OC2IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 12;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _INT2IFW<'a> {
w: &'a mut W,
}
impl<'a> _INT2IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 13;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _T3IFW<'a> {
w: &'a mut W,
}
impl<'a> _T3IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 14;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _IC3EIFW<'a> {
w: &'a mut W,
}
impl<'a> _IC3EIFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 15;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _IC3IFW<'a> {
w: &'a mut W,
}
impl<'a> _IC3IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 16;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _OC3IFW<'a> {
w: &'a mut W,
}
impl<'a> _OC3IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 17;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _INT3IFW<'a> {
w: &'a mut W,
}
impl<'a> _INT3IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 18;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _T4IFW<'a> {
w: &'a mut W,
}
impl<'a> _T4IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 19;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _IC4EIFW<'a> {
w: &'a mut W,
}
impl<'a> _IC4EIFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 20;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _IC4IFW<'a> {
w: &'a mut W,
}
impl<'a> _IC4IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 21;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _OC4IFW<'a> {
w: &'a mut W,
}
impl<'a> _OC4IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 22;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _INT4IFW<'a> {
w: &'a mut W,
}
impl<'a> _INT4IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 23;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _T5IFW<'a> {
w: &'a mut W,
}
impl<'a> _T5IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 24;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _IC5EIFW<'a> {
w: &'a mut W,
}
impl<'a> _IC5EIFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 25;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _IC5IFW<'a> {
w: &'a mut W,
}
impl<'a> _IC5IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 26;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _OC5IFW<'a> {
w: &'a mut W,
}
impl<'a> _OC5IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 27;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _AD1IFW<'a> {
w: &'a mut W,
}
impl<'a> _AD1IFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 28;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _FSCMIFW<'a> {
w: &'a mut W,
}
impl<'a> _FSCMIFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 29;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _RTCCIFW<'a> {
w: &'a mut W,
}
impl<'a> _RTCCIFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 30;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _FCEIFW<'a> {
w: &'a mut W,
}
impl<'a> _FCEIFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 31;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 0"]
#[inline]
pub fn ctif(&self) -> CTIFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) != 0
};
CTIFR { bits }
}
#[doc = "Bit 1"]
#[inline]
pub fn cs0if(&self) -> CS0IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) != 0
};
CS0IFR { bits }
}
#[doc = "Bit 2"]
#[inline]
pub fn cs1if(&self) -> CS1IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 2;
((self.bits >> OFFSET) & MASK as u32) != 0
};
CS1IFR { bits }
}
#[doc = "Bit 3"]
#[inline]
pub fn int0if(&self) -> INT0IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INT0IFR { bits }
}
#[doc = "Bit 4"]
#[inline]
pub fn t1if(&self) -> T1IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 4;
((self.bits >> OFFSET) & MASK as u32) != 0
};
T1IFR { bits }
}
#[doc = "Bit 5"]
#[inline]
pub fn ic1eif(&self) -> IC1EIFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 5;
((self.bits >> OFFSET) & MASK as u32) != 0
};
IC1EIFR { bits }
}
#[doc = "Bit 6"]
#[inline]
pub fn ic1if(&self) -> IC1IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 6;
((self.bits >> OFFSET) & MASK as u32) != 0
};
IC1IFR { bits }
}
#[doc = "Bit 7"]
#[inline]
pub fn oc1if(&self) -> OC1IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 7;
((self.bits >> OFFSET) & MASK as u32) != 0
};
OC1IFR { bits }
}
#[doc = "Bit 8"]
#[inline]
pub fn int1if(&self) -> INT1IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INT1IFR { bits }
}
#[doc = "Bit 9"]
#[inline]
pub fn t2if(&self) -> T2IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 9;
((self.bits >> OFFSET) & MASK as u32) != 0
};
T2IFR { bits }
}
#[doc = "Bit 10"]
#[inline]
pub fn ic2eif(&self) -> IC2EIFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 10;
((self.bits >> OFFSET) & MASK as u32) != 0
};
IC2EIFR { bits }
}
#[doc = "Bit 11"]
#[inline]
pub fn ic2if(&self) -> IC2IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 11;
((self.bits >> OFFSET) & MASK as u32) != 0
};
IC2IFR { bits }
}
#[doc = "Bit 12"]
#[inline]
pub fn oc2if(&self) -> OC2IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 12;
((self.bits >> OFFSET) & MASK as u32) != 0
};
OC2IFR { bits }
}
#[doc = "Bit 13"]
#[inline]
pub fn int2if(&self) -> INT2IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 13;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INT2IFR { bits }
}
#[doc = "Bit 14"]
#[inline]
pub fn t3if(&self) -> T3IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 14;
((self.bits >> OFFSET) & MASK as u32) != 0
};
T3IFR { bits }
}
#[doc = "Bit 15"]
#[inline]
pub fn ic3eif(&self) -> IC3EIFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 15;
((self.bits >> OFFSET) & MASK as u32) != 0
};
IC3EIFR { bits }
}
#[doc = "Bit 16"]
#[inline]
pub fn ic3if(&self) -> IC3IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 16;
((self.bits >> OFFSET) & MASK as u32) != 0
};
IC3IFR { bits }
}
#[doc = "Bit 17"]
#[inline]
pub fn oc3if(&self) -> OC3IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 17;
((self.bits >> OFFSET) & MASK as u32) != 0
};
OC3IFR { bits }
}
#[doc = "Bit 18"]
#[inline]
pub fn int3if(&self) -> INT3IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 18;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INT3IFR { bits }
}
#[doc = "Bit 19"]
#[inline]
pub fn t4if(&self) -> T4IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 19;
((self.bits >> OFFSET) & MASK as u32) != 0
};
T4IFR { bits }
}
#[doc = "Bit 20"]
#[inline]
pub fn ic4eif(&self) -> IC4EIFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 20;
((self.bits >> OFFSET) & MASK as u32) != 0
};
IC4EIFR { bits }
}
#[doc = "Bit 21"]
#[inline]
pub fn ic4if(&self) -> IC4IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 21;
((self.bits >> OFFSET) & MASK as u32) != 0
};
IC4IFR { bits }
}
#[doc = "Bit 22"]
#[inline]
pub fn oc4if(&self) -> OC4IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 22;
((self.bits >> OFFSET) & MASK as u32) != 0
};
OC4IFR { bits }
}
#[doc = "Bit 23"]
#[inline]
pub fn int4if(&self) -> INT4IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 23;
((self.bits >> OFFSET) & MASK as u32) != 0
};
INT4IFR { bits }
}
#[doc = "Bit 24"]
#[inline]
pub fn t5if(&self) -> T5IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 24;
((self.bits >> OFFSET) & MASK as u32) != 0
};
T5IFR { bits }
}
#[doc = "Bit 25"]
#[inline]
pub fn ic5eif(&self) -> IC5EIFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 25;
((self.bits >> OFFSET) & MASK as u32) != 0
};
IC5EIFR { bits }
}
#[doc = "Bit 26"]
#[inline]
pub fn ic5if(&self) -> IC5IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 26;
((self.bits >> OFFSET) & MASK as u32) != 0
};
IC5IFR { bits }
}
#[doc = "Bit 27"]
#[inline]
pub fn oc5if(&self) -> OC5IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 27;
((self.bits >> OFFSET) & MASK as u32) != 0
};
OC5IFR { bits }
}
#[doc = "Bit 28"]
#[inline]
pub fn ad1if(&self) -> AD1IFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 28;
((self.bits >> OFFSET) & MASK as u32) != 0
};
AD1IFR { bits }
}
#[doc = "Bit 29"]
#[inline]
pub fn fscmif(&self) -> FSCMIFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 29;
((self.bits >> OFFSET) & MASK as u32) != 0
};
FSCMIFR { bits }
}
#[doc = "Bit 30"]
#[inline]
pub fn rtccif(&self) -> RTCCIFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 30;
((self.bits >> OFFSET) & MASK as u32) != 0
};
RTCCIFR { bits }
}
#[doc = "Bit 31"]
#[inline]
pub fn fceif(&self) -> FCEIFR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 31;
((self.bits >> OFFSET) & MASK as u32) != 0
};
FCEIFR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 0"]
#[inline]
pub fn ctif(&mut self) -> _CTIFW {
_CTIFW { w: self }
}
#[doc = "Bit 1"]
#[inline]
pub fn cs0if(&mut self) -> _CS0IFW {
_CS0IFW { w: self }
}
#[doc = "Bit 2"]
#[inline]
pub fn cs1if(&mut self) -> _CS1IFW {
_CS1IFW { w: self }
}
#[doc = "Bit 3"]
#[inline]
pub fn int0if(&mut self) -> _INT0IFW {
_INT0IFW { w: self }
}
#[doc = "Bit 4"]
#[inline]
pub fn t1if(&mut self) -> _T1IFW {
_T1IFW { w: self }
}
#[doc = "Bit 5"]
#[inline]
pub fn ic1eif(&mut self) -> _IC1EIFW {
_IC1EIFW { w: self }
}
#[doc = "Bit 6"]
#[inline]
pub fn ic1if(&mut self) -> _IC1IFW {
_IC1IFW { w: self }
}
#[doc = "Bit 7"]
#[inline]
pub fn oc1if(&mut self) -> _OC1IFW {
_OC1IFW { w: self }
}
#[doc = "Bit 8"]
#[inline]
pub fn int1if(&mut self) -> _INT1IFW {
_INT1IFW { w: self }
}
#[doc = "Bit 9"]
#[inline]
pub fn t2if(&mut self) -> _T2IFW {
_T2IFW { w: self }
}
#[doc = "Bit 10"]
#[inline]
pub fn ic2eif(&mut self) -> _IC2EIFW {
_IC2EIFW { w: self }
}
#[doc = "Bit 11"]
#[inline]
pub fn ic2if(&mut self) -> _IC2IFW {
_IC2IFW { w: self }
}
#[doc = "Bit 12"]
#[inline]
pub fn oc2if(&mut self) -> _OC2IFW {
_OC2IFW { w: self }
}
#[doc = "Bit 13"]
#[inline]
pub fn int2if(&mut self) -> _INT2IFW {
_INT2IFW { w: self }
}
#[doc = "Bit 14"]
#[inline]
pub fn t3if(&mut self) -> _T3IFW {
_T3IFW { w: self }
}
#[doc = "Bit 15"]
#[inline]
pub fn ic3eif(&mut self) -> _IC3EIFW {
_IC3EIFW { w: self }
}
#[doc = "Bit 16"]
#[inline]
pub fn ic3if(&mut self) -> _IC3IFW {
_IC3IFW { w: self }
}
#[doc = "Bit 17"]
#[inline]
pub fn oc3if(&mut self) -> _OC3IFW {
_OC3IFW { w: self }
}
#[doc = "Bit 18"]
#[inline]
pub fn int3if(&mut self) -> _INT3IFW {
_INT3IFW { w: self }
}
#[doc = "Bit 19"]
#[inline]
pub fn t4if(&mut self) -> _T4IFW {
_T4IFW { w: self }
}
#[doc = "Bit 20"]
#[inline]
pub fn ic4eif(&mut self) -> _IC4EIFW {
_IC4EIFW { w: self }
}
#[doc = "Bit 21"]
#[inline]
pub fn ic4if(&mut self) -> _IC4IFW {
_IC4IFW { w: self }
}
#[doc = "Bit 22"]
#[inline]
pub fn oc4if(&mut self) -> _OC4IFW {
_OC4IFW { w: self }
}
#[doc = "Bit 23"]
#[inline]
pub fn int4if(&mut self) -> _INT4IFW {
_INT4IFW { w: self }
}
#[doc = "Bit 24"]
#[inline]
pub fn t5if(&mut self) -> _T5IFW {
_T5IFW { w: self }
}
#[doc = "Bit 25"]
#[inline]
pub fn ic5eif(&mut self) -> _IC5EIFW {
_IC5EIFW { w: self }
}
#[doc = "Bit 26"]
#[inline]
pub fn ic5if(&mut self) -> _IC5IFW {
_IC5IFW { w: self }
}
#[doc = "Bit 27"]
#[inline]
pub fn oc5if(&mut self) -> _OC5IFW {
_OC5IFW { w: self }
}
#[doc = "Bit 28"]
#[inline]
pub fn ad1if(&mut self) -> _AD1IFW {
_AD1IFW { w: self }
}
#[doc = "Bit 29"]
#[inline]
pub fn fscmif(&mut self) -> _FSCMIFW {
_FSCMIFW { w: self }
}
#[doc = "Bit 30"]
#[inline]
pub fn rtccif(&mut self) -> _RTCCIFW {
_RTCCIFW { w: self }
}
#[doc = "Bit 31"]
#[inline]
pub fn fceif(&mut self) -> _FCEIFW {
_FCEIFW { w: self }
}
}
| _INT0IFW |
casual_test.go | package ru_test
import (
"testing"
"time"
"github.com/olebedev/when"
"github.com/olebedev/when/rules"
"github.com/olebedev/when/rules/ru"
)
func TestCasualDate(t *testing.T) {
fixt := []Fixture{
{"Это нужно сделать прямо сейчас", 33, "прямо сейчас", 0},
{"Это нужно сделать сегодня", 33, "сегодня", 0},
{"Это нужно сделать завтра вечером", 33, "завтра", time.Hour * 24},
{"Это нужно было сделать вчера вечером", 42, "вчера", -(time.Hour * 24)},
{"Это нужно сделать до завтра", 33, "до завтра", time.Hour * 24},
}
w := when.New(nil)
w.Add(ru.CasualDate(rules.Skip))
ApplyFixtures(t, "ru.CasualDate", w, fixt)
}
func TestCasualTime(t *testing.T) {
fixt := []Fixture{
{"Это нужно было сделать этим утром ", 42, "этим утром", 8 * time.Hour},
{"Это нужно сделать до обеда", 33, "до обеда", 12 * time.Hour},
{"Это нужно сделать после обеда", 33, "после обеда", 15 * time.Hour},
{"Это нужно сделать к вечеру", 33, "к вечеру", 18 * time.Hour},
{"вечером", 0, "вечером", 18 * time.Hour},
{"вечером", 0, "вечером", 18 * time.Hour},
}
w := when.New(nil) |
ApplyFixtures(t, "ru.CasualTime", w, fixt)
}
func TestCasualDateCasualTime(t *testing.T) {
fixt := []Fixture{
{"Это нужно сделать завтра после обеда", 33, "завтра после обеда", (15 + 24) * time.Hour},
{"Это нужно сделать завтра утром", 33, "завтра утром", (8 + 24) * time.Hour},
{"Это нужно было сделать вчера утром", 42, "вчера утром", (8 - 24) * time.Hour},
{"Это нужно было сделать вчера после обеда", 42, "вчера после обеда", (15 - 24) * time.Hour},
{"помыть окна до вечера", 22, "до вечера", 18 * time.Hour},
{"помыть окна до обеда", 22, "до обеда", 12 * time.Hour},
{"сделать это к вечеру", 22, "к вечеру", 18 * time.Hour},
{"помыть окна завтра утром", 22, "завтра утром", 32 * time.Hour},
{"написать письмо во вторник после обеда", 50, "после обеда", 15 * time.Hour},
{"написать письмо до утра ", 30, "до утра", 8 * time.Hour},
{"к вечеру", 0, "к вечеру", 18 * time.Hour},
}
w := when.New(nil)
w.Add(
ru.CasualDate(rules.Skip),
ru.CasualTime(rules.Override),
)
ApplyFixtures(t, "ru.CasualDate|ru.CasualTime", w, fixt)
} | w.Add(ru.CasualTime(rules.Skip)) |
decorators.py | import os
def configuration(f):
import click
from functools import update_wrapper |
@click.pass_context
def inner(ctx, *args, **kwargs):
# HACK: We can't call `configure()` from within tests
# since we don't load config files from disk, so we
# need a way to bypass this initialization step
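        # (For example, a test harness might export
        # _<MODULE_NAME>_SKIP_CONFIGURATION=1 before invoking the CLI; the exact
        # variable name depends on the rendered cookiecutter module_name.)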
if os.environ.get('_{{ cookiecutter.module_name|upper }}_SKIP_CONFIGURATION') != '1':
from {{ cookiecutter.module_name }}.runner import configure
configure()
return ctx.invoke(f, *args, **kwargs)
return update_wrapper(inner, f) | |
v1_config_map_list.py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ConfigMapList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name and the value is attribute
type.
attribute_map (dict): The key is attribute name and the value is json key
in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1ConfigMap]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1ConfigMapList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1ConfigMapList.
APIVersion defines the versioned schema of this representation of an
object. Servers should convert recognized schemas to the latest internal
value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1ConfigMapList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1ConfigMapList.
APIVersion defines the versioned schema of this representation of an
object. Servers should convert recognized schemas to the latest internal
value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1ConfigMapList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1ConfigMapList.
Items is the list of ConfigMaps.
:return: The items of this V1ConfigMapList.
:rtype: list[V1ConfigMap]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1ConfigMapList.
Items is the list of ConfigMaps.
:param items: The items of this V1ConfigMapList.
:type: list[V1ConfigMap]
"""
if items is None:
raise ValueError('Invalid value for `items`, must not be `None`')
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1ConfigMapList.
Kind is a string value representing the REST resource this object
represents. Servers may infer this from the endpoint the client submits
requests to. Cannot be updated. In CamelCase. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1ConfigMapList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1ConfigMapList.
Kind is a string value representing the REST resource this object
represents. Servers may infer this from the endpoint the client submits
requests to. Cannot be updated. In CamelCase. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1ConfigMapList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1ConfigMapList.
More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:return: The metadata of this V1ConfigMapList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1ConfigMapList.
More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1ConfigMapList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, 'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], 'to_dict') else item, value.items()))
else:
result[attr] = value
return result
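    # For instance (illustrative): V1ConfigMapList(items=[]).to_dict() returns
    # {'api_version': None, 'items': [], 'kind': None, 'metadata': None}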
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
|
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ConfigMapList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| """
For `print` and `pprint`
"""
return self.to_str() |
0002_user_last_login.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-22 07:13
from __future__ import unicode_literals
from django.db import migrations, models
class | (migrations.Migration):
dependencies = [
('instagram', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='last_login',
field=models.DateTimeField(auto_now=True),
),
]
| Migration |
jde_tracker.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/multitracker.py
"""
import numpy as np
from collections import defaultdict
from ..matching import jde_matching as matching
from ..motion import KalmanFilter
from .base_jde_tracker import TrackState, STrack
from .base_jde_tracker import joint_stracks, sub_stracks, remove_duplicate_stracks
__all__ = ['JDETracker']
class JDETracker(object):
__shared__ = ['num_classes']
"""
JDE tracker, support single class and multi classes
Args:
num_classes (int): the number of classes
det_thresh (float): threshold of detection score
track_buffer (int): buffer for tracker
min_box_area (int): min box area to filter out low quality boxes
        vertical_ratio (float): w/h, the vertical aspect ratio of the bbox used
            to filter out bad results. If set < 0, no filtering is applied;
            usually set to 1.6 for pedestrian tracking.
tracked_thresh (float): linear assignment threshold of tracked
stracks and detections
r_tracked_thresh (float): linear assignment threshold of
tracked stracks and unmatched detections
unconfirmed_thresh (float): linear assignment threshold of
unconfirmed stracks and unmatched detections
motion (str): motion model, KalmanFilter as default
conf_thres (float): confidence threshold for tracking
metric_type (str): either "euclidean" or "cosine", the distance metric
            used for measurement-to-track association.
"""
def __init__(self,
use_byte=False,
num_classes=1,
det_thresh=0.3,
track_buffer=30,
min_box_area=200,
vertical_ratio=1.6,
tracked_thresh=0.7,
r_tracked_thresh=0.5,
unconfirmed_thresh=0.7,
conf_thres=0,
match_thres=0.8,
low_conf_thres=0.2,
motion='KalmanFilter',
metric_type='euclidean'):
self.use_byte = use_byte
self.num_classes = num_classes
self.det_thresh = det_thresh if not use_byte else conf_thres + 0.1
self.track_buffer = track_buffer
self.min_box_area = min_box_area
self.vertical_ratio = vertical_ratio
self.tracked_thresh = tracked_thresh
self.r_tracked_thresh = r_tracked_thresh
self.unconfirmed_thresh = unconfirmed_thresh
self.conf_thres = conf_thres
self.match_thres = match_thres
self.low_conf_thres = low_conf_thres
if motion == 'KalmanFilter':
self.motion = KalmanFilter()
self.metric_type = metric_type
self.frame_id = 0
self.tracked_tracks_dict = defaultdict(list) # dict(list[STrack])
self.lost_tracks_dict = defaultdict(list) # dict(list[STrack])
self.removed_tracks_dict = defaultdict(list) # dict(list[STrack])
self.max_time_lost = 0
# max_time_lost will be calculated: int(frame_rate / 30.0 * track_buffer)
def update(self, pred_dets, pred_embs=None):
"""
Processes the image frame and finds bounding box(detections).
Associates the detection with corresponding tracklets and also handles
lost, removed, refound and active tracklets.
Args:
pred_dets (np.array): Detection results of the image, the shape is
[N, 6], means 'cls_id, score, x0, y0, x1, y1'.
pred_embs (np.array): Embedding results of the image, the shape is
[N, 128] or [N, 512].
Return:
output_stracks_dict (dict(list)): The list contains information
                regarding the online_tracklets for the received image tensor.
"""
self.frame_id += 1
if self.frame_id == 1:
STrack.init_count(self.num_classes)
activated_tracks_dict = defaultdict(list)
refined_tracks_dict = defaultdict(list)
lost_tracks_dict = defaultdict(list)
removed_tracks_dict = defaultdict(list)
output_tracks_dict = defaultdict(list)
pred_dets_dict = defaultdict(list)
pred_embs_dict = defaultdict(list)
# unify single and multi classes detection and embedding results
for cls_id in range(self.num_classes):
cls_idx = (pred_dets[:, 0:1] == cls_id).squeeze(-1)
pred_dets_dict[cls_id] = pred_dets[cls_idx]
if pred_embs is not None:
pred_embs_dict[cls_id] = pred_embs[cls_idx]
else:
pred_embs_dict[cls_id] = None
for cls_id in range(self.num_classes):
""" Step 1: Get detections by class"""
pred_dets_cls = pred_dets_dict[cls_id]
pred_embs_cls = pred_embs_dict[cls_id]
remain_inds = (pred_dets_cls[:, 1:2] > self.conf_thres).squeeze(-1)
if remain_inds.sum() > 0:
pred_dets_cls = pred_dets_cls[remain_inds]
if self.use_byte:
detections = [
STrack(
STrack.tlbr_to_tlwh(tlbrs[2:6]),
tlbrs[1],
cls_id,
30,
temp_feat=None) for tlbrs in pred_dets_cls
]
else:
pred_embs_cls = pred_embs_cls[remain_inds]
detections = [
STrack(
STrack.tlbr_to_tlwh(tlbrs[2:6]), tlbrs[1], cls_id,
30, temp_feat)
for (tlbrs, temp_feat
) in zip(pred_dets_cls, pred_embs_cls)
]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed_dict = defaultdict(list)
tracked_tracks_dict = defaultdict(list)
for track in self.tracked_tracks_dict[cls_id]:
if not track.is_activated:
# previous tracks which are not active in the current frame are added in unconfirmed list
unconfirmed_dict[cls_id].append(track)
else:
# Active tracks are added to the local list 'tracked_stracks'
tracked_tracks_dict[cls_id].append(track)
""" Step 2: First association, with embedding"""
# building tracking pool for the current frame
track_pool_dict = defaultdict(list)
track_pool_dict[cls_id] = joint_stracks(
tracked_tracks_dict[cls_id], self.lost_tracks_dict[cls_id])
# Predict the current location with KalmanFilter
STrack.multi_predict(track_pool_dict[cls_id], self.motion)
if self.use_byte:
dists = matching.iou_distance(track_pool_dict[cls_id],
detections)
matches, u_track, u_detection = matching.linear_assignment(
dists, thresh=self.match_thres) # not self.tracked_thresh
else:
dists = matching.embedding_distance(
track_pool_dict[cls_id],
detections,
metric=self.metric_type)
dists = matching.fuse_motion(
self.motion, dists, track_pool_dict[cls_id], detections)
matches, u_track, u_detection = matching.linear_assignment(
dists, thresh=self.tracked_thresh)
for i_tracked, idet in matches:
# i_tracked is the id of the track and idet is the detection
track = track_pool_dict[cls_id][i_tracked] | track.update(detections[idet], self.frame_id)
activated_tracks_dict[cls_id].append(track)
else:
# We have obtained a detection from a track which is not active,
# hence put the track in refind_stracks list
track.re_activate(det, self.frame_id, new_id=False)
refined_tracks_dict[cls_id].append(track)
# None of the steps below happen if there are no undetected tracks.
""" Step 3: Second association, with IOU"""
if self.use_byte:
inds_low = pred_dets_dict[cls_id][:, 1:2] > self.low_conf_thres
inds_high = pred_dets_dict[cls_id][:, 1:2] < self.conf_thres
inds_second = np.logical_and(inds_low, inds_high).squeeze(-1)
pred_dets_cls_second = pred_dets_dict[cls_id][inds_second]
# association the untrack to the low score detections
if len(pred_dets_cls_second) > 0:
detections_second = [
STrack(
STrack.tlbr_to_tlwh(tlbrs[:4]),
tlbrs[4],
cls_id,
30,
temp_feat=None)
for tlbrs in pred_dets_cls_second[:, :5]
]
else:
detections_second = []
r_tracked_stracks = [
track_pool_dict[cls_id][i] for i in u_track
if track_pool_dict[cls_id][i].state == TrackState.Tracked
]
dists = matching.iou_distance(r_tracked_stracks,
detections_second)
matches, u_track, u_detection_second = matching.linear_assignment(
dists, thresh=0.4) # not r_tracked_thresh
else:
detections = [detections[i] for i in u_detection]
r_tracked_stracks = []
for i in u_track:
if track_pool_dict[cls_id][i].state == TrackState.Tracked:
r_tracked_stracks.append(track_pool_dict[cls_id][i])
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(
dists, thresh=self.r_tracked_thresh)
for i_tracked, idet in matches:
track = r_tracked_stracks[i_tracked]
det = detections[
idet] if not self.use_byte else detections_second[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_tracks_dict[cls_id].append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refined_tracks_dict[cls_id].append(track)
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_tracks_dict[cls_id].append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed_dict[cls_id], detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(
dists, thresh=self.unconfirmed_thresh)
for i_tracked, idet in matches:
unconfirmed_dict[cls_id][i_tracked].update(detections[idet],
self.frame_id)
activated_tracks_dict[cls_id].append(unconfirmed_dict[cls_id][
i_tracked])
for it in u_unconfirmed:
track = unconfirmed_dict[cls_id][it]
track.mark_removed()
removed_tracks_dict[cls_id].append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate(self.motion, self.frame_id)
activated_tracks_dict[cls_id].append(track)
""" Step 5: Update state"""
for track in self.lost_tracks_dict[cls_id]:
if self.frame_id - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_tracks_dict[cls_id].append(track)
self.tracked_tracks_dict[cls_id] = [
t for t in self.tracked_tracks_dict[cls_id]
if t.state == TrackState.Tracked
]
self.tracked_tracks_dict[cls_id] = joint_stracks(
self.tracked_tracks_dict[cls_id], activated_tracks_dict[cls_id])
self.tracked_tracks_dict[cls_id] = joint_stracks(
self.tracked_tracks_dict[cls_id], refined_tracks_dict[cls_id])
self.lost_tracks_dict[cls_id] = sub_stracks(
self.lost_tracks_dict[cls_id], self.tracked_tracks_dict[cls_id])
self.lost_tracks_dict[cls_id].extend(lost_tracks_dict[cls_id])
self.lost_tracks_dict[cls_id] = sub_stracks(
self.lost_tracks_dict[cls_id], self.removed_tracks_dict[cls_id])
self.removed_tracks_dict[cls_id].extend(removed_tracks_dict[cls_id])
self.tracked_tracks_dict[cls_id], self.lost_tracks_dict[
cls_id] = remove_duplicate_stracks(
self.tracked_tracks_dict[cls_id],
self.lost_tracks_dict[cls_id])
# get scores of lost tracks
output_tracks_dict[cls_id] = [
track for track in self.tracked_tracks_dict[cls_id]
if track.is_activated
]
return output_tracks_dict | det = detections[idet]
if track.state == TrackState.Tracked:
# If the track is active, add the detection to the track |
utils.py | # -*- coding: utf-8 -*-
""" OneLogin_Saml2_Utils class
Copyright (c) 2010-2021 OneLogin, Inc.
MIT License
Auxiliary class of OneLogin's Python Toolkit.
"""
import base64
import warnings
from copy import deepcopy
import calendar
from datetime import datetime
from hashlib import sha1, sha256, sha384, sha512
from isodate import parse_duration as duration_parser
import re
from textwrap import wrap
from functools import wraps
from uuid import uuid4
from xml.dom.minidom import Element
import zlib
import xmlsec
from onelogin.saml2 import compat
from onelogin.saml2.constants import OneLogin_Saml2_Constants
from onelogin.saml2.errors import OneLogin_Saml2_Error, OneLogin_Saml2_ValidationError
from onelogin.saml2.xml_utils import OneLogin_Saml2_XML
try:
from urllib.parse import quote_plus, urlsplit, urlunsplit # py3
except ImportError:
from urlparse import urlsplit, urlunsplit
from urllib import quote_plus # py2
def return_false_on_exception(func):
"""
Decorator. When applied to a function, it will, by default, suppress any exceptions
raised by that function and return False. It may be overridden by passing a
"raise_exceptions" keyword argument when calling the wrapped function.
"""
@wraps(func)
def exceptfalse(*args, **kwargs):
if not kwargs.pop('raise_exceptions', False):
try:
return func(*args, **kwargs)
except Exception:
return False
else:
return func(*args, **kwargs)
return exceptfalse
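# Illustrative usage (not part of the original module): a function wrapped with
# @return_false_on_exception returns False instead of raising, unless the caller
# passes raise_exceptions=True, e.g.
#   OneLogin_Saml2_Utils.validate_sign(xml, cert)                         # -> False on error
#   OneLogin_Saml2_Utils.validate_sign(xml, cert, raise_exceptions=True)  # -> raises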
class OneLogin_Saml2_Utils(object):
"""
Auxiliary class that contains several utility methods to parse time,
urls, add sign, encrypt, decrypt, sign validation, handle xml ...
"""
RESPONSE_SIGNATURE_XPATH = '/samlp:Response/ds:Signature'
ASSERTION_SIGNATURE_XPATH = '/samlp:Response/saml:Assertion/ds:Signature'
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
TIME_FORMAT_2 = "%Y-%m-%dT%H:%M:%S.%fZ"
TIME_FORMAT_WITH_FRAGMENT = re.compile(r'^(\d{4,4}-\d{2,2}-\d{2,2}T\d{2,2}:\d{2,2}:\d{2,2})(\.\d*)?Z?$')
@staticmethod
def escape_url(url, lowercase_urlencoding=False):
"""
escape the non-safe symbols in url
The encoding used by ADFS 3.0 is not compatible with
python's quote_plus (ADFS produces lower case hex numbers and quote_plus produces
upper case hex numbers)
:param url: the url to escape
:type url: str
        :param lowercase_urlencoding: whether to lower-case the percent-encoded hex digits (as ADFS does)
:type lowercase_urlencoding: boolean
:return: the escaped url
:rtype str
"""
encoded = quote_plus(url)
return re.sub(r"%[A-F0-9]{2}", lambda m: m.group(0).lower(), encoded) if lowercase_urlencoding else encoded
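    # For instance (illustrative): escape_url('a b/c') returns 'a+b%2Fc', while
    # escape_url('a b/c', lowercase_urlencoding=True) returns 'a+b%2fc', matching
    # the lower-case hex encoding produced by ADFS 3.0.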
@staticmethod
def b64encode(data):
"""base64 encode"""
return compat.to_string(base64.b64encode(compat.to_bytes(data)))
@staticmethod
def b64decode(data):
"""base64 decode"""
return base64.b64decode(data)
@staticmethod
def decode_base64_and_inflate(value, ignore_zip=False):
"""
base64 decodes and then inflates according to RFC1951
:param value: a deflated and encoded string
:type value: string
:param ignore_zip: ignore zip errors
:returns: the string after decoding and inflating
:rtype: string
"""
encoded = OneLogin_Saml2_Utils.b64decode(value)
try:
return zlib.decompress(encoded, -15)
except zlib.error:
if not ignore_zip:
raise
return encoded
@staticmethod
def deflate_and_base64_encode(value):
"""
Deflates and then base64 encodes a string
:param value: The string to deflate and encode
:type value: string
:returns: The deflated and encoded string
:rtype: string
"""
return OneLogin_Saml2_Utils.b64encode(zlib.compress(compat.to_bytes(value))[2:-4])
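    # Note: deflate_and_base64_encode and decode_base64_and_inflate are inverses:
    # decode_base64_and_inflate(deflate_and_base64_encode(s)) round-trips s (as
    # bytes), which is what the SAML HTTP-Redirect binding relies on.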
@staticmethod
def format_cert(cert, heads=True):
"""
Returns a x509 cert (adding header & footer if required).
:param cert: A x509 unformatted cert
:type: string
:param heads: True if we want to include head and footer
:type: boolean
:returns: Formatted cert
:rtype: string
"""
x509_cert = cert.replace('\x0D', '')
x509_cert = x509_cert.replace('\r', '')
x509_cert = x509_cert.replace('\n', '')
if len(x509_cert) > 0:
x509_cert = x509_cert.replace('-----BEGIN CERTIFICATE-----', '')
x509_cert = x509_cert.replace('-----END CERTIFICATE-----', '')
x509_cert = x509_cert.replace(' ', '')
if heads:
x509_cert = "-----BEGIN CERTIFICATE-----\n" + "\n".join(wrap(x509_cert, 64)) + "\n-----END CERTIFICATE-----\n"
return x509_cert
@staticmethod
def format_private_key(key, heads=True):
"""
Returns a private key (adding header & footer if required).
        :param key: A private key
:type: string
:param heads: True if we want to include head and footer
:type: boolean
        :returns: Formatted private key
:rtype: string
"""
private_key = key.replace('\x0D', '')
private_key = private_key.replace('\r', '')
private_key = private_key.replace('\n', '')
if len(private_key) > 0:
if private_key.find('-----BEGIN PRIVATE KEY-----') != -1:
private_key = private_key.replace('-----BEGIN PRIVATE KEY-----', '')
private_key = private_key.replace('-----END PRIVATE KEY-----', '')
private_key = private_key.replace(' ', '')
if heads:
private_key = "-----BEGIN PRIVATE KEY-----\n" + "\n".join(wrap(private_key, 64)) + "\n-----END PRIVATE KEY-----\n"
elif private_key.find('-----BEGIN ENCRYPTED PRIVATE KEY-----') != -1:
private_key = private_key.replace('-----BEGIN ENCRYPTED PRIVATE KEY-----', '')
private_key = private_key.replace('-----END ENCRYPTED PRIVATE KEY-----', '')
private_key = private_key.replace(' ', '')
if heads:
private_key = "-----BEGIN ENCRYPTED PRIVATE KEY-----\n" + "\n".join(wrap(private_key, 64)) + "\n-----END ENCRYPTED PRIVATE KEY-----\n"
else:
private_key = private_key.replace('-----BEGIN RSA PRIVATE KEY-----', '')
private_key = private_key.replace('-----END RSA PRIVATE KEY-----', '')
private_key = private_key.replace(' ', '')
if heads:
private_key = "-----BEGIN RSA PRIVATE KEY-----\n" + "\n".join(wrap(private_key, 64)) + "\n-----END RSA PRIVATE KEY-----\n"
return private_key
@staticmethod
def redirect(url, parameters={}, request_data={}):
"""
Executes a redirection to the provided url (or return the target url).
:param url: The target url
:type: string
:param parameters: Extra parameters to be passed as part of the url
:type: dict
:param request_data: The request as a dict
:type: dict
:returns: Url
:rtype: string
"""
assert isinstance(url, compat.str_type)
assert isinstance(parameters, dict)
if url.startswith('/'):
url = '%s%s' % (OneLogin_Saml2_Utils.get_self_url_host(request_data), url)
# Verify that the URL is to a http or https site.
if re.search('^https?://', url, flags=re.IGNORECASE) is None:
raise OneLogin_Saml2_Error(
'Redirect to invalid URL: ' + url,
OneLogin_Saml2_Error.REDIRECT_INVALID_URL
)
# Add encoded parameters
if url.find('?') < 0:
param_prefix = '?'
else:
param_prefix = '&'
for name, value in parameters.items():
if value is None:
param = OneLogin_Saml2_Utils.escape_url(name)
elif isinstance(value, list):
param = ''
for val in value:
param += OneLogin_Saml2_Utils.escape_url(name) + '[]=' + OneLogin_Saml2_Utils.escape_url(val) + '&'
if len(param) > 0:
param = param[0:-1]
else:
param = OneLogin_Saml2_Utils.escape_url(name) + '=' + OneLogin_Saml2_Utils.escape_url(value)
if param:
url += param_prefix + param
param_prefix = '&'
return url
@staticmethod
def get_self_url_host(request_data):
"""
Returns the protocol + the current host + the port (if different than
common ports).
:param request_data: The request as a dict
:type: dict
:return: Url
:rtype: string
"""
current_host = OneLogin_Saml2_Utils.get_self_host(request_data)
protocol = 'https' if OneLogin_Saml2_Utils.is_https(request_data) else 'http'
if request_data.get('server_port') is not None:
warnings.warn(
'The server_port key in request data is deprecated. '
'The http_host key should include a port, if required.',
category=DeprecationWarning,
)
port_suffix = ':%s' % request_data['server_port']
if not current_host.endswith(port_suffix):
if not ((protocol == 'https' and port_suffix == ':443') or (protocol == 'http' and port_suffix == ':80')):
current_host += port_suffix
return '%s://%s' % (protocol, current_host)
@staticmethod
def get_self_host(request_data):
"""
Returns the current host (which may include a port number part).
:param request_data: The request as a dict
:type: dict
:return: The current host
:rtype: string
"""
if 'http_host' in request_data:
return request_data['http_host']
elif 'server_name' in request_data:
warnings.warn("The server_name key in request data is undocumented & deprecated.", category=DeprecationWarning)
return request_data['server_name']
raise Exception('No hostname defined')
@staticmethod
def is_https(request_data):
"""
Checks if https or http.
:param request_data: The request as a dict
:type: dict
:return: False if https is not active
:rtype: boolean
"""
is_https = 'https' in request_data and request_data['https'] != 'off'
# TODO: this use of server_port should be removed too
is_https = is_https or ('server_port' in request_data and str(request_data['server_port']) == '443')
return is_https
@staticmethod
def get_self_url_no_query(request_data):
"""
Returns the URL of the current host + current view.
:param request_data: The request as a dict
:type: dict
:return: The url of current host + current view
:rtype: string
"""
self_url_host = OneLogin_Saml2_Utils.get_self_url_host(request_data)
script_name = request_data['script_name']
if script_name:
if script_name[0] != '/':
script_name = '/' + script_name
else:
script_name = ''
self_url_no_query = self_url_host + script_name
if 'path_info' in request_data:
self_url_no_query += request_data['path_info']
return self_url_no_query
@staticmethod
def get_self_routed_url_no_query(request_data):
"""
Returns the routed URL of the current host + current view.
:param request_data: The request as a dict
:type: dict
:return: The url of current host + current view
:rtype: string
"""
self_url_host = OneLogin_Saml2_Utils.get_self_url_host(request_data)
route = ''
if 'request_uri' in request_data and request_data['request_uri']:
route = request_data['request_uri']
if 'query_string' in request_data and request_data['query_string']:
route = route.replace(request_data['query_string'], '')
return self_url_host + route
@staticmethod
def get_self_url(request_data):
"""
Returns the URL of the current host + current view + query.
:param request_data: The request as a dict
:type: dict
:return: The url of current host + current view + query
:rtype: string
"""
self_url_host = OneLogin_Saml2_Utils.get_self_url_host(request_data)
request_uri = ''
if 'request_uri' in request_data:
request_uri = request_data['request_uri']
if not request_uri.startswith('/'):
match = re.search('^https?://[^/]*(/.*)', request_uri)
if match is not None:
request_uri = match.groups()[0]
return self_url_host + request_uri
@staticmethod
def generate_unique_id():
"""
        Generates a unique string (used for example as ID for assertions).
:return: A unique string
:rtype: string
"""
return 'ONELOGIN_%s' % sha1(compat.to_bytes(uuid4().hex)).hexdigest()
@staticmethod
def parse_time_to_SAML(time):
r"""
        Converts a UNIX timestamp to a SAML2 timestamp of the form
        yyyy-mm-ddThh:mm:ss(\.s+)?Z.
:param time: The time we should convert (DateTime).
:type: string
:return: SAML2 timestamp.
:rtype: string
"""
data = datetime.utcfromtimestamp(float(time))
return data.strftime(OneLogin_Saml2_Utils.TIME_FORMAT)
@staticmethod
def parse_SAML_to_time(timestr):
r"""
        Converts a SAML2 timestamp of the form yyyy-mm-ddThh:mm:ss(\.s+)?Z
        to a UNIX timestamp. The sub-second part is ignored.
:param timestr: The time we should convert (SAML Timestamp).
:type: string
:return: Converted to a unix timestamp.
:rtype: int
"""
try:
data = datetime.strptime(timestr, OneLogin_Saml2_Utils.TIME_FORMAT)
except ValueError:
try:
data = datetime.strptime(timestr, OneLogin_Saml2_Utils.TIME_FORMAT_2)
except ValueError:
elem = OneLogin_Saml2_Utils.TIME_FORMAT_WITH_FRAGMENT.match(timestr)
if not elem:
raise Exception("time data %s does not match format %s" % (timestr, r'yyyy-mm-ddThh:mm:ss(\.s+)?Z'))
data = datetime.strptime(elem.groups()[0] + "Z", OneLogin_Saml2_Utils.TIME_FORMAT)
return calendar.timegm(data.utctimetuple())
@staticmethod
def now():
"""
:return: unix timestamp of actual time.
:rtype: int
"""
return calendar.timegm(datetime.utcnow().utctimetuple())
@staticmethod
def parse_duration(duration, timestamp=None):
"""
        Interprets an ISO8601 duration value relative to a given timestamp.
:param duration: The duration, as a string.
:type: string
:param timestamp: The unix timestamp we should apply the duration to.
Optional, default to the current time.
:type: string
:return: The new timestamp, after the duration is applied.
:rtype: int
"""
assert isinstance(duration, compat.str_type)
assert timestamp is None or isinstance(timestamp, int)
timedelta = duration_parser(duration)
if timestamp is None:
data = datetime.utcnow() + timedelta
else:
data = datetime.utcfromtimestamp(timestamp) + timedelta
return calendar.timegm(data.utctimetuple())
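    # Example (illustrative): parse_duration('PT5M', 0) == 300, i.e. five minutes
    # after the Unix epoch; when timestamp is omitted the duration is applied to now().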
@staticmethod
def get_expire_time(cache_duration=None, valid_until=None):
"""
Compares 2 dates and returns the earliest.
:param cache_duration: The duration, as a string.
:type: string
:param valid_until: The valid until date, as a string or as a timestamp
:type: string
:return: The expiration time.
:rtype: int
"""
expire_time = None
if cache_duration is not None:
expire_time = OneLogin_Saml2_Utils.parse_duration(cache_duration)
if valid_until is not None:
if isinstance(valid_until, int):
valid_until_time = valid_until
else:
valid_until_time = OneLogin_Saml2_Utils.parse_SAML_to_time(valid_until)
if expire_time is None or expire_time > valid_until_time:
expire_time = valid_until_time
if expire_time is not None:
return '%d' % expire_time
return None
@staticmethod
def delete_local_session(callback=None):
"""
Deletes the local session.
"""
if callback is not None:
callback()
@staticmethod
def calculate_x509_fingerprint(x509_cert, alg='sha1'):
"""
Calculates the fingerprint of a formatted x509cert.
:param x509_cert: x509 cert formatted
:type: string
:param alg: The algorithm to build the fingerprint
:type: string
:returns: fingerprint
:rtype: string
"""
assert isinstance(x509_cert, compat.str_type)
lines = x509_cert.split('\n')
data = ''
inData = False
for line in lines:
# Remove '\r' from end of line if present.
line = line.rstrip()
if not inData:
if line == '-----BEGIN CERTIFICATE-----':
inData = True
elif line == '-----BEGIN PUBLIC KEY-----' or line == '-----BEGIN RSA PRIVATE KEY-----':
# This isn't an X509 certificate.
return None
else:
if line == '-----END CERTIFICATE-----':
break
# Append the current line to the certificate data.
data += line
if not data:
return None
decoded_data = base64.b64decode(compat.to_bytes(data))
if alg == 'sha512':
fingerprint = sha512(decoded_data)
elif alg == 'sha384':
fingerprint = sha384(decoded_data)
elif alg == 'sha256':
fingerprint = sha256(decoded_data)
else:
fingerprint = sha1(decoded_data)
return fingerprint.hexdigest().lower()
@staticmethod
def format_finger_print(fingerprint):
|
@classmethod
def generate_name_id(cls, value, sp_nq, sp_format=None, cert=None, debug=False, nq=None):
"""
Generates a nameID.
:param value: fingerprint
:type: string
:param sp_nq: SP Name Qualifier
:type: string
:param sp_format: SP Format
:type: string
:param cert: IdP Public Cert to encrypt the nameID
:type: string
:param debug: Activate the xmlsec debug
:type: bool
:returns: DOMElement | XMLSec nameID
:rtype: string
:param nq: IDP Name Qualifier
:type: string
"""
name_id = OneLogin_Saml2_XML.make_root('{%s}NameID' % OneLogin_Saml2_Constants.NS_SAML)
if sp_nq is not None:
name_id.set('SPNameQualifier', sp_nq)
if sp_format is not None:
name_id.set('Format', sp_format)
if nq is not None:
name_id.set('NameQualifier', nq)
name_id.text = value
if cert is not None:
return '<saml:EncryptedID>' + cls.encrypt_element(name_id, cert, debug=debug) + '</saml:EncryptedID>'
else:
root = OneLogin_Saml2_XML.make_root("{%s}container" % OneLogin_Saml2_Constants.NS_SAML)
root.append(name_id)
return OneLogin_Saml2_XML.extract_tag_text(root, "saml:NameID")
@classmethod
def get_status(cls, dom):
"""
Gets Status from a Response.
:param dom: The Response as XML
:type: Document
:returns: The Status, an array with the code and a message. 'code' entry is the
topmost StatusCode, and 'codes' entry contains the StatusCodes in document
order.
:rtype: dict
"""
doc = OneLogin_Saml2_XML.query(dom, '/samlp:Response')
if len(doc) != 1:
raise OneLogin_Saml2_ValidationError(
'Missing Status on response',
OneLogin_Saml2_ValidationError.MISSING_STATUS
)
return cls.get_specific_status(doc[0])
@staticmethod
def get_specific_status(doc):
status = {}
status_entry = OneLogin_Saml2_XML.query(doc, './samlp:Status')
if len(status_entry) != 1:
raise OneLogin_Saml2_ValidationError(
'Missing Status on response',
OneLogin_Saml2_ValidationError.MISSING_STATUS
)
code_entries = OneLogin_Saml2_XML.query(doc, './/samlp:StatusCode', status_entry[0])
if not code_entries:
raise OneLogin_Saml2_ValidationError(
'Missing Status Code on response',
OneLogin_Saml2_ValidationError.MISSING_STATUS_CODE
)
status['codes'] = [c.get('Value') for c in code_entries]
status['code'] = status['codes'][0]
status['msg'] = ''
message_entry = OneLogin_Saml2_XML.query(doc, './samlp:StatusMessage', status_entry[0])
if len(message_entry) == 0:
subcode_entry = OneLogin_Saml2_XML.query(doc, './samlp:StatusCode/samlp:StatusCode', status_entry[0])
if len(subcode_entry) == 1:
status['msg'] = subcode_entry[0].values()[0]
elif len(message_entry) == 1:
status['msg'] = OneLogin_Saml2_XML.element_text(message_entry[0])
return status
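    # For a successful Response, the returned dict typically looks like (illustrative):
    #   {'code': 'urn:oasis:names:tc:SAML:2.0:status:Success',
    #    'codes': ['urn:oasis:names:tc:SAML:2.0:status:Success'],
    #    'msg': ''}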
@staticmethod
def decrypt_element(encrypted_data, key, key_passphrase=None, debug=False, inplace=False):
"""
Decrypts an encrypted element.
:param encrypted_data: The encrypted data.
:type: lxml.etree.Element | DOMElement | basestring
:param key: The key.
:type: string
:param debug: Activate the xmlsec debug
:type: bool
:param inplace: update passed data with decrypted result
:type: bool
:returns: The decrypted element.
:rtype: lxml.etree.Element
"""
if isinstance(encrypted_data, Element):
encrypted_data = OneLogin_Saml2_XML.to_etree(str(encrypted_data.toxml()))
if not inplace and isinstance(encrypted_data, OneLogin_Saml2_XML._element_class):
encrypted_data = deepcopy(encrypted_data)
elif isinstance(encrypted_data, OneLogin_Saml2_XML._text_class):
encrypted_data = OneLogin_Saml2_XML._parse_etree(encrypted_data)
xmlsec.enable_debug_trace(debug)
manager = xmlsec.KeysManager()
manager.add_key(xmlsec.Key.from_memory(key, xmlsec.KeyFormat.PEM, key_passphrase))
enc_ctx = xmlsec.EncryptionContext(manager)
return enc_ctx.decrypt(encrypted_data)
@staticmethod
def encrypt_element(data, cert, debug=False):
"""
        Encrypts an element (for example a NameID) with the provided certificate.
        :param data: The element to encrypt
        :type: lxml.etree.Element | DOMElement | basestring
        :param cert: IdP public cert used to encrypt the element
        :type: string
        :param debug: Activate the xmlsec debug
        :type: bool
        :returns: The encrypted element serialized as a string
        :rtype: string
"""
xmlsec.enable_debug_trace(debug)
root = OneLogin_Saml2_XML.make_root("{%s}container" % OneLogin_Saml2_Constants.NS_SAML)
root.append(data)
# Load the public cert
manager = xmlsec.KeysManager()
manager.add_key(xmlsec.Key.from_memory(cert, xmlsec.KeyFormat.CERT_PEM, None))
# Prepare for encryption
enc_data = xmlsec.template.encrypted_data_create(
root, xmlsec.Transform.AES128, type=xmlsec.EncryptionType.ELEMENT, ns="xenc")
xmlsec.template.encrypted_data_ensure_cipher_value(enc_data)
key_info = xmlsec.template.encrypted_data_ensure_key_info(enc_data, ns="dsig")
enc_key = xmlsec.template.add_encrypted_key(key_info, xmlsec.Transform.RSA_OAEP)
xmlsec.template.encrypted_data_ensure_cipher_value(enc_key)
# Encrypt!
enc_ctx = xmlsec.EncryptionContext(manager)
enc_ctx.key = xmlsec.Key.generate(xmlsec.KeyData.AES, 128, xmlsec.KeyDataType.SESSION)
enc_data = enc_ctx.encrypt_xml(enc_data, data)
return compat.to_string(OneLogin_Saml2_XML.to_string(enc_data))
@staticmethod
def add_sign(xml, key, cert, debug=False, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1, digest_algorithm=OneLogin_Saml2_Constants.SHA1, key_passphrase=None):
"""
Adds signature key and senders certificate to an element (Message or
Assertion).
:param xml: The element we should sign
:type: string | Document
:param key: The private key
:type: string
:param cert: The public
:type: string
:param debug: Activate the xmlsec debug
:type: bool
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
:param digest_algorithm: Digest algorithm method
:type digest_algorithm: string
:returns: Signed XML
:rtype: string
"""
if xml is None or xml == '':
raise Exception('Empty string supplied as input')
elem = OneLogin_Saml2_XML.to_etree(xml)
sign_algorithm_transform_map = {
OneLogin_Saml2_Constants.DSA_SHA1: xmlsec.Transform.DSA_SHA1,
OneLogin_Saml2_Constants.RSA_SHA1: xmlsec.Transform.RSA_SHA1,
OneLogin_Saml2_Constants.RSA_SHA256: xmlsec.Transform.RSA_SHA256,
OneLogin_Saml2_Constants.RSA_SHA384: xmlsec.Transform.RSA_SHA384,
OneLogin_Saml2_Constants.RSA_SHA512: xmlsec.Transform.RSA_SHA512
}
sign_algorithm_transform = sign_algorithm_transform_map.get(sign_algorithm, xmlsec.Transform.RSA_SHA1)
signature = xmlsec.template.create(elem, xmlsec.Transform.EXCL_C14N, sign_algorithm_transform, ns='ds')
issuer = OneLogin_Saml2_XML.query(elem, '//saml:Issuer')
if len(issuer) > 0:
issuer = issuer[0]
issuer.addnext(signature)
elem_to_sign = issuer.getparent()
else:
entity_descriptor = OneLogin_Saml2_XML.query(elem, '//md:EntityDescriptor')
if len(entity_descriptor) > 0:
elem.insert(0, signature)
else:
elem[0].insert(0, signature)
elem_to_sign = elem
elem_id = elem_to_sign.get('ID', None)
if elem_id is not None:
if elem_id:
elem_id = '#' + elem_id
else:
generated_id = OneLogin_Saml2_Utils.generate_unique_id()
elem_id = '#' + generated_id
elem_to_sign.attrib['ID'] = generated_id
xmlsec.enable_debug_trace(debug)
xmlsec.tree.add_ids(elem_to_sign, ["ID"])
digest_algorithm_transform_map = {
OneLogin_Saml2_Constants.SHA1: xmlsec.Transform.SHA1,
OneLogin_Saml2_Constants.SHA256: xmlsec.Transform.SHA256,
OneLogin_Saml2_Constants.SHA384: xmlsec.Transform.SHA384,
OneLogin_Saml2_Constants.SHA512: xmlsec.Transform.SHA512
}
digest_algorithm_transform = digest_algorithm_transform_map.get(digest_algorithm, xmlsec.Transform.SHA1)
ref = xmlsec.template.add_reference(signature, digest_algorithm_transform, uri=elem_id)
xmlsec.template.add_transform(ref, xmlsec.Transform.ENVELOPED)
xmlsec.template.add_transform(ref, xmlsec.Transform.EXCL_C14N)
key_info = xmlsec.template.ensure_key_info(signature)
xmlsec.template.add_x509_data(key_info)
dsig_ctx = xmlsec.SignatureContext()
sign_key = xmlsec.Key.from_memory(key, xmlsec.KeyFormat.PEM, key_passphrase)
sign_key.load_cert_from_memory(cert, xmlsec.KeyFormat.PEM)
dsig_ctx.key = sign_key
dsig_ctx.sign(signature)
return OneLogin_Saml2_XML.to_string(elem)
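# Usage sketch (illustrative only; `xml`, `sp_key` and `sp_cert` are placeholders
# for the document to sign and a key/cert pair in PEM format):
#
#   signed = OneLogin_Saml2_Utils.add_sign(
#       xml, sp_key, sp_cert,
#       sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA256,
#       digest_algorithm=OneLogin_Saml2_Constants.SHA256)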
@staticmethod
@return_false_on_exception
def validate_sign(xml, cert=None, fingerprint=None, fingerprintalg='sha1', validatecert=False, debug=False, xpath=None, multicerts=None):
"""
Validates a signature (Message or Assertion).
:param xml: The element we should validate
:type: string | Document
:param cert: The public cert
:type: string
:param fingerprint: The fingerprint of the public cert
:type: string
:param fingerprintalg: The algorithm used to build the fingerprint
:type: string
:param validatecert: If true, will verify the signature and if the cert is valid.
:type: bool
:param debug: Activate the xmlsec debug
:type: bool
:param xpath: The xpath of the signed element
:type: string
:param multicerts: Multiple public certs
:type: list
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
"""
if xml is None or xml == '':
raise Exception('Empty string supplied as input')
elem = OneLogin_Saml2_XML.to_etree(xml)
xmlsec.enable_debug_trace(debug)
xmlsec.tree.add_ids(elem, ["ID"])
if xpath:
signature_nodes = OneLogin_Saml2_XML.query(elem, xpath)
else:
signature_nodes = OneLogin_Saml2_XML.query(elem, OneLogin_Saml2_Utils.RESPONSE_SIGNATURE_XPATH)
if len(signature_nodes) == 0:
signature_nodes = OneLogin_Saml2_XML.query(elem, OneLogin_Saml2_Utils.ASSERTION_SIGNATURE_XPATH)
if len(signature_nodes) == 1:
signature_node = signature_nodes[0]
if not multicerts:
return OneLogin_Saml2_Utils.validate_node_sign(signature_node, elem, cert, fingerprint, fingerprintalg, validatecert, debug, raise_exceptions=True)
else:
# If multiple certs are provided, ignore the cert and
# fingerprint passed to this method and validate against
# each cert in multicerts instead
fingerprint = fingerprintalg = None
for cert in multicerts:
if OneLogin_Saml2_Utils.validate_node_sign(signature_node, elem, cert, fingerprint, fingerprintalg, validatecert, False, raise_exceptions=False):
return True
raise OneLogin_Saml2_ValidationError(
'Signature validation failed. SAML Response rejected.',
OneLogin_Saml2_ValidationError.INVALID_SIGNATURE
)
else:
raise OneLogin_Saml2_ValidationError(
'Expected exactly one signature node; got {}.'.format(len(signature_nodes)),
OneLogin_Saml2_ValidationError.WRONG_NUMBER_OF_SIGNATURES
)
@staticmethod
@return_false_on_exception
def validate_metadata_sign(xml, cert=None, fingerprint=None, fingerprintalg='sha1', validatecert=False, debug=False):
"""
Validates a signature of a EntityDescriptor.
:param xml: The element we should validate
:type: string | Document
:param cert: The public cert
:type: string
:param fingerprint: The fingerprint of the public cert
:type: string
:param fingerprintalg: The algorithm used to build the fingerprint
:type: string
:param validatecert: If true, will verify the signature and if the cert is valid.
:type: bool
:param debug: Activate the xmlsec debug
:type: bool
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
"""
if xml is None or xml == '':
raise Exception('Empty string supplied as input')
elem = OneLogin_Saml2_XML.to_etree(xml)
xmlsec.enable_debug_trace(debug)
xmlsec.tree.add_ids(elem, ["ID"])
signature_nodes = OneLogin_Saml2_XML.query(elem, '/md:EntitiesDescriptor/ds:Signature')
if len(signature_nodes) == 0:
signature_nodes += OneLogin_Saml2_XML.query(elem, '/md:EntityDescriptor/ds:Signature')
if len(signature_nodes) == 0:
signature_nodes += OneLogin_Saml2_XML.query(elem, '/md:EntityDescriptor/md:SPSSODescriptor/ds:Signature')
signature_nodes += OneLogin_Saml2_XML.query(elem, '/md:EntityDescriptor/md:IDPSSODescriptor/ds:Signature')
if len(signature_nodes) > 0:
for signature_node in signature_nodes:
# Raises an exception if invalid
OneLogin_Saml2_Utils.validate_node_sign(signature_node, elem, cert, fingerprint, fingerprintalg, validatecert, debug, raise_exceptions=True)
return True
else:
raise Exception('Could not validate metadata signature: No signature nodes found.')
@staticmethod
@return_false_on_exception
def validate_node_sign(signature_node, elem, cert=None, fingerprint=None, fingerprintalg='sha1', validatecert=False, debug=False):
"""
Validates a signature node.
:param signature_node: The signature node
:type: Node
:param elem: The element we should validate
:type: Document
:param cert: The public cert
:type: string
:param fingerprint: The fingerprint of the public cert
:type: string
:param fingerprintalg: The algorithm used to build the fingerprint
:type: string
:param validatecert: If true, will verify the signature and if the cert is valid.
:type: bool
:param debug: Activate the xmlsec debug
:type: bool
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
"""
if (cert is None or cert == '') and fingerprint:
x509_certificate_nodes = OneLogin_Saml2_XML.query(signature_node, '//ds:Signature/ds:KeyInfo/ds:X509Data/ds:X509Certificate')
if len(x509_certificate_nodes) > 0:
x509_certificate_node = x509_certificate_nodes[0]
x509_cert_value = OneLogin_Saml2_XML.element_text(x509_certificate_node)
x509_cert_value_formatted = OneLogin_Saml2_Utils.format_cert(x509_cert_value)
x509_fingerprint_value = OneLogin_Saml2_Utils.calculate_x509_fingerprint(x509_cert_value_formatted, fingerprintalg)
if fingerprint == x509_fingerprint_value:
cert = x509_cert_value_formatted
if cert is None or cert == '':
raise OneLogin_Saml2_Error(
'Could not validate node signature: No certificate provided.',
OneLogin_Saml2_Error.CERT_NOT_FOUND
)
# Check if Reference URI is empty
# reference_elem = OneLogin_Saml2_XML.query(signature_node, '//ds:Reference')
# if len(reference_elem) > 0:
# if reference_elem[0].get('URI') == '':
# reference_elem[0].set('URI', '#%s' % signature_node.getparent().get('ID'))
if validatecert:
manager = xmlsec.KeysManager()
manager.load_cert_from_memory(cert, xmlsec.KeyFormat.CERT_PEM, xmlsec.KeyDataType.TRUSTED)
dsig_ctx = xmlsec.SignatureContext(manager)
else:
dsig_ctx = xmlsec.SignatureContext()
dsig_ctx.key = xmlsec.Key.from_memory(cert, xmlsec.KeyFormat.CERT_PEM, None)
dsig_ctx.set_enabled_key_data([xmlsec.KeyData.X509])
try:
dsig_ctx.verify(signature_node)
except Exception as err:
raise OneLogin_Saml2_ValidationError(
'Signature validation failed. SAML Response rejected. %s',
OneLogin_Saml2_ValidationError.INVALID_SIGNATURE,
str(err)
)
return True
@staticmethod
def sign_binary(msg, key, algorithm=xmlsec.Transform.RSA_SHA1, debug=False, key_passphrase=None):
"""
Sign binary message
:param msg: The message to sign
:type: bytes
:param key: The private key
:type: string
:param debug: Activate the xmlsec debug
:type: bool
:return signed message
:rtype str
"""
if isinstance(msg, str):
msg = msg.encode('utf8')
xmlsec.enable_debug_trace(debug)
dsig_ctx = xmlsec.SignatureContext()
dsig_ctx.key = xmlsec.Key.from_memory(key, xmlsec.KeyFormat.PEM, key_passphrase)
return dsig_ctx.sign_binary(compat.to_bytes(msg), algorithm)
@staticmethod
def validate_binary_sign(signed_query, signature, cert=None, algorithm=OneLogin_Saml2_Constants.RSA_SHA1, debug=False):
"""
Validates signed binary data (Used to validate GET Signature).
:param signed_query: The signed query string we should validate
:type: string
:param signature: The signature that will be validated
:type: string
:param cert: The public cert
:type: string
:param algorithm: Signature algorithm
:type: string
:param debug: Activate the xmlsec debug
:type: bool
"""
try:
xmlsec.enable_debug_trace(debug)
dsig_ctx = xmlsec.SignatureContext()
dsig_ctx.key = xmlsec.Key.from_memory(cert, xmlsec.KeyFormat.CERT_PEM, None)
sign_algorithm_transform_map = {
OneLogin_Saml2_Constants.DSA_SHA1: xmlsec.Transform.DSA_SHA1,
OneLogin_Saml2_Constants.RSA_SHA1: xmlsec.Transform.RSA_SHA1,
OneLogin_Saml2_Constants.RSA_SHA256: xmlsec.Transform.RSA_SHA256,
OneLogin_Saml2_Constants.RSA_SHA384: xmlsec.Transform.RSA_SHA384,
OneLogin_Saml2_Constants.RSA_SHA512: xmlsec.Transform.RSA_SHA512
}
sign_algorithm_transform = sign_algorithm_transform_map.get(algorithm, xmlsec.Transform.RSA_SHA1)
dsig_ctx.verify_binary(compat.to_bytes(signed_query),
sign_algorithm_transform,
compat.to_bytes(signature))
return True
except xmlsec.Error as e:
if debug:
print(e)
return False
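# Usage sketch (illustrative only; `signed_query` is the signed GET payload,
# `signature` the decoded Signature parameter and `idp_cert` the IdP public cert):
#
#   ok = OneLogin_Saml2_Utils.validate_binary_sign(
#       signed_query, signature, idp_cert,
#       algorithm=OneLogin_Saml2_Constants.RSA_SHA256)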
@staticmethod
def normalize_url(url):
"""
Returns normalized URL for comparison.
This method lowercases the scheme and netloc, which are case-insensitive (per RFC 4343, RFC 7617).
If normalization fails, the original URL is returned.
Per the Python documentation, splitting and re-joining the URL also normalizes the query string when empty query fields are present.
:param url: URL
:type url: String
:returns: A normalized URL, or the given URL string if parsing fails
:rtype: String
"""
try:
scheme, netloc, path, query, fragment = urlsplit(url)
normalized_url = urlunsplit((scheme.lower(), netloc.lower(), path, query, fragment))
return normalized_url
except Exception:
return url
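# Example (illustrative): normalize_url('HTTPS://Example.COM/SSO?a=1') returns
# 'https://example.com/SSO?a=1'; only the scheme and netloc are lowercased,
# the path and query are preserved.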
| """
Formats a fingerprint.
:param fingerprint: fingerprint
:type: string
:returns: Formatted fingerprint
:rtype: string
"""
formatted_fingerprint = fingerprint.replace(':', '')
return formatted_fingerprint.lower() |
client.rs | // Cadence - An extensible Statsd client for Rust!
//
// Copyright 2015-2021 Nick Pillitteri
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::builder::{MetricBuilder, MetricFormatter, MetricValue};
use crate::compat::Compat;
use crate::sealed::Sealed;
use crate::sinks::{MetricSink, UdpMetricSink};
use crate::types::{
Counter, Distribution, ErrorKind, Gauge, Histogram, Meter, Metric, MetricError, MetricResult, Set, Timer,
};
use std::fmt;
use std::net::{ToSocketAddrs, UdpSocket};
use std::panic::RefUnwindSafe;
use std::time::Duration;
use std::u64;
/// Conversion trait for valid values for counters
///
/// This trait must be implemented for any types that are used as counter
/// values (currently only `i64`). This trait is internal to how values are
/// formatted as part of metrics but is exposed publicly for documentation
/// purposes.
///
/// Typical use of Cadence shouldn't require interacting with this trait.
pub trait ToCounterValue {
fn try_to_value(self) -> MetricResult<MetricValue>;
}
impl ToCounterValue for i64 {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::Signed(self))
}
}
/// Conversion trait for valid values for timers
///
/// This trait must be implemented for any types that are used as timer
/// values (currently `u64`, `Duration`, and `Vec`s of those types).
/// This trait is internal to how values are formatted as part of metrics
/// but is exposed publicly for documentation purposes.
///
/// Typical use of Cadence shouldn't require interacting with this trait.
pub trait ToTimerValue {
fn try_to_value(self) -> MetricResult<MetricValue>;
}
impl ToTimerValue for u64 {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::Unsigned(self))
}
}
impl ToTimerValue for Vec<u64> {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::PackedUnsigned(self))
}
}
impl ToTimerValue for Duration {
fn try_to_value(self) -> MetricResult<MetricValue> {
let as_millis = self.as_millis();
if as_millis > u64::MAX as u128 {
Err(MetricError::from((ErrorKind::InvalidInput, "u64 overflow")))
} else {
Ok(MetricValue::Unsigned(as_millis as u64))
}
}
}
impl ToTimerValue for Vec<Duration> {
fn try_to_value(self) -> MetricResult<MetricValue> {
if self.iter().any(|x| x.as_millis() > u64::MAX as u128) {
Err(MetricError::from((ErrorKind::InvalidInput, "u64 overflow")))
} else {
Ok(MetricValue::PackedUnsigned(
self.iter().map(|x| x.as_millis() as u64).collect(),
))
}
}
}
/// Conversion trait for valid values for gauges
///
/// This trait must be implemented for any types that are used as gauge
/// values (currently `u64` and `f64`). This trait is internal to how values
/// are formatted as part of metrics but is exposed publicly for documentation
/// purposes.
///
/// Typical use of Cadence shouldn't require interacting with this trait.
pub trait ToGaugeValue {
fn try_to_value(self) -> MetricResult<MetricValue>;
}
impl ToGaugeValue for u64 {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::Unsigned(self))
}
}
impl ToGaugeValue for f64 {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::Float(self))
}
}
/// Conversion trait for valid values for meters
///
/// This trait must be implemented for any types that are used as meter
/// values (currently only `u64`). This trait is internal to how values are
/// formatted as part of metrics but is exposed publicly for documentation
/// purposes.
///
/// Typical use of Cadence shouldn't require interacting with this trait.
pub trait ToMeterValue {
fn try_to_value(self) -> MetricResult<MetricValue>;
}
impl ToMeterValue for u64 {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::Unsigned(self))
}
}
/// Conversion trait for valid values for histograms
///
/// This trait must be implemented for any types that are used as histogram
/// values (currently `u64`, `f64`, `Duration`, and `Vec`s of those types).
/// This trait is internal to how values are formatted as part of metrics
/// but is exposed publicly for documentation purposes.
///
/// Typical use of Cadence shouldn't require interacting with this trait.
pub trait ToHistogramValue {
fn try_to_value(self) -> MetricResult<MetricValue>;
}
impl ToHistogramValue for u64 {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::Unsigned(self))
}
}
impl ToHistogramValue for f64 {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::Float(self))
}
}
impl ToHistogramValue for Duration {
fn try_to_value(self) -> MetricResult<MetricValue> {
let as_nanos = self.as_nanos();
if as_nanos > u64::MAX as u128 {
Err(MetricError::from((ErrorKind::InvalidInput, "u64 overflow")))
} else {
Ok(MetricValue::Unsigned(as_nanos as u64))
}
}
}
impl ToHistogramValue for Vec<u64> {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::PackedUnsigned(self))
}
}
impl ToHistogramValue for Vec<f64> {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::PackedFloat(self))
}
}
impl ToHistogramValue for Vec<Duration> {
fn try_to_value(self) -> MetricResult<MetricValue> {
if self.iter().any(|x| x.as_nanos() > u64::MAX as u128) {
Err(MetricError::from((ErrorKind::InvalidInput, "u64 overflow")))
} else {
Ok(MetricValue::PackedUnsigned(
self.iter().map(|x| x.as_nanos() as u64).collect(),
))
}
}
}
/// Conversion trait for valid values for distributions
///
/// This trait must be implemented for any types that are used as distribution
/// values (currently `u64`, `f64`, and `Vec`s of those types). This trait is
/// internal to how values are formatted as part of metrics but is exposed
/// publicly for documentation purposes.
///
/// Typical use of Cadence shouldn't require interacting with this trait.
pub trait ToDistributionValue {
fn try_to_value(self) -> MetricResult<MetricValue>;
}
impl ToDistributionValue for u64 {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::Unsigned(self))
} |
impl ToDistributionValue for f64 {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::Float(self))
}
}
impl ToDistributionValue for Vec<u64> {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::PackedUnsigned(self))
}
}
impl ToDistributionValue for Vec<f64> {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::PackedFloat(self))
}
}
/// Conversion trait for valid values for sets
///
/// This trait must be implemented for any types that are used as set
/// values (currently only `i64`). This trait is internal to how values are
/// formatted as part of metrics but is exposed publicly for documentation
/// purposes.
///
/// Typical use of Cadence shouldn't require interacting with this trait.
pub trait ToSetValue {
fn try_to_value(self) -> MetricResult<MetricValue>;
}
impl ToSetValue for i64 {
fn try_to_value(self) -> MetricResult<MetricValue> {
Ok(MetricValue::Signed(self))
}
}
/// Trait for incrementing and decrementing counters.
///
/// Counters are simple values incremented or decremented by a client. The
/// rates at which these events occur or average values will be determined
/// by the server receiving them. Examples of counter uses include number
/// of logins to a system or requests received.
///
/// The following types are valid for counters:
/// * `i64`
///
/// See the [Statsd spec](https://github.com/b/statsd_spec) for more
/// information.
///
/// Note that tags are a [Datadog](https://docs.datadoghq.com/developers/dogstatsd/)
/// extension to Statsd and may not be supported by your server.
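///
/// # Example
///
/// A minimal usage sketch (metric keys are illustrative) using the no-op sink:
///
/// ```
/// use cadence::prelude::*;
/// use cadence::{StatsdClient, NopMetricSink};
///
/// let client = StatsdClient::from_sink("prefix", NopMetricSink);
/// client.count("requests.handled", 1).unwrap();
/// client.count("requests.handled", -1).unwrap();
/// ```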
pub trait Counted<T>
where
T: ToCounterValue,
{
/// Increment or decrement the counter by the given amount
fn count(&self, key: &str, count: T) -> MetricResult<Counter> {
self.count_with_tags(key, count).try_send()
}
/// Increment or decrement the counter by the given amount and return
/// a `MetricBuilder` that can be used to add tags to the metric.
fn count_with_tags<'a>(&'a self, key: &'a str, count: T) -> MetricBuilder<'_, '_, Counter>;
}
/// Trait for convenience methods for counters
///
/// This trait specifically implements increment and decrement convenience
/// methods for counters with `i64` types.
pub trait CountedExt: Counted<i64> {
/// Increment the counter by 1
fn incr(&self, key: &str) -> MetricResult<Counter> {
self.incr_with_tags(key).try_send()
}
/// Increment the counter by 1 and return a `MetricBuilder` that can
/// be used to add tags to the metric.
fn incr_with_tags<'a>(&'a self, key: &'a str) -> MetricBuilder<'_, '_, Counter> {
self.count_with_tags(key, 1)
}
/// Decrement the counter by 1
fn decr(&self, key: &str) -> MetricResult<Counter> {
self.decr_with_tags(key).try_send()
}
/// Decrement the counter by 1 and return a `MetricBuilder` that can
/// be used to add tags to the metric.
fn decr_with_tags<'a>(&'a self, key: &'a str) -> MetricBuilder<'_, '_, Counter> {
self.count_with_tags(key, -1)
}
}
/// Trait for recording timings in milliseconds.
///
/// Timings are a positive number of milliseconds between a start and end
/// time. Examples include time taken to render a web page or time taken
/// for a database call to return. `Duration` values are converted to
/// milliseconds before being recorded.
///
/// The following types are valid for timers:
/// * `u64`
/// * `Duration`
///
/// See the [Statsd spec](https://github.com/b/statsd_spec) for more
/// information.
///
/// Note that tags are a [Datadog](https://docs.datadoghq.com/developers/dogstatsd/)
/// extension to Statsd and may not be supported by your server.
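///
/// # Example
///
/// A minimal usage sketch (metric keys are illustrative) using the no-op sink;
/// `Duration` values are converted to milliseconds:
///
/// ```
/// use std::time::Duration;
/// use cadence::prelude::*;
/// use cadence::{StatsdClient, NopMetricSink};
///
/// let client = StatsdClient::from_sink("prefix", NopMetricSink);
/// client.time("db.query", 42).unwrap();
/// client.time("db.query", Duration::from_millis(42)).unwrap();
/// ```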
pub trait Timed<T>
where
T: ToTimerValue,
{
/// Record a timing in milliseconds with the given key
fn time(&self, key: &str, time: T) -> MetricResult<Timer> {
self.time_with_tags(key, time).try_send()
}
/// Record a timing in milliseconds with the given key and return a
/// `MetricBuilder` that can be used to add tags to the metric.
fn time_with_tags<'a>(&'a self, key: &'a str, time: T) -> MetricBuilder<'_, '_, Timer>;
}
/// Trait for recording gauge values.
///
/// Gauge values are an instantaneous measurement of a value determined
/// by the client. They do not change unless changed by the client. Examples
/// include things like load average or how many connections are active.
///
/// The following types are valid for gauges:
/// * `u64`
/// * `f64`
///
/// See the [Statsd spec](https://github.com/b/statsd_spec) for more
/// information.
///
/// Note that tags are a [Datadog](https://docs.datadoghq.com/developers/dogstatsd/)
/// extension to Statsd and may not be supported by your server.
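///
/// # Example
///
/// A minimal usage sketch (metric keys are illustrative) using the no-op sink:
///
/// ```
/// use cadence::prelude::*;
/// use cadence::{StatsdClient, NopMetricSink};
///
/// let client = StatsdClient::from_sink("prefix", NopMetricSink);
/// client.gauge("connections.active", 8).unwrap();
/// client.gauge("load.average", 0.75).unwrap();
/// ```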
pub trait Gauged<T>
where
T: ToGaugeValue,
{
/// Record a gauge value with the given key
fn gauge(&self, key: &str, value: T) -> MetricResult<Gauge> {
self.gauge_with_tags(key, value).try_send()
}
/// Record a gauge value with the given key and return a `MetricBuilder`
/// that can be used to add tags to the metric.
fn gauge_with_tags<'a>(&'a self, key: &'a str, value: T) -> MetricBuilder<'_, '_, Gauge>;
}
/// Trait for recording meter values.
///
/// Meter values measure the rate at which events occur. These rates are
/// determined by the server, the client simply indicates when they happen.
/// Meters can be thought of as increment-only counters. Examples include
/// things like number of requests handled or number of times something is
/// flushed to disk.
///
/// The following types are valid for meters:
/// * `u64`
///
/// See the [Statsd spec](https://github.com/b/statsd_spec) for more
/// information.
///
/// Note that tags are a [Datadog](https://docs.datadoghq.com/developers/dogstatsd/)
/// extension to Statsd and may not be supported by your server.
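///
/// # Example
///
/// A minimal usage sketch (metric keys are illustrative) using the no-op sink:
///
/// ```
/// use cadence::prelude::*;
/// use cadence::{StatsdClient, NopMetricSink};
///
/// let client = StatsdClient::from_sink("prefix", NopMetricSink);
/// client.meter("requests.received", 1).unwrap();
/// ```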
pub trait Metered<T>
where
T: ToMeterValue,
{
/// Record a meter value with the given key
fn meter(&self, key: &str, value: T) -> MetricResult<Meter> {
self.meter_with_tags(key, value).try_send()
}
/// Record a meter value with the given key and return a `MetricBuilder`
/// that can be used to add tags to the metric.
fn meter_with_tags<'a>(&'a self, key: &'a str, value: T) -> MetricBuilder<'_, '_, Meter>;
}
/// Trait for recording histogram values.
///
/// Histogram values are positive values that can represent anything, whose
/// statistical distribution is calculated by the server. The values can be
/// timings, amount of some resource consumed, size of HTTP responses in
/// some application, etc. Histograms can be thought of as a more general
/// form of timers. `Duration` values are converted to nanoseconds before
/// being emitted.
///
/// The following types are valid for histograms:
/// * `u64`
/// * `f64`
/// * `Duration`
///
/// See the [Statsd spec](https://github.com/b/statsd_spec) for more
/// information.
///
/// Note that tags and histograms are a
/// [Datadog](https://docs.datadoghq.com/developers/dogstatsd/) extension to
/// Statsd and may not be supported by your server.
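///
/// # Example
///
/// A minimal usage sketch (metric keys are illustrative) using the no-op sink;
/// `Duration` values are converted to nanoseconds:
///
/// ```
/// use std::time::Duration;
/// use cadence::prelude::*;
/// use cadence::{StatsdClient, NopMetricSink};
///
/// let client = StatsdClient::from_sink("prefix", NopMetricSink);
/// client.histogram("response.size", 128).unwrap();
/// client.histogram("response.time", Duration::from_nanos(4096)).unwrap();
/// ```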
pub trait Histogrammed<T>
where
T: ToHistogramValue,
{
/// Record a single histogram value with the given key
fn histogram(&self, key: &str, value: T) -> MetricResult<Histogram> {
self.histogram_with_tags(key, value).try_send()
}
/// Record a single histogram value with the given key and return a
/// `MetricBuilder` that can be used to add tags to the metric.
fn histogram_with_tags<'a>(&'a self, key: &'a str, value: T) -> MetricBuilder<'_, '_, Histogram>;
}
/// Trait for recording distribution values.
///
/// Similar to histograms, but applies globally. A distribution can be used to
/// instrument logical objects, like services, independently from the underlying
/// hosts.
///
/// The following types are valid for distributions:
/// * `u64`
/// * `f64`
///
/// See the [Datadog docs](https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition)
/// for more information.
///
/// Note that tags and distributions are a
/// [Datadog](https://docs.datadoghq.com/developers/dogstatsd/) extension to
/// Statsd and may not be supported by your server.
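///
/// # Example
///
/// A minimal usage sketch (metric keys are illustrative) using the no-op sink:
///
/// ```
/// use cadence::prelude::*;
/// use cadence::{StatsdClient, NopMetricSink};
///
/// let client = StatsdClient::from_sink("prefix", NopMetricSink);
/// client.distribution("api.latency", 33).unwrap();
/// client.distribution("api.latency", vec![33.1, 34.5]).unwrap();
/// ```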
pub trait Distributed<T>
where
T: ToDistributionValue,
{
/// Record a single distribution value with the given key
fn distribution(&self, key: &str, value: T) -> MetricResult<Distribution> {
self.distribution_with_tags(key, value).try_send()
}
/// Record a single distribution value with the given key and return a
/// `MetricBuilder` that can be used to add tags to the metric.
fn distribution_with_tags<'a>(&'a self, key: &'a str, value: T) -> MetricBuilder<'_, '_, Distribution>;
}
/// Trait for recording set values.
///
/// Sets count the number of unique elements in a group. You can use them to,
/// for example, count the unique visitors to your site.
///
/// The following types are valid for sets:
/// * `i64`
///
/// See the [Statsd spec](https://github.com/b/statsd_spec) for more
/// information.
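///
/// # Example
///
/// A minimal usage sketch (metric keys are illustrative) using the no-op sink:
///
/// ```
/// use cadence::prelude::*;
/// use cadence::{StatsdClient, NopMetricSink};
///
/// let client = StatsdClient::from_sink("prefix", NopMetricSink);
/// client.set("user.visits", 5).unwrap();
/// ```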
pub trait Setted<T>
where
T: ToSetValue,
{
/// Record a single set value with the given key
fn set(&self, key: &str, value: T) -> MetricResult<Set> {
self.set_with_tags(key, value).try_send()
}
/// Record a single set value with the given key and return a
/// `MetricBuilder` that can be used to add tags to the metric.
fn set_with_tags<'a>(&'a self, key: &'a str, value: T) -> MetricBuilder<'_, '_, Set>;
}
/// Trait that encompasses all other traits for sending metrics.
///
/// If you wish to use `StatsdClient` with a generic type or place a
/// `StatsdClient` instance behind a pointer (such as a `Box`) this will allow
/// you to reference all the implemented methods for recording metrics, while
/// using a single trait. An example of this is shown below.
///
/// ```
/// use std::time::Duration;
/// use cadence::{MetricClient, StatsdClient, NopMetricSink};
///
/// let client: Box<dyn MetricClient> = Box::new(StatsdClient::from_sink(
/// "prefix", NopMetricSink));
///
/// client.count("some.counter", 1).unwrap();
/// client.time("some.timer", 42).unwrap();
/// client.time("some.timer", Duration::from_millis(42)).unwrap();
/// client.time("some.timer", vec![42]).unwrap();
/// client.time("some.timer", vec![Duration::from_millis(42)]).unwrap();
/// client.gauge("some.gauge", 8).unwrap();
/// client.meter("some.meter", 13).unwrap();
/// client.histogram("some.histogram", 4).unwrap();
/// client.histogram("some.histogram", Duration::from_nanos(4)).unwrap();
/// client.histogram("some.histogram", vec![4]).unwrap();
/// client.histogram("some.histogram", vec![Duration::from_nanos(4)]).unwrap();
/// client.distribution("some.distribution", 4).unwrap();
/// client.distribution("some.distribution", vec![4]).unwrap();
/// client.set("some.set", 5).unwrap();
/// ```
pub trait MetricClient:
Counted<i64>
+ CountedExt
+ Timed<u64>
+ Timed<Duration>
+ Timed<Vec<u64>>
+ Timed<Vec<Duration>>
+ Gauged<u64>
+ Gauged<f64>
+ Metered<u64>
+ Histogrammed<u64>
+ Histogrammed<f64>
+ Histogrammed<Duration>
+ Histogrammed<Vec<u64>>
+ Histogrammed<Vec<f64>>
+ Histogrammed<Vec<Duration>>
+ Distributed<u64>
+ Distributed<f64>
+ Distributed<Vec<u64>>
+ Distributed<Vec<f64>>
+ Setted<i64>
+ Compat
{
}
/// Typically internal client methods for sending metrics and handling errors.
///
/// This trait exposes methods of the client that would normally be internal
/// but may be useful for consumers of the library to extend it in unforeseen
/// ways. Most consumers of the library shouldn't need to make use of this
/// extension point.
///
/// This trait is not exposed in the `prelude` module since it isn't required
/// to use the client for sending metrics. It is only exposed in the `ext`
/// module which is used to encompass advanced extension points for the library.
///
/// NOTE: This is a sealed trait and so it cannot be implemented outside of the
/// library.
///
/// # Example
///
/// ```
/// use cadence::{Metric, MetricResult, StatsdClient, NopMetricSink};
/// use cadence::ext::MetricBackend;
///
/// struct CustomMetric {
/// repr: String,
/// }
///
/// impl Metric for CustomMetric {
/// fn as_metric_str(&self) -> &str {
/// &self.repr
/// }
/// }
///
/// impl From<String> for CustomMetric {
/// fn from(v: String) -> Self {
/// CustomMetric { repr: v }
/// }
/// }
///
/// struct MyCustomClient {
/// prefix: String,
/// wrapped: StatsdClient,
/// }
///
/// impl MyCustomClient {
/// fn new(prefix: &str, client: StatsdClient) -> Self {
/// MyCustomClient {
/// prefix: prefix.to_string(),
/// wrapped: client,
/// }
/// }
///
/// fn send_event(&self, key: &str, val: i64) -> MetricResult<CustomMetric> {
/// let metric = CustomMetric::from(format!("{}.{}:{}|e", self.prefix, key, val));
/// self.wrapped.send_metric(&metric)?;
/// Ok(metric)
/// }
///
/// fn send_event_quietly(&self, key: &str, val: i64) {
/// if let Err(e) = self.send_event(key, val) {
/// self.wrapped.consume_error(e);
/// }
/// }
/// }
///
/// let prefix = "some.prefix";
/// let inner = StatsdClient::from_sink(&prefix, NopMetricSink);
/// let custom = MyCustomClient::new(&prefix, inner);
///
/// custom.send_event("some.event", 123).unwrap();
/// custom.send_event_quietly("some.event", 456);
/// ```
pub trait MetricBackend: Sealed {
/// Send a fully formed `Metric` implementation via the underlying `MetricSink`
///
/// Obtain a `&str` representation of a metric, encode it as UTF-8 bytes, and
/// send it to the underlying `MetricSink`, verbatim. Note that the metric is
/// expected to be fully formed already, including any prefix or tags.
///
/// Note that if you simply want to emit standard metrics, you don't need to
/// use this method. This is only useful if you are extending Cadence with a
/// custom metric type or something similar.
fn send_metric<M>(&self, metric: &M) -> MetricResult<()>
where
M: Metric;
/// Consume a possible error from attempting to send a metric.
///
/// When callers have elected to quietly send metrics via the `MetricBuilder::send()`
/// method, this method will be invoked if an error is encountered. By default the
/// handler is a no-op, meaning that errors are discarded.
///
/// Note that if you simply want to emit standard metrics, you don't need to
/// use this method. This is only useful if you are extending Cadence with a
/// custom metric type or something similar.
fn consume_error(&self, err: MetricError);
}
/// Builder for creating and customizing `StatsdClient` instances.
///
/// Instances of the builder should be created by calling the `::builder()`
/// method on the `StatsdClient` struct.
///
/// # Example
///
/// ```
/// use cadence::prelude::*;
/// use cadence::{MetricError, StatsdClient, NopMetricSink};
///
/// fn my_error_handler(err: MetricError) {
/// println!("Metric error! {}", err);
/// }
///
/// let client = StatsdClient::builder("prefix", NopMetricSink)
/// .with_error_handler(my_error_handler)
/// .build();
///
/// client.count("something", 123);
/// client.count_with_tags("some.counter", 42)
/// .with_tag("region", "us-east-2")
/// .send();
/// ```
pub struct StatsdClientBuilder {
prefix: String,
sink: Box<dyn MetricSink + Sync + Send + RefUnwindSafe>,
errors: Box<dyn Fn(MetricError) + Sync + Send + RefUnwindSafe>,
}
impl StatsdClientBuilder {
// Set the required fields and defaults for optional fields
fn new<T>(prefix: &str, sink: T) -> Self
where
T: MetricSink + Sync + Send + RefUnwindSafe + 'static,
{
StatsdClientBuilder {
// required
prefix: Self::formatted_prefix(prefix),
sink: Box::new(sink),
// optional with defaults
errors: Box::new(nop_error_handler),
}
}
/// Set an error handler to use for metrics sent via `MetricBuilder::send()`
///
/// The error handler is only invoked when metrics are not able to be sent
/// correctly, whether due to invalid input, I/O errors encountered when trying
/// to send them via a `MetricSink`, or some other reason.
///
/// The error handler should consume the error without panicking. The error
/// may be logged, printed to stderr, discarded, etc. - this is up to the
/// implementation.
pub fn with_error_handler<F>(mut self, errors: F) -> Self
where
F: Fn(MetricError) + Sync + Send + RefUnwindSafe + 'static,
{
self.errors = Box::new(errors);
self
}
/// Construct a new `StatsdClient` instance based on current settings.
pub fn build(self) -> StatsdClient {
StatsdClient::from_builder(self)
}
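    // Normalize the metric prefix: any trailing dots are trimmed and a single
    // "." separator is appended (e.g. "my.stats" and "my.stats." both become
    // "my.stats."), while an empty prefix stays empty so bare keys are used.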
fn formatted_prefix(prefix: &str) -> String {
if prefix.is_empty() {
String::new()
} else {
format!("{}.", prefix.trim_end_matches('.'))
}
}
}
/// Client for Statsd that implements various traits to record metrics.
///
/// # Traits
///
/// The client is the main entry point for users of this library. It supports
/// several traits for recording metrics of different types.
///
/// * `Counted` for emitting counters.
/// * `Timed` for emitting timings.
/// * `Gauged` for emitting gauge values.
/// * `Metered` for emitting meter values.
/// * `Histogrammed` for emitting histogram values.
/// * `Distributed` for emitting distribution values.
/// * `Setted` for emitting set values.
/// * `MetricClient` for a combination of all of the above.
///
/// For more information about the uses for each type of metric, see the
/// documentation for each mentioned trait.
///
/// # Sinks
///
/// The client uses some implementation of a `MetricSink` to emit the metrics.
///
/// In simple use cases when performance isn't critical, the `UdpMetricSink`
/// is an acceptable choice since it is the simplest to use and understand.
///
/// When performance is more important, users will want to use the
/// `BufferedUdpMetricSink` in combination with the `QueuingMetricSink` for
/// maximum isolation between the sending of metrics and your application as well
/// as minimum overhead when sending metrics.
///
/// # Threading
///
/// The `StatsdClient` is designed to work in a multithreaded application. All
/// parts of the client can be shared between threads (i.e. it is `Send` and
/// `Sync`). An example of how to use the client in a multithreaded environment
/// is given below.
///
/// In the following example, we create a struct `MyRequestHandler` that has a
/// single method that spawns a thread to do some work and emit a metric.
///
/// ## Wrapping With An `Arc`
///
/// In order to share a client between multiple threads, you'll need to wrap it
/// with an atomic reference counting pointer (`std::sync::Arc`). You should refer
/// to the client by the trait of all its methods for recording metrics
/// (`MetricClient`) as well as the `Send` and `Sync` traits since the idea is to
/// share this between threads.
///
/// ``` no_run
/// use std::panic::RefUnwindSafe;
/// use std::net::UdpSocket;
/// use std::sync::Arc;
/// use std::thread;
/// use cadence::prelude::*;
/// use cadence::{StatsdClient, BufferedUdpMetricSink, DEFAULT_PORT};
///
/// struct MyRequestHandler {
/// metrics: Arc<dyn MetricClient + Send + Sync + RefUnwindSafe>,
/// }
///
/// impl MyRequestHandler {
/// fn new() -> MyRequestHandler {
/// let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
/// let host = ("localhost", DEFAULT_PORT);
/// let sink = BufferedUdpMetricSink::from(host, socket).unwrap();
/// MyRequestHandler {
/// metrics: Arc::new(StatsdClient::from_sink("some.prefix", sink))
/// }
/// }
///
/// fn handle_some_request(&self) -> Result<(), String> {
/// let metric_ref = self.metrics.clone();
/// let _t = thread::spawn(move || {
/// println!("Hello from the thread!");
/// metric_ref.count("request.handler", 1);
/// });
///
/// Ok(())
/// }
/// }
/// ```
pub struct StatsdClient {
prefix: String,
sink: Box<dyn MetricSink + Sync + Send + RefUnwindSafe>,
errors: Box<dyn Fn(MetricError) + Sync + Send + RefUnwindSafe>,
}
impl StatsdClient {
/// Create a new client instance that will use the given prefix for
/// all metrics emitted to the given `MetricSink` implementation.
///
/// Note that this client will discard errors encountered when
/// sending metrics via the `MetricBuilder::send()` method.
///
/// # No-op Example
///
/// ```
/// use cadence::{StatsdClient, NopMetricSink};
///
/// let prefix = "my.stats";
/// let client = StatsdClient::from_sink(prefix, NopMetricSink);
/// ```
///
/// # UDP Socket Example
///
/// ```
/// use std::net::UdpSocket;
/// use cadence::{StatsdClient, UdpMetricSink, DEFAULT_PORT};
///
/// let prefix = "my.stats";
/// let host = ("127.0.0.1", DEFAULT_PORT);
///
/// let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
/// socket.set_nonblocking(true).unwrap();
///
/// let sink = UdpMetricSink::from(host, socket).unwrap();
/// let client = StatsdClient::from_sink(prefix, sink);
/// ```
///
/// # Buffered UDP Socket Example
///
/// ```
/// use std::net::UdpSocket;
/// use cadence::{StatsdClient, BufferedUdpMetricSink, DEFAULT_PORT};
///
/// let prefix = "my.stats";
/// let host = ("127.0.0.1", DEFAULT_PORT);
///
/// let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
///
/// let sink = BufferedUdpMetricSink::from(host, socket).unwrap();
/// let client = StatsdClient::from_sink(prefix, sink);
/// ```
pub fn from_sink<T>(prefix: &str, sink: T) -> Self
where
T: MetricSink + Sync + Send + RefUnwindSafe + 'static,
{
Self::builder(prefix, sink).build()
}
/// Create a new client instance that will use the given prefix to send
/// metrics to the given host over UDP using an appropriate sink.
///
/// The created UDP socket will be put into non-blocking mode.
///
/// Note that this client will discard errors encountered when
/// sending metrics via the `MetricBuilder::send()` method.
///
/// # Example
///
/// ```no_run
/// use cadence::{StatsdClient, UdpMetricSink};
///
/// let prefix = "my.stats";
/// let host = ("metrics.example.com", 8125);
///
/// let client = StatsdClient::from_udp_host(prefix, host);
/// ```
///
/// # Failures
///
/// This method may fail if:
///
/// * It is unable to create a local UDP socket.
/// * It is unable to put the UDP socket into non-blocking mode.
/// * It is unable to resolve the hostname of the metric server.
/// * The host address is otherwise unable to be parsed.
#[deprecated(since = "0.19.0", note = "Superseded by ::from_sink() and ::builder()")]
pub fn from_udp_host<A>(prefix: &str, host: A) -> MetricResult<Self>
where
A: ToSocketAddrs,
{
let socket = UdpSocket::bind("0.0.0.0:0")?;
socket.set_nonblocking(true)?;
let sink = UdpMetricSink::from(host, socket)?;
Ok(StatsdClient::builder(prefix, sink).build())
}
/// Create a new builder with the provided prefix and metric sink.
///
/// A prefix and a metric sink are required to create a new client
/// instance. All other optional customizations can be set by calling
/// methods on the returned builder. Any customizations that aren't
/// set by the caller will use defaults.
///
/// Note, though a metric prefix is required, you may pass an empty
/// string as a prefix. In this case, the metrics emitted will use only
/// the bare keys supplied when you call the various methods to emit
/// metrics.
///
/// General defaults:
///
/// * A no-op error handler will be used by default. Note that this
/// only affects errors encountered when using the `MetricBuilder::send()`
/// method (as opposed to `.try_send()` or any other method for sending
/// metrics).
///
/// # Example
///
/// ```
/// use cadence::prelude::*;
/// use cadence::{StatsdClient, MetricError, NopMetricSink};
///
/// fn my_handler(err: MetricError) {
/// println!("Metric error: {}", err);
/// }
///
/// let client = StatsdClient::builder("some.prefix", NopMetricSink)
/// .with_error_handler(my_handler)
/// .build();
///
/// client.gauge_with_tags("some.key", 7)
/// .with_tag("region", "us-west-1")
/// .send();
/// ```
pub fn builder<T>(prefix: &str, sink: T) -> StatsdClientBuilder
where
T: MetricSink + Sync + Send + RefUnwindSafe + 'static,
{
StatsdClientBuilder::new(prefix, sink)
}
// Create a new StatsdClient by consuming the builder
fn from_builder(builder: StatsdClientBuilder) -> Self {
StatsdClient {
prefix: builder.prefix,
sink: builder.sink,
errors: builder.errors,
}
}
}
impl Sealed for StatsdClient {}
impl MetricBackend for StatsdClient {
fn send_metric<M>(&self, metric: &M) -> MetricResult<()>
where
M: Metric,
{
let metric_string = metric.as_metric_str();
self.sink.emit(metric_string)?;
Ok(())
}
fn consume_error(&self, err: MetricError) {
(self.errors)(err);
}
}
impl fmt::Debug for StatsdClient {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"StatsdClient {{ prefix: {:?}, sink: ..., errors: ... }}",
self.prefix
)
}
}
impl<T> Counted<T> for StatsdClient
where
T: ToCounterValue,
{
fn count_with_tags<'a>(&'a self, key: &'a str, value: T) -> MetricBuilder<'_, '_, Counter> {
match value.try_to_value() {
Ok(v) => MetricBuilder::from_fmt(MetricFormatter::counter(&self.prefix, key, v), self),
Err(e) => MetricBuilder::from_error(e, self),
}
}
}
impl CountedExt for StatsdClient {}
impl<T> Timed<T> for StatsdClient
where
T: ToTimerValue,
{
fn time_with_tags<'a>(&'a self, key: &'a str, time: T) -> MetricBuilder<'_, '_, Timer> {
match time.try_to_value() {
Ok(v) => MetricBuilder::from_fmt(MetricFormatter::timer(&self.prefix, key, v), self),
Err(e) => MetricBuilder::from_error(e, self),
}
}
}
impl<T> Gauged<T> for StatsdClient
where
T: ToGaugeValue,
{
fn gauge_with_tags<'a>(&'a self, key: &'a str, value: T) -> MetricBuilder<'_, '_, Gauge> {
match value.try_to_value() {
Ok(v) => MetricBuilder::from_fmt(MetricFormatter::gauge(&self.prefix, key, v), self),
Err(e) => MetricBuilder::from_error(e, self),
}
}
}
impl<T> Metered<T> for StatsdClient
where
T: ToMeterValue,
{
fn meter_with_tags<'a>(&'a self, key: &'a str, value: T) -> MetricBuilder<'_, '_, Meter> {
match value.try_to_value() {
Ok(v) => MetricBuilder::from_fmt(MetricFormatter::meter(&self.prefix, key, v), self),
Err(e) => MetricBuilder::from_error(e, self),
}
}
}
impl<T> Histogrammed<T> for StatsdClient
where
T: ToHistogramValue,
{
fn histogram_with_tags<'a>(&'a self, key: &'a str, value: T) -> MetricBuilder<'_, '_, Histogram> {
match value.try_to_value() {
Ok(v) => MetricBuilder::from_fmt(MetricFormatter::histogram(&self.prefix, key, v), self),
Err(e) => MetricBuilder::from_error(e, self),
}
}
}
impl<T> Distributed<T> for StatsdClient
where
T: ToDistributionValue,
{
fn distribution_with_tags<'a>(&'a self, key: &'a str, value: T) -> MetricBuilder<'_, '_, Distribution> {
match value.try_to_value() {
Ok(v) => MetricBuilder::from_fmt(MetricFormatter::distribution(&self.prefix, key, v), self),
Err(e) => MetricBuilder::from_error(e, self),
}
}
}
impl<T> Setted<T> for StatsdClient
where
T: ToSetValue,
{
fn set_with_tags<'a>(&'a self, key: &'a str, value: T) -> MetricBuilder<'_, '_, Set> {
match value.try_to_value() {
Ok(v) => MetricBuilder::from_fmt(MetricFormatter::set(&self.prefix, key, v), self),
Err(e) => MetricBuilder::from_error(e, self),
}
}
}
impl MetricClient for StatsdClient {}
#[allow(clippy::needless_pass_by_value)]
fn nop_error_handler(_err: MetricError) {
// nothing
}
#[cfg(test)]
mod tests {
use super::{
Counted, CountedExt, Distributed, Gauged, Histogrammed, Metered, MetricClient, Setted, StatsdClient, Timed,
};
use crate::sinks::{MetricSink, NopMetricSink, QueuingMetricSink, SpyMetricSink};
use crate::types::{ErrorKind, Metric, MetricError};
use std::io;
use std::panic::RefUnwindSafe;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use std::u64;
#[test]
fn test_statsd_client_empty_prefix() {
let client = StatsdClient::from_sink("", NopMetricSink);
let res = client.count("some.method", 1);
assert_eq!("some.method:1|c", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_count_with_tags() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client
.count_with_tags("some.counter", 3)
.with_tag("foo", "bar")
.try_send();
assert_eq!("prefix.some.counter:3|c|#foo:bar", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_incr_with_tags() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client.incr_with_tags("some.counter").with_tag("foo", "bar").try_send();
assert_eq!("prefix.some.counter:1|c|#foo:bar", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_decr_with_tags() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client.decr_with_tags("some.counter").with_tag("foo", "bar").try_send();
assert_eq!("prefix.some.counter:-1|c|#foo:bar", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_gauge_with_tags() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client
.gauge_with_tags("some.gauge", 4)
.with_tag("bucket", "A")
.with_tag_value("file-server")
.try_send();
assert_eq!(
"prefix.some.gauge:4|g|#bucket:A,file-server",
res.unwrap().as_metric_str()
);
}
#[test]
fn test_statsd_client_time_duration() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client.time("key", Duration::from_millis(157));
assert_eq!("prefix.key:157|ms", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_time_multiple_durations() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let durations = vec![
Duration::from_millis(157),
Duration::from_millis(158),
Duration::from_millis(159),
];
let res = client.time("key", durations);
assert_eq!("prefix.key:157:158:159|ms", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_time_duration_with_overflow() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client.time("key", Duration::from_secs(u64::MAX));
assert_eq!(ErrorKind::InvalidInput, res.unwrap_err().kind())
}
#[test]
fn test_statsd_client_time_multiple_durations_with_overflow() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let durations = vec![
Duration::from_millis(157),
Duration::from_secs(u64::MAX),
Duration::from_millis(159),
];
let res = client.time("key", durations);
assert_eq!(ErrorKind::InvalidInput, res.unwrap_err().kind())
}
#[test]
fn test_statsd_client_time_duration_with_tags() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client
.time_with_tags("key", Duration::from_millis(157))
.with_tag("foo", "bar")
.with_tag_value("quux")
.try_send();
assert_eq!("prefix.key:157|ms|#foo:bar,quux", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_time_multiple_durations_with_tags_() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let durations = vec![
Duration::from_millis(157),
Duration::from_millis(158),
Duration::from_millis(159),
];
let res = client
.time_with_tags("key", durations)
.with_tag("foo", "bar")
.with_tag_value("quux")
.try_send();
assert_eq!("prefix.key:157:158:159|ms|#foo:bar,quux", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_time_duration_with_tags_with_overflow() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client
.time_with_tags("key", Duration::from_secs(u64::MAX))
.with_tag("foo", "bar")
.with_tag_value("quux")
.try_send();
assert!(res.is_err());
assert_eq!(ErrorKind::InvalidInput, res.unwrap_err().kind());
}
#[test]
fn test_statsd_client_time_multiple_durations_with_tags_with_overflow() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let durations = vec![
Duration::from_millis(157),
Duration::from_secs(u64::MAX),
Duration::from_millis(159),
];
let res = client
.time_with_tags("key", durations)
.with_tag("foo", "bar")
.with_tag_value("quux")
.try_send();
assert!(res.is_err());
assert_eq!(ErrorKind::InvalidInput, res.unwrap_err().kind());
}
#[test]
fn test_statsd_client_meter_with_tags() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client
.meter_with_tags("some.meter", 64)
.with_tag("segment", "142")
.with_tag_value("beta")
.try_send();
assert_eq!("prefix.some.meter:64|m|#segment:142,beta", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_histogram_with_tags() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client
.histogram_with_tags("some.histo", 27)
.with_tag("host", "www03.example.com")
.with_tag_value("rc1")
.try_send();
assert_eq!(
"prefix.some.histo:27|h|#host:www03.example.com,rc1",
res.unwrap().as_metric_str()
);
}
#[test]
fn test_statsd_client_histogram_with_multiple_values() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client.histogram_with_tags("some.histo", vec![27, 28, 29]).try_send();
assert_eq!("prefix.some.histo:27:28:29|h", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_histogram_duration() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client.histogram("key", Duration::from_nanos(210));
assert_eq!("prefix.key:210|h", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_histogram_multiple_durations() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let durations = vec![
Duration::from_nanos(210),
Duration::from_nanos(211),
Duration::from_nanos(212),
];
let res = client.histogram("key", durations);
assert_eq!("prefix.key:210:211:212|h", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_histogram_duration_with_overflow() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client.histogram("key", Duration::from_secs(u64::MAX));
assert_eq!(ErrorKind::InvalidInput, res.unwrap_err().kind());
}
#[test]
fn test_statsd_client_histogram_multiple_durations_with_overflow() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let durations = vec![
Duration::from_nanos(210),
Duration::from_secs(u64::MAX),
Duration::from_nanos(212),
];
let res = client.histogram("key", durations);
assert_eq!(ErrorKind::InvalidInput, res.unwrap_err().kind());
}
#[test]
fn test_statsd_client_histogram_duration_with_tags() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client
.histogram_with_tags("key", Duration::from_nanos(4096))
.with_tag("foo", "bar")
.with_tag_value("beta")
.try_send();
assert_eq!("prefix.key:4096|h|#foo:bar,beta", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_histogram_duration_with_tags_with_overflow() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client
.histogram_with_tags("key", Duration::from_millis(u64::MAX))
.with_tag("foo", "bar")
.with_tag_value("beta")
.try_send();
assert_eq!(ErrorKind::InvalidInput, res.unwrap_err().kind());
}
#[test]
fn test_statsd_client_distribution_with_tags() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client
.distribution_with_tags("some.distr", 27)
.with_tag("host", "www03.example.com")
.with_tag_value("rc1")
.try_send();
assert_eq!(
"prefix.some.distr:27|d|#host:www03.example.com,rc1",
res.unwrap().as_metric_str()
);
}
#[test]
fn test_statsd_client_distribution_multiple_values_with_tags() {
let client = StatsdClient::from_sink("prefix", NopMetricSink);
let res = client
.distribution_with_tags("some.distr", vec![27, 28, 29])
.with_tag("host", "www03.example.com")
.with_tag_value("rc1")
.try_send();
assert_eq!(
"prefix.some.distr:27:28:29|d|#host:www03.example.com,rc1",
res.unwrap().as_metric_str()
);
}
#[test]
fn test_statsd_client_set_with_tags() {
let client = StatsdClient::from_sink("myapp", NopMetricSink);
let res = client.set_with_tags("some.set", 3).with_tag("foo", "bar").try_send();
assert_eq!("myapp.some.set:3|s|#foo:bar", res.unwrap().as_metric_str());
}
#[test]
fn test_statsd_client_with_tags_send_success() {
let (rx, sink) = SpyMetricSink::new();
let client = StatsdClient::from_sink("prefix", sink);
client.count_with_tags("some.key", 1).with_tag("test", "a").send();
let sent = rx.recv().unwrap();
assert_eq!("prefix.some.key:1|c|#test:a", String::from_utf8(sent).unwrap());
}
#[test]
fn test_statsd_client_with_tags_send_error() {
struct ErrorSink;
impl MetricSink for ErrorSink {
fn emit(&self, _metric: &str) -> io::Result<usize> {
Err(io::Error::from(io::ErrorKind::Other))
}
}
let count = Arc::new(AtomicUsize::new(0));
let count_ref = count.clone();
let handler = move |_err: MetricError| {
count_ref.fetch_add(1, Ordering::Release);
};
let client = StatsdClient::builder("prefix", ErrorSink)
.with_error_handler(handler)
.build();
client.count_with_tags("some.key", 1).with_tag("tier", "web").send();
assert_eq!(1, count.load(Ordering::Acquire));
}
// The following tests really just ensure that we've actually
// implemented all the traits we're supposed to correctly. If
// we hadn't, this wouldn't compile.
#[test]
fn test_statsd_client_as_counted() {
let client: Box<dyn Counted<i64>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.count("some.counter", 5).unwrap();
}
#[test]
fn test_statsd_client_as_countedext() {
let client: Box<dyn CountedExt> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.incr("some.counter").unwrap();
}
#[test]
fn test_statsd_client_as_timed_u64() {
let client: Box<dyn Timed<u64>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.time("some.timer", 20).unwrap();
}
#[test]
fn test_statsd_client_as_timed_duration() {
let client: Box<dyn Timed<Duration>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.time("some.timer", Duration::from_millis(20)).unwrap();
}
#[test]
fn test_statsd_client_as_timed_packed_duration() {
let client: Box<dyn Timed<Vec<Duration>>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
let durations = vec![Duration::from_millis(20), Duration::from_millis(21)];
client.time("some.timer", durations).unwrap();
}
#[test]
fn test_statsd_client_as_gauged_u64() {
let client: Box<dyn Gauged<u64>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.gauge("some.gauge", 32).unwrap();
}
#[test]
fn test_statsd_client_as_gauged_f64() {
let client: Box<dyn Gauged<f64>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.gauge("some.gauge", 3.2).unwrap();
}
#[test]
fn test_statsd_client_as_metered() {
let client: Box<dyn Metered<u64>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.meter("some.meter", 9).unwrap();
}
#[test]
fn test_statsd_client_as_histogrammed_u64() {
let client: Box<dyn Histogrammed<u64>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.histogram("some.histogram", 4).unwrap();
}
#[test]
fn test_statsd_client_as_histogrammed_packed_u64() {
let client: Box<dyn Histogrammed<Vec<u64>>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.histogram("some.histogram", vec![4, 5, 6]).unwrap();
}
#[test]
fn test_statsd_client_as_histogrammed_f64() {
let client: Box<dyn Histogrammed<f64>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.histogram("some.histogram", 4.0).unwrap();
}
#[test]
fn test_statsd_client_as_histogrammed_packed_f64() {
let client: Box<dyn Histogrammed<Vec<f64>>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.histogram("some.histogram", vec![4.0, 5.0, 6.0]).unwrap();
}
#[test]
fn test_statsd_client_as_histogrammed_duration() {
let client: Box<dyn Histogrammed<Duration>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.histogram("some.histogram", Duration::from_nanos(4)).unwrap();
}
#[test]
fn test_statsd_client_as_histogrammed_packed_duration() {
let client: Box<dyn Histogrammed<Vec<Duration>>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
let durations = vec![Duration::from_nanos(4), Duration::from_nanos(5)];
client.histogram("some.histogram", durations).unwrap();
}
#[test]
fn test_statsd_client_as_distributed_u64() {
let client: Box<dyn Distributed<u64>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.distribution("some.distribution", 33).unwrap();
}
#[test]
fn test_statsd_client_as_distributed_packed_u64() {
let client: Box<dyn Distributed<Vec<u64>>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.distribution("some.distribution", vec![33, 34]).unwrap();
}
#[test]
fn test_statsd_client_as_distributed_f64() {
let client: Box<dyn Distributed<f64>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.distribution("some.distribution", 33.0).unwrap();
}
#[test]
fn test_statsd_client_as_distributed_packed_f64() {
let client: Box<dyn Distributed<Vec<f64>>> = Box::new(StatsdClient::from_sink("prefix", NopMetricSink));
client.distribution("some.distribution", vec![33.0, 34.0]).unwrap();
}
#[test]
fn test_statsd_client_as_setted() {
let client: Box<dyn Setted<i64>> = Box::new(StatsdClient::from_sink("myapp", NopMetricSink));
client.set("some.set", 5).unwrap();
}
#[test]
fn test_statsd_client_as_thread_and_panic_safe() {
let client: Box<dyn MetricClient + Send + Sync + RefUnwindSafe> = Box::new(StatsdClient::from_sink(
"prefix",
QueuingMetricSink::from(NopMetricSink),
));
client.count("some.counter", 3).unwrap();
client.time("some.timer", 198).unwrap();
client.time("some.timer", Duration::from_millis(198)).unwrap();
client.time("some.timer", vec![198]).unwrap();
client.time("some.timer", vec![Duration::from_millis(198)]).unwrap();
client.gauge("some.gauge", 4).unwrap();
client.gauge("some.gauge", 4.0).unwrap();
client.meter("some.meter", 29).unwrap();
client.histogram("some.histogram", 32).unwrap();
client.histogram("some.histogram", 32.0).unwrap();
client.histogram("some.histogram", Duration::from_nanos(32)).unwrap();
client.histogram("some.histogram", vec![32]).unwrap();
client.histogram("some.histogram", vec![32.0]).unwrap();
client
.histogram("some.histogram", vec![Duration::from_nanos(32)])
.unwrap();
client.distribution("some.distribution", 248).unwrap();
client.distribution("some.distribution", 248.0).unwrap();
client.distribution("some.distribution", vec![248]).unwrap();
client.distribution("some.distribution", vec![248.0]).unwrap();
client.set("some.set", 5).unwrap();
}
}
virtualMachineScaleSet.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20160430preview
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// Describes a Virtual Machine Scale Set.
type VirtualMachineScaleSet struct {
pulumi.CustomResourceState
// The identity of the virtual machine scale set, if configured.
Identity VirtualMachineScaleSetIdentityResponsePtrOutput `pulumi:"identity"`
// Resource location
Location pulumi.StringOutput `pulumi:"location"`
// Resource name
Name pulumi.StringOutput `pulumi:"name"`
// Specifies whether the Virtual Machine Scale Set should be overprovisioned.
OverProvision pulumi.BoolPtrOutput `pulumi:"overProvision"`
// Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
Plan PlanResponsePtrOutput `pulumi:"plan"`
// The provisioning state, which only appears in the response.
ProvisioningState pulumi.StringOutput `pulumi:"provisioningState"`
// When true this limits the scale set to a single placement group, of max size 100 virtual machines.
SinglePlacementGroup pulumi.BoolPtrOutput `pulumi:"singlePlacementGroup"`
// The virtual machine scale set sku.
Sku SkuResponsePtrOutput `pulumi:"sku"`
// Resource tags
Tags pulumi.StringMapOutput `pulumi:"tags"`
// Resource type
Type pulumi.StringOutput `pulumi:"type"`
// The upgrade policy.
UpgradePolicy UpgradePolicyResponsePtrOutput `pulumi:"upgradePolicy"`
// The virtual machine profile.
VirtualMachineProfile VirtualMachineScaleSetVMProfileResponsePtrOutput `pulumi:"virtualMachineProfile"`
}
// NewVirtualMachineScaleSet registers a new resource with the given unique name, arguments, and options.
func NewVirtualMachineScaleSet(ctx *pulumi.Context,
name string, args *VirtualMachineScaleSetArgs, opts ...pulumi.ResourceOption) (*VirtualMachineScaleSet, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.ResourceGroupName == nil {
return nil, errors.New("invalid value for required argument 'ResourceGroupName'")
}
aliases := pulumi.Aliases([]pulumi.Alias{
{
Type: pulumi.String("azure-nextgen:compute:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/latest:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20150615:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20160330:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20170330:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20171201:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20180401:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20180601:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20181001:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20190301:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20190701:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20191201:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20200601:VirtualMachineScaleSet"),
},
{
Type: pulumi.String("azure-nextgen:compute/v20201201:VirtualMachineScaleSet"),
},
})
opts = append(opts, aliases)
var resource VirtualMachineScaleSet
err := ctx.RegisterResource("azure-nextgen:compute/v20160430preview:VirtualMachineScaleSet", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
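// A usage sketch (illustrative only; the program layout, resource name, and arguments
// below are assumptions, not taken from this SDK): registering the scale set from a
// Pulumi program entrypoint.
//
//	pulumi.Run(func(ctx *pulumi.Context) error {
//		_, err := NewVirtualMachineScaleSet(ctx, "example-vmss", &VirtualMachineScaleSetArgs{
//			ResourceGroupName: pulumi.String("example-rg"),
//		})
//		return err
//	})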
// GetVirtualMachineScaleSet gets an existing VirtualMachineScaleSet resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetVirtualMachineScaleSet(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *VirtualMachineScaleSetState, opts ...pulumi.ResourceOption) (*VirtualMachineScaleSet, error) {
var resource VirtualMachineScaleSet
err := ctx.ReadResource("azure-nextgen:compute/v20160430preview:VirtualMachineScaleSet", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering VirtualMachineScaleSet resources.
type virtualMachineScaleSetState struct {
// The identity of the virtual machine scale set, if configured.
Identity *VirtualMachineScaleSetIdentityResponse `pulumi:"identity"`
// Resource location
Location *string `pulumi:"location"`
// Resource name
Name *string `pulumi:"name"`
// Specifies whether the Virtual Machine Scale Set should be overprovisioned.
OverProvision *bool `pulumi:"overProvision"`
// Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
Plan *PlanResponse `pulumi:"plan"`
// The provisioning state, which only appears in the response.
ProvisioningState *string `pulumi:"provisioningState"`
// When true this limits the scale set to a single placement group, of max size 100 virtual machines.
SinglePlacementGroup *bool `pulumi:"singlePlacementGroup"`
// The virtual machine scale set sku.
Sku *SkuResponse `pulumi:"sku"`
// Resource tags
Tags map[string]string `pulumi:"tags"`
// Resource type
Type *string `pulumi:"type"`
// The upgrade policy.
UpgradePolicy *UpgradePolicyResponse `pulumi:"upgradePolicy"`
// The virtual machine profile.
VirtualMachineProfile *VirtualMachineScaleSetVMProfileResponse `pulumi:"virtualMachineProfile"`
}
type VirtualMachineScaleSetState struct {
// The identity of the virtual machine scale set, if configured.
Identity VirtualMachineScaleSetIdentityResponsePtrInput
// Resource location
Location pulumi.StringPtrInput
// Resource name
Name pulumi.StringPtrInput
// Specifies whether the Virtual Machine Scale Set should be overprovisioned.
OverProvision pulumi.BoolPtrInput
// Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
Plan PlanResponsePtrInput
// The provisioning state, which only appears in the response.
ProvisioningState pulumi.StringPtrInput
// When true this limits the scale set to a single placement group, of max size 100 virtual machines.
SinglePlacementGroup pulumi.BoolPtrInput
// The virtual machine scale set sku.
Sku SkuResponsePtrInput
// Resource tags
Tags pulumi.StringMapInput
// Resource type
Type pulumi.StringPtrInput
// The upgrade policy.
UpgradePolicy UpgradePolicyResponsePtrInput
// The virtual machine profile.
VirtualMachineProfile VirtualMachineScaleSetVMProfileResponsePtrInput
}
func (VirtualMachineScaleSetState) ElementType() reflect.Type {
return reflect.TypeOf((*virtualMachineScaleSetState)(nil)).Elem()
}
type virtualMachineScaleSetArgs struct {
// The identity of the virtual machine scale set, if configured.
Identity *VirtualMachineScaleSetIdentity `pulumi:"identity"`
// Resource location
Location *string `pulumi:"location"`
// The name of the VM scale set to create or update.
Name *string `pulumi:"name"`
// Specifies whether the Virtual Machine Scale Set should be overprovisioned.
OverProvision *bool `pulumi:"overProvision"`
// Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
Plan *Plan `pulumi:"plan"`
// The name of the resource group.
ResourceGroupName string `pulumi:"resourceGroupName"`
// When true this limits the scale set to a single placement group, of max size 100 virtual machines.
SinglePlacementGroup *bool `pulumi:"singlePlacementGroup"`
// The virtual machine scale set sku.
Sku *Sku `pulumi:"sku"`
// Resource tags
Tags map[string]string `pulumi:"tags"`
// The upgrade policy.
UpgradePolicy *UpgradePolicy `pulumi:"upgradePolicy"`
// The virtual machine profile.
VirtualMachineProfile *VirtualMachineScaleSetVMProfile `pulumi:"virtualMachineProfile"`
}
// The set of arguments for constructing a VirtualMachineScaleSet resource.
type VirtualMachineScaleSetArgs struct {
// The identity of the virtual machine scale set, if configured.
Identity VirtualMachineScaleSetIdentityPtrInput
// Resource location
Location pulumi.StringPtrInput
// The name of the VM scale set to create or update.
Name pulumi.StringPtrInput
// Specifies whether the Virtual Machine Scale Set should be overprovisioned.
OverProvision pulumi.BoolPtrInput
// Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
Plan PlanPtrInput
// The name of the resource group.
ResourceGroupName pulumi.StringInput
// When true this limits the scale set to a single placement group, of max size 100 virtual machines.
SinglePlacementGroup pulumi.BoolPtrInput
// The virtual machine scale set sku.
Sku SkuPtrInput
// Resource tags
Tags pulumi.StringMapInput
// The upgrade policy.
UpgradePolicy UpgradePolicyPtrInput
// The virtual machine profile.
VirtualMachineProfile VirtualMachineScaleSetVMProfilePtrInput
}
func (VirtualMachineScaleSetArgs) ElementType() reflect.Type {
return reflect.TypeOf((*virtualMachineScaleSetArgs)(nil)).Elem()
}
type VirtualMachineScaleSetInput interface {
pulumi.Input
ToVirtualMachineScaleSetOutput() VirtualMachineScaleSetOutput
ToVirtualMachineScaleSetOutputWithContext(ctx context.Context) VirtualMachineScaleSetOutput
}
func (*VirtualMachineScaleSet) ElementType() reflect.Type {
return reflect.TypeOf((*VirtualMachineScaleSet)(nil))
}
func (i *VirtualMachineScaleSet) ToVirtualMachineScaleSetOutput() VirtualMachineScaleSetOutput {
return i.ToVirtualMachineScaleSetOutputWithContext(context.Background())
}
func (i *VirtualMachineScaleSet) ToVirtualMachineScaleSetOutputWithContext(ctx context.Context) VirtualMachineScaleSetOutput {
return pulumi.ToOutputWithContext(ctx, i).(VirtualMachineScaleSetOutput)
}
type VirtualMachineScaleSetOutput struct {
*pulumi.OutputState
}
func (VirtualMachineScaleSetOutput) ElementType() reflect.Type {
return reflect.TypeOf((*VirtualMachineScaleSet)(nil))
}
func (o VirtualMachineScaleSetOutput) ToVirtualMachineScaleSetOutput() VirtualMachineScaleSetOutput {
return o
}
func (o VirtualMachineScaleSetOutput) ToVirtualMachineScaleSetOutputWithContext(ctx context.Context) VirtualMachineScaleSetOutput {
return o
}
func init() {
pulumi.RegisterOutputType(VirtualMachineScaleSetOutput{})
}
label_create.go | package cmd
import (
"github.com/MakeNowJust/heredoc/v2"
"github.com/rsteube/carapace"
"github.com/spf13/cobra"
gitlab "github.com/xanzy/go-gitlab"
"github.com/zaquestion/lab/internal/action"
lab "github.com/zaquestion/lab/internal/gitlab"
)
var labelCreateCmd = &cobra.Command{
Use: "create [remote] <name>",
Aliases: []string{"add"},
Short: "Create a new label",
Example: heredoc.Doc(`
lab label create my-label
lab label create --color cornflowerblue --description "Blue as a cornflower" blue
		lab label create --color "#6495ed" --description "Also blue as a cornflower" blue2`),
PersistentPreRun: labPersistentPreRun,
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
rn, name, err := parseArgsRemoteAndProject(args)
if err != nil {
log.Fatal(err)
}
color, err := cmd.Flags().GetString("color")
if err != nil {
log.Fatal(err)
}
desc, err := cmd.Flags().GetString("description")
if err != nil {
log.Fatal(err)
}
err = lab.LabelCreate(rn, &gitlab.CreateLabelOptions{
Name: &name,
Description: &desc,
Color: &color,
})
if err != nil {
log.Fatal(err)
}
},
}
func init() {
labelCreateCmd.Flags().String("color", "#428BCA", "color of the new label in HTML hex notation or CSS color name")
labelCreateCmd.Flags().String("description", "", "description of the new label")
labelCmd.AddCommand(labelCreateCmd)
carapace.Gen(labelCmd).PositionalCompletion(
action.Remotes(),
)
}
jobs.rs | // Copyright 2019 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Periodic (background) jobs.
//!
//! ## Record Persistence & Expiry
//!
//! To ensure persistence of records in the DHT, a Kademlia node
//! must periodically (re-)publish and (re-)replicate its records:
//!
//! 1. (Re-)publishing: The original publisher or provider of a record
//! must regularly re-publish in order to prolong the expiration.
//!
//! 2. (Re-)replication: Every node storing a replica of a record must
//! regularly re-replicate it to the closest nodes to the key in
//! order to ensure the record is present at these nodes.
//!
//! Re-publishing primarily ensures persistence of the record beyond its
//! initial TTL, for as long as the publisher stores (or provides) the record,
//! whilst (re-)replication primarily ensures persistence for the duration
//! of the TTL in the light of topology changes. Consequently, replication
//! intervals should be shorter than publication intervals and
//! publication intervals should be shorter than the TTL.
//!
//! This module implements two periodic jobs:
//!
//! * [`PutRecordJob`]: For (re-)publication and (re-)replication of
//! regular (value-)records.
//!
//! * [`AddProviderJob`]: For (re-)publication of provider records.
//! Provider records currently have no separate replication mechanism.
//!
//! A periodic job is driven like a `Future` or `Stream` by `poll`ing it.
//! Once a job starts running it emits records to send to the `k` closest
//! nodes to the key, where `k` is the replication factor.
//!
//! Furthermore, these jobs perform double-duty by removing expired records
//! from the `RecordStore` on every run. Expired records are never emitted
//! by the jobs.
//!
//! > **Note**: The current implementation takes a snapshot of the records
//! > to replicate from the `RecordStore` when it starts and thus, to account
//! > for the worst case, it temporarily requires additional memory proportional
//! > to the size of all stored records. As a job runs, the records are moved
//! > out of the job to the consumer, where they can be dropped after being sent.
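//!
//! A minimal sketch (illustrative, mirroring the tests at the bottom of this file) of how
//! a job is driven from a task context; `store` is any `RecordStore` implementation such
//! as `MemoryStore`, and `replicate` is a placeholder for the caller's send logic:
//!
//! ```ignore
//! // Poll the job whenever the task is woken; every `Poll::Ready(record)` is a record
//! // that the caller (e.g. the Kademlia behaviour) should send to the k closest peers.
//! while let Poll::Ready(record) = job.poll(cx, &mut store, Instant::now()) {
//!     replicate(record);
//! }
//! ```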
use crate::record::{self, Record, ProviderRecord, store::RecordStore};
use libp2p_core::PeerId;
use futures::prelude::*;
use std::collections::HashSet;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use std::vec;
use wasm_timer::{Instant, Delay};
/// The maximum number of queries towards which background jobs
/// are allowed to start new queries on an invocation of
/// `Kademlia::poll`.
pub const JOBS_MAX_QUERIES: usize = 100;
/// The maximum number of new queries started by a background job
/// per invocation of `Kademlia::poll`.
pub const JOBS_MAX_NEW_QUERIES: usize = 10;
/// A background job run periodically.
#[derive(Debug)]
struct PeriodicJob<T> {
interval: Duration,
state: PeriodicJobState<T>,
}
impl<T> PeriodicJob<T> {
    fn is_running(&self) -> bool {
match self.state {
PeriodicJobState::Running(..) => true,
PeriodicJobState::Waiting(..) => false,
}
}
/// Cuts short the remaining delay, if the job is currently waiting
/// for the delay to expire.
fn asap(&mut self) {
if let PeriodicJobState::Waiting(delay, deadline) = &mut self.state {
let new_deadline = Instant::now() - Duration::from_secs(1);
*deadline = new_deadline;
delay.reset_at(new_deadline);
}
}
/// Returns `true` if the job is currently not running but ready
/// to be run, `false` otherwise.
fn is_ready(&mut self, cx: &mut Context, now: Instant) -> bool {
if let PeriodicJobState::Waiting(delay, deadline) = &mut self.state {
if now >= *deadline || !Future::poll(Pin::new(delay), cx).is_pending() {
return true
}
}
false
}
}
/// The state of a background job run periodically.
#[derive(Debug)]
enum PeriodicJobState<T> {
Running(T),
Waiting(Delay, Instant)
}
//////////////////////////////////////////////////////////////////////////////
// PutRecordJob
/// Periodic job for replicating / publishing records.
pub struct PutRecordJob {
local_id: PeerId,
next_publish: Option<Instant>,
publish_interval: Option<Duration>,
record_ttl: Option<Duration>,
skipped: HashSet<record::Key>,
inner: PeriodicJob<vec::IntoIter<Record>>,
}
impl PutRecordJob {
/// Creates a new periodic job for replicating and re-publishing
/// locally stored records.
pub fn new(
local_id: PeerId,
replicate_interval: Duration,
publish_interval: Option<Duration>,
record_ttl: Option<Duration>,
) -> Self {
let now = Instant::now();
let deadline = now + replicate_interval;
let delay = Delay::new_at(deadline);
let next_publish = publish_interval.map(|i| now + i);
Self {
local_id,
next_publish,
publish_interval,
record_ttl,
skipped: HashSet::new(),
inner: PeriodicJob {
interval: replicate_interval,
state: PeriodicJobState::Waiting(delay, deadline)
}
}
}
/// Adds the key of a record that is ignored on the current or
/// next run of the job.
pub fn skip(&mut self, key: record::Key) {
self.skipped.insert(key);
}
/// Checks whether the job is currently running.
pub fn is_running(&self) -> bool {
self.inner.is_running()
}
/// Cuts short the remaining delay, if the job is currently waiting
/// for the delay to expire.
///
/// The job is guaranteed to run on the next invocation of `poll`.
pub fn asap(&mut self, publish: bool) {
if publish {
self.next_publish = Some(Instant::now() - Duration::from_secs(1))
}
self.inner.asap()
}
/// Polls the job for records to replicate.
///
/// Must be called in the context of a task. When `NotReady` is returned,
/// the current task is registered to be notified when the job is ready
/// to be run.
pub fn poll<T>(&mut self, cx: &mut Context, store: &mut T, now: Instant) -> Poll<Record>
where
for<'a> T: RecordStore<'a>
{
if self.inner.is_ready(cx, now) {
let publish = self.next_publish.map_or(false, |t_pub| now >= t_pub);
let records = store.records()
.filter_map(|r| {
let is_publisher = r.publisher.as_ref() == Some(&self.local_id);
if self.skipped.contains(&r.key) || (!publish && is_publisher) {
None
} else {
let mut record = r.into_owned();
if publish && is_publisher {
record.expires = record.expires.or_else(||
self.record_ttl.map(|ttl| now + ttl));
}
Some(record)
}
})
.collect::<Vec<_>>()
.into_iter();
// Schedule the next publishing run.
if publish {
self.next_publish = self.publish_interval.map(|i| now + i);
}
self.skipped.clear();
self.inner.state = PeriodicJobState::Running(records);
}
if let PeriodicJobState::Running(records) = &mut self.inner.state {
loop {
if let Some(r) = records.next() {
if r.is_expired(now) {
store.remove(&r.key)
} else {
return Poll::Ready(r)
}
} else {
break
}
}
// Wait for the next run.
let deadline = now + self.inner.interval;
let delay = Delay::new_at(deadline);
self.inner.state = PeriodicJobState::Waiting(delay, deadline);
assert!(!self.inner.is_ready(cx, now));
}
Poll::Pending
}
}
//////////////////////////////////////////////////////////////////////////////
// AddProviderJob
/// Periodic job for replicating provider records.
pub struct AddProviderJob {
inner: PeriodicJob<vec::IntoIter<ProviderRecord>>
}
impl AddProviderJob {
/// Creates a new periodic job for provider announcements.
pub fn new(interval: Duration) -> Self {
let now = Instant::now();
Self {
inner: PeriodicJob {
interval,
state: {
let deadline = now + interval;
PeriodicJobState::Waiting(Delay::new_at(deadline), deadline)
}
}
}
}
/// Checks whether the job is currently running.
pub fn is_running(&self) -> bool {
self.inner.is_running()
}
/// Cuts short the remaining delay, if the job is currently waiting
/// for the delay to expire.
///
/// The job is guaranteed to run on the next invocation of `poll`.
pub fn asap(&mut self) {
self.inner.asap()
}
/// Polls the job for provider records to replicate.
///
/// Must be called in the context of a task. When `NotReady` is returned,
/// the current task is registered to be notified when the job is ready
/// to be run.
pub fn poll<T>(&mut self, cx: &mut Context, store: &mut T, now: Instant) -> Poll<ProviderRecord>
where
for<'a> T: RecordStore<'a>
{
if self.inner.is_ready(cx, now) {
let records = store.provided()
.map(|r| r.into_owned())
.collect::<Vec<_>>()
.into_iter();
self.inner.state = PeriodicJobState::Running(records);
}
if let PeriodicJobState::Running(keys) = &mut self.inner.state {
loop {
if let Some(r) = keys.next() {
if r.is_expired(now) {
store.remove_provider(&r.key, &r.provider)
} else {
return Poll::Ready(r)
}
} else {
break
}
}
let deadline = now + self.inner.interval;
let delay = Delay::new_at(deadline);
self.inner.state = PeriodicJobState::Waiting(delay, deadline);
assert!(!self.inner.is_ready(cx, now));
}
Poll::Pending
}
}
#[cfg(test)]
mod tests {
use crate::record::store::MemoryStore;
use futures::{executor::block_on, future::poll_fn};
use quickcheck::*;
use rand::Rng;
use super::*;
fn rand_put_record_job() -> PutRecordJob {
let mut rng = rand::thread_rng();
let id = PeerId::random();
let replicate_interval = Duration::from_secs(rng.gen_range(1, 60));
let publish_interval = Some(replicate_interval * rng.gen_range(1, 10));
let record_ttl = Some(Duration::from_secs(rng.gen_range(1, 600)));
PutRecordJob::new(id.clone(), replicate_interval, publish_interval, record_ttl)
}
fn rand_add_provider_job() -> AddProviderJob {
let mut rng = rand::thread_rng();
let interval = Duration::from_secs(rng.gen_range(1, 60));
AddProviderJob::new(interval)
}
#[test]
fn new_job_not_running() {
let job = rand_put_record_job();
assert!(!job.is_running());
let job = rand_add_provider_job();
assert!(!job.is_running());
}
#[test]
fn run_put_record_job() {
fn prop(records: Vec<Record>) {
let mut job = rand_put_record_job();
// Fill a record store.
let mut store = MemoryStore::new(job.local_id.clone());
for r in records {
let _ = store.put(r);
}
block_on(poll_fn(|ctx| {
let now = Instant::now() + job.inner.interval;
// All (non-expired) records in the store must be yielded by the job.
for r in store.records().map(|r| r.into_owned()).collect::<Vec<_>>() {
if !r.is_expired(now) {
assert_eq!(job.poll(ctx, &mut store, now), Poll::Ready(r));
assert!(job.is_running());
}
}
assert_eq!(job.poll(ctx, &mut store, now), Poll::Pending);
assert!(!job.is_running());
Poll::Ready(())
}));
}
quickcheck(prop as fn(_))
}
#[test]
fn run_add_provider_job() {
fn prop(records: Vec<ProviderRecord>) {
let mut job = rand_add_provider_job();
let id = PeerId::random();
// Fill a record store.
let mut store = MemoryStore::new(id.clone());
for mut r in records {
r.provider = id.clone();
let _ = store.add_provider(r);
}
block_on(poll_fn(|ctx| {
let now = Instant::now() + job.inner.interval;
// All (non-expired) records in the store must be yielded by the job.
for r in store.provided().map(|r| r.into_owned()).collect::<Vec<_>>() {
if !r.is_expired(now) {
assert_eq!(job.poll(ctx, &mut store, now), Poll::Ready(r));
assert!(job.is_running());
}
}
assert_eq!(job.poll(ctx, &mut store, now), Poll::Pending);
assert!(!job.is_running());
Poll::Ready(())
}));
}
quickcheck(prop as fn(_))
}
}
test_forward.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""
Caffe testcases
====================
This file is a test script for testing Caffe operators with Relay.
"""
import os
os.environ["GLOG_minloglevel"] = "2"
import sys
import logging
logging.basicConfig(level=logging.ERROR)
import numpy as np
from google.protobuf import text_format
import caffe
from caffe import layers as L, params as P
from caffe.proto import caffe_pb2 as pb
import tvm
from tvm import relay
from tvm.contrib import utils, graph_executor
from tvm.contrib.download import download_testdata
CURRENT_DIR = os.path.join(os.path.expanduser("~"), ".tvm_test_data", "caffe_test")
#######################################################################
# Generic functions for TVM & Caffe
# ------------------------------------------
def _create_dir(d_path):
"""If the directory is not existed, create it"""
if not (os.path.exists(d_path) and os.path.isdir(d_path)):
os.makedirs(d_path)
def _list_to_str(ll):
"""Convert list or tuple to str, separated by underline."""
if isinstance(ll, (tuple, list)):
tmp = [str(i) for i in ll]
return "_".join(tmp)
def _gen_filename_str(op_name, data_shape, *args, **kwargs):
"""Combining the filename according to the op_name, shape and other args."""
file_dir = os.path.join(CURRENT_DIR, op_name)
_create_dir(file_dir)
res = op_name + "_"
shape_str = _list_to_str(list(data_shape))
res += shape_str
for arg in args:
if isinstance(arg, (tuple, list)):
res += "_" + _list_to_str(arg)
elif isinstance(arg, (int, float, str)):
res += "_" + str(arg)
for _, v in kwargs.items():
if isinstance(v, (tuple, list)):
res += "_" + _list_to_str(v)
elif isinstance(v, (int, float, str)):
res += "_" + str(v)
res = res.replace(".", "_")
res = res.replace("-", "_")
proto_file = os.path.join(file_dir, res + ".prototxt")
blob_file = os.path.join(file_dir, res + ".caffemodel")
solver_file = os.path.join(file_dir, res + "_solver.prototxt")
return (proto_file, blob_file, solver_file)
def _save_prototxt(n_netspec, f_path):
"""Generate .prototxt file according to caffe.NetSpec"""
s = n_netspec.to_proto()
with open(f_path, "w") as f:
f.write(str(s))
def _save_solver(solver_file, proto_file, blob_file):
"""Define a solver proto, you can change the configs."""
blob_file_prefix = blob_file.split(".caffemodel")[0]
s = pb.SolverParameter()
s.train_net = proto_file
s.base_lr = 0.01
s.momentum = 0.9
s.weight_decay = 0.0005
s.lr_policy = "inv"
s.gamma = 0.0001
s.power = 0.75
s.display = 1
s.max_iter = 100000
s.snapshot = 100000
s.snapshot_prefix = blob_file_prefix
with open(solver_file, "w") as f:
f.write(str(s))
def _save_caffemodel(solver_file, blob_file):
"""Generate .caffemodel file."""
solver = caffe.SGDSolver(solver_file)
solver.net.save(blob_file)
def _gen_model_files(n_netspec, proto_file, blob_file, solver_file):
_save_prototxt(n_netspec, proto_file)
_save_solver(solver_file, proto_file, blob_file)
_save_caffemodel(solver_file, blob_file)
def _siso_op(data, func, *args, **kwargs):
"""Create single input and single output Caffe op"""
n = caffe.NetSpec()
n.data = L.Input(input_param={"shape": {"dim": list(data.shape)}})
n.output = func(n.data, *args, **kwargs)
return n
def _miso_op(data_list, func, *args, **kwargs):
"""Create multi input and single output Caffe op"""
n = caffe.NetSpec()
if not isinstance(data_list, (tuple, list)):
raise TypeError("Need tuple or list but get {}".format(type(data_list)))
input_list = list()
for idx, data in enumerate(data_list):
n["data" + str(idx)] = L.Input(input_param={"shape": {"dim": list(data.shape)}})
input_list.append(n["data" + str(idx)])
n.output = func(*input_list, *args, **kwargs)
return n
def _simo_op(data, func, *args, **kwargs):
"""Create single input and multi output Caffe op"""
n = caffe.NetSpec()
n.data = L.Input(input_param={"shape": {"dim": list(data.shape)}})
output_list = func(n.data, *args, **kwargs)
for idx, out in enumerate(output_list):
n["output" + str(idx)] = out
return n
def _run_caffe(data, proto_file, blob_file):
"""Run caffe model by Caffe according to .caffemodel and .prototxt"""
net = caffe.Net(proto_file, blob_file, caffe.TEST)
if isinstance(data, (list, tuple)):
for idx, d in enumerate(data):
net.blobs["data" + str(idx)].data[...] = d
else:
net.blobs["data"].data[...] = data
out = net.forward()
caffe_output = list()
for i in range(len(out.keys())):
if "output" + str(i) not in out.keys():
caffe_output.clear()
return list(out.values())
caffe_output.append(out["output" + str(i)])
return caffe_output
def _run_tvm(data, proto_file, blob_file):
"""Run caffe model by TVM according to .caffemodel and .prototxt"""
init_net = pb.NetParameter()
predict_net = pb.NetParameter()
# load model
with open(proto_file, "r") as f:
text_format.Merge(f.read(), predict_net)
# load blob
with open(blob_file, "rb") as f:
init_net.ParseFromString(f.read())
shape_dict = dict()
dtype_dict = dict()
if isinstance(data, (tuple, list)):
for idx, d in enumerate(data):
shape_dict["data" + str(idx)] = d.shape
dtype_dict["data" + str(idx)] = "float32"
else:
shape_dict = {"data": data.shape}
dtype_dict = {"data": "float32"}
mod, params = relay.frontend.from_caffe(init_net, predict_net, shape_dict, dtype_dict)
target = "llvm"
dev = tvm.cpu(0)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
dtype = "float32"
m = graph_executor.GraphModule(lib["default"](dev))
if isinstance(data, (tuple, list)):
for idx, d in enumerate(data):
m.set_input("data" + str(idx), tvm.nd.array(d.astype(dtype)))
else:
m.set_input("data", tvm.nd.array(data.astype(dtype)))
# execute
m.run()
tvm_output = list()
# get outputs
for i in range(m.get_num_outputs()):
tvm_output.append(m.get_output(i).numpy())
return tvm_output
def _compare_caffe_tvm(caffe_out, tvm_out, is_network=False):
for i in range(len(caffe_out)):
if is_network:
caffe_out[i] = caffe_out[i][:1]
tvm.testing.assert_allclose(caffe_out[i], tvm_out[i], rtol=1e-5, atol=1e-5)
def _test_op(data, func_op, op_name, **kwargs):
"""Single op testing pipline."""
shape_list = list()
if isinstance(data, (list, tuple)):
n = _miso_op(data, func_op, **kwargs)
for d in data:
shape_list.extend(list(d.shape))
else:
output_num = 1
if "ntop" in kwargs.keys():
output_num = kwargs["ntop"]
if output_num == 1:
n = _siso_op(data, func_op, **kwargs)
else:
n = _simo_op(data, func_op, **kwargs)
shape_list = list(data.shape)
# obtain the .caffemodel file and .prototxt file
(proto_file, blob_file, solver_file) = _gen_filename_str(op_name, shape_list, **kwargs)
_gen_model_files(n, proto_file, blob_file, solver_file)
# run model in Caffe
caffe_out = _run_caffe(data, proto_file, blob_file)
# run model in TVM
tvm_out = _run_tvm(data, proto_file, blob_file)
_compare_caffe_tvm(caffe_out, tvm_out)
def _test_network(data, proto_file, blob_file):
# run model in Caffe
caffe_out = _run_caffe(data, proto_file, blob_file)
# run model in TVM
tvm_out = _run_tvm(data, proto_file, blob_file)
_compare_caffe_tvm(caffe_out, tvm_out, is_network=True)
#######################################################################
# BatchNorm
# -----------
def _test_batchnorm(data, moving_average_fraction=0.999, eps=1e-5):
"""One iteration of BatchNorm"""
_test_op(
data, L.BatchNorm, "BatchNorm", moving_average_fraction=moving_average_fraction, eps=eps
)
def test_forward_BatchNorm():
"""BatchNorm"""
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
_test_batchnorm(data)
_test_batchnorm(data, moving_average_fraction=0.88, eps=1e-4)
#######################################################################
# Concat
# -----------
def _test_concat(data_list, axis=1):
"""One iteration of Concat"""
_test_op(data_list, L.Concat, "Concat", axis=axis)
def test_forward_Concat():
"""Concat"""
_test_concat([np.random.rand(1, 3, 10, 10), np.random.rand(1, 2, 10, 10)], axis=1)
_test_concat([np.random.rand(3, 10, 10), np.random.rand(2, 10, 10)], axis=0)
_test_concat([np.random.rand(3, 10), np.random.rand(2, 10)], axis=0)
#######################################################################
# Convolution
# -----------
def _test_convolution(data, **kwargs):
"""One iteration of Convolution"""
_test_op(data, L.Convolution, "Convolution", **kwargs)
def test_forward_Convolution():
"""Convolution"""
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
_test_convolution(
data,
num_output=20,
bias_term=True,
pad=0,
kernel_size=3,
stride=2,
dilation=1,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
_test_convolution(
data,
num_output=20,
bias_term=False,
pad=[1, 2],
kernel_size=3,
stride=2,
dilation=1,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
_test_convolution(
data,
num_output=20,
bias_term=True,
pad=[1, 2],
kernel_size=[3, 5],
stride=[2, 1],
dilation=[1, 2],
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
_test_convolution(
np.random.rand(1, 2, 10, 10).astype(np.float32),
num_output=20,
bias_term=True,
pad=[1, 2],
kernel_size=[3, 5],
stride=[2, 1],
dilation=[1, 2],
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
group=2,
)
_test_convolution(
data,
num_output=20,
bias_term=True,
pad_h=1,
pad_w=2,
kernel_h=3,
kernel_w=5,
stride_h=2,
stride_w=1,
dilation=[1, 2],
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
#######################################################################
# Crop
# -----------
def _test_crop(data, **kwargs):
"""One iteration of Crop"""
_test_op(data, L.Crop, "Crop", **kwargs)
def test_forward_Crop():
"""Crop"""
_test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)])
_test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1)
_test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=2)
_test_crop(
[np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=[1, 2, 4]
)
_test_crop(
[np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=2, offset=[2, 4]
)
_test_crop([np.random.rand(10, 120, 120), np.random.rand(5, 50, 60)], axis=1, offset=[2, 4])
_test_crop([np.random.rand(120, 120), np.random.rand(50, 60)], axis=0, offset=[2, 4])
#######################################################################
# Deconvolution
# -----------
def _test_deconvolution(data, **kwargs):
"""One iteration of Deconvolution"""
_test_op(data, L.Deconvolution, "Deconvolution", **kwargs)
def test_forward_Deconvolution():
"""Deconvolution"""
data = np.random.rand(1, 16, 32, 32).astype(np.float32)
_test_deconvolution(
data,
convolution_param=dict(
num_output=20,
bias_term=True,
pad=0,
kernel_size=3,
stride=2,
dilation=1,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
),
)
_test_deconvolution(
data,
convolution_param=dict(
num_output=20,
bias_term=False,
pad=[1, 2],
kernel_size=3,
stride=2,
dilation=1,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
),
)
_test_deconvolution(
data,
convolution_param=dict(
num_output=20,
bias_term=True,
pad_h=1,
pad_w=2,
kernel_h=3,
kernel_w=5,
stride_h=2,
stride_w=1,
dilation=1,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
),
)
#######################################################################
# Dropout
# -----------
def _test_dropout(data, **kwargs):
"""One iteration of Dropout"""
_test_op(data, L.Dropout, "Dropout", **kwargs)
def test_forward_Dropout():
"""Dropout"""
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
_test_dropout(data)
_test_dropout(data, dropout_ratio=0.7)
#######################################################################
# Eltwise
# -----------
def _test_eltwise(data_list, **kwargs):
"""One iteration of Eltwise"""
_test_op(data_list, L.Eltwise, "Eltwise", **kwargs)
def test_forward_Eltwise():
"""Eltwise"""
_test_eltwise(
[
np.random.rand(1, 3, 10, 11).astype(np.float32),
np.random.rand(1, 3, 10, 11).astype(np.float32),
],
operation=0,
)
_test_eltwise(
[
np.random.rand(1, 3, 10, 11).astype(np.float32),
np.random.rand(1, 3, 10, 11).astype(np.float32),
],
operation=1,
)
_test_eltwise(
[
np.random.rand(1, 3, 10, 11).astype(np.float32),
np.random.rand(1, 3, 10, 11).astype(np.float32),
],
operation=2,
)
_test_eltwise(
[
np.random.rand(1, 3, 10, 11).astype(np.float32),
np.random.rand(1, 3, 10, 11).astype(np.float32),
],
operation=1,
coeff=[0.5, 1],
)
#######################################################################
# Flatten
# -----------
def _test_flatten(data, axis=1):
"""One iteration of Flatten"""
_test_op(data, L.Flatten, "Flatten", axis=axis)
def test_forward_Flatten():
"""Flatten"""
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
_test_flatten(data)
_test_flatten(data, axis=1)
#######################################################################
# Flatten
# -----------
def _test_inner_product(data, **kwargs):
"""One iteration of InnerProduct"""
_test_op(data, L.InnerProduct, "InnerProduct", **kwargs)
def test_forward_InnerProduct():
"""InnerProduct"""
data = np.random.rand(1, 3, 10, 10)
_test_inner_product(data, num_output=20, bias_term=False, weight_filler=dict(type="xavier"))
_test_inner_product(
data,
num_output=20,
bias_term=True,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
_test_inner_product(
np.random.rand(20, 10).astype(np.float32),
num_output=30,
bias_term=True,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
#######################################################################
# LRN
# -----------
def _test_lrn(data, local_size=5, alpha=1.0, beta=0.75, k=1.0):
"""One iteration of LRN"""
_test_op(data, L.LRN, "LRN", local_size=local_size, alpha=alpha, beta=beta, k=k)
def test_forward_LRN():
"""LRN"""
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
_test_lrn(data)
_test_lrn(data, local_size=3)
_test_lrn(data, local_size=3, alpha=2.0)
_test_lrn(
data,
local_size=3,
alpha=2.0,
beta=0.5,
)
_test_lrn(data, local_size=3, alpha=2.0, beta=0.5, k=2.0)
#######################################################################
# Pooling
# -----------
def _test_pooling(data, **kwargs):
"""One iteration of Pooling."""
_test_op(data, L.Pooling, "Pooling", **kwargs)
def test_forward_Pooling():
"""Pooing"""
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
# MAX Pooling
_test_pooling(data, kernel_size=2, stride=2, pad=0, pool=P.Pooling.MAX)
_test_pooling(
data, kernel_h=2, kernel_w=3, stride_h=2, stride_w=1, pad_h=1, pad_w=2, pool=P.Pooling.MAX
)
_test_pooling(data, pool=P.Pooling.MAX, global_pooling=True)
# AVE Pooing
_test_pooling(data, kernel_size=2, stride=2, pad=0, pool=P.Pooling.AVE)
_test_pooling(
data, kernel_h=2, kernel_w=3, stride_h=2, stride_w=1, pad_h=1, pad_w=2, pool=P.Pooling.AVE
)
_test_pooling(data, pool=P.Pooling.AVE, global_pooling=True)
#######################################################################
# PReLU
# -----------
def _test_prelu(data, **kwargs):
"""One iteration of PReLU."""
_test_op(data, L.PReLU, "PReLU", **kwargs)
def test_forward_PReLU():
"""PReLU"""
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
_test_prelu(data, filler=dict(type="constant", value=0.5))
_test_prelu(data)
_test_prelu(np.random.rand(10, 20).astype(np.float32))
#######################################################################
# ReLU
# -----------
def _test_relu(data, **kwargs):
"""One iteration of ReLU."""
_test_op(data, L.ReLU, "ReLU", **kwargs)
def test_forward_ReLU():
"""ReLU"""
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
_test_relu(data)
_test_relu(np.random.rand(10, 20).astype(np.float32))
#######################################################################
# Reshape
# -----------
def _test_reshape(data, **kwargs):
"""One iteration of Reshape."""
_test_op(data, L.Reshape, "Reshape", **kwargs)
def test_forward_Reshape():
"""Reshape"""
data = np.random.rand(1, 8, 6).astype(np.float32)
_test_reshape(data, reshape_param={"shape": {"dim": [4, 3, 4]}})
_test_reshape(data, reshape_param={"shape": {"dim": [2, 0, 3]}})
_test_reshape(data, reshape_param={"shape": {"dim": [2, 0, -1]}})
_test_reshape(data, reshape_param={"shape": {"dim": [0, -1]}})
_test_reshape(data, reshape_param={"shape": {"dim": [2, 3]}, "axis": 2})
_test_reshape(data, reshape_param={"shape": {"dim": [4, 3, 4]}, "axis": 1})
_test_reshape(data, reshape_param={"shape": {"dim": [4, 3, 4]}, "axis": -3})
_test_reshape(data, reshape_param={"shape": {"dim": [2, 4]}, "axis": 1, "num_axes": 1})
_test_reshape(data, reshape_param={"shape": {"dim": [3, 16]}, "axis": 1, "num_axes": 2})
#######################################################################
# Scale
# -----------
def _test_scale(data, **kwargs):
"""One iteration of Scale."""
_test_op(data, L.Scale, "Scale", **kwargs)
def test_forward_Scale():
"""Scale"""
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
_test_scale(data, filler=dict(type="xavier"))
_test_scale(data, filler=dict(type="xavier"), bias_term=True, bias_filler=dict(type="xavier"))
#######################################################################
# Sigmoid
# -----------
def _test_sigmoid(data, **kwargs):
"""One iteration of Sigmoid."""
_test_op(data, L.Sigmoid, "Sigmoid", **kwargs)
def test_forward_Sigmoid():
"""Sigmoid"""
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
_test_sigmoid(data)
#######################################################################
# Slice
# -----------
def _test_slice(data, **kwargs):
"""One iteration of Slice"""
_test_op(data, L.Slice, "Slice", **kwargs)
def test_forward_Slice():
"""Slice"""
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
_test_slice(data, ntop=2, slice_param=dict(axis=1, slice_point=[1]))
_test_slice(data, ntop=2, slice_param=dict(axis=-1, slice_point=[1]))
_test_slice(data, ntop=3, slice_param=dict(axis=2, slice_point=[1, 6]))
_test_slice(data, ntop=3)
#######################################################################
# Softmax
# -----------
def _test_softmax(data, **kwargs):
"""One iteration of Softmax"""
_test_op(data, L.Softmax, "Softmax", **kwargs)
def test_forward_Softmax():
"""Softmax"""
_test_softmax(np.random.rand(1, 3, 10, 10).astype(np.float32))
_test_softmax(np.random.rand(1, 3, 10, 10).astype(np.float32), axis=2)
_test_softmax(np.random.rand(10, 10).astype(np.float32), axis=0)
_test_softmax(np.random.rand(2, 10, 10).astype(np.float32), axis=1)
#######################################################################
# TanH
# -----------
def _test_tanh(data, **kwargs):
"""One iteration of TanH"""
_test_op(data, L.TanH, "TanH", **kwargs)
def test_forward_TanH():
"""TanH"""
_test_tanh(np.random.rand(1, 3, 10, 10).astype(np.float32))
_test_tanh(np.random.rand(3, 10, 10).astype(np.float32))
_test_tanh(np.random.rand(10, 10).astype(np.float32))
_test_tanh(np.random.rand(10).astype(np.float32))
#######################################################################
# Embed
# -----------
def _test_embed(data, **kwargs):
"""One iteration of Embed"""
_test_op(data, L.Embed, "Embed", **kwargs)
def test_forward_Embed():
k = 20
data = [i for i in range(k)]
np.random.shuffle(data)
# dimension is 1
data = np.asarray(data)
_test_embed(
data,
num_output=30,
input_dim=k,
bias_term=True,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
_test_embed(
data,
num_output=30,
input_dim=k,
bias_term=False,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
# dimension is 2
data = np.reshape(data, [4, 5])
_test_embed(
data,
num_output=30,
input_dim=k,
bias_term=True,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
_test_embed(
data,
num_output=30,
input_dim=k,
bias_term=False,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
# dimension is 3
data = np.reshape(data, [2, 2, 5])
_test_embed(
data,
num_output=30,
input_dim=k,
bias_term=True,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
_test_embed(
data,
num_output=30,
input_dim=k,
bias_term=False,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
# dimension is 4
data = np.reshape(data, [2, 2, 5, 1])
_test_embed(
data,
num_output=30,
input_dim=k,
bias_term=True,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
_test_embed(
data,
num_output=30,
input_dim=k,
bias_term=False,
weight_filler=dict(type="xavier"),
bias_filler=dict(type="xavier"),
)
#######################################################################
# Mobilenetv2
# -----------
def _test_mobilenetv2(data):
"""One iteration of Mobilenetv2"""
mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)
mean_val = np.reshape(mean_val, (1, 3, 1, 1))
mean_val = np.tile(mean_val, (1, 1, 224, 224))
data_process = data - mean_val
data_process = data_process / 58.8
data_process = data_process.astype(np.float32)
proto_file_url = (
"https://github.com/shicai/MobileNet-Caffe/raw/" "master/mobilenet_v2_deploy.prototxt"
)
blob_file_url = (
"https://github.com/shicai/MobileNet-Caffe/blob/" "master/mobilenet_v2.caffemodel?raw=true"
)
proto_file = download_testdata(proto_file_url, "mobilenetv2.prototxt", module="model")
blob_file = download_testdata(blob_file_url, "mobilenetv2.caffemodel", module="model")
_test_network(data_process, proto_file, blob_file)
def test_forward_Mobilenetv2():
"""Mobilenetv2"""
data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)
_test_mobilenetv2(data)
#######################################################################
# Alexnet
# -----------
def _test_alexnet(data):
"""One iteration of Alexnet"""
mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)
mean_val = np.reshape(mean_val, (1, 3, 1, 1))
mean_val = np.tile(mean_val, (1, 1, 227, 227))
data_process = data - mean_val
data_process = data_process.astype(np.float32)
proto_file_url = (
"https://github.com/BVLC/caffe/raw/master/models/" "bvlc_alexnet/deploy.prototxt"
)
blob_file_url = "http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel"
proto_file = download_testdata(proto_file_url, "alexnet.prototxt", module="model")
blob_file = download_testdata(blob_file_url, "alexnet.caffemodel", module="model")
_test_network(data_process, proto_file, blob_file)
def test_forward_Alexnet():
"""Alexnet"""
data = np.random.randint(0, 256, size=(1, 3, 227, 227)).astype(np.float32)
_test_alexnet(data)
#######################################################################
# Resnet50
# -----------
def _test_resnet50(data):
"""One iteration of Resnet50"""
mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)
mean_val = np.reshape(mean_val, (1, 3, 1, 1))
mean_val = np.tile(mean_val, (1, 1, 224, 224))
data_process = data - mean_val
data_process = data_process.astype(np.float32)
proto_file_url = (
"https://github.com/fernchen/CaffeModels/raw/" "master/resnet/ResNet-50-deploy.prototxt"
)
blob_file_url = (
"https://github.com/fernchen/CaffeModels/raw/" "master/resnet/ResNet-50-model.caffemodel"
)
proto_file = download_testdata(proto_file_url, "resnet50.prototxt", module="model")
blob_file = download_testdata(blob_file_url, "resnet50.caffemodel", module="model")
_test_network(data_process, proto_file, blob_file)
def test_forward_Resnet50():
"""Resnet50"""
data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)
_test_resnet50(data)
#######################################################################
# Inceptionv1
# -----------
def _test_inceptionv1(data):
"""One iteration of Inceptionv4"""
mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)
mean_val = np.reshape(mean_val, (1, 3, 1, 1))
mean_val = np.tile(mean_val, (1, 1, 224, 224))
data_process = data - mean_val
data_process = data_process / 58.8
data_process = data_process.astype(np.float32)
proto_file_url = (
"https://github.com/BVLC/caffe/raw/master/models" "/bvlc_googlenet/deploy.prototxt"
)
blob_file_url = "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel"
proto_file = download_testdata(proto_file_url, "inceptionv1.prototxt", module="model")
blob_file = download_testdata(blob_file_url, "inceptionv1.caffemodel", module="model")
_test_network(data_process, proto_file, blob_file)
def test_forward_Inceptionv1():
"""Inceptionv4"""
data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)
_test_inceptionv1(data)
if __name__ == "__main__":
# NN
test_forward_Convolution()
test_forward_Deconvolution()
test_forward_Dropout()
test_forward_LRN()
test_forward_Pooling()
test_forward_Scale()
test_forward_InnerProduct()
test_forward_BatchNorm()
# Elemwise
test_forward_Eltwise()
# Activation
test_forward_PReLU()
test_forward_ReLU()
test_forward_Sigmoid()
test_forward_Softmax()
test_forward_TanH()
# Reshape
test_forward_Reshape()
test_forward_Flatten()
# Math
test_forward_Concat()
test_forward_Crop()
test_forward_Slice()
# End to End
test_forward_Mobilenetv2()
test_forward_Alexnet()
test_forward_Resnet50()
    test_forward_Inceptionv1()
enemy.js | module.exports = (sequelize, DataTypes) => {
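  // Sequelize model for the 'Enemy' table: an auto-incrementing id, base combat stats,
  // and optional entrance flavor text.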
  const Enemy = sequelize.define('Enemy', {
    id: {
type: DataTypes.INTEGER,
allowNull: false,
primaryKey: true,
autoIncrement: true
},
name: {
type: DataTypes.STRING,
allowNull: false
},
health: {
type: DataTypes.INTEGER,
allowNull: false,
defaultValue: 1,
},
power: {
type: DataTypes.INTEGER,
allowNull: false,
defaultValue: 1,
},
defense: {
type: DataTypes.INTEGER,
allowNull: false,
defaultValue: 1,
},
speed: {
type: DataTypes.INTEGER,
allowNull: false,
defaultValue: 1,
},
entranceText: {
type: DataTypes.STRING,
allowNull: true,
},
createdAt: {
type: DataTypes.DATE,
allowNull: false,
defaultValue: DataTypes.NOW
},
updatedAt: {
type: DataTypes.DATE,
allowNull: false,
defaultValue: DataTypes.NOW
}
}, {
tableName: 'Enemy',
timestamps: false
});
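  // Illustrative usage sketch (assumes a configured Sequelize instance and module path;
  // the names below are placeholders, not part of this project):
  //
  //   const { Sequelize, DataTypes } = require('sequelize');
  //   const sequelize = new Sequelize('sqlite::memory:');
  //   const Enemy = require('./enemy')(sequelize, DataTypes);
  //   // inside an async function:
  //   await sequelize.sync();
  //   const slime = await Enemy.create({ name: 'Slime', health: 10, power: 2 });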
return Enemy;
};
signature.rs | //! The `signature` module provides functionality for public and private keys.
use crate::{pubkey::Pubkey, transaction::TransactionError};
use generic_array::{typenum::U64, GenericArray};
use hmac::Hmac;
use rand::{rngs::OsRng, CryptoRng, RngCore};
use std::{
borrow::{Borrow, Cow},
error, fmt,
fs::{self, File, OpenOptions},
io::{Read, Write},
mem,
path::Path,
str::FromStr,
};
use thiserror::Error;
#[derive(Debug, Default)]
pub struct Keypair(ed25519_dalek::Keypair);
impl Keypair {
pub fn generate<R>(csprng: &mut R) -> Self
where
R: CryptoRng + RngCore,
{
Self(ed25519_dalek::Keypair::generate(csprng))
}
/// Return a new ED25519 keypair
pub fn new() -> Self {
let mut rng = OsRng::new().unwrap();
Self::generate(&mut rng)
}
pub fn from_bytes(bytes: &[u8]) -> Result<Self, ed25519_dalek::SignatureError> {
ed25519_dalek::Keypair::from_bytes(bytes).map(Self)
}
pub fn to_bytes(&self) -> [u8; 64] {
self.0.to_bytes()
}
pub fn secret(&self) -> &ed25519_dalek::SecretKey {
&self.0.secret
}
}
#[repr(transparent)]
#[derive(Serialize, Deserialize, Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Signature(GenericArray<u8, U64>);
impl crate::sanitize::Sanitize for Signature {}
impl Signature {
pub fn new(signature_slice: &[u8]) -> Self {
Self(GenericArray::clone_from_slice(&signature_slice))
}
pub fn verify(&self, pubkey_bytes: &[u8], message_bytes: &[u8]) -> bool {
let pubkey = ed25519_dalek::PublicKey::from_bytes(pubkey_bytes);
let signature = ed25519_dalek::Signature::from_bytes(self.0.as_slice());
if pubkey.is_err() || signature.is_err() {
return false;
}
pubkey
.unwrap()
.verify(message_bytes, &signature.unwrap())
.is_ok()
}
}
pub trait Signable {
fn sign(&mut self, keypair: &Keypair) {
let signature = keypair.sign_message(self.signable_data().borrow());
self.set_signature(signature);
}
fn verify(&self) -> bool {
self.get_signature()
.verify(&self.pubkey().as_ref(), self.signable_data().borrow())
}
fn pubkey(&self) -> Pubkey;
fn signable_data(&self) -> Cow<[u8]>;
fn get_signature(&self) -> Signature;
fn set_signature(&mut self, signature: Signature);
}
impl AsRef<[u8]> for Signature {
fn as_ref(&self) -> &[u8] {
&self.0[..]
}
}
impl fmt::Debug for Signature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", bs58::encode(self.0).into_string())
}
}
impl fmt::Display for Signature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", bs58::encode(self.0).into_string())
}
}
impl Into<[u8; 64]> for Signature {
fn into(self) -> [u8; 64] {
<GenericArray<u8, U64> as Into<[u8; 64]>>::into(self.0)
}
}
#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum ParseSignatureError {
#[error("string decoded to wrong size for signature")]
WrongSize,
#[error("failed to decode string to signature")]
Invalid,
}
impl FromStr for Signature {
type Err = ParseSignatureError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let bytes = bs58::decode(s)
.into_vec()
.map_err(|_| ParseSignatureError::Invalid)?;
if bytes.len() != mem::size_of::<Signature>() {
Err(ParseSignatureError::WrongSize)
} else {
Ok(Signature::new(&bytes))
}
}
}
pub trait Signer {
fn pubkey(&self) -> Pubkey {
self.try_pubkey().unwrap_or_default()
}
fn try_pubkey(&self) -> Result<Pubkey, SignerError>;
fn sign_message(&self, message: &[u8]) -> Signature {
self.try_sign_message(message).unwrap_or_default()
}
fn try_sign_message(&self, message: &[u8]) -> Result<Signature, SignerError>;
}
impl PartialEq for dyn Signer {
fn eq(&self, other: &dyn Signer) -> bool {
self.pubkey() == other.pubkey()
}
}
impl std::fmt::Debug for dyn Signer {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "Signer: {:?}", self.pubkey())
}
}
impl Signer for Keypair {
/// Return the public key for the given keypair
fn pubkey(&self) -> Pubkey {
Pubkey::new(self.0.public.as_ref())
}
fn try_pubkey(&self) -> Result<Pubkey, SignerError> {
Ok(self.pubkey())
}
fn sign_message(&self, message: &[u8]) -> Signature {
Signature::new(&self.0.sign(message).to_bytes())
}
fn try_sign_message(&self, message: &[u8]) -> Result<Signature, SignerError> {
Ok(self.sign_message(message))
}
}
impl<T> PartialEq<T> for Keypair
where
T: Signer,
{
fn eq(&self, other: &T) -> bool {
self.pubkey() == other.pubkey()
}
}
impl<T> From<T> for Box<dyn Signer>
where
T: Signer + 'static,
{
fn from(signer: T) -> Self {
Box::new(signer)
}
}
#[derive(Debug, Error, PartialEq)]
pub enum SignerError {
#[error("keypair-pubkey mismatch")]
KeypairPubkeyMismatch,
#[error("not enough signers")]
NotEnoughSigners,
#[error("transaction error")]
TransactionError(#[from] TransactionError),
#[error("custom error: {0}")]
Custom(String),
// Presigner-specific Errors
#[error("presigner error")]
PresignerError(#[from] PresignerError),
// Remote Keypair-specific Errors
#[error("connection error: {0}")]
Connection(String),
#[error("invalid input: {0}")]
InvalidInput(String),
#[error("no device found")]
NoDeviceFound,
#[error("device protocol error: {0}")]
Protocol(String),
#[error("{0}")]
UserCancel(String),
}
#[derive(Clone, Debug, Default)]
pub struct Presigner {
pubkey: Pubkey,
signature: Signature,
}
impl Presigner {
pub fn new(pubkey: &Pubkey, signature: &Signature) -> Self {
Self {
pubkey: *pubkey,
signature: *signature,
}
}
}
#[derive(Debug, Error, PartialEq)]
pub enum PresignerError {
#[error("pre-generated signature cannot verify data")]
VerificationFailure,
}
impl Signer for Presigner {
fn try_pubkey(&self) -> Result<Pubkey, SignerError> {
Ok(self.pubkey)
}
fn try_sign_message(&self, message: &[u8]) -> Result<Signature, SignerError> {
if self.signature.verify(self.pubkey.as_ref(), message) {
Ok(self.signature)
} else {
Err(PresignerError::VerificationFailure.into())
}
}
}
impl<T> PartialEq<T> for Presigner
where
T: Signer,
{
fn eq(&self, other: &T) -> bool {
self.pubkey() == other.pubkey()
}
}
/// NullSigner - A `Signer` implementation that always produces `Signature::default()`.
/// Used as a placeholder for absentee signers whose `Pubkey` is required to construct
/// the transaction.
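///
/// A minimal sketch of the intended use (assuming `Pubkey` implements `Default`;
/// the variable names and message bytes are illustrative):
///
/// ```ignore
/// let absent = Pubkey::default();
/// let placeholder = NullSigner::new(&absent);
/// // Signing always yields the default (all-zero) signature.
/// assert_eq!(placeholder.sign_message(b"msg"), Signature::default());
/// ```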
#[derive(Clone, Debug, Default)]
pub struct NullSigner {
pubkey: Pubkey,
}
impl NullSigner {
pub fn new(pubkey: &Pubkey) -> Self {
Self { pubkey: *pubkey }
}
}
impl Signer for NullSigner {
fn try_pubkey(&self) -> Result<Pubkey, SignerError> {
Ok(self.pubkey)
}
fn try_sign_message(&self, _message: &[u8]) -> Result<Signature, SignerError> {
Ok(Signature::default())
}
}
impl<T> PartialEq<T> for NullSigner
where
T: Signer,
{
fn eq(&self, other: &T) -> bool {
self.pubkey == other.pubkey()
}
}
pub fn read_keypair<R: Read>(reader: &mut R) -> Result<Keypair, Box<dyn error::Error>> {
let bytes: Vec<u8> = serde_json::from_reader(reader)?;
let dalek_keypair = ed25519_dalek::Keypair::from_bytes(&bytes)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
Ok(Keypair(dalek_keypair))
}
pub fn read_keypair_file(path: &str) -> Result<Keypair, Box<dyn error::Error>> {
assert!(path != "-");
let mut file = File::open(path.to_string())?;
read_keypair(&mut file)
}
pub fn write_keypair<W: Write>(
keypair: &Keypair,
writer: &mut W,
) -> Result<String, Box<dyn error::Error>> {
let keypair_bytes = keypair.0.to_bytes();
let serialized = serde_json::to_string(&keypair_bytes.to_vec())?;
writer.write_all(&serialized.clone().into_bytes())?;
Ok(serialized)
}
pub fn write_keypair_file(
keypair: &Keypair,
outfile: &str,
) -> Result<String, Box<dyn error::Error>> {
assert!(outfile != "-");
if let Some(outdir) = Path::new(outfile).parent() {
fs::create_dir_all(outdir)?;
}
let mut f = {
#[cfg(not(unix))]
{
OpenOptions::new()
}
#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
OpenOptions::new().mode(0o600)
}
}
.write(true)
.truncate(true)
.create(true)
.open(outfile)?;
write_keypair(keypair, &mut f)
}
pub fn keypair_from_seed(seed: &[u8]) -> Result<Keypair, Box<dyn error::Error>> {
if seed.len() < ed25519_dalek::SECRET_KEY_LENGTH {
return Err("Seed is too short".into());
}
let secret = ed25519_dalek::SecretKey::from_bytes(&seed[..ed25519_dalek::SECRET_KEY_LENGTH])
.map_err(|e| e.to_string())?;
let public = ed25519_dalek::PublicKey::from(&secret);
let dalek_keypair = ed25519_dalek::Keypair { secret, public };
Ok(Keypair(dalek_keypair))
}
| pub fn keypair_from_seed_phrase_and_passphrase(
seed_phrase: &str,
passphrase: &str,
) -> Result<Keypair, Box<dyn error::Error>> {
const PBKDF2_ROUNDS: usize = 2048;
const PBKDF2_BYTES: usize = 64;
let salt = format!("mnemonic{}", passphrase);
let mut seed = vec![0u8; PBKDF2_BYTES];
pbkdf2::pbkdf2::<Hmac<sha2::Sha512>>(
seed_phrase.as_bytes(),
salt.as_bytes(),
PBKDF2_ROUNDS,
&mut seed,
);
keypair_from_seed(&seed[..])
}
#[cfg(test)]
mod tests {
use super::*;
use bip39::{Language, Mnemonic, MnemonicType, Seed};
use std::mem;
fn tmp_file_path(name: &str) -> String {
use std::env;
let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
let keypair = Keypair::new();
format!("{}/tmp/{}-{}", out_dir, name, keypair.pubkey()).to_string()
}
#[test]
fn test_write_keypair_file() {
let outfile = tmp_file_path("test_write_keypair_file.json");
let serialized_keypair = write_keypair_file(&Keypair::new(), &outfile).unwrap();
let keypair_vec: Vec<u8> = serde_json::from_str(&serialized_keypair).unwrap();
assert!(Path::new(&outfile).exists());
assert_eq!(
keypair_vec,
read_keypair_file(&outfile).unwrap().0.to_bytes().to_vec()
);
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
assert_eq!(
File::open(&outfile)
.expect("open")
.metadata()
.expect("metadata")
.permissions()
.mode()
& 0o777,
0o600
);
}
assert_eq!(
read_keypair_file(&outfile).unwrap().pubkey().as_ref().len(),
mem::size_of::<Pubkey>()
);
fs::remove_file(&outfile).unwrap();
assert!(!Path::new(&outfile).exists());
}
#[test]
fn test_write_keypair_file_overwrite_ok() {
let outfile = tmp_file_path("test_write_keypair_file_overwrite_ok.json");
write_keypair_file(&Keypair::new(), &outfile).unwrap();
write_keypair_file(&Keypair::new(), &outfile).unwrap();
}
#[test]
fn test_write_keypair_file_truncate() {
let outfile = tmp_file_path("test_write_keypair_file_truncate.json");
write_keypair_file(&Keypair::new(), &outfile).unwrap();
read_keypair_file(&outfile).unwrap();
// Ensure outfile is truncated
{
let mut f = File::create(&outfile).unwrap();
f.write_all(String::from_utf8([b'a'; 2048].to_vec()).unwrap().as_bytes())
.unwrap();
}
write_keypair_file(&Keypair::new(), &outfile).unwrap();
read_keypair_file(&outfile).unwrap();
}
#[test]
fn test_keypair_from_seed() {
let good_seed = vec![0; 32];
assert!(keypair_from_seed(&good_seed).is_ok());
let too_short_seed = vec![0; 31];
assert!(keypair_from_seed(&too_short_seed).is_err());
}
#[test]
fn test_signature_fromstr() {
let signature = Keypair::new().sign_message(&[0u8]);
let mut signature_base58_str = bs58::encode(signature).into_string();
assert_eq!(signature_base58_str.parse::<Signature>(), Ok(signature));
signature_base58_str.push_str(&bs58::encode(signature.0).into_string());
assert_eq!(
signature_base58_str.parse::<Signature>(),
Err(ParseSignatureError::WrongSize)
);
signature_base58_str.truncate(signature_base58_str.len() / 2);
assert_eq!(signature_base58_str.parse::<Signature>(), Ok(signature));
signature_base58_str.truncate(signature_base58_str.len() / 2);
assert_eq!(
signature_base58_str.parse::<Signature>(),
Err(ParseSignatureError::WrongSize)
);
let mut signature_base58_str = bs58::encode(signature.0).into_string();
assert_eq!(signature_base58_str.parse::<Signature>(), Ok(signature));
// throw some non-base58 stuff in there
signature_base58_str.replace_range(..1, "I");
assert_eq!(
signature_base58_str.parse::<Signature>(),
Err(ParseSignatureError::Invalid)
);
}
#[test]
fn test_keypair_from_seed_phrase_and_passphrase() {
let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English);
let passphrase = "42";
let seed = Seed::new(&mnemonic, passphrase);
let expected_keypair = keypair_from_seed(seed.as_bytes()).unwrap();
let keypair =
keypair_from_seed_phrase_and_passphrase(mnemonic.phrase(), passphrase).unwrap();
assert_eq!(keypair.pubkey(), expected_keypair.pubkey());
}
#[test]
fn test_keypair() {
let keypair = keypair_from_seed(&[0u8; 32]).unwrap();
let pubkey = keypair.pubkey();
let data = [1u8];
let sig = keypair.sign_message(&data);
// Signer
assert_eq!(keypair.try_pubkey().unwrap(), pubkey);
assert_eq!(keypair.pubkey(), pubkey);
assert_eq!(keypair.try_sign_message(&data).unwrap(), sig);
assert_eq!(keypair.sign_message(&data), sig);
// PartialEq
let keypair2 = keypair_from_seed(&[0u8; 32]).unwrap();
assert_eq!(keypair, keypair2);
}
#[test]
fn test_presigner() {
let keypair = keypair_from_seed(&[0u8; 32]).unwrap();
let pubkey = keypair.pubkey();
let data = [1u8];
let sig = keypair.sign_message(&data);
// Signer
let presigner = Presigner::new(&pubkey, &sig);
assert_eq!(presigner.try_pubkey().unwrap(), pubkey);
assert_eq!(presigner.pubkey(), pubkey);
assert_eq!(presigner.try_sign_message(&data).unwrap(), sig);
assert_eq!(presigner.sign_message(&data), sig);
let bad_data = [2u8];
assert!(presigner.try_sign_message(&bad_data).is_err());
assert_eq!(presigner.sign_message(&bad_data), Signature::default());
// PartialEq
assert_eq!(presigner, keypair);
assert_eq!(keypair, presigner);
let presigner2 = Presigner::new(&pubkey, &sig);
assert_eq!(presigner, presigner2);
}
} | |
c_d_r_management_api_client.go | // Code generated by go-swagger; DO NOT EDIT.
package client
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"net/url"
| "github.com/go-openapi/strfmt"
"github.com/Cyclops-Labs/cyclops-4-hpc.git/services/cdr/client/status_management"
"github.com/Cyclops-Labs/cyclops-4-hpc.git/services/cdr/client/trigger_management"
"github.com/Cyclops-Labs/cyclops-4-hpc.git/services/cdr/client/usage_management"
)
const (
// DefaultHost is the default Host
// found in Meta (info) section of spec file
DefaultHost string = "localhost:8000"
// DefaultBasePath is the default BasePath
// found in Meta (info) section of spec file
DefaultBasePath string = "/api/v1.0"
)
// DefaultSchemes are the default schemes found in Meta (info) section of spec file
var DefaultSchemes = []string{"http", "https"}
type Config struct {
// URL is the base URL of the upstream server
URL *url.URL
// Transport is an inner transport for the client
Transport http.RoundTripper
// AuthInfo is for authentication
AuthInfo runtime.ClientAuthInfoWriter
}
// New creates a new c d r management API HTTP client.
func New(c Config) *CDRManagementAPI {
var (
host = DefaultHost
basePath = DefaultBasePath
schemes = DefaultSchemes
)
if c.URL != nil {
host = c.URL.Host
basePath = c.URL.Path
schemes = []string{c.URL.Scheme}
}
transport := rtclient.New(host, basePath, schemes)
if c.Transport != nil {
transport.Transport = c.Transport
}
cli := new(CDRManagementAPI)
cli.Transport = transport
cli.StatusManagement = status_management.New(transport, strfmt.Default, c.AuthInfo)
cli.TriggerManagement = trigger_management.New(transport, strfmt.Default, c.AuthInfo)
cli.UsageManagement = usage_management.New(transport, strfmt.Default, c.AuthInfo)
return cli
}
// CDRManagementAPI is a client for c d r management API
type CDRManagementAPI struct {
StatusManagement *status_management.Client
TriggerManagement *trigger_management.Client
UsageManagement *usage_management.Client
Transport runtime.ClientTransport
} | "github.com/go-openapi/runtime"
rtclient "github.com/go-openapi/runtime/client" |
lib.rs | // Copyright (c) 2017 Martijn Rijkeboer <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Library for hashing passwords using
//! [Argon2](https://github.com/P-H-C/phc-winner-argon2), the password-hashing
//! function that won the
//! [Password Hashing Competition (PHC)](https://password-hashing.net).
//!
//! # Usage
//!
//! To use this crate, add the following to your Cargo.toml:
//!
//! ```toml
//! [dependencies]
//! rust-argon2 = "0.7"
//! ```
//!
//! And the following to your crate root:
//!
//! ```rust
//! extern crate argon2;
//! ```
//!
//! # Examples
//!
//! Create a password hash using the defaults and verify it:
//!
//! ```rust
//! use argon2::{self, Config};
//!
//! let password = b"password";
//! let salt = b"randomsalt";
//! let config = Config::default();
//! let hash = argon2::hash_encoded(password, salt, &config).unwrap();
//! let matches = argon2::verify_encoded(&hash, password).unwrap();
//! assert!(matches);
//! ```
//!
//! Create a password hash with custom settings and verify it:
//!
//! ```rust
//! use argon2::{self, Config, ThreadMode, Variant, Version};
//!
//! let password = b"password";
//! let salt = b"othersalt";
//! let config = Config {
//! variant: Variant::Argon2i,
//! version: Version::Version13,
//! mem_cost: 65536,
//! time_cost: 10,
//! lanes: 4,
//! thread_mode: ThreadMode::Parallel,
//! secret: &[],
//! ad: &[],
//! hash_length: 32
//! };
//! let hash = argon2::hash_encoded(password, salt, &config).unwrap();
//! let matches = argon2::verify_encoded(&hash, password).unwrap();
//! assert!(matches);
//! ```
//!
//! # Limitations
//!
//! This crate has the same limitation as the `blake2-rfc` crate that it uses. | //! require help from the compiler. It's better to not attempt to do so than to
//! present a false assurance.
//!
//! This version uses the standard implementation and does not yet implement
//! optimizations. Therefore, it is not the fastest implementation available.
mod argon2;
mod block;
mod common;
mod config;
mod context;
mod core;
mod decoded;
mod encoding;
mod error;
mod memory;
mod result;
mod thread_mode;
mod variant;
mod version;
pub use crate::argon2::*;
pub use crate::config::Config;
pub use crate::error::Error;
pub use crate::result::Result;
pub use crate::thread_mode::ThreadMode;
pub use crate::variant::Variant;
pub use crate::version::Version; | //! It does not attempt to clear potentially sensitive data from its work
//! memory. To do so correctly without a heavy performance penalty would |
Min_test.go | // =================================================================
//
// Copyright (C) 2019 Spatial Current, Inc. - All Rights Reserved
// Released as open source under the MIT License. See LICENSE file.
//
// =================================================================
package math
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestMinUInt8s(t *testing.T) {
in := []uint8{2, 1, 3}
out, err := Min(in)
assert.NoError(t, err)
assert.Equal(t, uint8(1), out)
}
func TestMinInts(t *testing.T) {
in := []int{2, 1, 3}
out, err := Min(in)
assert.NoError(t, err)
assert.Equal(t, 1, out)
}
func TestMinInt32s(t *testing.T) {
in := []int32{2, 1, 3}
out, err := Min(in)
assert.NoError(t, err)
assert.Equal(t, int32(1), out)
}
func TestMinInt64s(t *testing.T) {
in := []int64{2, 1, 3}
out, err := Min(in)
assert.NoError(t, err)
assert.Equal(t, int64(1), out)
}
func | (t *testing.T) {
in := []float64{2.22, 1.11, 3.33}
out, err := Min(in)
assert.NoError(t, err)
assert.Equal(t, 1.11, out)
}
func TestMinDuration(t *testing.T) {
in := []time.Duration{
time.Hour * 2,
time.Hour * 3,
time.Hour * 1,
}
out, err := Min(in)
assert.NoError(t, err)
assert.Equal(t, time.Hour*1, out)
}
func TestMinTimes(t *testing.T) {
now := time.Now()
in := []time.Time{
now.Add(time.Hour * 1),
now,
now.Add(time.Minute * 2),
}
out, err := Min(in)
assert.NoError(t, err)
assert.Equal(t, now, out)
}
func TestMinInterface(t *testing.T) {
in := []interface{}{
2,
3,
2.22,
uint8(8),
int64(2),
2.0,
}
out, err := Min(in)
assert.NoError(t, err)
assert.Equal(t, 2, out)
}
func TestMinErrorEmpty(t *testing.T) {
in := []interface{}{}
out, err := Min(in)
assert.Equal(t, ErrEmptyInput, err)
assert.Nil(t, out)
}
func TestMinErrorComparison(t *testing.T) {
in := []interface{}{
2,
4,
time.Now(),
}
out, err := Min(in)
assert.IsType(t, &ErrInvalidComparison{}, err)
assert.Nil(t, out)
}
| TestMinFloats |
rtree.go | package rtree
import (
"github.com/liov/tiga/utils/structure/tree/rtree/base"
"math"
"sync"
)
type Iterator func(item Item) bool
type Item interface {
Rect(ctx interface{}) (min []float64, max []float64)
}
type RTree struct {
dims int
maxEntries int
ctx interface{}
trs []*base.RTree
used int
}
func New(ctx interface{}) *RTree {
tr := &RTree{
ctx: ctx,
dims: 20,
maxEntries: 13,
}
tr.trs = make([]*base.RTree, 20)
return tr
}
func (tr *RTree) Insert(item Item) {
if item == nil {
panic("nil item")
}
min, max := item.Rect(tr.ctx)
if len(min) != len(max) {
return // just return
panic("invalid item rectangle")
}
if len(min) < 1 || len(min) > len(tr.trs) {
return // just return
panic("invalid dimension")
}
btr := tr.trs[len(min)-1]
if btr == nil {
btr = base.New(len(min), tr.maxEntries)
tr.trs[len(min)-1] = btr
tr.used++
}
amin := make([]float64, len(min))
amax := make([]float64, len(max))
for i := 0; i < len(min); i++ {
amin[i], amax[i] = min[i], max[i]
}
btr.Insert(amin, amax, item)
}
func (tr *RTree) Remove(item Item) {
if item == nil {
panic("nil item")
}
min, max := item.Rect(tr.ctx)
if len(min) != len(max) {
return // just return
panic("invalid item rectangle")
}
if len(min) < 1 || len(min) > len(tr.trs) {
return // just return
panic("invalid dimension")
}
btr := tr.trs[len(min)-1]
if btr == nil {
return
}
amin := make([]float64, len(min))
amax := make([]float64, len(max))
for i := 0; i < len(min); i++ {
amin[i], amax[i] = min[i], max[i]
}
btr.Remove(amin, amax, item)
if btr.IsEmpty() {
tr.trs[len(min)-1] = nil
tr.used--
}
}
func (tr *RTree) Reset() {
for i := 0; i < len(tr.trs); i++ {
tr.trs[i] = nil
}
tr.used = 0
}
func (tr *RTree) Count() int {
var count int
for _, btr := range tr.trs {
if btr != nil {
count += btr.Count()
}
}
return count
}
func (tr *RTree) Search(bounds Item, iter Iterator) {
if bounds == nil {
panic("nil bounds being used for search")
}
min, max := bounds.Rect(tr.ctx)
if len(min) != len(max) {
return // just return
panic("invalid item rectangle")
}
if len(min) < 1 || len(min) > len(tr.trs) {
return // just return
panic("invalid dimension")
}
used := tr.used
for i, btr := range tr.trs {
if used == 0 {
break
}
if btr != nil {
if !search(btr, min, max, i+1, iter) {
return
}
used--
}
}
}
func search(btr *base.RTree, min, max []float64, dims int, iter Iterator) bool {
amin := make([]float64, dims)
amax := make([]float64, dims)
for i := 0; i < dims; i++ {
if i < len(min) | else {
amin[i] = math.Inf(-1)
amax[i] = math.Inf(+1)
}
}
var ended bool
btr.Search(amin, amax, func(item interface{}) bool {
if !iter(item.(Item)) {
ended = true
return false
}
return true
})
return !ended
}
func (tr *RTree) KNN(bounds Item, center bool, iter func(item Item, dist float64) bool) {
if bounds == nil {
panic("nil bounds being used for search")
}
min, max := bounds.Rect(tr.ctx)
if len(min) != len(max) {
return // just return
panic("invalid item rectangle")
}
if len(min) < 1 || len(min) > len(tr.trs) {
return // just return
panic("invalid dimension")
}
if tr.used == 0 {
return
}
if tr.used == 1 {
for i, btr := range tr.trs {
if btr != nil {
knn(btr, min, max, center, i+1, func(item interface{}, dist float64) bool {
return iter(item.(Item), dist)
})
break
}
}
return
}
type queueT struct {
done bool
step int
item Item
dist float64
}
var mu sync.Mutex
var ended bool
queues := make(map[int][]queueT)
cond := sync.NewCond(&mu)
for i, btr := range tr.trs {
if btr != nil {
dims := i + 1
mu.Lock()
queues[dims] = []queueT{}
cond.Signal()
mu.Unlock()
go func(dims int, btr *base.RTree) {
knn(btr, min, max, center, dims, func(item interface{}, dist float64) bool {
mu.Lock()
if ended {
mu.Unlock()
return false
}
queues[dims] = append(queues[dims], queueT{item: item.(Item), dist: dist})
cond.Signal()
mu.Unlock()
return true
})
mu.Lock()
queues[dims] = append(queues[dims], queueT{done: true})
cond.Signal()
mu.Unlock()
}(dims, btr)
}
}
mu.Lock()
for {
ready := true
for i := range queues {
if len(queues[i]) == 0 {
ready = false
break
}
if queues[i][0].done {
delete(queues, i)
}
}
if len(queues) == 0 {
break
}
if ready {
var j int
var minDist float64
var minItem Item
var minQueue int
for i := range queues {
if j == 0 || queues[i][0].dist < minDist {
minDist = queues[i][0].dist
minItem = queues[i][0].item
minQueue = i
}
}
queues[minQueue] = queues[minQueue][1:]
if !iter(minItem, minDist) {
ended = true
break
}
continue
}
cond.Wait()
}
mu.Unlock()
}
func knn(btr *base.RTree, min, max []float64, center bool, dims int, iter func(item interface{}, dist float64) bool) bool {
amin := make([]float64, dims)
amax := make([]float64, dims)
for i := 0; i < dims; i++ {
if i < len(min) {
amin[i] = min[i]
amax[i] = max[i]
} else {
amin[i] = math.Inf(-1)
amax[i] = math.Inf(+1)
}
}
var ended bool
btr.KNN(amin, amax, center, func(item interface{}, dist float64) bool {
if !iter(item.(Item), dist) {
ended = true
return false
}
return true
})
return !ended
}
| {
amin[i] = min[i]
amax[i] = max[i]
} |
i2c.rs | #![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! Inter-integrated circuit
//!
//! Used by: stm32g030, stm32g031, stm32g041, stm32g070, stm32g071, stm32g07x, stm32g081
#[cfg(not(feature = "nosync"))]
pub use crate::stm32g0::peripherals::i2c::Instance;
pub use crate::stm32g0::peripherals::i2c::{RegisterBlock, ResetValues};
pub use crate::stm32g0::peripherals::i2c::{
CR1, CR2, ICR, ISR, OAR1, OAR2, PECR, RXDR, TIMEOUTR, TIMINGR, TXDR,
};
/// Access functions for the I2C1 peripheral instance
pub mod I2C1 {
use super::ResetValues;
#[cfg(not(feature = "nosync"))]
use super::Instance;
#[cfg(not(feature = "nosync"))]
const INSTANCE: Instance = Instance {
addr: 0x40005400,
_marker: ::core::marker::PhantomData,
};
/// Reset values for each field in I2C1
pub const reset: ResetValues = ResetValues {
CR1: 0x00000000,
CR2: 0x00000000,
OAR1: 0x00000000,
OAR2: 0x00000000,
TIMINGR: 0x00000000,
TIMEOUTR: 0x00000000,
ISR: 0x00000001,
ICR: 0x00000000,
PECR: 0x00000000,
RXDR: 0x00000000,
TXDR: 0x00000000,
};
#[cfg(not(feature = "nosync"))]
#[allow(renamed_and_removed_lints)]
#[allow(private_no_mangle_statics)]
#[no_mangle]
static mut I2C1_TAKEN: bool = false;
/// Safe access to I2C1
///
/// This function returns `Some(Instance)` if this instance is not
/// currently taken, and `None` if it is. This ensures that if you
/// do get `Some(Instance)`, you are ensured unique access to
/// the peripheral and there cannot be data races (unless other
/// code uses `unsafe`, of course). You can then pass the
/// `Instance` around to other functions as required. When you're
/// done with it, you can call `release(instance)` to return it.
///
/// `Instance` itself dereferences to a `RegisterBlock`, which
/// provides access to the peripheral's registers.
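    ///
    /// A minimal usage sketch (the variable name is illustrative; the calls are the
    /// `take`/`release` functions defined in this module):
    ///
    /// ```ignore
    /// if let Some(i2c1) = I2C1::take() {
    ///     // `i2c1` dereferences to a `RegisterBlock`, so registers can be used here.
    ///     I2C1::release(i2c1);
    /// }
    /// ```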
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn take() -> Option<Instance> {
external_cortex_m::interrupt::free(|_| unsafe {
if I2C1_TAKEN {
None
} else {
I2C1_TAKEN = true;
Some(INSTANCE)
}
})
}
/// Release exclusive access to I2C1
///
/// This function allows you to return an `Instance` so that it
/// is available to `take()` again. This function will panic if
/// you return a different `Instance` or if this instance is not
/// already taken.
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn release(inst: Instance) {
external_cortex_m::interrupt::free(|_| unsafe {
if I2C1_TAKEN && inst.addr == INSTANCE.addr {
I2C1_TAKEN = false;
} else {
panic!("Released a peripheral which was not taken");
}
});
}
/// Unsafely steal I2C1
///
/// This function is similar to take() but forcibly takes the
    /// Instance, marking it as taken regardless of its previous
/// state.
#[cfg(not(feature = "nosync"))]
#[inline]
pub unsafe fn | () -> Instance {
I2C1_TAKEN = true;
INSTANCE
}
}
/// Raw pointer to I2C1
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const I2C1: *const RegisterBlock = 0x40005400 as *const _;
/// Access functions for the I2C2 peripheral instance
pub mod I2C2 {
use super::ResetValues;
#[cfg(not(feature = "nosync"))]
use super::Instance;
#[cfg(not(feature = "nosync"))]
const INSTANCE: Instance = Instance {
addr: 0x40005800,
_marker: ::core::marker::PhantomData,
};
/// Reset values for each field in I2C2
pub const reset: ResetValues = ResetValues {
CR1: 0x00000000,
CR2: 0x00000000,
OAR1: 0x00000000,
OAR2: 0x00000000,
TIMINGR: 0x00000000,
TIMEOUTR: 0x00000000,
ISR: 0x00000001,
ICR: 0x00000000,
PECR: 0x00000000,
RXDR: 0x00000000,
TXDR: 0x00000000,
};
#[cfg(not(feature = "nosync"))]
#[allow(renamed_and_removed_lints)]
#[allow(private_no_mangle_statics)]
#[no_mangle]
static mut I2C2_TAKEN: bool = false;
/// Safe access to I2C2
///
/// This function returns `Some(Instance)` if this instance is not
/// currently taken, and `None` if it is. This ensures that if you
/// do get `Some(Instance)`, you are ensured unique access to
/// the peripheral and there cannot be data races (unless other
/// code uses `unsafe`, of course). You can then pass the
/// `Instance` around to other functions as required. When you're
/// done with it, you can call `release(instance)` to return it.
///
/// `Instance` itself dereferences to a `RegisterBlock`, which
/// provides access to the peripheral's registers.
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn take() -> Option<Instance> {
external_cortex_m::interrupt::free(|_| unsafe {
if I2C2_TAKEN {
None
} else {
I2C2_TAKEN = true;
Some(INSTANCE)
}
})
}
/// Release exclusive access to I2C2
///
/// This function allows you to return an `Instance` so that it
/// is available to `take()` again. This function will panic if
/// you return a different `Instance` or if this instance is not
/// already taken.
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn release(inst: Instance) {
external_cortex_m::interrupt::free(|_| unsafe {
if I2C2_TAKEN && inst.addr == INSTANCE.addr {
I2C2_TAKEN = false;
} else {
panic!("Released a peripheral which was not taken");
}
});
}
/// Unsafely steal I2C2
///
/// This function is similar to take() but forcibly takes the
    /// Instance, marking it as taken regardless of its previous
/// state.
#[cfg(not(feature = "nosync"))]
#[inline]
pub unsafe fn steal() -> Instance {
I2C2_TAKEN = true;
INSTANCE
}
}
/// Raw pointer to I2C2
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const I2C2: *const RegisterBlock = 0x40005800 as *const _;
| steal |
test_sum.py | import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import sum
def test_sum():
| assert sum.sum(3, 4) == 7 |
|
haproxy.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ravi Bhure <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: haproxy
short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands
author:
- Ravi Bhure (@ravibhure)
description:
- Enable, disable, drain and set weights for HAProxy backend servers using socket commands.
notes:
- Enable, disable and drain commands are restricted and can only be issued on
sockets configured for level 'admin'. For example, you can add the line
'stats socket /var/run/haproxy.sock level admin' to the general section of
haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt).
- Depends on netcat (nc) being available; you need to install the appropriate
package for your operating system before this module can be used.
options:
backend:
description:
- Name of the HAProxy backend pool.
- If this parameter is unset, it will be auto-detected.
type: str
drain:
description:
- Wait until the server has no active connections or until the timeout
determined by wait_interval and wait_retries is reached.
- Continue only after the status changes to 'MAINT'.
- This overrides the shutdown_sessions option.
type: bool
default: false
host:
description:
- Name of the backend host to change.
type: str
required: true
shutdown_sessions:
description:
- When disabling a server, immediately terminate all the sessions attached
to the specified server.
- This can be used to terminate long-running sessions after a server is put
into maintenance mode. Overridden by the drain option.
type: bool
default: no
socket:
description:
- Path to the HAProxy socket file.
type: path
default: /var/run/haproxy.sock
state:
description:
- Desired state of the provided backend host.
- Note that C(drain) state was added in version 2.4.
- It is supported only by HAProxy version 1.5 or later.
- When used on versions < 1.5, it will be ignored.
type: str
required: true
choices: [ disabled, drain, enabled ]
agent:
description:
- Disable/enable agent checks (depending on I(state) value).
type: bool
default: no
version_added: 1.0.0
health:
description:
- Disable/enable health checks (depending on I(state) value).
type: bool
default: no
version_added: "1.0.0"
fail_on_not_found:
description:
- Fail whenever trying to enable/disable a backend host that does not exist.
type: bool
default: no
wait:
description:
- Wait until the server reports a status of 'UP' when C(state=enabled),
status of 'MAINT' when C(state=disabled) or status of 'DRAIN' when C(state=drain)
type: bool
default: no
wait_interval:
description:
- Number of seconds to wait between retries.
type: int
default: 5
wait_retries:
description:
- Number of times to check for status after changing the state.
type: int
default: 25
weight:
description:
- The value passed in argument.
- If the value ends with the `%` sign, then the new weight will be
relative to the initially configured weight.
- Relative weights are only permitted between 0 and 100% and absolute
weights are permitted between 0 and 256.
type: str
'''
EXAMPLES = r'''
- name: Disable server in 'www' backend pool
community.general.haproxy:
state: disabled
host: '{{ inventory_hostname }}'
backend: www
- name: Disable server in 'www' backend pool, also stop health/agent checks
community.general.haproxy:
state: disabled
host: '{{ inventory_hostname }}'
health: yes
agent: yes
- name: Disable server without backend pool name (apply to all available backend pool)
community.general.haproxy:
state: disabled
host: '{{ inventory_hostname }}'
- name: Disable server, provide socket file
community.general.haproxy:
state: disabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
- name: Disable server, provide socket file, wait until status reports in maintenance
community.general.haproxy:
state: disabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
wait: yes
# Place server in drain mode, providing a socket file. Then check the server's
# status every minute to see if it changes to maintenance mode, continuing if it
# does in an hour and failing otherwise.
- community.general.haproxy:
state: disabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
wait: yes
drain: yes
wait_interval: 60
wait_retries: 60
- name: Disable backend server in 'www' backend pool and drop open sessions to it
community.general.haproxy:
state: disabled
host: '{{ inventory_hostname }}'
backend: www
socket: /var/run/haproxy.sock
shutdown_sessions: yes
- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found
community.general.haproxy:
state: disabled
host: '{{ inventory_hostname }}'
fail_on_not_found: yes
- name: Enable server in 'www' backend pool
community.general.haproxy:
state: enabled
host: '{{ inventory_hostname }}'
backend: www
- name: Enable server in 'www' backend pool wait until healthy
community.general.haproxy:
state: enabled
host: '{{ inventory_hostname }}'
backend: www
wait: yes
- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health
community.general.haproxy:
state: enabled
host: '{{ inventory_hostname }}'
backend: www
wait: yes
wait_retries: 10
wait_interval: 5
- name: Enable server in 'www' backend pool with change server(s) weight
community.general.haproxy:
state: enabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
weight: 10
backend: www
- name: Set the server in 'www' backend pool to drain mode
community.general.haproxy:
state: drain
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
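# Illustrative sketch: the weight option also accepts a relative value ending in
# '%', as described in the weight documentation above.
- name: Enable server in 'www' backend pool with a relative weight of 50%
  community.general.haproxy:
    state: enabled
    host: '{{ inventory_hostname }}'
    socket: /var/run/haproxy.sock
    weight: '50%'
    backend: www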
'''
import csv
import socket
import time
from string import Template
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_text
DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock"
RECV_SIZE = 1024
ACTION_CHOICES = ['enabled', 'disabled', 'drain']
WAIT_RETRIES = 25
WAIT_INTERVAL = 5
######################################################################
class TimeoutException(Exception):
pass
class HAProxy(object):
"""
Used for communicating with HAProxy through its local UNIX socket interface.
Perform common HAProxy tasks such as enabling and disabling backend servers.
The complete set of external commands Haproxy handles is documented
on their website:
http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands
"""
def __init__(self, module):
self.module = module
self.state = self.module.params['state']
self.host = self.module.params['host']
self.backend = self.module.params['backend']
self.weight = self.module.params['weight']
self.socket = self.module.params['socket']
self.shutdown_sessions = self.module.params['shutdown_sessions']
self.fail_on_not_found = self.module.params['fail_on_not_found']
self.agent = self.module.params['agent']
self.health = self.module.params['health']
self.wait = self.module.params['wait']
self.wait_retries = self.module.params['wait_retries']
self.wait_interval = self.module.params['wait_interval']
self._drain = self.module.params['drain']
self.command_results = {}
def execute(self, cmd, timeout=200, capture_output=True):
"""
Executes a HAProxy command by sending a message to HAProxy's local
UNIX socket and waiting up to 'timeout' milliseconds for the response.
"""
self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.client.connect(self.socket)
self.client.sendall(to_bytes('%s\n' % cmd))
result = b''
buf = b''
buf = self.client.recv(RECV_SIZE)
while buf:
result += buf
buf = self.client.recv(RECV_SIZE)
result = to_text(result, errors='surrogate_or_strict')
if capture_output:
self.capture_command_output(cmd, result.strip())
self.client.close()
return result
def capture_command_output(self, cmd, output):
"""
Capture the output for a command
"""
if 'command' not in self.command_results:
self.command_results['command'] = []
self.command_results['command'].append(cmd)
if 'output' not in self.command_results:
self.command_results['output'] = []
self.command_results['output'].append(output)
def | (self):
"""
Discover all entries with svname = 'BACKEND' and return a list of their corresponding
pxnames
"""
data = self.execute('show stat', 200, False).lstrip('# ')
r = csv.DictReader(data.splitlines())
return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r)))
def discover_version(self):
"""
Attempt to extract the haproxy version.
Return a tuple containing major and minor version.
"""
data = self.execute('show info', 200, False)
lines = data.splitlines()
line = [x for x in lines if 'Version:' in x]
try:
version_values = line[0].partition(':')[2].strip().split('.', 3)
version = (int(version_values[0]), int(version_values[1]))
except (ValueError, TypeError, IndexError):
version = None
return version
def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None):
"""
Run some command on the specified backends. If no backends are provided they will
be discovered automatically (all backends)
"""
# Discover backends if none are given
if pxname is None:
backends = self.discover_all_backends()
else:
backends = [pxname]
# Run the command for each requested backend
for backend in backends:
# Fail when backends were not found
state = self.get_state_for(backend, svname)
if (self.fail_on_not_found) and state is None:
self.module.fail_json(
msg="The specified backend '%s/%s' was not found!" % (backend, svname))
if state is not None:
self.execute(Template(cmd).substitute(pxname=backend, svname=svname))
if self.wait:
self.wait_until_status(backend, svname, wait_for_status)
def get_state_for(self, pxname, svname):
"""
Find the state of specific services. When pxname is not set, get all backends for a specific host.
Returns a list of dictionaries containing the status and weight for those services.
"""
data = self.execute('show stat', 200, False).lstrip('# ')
r = csv.DictReader(data.splitlines())
state = tuple(
map(
lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']},
filter(lambda d: (pxname is None or d['pxname']
== pxname) and d['svname'] == svname, r)
)
)
return state or None
def wait_until_status(self, pxname, svname, status):
"""
Wait for a service to reach the specified status. Try RETRIES times
with INTERVAL seconds of sleep in between. If the service has not reached
the expected status in that time, the module will fail. If the service was
not found, the module will fail.
"""
for i in range(1, self.wait_retries):
state = self.get_state_for(pxname, svname)
# We can assume there will only be 1 element in state because both svname and pxname are always set when we get here
# When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching
if status in state[0]['status']:
if not self._drain or state[0]['scur'] == '0':
return True
time.sleep(self.wait_interval)
self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." %
(pxname, svname, status, self.wait_retries))
def enabled(self, host, backend, weight):
"""
Enabled action: marks the server UP and re-enables checks. It also
reads the current weight for the server (default) and sets the weight
for the HAProxy backend server when one is provided.
"""
cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
if self.agent:
cmd += "; enable agent $pxname/$svname"
if self.health:
cmd += "; enable health $pxname/$svname"
if weight:
cmd += "; set weight $pxname/$svname %s" % weight
self.execute_for_backends(cmd, backend, host, 'UP')
def disabled(self, host, backend, shutdown_sessions):
"""
Disabled action: marks the server DOWN for maintenance. In this mode, no more checks will be
performed on the server until it leaves maintenance;
it can also shut down open sessions while disabling the backend host server.
"""
cmd = "get weight $pxname/$svname"
if self.agent:
cmd += "; disable agent $pxname/$svname"
if self.health:
cmd += "; disable health $pxname/$svname"
cmd += "; disable server $pxname/$svname"
if shutdown_sessions:
cmd += "; shutdown sessions server $pxname/$svname"
self.execute_for_backends(cmd, backend, host, 'MAINT')
def drain(self, host, backend, status='DRAIN'):
"""
Drain action, sets the server to DRAIN mode.
In this mode, the server will not accept any new connections
other than those that are accepted via persistence.
"""
haproxy_version = self.discover_version()
# check if haproxy version supports DRAIN state (starting with 1.5)
if haproxy_version and (1, 5) <= haproxy_version:
cmd = "set server $pxname/$svname state drain"
self.execute_for_backends(cmd, backend, host, "DRAIN")
if status == "MAINT":
self.disabled(host, backend, self.shutdown_sessions)
def act(self):
"""
Figure out what you want to do from ansible, and then do it.
"""
# Get the state before the run
self.command_results['state_before'] = self.get_state_for(self.backend, self.host)
# toggle enable/disable server
if self.state == 'enabled':
self.enabled(self.host, self.backend, self.weight)
elif self.state == 'disabled' and self._drain:
self.drain(self.host, self.backend, status='MAINT')
elif self.state == 'disabled':
self.disabled(self.host, self.backend, self.shutdown_sessions)
elif self.state == 'drain':
self.drain(self.host, self.backend)
else:
self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
# Get the state after the run
self.command_results['state_after'] = self.get_state_for(self.backend, self.host)
# Report change status
self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after'])
self.module.exit_json(**self.command_results)
def main():
# load ansible module object
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', required=True, choices=ACTION_CHOICES),
host=dict(type='str', required=True),
backend=dict(type='str'),
weight=dict(type='str'),
socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION),
shutdown_sessions=dict(type='bool', default=False),
fail_on_not_found=dict(type='bool', default=False),
health=dict(type='bool', default=False),
agent=dict(type='bool', default=False),
wait=dict(type='bool', default=False),
wait_retries=dict(type='int', default=WAIT_RETRIES),
wait_interval=dict(type='int', default=WAIT_INTERVAL),
drain=dict(type='bool', default=False),
),
)
if not socket:
module.fail_json(msg="unable to locate haproxy socket")
ansible_haproxy = HAProxy(module)
ansible_haproxy.act()
if __name__ == '__main__':
main()
| discover_all_backends |
0005_contact_company.py | # Generated by Django 2.2.10 on 2020-04-23 10:29
from django.db import migrations, models
import django.db.models.deletion
class | (migrations.Migration):
dependencies = [
("common", "0020_auto_20200409_1653"),
("contacts", "0004_contact_teams"),
]
operations = [
migrations.AddField(
model_name="contact",
name="company",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="common.Company",
),
),
]
| Migration |
Account_Guarantor.rs | #![allow(unused_imports, non_camel_case_types)]
use crate::models::r4::Element::Element;
use crate::models::r4::Extension::Extension;
use crate::models::r4::Period::Period;
use crate::models::r4::Reference::Reference;
use serde_json::json;
use serde_json::value::Value;
use std::borrow::Cow;
/// A financial tool for tracking value accrued for a particular purpose. In the
/// healthcare field, used to track charges for a patient, cost centers, etc.
#[derive(Debug)]
pub struct Account_Guarantor<'a> {
pub(crate) value: Cow<'a, Value>,
}
impl Account_Guarantor<'_> {
pub fn new(value: &Value) -> Account_Guarantor {
Account_Guarantor {
value: Cow::Borrowed(value),
}
}
pub fn to_json(&self) -> Value {
(*self.value).clone()
}
/// Extensions for onHold
pub fn _on_hold(&self) -> Option<Element> {
if let Some(val) = self.value.get("_onHold") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element. To make the use of extensions safe and manageable,
/// there is a strict set of governance applied to the definition and use of
/// extensions. Though any implementer can define an extension, there is a set of
/// requirements that SHALL be met as part of the definition of the extension.
pub fn extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("extension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// Unique id for the element within a resource (for internal references). This may be
/// any string value that does not contain spaces.
pub fn id(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("id") {
return Some(string);
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element and that modifies the understanding of the element
/// in which it is contained and/or the understanding of the containing element's
/// descendants. Usually modifier elements provide negation or qualification. To make
/// the use of extensions safe and manageable, there is a strict set of governance
/// applied to the definition and use of extensions. Though any implementer can define
/// an extension, there is a set of requirements that SHALL be met as part of the
/// definition of the extension. Applications processing a resource are required to
/// check for modifier extensions. Modifier extensions SHALL NOT change the meaning
/// of any elements on Resource or DomainResource (including cannot change the meaning
/// of modifierExtension itself).
pub fn modifier_extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("modifierExtension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// A guarantor may be placed on credit hold or otherwise have their role temporarily
/// suspended.
pub fn on_hold(&self) -> Option<bool> {
if let Some(val) = self.value.get("onHold") {
return Some(val.as_bool().unwrap());
}
return None;
}
/// The entity who is responsible.
pub fn party(&self) -> Reference {
Reference {
value: Cow::Borrowed(&self.value["party"]),
}
}
/// The timeframe during which the guarantor accepts responsibility for the account.
pub fn period(&self) -> Option<Period> {
if let Some(val) = self.value.get("period") {
return Some(Period {
value: Cow::Borrowed(val),
});
}
return None;
}
pub fn validate(&self) -> bool {
if let Some(_val) = self._on_hold() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.id() {}
if let Some(_val) = self.modifier_extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.on_hold() |
if !self.party().validate() {
return false;
}
if let Some(_val) = self.period() {
if !_val.validate() {
return false;
}
}
return true;
}
}
#[derive(Debug)]
pub struct Account_GuarantorBuilder {
pub(crate) value: Value,
}
impl Account_GuarantorBuilder {
pub fn build(&self) -> Account_Guarantor {
Account_Guarantor {
value: Cow::Owned(self.value.clone()),
}
}
pub fn with(existing: Account_Guarantor) -> Account_GuarantorBuilder {
Account_GuarantorBuilder {
value: (*existing.value).clone(),
}
}
pub fn new(party: Reference) -> Account_GuarantorBuilder {
let mut __value: Value = json!({});
__value["party"] = json!(party.value);
return Account_GuarantorBuilder { value: __value };
}
pub fn _on_hold<'a>(&'a mut self, val: Element) -> &'a mut Account_GuarantorBuilder {
self.value["_onHold"] = json!(val.value);
return self;
}
pub fn extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut Account_GuarantorBuilder {
self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn id<'a>(&'a mut self, val: &str) -> &'a mut Account_GuarantorBuilder {
self.value["id"] = json!(val);
return self;
}
pub fn modifier_extension<'a>(
&'a mut self,
val: Vec<Extension>,
) -> &'a mut Account_GuarantorBuilder {
self.value["modifierExtension"] =
json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn on_hold<'a>(&'a mut self, val: bool) -> &'a mut Account_GuarantorBuilder {
self.value["onHold"] = json!(val);
return self;
}
pub fn period<'a>(&'a mut self, val: Period) -> &'a mut Account_GuarantorBuilder {
self.value["period"] = json!(val.value);
return self;
}
}
| {} |
Agent.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@FileName : Agent.py
@Author : citang
@Date : 2021/7/27 5:46 PM
@Description : describes the functionality of this file
"""
import sys
from framework import Model, Db, Log, Config, Common
class __Agent__:
"""模块功能"""
def __init__(self, resultype, mod, handler, ip):
self.__DATA = None
self.__RESULTYPE = resultype
self.__APICODE = 200
self.__MODULENAME = mod
self.__HANDLERNAME = handler
self.__REMOTE_IP = ip
def Data(self, name):
"""创建数据模型对象"""
return Model.__ModelData__(name)
def Db(self):
"""创建数据库对象"""
return Db.__MysqlDb__()
def Log(self):
"""创建日志对象"""
return Log.__Module__(self.__MODULENAME, self.__HANDLERNAME)
def __Cache(self):
"""创建缓存对象"""
pass
def GetAppConfig(self, group, name):
"""获取应用程序配置"""
return Config.GetAppConfig(group, name)
| fig.GetSysConfig(group, name)
def SetApiCode(self, code):
"""设置API错误代码"""
self.__APICODE = str(code)
def GetApiCode(self):
"""获取API错误代码"""
return self.__APICODE
def GetRemoteIp(self):
"""获取请求IP"""
return self.__REMOTE_IP
def SetResult(self, data):
"""设置返回内容"""
# If RESULTYPE is not set, setting a result is not allowed
if self.__RESULTYPE == '':
raise Exception('resultype is empty, cant set result')
# Check the data format
if data is None:
raise Exception('must not none of data')
if data.GetName() != self.__RESULTYPE:
raise Exception('router resultype different!')
self.__DATA = data.DumpDict()
def SetDictData(self, data):
"""设置返回内容"""
if not isinstance(data, dict):
raise Exception('data type must be dict')
# Check the data format
if data is None:
raise Exception('must not none of data')
self.__DATA = data
def GetResult(self):
return self.__DATA
def ImportMod(self, mod):
path = Common.ExtendPath(Config.GetSysConfig('AppSettings', 'module_path'))
if '/' in mod:
modPath = mod[0:mod.rfind('/')]
sys.path.append(path + '/' + modPath)
mod = mod[mod.rfind('/') + 1:]
else:
sys.path.append(path)
impmod = __import__(mod)
sys.path.pop()
return impmod
| def GetSysConfig(self, group, name):
"""获取系统配置"""
return Con |
test_tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# =============================================================================
from __future__ import division
import math
import unittest
import random
import numpy as np
from singa import tensor
from singa import singa_wrap as singa_api
from singa import autograd
from cuda_helper import gpu_dev, cpu_dev
class TestTensorMethods(unittest.TestCase):
def setUp(self):
self.shape = (2, 3)
self.t = tensor.Tensor(self.shape)
self.s = tensor.Tensor(self.shape)
self.t.set_value(0)
self.s.set_value(0)
def test_tensor_fields(self):
t = self.t
shape = self.shape
self.assertTupleEqual(t.shape, shape)
self.assertEqual(t.shape[0], shape[0])
self.assertEqual(t.shape[1], shape[1])
self.assertEqual(tensor.product(shape), 2 * 3)
self.assertEqual(t.ndim(), 2)
self.assertEqual(t.size(), 2 * 3)
self.assertEqual(t.memsize(), 2 * 3 * tensor.sizeof(tensor.float32))
self.assertFalse(t.is_transpose())
def test_unary_operators(self):
t = self.t
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 0.0)
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
t -= 0.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23 - 0.23)
t *= 2.5
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5)
t /= 2
self.assertAlmostEqual(
tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5 / 2)
def test_binary_operators(self):
t = self.t
t += 3.2
s = self.s
s += 2.1
a = t + s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 + 2.1, 5)
a = t - s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 - 2.1, 5)
a = t * s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 * 2.1, 5)
''' not implemented yet
a = t / s
self.assertAlmostEqual(tensor.to_numpy(a)[0,0], 3.2/2.1, 5)
'''
def test_comparison_operators(self):
t = self.t
t += 3.45
a = t < 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t <= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = t > 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t >= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = t == 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.lt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.le(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.gt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.ge(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.eq(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
def test_tensor_copy(self):
t = tensor.Tensor((2, 3))
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
tc = t.copy()
tdc = t.deepcopy()
self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 1.23)
self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 2.46)
self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 2.46)
self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
def test_copy_data(self):
t = self.t
t += 1.23
s = self.s
s += 5.43
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
tensor.copy_data_to_from(t, s, 2)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 1], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 2], 1.23)
def test_global_method(self):
t = self.t
t += 12.34
a = tensor.log(t)
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], math.log(12.34))
def test_random(self):
x = tensor.Tensor((1000,))
x.gaussian(1, 0.01)
self.assertAlmostEqual(tensor.average(x), 1, 3)
def test_radd(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 + x
self.assertEqual(tensor.average(y), 2.)
def test_rsub(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 - x
self.assertEqual(tensor.average(y), 0.)
def test_rmul(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 * x
self.assertEqual(tensor.average(y), 2.)
def test_rdiv(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 / x
self.assertEqual(tensor.average(y), 2.)
def matmul_high_dim_helper(self, dev):
configs = [
[(1, 12, 7, 64), (1, 12, 64, 7)],
[(1, 7, 768), (768, 768)],
]
print()
for config in configs:
X = np.random.random(config[0]).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
W = np.random.random(config[1]).astype(np.float32)
w = tensor.from_numpy(W)
w.to_device(dev)
y_t = np.matmul(X, W)
y = autograd.matmul(x, w)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), y_t, 3)
def test_matmul_high_dim_cpu(self):
self.matmul_high_dim_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_high_dim_gpu(self):
self.matmul_high_dim_helper(gpu_dev)
def test_tensor_inplace_api(self):
""" tensor inplace methods alter internal state and also return self
"""
x = tensor.Tensor((3,))
y = x.set_value(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.uniform(1, 2)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.bernoulli(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.gaussian(1, 2)
self.assertTrue(y is x)
def test_numpy_convert(self):
a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.int)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0)
a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.float32)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0.)
def test_transpose(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
A1 = np.transpose(a)
tA1 = tensor.transpose(ta)
TA1 = tensor.to_numpy(tA1)
A2 = np.transpose(a, [0, 2, 1])
tA2 = tensor.transpose(ta, [0, 2, 1])
TA2 = tensor.to_numpy(tA2)
np.testing.assert_array_almost_equal(TA1, A1)
np.testing.assert_array_almost_equal(TA2, A2)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
    def test_gpu_6d_transpose(self, dev=gpu_dev):
        s0 = (2, 3, 4, 5, 6, 7)
        axes1 = [5, 4, 3, 2, 1, 0]
        s1 = (2, 7, 6, 5, 4, 3)
        s2 = (2, 4, 3, 5, 7, 6)
        a = np.random.random(s1)
        ta = tensor.from_numpy(a)
        ta.to_device(dev)
        ta = tensor.reshape(ta, s1)
        ta = tensor.transpose(ta, axes1)
        ta = tensor.reshape(ta, s2)
        a = np.reshape(a, s1)
        a = np.transpose(a, axes1)
        a = np.reshape(a, s2)
        np.testing.assert_array_almost_equal(tensor.to_numpy(ta), a)
def test_einsum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.einsum('kij,kij->kij', a, a)
tres1 = tensor.einsum('kij,kij->kij', ta, ta)
Tres1 = tensor.to_numpy(tres1)
res2 = np.einsum('kij,kih->kjh', a, a)
tres2 = tensor.einsum('kij,kih->kjh', ta, ta)
Tres2 = tensor.to_numpy(tres2)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
self.assertAlmostEqual(np.sum(Tres2 - res2), 0., places=3)
def test_repeat(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
ta_repeat1 = tensor.repeat(ta, 2, axis=None)
a_repeat1 = np.repeat(a, 2, axis=None)
Ta_repeat1 = tensor.to_numpy(ta_repeat1)
ta_repeat2 = tensor.repeat(ta, 4, axis=1)
a_repeat2 = np.repeat(a, 4, axis=1)
Ta_repeat2 = tensor.to_numpy(ta_repeat2)
self.assertAlmostEqual(np.sum(Ta_repeat1 - a_repeat1), 0., places=3)
self.assertAlmostEqual(np.sum(Ta_repeat2 - a_repeat2), 0., places=3)
def test_sum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
a_sum0 = np.sum(a)
ta_sum0 = tensor.sum(ta)
Ta_sum0 = tensor.to_numpy(ta_sum0)
a_sum1 = np.sum(a, axis=1)
ta_sum1 = tensor.sum(ta, axis=1)
Ta_sum1 = tensor.to_numpy(ta_sum1)
a_sum2 = np.sum(a, axis=2)
ta_sum2 = tensor.sum(ta, axis=2)
Ta_sum2 = tensor.to_numpy(ta_sum2)
self.assertAlmostEqual(np.sum(a_sum0 - Ta_sum0), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum1 - Ta_sum1), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum2 - Ta_sum2), 0., places=3)
def test_tensordot(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.tensordot(a, a, axes=1)
tres1 = tensor.tensordot(ta, ta, axes=1)
Tres1 = tensor.to_numpy(tres1)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
np.testing.assert_array_almost_equal(Tres1, res1)
res2 = np.tensordot(a, a, axes=([0, 1], [2, 1]))
tres2 = tensor.tensordot(ta, ta, axes=([0, 1], [2, 1]))
np.testing.assert_array_almost_equal(tensor.to_numpy(tres2), res2)
def test_reshape(self):
a = np.array([[[1.1, 1.1, 1.4], [1.1, 1.1, 1.1]],
[[1.1, 1.1, 1.3], [1.6, 1.1, 1.2]]])
ta = tensor.from_numpy(a)
tb = tensor.reshape(ta, [2, 6])
self.assertAlmostEqual(tb.shape[0], 2., places=3)
self.assertAlmostEqual(tb.shape[1], 6., places=3)
np.testing.assert_array_almost_equal(tensor.to_numpy(tb),
a.reshape((2, 6)))
def test_transpose_then_reshape(self):
a = np.array([[[1.1, 1.1], [1.1, 1.1], [1.4, 1.3]],
[[1.1, 1.6], [1.1, 1.1], [1.1, 1.2]]])
TRANSPOSE_AXES = (2, 0, 1)
RESHAPE_DIMS = (2, 6)
ta = tensor.from_numpy(a)
ta = ta.transpose(TRANSPOSE_AXES)
ta = ta.reshape(RESHAPE_DIMS)
np.testing.assert_array_almost_equal(
tensor.to_numpy(ta),
np.reshape(a.transpose(TRANSPOSE_AXES), RESHAPE_DIMS))
def _concatenate_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np2 = np.random.random([5, 6, 7, 1]).astype(np.float32)
np3 = np.concatenate((np1, np2), axis=3)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.Tensor(device=dev, data=np2)
t3 = tensor.concatenate((t1, t2), 3)
np.testing.assert_array_almost_equal(tensor.to_numpy(t3), np3)
def test_concatenate_cpu(self):
self._concatenate_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_concatenate_gpu(self):
self._concatenate_helper(gpu_dev)
def _subscription_helper(self, dev):
np1 = np.random.random((5, 5, 5, 5)).astype(np.float32)
sg_tensor = tensor.Tensor(device=dev, data=np1)
sg_tensor_ret = sg_tensor[1:3, :, 1:, :-1]
np.testing.assert_array_almost_equal((tensor.to_numpy(sg_tensor_ret)),
np1[1:3, :, 1:, :-1])
def test_subscription_cpu(self):
self._subscription_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_subscription_gpu(self):
self._subscription_helper(gpu_dev)
def _ceil_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np1 = np1 * 10
np2 = np.ceil(np1)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.ceil(t1)
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np2)
def test_ceil_cpu(self):
self._ceil_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_ceil_gpu(self):
self._ceil_helper(gpu_dev)
def _astype_helper(self, dev):
shape1 = [2, 3]
shape2 = [3, 2]
np_flt = np.random.random(shape1).astype(np.float32)
np_flt = np_flt * 10 - 5
np_int = np_flt.astype(np.int32)
np_flt2 = np_int.astype(np.float32)
t2 = tensor.Tensor(device=dev, data=np_flt)
t2 = t2.as_type('int')
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np_int)
t1 = t2.reshape(shape2)
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_int.reshape(shape2))
t1 = t1.as_type('float')
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_flt2.reshape(shape2))
def test_astype_cpu(self):
self._astype_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_astype_gpu(self):
self._astype_helper(gpu_dev)
def _3d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 3).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 5).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_3d_matmul_cpu(self):
self._3d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_3d_matmul_gpu(self):
self._3d_matmul_helper(gpu_dev)
def _4d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 256).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 1024).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_4d_matmul_cpu(self):
self._4d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_4d_matmul_gpu(self):
self._4d_matmul_helper(gpu_dev)
def _matmul_transpose_helper(self, dev):
X = np.random.random((1, 256, 12, 64)).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
W = np.random.random((1, 256, 12, 64)).astype(np.float32)
w = tensor.from_numpy(W)
w.to_device(dev)
X = np.transpose(X, (0, 2, 1, 3))
W = np.transpose(W, (0, 2, 1, 3))
W = np.transpose(W, (0, 1, 3, 2))
Y = np.matmul(X, W)
x = autograd.transpose(x, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 1, 3, 2))
y = autograd.matmul(x, w)
np.testing.assert_array_almost_equal(tensor.to_numpy(x), X)
np.testing.assert_array_almost_equal(tensor.to_numpy(w), W)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), Y)
def test_matmul_transpose_cpu(self):
self._matmul_transpose_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_transpose_gpu(self):
self._matmul_transpose_helper(gpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_gaussian_gpu(self, dev=gpu_dev):
x = tensor.Tensor((3, 5, 3, 5), device=dev)
x.gaussian(0, 1)
x = tensor.Tensor((4, 5, 3, 2), device=dev)
x.gaussian(0, 1)
def _kfloat32_int(self, dev=gpu_dev):
|
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kfloat32_int_gpu(self):
self._kfloat32_int(gpu_dev)
def test_kfloat32_int_cpu(self):
self._kfloat32_int(cpu_dev)
def _kint_float(self, dev=gpu_dev):
np.random.seed(0)
x_val = np.random.randint(0, 10, (2, 3))
x = tensor.from_numpy(x_val)
x.to_device(dev)
scalar = random.random() * 100
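        # adding a Python float scalar to an int tensor should promote the result to float32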
y = x + scalar
self.assertEqual(y.dtype, tensor.float32)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar, 5)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_float_gpu(self):
self._kint_float(gpu_dev)
def test_kint_float_cpu(self):
self._kint_float(cpu_dev)
def _kint_kint(self, dev=gpu_dev):
a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],
[1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],
[[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],
[1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],
[[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],
[-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],
dtype=np.int32)
b_np = np.array([[[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],
[7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],
[[-11, 9, 4, -15, 14], [18, 11, -1, -10, 10],
[-4, 12, 2, 9, 3], [7, 0, 17, 1, 4]],
[[18, -13, -12, 9, -11], [19, -4, -7, 19, 14],
[18, 9, -8, 19, -2], [8, 9, -1, 6, 9]]],
dtype=np.int32)
ta = tensor.from_numpy(a_np)
tb = tensor.from_numpy(b_np)
ta.to_device(dev)
tb.to_device(dev)
y = ta - tb
np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)
def test_kint_kint_cpu(self, dev=cpu_dev):
self._kint_kint(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_kint_gpu(self, dev=gpu_dev):
self._kint_kint(gpu_dev)
def _kint_kint_bc(self, dev=gpu_dev):
a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],
[1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],
[[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],
[1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],
[[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],
[-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],
dtype=np.int32)
b_np = np.array([[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],
[7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],
dtype=np.int32)
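        # b_np has shape (4, 5) and broadcasts against a_np of shape (3, 4, 5) along the leading axis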
ta = tensor.from_numpy(a_np)
tb = tensor.from_numpy(b_np)
ta.to_device(dev)
tb.to_device(dev)
y = ta - tb
np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)
def test_kint_kint_bc_cpu(self, dev=cpu_dev):
self._kint_kint_bc(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_kint_bc_gpu(self, dev=gpu_dev):
self._kint_kint_bc(gpu_dev)
if __name__ == '__main__':
unittest.main()
| np.random.seed(0)
x_val = np.random.random((2, 3)).astype(np.float32) * 10
x = tensor.from_numpy(x_val)
x.to_device(dev)
scalar = np.random.random((1,))[0] * 100
y = x + scalar
self.assertEqual(y.dtype, tensor.float32)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar) |
seajs-preload.js | !function(){var a=seajs.data,b=document;seajs.Module.preload=function(b){var c=a.preload,d=c.length;d?seajs.Module.use(c,function(){c.splice(0,d),seajs.Module.preload(b)},a.cwd+"_preload_"+a.cid()):b()},seajs.use=function(b,c){return seajs.Module.preload(function(){seajs.Module.use(b,c,a.cwd+"_use_"+a.cid())}),seajs},a.preload=function(){var a=[],c=location.search.replace(/(seajs-\w+)(&|$)/g,"$1=1$2");return c+=" "+b.cookie,c.replace(/(seajs-\w+)=1/g,function(b,c){a.push(c)}),a}(),define("seajs/seajs-preload/1.0.0/seajs-preload",[],{})}(); |
||
restraints.py | """
Validation of models of any type against basic covalent geometry restraints.
By default this will flag all restrained atoms deviating by more than 4 sigma
from the target value.
"""
from __future__ import absolute_import, division, print_function
from mmtbx.validation import atoms, validation, get_atoms_info
from libtbx.str_utils import make_sub_header
from libtbx import slots_getstate_setstate
from math import sqrt
import sys
__restraint_attr__ = [
"sigma",
"target",
"model",
"delta",
"residual",
] # XXX others?
class restraint(atoms):
n_atoms = None
"""
  Base class for covalent stereochemistry restraint outliers (except for
planarity, which is weird and different). Unlike most of the other
outlier implementations elsewhere in the validation module, the restraint
outliers are printed on multiple lines to facilitate display of the atoms
involved.
"""
__slots__ = atoms.__slots__ + __restraint_attr__
def __init__(self, **kwds):
atoms.__init__(self, **kwds)
if (self.n_atoms is not None):
assert (len(self.atoms_info) == self.n_atoms)
if (self.score is None):
self.score = abs(self.delta / self.sigma)
@staticmethod
def header():
return "%-20s %7s %7s %7s %6s %6s %10s" % ("atoms", "ideal", "model",
"delta", "sigma", "residual", "deviation")
def as_table_row_phenix(self):
"""
Values for populating ListCtrl in Phenix GUI.
"""
atoms_str = ", ".join([ a.id_str() for a in self.atoms_info ])
return [ atoms_str, self.target, self.model, self.score ]
def id_str(self, ignore_altloc=None):
return ",".join([ a.id_str() for a in self.atoms_info ])
def as_string(self, prefix=""):
id_strs = [ a.id_str() for a in self.atoms_info ]
id_len = max([ len(s) for s in id_strs ])
lines = []
for atom_str in id_strs :
lines.append("%s%-20s" % (prefix, atom_str))
lines[-1] += " " + self.format_values()
return "\n".join(lines)
def format_values(self):
return "%7.2f %7.2f %7.2f %6.2e %6.2e %4.1f*sigma" % (self.target,
self.model, self.delta, self.sigma, self.residual, self.score)
def __cmp__(self, other):
return cmp(other.score, self.score)
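    # __cmp__ (highest score first) is only consulted on Python 2; Python 3 uses the rich comparison methods below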
def __eq__(self, other):
return self.score == other.score
def __ne__(self, other):
return self.score != other.score
def __lt__(self, other):
return self.score < other.score
def __le__(self, other):
return self.score <= other.score
def __gt__ (self, other):
return self.score > other.score
def __ge__(self, other):
return self.score >= other.score
def kinemage_key(self):
atom0 = self.atoms_info[0]
# bonds are assigned to the following residue
if len(self.atoms_info)==2:
atom0 = self.atoms_info[1]
# angles are assigned to the central atom's residue
elif len(self.atoms_info)==3:
atom0 = self.atoms_info[1]
# dihedrals are assigned to the following residue - this applies to
# omega dihedral but planes are not a problem
elif len(self.atoms_info)==4:
atom0 = self.atoms_info[2]
atom_names = [ a.name.strip().lower() for a in self.atoms_info ]
kin_key = "%1s%3s%2s%4s%1s %s" % (self.get_altloc(),
atom0.resname.lower(), atom0.chain_id, atom0.resseq, atom0.icode,
"-".join(atom_names))
return kin_key
class bond(restraint):
n_atoms = 2
__bond_attr__ = [
"slack",
"symop",
]
__slots__ = restraint.__slots__ + __bond_attr__
def as_table_row_phenix(self):
return [ self.atoms_info[0].id_str(), self.atoms_info[1].id_str(),
self.target, self.model, self.score ]
@staticmethod
def header():
return "%-20s %5s %6s %6s %6s %6s %8s %10s" % ("atoms", "ideal",
"model", "delta", "sigma", "slack", "residual", "deviation")
    def format_values(self):
return "%5.3f %6.2f %6.3f %6.3f %6.2e %8.2e %4.1f*sigma" % \
(self.target, self.model, self.delta, self.sigma, self.slack,
self.residual, abs(self.score))
def as_kinemage(self):
from mmtbx.kinemage.validation import bond_outlier_as_kinemage
return bond_outlier_as_kinemage(self)
class angle(restraint):
n_atoms = 3
def as_kinemage(self):
from mmtbx.kinemage.validation import angle_outlier_as_kinemage
return angle_outlier_as_kinemage(self)
class dihedral(restraint):
n_atoms = 4
def as_kinemage(self):
return None
| return chiral_outlier_as_kinemage(self)
def as_table_row_phenix(self):
"""
Values for populating ListCtrl in Phenix GUI.
"""
atoms_str = ", ".join([ a.id_str() for a in self.atoms_info ])
return [ atoms_str, self.target, self.model, self.score, self.outlier_type() ]
def is_pseudochiral(self):
#Certain atoms are treated like chiral centers because they bond to atoms that have different names without chemical difference.
#VAL CB bonds to CG1 and CG2, for example.
        #A large chiral volume outlier reflects a failure to follow chemical naming conventions, not necessarily a major geometry error
#So these pseudochiral centers should be treated differently.
#
#backbone phosphate in nucleic acids
#OP1 and OP2 atoms are chemically identical
resname = self.atoms_info[0].resname
atomname = self.atoms_info[0].name.strip()
if atomname == 'P': return True
#SF4 and F3S are iron-sulfur clusters with frequent naming problems
if resname in ['SF4','F3S']: return True
#Val CG1 and CG2 are chemically identical
if resname == 'VAL' and atomname == 'CB': return True
#LEU CD1 and CD2 are chemically identical
if resname == 'LEU' and atomname == 'CG': return True
#Otherwise
return False
def is_handedness_swap(self):
resname = self.atoms_info[0].resname
if resname in ['PRO','DPR']: #proline has slightly different geometry
if self.score > 22:
return True
elif self.score > 20:
return True
else:
return False
def outlier_type(self):
if self.score <= 4: return None
if not self.is_handedness_swap():
return "Tetrahedral geometry outlier"
else:
if self.is_pseudochiral():
return "Pseudochiral naming error"
else:
return "Chiral handedness swap"
class planarity(restraint):
__slots__ = atoms.__slots__ + [
"rms_deltas",
"delta_max",
"residual",
]
def as_table_row_phenix(self):
atoms_str = ", ".join([ a.id_str() for a in self.atoms_info ])
return [ atoms_str, self.delta_max, self.rms_deltas, self.score ]
@staticmethod
def header():
return "%-20s %10s %10s %10s %10s" % ("atoms", "rms_deltas",
"delta_max", "residual", "deviation")
def format_values(self):
return "%10.3f %10.3f %10.2f %4.1f*sigma" % (self.rms_deltas,
self.delta_max, self.residual, self.score)
def as_kinemage(self):
return None
class restraint_validation(validation):
"""
Base class for collecting information about all restraints of a certain
type, including overall statistics and individual outliers.
"""
restraint_type = None
kinemage_header = None
gui_list_headers = ["Atoms","Ideal value","Model value","Deviation (sigmas)"]
gui_formats = ["%s", "%.3f", "%.3f", "%.1f"]
wx_column_widths = [500, 100, 100, 180]
__restraints_attr__ = [
"min",
"max",
"mean",
"z_min",
"z_max",
"z_mean",
"target",
]
__slots__ = validation.__slots__ + __restraints_attr__
def __init__(self,
pdb_atoms,
sites_cart,
energies_sites,
restraint_proxies,
unit_cell,
ignore_hd=True,
sigma_cutoff=4.0,
outliers_only=True,
use_segids_in_place_of_chainids=False):
validation.__init__(self)
self.z_min = self.z_max = self.z_mean = None
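        # per-restraint-type statistics are looked up by name, e.g. bond_deviations() and bond_residual_sum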
deviations_method = getattr(energies_sites, "%s_deviations" %
self.restraint_type)
self.min, self.max, self.mean = deviations_method()
target = getattr(energies_sites, "%s_residual_sum" %
self.restraint_type)
self.n_total = getattr(energies_sites, "n_%s_proxies" %
self.restraint_type)
if (self.n_total > 0):
self.target = target / self.n_total
else :
self.target = 0
deviations_z_method = getattr(energies_sites, "%s_deviations_z" %
self.restraint_type, None)
if (deviations_z_method is not None):
deviations_z = deviations_z_method()
self.z_min, self.z_max, self.z_mean = deviations_z_method()
self.results = sorted(self.get_outliers(
proxies=restraint_proxies,
unit_cell=unit_cell,
sites_cart=sites_cart,
pdb_atoms=pdb_atoms,
sigma_cutoff=sigma_cutoff,
outliers_only=outliers_only,
use_segids_in_place_of_chainids=use_segids_in_place_of_chainids))
self.n_outliers = len(self.results)
def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
sigma_cutoff):
raise NotImplementedError()
def show_old_output(self, *args, **kwds):
raise NotImplementedError()
def show(self, out=sys.stdout, prefix=" ", verbose=True):
if (len(self.results) > 0):
print(prefix + self.get_result_class().header(), file=out)
for result in self.results :
print(result.as_string(prefix=prefix), file=out)
self.show_summary(out=out, prefix=prefix)
def show_summary(self, out=sys.stdout, prefix=""):
if (self.n_total == 0):
print(prefix + "No restraints of this type.", file=out)
return
elif (self.n_outliers == 0):
print(prefix + \
"All restrained atoms within 4.0 sigma of ideal values.", file=out)
print("", file=out)
if (self.z_mean is not None):
print(prefix + "Min. delta: %7.3f (Z=%7.3f)" % (self.min,
self.z_min), file=out)
print(prefix + "Max. delta: %7.3f (Z=%7.3f)" % (self.max,
self.z_max), file=out)
print(prefix + "Mean delta: %7.3f (Z=%7.3f)" % (self.mean,
self.z_mean), file=out)
else :
print(prefix + "Min. delta: %7.3f" % self.min, file=out)
print(prefix + "Max. delta: %7.3f" % self.max, file=out)
print(prefix + "Mean delta: %7.3f" % self.mean, file=out)
def as_kinemage(self, chain_id=None):
header = self.kinemage_header
if (header is not None):
kin_blocks = []
for result in self.results :
if (result.is_outlier()) and (result.is_in_chain(chain_id)):
outlier_kin_txt = result.as_kinemage()
if (outlier_kin_txt is not None):
kin_blocks.append(outlier_kin_txt)
return header + "\n".join(kin_blocks)
return None
class bonds(restraint_validation):
restraint_type = "bond"
restraint_label = "Bond length"
kinemage_header = "@subgroup {length devs} dominant\n"
gui_list_headers = ["Atom 1","Atom 2","Ideal value","Model value",
"Deviation (sigmas)"]
gui_formats = ["%s", "%s", "%.3f", "%.3f", "%.1f"]
wx_column_widths = [150, 150, 100, 100, 180]
def get_result_class(self) : return bond
def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
sigma_cutoff, outliers_only=True,
use_segids_in_place_of_chainids=False):
from scitbx.array_family import flex
from cctbx.geometry_restraints.linking_class import linking_class
origin_ids = linking_class()
site_labels = flex.bool(sites_cart.size(), True).iselection()
sorted_table, not_shown = proxies.get_sorted(
by_value="residual",
sites_cart=sites_cart,
site_labels=site_labels,
origin_id=origin_ids.get_origin_id('covalent geometry'))
# this can happen for C-alpha-only models, etc.
if (sorted_table is None):
return []
outliers = []
for restraint_info in sorted_table :
(i_seq, j_seq, i_seqs, ideal, model, slack, delta, sigma, weight, residual, sym_op_j,
rt_mx) = restraint_info
bond_atoms = get_atoms_info(pdb_atoms, iselection=i_seqs,
use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
if sym_op_j:
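        # map the second atom through the symmetry operator so the reported position matches the bonded symmetry copy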
import scitbx
m3 = rt_mx.r().as_double()
m3 = scitbx.matrix.sqr(m3)
t = rt_mx.t().as_double()
t = scitbx.matrix.col((t[0],t[1],t[2]))
xyz = unit_cell.fractionalize(flex.vec3_double([bond_atoms[1].xyz]))
new_xyz = unit_cell.orthogonalize(m3.elems*xyz+t)
bond_atoms[1].xyz = new_xyz[0]
outlier = bond(
atoms_info=bond_atoms,
target=ideal,
model=model,
sigma=sigma,
slack=slack,
delta=delta,
residual=residual,
symop=sym_op_j,
outlier=True,
xyz=get_mean_xyz(bond_atoms))
if (outlier.score > sigma_cutoff):
outliers.append(outlier)
elif (not outliers_only):
outlier.outlier=False
outliers.append(outlier)
return outliers
class angles(restraint_validation):
restraint_type = "angle"
restraint_label = "Bond angle"
kinemage_header = "@subgroup {geom devs} dominant\n"
def get_result_class(self) : return angle
def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
sigma_cutoff, outliers_only=True,
use_segids_in_place_of_chainids=False):
import cctbx.geometry_restraints
sorted = _get_sorted(proxies,
unit_cell=unit_cell,
sites_cart=sites_cart,
pdb_atoms=pdb_atoms,
use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
outliers = []
for proxy, proxy_atoms in sorted :
restraint = cctbx.geometry_restraints.angle(
unit_cell=unit_cell,
proxy=proxy,
sites_cart=sites_cart)
outlier = angle(
atoms_info=proxy_atoms,
target=restraint.angle_ideal,
delta=restraint.delta,
model=restraint.angle_model,
sigma=cctbx.geometry_restraints.weight_as_sigma(restraint.weight),
residual=restraint.residual(),
outlier=True,
xyz=proxy_atoms[1].xyz)
if (outlier.score > sigma_cutoff):
outliers.append(outlier)
elif (not outliers_only):
outlier.outlier=False
outliers.append(outlier)
return outliers
class dihedrals(restraint_validation):
restraint_type = "dihedral"
restraint_label = "Dihedral angle"
def get_result_class(self) : return dihedral
def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
sigma_cutoff, outliers_only=True,
use_segids_in_place_of_chainids=False):
import cctbx.geometry_restraints
sorted = _get_sorted(proxies,
unit_cell=unit_cell,
sites_cart=sites_cart,
pdb_atoms=pdb_atoms)
outliers = []
for proxy, proxy_atoms in sorted :
restraint = cctbx.geometry_restraints.dihedral(
unit_cell=unit_cell,
proxy=proxy,
sites_cart=sites_cart)
outlier = dihedral(
atoms_info=proxy_atoms,
target=restraint.angle_ideal,
delta=restraint.delta,
model=restraint.angle_model,
sigma=cctbx.geometry_restraints.weight_as_sigma(restraint.weight),
residual=restraint.residual(),
xyz=get_mean_xyz([proxy_atoms[1], proxy_atoms[2]]),
outlier=True)
if (outlier.score > sigma_cutoff):
outliers.append(outlier)
elif (not outliers_only):
outlier.outlier=False
outliers.append(outlier)
return outliers
class chiralities(restraint_validation):
restraint_type = "chirality"
restraint_label = "Chiral volume"
kinemage_header = "@subgroup {chiral devs} dominant\n"
gui_list_headers = ["Atoms","Ideal value","Model value",
"Deviation (sigmas)","Probable cause"]
gui_formats = ["%s", "%.3f", "%.3f", "%.1f", "%s"]
wx_column_widths = [250, 100, 100, 180, 250]
def get_result_class(self) : return chirality
def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
sigma_cutoff, outliers_only=True,
use_segids_in_place_of_chainids=False):
import cctbx.geometry_restraints
sorted = _get_sorted(proxies,
unit_cell=None,
sites_cart=sites_cart,
pdb_atoms=pdb_atoms)
outliers = []
for proxy, proxy_atoms in sorted :
restraint = cctbx.geometry_restraints.chirality(
proxy=proxy,
sites_cart=sites_cart)
outlier = chirality(
atoms_info=proxy_atoms,
target=restraint.volume_ideal,
delta=restraint.delta,
model=restraint.volume_model,
sigma=cctbx.geometry_restraints.weight_as_sigma(restraint.weight),
residual=restraint.residual(),
outlier=True,
xyz=get_mean_xyz(proxy_atoms))
if (outlier.score > sigma_cutoff):
outliers.append(outlier)
elif (not outliers_only):
outlier.outlier=False
outliers.append(outlier)
return outliers
class planarities(restraint_validation):
restraint_type = "planarity"
restraint_label = "Planar group"
gui_list_headers = ["Atoms", "Max. delta", "RMS(delta)", "Deviation (sigmas)"]
gui_formats = ["%s", "%.3f", "%.3f", "%.1f"]
wx_column_widths = [250, 100, 100, 130]
def get_result_class(self) : return planarity
def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
sigma_cutoff, outliers_only=True,
use_segids_in_place_of_chainids=False):
import cctbx.geometry_restraints
from scitbx.array_family import flex
site_labels = flex.bool(sites_cart.size(), True).iselection()
sorted_table, n_not_shown = proxies.get_sorted(
by_value="residual",
sites_cart=sites_cart,
site_labels=site_labels,
unit_cell=unit_cell)
if (sorted_table is None) : return []
outliers = []
for restraint_info in sorted_table :
(plane_atoms, rms_delta, residual) = restraint_info
i_seqs = [ a[0] for a in plane_atoms ]
deviation = max([ a[1] / a[2] for a in plane_atoms ])
plane_atoms_ = get_atoms_info(pdb_atoms, iselection=i_seqs)
outlier = planarity(
atoms_info=plane_atoms_,
rms_deltas=rms_delta,
residual=residual,
delta_max=max([ a[1] for a in plane_atoms ]),
score=deviation,
outlier=True,
xyz=get_mean_xyz(plane_atoms_))
if (outlier.score > sigma_cutoff):
outliers.append(outlier)
elif (not outliers_only):
outlier.outlier=False
outliers.append(outlier)
return outliers
def get_mean_xyz(atoms):
from scitbx.matrix import col
sum = col(atoms[0].xyz)
for atom in atoms[1:] :
sum += col(atom.xyz)
return sum / len(atoms)
def _get_sorted(O,
unit_cell,
sites_cart,
pdb_atoms,
by_value="residual",
use_segids_in_place_of_chainids=False):
assert by_value in ["residual", "delta"]
if (O.size() == 0): return []
import cctbx.geometry_restraints
from scitbx.array_family import flex
from cctbx.geometry_restraints.linking_class import linking_class
origin_ids = linking_class()
deltas = flex.abs(O.deltas(sites_cart=sites_cart))
residuals = O.residuals(sites_cart=sites_cart)
if (by_value == "residual"):
data_to_sort = residuals
elif (by_value == "delta"):
data_to_sort = deltas
i_proxies_sorted = flex.sort_permutation(data=data_to_sort, reverse=True)
sorted_table = []
for i_proxy in i_proxies_sorted:
proxy = O[i_proxy]
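        # only keep proxies whose origin is covalent geometry; restraints added from other sources are skipped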
if proxy.origin_id != origin_ids.get_origin_id('covalent geometry'):
continue
sigma = cctbx.geometry_restraints.weight_as_sigma(proxy.weight)
score = sqrt(residuals[i_proxy]) / sigma
proxy_atoms = get_atoms_info(pdb_atoms, iselection=proxy.i_seqs,
use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
sorted_table.append((proxy, proxy_atoms))
return sorted_table
class combined(slots_getstate_setstate):
"""
Container for individual validations of each of the five covalent restraint
classes.
"""
__geo_types__ = ["bonds", "angles", "dihedrals", "chiralities", "planarities"]
__slots__ = __geo_types__ + ["_use_cdl"]
def __init__(self,
pdb_hierarchy,
xray_structure,
geometry_restraints_manager,
ignore_hd=True,
sigma_cutoff=4.0,
outliers_only=True,
use_segids_in_place_of_chainids=False,
cdl=None):
self._use_cdl = cdl
from mmtbx import restraints
restraints_manager = restraints.manager(
geometry=geometry_restraints_manager)
sites_cart = xray_structure.sites_cart()
hd_selection = xray_structure.hd_selection()
pdb_atoms = pdb_hierarchy.atoms()
if (ignore_hd and hd_selection.count(True) > 0):
restraints_manager = restraints_manager.select(selection = ~hd_selection)
sites_cart = sites_cart.select(~hd_selection)
pdb_atoms = pdb_atoms.select(~hd_selection)
energies_sites = restraints_manager.energies_sites(
sites_cart=sites_cart,
compute_gradients=False).geometry
for geo_type in self.__geo_types__ :
restraint_validation_class = globals()[geo_type]
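            # bond proxies are generated via pair_proxies(); the other restraint types are plain attributes of the geometry manager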
if (geo_type == "bonds" ):
restraint_proxies = restraints_manager.geometry.pair_proxies(
sites_cart=sites_cart).bond_proxies
else :
restraint_proxies = getattr(restraints_manager.geometry,
"%s_proxies" % restraint_validation_class.restraint_type)
rv = restraint_validation_class(
pdb_atoms=pdb_atoms,
sites_cart=sites_cart,
energies_sites=energies_sites,
restraint_proxies=restraint_proxies,
unit_cell=xray_structure.unit_cell(),
ignore_hd=ignore_hd,
sigma_cutoff=sigma_cutoff,
outliers_only=outliers_only,
use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
setattr(self, geo_type, rv)
def show(self, out=sys.stdout, prefix="", verbose=True):
for geo_type in self.__geo_types__ :
rv = getattr(self, geo_type)
make_sub_header(rv.restraint_label + "s", out=out)
if (geo_type == "angles") and getattr(self, "_use_cdl", False):
print(" Using conformation-dependent library for mainchain "+\
"bond angle targets", file=out)
print("", file=out)
rv.show(out=out, prefix=prefix)
def get_bonds_angles_rmsds(self):
return (self.bonds.mean, self.angles.mean)
def as_kinemage(self, chain_id=None):
kin_txt = self.angles.as_kinemage(chain_id=chain_id)
kin_txt += "\n"
kin_txt += self.bonds.as_kinemage(chain_id=chain_id)
return kin_txt | class chirality(restraint):
def as_kinemage(self):
from mmtbx.kinemage.validation import chiral_outlier_as_kinemage |
lib.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Result,
errors::{ffx_bail, ffx_error},
ffx_component::connect_to_lifecycle_controller,
ffx_component_start_args::ComponentStartCommand,
ffx_core::ffx_plugin,
fidl_fuchsia_developer_remotecontrol as rc, fidl_fuchsia_sys2 as fsys,
moniker::{AbsoluteMoniker, AbsoluteMonikerBase},
};
#[ffx_plugin()]
pub async fn start(rcs_proxy: rc::RemoteControlProxy, cmd: ComponentStartCommand) -> Result<()> {
let lifecycle_controller = connect_to_lifecycle_controller(&rcs_proxy).await?;
start_impl(lifecycle_controller, cmd.moniker, &mut std::io::stdout()).await?;
Ok(())
}
async fn start_impl<W: std::io::Write>(
lifecycle_controller: fsys::LifecycleControllerProxy,
moniker: String,
writer: &mut W,
) -> Result<fsys::StartResult> {
let moniker = AbsoluteMoniker::parse_str(&moniker)
.map_err(|e| ffx_error!("Moniker could not be parsed: {}", e))?;
writeln!(writer, "Moniker: {}", moniker)?;
// LifecycleController accepts RelativeMonikers only
let moniker = format!(".{}", moniker.to_string());
let res = lifecycle_controller.start(&moniker).await;
match res {
Ok(sr) => match sr {
Ok(fsys::StartResult::Started) => {
writeln!(writer, "Component started.")?;
Ok(fsys::StartResult::Started)
}
Ok(fsys::StartResult::AlreadyStarted) => {
writeln!(writer, "Component is already running.")?;
Ok(fsys::StartResult::AlreadyStarted)
}
Err(e) => {
ffx_bail!("Lifecycle protocol could not start the component instance: {:?}", e)
}
},
Err(e) => {
ffx_bail!("FIDL error: {:?}", e)
}
}
}
////////////////////////////////////////////////////////////////////////////////
// tests
#[cfg(test)]
mod test {
use {
super::*, fidl::endpoints::create_proxy_and_stream, futures::TryStreamExt,
std::io::BufWriter,
};
fn setup_fake_lifecycle_controller(
expected_moniker: &'static str,
is_running: bool,
) -> fsys::LifecycleControllerProxy |
#[fuchsia_async::run_singlethreaded(test)]
async fn test_success() -> Result<()> {
let mut output = String::new();
let mut writer = unsafe { BufWriter::new(output.as_mut_vec()) };
let lifecycle_controller =
setup_fake_lifecycle_controller("./core/ffx-laboratory:test", false);
let response =
start_impl(lifecycle_controller, "/core/ffx-laboratory:test".to_string(), &mut writer)
.await;
assert_eq!(response.unwrap(), fsys::StartResult::Started);
Ok(())
}
#[fuchsia_async::run_singlethreaded(test)]
async fn test_already_started() -> Result<()> {
let mut output = String::new();
let mut writer = unsafe { BufWriter::new(output.as_mut_vec()) };
let lifecycle_controller =
setup_fake_lifecycle_controller("./core/ffx-laboratory:test", true);
let response =
start_impl(lifecycle_controller, "/core/ffx-laboratory:test".to_string(), &mut writer)
.await;
assert_eq!(response.unwrap(), fsys::StartResult::AlreadyStarted);
Ok(())
}
}
| {
let (lifecycle_controller, mut stream) =
create_proxy_and_stream::<fsys::LifecycleControllerMarker>().unwrap();
fuchsia_async::Task::local(async move {
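            // Serve a single Start request and reply Started or AlreadyStarted depending on `is_running`.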
let req = stream.try_next().await.unwrap().unwrap();
match req {
fsys::LifecycleControllerRequest::Start { moniker, responder, .. } => {
assert_eq!(expected_moniker, moniker);
let sr = if is_running {
fsys::StartResult::AlreadyStarted
} else {
fsys::StartResult::Started
};
responder.send(&mut Ok(sr)).unwrap();
}
_ => panic!("Unexpected Lifecycle Controller request"),
}
})
.detach();
lifecycle_controller
} |
index.ts | // export * from './types';
import * as sessionActions from "./actions" |
export { sessionActions, sessionReducer, SessionState, sessionSelectors } | import * as sessionSelectors from "./selectors"
import { sessionReducer, SessionState } from "./reducer" |
cancelbuild_test.go | package cmd
import (
"io/ioutil"
"strconv"
"strings"
"testing"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apimachinery/registered"
buildapi "github.com/openshift/origin/pkg/build/api"
"github.com/openshift/origin/pkg/client/testclient"
)
// TestCancelBuildDefaultFlags ensures that flags default values are set.
func TestCancelBuildDefaultFlags(t *testing.T) {
o := CancelBuildOptions{}
tests := map[string]struct {
flagName string
defaultVal string
}{
"state": {
flagName: "state",
defaultVal: "[" + strings.Join(o.States, ",") + "]",
},
"dump-logs": {
flagName: "dump-logs",
defaultVal: strconv.FormatBool(o.DumpLogs),
},
"restart": {
flagName: "restart",
defaultVal: strconv.FormatBool(o.Restart),
},
}
cmd := NewCmdCancelBuild("oc", CancelBuildRecommendedCommandName, nil, nil, nil, nil)
for _, v := range tests {
f := cmd.Flag(v.flagName)
if f == nil {
t.Fatalf("expected flag %s to be registered but found none", v.flagName)
}
if f.DefValue != v.defaultVal {
t.Errorf("expected default value of %s for %s but found %s", v.defaultVal, v.flagName, f.DefValue)
}
}
}
// TestCancelBuildRun ensures that RunCancelBuild command calls the right actions.
func TestCancelBuildRun(t *testing.T) {
tests := map[string]struct {
opts *CancelBuildOptions
phase buildapi.BuildPhase
expectedActions []testAction
expectedErr error
}{
"cancelled": {
opts: &CancelBuildOptions{
Out: ioutil.Discard,
Namespace: "test",
States: []string{"new", "pending", "running"},
},
phase: buildapi.BuildPhaseCancelled,
expectedActions: []testAction{
{verb: "get", resource: "builds"},
},
expectedErr: nil,
},
"complete": {
opts: &CancelBuildOptions{
Out: ioutil.Discard,
Namespace: "test",
},
phase: buildapi.BuildPhaseComplete,
expectedActions: []testAction{
{verb: "get", resource: "builds"},
},
expectedErr: nil,
},
"new": {
opts: &CancelBuildOptions{
Out: ioutil.Discard,
Namespace: "test",
}, | expectedActions: []testAction{
{verb: "get", resource: "builds"},
{verb: "update", resource: "builds"},
{verb: "get", resource: "builds"},
},
expectedErr: nil,
},
"pending": {
opts: &CancelBuildOptions{
Out: ioutil.Discard,
Namespace: "test",
},
phase: buildapi.BuildPhaseNew,
expectedActions: []testAction{
{verb: "get", resource: "builds"},
{verb: "update", resource: "builds"},
{verb: "get", resource: "builds"},
},
expectedErr: nil,
},
"running and restart": {
opts: &CancelBuildOptions{
Out: ioutil.Discard,
Namespace: "test",
Restart: true,
},
phase: buildapi.BuildPhaseNew,
expectedActions: []testAction{
{verb: "get", resource: "builds"},
{verb: "update", resource: "builds"},
{verb: "get", resource: "builds"},
{verb: "create", resource: "builds"},
},
expectedErr: nil,
},
}
for _, test := range tests {
client := testclient.NewSimpleFake(genBuild(test.phase))
buildClient := NewFakeTestBuilds(client, test.opts.Namespace)
test.opts.Client = client
test.opts.BuildClient = buildClient
test.opts.ReportError = func(err error) {
test.opts.HasError = true
}
test.opts.Mapper = registered.RESTMapper()
test.opts.BuildNames = []string{"ruby-ex"}
test.opts.States = []string{"new", "pending", "running"}
if err := test.opts.RunCancelBuild(); err != test.expectedErr {
t.Fatalf("error mismatch: expected %v, got %v", test.expectedErr, err)
}
got := test.opts.Client.(*testclient.Fake).Actions()
if len(test.expectedActions) != len(got) {
t.Fatalf("action length mismatch: expected %d, got %d", len(test.expectedActions), len(got))
}
for i, action := range test.expectedActions {
if !got[i].Matches(action.verb, action.resource) {
t.Errorf("action mismatch: expected %s %s, got %s %s", action.verb, action.resource, got[i].GetVerb(), got[i].GetResource())
}
}
}
}
type FakeTestBuilds struct {
*testclient.FakeBuilds
Obj *buildapi.Build
}
func NewFakeTestBuilds(c *testclient.Fake, ns string) *FakeTestBuilds {
f := FakeTestBuilds{}
f.FakeBuilds = &testclient.FakeBuilds{}
f.Fake = c
f.Namespace = ns
return &f
}
func (c *FakeTestBuilds) Get(name string) (*buildapi.Build, error) {
obj, err := c.FakeBuilds.Get(name)
if c.Obj == nil {
c.Obj = obj
}
return c.Obj, err
}
func (c *FakeTestBuilds) Update(inObj *buildapi.Build) (*buildapi.Build, error) {
_, err := c.FakeBuilds.Update(inObj)
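	// Mimic the build controller: a build flagged as cancelled transitions to the Cancelled phase.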
if inObj.Status.Cancelled == true {
inObj.Status.Phase = buildapi.BuildPhaseCancelled
}
c.Obj = inObj
return c.Obj, err
}
func genBuild(phase buildapi.BuildPhase) *buildapi.Build {
build := buildapi.Build{
ObjectMeta: kapi.ObjectMeta{
Name: "ruby-ex",
Namespace: "test",
},
Status: buildapi.BuildStatus{
Phase: phase,
},
}
return &build
} | phase: buildapi.BuildPhaseNew, |
day1-pt1.py | with open("input.txt", "r") as f: | total = int(line1) + int(line2)
if total == 2020:
print(f"line1: {line1}")
print(f"line2: {line2}")
print(f"Multiply: {int(line1) * int(line2)}") | lines = f.readlines()
for line1 in lines:
for line2 in lines: |
test_loader.rs | use crate::config::{types::TraceEngine, Config};
use crate::path_utils::is_coverable_file_path;
use crate::source_analysis::*;
use crate::traces::*;
use gimli::read::Error;
use gimli::*;
use memmap::MmapOptions;
use object::{read::ObjectSection, File as OFile, Object};
use rustc_demangle::demangle;
use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
use tracing::{debug, error, trace};
/// Describes a function as `low_pc`, `high_pc`, its `FunctionType` and an optional demangled name.
type FuncDesc = (u64, u64, FunctionType, Option<String>);
#[derive(Debug, Clone, Copy, PartialEq)]
enum FunctionType {
Generated,
Test,
Standard,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd)]
pub enum LineType {
/// Generated test main. Shouldn't be traced.
TestMain,
/// Entry of function known to be a test
TestEntry(u64),
/// Entry of function. May or may not be test
FunctionEntry(u64),
/// Standard statement
Statement,
/// Condition
Condition,
/// Unknown type
Unknown,
/// Unused meta-code
UnusedGeneric,
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct SourceLocation {
pub path: PathBuf,
pub line: u64,
}
impl From<(PathBuf, usize)> for SourceLocation {
fn from(other: (PathBuf, usize)) -> Self {
Self {
path: other.0,
line: other.1 as u64,
}
}
}
#[derive(Debug, Clone)]
pub struct TracerData {
/// Currently used to find generated __test::main and remove from coverage,
/// may have uses in future for finding conditions etc
pub trace_type: LineType,
/// Start address of the line
pub address: Option<u64>,
/// Length of the instruction
pub length: u64,
/// Function name
pub fn_name: Option<String>,
}
fn generate_func_desc<R, Offset>(
die: &DebuggingInformationEntry<R, Offset>,
debug_str: &DebugStr<R>,
) -> Result<FuncDesc>
where
R: Reader<Offset = Offset>,
Offset: ReaderOffset,
{
let mut func_type = FunctionType::Standard;
let low = die.attr_value(DW_AT_low_pc)?;
let high = die.attr_value(DW_AT_high_pc)?;
let linkage = die.attr_value(DW_AT_linkage_name)?;
let fn_name = die.attr_value(DW_AT_name)?;
let fn_name: Option<String> = match fn_name {
Some(AttributeValue::DebugStrRef(offset)) => debug_str
.get_str(offset)
.and_then(|r| r.to_string().map(|s| s.to_string()))
.ok()
.map(|r| demangle(r.as_ref()).to_string()),
_ => None,
};
// Low is a program counter address so stored in an Addr
let low = match low {
Some(AttributeValue::Addr(x)) => x,
_ => 0u64,
};
// High is an offset from the base pc, therefore is u64 data.
let high = match high {
Some(AttributeValue::Udata(x)) => x,
_ => 0u64,
};
if let Some(AttributeValue::DebugStrRef(offset)) = linkage {
let name = debug_str
.get_str(offset)
.and_then(|r| r.to_string().map(|s| s.to_string()))
.unwrap_or_else(|_| "".into());
let name = demangle(name.as_ref()).to_string();
// Simplest test is whether it's in tests namespace.
// Rust guidelines recommend all tests are in a tests module.
func_type = if name.contains("tests::") {
FunctionType::Test
} else if name.contains("__test::main") {
FunctionType::Generated
} else {
FunctionType::Standard
};
}
Ok((low, high, func_type, fn_name))
}
/// Finds all function entry points and returns a vector
/// This will identify definite tests, but may be prone to false negatives.
fn get_entry_points<R, Offset>(
debug_info: &UnitHeader<R, Offset>,
debug_abbrev: &Abbreviations,
debug_str: &DebugStr<R>,
) -> Vec<FuncDesc>
where
R: Reader<Offset = Offset>,
Offset: ReaderOffset,
{
let mut result: Vec<FuncDesc> = Vec::new();
let mut cursor = debug_info.entries(debug_abbrev);
// skip compilation unit root.
let _ = cursor.next_entry();
while let Ok(Some((_, node))) = cursor.next_dfs() {
// Function DIE
if node.tag() == DW_TAG_subprogram {
if let Ok(fd) = generate_func_desc(node, debug_str) {
result.push(fd);
}
}
}
result
}
fn | <R, Offset>(
prog: IncompleteLineProgram<R>,
debug_strs: &DebugStr<R>,
entries: &[(u64, LineType, &Option<String>)],
config: &Config,
result: &mut HashMap<SourceLocation, Vec<TracerData>>,
) -> Result<()>
where
R: Reader<Offset = Offset>,
Offset: ReaderOffset,
{
let project = config.root();
let get_string = |x: R| x.to_string().map(|y| y.to_string()).ok();
let (cprog, seq) = prog.sequences()?;
for s in seq {
let mut sm = cprog.resume_from(&s);
while let Ok(Some((header, &ln_row))) = sm.next_row() {
if ln_row.end_sequence() {
break;
}
// If this row isn't useful move on
if !ln_row.is_stmt() || ln_row.line().is_none() {
continue;
}
if let Some(file) = ln_row.file(header) {
let mut path = project.clone();
if let Some(dir) = file.directory(header) {
if let Some(temp) = dir.string_value(debug_strs).and_then(get_string) {
path.push(temp);
}
}
if let Ok(p) = path.canonicalize() {
path = p;
}
let file = file.path_name();
let line = ln_row.line().unwrap();
if let Some(file) = file.string_value(debug_strs).and_then(get_string) {
path.push(file);
if !path.is_file() {
// Not really a source file!
continue;
}
if is_coverable_file_path(&path, &project, &config.target_dir()) {
let address = ln_row.address();
let (desc, fn_name) = entries
.iter()
.filter(|&&(addr, _, _)| addr == address)
.map(|&(_, t, fn_name)| (t, fn_name.to_owned()))
.next()
.unwrap_or((LineType::Unknown, None));
let loc = SourceLocation {
path,
line: line.into(),
};
if desc != LineType::TestMain {
let trace = TracerData {
address: Some(address),
trace_type: desc,
length: 1,
fn_name,
};
let tracerdata = result.entry(loc).or_default();
tracerdata.push(trace);
}
}
}
}
}
}
Ok(())
}
fn get_line_addresses(
endian: RunTimeEndian,
obj: &OFile,
analysis: &HashMap<PathBuf, LineAnalysis>,
config: &Config,
) -> Result<TraceMap> {
let project = config.root();
let io_err = |e| {
error!("Io error parsing section: {}", e);
Error::Io
};
let mut result = TraceMap::new();
let debug_info = obj.section_by_name(".debug_info").ok_or(Error::Io)?;
let debug_info = DebugInfo::new(debug_info.data().map_err(io_err)?, endian);
let debug_abbrev = obj.section_by_name(".debug_abbrev").ok_or(Error::Io)?;
let debug_abbrev = DebugAbbrev::new(debug_abbrev.data().map_err(io_err)?, endian);
let debug_strings = obj.section_by_name(".debug_str").ok_or(Error::Io)?;
let debug_strings = DebugStr::new(debug_strings.data().map_err(io_err)?, endian);
let debug_line = obj.section_by_name(".debug_line").ok_or(Error::Io)?;
let debug_line = DebugLine::new(debug_line.data().map_err(io_err)?, endian);
let base_addr = obj.section_by_name(".text").ok_or(Error::Io)?;
let mut iter = debug_info.units();
while let Ok(Some(cu)) = iter.next() {
let addr_size = cu.address_size();
let abbr = match cu.abbreviations(&debug_abbrev) {
Ok(a) => a,
_ => continue,
};
let entry_points = get_entry_points(&cu, &abbr, &debug_strings);
let entries = entry_points
.iter()
.map(|(a, b, c, fn_name)| match c {
FunctionType::Test => (*a, LineType::TestEntry(*b), fn_name),
FunctionType::Standard => (*a, LineType::FunctionEntry(*b), fn_name),
FunctionType::Generated => (*a, LineType::TestMain, fn_name),
})
.collect::<Vec<_>>();
if let Ok(Some((_, root))) = cu.entries(&abbr).next_dfs() {
let offset = match root.attr_value(DW_AT_stmt_list) {
Ok(Some(AttributeValue::DebugLineRef(o))) => o,
_ => continue,
};
let prog = debug_line.program(offset, addr_size, None, None)?;
let mut temp_map: HashMap<SourceLocation, Vec<TracerData>> = HashMap::new();
if let Err(e) =
get_addresses_from_program(prog, &debug_strings, &entries, config, &mut temp_map)
{
debug!("Potential issue reading test addresses {}", e);
} else {
// Deduplicate addresses
for v in temp_map.values_mut() {
v.dedup_by_key(|x| x.address);
}
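                // Drop traces under tests/ (when ignore_tests is set), excluded paths and ignored lines, then normalise line numbers via the source analysis.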
let temp_map = temp_map
.into_iter()
.filter(|&(ref k, _)| {
!(config.ignore_tests && k.path.starts_with(&project.join("tests")))
})
.filter(|&(ref k, _)| !(config.exclude_path(&k.path)))
.filter(|&(ref k, _)| {
!analysis.should_ignore(k.path.as_ref(), &(k.line as usize))
})
.map(|(k, v)| {
let ret = analysis.normalise(k.path.as_ref(), k.line as usize);
let k_n = SourceLocation::from(ret);
(k_n, v)
})
.collect::<HashMap<SourceLocation, Vec<TracerData>>>();
let mut tracemap = TraceMap::new();
for (k, val) in &temp_map {
let rpath = config.strip_base_dir(&k.path);
let mut address = HashSet::new();
let mut fn_name = None;
for v in val.iter() {
if let Some(a) = v.address {
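                            // Skip addresses that fall outside the .text section.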
if a < base_addr.address()
                                || a >= (base_addr.address() + base_addr.size())
{
continue;
}
address.insert(a);
trace!(
"Adding trace at address 0x{:x} in {}:{}",
a,
rpath.display(),
k.line
);
}
if fn_name.is_none() && v.fn_name.is_some() {
fn_name = v.fn_name.clone();
}
}
if address.is_empty() {
trace!(
"Adding trace with no address at {}:{}",
rpath.display(),
k.line
);
}
tracemap.add_trace(&k.path, Trace::new(k.line, address, 1, fn_name));
}
result.merge(&tracemap);
}
}
}
for (file, line_analysis) in analysis.iter() {
if config.exclude_path(file) {
continue;
}
for line in &line_analysis.cover {
let line = *line as u64;
if !result.contains_location(file, line) && !line_analysis.should_ignore(line as usize)
{
let rpath = config.strip_base_dir(file);
trace!(
"Adding trace for potentially uncoverable line in {}:{}",
rpath.display(),
line
);
result.add_trace(file, Trace::new_stub(line));
}
}
}
Ok(result)
}
#[cfg(target_os = "linux")]
fn open_symbols_file(test: &Path) -> io::Result<File> {
File::open(test)
}
#[cfg(target_os = "macos")]
fn open_symbols_file(test: &Path) -> io::Result<File> {
let d_sym = test.with_extension("dSYM");
File::open(&d_sym)
}
#[cfg(target_os = "windows")]
fn open_symbols_file(test: &Path) -> io::Result<File> {
Err(io::Error::new(
io::ErrorKind::Other,
"Windows is not currently supported",
))
}
pub fn generate_tracemap(
test: &Path,
analysis: &HashMap<PathBuf, LineAnalysis>,
config: &Config,
) -> io::Result<TraceMap> {
let file = match open_symbols_file(test) {
Ok(s) => Ok(s),
Err(e) if config.engine() != TraceEngine::Llvm => Err(e),
_ => {
return Ok(TraceMap::new());
}
}?;
let file = unsafe { MmapOptions::new().map(&file)? };
if let Ok(obj) = OFile::parse(&*file) {
let endian = if obj.is_little_endian() {
RunTimeEndian::Little
} else {
RunTimeEndian::Big
};
if let Ok(result) = get_line_addresses(endian, &obj, analysis, config) {
Ok(result)
} else {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"Error while parsing",
))
}
} else {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"Unable to parse binary.",
))
}
}
| get_addresses_from_program |
azenvtypes.go | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package api
//AzureEnvironmentSpecConfig is the overall configuration differences in different cloud environments.
type AzureEnvironmentSpecConfig struct {
CloudName string `json:"cloudName,omitempty"`
KubernetesSpecConfig KubernetesSpecConfig `json:"kubernetesSpecConfig,omitempty"`
EndpointConfig AzureEndpointConfig `json:"endpointConfig,omitempty"`
OSImageConfig map[Distro]AzureOSImageConfig `json:"osImageConfig,omitempty"`
}
//KubernetesSpecConfig is the kubernetes container images used.
type KubernetesSpecConfig struct {
AzureTelemetryPID string `json:"azureTelemetryPID,omitempty"`
// KubernetesImageBase defines a base image URL substring to source images that originate from upstream k8s.gcr.io
KubernetesImageBase string `json:"kubernetesImageBase,omitempty"`
TillerImageBase string `json:"tillerImageBase,omitempty"`
ACIConnectorImageBase string `json:"aciConnectorImageBase,omitempty"` // Deprecated
// MCRKubernetesImageBase defines a base image URL substring to source MS-curated images that originate from MCR
MCRKubernetesImageBase string `json:"mcrKubernetesImageBase,omitempty"`
NVIDIAImageBase string `json:"nvidiaImageBase,omitempty"`
AzureCNIImageBase string `json:"azureCNIImageBase,omitempty"`
CalicoImageBase string `json:"CalicoImageBase,omitempty"`
EtcdDownloadURLBase string `json:"etcdDownloadURLBase,omitempty"`
KubeBinariesSASURLBase string `json:"kubeBinariesSASURLBase,omitempty"`
WindowsTelemetryGUID string `json:"windowsTelemetryGUID,omitempty"`
CNIPluginsDownloadURL string `json:"cniPluginsDownloadURL,omitempty"`
VnetCNILinuxPluginsDownloadURL string `json:"vnetCNILinuxPluginsDownloadURL,omitempty"`
VnetCNIWindowsPluginsDownloadURL string `json:"vnetCNIWindowsPluginsDownloadURL,omitempty"`
ContainerdDownloadURLBase string `json:"containerdDownloadURLBase,omitempty"`
CSIProxyDownloadURL string `json:"csiProxyDownloadURL,omitempty"`
WindowsProvisioningScriptsPackageURL string `json:"windowsProvisioningScriptsPackageURL,omitempty"`
WindowsPauseImageURL string `json:"windowsPauseImageURL,omitempty"`
AlwaysPullWindowsPauseImage bool `json:"alwaysPullWindowsPauseImage,omitempty"`
}
//AzureEndpointConfig describes an Azure endpoint
type AzureEndpointConfig struct {
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix,omitempty"`
}
//AzureOSImageConfig describes an Azure OS image
type AzureOSImageConfig struct {
ImageOffer string `json:"imageOffer,omitempty"`
ImageSku string `json:"imageSku,omitempty"`
ImagePublisher string `json:"imagePublisher,omitempty"`
ImageVersion string `json:"imageVersion,omitempty"`
}
// AzureTelemetryPID represents the current telemetry ID
// See more information here https://docs.microsoft.com/en-us/azure/marketplace/azure-partner-customer-usage-attribution
// PID is maintained to keep consistent with Azure Stack Telemetry Terminologies
type AzureTelemetryPID string
const (
// DefaultAzureStackDeployTelemetryPID tracking ID for Deployment
DefaultAzureStackDeployTelemetryPID = "pid-1bda96ec-adf4-4eea-bb9a-8462de5475c0"
// DefaultAzureStackScaleTelemetryPID tracking ID for Scale
DefaultAzureStackScaleTelemetryPID = "pid-bbbafa53-d6a7-4022-84a2-86fcbaec7030"
// DefaultAzureStackUpgradeTelemetryPID tracking ID for Upgrade
DefaultAzureStackUpgradeTelemetryPID = "pid-0d9b5198-7cd7-4252-a890-5658eaf874be"
)
var (
// DefaultKubernetesSpecConfig is the default Docker image source of Kubernetes
DefaultKubernetesSpecConfig = KubernetesSpecConfig{
KubernetesImageBase: "k8s.gcr.io/",
TillerImageBase: "mcr.microsoft.com/",
NVIDIAImageBase: "mcr.microsoft.com/",
CalicoImageBase: "mcr.microsoft.com/oss/calico/",
AzureCNIImageBase: "mcr.microsoft.com/containernetworking/",
MCRKubernetesImageBase: "mcr.microsoft.com/",
EtcdDownloadURLBase: "mcr.microsoft.com/oss/etcd-io/",
KubeBinariesSASURLBase: "https://kubernetesartifacts.azureedge.net/kubernetes/",
WindowsTelemetryGUID: "fb801154-36b9-41bc-89c2-f4d4f05472b0",
CNIPluginsDownloadURL: "https://kubernetesartifacts.azureedge.net/cni-plugins/" + CNIPluginVer + "/binaries/cni-plugins-linux-amd64-" + CNIPluginVer + ".tgz",
VnetCNILinuxPluginsDownloadURL: "https://kubernetesartifacts.azureedge.net/azure-cni/" + AzureCniPluginVerLinux + "/binaries/azure-vnet-cni-linux-amd64-" + AzureCniPluginVerLinux + ".tgz",
VnetCNIWindowsPluginsDownloadURL: "https://kubernetesartifacts.azureedge.net/azure-cni/" + AzureCniPluginVerWindows + "/binaries/azure-vnet-cni-singletenancy-windows-amd64-" + AzureCniPluginVerWindows + ".zip",
ContainerdDownloadURLBase: "https://storage.googleapis.com/cri-containerd-release/",
CSIProxyDownloadURL: "https://kubernetesartifacts.azureedge.net/csi-proxy/" + DefaultWindowsCsiProxyVersion + "/binaries/csi-proxy-" + DefaultWindowsCsiProxyVersion + ".tar.gz",
WindowsProvisioningScriptsPackageURL: "https://kubernetesartifacts.azureedge.net/aks-engine/windows/provisioning/signedscripts-" + DefaultWindowsProvisioningScriptsPackageVersion + ".zip",
WindowsPauseImageURL: "mcr.microsoft.com/oss/kubernetes/pause:" + WindowsPauseImageVersion,
AlwaysPullWindowsPauseImage: DefaultAlwaysPullWindowsPauseImage,
}
//Ubuntu1604OSImageConfig is the default Linux distribution.
Ubuntu1604OSImageConfig = AzureOSImageConfig{
ImageOffer: "UbuntuServer",
ImageSku: "16.04-LTS", | ImageVersion: "latest",
}
//Ubuntu1804OSImageConfig is the Ubuntu 18.04-LTS Linux distribution.
Ubuntu1804OSImageConfig = AzureOSImageConfig{
ImageOffer: "UbuntuServer",
ImageSku: "18.04-LTS",
ImagePublisher: "Canonical",
ImageVersion: "latest",
}
//Ubuntu1804Gen2OSImageConfig is the Gen2 flavor of the Ubuntu 18.04-LTS Linux distribution.
Ubuntu1804Gen2OSImageConfig = AzureOSImageConfig{
ImageOffer: "UbuntuServer",
ImageSku: "18_04-lts-gen2",
ImagePublisher: "Canonical",
ImageVersion: "latest",
}
//FlatcarImageConfig is the Flatcar Linux distribution.
FlatcarImageConfig = AzureOSImageConfig{
ImageOffer: "flatcar-container-linux-free",
ImageSku: "stable",
ImagePublisher: "kinvolk",
ImageVersion: "latest",
}
// AKSUbuntu1604OSImageConfig is the AKS image based on Ubuntu 16.04-LTS.
// Ubuntu 16.04-LTS has reached EOL as of April 2021, the below image reference should never be updated
// Eventually this VHD reference will be deprecated altogether
AKSUbuntu1604OSImageConfig = AzureOSImageConfig{
ImageOffer: "aks",
ImageSku: "aks-engine-ubuntu-1604-202007",
ImagePublisher: "microsoft-aks",
ImageVersion: "2021.04.13",
}
// AKSUbuntu1804OSImageConfig is the AKS image based on Ubuntu 18.04-LTS.
AKSUbuntu1804OSImageConfig = AzureOSImageConfig{
ImageOffer: "aks",
ImageSku: "aks-engine-ubuntu-1804-202007",
ImagePublisher: "microsoft-aks",
ImageVersion: "2021.09.27",
}
// AKSWindowsServer2019OSImageConfig is the aks-engine image based on Windows Server 2019
AKSWindowsServer2019OSImageConfig = AzureOSImageConfig{
ImageOffer: "aks-windows",
ImageSku: "2019-datacenter-core-smalldisk-2109",
ImagePublisher: "microsoft-aks",
ImageVersion: "17763.2213.210927",
}
// AKSWindowsServer2019ContainerDOSImageConfig is the aks-engine image based on Windows Server 2019
// configured with containerd
AKSWindowsServer2019ContainerDOSImageConfig = AzureOSImageConfig{
ImageOffer: "aks-windows",
ImageSku: "2019-datacenter-core-ctrd-2109",
ImagePublisher: "microsoft-aks",
ImageVersion: "17763.2213.210927",
}
// WindowsServer2019OSImageConfig is the 'vanilla' Windows Server 2019 image
WindowsServer2019OSImageConfig = AzureOSImageConfig{
ImageOffer: "WindowsServer",
ImageSku: "2019-Datacenter-Core-with-Containers-smalldisk",
ImagePublisher: "MicrosoftWindowsServer",
ImageVersion: "17763.2183.2109130127",
}
// ACC1604OSImageConfig is the ACC image based on Ubuntu 16.04.
ACC1604OSImageConfig = AzureOSImageConfig{
ImageOffer: "confidential-compute-preview",
ImageSku: "16.04-LTS",
ImagePublisher: "Canonical",
ImageVersion: "latest",
}
//AzureCloudSpec is the default configuration for the global (public) Azure cloud.
AzureCloudSpec = AzureEnvironmentSpecConfig{
CloudName: AzurePublicCloud,
//KubernetesSpecConfig is the default set of Kubernetes container image URLs.
KubernetesSpecConfig: DefaultKubernetesSpecConfig,
EndpointConfig: AzureEndpointConfig{
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
},
OSImageConfig: map[Distro]AzureOSImageConfig{
Ubuntu: Ubuntu1604OSImageConfig,
Ubuntu1804: Ubuntu1804OSImageConfig,
Ubuntu1804Gen2: Ubuntu1804Gen2OSImageConfig,
Flatcar: FlatcarImageConfig,
AKSUbuntu1604: AKSUbuntu1604OSImageConfig,
AKS1604Deprecated: AKSUbuntu1604OSImageConfig, // for back-compat
AKSUbuntu1804: AKSUbuntu1804OSImageConfig,
AKS1804Deprecated: AKSUbuntu1804OSImageConfig, // for back-compat
ACC1604: ACC1604OSImageConfig,
},
}
//AzureGermanCloudSpec is the German cloud config.
AzureGermanCloudSpec = AzureEnvironmentSpecConfig{
CloudName: AzureGermanCloud,
KubernetesSpecConfig: DefaultKubernetesSpecConfig,
EndpointConfig: AzureEndpointConfig{
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
},
OSImageConfig: map[Distro]AzureOSImageConfig{
Ubuntu: Ubuntu1604OSImageConfig,
Ubuntu1804: Ubuntu1804OSImageConfig,
Ubuntu1804Gen2: Ubuntu1804Gen2OSImageConfig,
Flatcar: FlatcarImageConfig,
AKSUbuntu1604: Ubuntu1604OSImageConfig,
AKS1604Deprecated: Ubuntu1604OSImageConfig, // for back-compat
AKSUbuntu1804: Ubuntu1604OSImageConfig, // workaround for https://github.com/Azure/aks-engine/issues/761
AKS1804Deprecated: Ubuntu1604OSImageConfig, // for back-compat
},
}
//AzureUSGovernmentCloudSpec is the US government config.
AzureUSGovernmentCloudSpec = AzureEnvironmentSpecConfig{
CloudName: AzureUSGovernmentCloud,
KubernetesSpecConfig: DefaultKubernetesSpecConfig,
EndpointConfig: AzureEndpointConfig{
ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net",
},
OSImageConfig: map[Distro]AzureOSImageConfig{
Ubuntu: Ubuntu1604OSImageConfig,
Ubuntu1804: Ubuntu1804OSImageConfig,
Ubuntu1804Gen2: Ubuntu1804Gen2OSImageConfig,
Flatcar: FlatcarImageConfig,
AKSUbuntu1604: AKSUbuntu1604OSImageConfig,
AKS1604Deprecated: AKSUbuntu1604OSImageConfig, // for back-compat
AKSUbuntu1804: AKSUbuntu1804OSImageConfig,
AKS1804Deprecated: AKSUbuntu1804OSImageConfig, // for back-compat
},
}
//AzureChinaCloudSpec is the configuration for Azure China (Mooncake).
AzureChinaCloudSpec = AzureEnvironmentSpecConfig{
CloudName: AzureChinaCloud,
//KubernetesSpecConfig - the default Google container registries are blocked by the Chinese firewall, so the local Azure China mirrors are used instead
KubernetesSpecConfig: KubernetesSpecConfig{
KubernetesImageBase: "gcr.azk8s.cn/google_containers/",
TillerImageBase: "mcr.microsoft.com/",
NVIDIAImageBase: "dockerhub.azk8s.cn/nvidia/",
AzureCNIImageBase: "mcr.azk8s.cn/containernetworking/",
MCRKubernetesImageBase: "mcr.microsoft.com/",
CalicoImageBase: "dockerhub.azk8s.cn/calico/",
EtcdDownloadURLBase: "mcr.microsoft.com/oss/etcd-io/",
// Keep the global default value since mirror for AzureChinaCloud does not have the binaries
KubeBinariesSASURLBase: DefaultKubernetesSpecConfig.KubeBinariesSASURLBase,
WindowsTelemetryGUID: DefaultKubernetesSpecConfig.WindowsTelemetryGUID,
CNIPluginsDownloadURL: "https://mirror.azk8s.cn/kubernetes/containernetworking-plugins/cni-plugins-linux-amd64-" + CNIPluginVer + ".tgz",
VnetCNILinuxPluginsDownloadURL: "https://mirror.azk8s.cn/azure-cni/" + AzureCniPluginVerLinux + "/binaries/azure-vnet-cni-linux-amd64-" + AzureCniPluginVerLinux + ".tgz",
VnetCNIWindowsPluginsDownloadURL: "https://mirror.azk8s.cn/azure-cni/" + AzureCniPluginVerWindows + "/binaries/azure-vnet-cni-singletenancy-windows-amd64-" + AzureCniPluginVerWindows + ".zip",
ContainerdDownloadURLBase: "https://mirror.azk8s.cn/kubernetes/containerd/",
CSIProxyDownloadURL: "https://mirror.azk8s.cn/csi-proxy/" + DefaultWindowsCsiProxyVersion + "/binaries/csi-proxy-" + DefaultWindowsCsiProxyVersion + ".tar.gz",
WindowsProvisioningScriptsPackageURL: "https://mirror.azk8s.cn/aks-engine/windows/provisioning/signedscripts-" + DefaultWindowsProvisioningScriptsPackageVersion + ".zip",
WindowsPauseImageURL: "mcr.microsoft.com/oss/kubernetes/pause:" + WindowsPauseImageVersion,
AlwaysPullWindowsPauseImage: DefaultAlwaysPullWindowsPauseImage,
},
EndpointConfig: AzureEndpointConfig{
ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn",
},
OSImageConfig: map[Distro]AzureOSImageConfig{
Ubuntu: Ubuntu1604OSImageConfig,
Ubuntu1804: Ubuntu1804OSImageConfig,
Ubuntu1804Gen2: Ubuntu1804Gen2OSImageConfig,
Flatcar: FlatcarImageConfig,
AKSUbuntu1604: AKSUbuntu1604OSImageConfig,
AKS1604Deprecated: AKSUbuntu1604OSImageConfig, // for back-compat
AKSUbuntu1804: AKSUbuntu1804OSImageConfig,
AKS1804Deprecated: AKSUbuntu1804OSImageConfig, // for back-compat
},
}
// AzureCloudSpecEnvMap is the environment configuration map for all the Azure cloud environments.
AzureCloudSpecEnvMap = map[string]AzureEnvironmentSpecConfig{
AzureChinaCloud: AzureChinaCloudSpec,
AzureGermanCloud: AzureGermanCloudSpec,
AzureUSGovernmentCloud: AzureUSGovernmentCloudSpec,
AzurePublicCloud: AzureCloudSpec,
}
)
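// exampleCloudSpecLookup is an illustrative helper (not part of the original file,
// added as a usage sketch): it resolves the per-cloud configuration from
// AzureCloudSpecEnvMap by cloud name, falling back to the public cloud spec when
// the name is unknown.
func exampleCloudSpecLookup(cloudName string) AzureEnvironmentSpecConfig {
	if spec, ok := AzureCloudSpecEnvMap[cloudName]; ok {
		return spec
	}
	return AzureCloudSpec
}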
compat.py
# -*- coding: utf-8 -*-
import sys
import itertools
import functools
import inspect
PY2 = int(sys.version_info[0]) == 2
PY26 = PY2 and int(sys.version_info[1]) < 7
if PY2:
import urlparse
urlparse = urlparse
text_type = unicode
binary_type = str
string_types = (str, unicode)
unicode = unicode
basestring = basestring
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
zip_longest = itertools.izip_longest
if PY26:
from .ordereddict import OrderedDict
else:
from collections import OrderedDict
OrderedDict = OrderedDict
def get_func_args(func):
if isinstance(func, functools.partial):
return list(inspect.getargspec(func.func).args)
if inspect.isfunction(func) or inspect.ismethod(func):
return list(inspect.getargspec(func).args)
if callable(func):
return list(inspect.getargspec(func.__call__).args)
else:
import urllib.parse
urlparse = urllib.parse
text_type = str
binary_type = bytes
string_types = (str,)
unicode = str
basestring = (str, bytes)
iterkeys = lambda d: d.keys()
itervalues = lambda d: d.values()
iteritems = lambda d: d.items()
zip_longest = itertools.zip_longest
from collections import OrderedDict
OrderedDict = OrderedDict
def get_func_args(func):
if isinstance(func, functools.partial):
return list(inspect.signature(func.func).parameters)
if inspect.isfunction(func) or inspect.ismethod(func):
return list(inspect.signature(func).parameters)
if callable(func):
# Callable class instances: signature() on the bound __call__ omits 'self',
# so it is added back for parity with the Python 2 branch above.
return ['self'] + list(inspect.signature(func.__call__).parameters)
# From six
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta): # noqa
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
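# Usage sketch (illustrative only, not part of the original module): with_metaclass
# lets a single class body work under both the Python 2 and Python 3 metaclass
# syntaxes. TrackingMeta below is a hypothetical metaclass used only for this demo.
if __name__ == '__main__':  # pragma: no cover - example only
    class TrackingMeta(type):
        registry = []

        def __new__(mcs, name, bases, namespace):
            cls = super(TrackingMeta, mcs).__new__(mcs, name, bases, namespace)
            TrackingMeta.registry.append(cls)
            return cls

    class Tracked(with_metaclass(TrackingMeta, object)):
        pass

    assert type(Tracked) is TrackingMeta
    assert Tracked in TrackingMeta.registry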
Table.ts
import { Column, ColumnDefinition, getColumnType, parseColumnFlags } from "./column";
import { readFieldValue, Value } from "./data";
import Database from "./Database";
import PageType, { assertPageType } from "./PageType";
import { uncompressText } from "./unicodeCompression";
import { findMapPages } from "./usage-map";
import { getBitmapValue, roundToFullByte } from "./util";
export default class Table {
private readonly definitionBuffer: Buffer;
private readonly dataPages: number[];
/**
* Number of rows.
*/
public readonly rowCount: number;
/**
* Number of columns.
*/
public readonly columnCount: number;
private readonly variableColumnCount: number;
private readonly fixedColumnCount: number;
private readonly logicalIndexCount: number;
private readonly realIndexCount: number;
/**
* @param name Table name. As this is stored in MSysObjects, it has to be passed in.
* @param db
* @param firstDefinitionPage The first page of the table definition referenced in the corresponding MSysObject
*/
public constructor(
public readonly name: string,
private readonly db: Database,
private readonly firstDefinitionPage: number
) {
// Concat all table definition pages
let nextDefinitionPage = this.firstDefinitionPage;
let buffer: Buffer | undefined;
while (nextDefinitionPage > 0) {
const curBuffer = this.db.getPage(nextDefinitionPage);
assertPageType(curBuffer, PageType.TableDefinition);
if (!buffer) {
buffer = curBuffer;
} else {
buffer = Buffer.concat([buffer, curBuffer.slice(8)]);
}
nextDefinitionPage = curBuffer.readUInt32LE(4);
}
this.definitionBuffer = buffer!;
// Read row, column, and index counts
this.rowCount = this.definitionBuffer.readUInt32LE(this.db.format.tableDefinitionPage.rowCountOffset);
this.columnCount = this.definitionBuffer.readUInt16LE(this.db.format.tableDefinitionPage.columnCountOffset);
this.variableColumnCount = this.definitionBuffer.readUInt16LE(
this.db.format.tableDefinitionPage.variableColumnCountOffset
);
this.fixedColumnCount = this.columnCount - this.variableColumnCount;
this.logicalIndexCount = this.definitionBuffer.readInt32LE(
this.db.format.tableDefinitionPage.logicalIndexCountOffset
);
this.realIndexCount = this.definitionBuffer.readInt32LE(this.db.format.tableDefinitionPage.realIndexCountOffset);
// Usage Map
const usageMapBuffer = this.db.findPageRow(
this.definitionBuffer.readUInt32LE(this.db.format.tableDefinitionPage.usageMapOffset)
);
this.dataPages = findMapPages(usageMapBuffer, this.db);
}
/**
* Returns a column definition by its name.
*
* @param name Name of the column. Case sensitive.
*/
public getColumn(name: string): Column {
const column = this.getColumns().find((c) => c.name === name);
if (column === undefined) {
throw new Error(`Could not find column with name ${name}`);
}
return column;
}
/**
* Returns an ordered array of all column definitions.
*/
public getColumns(): Column[] {
const columnDefinitions = this.getColumnDefinitions();
return columnDefinitions.map(({ index, variableIndex, fixedIndex, ...rest }) => rest);
}
private getColumnDefinitions(): ColumnDefinition[] {
const columns: ColumnDefinition[] = [];
let curDefinitionPos =
this.db.format.tableDefinitionPage.realIndexStartOffset +
this.realIndexCount * this.db.format.tableDefinitionPage.realIndexEntrySize;
let namesCursorPos =
curDefinitionPos + this.columnCount * this.db.format.tableDefinitionPage.columnsDefinition.entrySize;
for (let i = 0; i < this.columnCount; ++i) {
const columnBuffer = this.definitionBuffer.slice(
curDefinitionPos,
curDefinitionPos + this.db.format.tableDefinitionPage.columnsDefinition.entrySize
);
const type = getColumnType(
this.definitionBuffer.readUInt8(
curDefinitionPos + this.db.format.tableDefinitionPage.columnsDefinition.typeOffset
)
);
const nameLength = this.definitionBuffer.readUIntLE(
namesCursorPos,
this.db.format.tableDefinitionPage.columnNames.nameLengthSize
);
namesCursorPos += this.db.format.tableDefinitionPage.columnNames.nameLengthSize;
const name = uncompressText(
this.definitionBuffer.slice(namesCursorPos, namesCursorPos + nameLength),
this.db.format
);
namesCursorPos += nameLength;
const column: ColumnDefinition = {
name,
type,
index: columnBuffer.readUInt8(this.db.format.tableDefinitionPage.columnsDefinition.indexOffset),
variableIndex: columnBuffer.readUInt8(
this.db.format.tableDefinitionPage.columnsDefinition.variableIndexOffset
),
size:
type === "boolean"
? 0
: columnBuffer.readUInt16LE(this.db.format.tableDefinitionPage.columnsDefinition.sizeOffset),
fixedIndex: columnBuffer.readUInt16LE(this.db.format.tableDefinitionPage.columnsDefinition.fixedIndexOffset),
...parseColumnFlags(
columnBuffer.readUInt8(this.db.format.tableDefinitionPage.columnsDefinition.flagsOffset)
),
};
if (type === "numeric") {
column.precision = columnBuffer.readUInt8(11);
column.scale = columnBuffer.readUInt8(12);
}
columns.push(column);
curDefinitionPos += this.db.format.tableDefinitionPage.columnsDefinition.entrySize;
}
return columns.sort((a, b) => a.index - b.index);
}
/**
* Returns an ordered array of all column names.
*/
public getColumnNames(): string[] {
return this.getColumns().map((column) => column.name);
}
/**
* Returns data from the table.
*
* @param columns Columns to be returned. Defaults to all columns.
* @param rowOffset Index of the first row to be returned. 0-based. Defaults to 0.
* @param rowLimit Maximum number of rows to be returned. Defaults to Infinity.
*/
public getData<TRow extends { [column in TColumn]: Value }, TColumn extends string = string>(options?: {
columns?: ReadonlyArray<string>;
rowOffset?: number;
rowLimit?: number;
}): TRow[] {
const columnDefinitions = this.getColumnDefinitions();
const data = [];
const columns = columnDefinitions.filter((c) => options?.columns === undefined || options.columns!.includes(c.name));
const rowOffset = options?.rowOffset ?? 0;
const rowLimit = options?.rowLimit ?? Infinity;
for (const dataPage of this.dataPages) {
if (data.length >= rowOffset + rowLimit) {
continue;
}
data.push(...this.getDataFromPage(dataPage, columns));
}
return data.slice(rowOffset, rowOffset + rowLimit) as TRow[];
}
private getDataFromPage(page: number, columns: ReadonlyArray<ColumnDefinition>): { [column: string]: Value }[] {
const pageBuffer = this.db.getPage(page);
assertPageType(pageBuffer, PageType.DataPage);
if (pageBuffer.readUInt32LE(4) !== this.firstDefinitionPage) {
throw new Error(`Data page ${page} does not belong to table ${this.name}`);
}
const recordCount = pageBuffer.readUInt16LE(this.db.format.dataPage.recordCountOffset);
const recordOffsets: { start: number; end: number }[] = [];
for (let record = 0; record < recordCount; ++record) {
const offsetMask = 0x1fff;
let recordStart = pageBuffer.readUInt16LE(this.db.format.dataPage.record.countOffset + 2 + record * 2);
if (recordStart & 0x4000) {
// deleted record
continue;
}
recordStart &= offsetMask; // remove flags
const nextStart =
record === 0
? this.db.format.pageSize
: pageBuffer.readUInt16LE(this.db.format.dataPage.record.countOffset + record * 2) & offsetMask;
const recordLength = nextStart - recordStart;
const recordEnd = recordStart + recordLength - 1;
recordOffsets.push({
start: recordStart,
end: recordEnd,
});
}
const lastColumnIndex = Math.max(...columns.map((c) => c.index), 0);
const data: { [column: string]: Value }[] = [];
for (const recordOffset of recordOffsets) {
const recordStart = recordOffset.start;
const recordEnd = recordOffset.end;
const totalVariableCount = pageBuffer.readUIntLE(recordStart, this.db.format.dataPage.record.columnCountSize);
const bitmaskSize = roundToFullByte(totalVariableCount);
let variableColumnCount = 0;
const variableColumnOffsets: number[] = [];
if (this.variableColumnCount > 0) {
switch (this.db.format.legacyFormat) {
case "Jet3": {
variableColumnCount = pageBuffer.readUInt8(recordEnd - bitmaskSize);
// https://github.com/brianb/mdbtools/blob/d6f5745d949f37db969d5f424e69b54f0da60b9b/src/libmdb/write.c#L125-L147
const recordLength = recordEnd - recordStart + 1;
let jumpCount = Math.floor((recordLength - 1) / 256);
const columnPointer = recordEnd - bitmaskSize - jumpCount - 1;
/* If last jump is a dummy value, ignore it */
if ((columnPointer - recordStart - variableColumnCount) / 256 < jumpCount) {
--jumpCount;
}
let jumpsUsed = 0;
for (let i = 0; i < variableColumnCount + 1; ++i) {
while (
jumpsUsed < jumpCount &&
i === pageBuffer.readUInt8(recordEnd - bitmaskSize - jumpsUsed - 1)
) {
++jumpsUsed;
}
variableColumnOffsets.push(pageBuffer.readUInt8(columnPointer - i) + jumpsUsed * 256);
}
break;
}
case "Jet4": {
variableColumnCount = pageBuffer.readUInt16LE(recordEnd - bitmaskSize - 1);
// https://github.com/brianb/mdbtools/blob/d6f5745d949f37db969d5f424e69b54f0da60b9b/src/libmdb/write.c#L115-L124
for (let i = 0; i < variableColumnCount + 1; ++i) {
variableColumnOffsets.push(pageBuffer.readUInt16LE(recordEnd - bitmaskSize - 3 - i * 2));
}
break;
}
}
}
const fixedColumnCount = totalVariableCount - variableColumnCount;
const nullMask = pageBuffer.slice(
recordEnd - bitmaskSize + 1,
recordEnd - bitmaskSize + 1 + roundToFullByte(lastColumnIndex + 1)
);
let fixedColumnsFound = 0;
const recordValues: { [column: string]: Value } = {};
for (const column of [...columns].sort((a, b) => a.index - b.index)) {
/**
* undefined = will be set later. Undefined will never be returned to the user.
* null = actually null
*/
let value: Value | undefined = undefined;
let start: number;
let size: number;
if (!getBitmapValue(nullMask, column.index)) {
value = null;
}
if (column.fixedLength && fixedColumnsFound < fixedColumnCount) {
const colStart = column.fixedIndex + this.db.format.dataPage.record.columnCountSize;
start = recordStart + colStart;
size = column.size;
++fixedColumnsFound;
} else if (!column.fixedLength && column.variableIndex < variableColumnCount) {
const colStart = variableColumnOffsets[column.variableIndex];
start = recordStart + colStart;
size = variableColumnOffsets[column.variableIndex + 1] - colStart;
} else {
start = 0;
value = null;
size = 0;
}
if (column.type === "boolean") {
value = value === undefined;
} else if (value !== null) {
value = readFieldValue(pageBuffer.slice(start, start + size), column, this.db);
}
recordValues[column.name] = value;
}
data.push(recordValues);
}
return data;
}
}
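// Usage sketch (illustrative only, not part of the original file): given a Table
// instance obtained from the surrounding package, list its columns and read a
// few rows. The column names "Name" and "City" are hypothetical.
export function exampleReadRows(table: Table): { [column: string]: Value }[] {
  // Log the ordered column names, then fetch the first ten rows of two columns.
  console.log(table.getColumnNames());
  return table.getData<{ [column: string]: Value }>({
    columns: ["Name", "City"],
    rowOffset: 0,
    rowLimit: 10,
  });
}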