prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k)
---|---
<|file_name|>list.py<|end_file_name|><|fim▁begin|>from recon.core.module import BaseModule
import codecs
import os
class Module(BaseModule):
meta = {
'name': 'List Creator',
'author': 'Tim Tomes (@LaNMaSteR53)',
'description': 'Creates a file containing a list of records from the database.',
'options': (
('table', 'hosts', True, 'source table of data for the list'),<|fim▁hole|> ('unique', True, True, 'only return unique items from the dataset'),
('nulls', False, True, 'include nulls in the dataset'),
('filename', os.path.join(BaseModule.workspace, 'list.txt'), True, 'path and filename for output'),
),
}
def module_run(self):
filename = self.options['filename']
with codecs.open(filename, 'wb', encoding='utf-8') as outfile:
# handle the source of information for the report
column = self.options['column']
table = self.options['table']
nulls = ' WHERE "%s" IS NOT NULL' % (column) if not self.options['nulls'] else ''
unique = 'DISTINCT ' if self.options['unique'] else ''
values = (unique, column, table, nulls)
query = 'SELECT %s"%s" FROM "%s"%s ORDER BY 1' % values
rows = self.query(query)
for row in [x[0] for x in rows]:
row = row if row else ''
outfile.write('%s\n' % (row))
print(row)
self.output('%d items added to \'%s\'.' % (len(rows), filename))<|fim▁end|> | ('column', 'ip_address', True, 'source column of data for the list'), |
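For illustration only (not part of the module above), the SQL string that `module_run` assembles can be previewed in isolation. This sketch simply mirrors the string-building logic shown in the code and assumes the default option values from `meta` (`table='hosts'`, `column='ip_address'`, `unique=True`, `nulls=False`):

```python
# Sketch: reproduce the query string module_run builds for the default options.
options = {'table': 'hosts', 'column': 'ip_address', 'unique': True, 'nulls': False}

nulls = ' WHERE "%s" IS NOT NULL' % options['column'] if not options['nulls'] else ''
unique = 'DISTINCT ' if options['unique'] else ''
query = 'SELECT %s"%s" FROM "%s"%s ORDER BY 1' % (unique, options['column'], options['table'], nulls)

print(query)
# SELECT DISTINCT "ip_address" FROM "hosts" WHERE "ip_address" IS NOT NULL ORDER BY 1
```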
<|file_name|>generics.go<|end_file_name|><|fim▁begin|>// Copyright 2015, Cyrill @ Schumacher.fm and the CoreStore contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tpl
// @todo hide password and other sensitive fields in JSON struct tags
const Type = `
// {{.Struct}} and {{.Slice}}, a type for DB table {{ .TableName }}
type (
{{.Slice}} []*{{.Struct}}
{{.Struct}} struct {
{{ range .GoColumns }}{{.GoName}} {{.GoType}} {{ $.Tick }}db:"{{.Field.String}}" json:",omitempty"{{ $.Tick }} {{.Comment}}
{{ end }} }
)
`
// Generics defines the available templates
type Generics int
// Options to be used to define which generic functions you need in a package.
const (
OptSQL Generics = 1 << iota
OptFindBy
OptSort
OptSliceFunctions
OptExtractFromSlice
OptAll = OptSQL | OptFindBy | OptSort | OptSliceFunctions | OptExtractFromSlice<|fim▁hole|>const SQL = `
// {{ typePrefix "SQLSelect" }} fills this slice with data from the database
func (s *{{.Slice}}) {{ typePrefix "SQLSelect" }}(dbrSess dbr.SessionRunner, cbs ...csdb.DbrSelectCb) (int, error) {
return csdb.LoadSlice(dbrSess, TableCollection, TableIndex{{.Name}}, &(*s), cbs...)
}
// {{ typePrefix "SQLInsert" }} inserts all records into the database @todo
func (s *{{.Slice}}) {{ typePrefix "SQLInsert" }}(dbrSess dbr.SessionRunner, cbs ...csdb.DbrInsertCb) (int, error) {
return 0, nil
}
// {{ typePrefix "SQLUpdate" }} updates all record in the database @todo
func (s *{{.Slice}}) {{ typePrefix "SQLUpdate" }}(dbrSess dbr.SessionRunner, cbs ...csdb.DbrUpdateCb) (int, error) {
return 0, nil
}
// {{ typePrefix "SQLDelete" }} deletes all record from the database @todo
func (s *{{.Slice}}) {{ typePrefix "SQLDelete" }}(dbrSess dbr.SessionRunner, cbs ...csdb.DbrDeleteCb) (int, error) {
return 0, nil
}
`
const FindBy = `
{{if (.FindByPk) ne ""}}
// {{ typePrefix .FindByPk }} searches the primary keys and returns a *{{.Struct}} if found or an error
func (s {{.Slice}}) {{ typePrefix .FindByPk }}(
{{range $k,$v := .Columns.PrimaryKeys}} {{ $v.Name }} {{$v.GetGoPrimitive false}},
{{end}} ) (*{{.Struct}}, error) {
for _, u := range s {
if u != nil {{ range $c := .Columns.PrimaryKeys }} && u.{{ $c.Name | camelize }}{{dbrType $c}} == {{$c.Name}} {{ end }} {
return u, nil
}
}
return nil, csdb.NewError("ID not found in {{.Slice}}")
}
{{end}}
{{ range $k,$c := .Columns.UniqueKeys }}
// {{ findBy $c.Name | typePrefix }} searches through this unique key and returns
// a *{{$.Struct}} if found or an error
func (s {{$.Slice}}) {{ findBy $c.Name | typePrefix }} ( {{ $c.Name }} {{$c.GetGoPrimitive false}} ) (*{{$.Struct}}, error) {
for _, u := range s {
if u != nil && u.{{ $c.Name | camelize }}{{ dbrType $c }} == {{$c.Name}} {
return u, nil
}
}
return nil, csdb.NewError("ID not found in {{$.Slice}}")
}
{{ end }}
`
const Sort = `var _ sort.Interface = (*{{.Slice}})(nil)
// {{ typePrefix "Len" }} returns the length and will satisfy the sort.Interface
func (s {{.Slice}}) {{ typePrefix "Len" }}() int { return len(s) }
// {{ typePrefix "Less" }} will satisfy the sort.Interface and compares via
// the primary key
func (s {{.Slice}}) {{ typePrefix "Less" }}(i, j int) bool {
return {{ range $c := .Columns.PrimaryKeys }} s[i].{{ $c.Name | camelize }}{{dbrType $c}} < s[j].{{ $c.Name | camelize }}{{dbrType $c}} && {{ end }} 1 == 1
}
// {{ typePrefix "Swap" }} will satisfy the sort.Interface
func (s {{.Slice}}) {{ typePrefix "Swap" }}(i, j int) { s[i], s[j] = s[j], s[i] }
// {{ typePrefix "Sort" }} will sort {{.Slice}}
func (s {{.Slice}}) {{ typePrefix "Sort" }}() { sort.Sort(s) }
`
const SliceFunctions = `// {{ typePrefix "FilterThis" }} filters the current slice by predicate f without memory allocation
func (s {{.Slice}}) {{ typePrefix "FilterThis" }} (f func(*{{.Struct}}) bool) {{.Slice}} {
b := s[:0]
for _, x := range s {
if f(x) {
b = append(b, x)
}
}
return b
}
// {{ typePrefix "Filter" }} returns a new slice filtered by predicate f
func (s {{.Slice}}) {{ typePrefix "Filter" }} (f func(*{{.Struct}}) bool) {{.Slice}} {
sl := make({{.Slice}}, 0, len(s))
for _, w := range s {
if f(w) {
sl = append(sl, w)
}
}
return sl
}
// {{ typePrefix "FilterNot" }} will return a new {{.Slice}} that do not match
// by calling the function f
func (s {{.Slice}}) {{ typePrefix "FilterNot" }}(f func(*{{.Struct}}) bool) {{.Slice}} {
sl := make({{.Slice}}, 0, len(s))
for _, v := range s {
if f(v) == false {
sl = append(sl, v)
}
}
return sl
}
// {{ typePrefix "Map" }} will run function f on all items in {{.Slice}}
func (s {{.Slice}}) {{ typePrefix "Map" }}(f func(*{{.Struct}})) {{.Slice}} {
for i := range s {
f(s[i])
}
return s
}
// {{ typePrefix "Cut" }} will remove items i through j-1
func (s *{{.Slice}}) {{ typePrefix "Cut" }}(i, j int) {
z := *s // copy slice header
copy(z[i:], z[j:])
for k, n := len(z)-j+i, len(z); k < n; k++ {
z[k] = nil // this should avoid the memory leak
}
z = z[:len(z)-j+i]
*s = z
}
// {{ typePrefix "Delete" }} will remove item i
func (s *{{.Slice}}) {{ typePrefix "Delete" }}(i int) {
z := *s // copy the slice header
end := len(z) - 1
s.Swap(i, end)
copy(z[i:], z[i+1:])
z[end] = nil // this should avoid the memory leak
z = z[:end]
*s = z
}
// {{ typePrefix "Insert" }} will place a new item at position i
func (s *{{.Slice}}) {{ typePrefix "Insert" }}(n *{{.Struct}}, i int) {
z := *s // copy the slice header
z = append(z, &{{.Struct}}{})
copy(z[i+1:], z[i:])
z[i] = n
*s = z
}
// {{ typePrefix "Append" }} will add a new item at the end of {{.Slice}}
func (s *{{.Slice}}) {{ typePrefix "Append" }}(n ...*{{.Struct}}) {
*s = append(*s, n...)
}
// {{ typePrefix "Prepend" }} will add a new item at the beginning of {{.Slice}}
func (s *{{.Slice}}) {{ typePrefix "Prepend" }}(n *{{.Struct}}) {
s.Insert(n, 0)
}
`
const ExtractFromSlice = `
// Extract{{.Name | camelize}} functions for extracting fields
type Extract{{.Name | camelize}} struct {
{{ range $k,$c := .Columns }} {{$c.Name | camelize }} func() []{{$c.GetGoPrimitive false}}
{{end}} }
// {{ typePrefix "Extract" }} generates slices from all fields
func (s {{$.Slice}}) {{ typePrefix "Extract" }}() Extract{{.Name | camelize}} {
return Extract{{.Name | camelize}} {
{{ range $k,$c := .Columns }} {{$c.Name | camelize }} : func() []{{$c.GetGoPrimitive false}} {
ext := make([]{{$c.GetGoPrimitive false}}, 0, len(s))
for _, v := range s {
ext = append(ext, v.{{ $c.Name | camelize }}{{dbrType $c}})
}
return ext
},
{{end}} }
}
`<|fim▁end|> | )
|
<|file_name|>registryaccess.cpp<|end_file_name|><|fim▁begin|>/****************************************************************************
**
** Copyright (C) 2016 The Qt Company Ltd.
** Contact: https://www.qt.io/licensing/
**
** This file is part of Qt Creator.
**
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and The Qt Company. For licensing terms
** and conditions see https://www.qt.io/terms-conditions. For further
** information use the contact form at https://www.qt.io/contact-us.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3 as published by the Free Software
** Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
** included in the packaging of this file. Please review the following
** information to ensure the GNU General Public License requirements will
** be met: https://www.gnu.org/licenses/gpl-3.0.html.
**
****************************************************************************/
#include "registryaccess.h"
#include <QApplication>
#include <QDir>
#include <QTextStream>
namespace RegistryAccess {
static QString winErrorMessage(unsigned long error)
{
QString rc = QString::fromLatin1("#%1: ").arg(error);
ushort *lpMsgBuf;
const int len = FormatMessage(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
NULL, error, 0, (LPTSTR)&lpMsgBuf, 0, NULL);
if (len) {
rc = QString::fromUtf16(lpMsgBuf, len);
LocalFree(lpMsgBuf);
} else {
rc += QString::fromLatin1("<unknown error>");
}
return rc;
}
QString msgFunctionFailed(const char *f, unsigned long error)
{
return QString::fromLatin1("\"%1\" failed: %2").arg(QLatin1String(f), winErrorMessage(error));
}
static bool registryReadBinaryKey(HKEY handle, // HKEY_LOCAL_MACHINE, etc.
const WCHAR *valueName,
QByteArray *data,
QString *errorMessage)
{
data->clear();
DWORD type;
DWORD size;
// get size and retrieve
LONG rc = RegQueryValueEx(handle, valueName, 0, &type, 0, &size);
if (rc != ERROR_SUCCESS) {
*errorMessage = msgRegistryOperationFailed("read", valueName, msgFunctionFailed("RegQueryValueEx1", rc));
return false;
}
BYTE *dataC = new BYTE[size + 1];
// Will be Utf16 in case of a string
rc = RegQueryValueEx(handle, valueName, 0, &type, dataC, &size);
if (rc != ERROR_SUCCESS) {
*errorMessage = msgRegistryOperationFailed("read", valueName, msgFunctionFailed("RegQueryValueEx2", rc));
return false;
}
*data = QByteArray(reinterpret_cast<const char*>(dataC), size);
delete [] dataC;
return true;
}
bool registryReadStringKey(HKEY handle, // HKEY_LOCAL_MACHINE, etc.
const WCHAR *valueName,
QString *s,
QString *errorMessage)
{
QByteArray data;
if (!registryReadBinaryKey(handle, valueName, &data, errorMessage))
return false;
data += '\0';
data += '\0';
*s = QString::fromUtf16(reinterpret_cast<const unsigned short*>(data.data()));
return true;
}
bool openRegistryKey(HKEY category, // HKEY_LOCAL_MACHINE, etc.
const WCHAR *key,
bool readWrite,
HKEY *keyHandle,
AccessMode mode,
QString *errorMessage)
{
Q_UNUSED(debuggerRegistryKeyC); // avoid warning from MinGW
REGSAM accessRights = KEY_READ;
if (readWrite)
accessRights |= KEY_SET_VALUE;
switch (mode) {
case RegistryAccess::DefaultAccessMode:
break;
case RegistryAccess::Registry32Mode:<|fim▁hole|> break;
case RegistryAccess::Registry64Mode:
accessRights |= KEY_WOW64_64KEY;
break;
}
const LONG rc = RegOpenKeyEx(category, key, 0, accessRights, keyHandle);
if (rc != ERROR_SUCCESS) {
*errorMessage = msgFunctionFailed("RegOpenKeyEx", rc);
if (readWrite)
*errorMessage += QLatin1String("You need administrator privileges to edit the registry.");
return false;
}
return true;
}
// Installation helpers: Format the debugger call with placeholders for PID and event
// '"[path]\qtcdebugger" [-wow] %ld %ld'.
QString debuggerCall(const QString &additionalOption)
{
QString rc;
QTextStream str(&rc);
str << '"' << QDir::toNativeSeparators(QApplication::applicationDirPath() + QLatin1Char('/')
+ QLatin1String(debuggerApplicationFileC) + QLatin1String(".exe")) << '"';
if (!additionalOption.isEmpty())
str << ' ' << additionalOption;
str << " %ld %ld";
return rc;
}
bool isRegistered(HKEY handle, const QString &call, QString *errorMessage, QString *oldDebugger)
{
QString registeredDebugger;
registryReadStringKey(handle, debuggerRegistryValueNameC, &registeredDebugger, errorMessage);
if (oldDebugger)
*oldDebugger = registeredDebugger;
return !registeredDebugger.compare(call, Qt::CaseInsensitive);
}
} // namespace RegistryAccess<|fim▁end|> | accessRights |= KEY_WOW64_32KEY; |
<|file_name|>httpcache.py<|end_file_name|><|fim▁begin|>from email.utils import formatdate
from typing import Optional, Type, TypeVar
from twisted.internet import defer
from twisted.internet.error import (
ConnectError,
ConnectionDone,
ConnectionLost,
ConnectionRefusedError,
DNSLookupError,
TCPTimedOutError,
TimeoutError,
)
from twisted.web.client import ResponseFailed
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http.request import Request
from scrapy.http.response import Response
from scrapy.settings import Settings
from scrapy.spiders import Spider
from scrapy.statscollectors import StatsCollector
from scrapy.utils.misc import load_object
HttpCacheMiddlewareTV = TypeVar("HttpCacheMiddlewareTV", bound="HttpCacheMiddleware")
class HttpCacheMiddleware:
DOWNLOAD_EXCEPTIONS = (defer.TimeoutError, TimeoutError, DNSLookupError,
ConnectionRefusedError, ConnectionDone, ConnectError,
ConnectionLost, TCPTimedOutError, ResponseFailed,
IOError)
def __init__(self, settings: Settings, stats: StatsCollector) -> None:
if not settings.getbool('HTTPCACHE_ENABLED'):
raise NotConfigured
self.policy = load_object(settings['HTTPCACHE_POLICY'])(settings)
self.storage = load_object(settings['HTTPCACHE_STORAGE'])(settings)
self.ignore_missing = settings.getbool('HTTPCACHE_IGNORE_MISSING')
self.stats = stats
@classmethod
def from_crawler(cls: Type[HttpCacheMiddlewareTV], crawler: Crawler) -> HttpCacheMiddlewareTV:
o = cls(crawler.settings, crawler.stats)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
return o
def spider_opened(self, spider: Spider) -> None:
self.storage.open_spider(spider)
def spider_closed(self, spider: Spider) -> None:
self.storage.close_spider(spider)
def process_request(self, request: Request, spider: Spider) -> Optional[Response]:
if request.meta.get('dont_cache', False):
return None
# Skip uncacheable requests
if not self.policy.should_cache_request(request):
request.meta['_dont_cache'] = True # flag as uncacheable
return None
# Look for cached response and check if expired
cachedresponse = self.storage.retrieve_response(spider, request)
if cachedresponse is None:
self.stats.inc_value('httpcache/miss', spider=spider)
if self.ignore_missing:
self.stats.inc_value('httpcache/ignore', spider=spider)
raise IgnoreRequest("Ignored request not in cache: %s" % request)
return None # first time request
# Return cached response only if not expired
cachedresponse.flags.append('cached')
if self.policy.is_cached_response_fresh(cachedresponse, request):
self.stats.inc_value('httpcache/hit', spider=spider)
return cachedresponse
# Keep a reference to cached response to avoid a second cache lookup on<|fim▁hole|> # process_response hook
request.meta['cached_response'] = cachedresponse
return None
def process_response(self, request: Request, response: Response, spider: Spider) -> Response:
if request.meta.get('dont_cache', False):
return response
# Skip cached responses and uncacheable requests
if 'cached' in response.flags or '_dont_cache' in request.meta:
request.meta.pop('_dont_cache', None)
return response
# RFC2616 requires origin server to set Date header,
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.18
if 'Date' not in response.headers:
response.headers['Date'] = formatdate(usegmt=True)
# Do not validate first-hand responses
cachedresponse = request.meta.pop('cached_response', None)
if cachedresponse is None:
self.stats.inc_value('httpcache/firsthand', spider=spider)
self._cache_response(spider, response, request, cachedresponse)
return response
if self.policy.is_cached_response_valid(cachedresponse, response, request):
self.stats.inc_value('httpcache/revalidate', spider=spider)
return cachedresponse
self.stats.inc_value('httpcache/invalidate', spider=spider)
self._cache_response(spider, response, request, cachedresponse)
return response
def process_exception(
self, request: Request, exception: Exception, spider: Spider
) -> Optional[Response]:
cachedresponse = request.meta.pop('cached_response', None)
if cachedresponse is not None and isinstance(exception, self.DOWNLOAD_EXCEPTIONS):
self.stats.inc_value('httpcache/errorrecovery', spider=spider)
return cachedresponse
return None
def _cache_response(
self, spider: Spider, response: Response, request: Request, cachedresponse: Optional[Response]
) -> None:
if self.policy.should_cache_response(response, request):
self.stats.inc_value('httpcache/store', spider=spider)
self.storage.store_response(spider, request, response)
else:
self.stats.inc_value('httpcache/uncacheable', spider=spider)<|fim▁end|> | |
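As a rough usage sketch (not part of the file above), `HttpCacheMiddleware.__init__` reads the settings shown below. Only the setting names are taken from the code; the dotted policy/storage paths are placeholders for whatever classes a project actually uses:

```python
# Hypothetical Scrapy settings driving the HttpCacheMiddleware above.
HTTPCACHE_ENABLED = True                              # otherwise __init__ raises NotConfigured
HTTPCACHE_POLICY = 'myproject.cache.MyCachePolicy'    # resolved with load_object(); placeholder path
HTTPCACHE_STORAGE = 'myproject.cache.MyCacheStorage'  # resolved with load_object(); placeholder path
HTTPCACHE_IGNORE_MISSING = False                      # if True, uncached requests raise IgnoreRequest
```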
<|file_name|>test_pitchtools_PitchArrayCell_pitches.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
from abjad import *
def test_pitchtools_PitchArrayCell_pitches_01():
array = pitchtools.PitchArray([[1, 2, 1], [2, 1, 1]])
array[0].cells[0].pitches.append(NamedPitch(0))
array[0].cells[1].pitches.append(NamedPitch(2))
'''
[c'] [d' ] []
[ ] [] []
'''
assert array[0].cells[0].pitches == [NamedPitch(0)]
assert array[0].cells[1].pitches == [NamedPitch(2)]
assert array[0].cells[2].pitches == []
assert array[1].cells[0].pitches == []<|fim▁hole|>
def test_pitchtools_PitchArrayCell_pitches_02():
cell = pitchtools.PitchArrayCell([NamedPitch(0)])
assert cell.pitches == [NamedPitch(0)]<|fim▁end|> | assert array[1].cells[1].pitches == []
assert array[1].cells[2].pitches == []
|
<|file_name|>csv.rs<|end_file_name|><|fim▁begin|>extern crate nalgebra;
use nalgebra::*;
use om::koe::*;
use om::cb::*;
use tick::*;
use push::*;
use std::rc::*;
/// #Cartesian State Vectors
/// This structure represents an orbit using a
/// radius vector and a velocity vector.
/// It holds a reference to the central body.
#[derive(Clone)]
pub struct CSV {
/// Radius vector.
pub r: Vec3<f64>,
/// Velocity.
pub v: Vec3<f64>,
/// Reference to the central body.
pub cb: Rc<CB>,
}
impl Tick for CSV {
fn tick(&self, dt: f64) -> Self {
CSV {
r: self.r + self.v * dt,
cb: self.cb.clone(),
..*self
}
}
}
impl Push for CSV {
fn push(&self, dv: Vec3<f64>) -> Self {
CSV {
v: self.v + dv,
cb: self.cb.clone(),
..*self<|fim▁hole|>
impl CSV {
/// Construct CSV from position and velocity.
pub fn new(r: Vec3<f64>, v: Vec3<f64>, cb: Rc<CB>) -> CSV {
CSV {
r: r,
v: v,
cb: cb,
}
}
/// Construct CSV from KOE.
pub fn from_koe(koe: KOE) -> CSV {
// Mean anomaly
let m0 = koe.m0;
// Number of iterations for newton_raphson
let iterations = 10;
// Eccentric anomaly
let ea = CSV::newton_raphson(&m0, &koe.e, &iterations);
// True anomaly
let ta = 2.0*((1.0+koe.e).sqrt()*(ea/2.0).sin())
.atan2((1.0-koe.e).sqrt()*(ea/2.0).cos());
// Distance to the center of the central body
let dist = koe.a*(1.0-koe.e*ea.cos());
// Radius vector in i, j plane
let mut r = (koe.cb.i*ta.cos() + koe.cb.j*ta.sin()) * dist;
// Velocity in i, j plane
let mut v = (koe.cb.i*(-ea.sin()) +
koe.cb.j*((1.0-koe.e.powf(2.0)).sqrt()*ea.cos())) * ((koe.cb.mu*koe.a).sqrt()/dist);
// Radius vector in orbital plane
r = koe.rot.transform(&r);
// Velocity in orbital plane
v = koe.rot.transform(&v);
CSV::new(r, v, koe.cb.clone())
}
// Function that numerically solves Kepler's equation
fn newton_raphson(m0: &f64, e: &f64, iterations: &i32) -> f64 {
let mut ea = m0.clone();
for _ in 0..*iterations {
ea = ea - (ea - e*ea.sin() - m0)/(1.0 - e*ea.cos());
}
ea
}
}<|fim▁end|> | }
}
} |
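The private `newton_raphson` helper above solves Kepler's equation M = E − e·sin(E) for the eccentric anomaly with a fixed number of Newton iterations. As a language-neutral sketch of the same scheme (illustrative only, not part of the crate):

```python
import math

def eccentric_anomaly(m0, e, iterations=10):
    """Newton-Raphson for Kepler's equation, mirroring newton_raphson in csv.rs."""
    ea = m0  # start the iteration at the mean anomaly
    for _ in range(iterations):
        # f(E) = E - e*sin(E) - M,  f'(E) = 1 - e*cos(E)
        ea -= (ea - e * math.sin(ea) - m0) / (1.0 - e * math.cos(ea))
    return ea

# Example: mean anomaly of 1 rad, eccentricity 0.3
print(eccentric_anomaly(1.0, 0.3))
```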
<|file_name|>proxy.cpp<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2007-2015 Contributors as noted in the AUTHORS file
This file is part of libzmq, the ZeroMQ core engine in C++.
libzmq is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License (LGPL) as published
by the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
As a special exception, the Contributors give you permission to link
this library with independent modules to produce an executable,
regardless of the license terms of these independent modules, and to
copy and distribute the resulting executable under terms of your choice,
provided that you also meet, for each linked independent module, the
terms and conditions of the license of that module. An independent
module is a module which is not derived from or based on this library.
If you modify this library, you must extend this exception to your
version of the library.
libzmq is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stddef.h>
#include "poller.hpp"
#include "proxy.hpp"
#include "likely.hpp"
// On AIX platform, poll.h has to be included first to get consistent
// definition of pollfd structure (AIX uses 'reqevents' and 'retnevents'
// instead of 'events' and 'revents' and defines macros to map from POSIX-y
// names to AIX-specific names).
#if defined ZMQ_POLL_BASED_ON_POLL
#include <poll.h>
#endif
// These headers end up pulling in zmq.h somewhere in their include
// dependency chain
#include "socket_base.hpp"
#include "err.hpp"
// zmq.h must be included *after* poll.h for AIX to build properly
#include "../include/zmq.h"
int capture(
class zmq::socket_base_t *capture_,
zmq::msg_t& msg_,
int more_ = 0)
{
// Copy message to capture socket if any
if (capture_) {
zmq::msg_t ctrl;
int rc = ctrl.init ();
if (unlikely (rc < 0))
return -1;<|fim▁hole|> rc = capture_->send (&ctrl, more_? ZMQ_SNDMORE: 0);
if (unlikely (rc < 0))
return -1;
}
return 0;
}
int forward(
class zmq::socket_base_t *from_,
class zmq::socket_base_t *to_,
class zmq::socket_base_t *capture_,
zmq::msg_t& msg_)
{
int more;
size_t moresz;
while (true) {
int rc = from_->recv (&msg_, 0);
if (unlikely (rc < 0))
return -1;
moresz = sizeof more;
rc = from_->getsockopt (ZMQ_RCVMORE, &more, &moresz);
if (unlikely (rc < 0))
return -1;
// Copy message to capture socket if any
rc = capture(capture_, msg_, more);
if (unlikely (rc < 0))
return -1;
rc = to_->send (&msg_, more? ZMQ_SNDMORE: 0);
if (unlikely (rc < 0))
return -1;
if (more == 0)
break;
}
return 0;
}
int zmq::proxy (
class socket_base_t *frontend_,
class socket_base_t *backend_,
class socket_base_t *capture_,
class socket_base_t *control_)
{
msg_t msg;
int rc = msg.init ();
if (rc != 0)
return -1;
// The algorithm below assumes ratio of requests and replies processed
// under full load to be 1:1.
int more;
size_t moresz;
zmq_pollitem_t items [] = {
{ frontend_, 0, ZMQ_POLLIN, 0 },
{ backend_, 0, ZMQ_POLLIN, 0 },
{ control_, 0, ZMQ_POLLIN, 0 }
};
int qt_poll_items = (control_ ? 3 : 2);
zmq_pollitem_t itemsout [] = {
{ frontend_, 0, ZMQ_POLLOUT, 0 },
{ backend_, 0, ZMQ_POLLOUT, 0 }
};
// Proxy can be in these three states
enum {
active,
paused,
terminated
} state = active;
while (state != terminated) {
// Wait while there are either requests or replies to process.
rc = zmq_poll (&items [0], qt_poll_items, -1);
if (unlikely (rc < 0))
return -1;
// Get the pollout separately because when combining this with pollin it maxes the CPU
// because pollout shall most of the time return directly
rc = zmq_poll (&itemsout [0], 2, 0);
if (unlikely (rc < 0))
return -1;
// Process a control command if any
if (control_ && items [2].revents & ZMQ_POLLIN) {
rc = control_->recv (&msg, 0);
if (unlikely (rc < 0))
return -1;
moresz = sizeof more;
rc = control_->getsockopt (ZMQ_RCVMORE, &more, &moresz);
if (unlikely (rc < 0) || more)
return -1;
// Copy message to capture socket if any
rc = capture(capture_, msg);
if (unlikely (rc < 0))
return -1;
if (msg.size () == 5 && memcmp (msg.data (), "PAUSE", 5) == 0)
state = paused;
else
if (msg.size () == 6 && memcmp (msg.data (), "RESUME", 6) == 0)
state = active;
else
if (msg.size () == 9 && memcmp (msg.data (), "TERMINATE", 9) == 0)
state = terminated;
else {
// This is an API error, we should assert
puts ("E: invalid command sent to proxy");
zmq_assert (false);
}
}
// Process a request
if (state == active
&& items [0].revents & ZMQ_POLLIN
&& itemsout [1].revents & ZMQ_POLLOUT) {
rc = forward(frontend_, backend_, capture_,msg);
if (unlikely (rc < 0))
return -1;
}
// Process a reply
if (state == active
&& items [1].revents & ZMQ_POLLIN
&& itemsout [0].revents & ZMQ_POLLOUT) {
rc = forward(backend_, frontend_, capture_,msg);
if (unlikely (rc < 0))
return -1;
}
}
return 0;
}<|fim▁end|> | rc = ctrl.copy (msg_);
if (unlikely (rc < 0))
return -1; |
<|file_name|>decision.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import namedtuple
from botocore.vendored.requests.exceptions import ReadTimeout
from py_swf.errors import NoTaskFound
__all__ = ['DecisionClient', 'DecisionTask']
DecisionTask = namedtuple('DecisionTask', 'events task_token workflow_id workflow_run_id workflow_type')
"""Contains the metadata to execute a decision task.
See the response syntax in :meth:`~SWF.Client.poll_for_decision_task`.
"""
def nametuplefy(thing):
"""Recursively turns a dict into namedtuples."""
if type(thing) == dict:
# Only supports string keys
Dict = namedtuple('Dict', ' '.join(thing.keys()))
nametuplefied_children = {}
for k, v in thing.items():
nametuplefied_children[k] = nametuplefy(v)
return Dict(**nametuplefied_children)
if type(thing) == list:
return list(map(nametuplefy, thing))
else:
return thing
class DecisionClient(object):
"""A client that provides a pythonic API for polling and responding to decision tasks through an SWF boto3 client.
:param decision_config: Contains SWF values commonly used when making SWF api calls.
:type decision_config: :class:`~py_swf.config_definitions.DecisionConfig`
:param boto_client: A raw SWF boto3 client.
:type boto_client: :class:`~SWF.Client`
"""
def __init__(self, decision_config, boto_client):
self.decision_config = decision_config
self.boto_client = boto_client
def poll(self, identity=None, use_raw_event_history=False):
"""Opens a connection to AWS and long-polls for decision tasks.
When a decision is available, this function will return with exactly one decision task to execute.
Only returns a contiguous subset of the most recent events.
If you want to grab the entire history for a workflow, use :meth:`~py_swf.decision.DecisionClient.walk_execution_history`
Passthrough to :meth:`~SWF.Client.poll_for_decision_task`.
:param identity: A freeform text that identifies the client that performed the longpoll. Useful for debugging history.
:type identity: string
:param use_raw_event_history: Whether to use the raw dictionary event history returned from AWS.
Otherwise attempts to turn dictionaries into namedtuples recursively.
:type use_raw_event_history: bool
:return: A decision task to execute.
:rtype: DecisionTask
:raises py_swf.errors.NoTaskFound: Raised when polling for a decision task times out without receiving any tasks.
"""
kwargs = dict(
domain=self.decision_config.domain,
reverseOrder=True,
taskList={
'name': self.decision_config.task_list,
},
)
# boto doesn't like None values for optional kwargs
if identity is not None:
kwargs['identity'] = identity
try:
results = self.boto_client.poll_for_decision_task(
**kwargs
)
except ReadTimeout as e:
raise NoTaskFound(e)
# Sometimes SWF gives us an incomplete response, ignore these.
if not results.get('taskToken', None):
raise NoTaskFound('Received results with no taskToken')
events = results['events']
if not use_raw_event_history:
events = nametuplefy(events)
return DecisionTask(
events=events,
task_token=results['taskToken'],
workflow_id=results['workflowExecution']['workflowId'],
workflow_run_id=results['workflowExecution']['runId'],
workflow_type=results['workflowType'],
)
def walk_execution_history(
self,
workflow_id,
workflow_run_id,
reverse_order=True,
use_raw_event_history=False,
maximum_page_size=1000,
):
"""Lazily walks through the entire workflow history for a given workflow_id. This will make successive calls
to SWF on demand when pagination is needed.
See :meth:`~SWF.Client.get_workflow_execution_history` for more information.
:param workflow_id: The workflow_id returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
:type workflow_id: string
:param workflow_run_id: The workflow_run_id returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
:type workflow_run_id: string
:param reverse_order: Passthrough for reverseOrder to :meth:`~SWF.Client.get_workflow_execution_history`
:type reverse_order: bool
:param use_raw_event_history: Whether to use the raw dictionary event history returned from AWS.
Otherwise attempts to turn dictionaries into namedtuples recursively.
:type use_raw_event_history: bool
:param maximum_page_size: Passthrough for maximumPageSize to :meth:`~SWF.Client.get_workflow_execution_history`
:type maximum_page_size: int
<|fim▁hole|> """
kwargs = dict(
domain=self.decision_config.domain,
reverseOrder=reverse_order,
execution=dict(
workflowId=workflow_id,
runId=workflow_run_id,
),
maximumPageSize=maximum_page_size,
)
while True:
results = self.boto_client.get_workflow_execution_history(
**kwargs
)
next_page_token = results.get('nextPageToken', None)
events = results['events']
for event in events:
if not use_raw_event_history:
event = nametuplefy(event)
yield event
if next_page_token is None:
break
kwargs['nextPageToken'] = next_page_token
def finish_decision_with_activity(self, task_token, activity_id, activity_name, activity_version, activity_input):
"""Responds to a given decision task's task_token to schedule an activity task to run.
Passthrough to :meth:`~SWF.Client.respond_decision_task_completed`.
:param task_token: The task_token returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
:type task_token: string
:param activity_id: A unique identifier for the activity task.
:type activity_id: string
:param activity_name: Which activity name to execute.
:type activity_name: string
:param activity_version: Version of the activity name.
:type activity_version: string
:param activity_input: Freeform text of the input for the activity.
:type activity_input: string
:return: None
:rtype: NoneType
"""
activity_task = build_activity_task(
activity_id,
activity_name,
activity_version,
activity_input,
self.decision_config,
)
self.boto_client.respond_decision_task_completed(
taskToken=task_token,
decisions=[activity_task],
)
def finish_workflow(self, task_token, result):
"""Responds to a given decision task's task_token to finish and terminate the workflow.
Passthrough to :meth:`~SWF.Client.respond_decision_task_completed`.
:param task_token: The task_token returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
:type task_token: string
:param result: Freeform text that represents the final result of the workflow.
:type result: string
:return: None
:rtype: NoneType
"""
workflow_complete = build_workflow_complete(result)
self.boto_client.respond_decision_task_completed(
taskToken=task_token,
decisions=[workflow_complete],
)
def build_workflow_complete(result):
return {
'decisionType': 'CompleteWorkflowExecution',
'completeWorkflowExecutionDecisionAttributes': {
'result': result,
},
}
def build_activity_task(activity_id, activity_name, activity_version, input, decision_config):
return {
'decisionType': 'ScheduleActivityTask',
'scheduleActivityTaskDecisionAttributes': {
'activityType': {
'name': activity_name,
'version': activity_version,
},
'activityId': activity_id,
'input': input,
'taskList': {
'name': decision_config.task_list,
},
'scheduleToCloseTimeout': str(decision_config.schedule_to_close_timeout),
'scheduleToStartTimeout': str(decision_config.schedule_to_start_timeout),
'startToCloseTimeout': str(decision_config.start_to_close_timeout),
'heartbeatTimeout': str(decision_config.heartbeat_timeout),
},
}<|fim▁end|> | :return: A generator that returns successive elements in the workflow execution history.
:rtype: collections.Iterable |
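A minimal usage sketch for the client above. The `DecisionConfig` field names are inferred from the attributes the code reads (`domain`, `task_list`, and the four timeouts); its exact constructor signature is an assumption, the import paths are taken from the docstrings, and the boto3 client creation is plain boto3 rather than part of this module:

```python
# Sketch only: DecisionConfig constructor signature is an assumption inferred
# from the decision_config attributes used above; import paths follow the docstrings.
import boto3
from py_swf.errors import NoTaskFound
from py_swf.config_definitions import DecisionConfig
from py_swf.clients.decision import DecisionClient

boto_client = boto3.client('swf')
config = DecisionConfig(
    domain='example-domain',
    task_list='example-task-list',
    schedule_to_close_timeout=600,
    schedule_to_start_timeout=120,
    start_to_close_timeout=300,
    heartbeat_timeout=60,
)
client = DecisionClient(config, boto_client)

try:
    task = client.poll(identity='decider-1')
    # ... inspect task.events and decide what to do ...
    client.finish_workflow(task.task_token, result='done')
except NoTaskFound:
    pass  # long-poll timed out with no decision task; poll again
```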
<|file_name|>jdb_dbinfo.cpp<|end_file_name|><|fim▁begin|>INTERFACE:
#include "initcalls.h"
#include "types.h"
class Jdb_symbol_info;
class Jdb_lines_info;
class Jdb_dbinfo
{
};
//---------------------------------------------------------------------------
IMPLEMENTATION:
#include "config.h"
// We have to do this here because Jdb_symbol and Jdb_lines must not depend
// on Kmem_alloc.
PRIVATE static inline NOEXPORT
void
Jdb_dbinfo::init_symbols_lines ()
{
Mword p;
p = (sizeof(Jdb_symbol_info)*Jdb_symbol::Max_tasks) >> Config::PAGE_SHIFT;
Jdb_symbol::init(Kmem_alloc::allocator()
->unaligned_alloc(p*Config::PAGE_SIZE), p);
p = (sizeof(Jdb_lines_info) *Jdb_lines::Max_tasks) >> Config::PAGE_SHIFT;
Jdb_lines::init(Kmem_alloc::allocator()
->unaligned_alloc(p*Config::PAGE_SIZE), p);
}
//---------------------------------------------------------------------------
IMPLEMENTATION[ia32,amd64]:
#include "cpu_lock.h"
#include "jdb_lines.h"
#include "jdb_symbol.h"
#include "kmem.h"
#include "kmem_alloc.h"
#include "mem_layout.h"
#include "mem_unit.h"
#include "paging.h"
#include "space.h"
#include "static_init.h"
const Address area_start = Mem_layout::Jdb_debug_start;
const Address area_end = Mem_layout::Jdb_debug_end;
const unsigned area_size = area_end - area_start;
const unsigned bitmap_size = (area_size / Config::PAGE_SIZE) / 8;
// We don't use the amm library here anymore since it is nearly impossible
// to debug it and I got some strange behavior. Instead of this we use a
// simple bitfield here that takes 2k for a virtual memory size of 64MB
// which is enough for the Jdb debug info. Speed for allocating/deallocating
// pages is not an issue here.
static unsigned char bitmap[bitmap_size];
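// Quick sanity check of the "2k" figure mentioned above (assuming 4 KiB pages, an
// assumption not stated here): 64 MiB / 4 KiB = 16384 pages, one bit per page ->
// 16384 / 8 = 2048 bytes, i.e. roughly 2 KiB for the whole bitmap.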
STATIC_INITIALIZE(Jdb_dbinfo);
//---------------------------------------------------------------------------
IMPLEMENTATION[ia32, amd64]:
PUBLIC static FIASCO_INIT
void
Jdb_dbinfo::init()
{
Address addr;
for (addr = area_start; addr < area_end; addr += Config::SUPERPAGE_SIZE)
Kmem::kdir->walk(Virt_addr(addr), 100, pdir_alloc(Kmem_alloc::allocator()));
init_symbols_lines();
}
PRIVATE static
Address
Jdb_dbinfo::reserve_pages(unsigned pages)
{
auto guard = lock_guard(cpu_lock);
Unsigned8 *ptr, bit;
for (ptr=bitmap, bit=0; ptr<bitmap+bitmap_size;)
{
Unsigned8 *ptr1, bit1, c;
unsigned pages1;
for (ptr1=ptr, bit1=bit, pages1=pages;;)
{
if (ptr1>=bitmap+bitmap_size)
return 0;
c = *ptr1 & (1<<bit1);
if (++bit1 >= 8)
{
bit1 = 0;
ptr1++;
}
if (c)
{
ptr = ptr1;
bit = bit1;
break;
}
if (!--pages1)
{
// found area -- mark it as reserved
for (ptr1=ptr, bit1=bit, pages1=pages; pages1>0; pages1--)
{
*ptr1 |= (1<<bit1);
if (++bit1 >= 8)
{
bit1 = 0;
ptr1++;
}
}
return area_start + Config::PAGE_SIZE * (8*(ptr-bitmap) + bit);
}
}
}
return 0;
}
PRIVATE static
void
Jdb_dbinfo::return_pages(Address addr, unsigned pages)
{
auto guard = lock_guard(cpu_lock);
unsigned nr_page = (addr-area_start) / Config::PAGE_SIZE;
Unsigned8 *ptr = bitmap + nr_page/8, bit = nr_page % 8;
for (; pages && ptr < bitmap+bitmap_size; pages--)
{
assert (*ptr & (1<<bit));
*ptr &= ~(1<<bit);
if (++bit >= 8)
{
bit = 0;
ptr++;
}
}
}
//---------------------------------------------------------------------------
IMPLEMENTATION[ia32, amd64]:
PUBLIC static
bool
Jdb_dbinfo::map(Address phys, size_t &size, Address &virt)
{
Address offs = phys & ~Config::PAGE_MASK;
size = (offs + size + Config::PAGE_SIZE - 1) & Config::PAGE_MASK;
virt = reserve_pages (size / Config::PAGE_SIZE);
if (!virt)
return false;
phys &= Config::PAGE_MASK;
Kmem::kdir->map(phys, Virt_addr(virt), Virt_size(size),
Pt_entry::Valid | Pt_entry::Writable | Pt_entry::Referenced
| Pt_entry::Dirty, 100, Ptab::Null_alloc());
virt += offs;
return true;
}
PUBLIC static
void
Jdb_dbinfo::unmap(Address virt, size_t size)
{
if (virt && size)
{
virt &= Config::PAGE_MASK;
Kmem::kdir->unmap(Virt_addr(virt), Virt_size(size), 100);
Mem_unit::tlb_flush ();
return_pages(virt, size/Config::PAGE_SIZE);
}
}
PUBLIC static
void
Jdb_dbinfo::set(Jdb_symbol_info *sym, Address phys, size_t size)
{
Address virt;
if (!sym)
return;
if (!phys)
{
sym->get (virt, size);
if (! virt)
return;
unmap (virt, size);
sym->reset ();
return;
}
if (! map (phys, size, virt))
return;
if (! sym->set (virt, size))
{
unmap (virt, size);
sym->reset ();
}
}
PUBLIC static
void
Jdb_dbinfo::set(Jdb_lines_info *lin, Address phys, size_t size)
{
Address virt;
if (!lin)
return;
if (!phys)
{
lin->get(virt, size);
if (! virt)
return;
unmap(virt, size);
lin->reset ();
}
if (!map(phys, size, virt))
return;
if (!lin->set(virt, size))
{
unmap(virt, size);
lin->reset();
}
}
//---------------------------------------------------------------------------
IMPLEMENTATION[ux]:
// No special mapping required for UX since all physical memory is mapped
#include "jdb_lines.h"
#include "jdb_symbol.h"
#include "kmem_alloc.h"
#include "mem_layout.h"
#include "static_init.h"
<|fim▁hole|>
PUBLIC static
void
Jdb_dbinfo::init()
{
init_symbols_lines();
}
PUBLIC static
void
Jdb_dbinfo::set(Jdb_symbol_info *sym, Address phys, size_t size)
{
if (!sym)
return;
if (!phys)
sym->reset();
else
sym->set(Mem_layout::phys_to_pmem(phys), size);
}
PUBLIC static
void
Jdb_dbinfo::set(Jdb_lines_info *lin, Address phys, size_t size)
{
if (!lin)
return;
if (!phys)
lin->reset();
else
lin->set(Mem_layout::phys_to_pmem(phys), size);
}<|fim▁end|> | STATIC_INITIALIZE(Jdb_dbinfo); |
<|file_name|>climsig.py<|end_file_name|><|fim▁begin|>r"""
Modeling and inversion of temperature residuals measured in wells due to
temperature perturbations in the surface.
Perturbations can be of two kinds: **abrupt** or **linear**.
Forward modeling of these types of changes is done with functions:
* :func:`~fatiando.geothermal.climsig.abrupt`
* :func:`~fatiando.geothermal.climsig.linear`
Assuming that the temperature perturbation was abrupt, the residual
temperature at a depth :math:`z_i` in the well at a time :math:`t` after the
perturbation is given by
.. math::
T_i(z_i) = A \left[1 - \mathrm{erf}\left(
\frac{z_i}{\sqrt{4\lambda t}}\right)\right]
where :math:`A` is the amplitude of the perturbation, :math:`\lambda` is the
thermal diffusivity of the medium, and :math:`\mathrm{erf}` is the error
function.
For the case of a linear change, the temperature is
.. math::
T_i(z_i) = A \left[
\left(1 + 2\frac{z_i^2}{4\lambda t}\right)
\mathrm{erfc}\left(\frac{z_i}{\sqrt{4\lambda t}}\right) -
\frac{2}{\sqrt{\pi}}\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\mathrm{exp}\left(-\frac{z_i^2}{4\lambda t}\right)
\right]
Given the temperature measured at different depths, we can **invert** for the
amplitude and age of the change. The available inversion solvers are:
* :class:`~fatiando.geothermal.climsig.SingleChange`: inverts for the
parameters of a single temperature change. Can use both abrupt and linear
models.
----
"""
from __future__ import division
import numpy
import scipy.special
from ..inversion.base import Misfit
from ..constants import THERMAL_DIFFUSIVITY_YEAR
def linear(amp, age, zp, diffus=THERMAL_DIFFUSIVITY_YEAR):
"""
Calculate the residual temperature profile in depth due to a linear
temperature perturbation.
Parameters:
* amp : float
Amplitude of the perturbation (in C)
* age : float
Time since the perturbation occurred (in years)
* zp : array
The depths of computation points along the well (in meters)
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
See the default values for the thermal diffusivity in
:mod:`fatiando.constants`.
Returns
* temp : array
The residual temperatures measured along the well
"""
tmp = zp / numpy.sqrt(4. * diffus * age)
res = amp * ((1. + 2 * tmp ** 2) * scipy.special.erfc(tmp)
- 2. / numpy.sqrt(numpy.pi) * tmp * numpy.exp(-tmp ** 2))
return res
def abrupt(amp, age, zp, diffus=THERMAL_DIFFUSIVITY_YEAR):
"""
Calculate the residual temperature profile in depth due to an abrupt
temperature perturbation.
Parameters:
* amp : float
Amplitude of the perturbation (in C)
* age : float
Time since the perturbation occurred (in years)
* zp : array
Array with the depths of computation points along the well (in meters)
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
See the default values for the thermal diffusivity in
:mod:`fatiando.constants`.
Returns
* temp : array<|fim▁hole|> """
return amp * (1. - scipy.special.erf(zp / numpy.sqrt(4. * diffus * age)))
class SingleChange(Misfit):
r"""
Invert the well temperature data for a single change in temperature.
The parameters of the change are its amplitude and age.
See the docstring of :mod:`fatiando.geothermal.climsig` for more
information and examples.
Parameters:
* temp : array
The temperature profile
* zp : array
Depths along the profile
* mode : string
The type of change: ``'abrupt'`` for an abrupt change, ``'linear'`` for
a linear change.
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
.. note::
The recommended solver for this inverse problem is the
Levenberg-Marquardt method. Since this is a non-linear problem, set the
desired method and initial solution using the
:meth:`~fatiando.inversion.base.FitMixin.config` method.
See the example below.
Example with synthetic data:
>>> import numpy
>>> zp = numpy.arange(0, 100, 1)
>>> # For an ABRUPT change
>>> amp = 2
>>> age = 100 # Uses years to avoid overflows
>>> temp = abrupt(amp, age, zp)
>>> # Run the inversion for the amplitude and time
>>> # This is a non-linear problem, so use the Levenberg-Marquardt
>>> # algorithm with an initial estimate
>>> solver = SingleChange(temp, zp, mode='abrupt').config(
... 'levmarq', initial=[1, 1])
>>> amp_, age_ = solver.fit().estimate_
>>> print "amp: %.2f age: %.2f" % (amp_, age_)
amp: 2.00 age: 100.00
>>> # For a LINEAR change
>>> amp = 3.45
>>> age = 52.5
>>> temp = linear(amp, age, zp)
>>> solver = SingleChange(temp, zp, mode='linear').config(
... 'levmarq', initial=[1, 1])
>>> amp_, age_ = solver.fit().estimate_
>>> print "amp: %.2f age: %.2f" % (amp_, age_)
amp: 3.45 age: 52.50
Notes:
For **abrupt** changes, derivatives with respect to the amplitude and age
are calculated using the formula
.. math::
\frac{\partial T_i}{\partial A} = 1 - \mathrm{erf}\left(
\frac{z_i}{\sqrt{4\lambda t}}\right)
and
.. math::
\frac{\partial T_i}{\partial t} = \frac{A}{t\sqrt{\pi}}
\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\exp\left[-\left(\frac{z_i}{\sqrt{4\lambda t}}\right)^2\right]
respectively.
For **linear** changes, derivatives with respect to the age are calculated
using a 2-point finite difference approximation. Derivatives with respect
to amplitude are calculate using the formula
.. math::
\frac{\partial T_i}{\partial A} =
\left(1 + 2\frac{z_i^2}{4\lambda t}\right)
\mathrm{erfc}\left(\frac{z_i}{\sqrt{4\lambda t}}\right) -
\frac{2}{\sqrt{\pi}}\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\mathrm{exp}\left(-\frac{z_i^2}{4\lambda t}\right)
"""
def __init__(self, temp, zp, mode, diffus=THERMAL_DIFFUSIVITY_YEAR):
if len(temp) != len(zp):
raise ValueError("temp and zp must be of same length")
if mode not in ['abrupt', 'linear']:
raise ValueError("Invalid mode: %s. Must be 'abrupt' or 'linear'"
% (mode))
super(SingleChange, self).__init__(
data=temp,
positional=dict(zp=zp),
model=dict(diffus=float(diffus), mode=mode),
nparams=2, islinear=False)
def _get_predicted(self, p):
amp, age = p
zp = self.positional['zp']
diffus = self.model['diffus']
if self.model['mode'] == 'abrupt':
return abrupt(amp, age, zp, diffus)
if self.model['mode'] == 'linear':
return linear(amp, age, zp, diffus)
def _get_jacobian(self, p):
amp, age = p
zp = self.positional['zp']
diffus = self.model['diffus']
mode = self.model['mode']
if mode == 'abrupt':
tmp = zp / numpy.sqrt(4. * diffus * age)
jac = numpy.transpose([
abrupt(1., age, zp, diffus),
(amp * tmp * numpy.exp(-(tmp ** 2)) /
(numpy.sqrt(numpy.pi) * age))])
if mode == 'linear':
delta = 0.5
at_p = linear(amp, age, zp, diffus)
jac = numpy.transpose([
linear(1., age, zp, diffus),
(linear(amp, age + delta, zp, diffus) -
linear(amp, age - delta, zp, diffus)) / (2 * delta)])
return jac<|fim▁end|> | The residual temperatures measured along the well
|
<|file_name|>model.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | Parkings = new Meteor.Collection("parkings"); |
<|file_name|>dict.oxford.ts<|end_file_name|><|fim▁begin|>import { NetDict } from "./dict/net-dict"
const { debug } = require("b-logger")("copilot.dict.oxford")
export interface ISenses {
definitions: string[],
examples?: [{ text: string }],
subsenses?: ISenses[],
}
export interface IOxfordParam {
results: [{
id: string,
language: string,
lexicalEntries: [{
entries: [
{
etymologies: string[],
homographNumber: string,
senses: ISenses[],
variantForms: [{ text: string }]
}
],
pronunciations: [{
dialects: string[],
phoneticSpelling: string
}]
}]
}]
id: string,
}
class Oxford extends NetDict<IOxfordParam> {
constructor() {
super({
format: (ret) => {
let list = []
for (let result of ret.results) {
for (let lex of result.lexicalEntries) {
let p = lex.pronunciations
.map(e => `${e.dialects.join(" ")}[${e.phoneticSpelling}]`)
.join(";")
list.push({
title: ret.id,
text: p,
value: p
})
for (let entry of lex.entries) {
let sense = entry.senses
while (sense.length) {
let s = sense.shift()
if (s.subsenses) {
sense.push(...s.subsenses)
}
list.push(...s.definitions.map(d => ({
text: `[Definition]${d}`
})))
if (s.examples) {
list.push(...s.examples.map(e => ({
text: `[example]${e.text}`
})))
}
}
}
}
}
return list
}
})
}
public init(param: any) {
super.init(param)
this.header = {
Accept: "application/json",
app_id: this.cfg.id,
app_key: this.cfg.key
}
}
}
let oxford: any = new Oxford()
oxford.default = oxford.lookup
export default oxford
/*
{
"metadata": {
"provider": "Oxford University Press"
},
"results": [
{
"id": "hello",
"language": "en",
"lexicalEntries": [
{
"entries": [
{
"etymologies": [
"early 19th century: variant of earlier hollo; related to holla"
],
"homographNumber": "000",
"senses": [
{
"definitions": [
"used as a greeting or to begin a telephone conversation"
],
"examples": [
{
"text": "hello there, Katie!"
}
],
"id": "m_en_gbus0460730.012",
"subsenses": [
{
"definitions": [
"used to express surprise"
],
"examples": [
{
"text": "hello, what's all this then?"
}
],
"id": "m_en_gbus0460730.017",
"regions": [
"British"
]
},
{
"definitions": [
"used as a cry to attract someone's attention"
],
"examples": [
{
"text": "‘Hello below!’ he cried"
}
],
"id": "m_en_gbus0460730.018"
},
{
"definitions": [
"used informally to express sarcasm or anger"
],
"examples": [
{
"text": "Hello! Did you even get what the play was about?"
}
],
"id": "m_en_gbus0460730.019"
}
]
}
],
"variantForms": [
{
"text": "hallo"
},
{
"text": "hullo"
}
]
}
],
"language": "en",
"lexicalCategory": "Interjection",
"pronunciations": [
{
"audioFile": "http://audio.oxforddictionaries.com/en/mp3/hello_gb_1.mp3",
"dialects": [
"British English"
],
"phoneticNotation": "IPA",
"phoneticSpelling": "həˈləʊ"
},
{
"dialects": [
"British English"
],
"phoneticNotation": "IPA",
"phoneticSpelling": "hɛˈləʊ"
}
],
"text": "hello"
},
{
"entries": [
{
"grammaticalFeatures": [
{
"text": "Singular",
"type": "Number"
}
],
"homographNumber": "001",
"senses": [
{
"definitions": [
"an utterance of ‘hello’; a greeting"
],<|fim▁hole|> }
],
"id": "m_en_gbus0460730.025"
}
],
"variantForms": [
{
"text": "hullo"
},
{
"text": "hallo"
}
]
}
],
"language": "en",
"lexicalCategory": "Noun",
"pronunciations": [
{
"audioFile": "http://audio.oxforddictionaries.com/en/mp3/hello_gb_1.mp3",
"dialects": [
"British English"
],
"phoneticNotation": "IPA",
"phoneticSpelling": "həˈləʊ"
},
{
"dialects": [
"British English"
],
"phoneticNotation": "IPA",
"phoneticSpelling": "hɛˈləʊ"
}
],
"text": "hello"
},
{
"entries": [
{
"grammaticalFeatures": [
{
"text": "Intransitive",
"type": "Subcategorization"
},
{
"text": "Present",
"type": "Tense"
}
],
"homographNumber": "002",
"senses": [
{
"definitions": [
"say or shout ‘hello’"
],
"examples": [
{
"text": "I pressed the phone button and helloed"
}
],
"id": "m_en_gbus0460730.034"
}
],
"variantForms": [
{
"text": "hallo"
},
{
"text": "hullo"
}
]
}
],
"language": "en",
"lexicalCategory": "Verb",
"pronunciations": [
{
"audioFile": "http://audio.oxforddictionaries.com/en/mp3/hello_gb_1.mp3",
"dialects": [
"British English"
],
"phoneticNotation": "IPA",
"phoneticSpelling": "həˈləʊ"
},
{
"dialects": [
"British English"
],
"phoneticNotation": "IPA",
"phoneticSpelling": "hɛˈləʊ"
}
],
"text": "hello"
}
],
"type": "headword",
"word": "hello"
}
]
}
*/<|fim▁end|> | "examples": [
{
"text": "she was getting polite nods and hellos from people" |
<|file_name|>default_auth.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
logger = logging.getLogger('magiccontent.default_auth')
def naive_can_edit(request):
logger.warning(<|fim▁hole|> 'GALLERY_PAGE_IS_OWNER_METHOD to improve the content security'))
if request.user.is_authenticated() and request.user.is_staff:
return True
return False<|fim▁end|> | ('naive_can_edit method has been used, please provide a ' |
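A hypothetical sketch of the stricter check the warning above asks for; the function name, the permission codename, and the way it would be wired in through `GALLERY_PAGE_IS_OWNER_METHOD` are all assumptions about the host project, not part of magiccontent:

```python
# Hypothetical replacement with the same (request) signature as naive_can_edit.
# The permission codename and the registration mechanism are assumptions.
def owner_can_edit(request):
    user = request.user
    return user.is_authenticated() and user.has_perm('magiccontent.change_content')
```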
<|file_name|>KeyStore.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.security;
import java.io.*;
import java.net.URI;
import java.security.cert.Certificate;
import java.security.cert.X509Certificate;
import java.security.cert.CertificateException;
import java.security.spec.AlgorithmParameterSpec;
import java.util.*;
import javax.crypto.SecretKey;
import javax.security.auth.DestroyFailedException;
import javax.security.auth.callback.*;
/**
* This class represents a storage facility for cryptographic
* keys and certificates.
*
* <p> A {@code KeyStore} manages different types of entries.
* Each type of entry implements the {@code KeyStore.Entry} interface.
* Three basic {@code KeyStore.Entry} implementations are provided:
*
* <ul>
* <li><b>KeyStore.PrivateKeyEntry</b>
* <p> This type of entry holds a cryptographic {@code PrivateKey},
* which is optionally stored in a protected format to prevent
* unauthorized access. It is also accompanied by a certificate chain
* for the corresponding public key.
*
* <p> Private keys and certificate chains are used by a given entity for
* self-authentication. Applications for this authentication include software
* distribution organizations which sign JAR files as part of releasing
* and/or licensing software.
*
* <li><b>KeyStore.SecretKeyEntry</b>
* <p> This type of entry holds a cryptographic {@code SecretKey},
* which is optionally stored in a protected format to prevent
* unauthorized access.
*
* <li><b>KeyStore.TrustedCertificateEntry</b>
* <p> This type of entry contains a single public key {@code Certificate}
* belonging to another party. It is called a <i>trusted certificate</i>
* because the keystore owner trusts that the public key in the certificate
* indeed belongs to the identity identified by the <i>subject</i> (owner)
* of the certificate.
*
* <p>This type of entry can be used to authenticate other parties.
* </ul>
*
* <p> Each entry in a keystore is identified by an "alias" string. In the
* case of private keys and their associated certificate chains, these strings
* distinguish among the different ways in which the entity may authenticate
* itself. For example, the entity may authenticate itself using different
* certificate authorities, or using different public key algorithms.
*
* <p> Whether aliases are case sensitive is implementation dependent. In order
* to avoid problems, it is recommended not to use aliases in a KeyStore that
* only differ in case.
*
* <p> Whether keystores are persistent, and the mechanisms used by the
* keystore if it is persistent, are not specified here. This allows
* use of a variety of techniques for protecting sensitive (e.g., private or
* secret) keys. Smart cards or other integrated cryptographic engines
* (SafeKeyper) are one option, and simpler mechanisms such as files may also
* be used (in a variety of formats).
*
* <p> Typical ways to request a KeyStore object include
* relying on the default type and providing a specific keystore type.
*
* <ul>
* <li>To rely on the default type:
* <pre>
* KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
* </pre>
* The system will return a keystore implementation for the default type.
*
* <li>To provide a specific keystore type:
* <pre>
* KeyStore ks = KeyStore.getInstance("JKS");
* </pre>
* The system will return the most preferred implementation of the
* specified keystore type available in the environment. <p>
* </ul>
*
* <p> Before a keystore can be accessed, it must be
* {@link #load(java.io.InputStream, char[]) loaded}.
* <pre>
* KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
*
* // get user password and file input stream
* char[] password = getPassword();
*
* try (FileInputStream fis = new FileInputStream("keyStoreName")) {
* ks.load(fis, password);
* }
* </pre>
*
* To create an empty keystore using the above {@code load} method,
* pass {@code null} as the {@code InputStream} argument.
*
* <p> Once the keystore has been loaded, it is possible
* to read existing entries from the keystore, or to write new entries
* into the keystore:
* <pre>
* KeyStore.ProtectionParameter protParam =
* new KeyStore.PasswordProtection(password);
*
* // get my private key
* KeyStore.PrivateKeyEntry pkEntry = (KeyStore.PrivateKeyEntry)
* ks.getEntry("privateKeyAlias", protParam);
* PrivateKey myPrivateKey = pkEntry.getPrivateKey();
*
* // save my secret key
* javax.crypto.SecretKey mySecretKey;
* KeyStore.SecretKeyEntry skEntry =
* new KeyStore.SecretKeyEntry(mySecretKey);
* ks.setEntry("secretKeyAlias", skEntry, protParam);
*
* // store away the keystore
* try (FileOutputStream fos = new FileOutputStream("newKeyStoreName")) {
* ks.store(fos, password);
* }
* </pre>
*
* Note that although the same password may be used to
* load the keystore, to protect the private key entry,
* to protect the secret key entry, and to store the keystore
* (as is shown in the sample code above),
* different passwords or other protection parameters
* may also be used.
*
* <p> Android provides the following <code>KeyStore</code> types:
* <table>
* <thead>
* <tr>
* <th>Algorithm</th>
* <th>Supported API Levels</th>
* </tr>
* </thead>
* <tbody>
* <tr>
* <td>AndroidCAStore</td>
* <td>14+</td>
* </tr>
* <tr>
* <td>AndroidKeyStore</td>
* <td>18+</td>
* </tr>
* <tr class="deprecated">
* <td>BCPKCS12</td>
* <td>1-8</td>
* </tr>
* <tr>
* <td>BKS</td>
* <td>1+</td>
* </tr>
* <tr>
* <td>BouncyCastle</td>
* <td>1+</td>
* </tr>
* <tr>
* <td>PKCS12</td>
* <td>1+</td>
* </tr>
* <tr class="deprecated">
* <td>PKCS12-DEF</td>
* <td>1-8</td>
* </tr>
* </tbody>
* </table>
*
* These types are described in the <a href=
* "{@docRoot}/../technotes/guides/security/StandardNames.html#KeyStore">
* KeyStore section</a> of the
* Java Cryptography Architecture Standard Algorithm Name Documentation.
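 *
 * <p> As an illustrative sketch (added for clarity, not part of the original
 * text): the hardware-backed {@code "AndroidKeyStore"} type from the table
 * above is not file-based, so it is loaded with a {@code null} stream:
 * <pre>
 *    KeyStore androidKs = KeyStore.getInstance("AndroidKeyStore");
 *    androidKs.load(null);
 * </pre>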
*
* @author Jan Luehe
*
* @see java.security.PrivateKey
* @see javax.crypto.SecretKey
* @see java.security.cert.Certificate
*
* @since 1.2
*/
public class KeyStore {
// BEGIN Android-removed: this debugging mechanism is not supported in Android.
/*
private static final Debug pdebug =
Debug.getInstance("provider", "Provider");
private static final boolean skipDebug =
Debug.isOn("engine=") && !Debug.isOn("keystore");
*/
// END Android-removed: this debugging mechanism is not supported in Android.
/*
* Constant to lookup in the Security properties file to determine
* the default keystore type.
* In the Security properties file, the default keystore type is given as:
* <pre>
* keystore.type=jks
* </pre>
*/
private static final String KEYSTORE_TYPE = "keystore.type";
// The keystore type
private String type;
// The provider
private Provider provider;
// The provider implementation
private KeyStoreSpi keyStoreSpi;
// Has this keystore been initialized (loaded)?
private boolean initialized = false;
/**
* A marker interface for {@code KeyStore}
* {@link #load(KeyStore.LoadStoreParameter) load}
* and
* {@link #store(KeyStore.LoadStoreParameter) store}
* parameters.
*
* @since 1.5
*/
public static interface LoadStoreParameter {
/**
* Gets the parameter used to protect keystore data.
*
* @return the parameter used to protect keystore data, or null
*/
public ProtectionParameter getProtectionParameter();
}
/**
* A marker interface for keystore protection parameters.
*
* <p> The information stored in a {@code ProtectionParameter}
* object protects the contents of a keystore.
* For example, protection parameters may be used to check
* the integrity of keystore data, or to protect the
* confidentiality of sensitive keystore data
* (such as a {@code PrivateKey}).
*
* @since 1.5
*/
public static interface ProtectionParameter { }
/**
* A password-based implementation of {@code ProtectionParameter}.
*
* @since 1.5
*/
public static class PasswordProtection implements
ProtectionParameter, javax.security.auth.Destroyable {
private final char[] password;
private final String protectionAlgorithm;
private final AlgorithmParameterSpec protectionParameters;
private volatile boolean destroyed = false;
/**
* Creates a password parameter.
*
* <p> The specified {@code password} is cloned before it is stored
* in the new {@code PasswordProtection} object.
*
* @param password the password, which may be {@code null}
*/
public PasswordProtection(char[] password) {
this.password = (password == null) ? null : password.clone();
this.protectionAlgorithm = null;
this.protectionParameters = null;
}
/**
* Creates a password parameter and specifies the protection algorithm
* and associated parameters to use when encrypting a keystore entry.
* <p>
* The specified {@code password} is cloned before it is stored in the
* new {@code PasswordProtection} object.
*
* @param password the password, which may be {@code null}
* @param protectionAlgorithm the encryption algorithm name, for
* example, {@code PBEWithHmacSHA256AndAES_256}.
* See the Cipher section in the <a href=
* "{@docRoot}/../technotes/guides/security/StandardNames.html#Cipher">
* Java Cryptography Architecture Standard Algorithm Name
* Documentation</a>
* for information about standard encryption algorithm names.
* @param protectionParameters the encryption algorithm parameter
* specification, which may be {@code null}
* @exception NullPointerException if {@code protectionAlgorithm} is
* {@code null}
*
* @since 1.8
*/
public PasswordProtection(char[] password, String protectionAlgorithm,
AlgorithmParameterSpec protectionParameters) {
if (protectionAlgorithm == null) {
throw new NullPointerException("invalid null input");
}
this.password = (password == null) ? null : password.clone();
this.protectionAlgorithm = protectionAlgorithm;
this.protectionParameters = protectionParameters;
}
/**
* Gets the name of the protection algorithm.
* If none was set then the keystore provider will use its default
* protection algorithm. The name of the default protection algorithm
* for a given keystore type is set using the
* {@code 'keystore.<type>.keyProtectionAlgorithm'} security property.
* For example, the
* {@code keystore.PKCS12.keyProtectionAlgorithm} property stores the
* name of the default key protection algorithm used for PKCS12
* keystores. If the security property is not set, an
* implementation-specific algorithm will be used.
*
* @return the algorithm name, or {@code null} if none was set
*
* @since 1.8
*/
public String getProtectionAlgorithm() {
return protectionAlgorithm;
}
/**
* Gets the parameters supplied for the protection algorithm.
*
* @return the algorithm parameter specification, or {@code null},
* if none was set
*
* @since 1.8
*/
public AlgorithmParameterSpec getProtectionParameters() {
return protectionParameters;
}
/**
* Gets the password.
*
* <p>Note that this method returns a reference to the password.
* If a clone of the array is created it is the caller's
* responsibility to zero out the password information
* after it is no longer needed.
*
* @see #destroy()
* @return the password, which may be {@code null}
* @exception IllegalStateException if the password has
* been cleared (destroyed)
*/
public synchronized char[] getPassword() {
if (destroyed) {
throw new IllegalStateException("password has been cleared");
}
return password;
}
/**
* Clears the password.
*
* @exception DestroyFailedException if this method was unable
* to clear the password
*/
public synchronized void destroy() throws DestroyFailedException {
destroyed = true;
if (password != null) {
Arrays.fill(password, ' ');
}
}
/**
* Determines if password has been cleared.
*
* @return true if the password has been cleared, false otherwise
*/
public synchronized boolean isDestroyed() {
return destroyed;
}
}
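    // Illustrative usage sketch (added; ks, alias and password are assumed to
    // exist in the calling code, and exception handling is elided): a
    // PasswordProtection object is typically created per operation and
    // destroyed once the protected entry has been read.
    //
    //   KeyStore.PasswordProtection prot = new KeyStore.PasswordProtection(password);
    //   try {
    //       KeyStore.Entry entry = ks.getEntry(alias, prot);
    //   } finally {
    //       prot.destroy();
    //   }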
/**
* A ProtectionParameter encapsulating a CallbackHandler.
*
* @since 1.5
*/
public static class CallbackHandlerProtection
implements ProtectionParameter {
private final CallbackHandler handler;
/**
* Constructs a new CallbackHandlerProtection from a
* CallbackHandler.
*
* @param handler the CallbackHandler
* @exception NullPointerException if handler is null
*/
public CallbackHandlerProtection(CallbackHandler handler) {
if (handler == null) {
throw new NullPointerException("handler must not be null");
}
this.handler = handler;
}
/**
* Returns the CallbackHandler.
*
* @return the CallbackHandler.
*/
public CallbackHandler getCallbackHandler() {
return handler;
}
}
/**
* A marker interface for {@code KeyStore} entry types.
*
* @since 1.5
*/
public static interface Entry {
/**
* Retrieves the attributes associated with an entry.
* <p>
* The default implementation returns an empty {@code Set}.
*
* @return an unmodifiable {@code Set} of attributes, possibly empty
*
* @since 1.8
*/
public default Set<Attribute> getAttributes() {
return Collections.<Attribute>emptySet();
}
/**
* An attribute associated with a keystore entry.
* It comprises a name and one or more values.
*
* @since 1.8
*/
public interface Attribute {
/**
* Returns the attribute's name.
*
* @return the attribute name
*/
public String getName();
/**
* Returns the attribute's value.
* Multi-valued attributes encode their values as a single string.
*
* @return the attribute value
*/
public String getValue();
}
}
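    // Illustrative sketch (added): iterating an entry's attributes uses only
    // the interface above; for PKCS12 keystores the JDK's standard
    // implementation of this interface is java.security.PKCS12Attribute.
    //
    //   for (KeyStore.Entry.Attribute a : entry.getAttributes()) {
    //       System.out.println(a.getName() + " = " + a.getValue());
    //   }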
/**
* A {@code KeyStore} entry that holds a {@code PrivateKey}
* and corresponding certificate chain.
*
* @since 1.5
*/
public static final class PrivateKeyEntry implements Entry {
private final PrivateKey privKey;
private final Certificate[] chain;
private final Set<Attribute> attributes;
/**
* Constructs a {@code PrivateKeyEntry} with a
* {@code PrivateKey} and corresponding certificate chain.
*
* <p> The specified {@code chain} is cloned before it is stored
* in the new {@code PrivateKeyEntry} object.
*
* @param privateKey the {@code PrivateKey}
* @param chain an array of {@code Certificate}s
* representing the certificate chain.
* The chain must be ordered and contain a
* {@code Certificate} at index 0
* corresponding to the private key.
*
* @exception NullPointerException if
* {@code privateKey} or {@code chain}
* is {@code null}
* @exception IllegalArgumentException if the specified chain has a
* length of 0, if the specified chain does not contain
* {@code Certificate}s of the same type,
* or if the {@code PrivateKey} algorithm
* does not match the algorithm of the {@code PublicKey}
* in the end entity {@code Certificate} (at index 0)
*/
public PrivateKeyEntry(PrivateKey privateKey, Certificate[] chain) {
this(privateKey, chain, Collections.<Attribute>emptySet());
}
/**
* Constructs a {@code PrivateKeyEntry} with a {@code PrivateKey} and
* corresponding certificate chain and associated entry attributes.
*
* <p> The specified {@code chain} and {@code attributes} are cloned
* before they are stored in the new {@code PrivateKeyEntry} object.
*
* @param privateKey the {@code PrivateKey}
* @param chain an array of {@code Certificate}s
* representing the certificate chain.
* The chain must be ordered and contain a
* {@code Certificate} at index 0
* corresponding to the private key.
* @param attributes the attributes
*
* @exception NullPointerException if {@code privateKey}, {@code chain}
* or {@code attributes} is {@code null}
* @exception IllegalArgumentException if the specified chain has a
* length of 0, if the specified chain does not contain
* {@code Certificate}s of the same type,
* or if the {@code PrivateKey} algorithm
* does not match the algorithm of the {@code PublicKey}
* in the end entity {@code Certificate} (at index 0)
*
* @since 1.8
*/
public PrivateKeyEntry(PrivateKey privateKey, Certificate[] chain,
Set<Attribute> attributes) {
if (privateKey == null || chain == null || attributes == null) {
throw new NullPointerException("invalid null input");
}
if (chain.length == 0) {
throw new IllegalArgumentException
("invalid zero-length input chain");
}
Certificate[] clonedChain = chain.clone();
String certType = clonedChain[0].getType();
for (int i = 1; i < clonedChain.length; i++) {
if (!certType.equals(clonedChain[i].getType())) {
throw new IllegalArgumentException
("chain does not contain certificates " +
"of the same type");
}
}
if (!privateKey.getAlgorithm().equals
(clonedChain[0].getPublicKey().getAlgorithm())) {
throw new IllegalArgumentException
("private key algorithm does not match " +
"algorithm of public key in end entity " +
"certificate (at index 0)");
}
this.privKey = privateKey;
if (clonedChain[0] instanceof X509Certificate &&
!(clonedChain instanceof X509Certificate[])) {
this.chain = new X509Certificate[clonedChain.length];
System.arraycopy(clonedChain, 0,
this.chain, 0, clonedChain.length);
} else {
this.chain = clonedChain;
}
this.attributes =
Collections.unmodifiableSet(new HashSet<>(attributes));
}
/**
* Gets the {@code PrivateKey} from this entry.
*
* @return the {@code PrivateKey} from this entry
*/
public PrivateKey getPrivateKey() {
return privKey;
}
/**
* Gets the {@code Certificate} chain from this entry.
*
* <p> The stored chain is cloned before being returned.
*
* @return an array of {@code Certificate}s corresponding
* to the certificate chain for the public key.
* If the certificates are of type X.509,
* the runtime type of the returned array is
* {@code X509Certificate[]}.
*/
public Certificate[] getCertificateChain() {
return chain.clone();
}
/**
* Gets the end entity {@code Certificate}
* from the certificate chain in this entry.
*
* @return the end entity {@code Certificate} (at index 0)
* from the certificate chain in this entry.
* If the certificate is of type X.509,
* the runtime type of the returned certificate is
* {@code X509Certificate}.
*/
public Certificate getCertificate() {
return chain[0];
}
/**
* Retrieves the attributes associated with an entry.
*
* @return an unmodifiable {@code Set} of attributes, possibly empty
*
* @since 1.8
*/
@Override
public Set<Attribute> getAttributes() {
return attributes;
}
/**
* Returns a string representation of this PrivateKeyEntry.
* @return a string representation of this PrivateKeyEntry.
*/
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Private key entry and certificate chain with "
+ chain.length + " elements:\r\n");
for (Certificate cert : chain) {
sb.append(cert);
sb.append("\r\n");
}
return sb.toString();
}
}
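    // Illustrative sketch (added; privateKey, endCert and caCert are assumed
    // to be supplied by the caller): the chain is ordered with the end entity
    // certificate first, as enforced by the constructor above.
    //
    //   KeyStore.PrivateKeyEntry pkEntry = new KeyStore.PrivateKeyEntry(
    //           privateKey, new Certificate[] { endCert, caCert });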
/**
* A {@code KeyStore} entry that holds a {@code SecretKey}.
*
* @since 1.5
*/
public static final class SecretKeyEntry implements Entry {
private final SecretKey sKey;
private final Set<Attribute> attributes;
/**
* Constructs a {@code SecretKeyEntry} with a
* {@code SecretKey}.
*
* @param secretKey the {@code SecretKey}
*
* @exception NullPointerException if {@code secretKey}
* is {@code null}
*/
public SecretKeyEntry(SecretKey secretKey) {
if (secretKey == null) {
throw new NullPointerException("invalid null input");
}
this.sKey = secretKey;
this.attributes = Collections.<Attribute>emptySet();
}
/**
* Constructs a {@code SecretKeyEntry} with a {@code SecretKey} and
* associated entry attributes.
*
* <p> The specified {@code attributes} is cloned before it is stored
* in the new {@code SecretKeyEntry} object.
*
* @param secretKey the {@code SecretKey}
* @param attributes the attributes
*
* @exception NullPointerException if {@code secretKey} or
* {@code attributes} is {@code null}
*
* @since 1.8
*/
public SecretKeyEntry(SecretKey secretKey, Set<Attribute> attributes) {
if (secretKey == null || attributes == null) {
throw new NullPointerException("invalid null input");
}
this.sKey = secretKey;
this.attributes =
Collections.unmodifiableSet(new HashSet<>(attributes));
}
/**
* Gets the {@code SecretKey} from this entry.
*
* @return the {@code SecretKey} from this entry
*/
public SecretKey getSecretKey() {
return sKey;
}
/**
* Retrieves the attributes associated with an entry.
*
* @return an unmodifiable {@code Set} of attributes, possibly empty
*
* @since 1.8
*/
@Override
public Set<Attribute> getAttributes() {
return attributes;
}
/**
* Returns a string representation of this SecretKeyEntry.
* @return a string representation of this SecretKeyEntry.
*/
public String toString() {
return "Secret key entry with algorithm " + sKey.getAlgorithm();
}
}
/**
* A {@code KeyStore} entry that holds a trusted
* {@code Certificate}.
*
* @since 1.5
*/
public static final class TrustedCertificateEntry implements Entry {
private final Certificate cert;
private final Set<Attribute> attributes;
/**
* Constructs a {@code TrustedCertificateEntry} with a
* trusted {@code Certificate}.
*
* @param trustedCert the trusted {@code Certificate}
*
* @exception NullPointerException if
* {@code trustedCert} is {@code null}
*/
public TrustedCertificateEntry(Certificate trustedCert) {
if (trustedCert == null) {
throw new NullPointerException("invalid null input");
}
this.cert = trustedCert;
this.attributes = Collections.<Attribute>emptySet();
}
<|fim▁hole|> * Constructs a {@code TrustedCertificateEntry} with a
* trusted {@code Certificate} and associated entry attributes.
*
* <p> The specified {@code attributes} is cloned before it is stored
* in the new {@code TrustedCertificateEntry} object.
*
* @param trustedCert the trusted {@code Certificate}
* @param attributes the attributes
*
* @exception NullPointerException if {@code trustedCert} or
* {@code attributes} is {@code null}
*
* @since 1.8
*/
public TrustedCertificateEntry(Certificate trustedCert,
Set<Attribute> attributes) {
if (trustedCert == null || attributes == null) {
throw new NullPointerException("invalid null input");
}
this.cert = trustedCert;
this.attributes =
Collections.unmodifiableSet(new HashSet<>(attributes));
}
/**
 * Gets the trusted {@code Certificate} from this entry.
*
* @return the trusted {@code Certificate} from this entry
*/
public Certificate getTrustedCertificate() {
return cert;
}
/**
* Retrieves the attributes associated with an entry.
*
* @return an unmodifiable {@code Set} of attributes, possibly empty
*
* @since 1.8
*/
@Override
public Set<Attribute> getAttributes() {
return attributes;
}
/**
* Returns a string representation of this TrustedCertificateEntry.
* @return a string representation of this TrustedCertificateEntry.
*/
public String toString() {
return "Trusted certificate entry:\r\n" + cert.toString();
}
}
/**
* Creates a KeyStore object of the given type, and encapsulates the given
* provider implementation (SPI object) in it.
*
* @param keyStoreSpi the provider implementation.
* @param provider the provider.
* @param type the keystore type.
*/
protected KeyStore(KeyStoreSpi keyStoreSpi, Provider provider, String type)
{
this.keyStoreSpi = keyStoreSpi;
this.provider = provider;
this.type = type;
// BEGIN Android-removed: this debugging mechanism is not supported in Android.
/*
if (!skipDebug && pdebug != null) {
pdebug.println("KeyStore." + type.toUpperCase() + " type from: " +
this.provider.getName());
}
*/
// END Android-removed: this debugging mechanism is not supported in Android.
}
/**
* Returns a keystore object of the specified type.
*
* <p> This method traverses the list of registered security Providers,
* starting with the most preferred Provider.
* A new KeyStore object encapsulating the
* KeyStoreSpi implementation from the first
* Provider that supports the specified type is returned.
*
* <p> Note that the list of registered providers may be retrieved via
* the {@link Security#getProviders() Security.getProviders()} method.
*
* @param type the type of keystore.
* See the KeyStore section in the <a href=
* "{@docRoot}/../technotes/guides/security/StandardNames.html#KeyStore">
* Java Cryptography Architecture Standard Algorithm Name Documentation</a>
* for information about standard keystore types.
*
* @return a keystore object of the specified type.
*
* @exception KeyStoreException if no Provider supports a
* KeyStoreSpi implementation for the
* specified type.
*
* @see Provider
*/
public static KeyStore getInstance(String type)
throws KeyStoreException
{
try {
Object[] objs = Security.getImpl(type, "KeyStore", (String)null);
return new KeyStore((KeyStoreSpi)objs[0], (Provider)objs[1], type);
} catch (NoSuchAlgorithmException nsae) {
throw new KeyStoreException(type + " not found", nsae);
} catch (NoSuchProviderException nspe) {
throw new KeyStoreException(type + " not found", nspe);
}
}
/**
* Returns a keystore object of the specified type.
*
* <p> A new KeyStore object encapsulating the
* KeyStoreSpi implementation from the specified provider
* is returned. The specified provider must be registered
* in the security provider list.
*
* <p> Note that the list of registered providers may be retrieved via
* the {@link Security#getProviders() Security.getProviders()} method.
*
* @param type the type of keystore.
* See the KeyStore section in the <a href=
* "{@docRoot}/../technotes/guides/security/StandardNames.html#KeyStore">
* Java Cryptography Architecture Standard Algorithm Name Documentation</a>
* for information about standard keystore types.
*
* @param provider the name of the provider.
*
* @return a keystore object of the specified type.
*
* @exception KeyStoreException if a KeyStoreSpi
* implementation for the specified type is not
* available from the specified provider.
*
* @exception NoSuchProviderException if the specified provider is not
* registered in the security provider list.
*
* @exception IllegalArgumentException if the provider name is null
* or empty.
*
* @see Provider
*/
public static KeyStore getInstance(String type, String provider)
throws KeyStoreException, NoSuchProviderException
{
if (provider == null || provider.length() == 0)
throw new IllegalArgumentException("missing provider");
try {
Object[] objs = Security.getImpl(type, "KeyStore", provider);
return new KeyStore((KeyStoreSpi)objs[0], (Provider)objs[1], type);
} catch (NoSuchAlgorithmException nsae) {
throw new KeyStoreException(type + " not found", nsae);
}
}
/**
* Returns a keystore object of the specified type.
*
* <p> A new KeyStore object encapsulating the
* KeyStoreSpi implementation from the specified Provider
* object is returned. Note that the specified Provider object
* does not have to be registered in the provider list.
*
* @param type the type of keystore.
* See the KeyStore section in the <a href=
* "{@docRoot}/../technotes/guides/security/StandardNames.html#KeyStore">
* Java Cryptography Architecture Standard Algorithm Name Documentation</a>
* for information about standard keystore types.
*
* @param provider the provider.
*
* @return a keystore object of the specified type.
*
* @exception KeyStoreException if KeyStoreSpi
* implementation for the specified type is not available
* from the specified Provider object.
*
* @exception IllegalArgumentException if the specified provider is null.
*
* @see Provider
*
* @since 1.4
*/
public static KeyStore getInstance(String type, Provider provider)
throws KeyStoreException
{
if (provider == null)
throw new IllegalArgumentException("missing provider");
try {
Object[] objs = Security.getImpl(type, "KeyStore", provider);
return new KeyStore((KeyStoreSpi)objs[0], (Provider)objs[1], type);
} catch (NoSuchAlgorithmException nsae) {
throw new KeyStoreException(type + " not found", nsae);
}
}
/**
* Returns the default keystore type as specified by the
* {@code keystore.type} security property, or the string
* {@literal "jks"} (acronym for {@literal "Java keystore"})
* if no such property exists.
*
* <p>The default keystore type can be used by applications that do not
* want to use a hard-coded keystore type when calling one of the
* {@code getInstance} methods, and want to provide a default keystore
* type in case a user does not specify its own.
*
* <p>The default keystore type can be changed by setting the value of the
* {@code keystore.type} security property to the desired keystore type.
*
* @return the default keystore type as specified by the
* {@code keystore.type} security property, or the string {@literal "jks"}
* if no such property exists.
* @see java.security.Security security properties
*/
public final static String getDefaultType() {
String kstype;
kstype = AccessController.doPrivileged(new PrivilegedAction<String>() {
public String run() {
return Security.getProperty(KEYSTORE_TYPE);
}
});
if (kstype == null) {
kstype = "jks";
}
return kstype;
}
/**
* Returns the provider of this keystore.
*
* @return the provider of this keystore.
*/
public final Provider getProvider()
{
return this.provider;
}
/**
* Returns the type of this keystore.
*
* @return the type of this keystore.
*/
public final String getType()
{
return this.type;
}
/**
* Returns the key associated with the given alias, using the given
* password to recover it. The key must have been associated with
* the alias by a call to {@code setKeyEntry},
* or by a call to {@code setEntry} with a
* {@code PrivateKeyEntry} or {@code SecretKeyEntry}.
*
* @param alias the alias name
* @param password the password for recovering the key
*
* @return the requested key, or null if the given alias does not exist
* or does not identify a key-related entry.
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
* @exception NoSuchAlgorithmException if the algorithm for recovering the
* key cannot be found
* @exception UnrecoverableKeyException if the key cannot be recovered
* (e.g., the given password is wrong).
*/
public final Key getKey(String alias, char[] password)
throws KeyStoreException, NoSuchAlgorithmException,
UnrecoverableKeyException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineGetKey(alias, password);
}
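    // Illustrative sketch (added; ks, alias and password are assumed to
    // exist): a key-related entry is typically recovered and cast by the
    // caller, e.g.
    //   PrivateKey pk = (PrivateKey) ks.getKey(alias, password);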
/**
* Returns the certificate chain associated with the given alias.
* The certificate chain must have been associated with the alias
* by a call to {@code setKeyEntry},
* or by a call to {@code setEntry} with a
* {@code PrivateKeyEntry}.
*
* @param alias the alias name
*
* @return the certificate chain (ordered with the user's certificate first
* followed by zero or more certificate authorities), or null if the given alias
* does not exist or does not contain a certificate chain
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
*/
public final Certificate[] getCertificateChain(String alias)
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineGetCertificateChain(alias);
}
/**
* Returns the certificate associated with the given alias.
*
* <p> If the given alias name identifies an entry
* created by a call to {@code setCertificateEntry},
* or created by a call to {@code setEntry} with a
* {@code TrustedCertificateEntry},
* then the trusted certificate contained in that entry is returned.
*
* <p> If the given alias name identifies an entry
* created by a call to {@code setKeyEntry},
* or created by a call to {@code setEntry} with a
* {@code PrivateKeyEntry},
* then the first element of the certificate chain in that entry
* is returned.
*
* @param alias the alias name
*
* @return the certificate, or null if the given alias does not exist or
* does not contain a certificate.
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
*/
public final Certificate getCertificate(String alias)
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineGetCertificate(alias);
}
/**
* Returns the creation date of the entry identified by the given alias.
*
* @param alias the alias name
*
* @return the creation date of this entry, or null if the given alias does
* not exist
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
*/
public final Date getCreationDate(String alias)
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineGetCreationDate(alias);
}
/**
* Assigns the given key to the given alias, protecting it with the given
* password.
*
* <p>If the given key is of type {@code java.security.PrivateKey},
* it must be accompanied by a certificate chain certifying the
* corresponding public key.
*
* <p>If the given alias already exists, the keystore information
* associated with it is overridden by the given key (and possibly
* certificate chain).
*
* @param alias the alias name
* @param key the key to be associated with the alias
* @param password the password to protect the key
* @param chain the certificate chain for the corresponding public
* key (only required if the given key is of type
* {@code java.security.PrivateKey}).
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded), the given key cannot be protected, or this operation fails
* for some other reason
*/
public final void setKeyEntry(String alias, Key key, char[] password,
Certificate[] chain)
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
if ((key instanceof PrivateKey) &&
(chain == null || chain.length == 0)) {
throw new IllegalArgumentException("Private key must be "
+ "accompanied by certificate "
+ "chain");
}
keyStoreSpi.engineSetKeyEntry(alias, key, password, chain);
}
/**
* Assigns the given key (that has already been protected) to the given
* alias.
*
* <p>If the protected key is of type
* {@code java.security.PrivateKey}, it must be accompanied by a
* certificate chain certifying the corresponding public key. If the
* underlying keystore implementation is of type {@code jks},
* {@code key} must be encoded as an
* {@code EncryptedPrivateKeyInfo} as defined in the PKCS #8 standard.
*
* <p>If the given alias already exists, the keystore information
* associated with it is overridden by the given key (and possibly
* certificate chain).
*
* @param alias the alias name
* @param key the key (in protected format) to be associated with the alias
* @param chain the certificate chain for the corresponding public
* key (only useful if the protected key is of type
* {@code java.security.PrivateKey}).
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded), or if this operation fails for some other reason.
*/
public final void setKeyEntry(String alias, byte[] key,
Certificate[] chain)
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
keyStoreSpi.engineSetKeyEntry(alias, key, chain);
}
/**
* Assigns the given trusted certificate to the given alias.
*
* <p> If the given alias identifies an existing entry
* created by a call to {@code setCertificateEntry},
* or created by a call to {@code setEntry} with a
* {@code TrustedCertificateEntry},
* the trusted certificate in the existing entry
* is overridden by the given certificate.
*
* @param alias the alias name
* @param cert the certificate
*
* @exception KeyStoreException if the keystore has not been initialized,
* or the given alias already exists and does not identify an
* entry containing a trusted certificate,
* or this operation fails for some other reason.
*/
public final void setCertificateEntry(String alias, Certificate cert)
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
keyStoreSpi.engineSetCertificateEntry(alias, cert);
}
/**
* Deletes the entry identified by the given alias from this keystore.
*
* @param alias the alias name
*
* @exception KeyStoreException if the keystore has not been initialized,
* or if the entry cannot be removed.
*/
public final void deleteEntry(String alias)
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
keyStoreSpi.engineDeleteEntry(alias);
}
/**
* Lists all the alias names of this keystore.
*
* @return enumeration of the alias names
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
*/
public final Enumeration<String> aliases()
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineAliases();
}
/**
* Checks if the given alias exists in this keystore.
*
* @param alias the alias name
*
* @return true if the alias exists, false otherwise
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
*/
public final boolean containsAlias(String alias)
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineContainsAlias(alias);
}
/**
* Retrieves the number of entries in this keystore.
*
* @return the number of entries in this keystore
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
*/
public final int size()
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineSize();
}
/**
* Returns true if the entry identified by the given alias
* was created by a call to {@code setKeyEntry},
* or created by a call to {@code setEntry} with a
* {@code PrivateKeyEntry} or a {@code SecretKeyEntry}.
*
* @param alias the alias for the keystore entry to be checked
*
* @return true if the entry identified by the given alias is a
* key-related entry, false otherwise.
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
*/
public final boolean isKeyEntry(String alias)
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineIsKeyEntry(alias);
}
/**
* Returns true if the entry identified by the given alias
* was created by a call to {@code setCertificateEntry},
* or created by a call to {@code setEntry} with a
* {@code TrustedCertificateEntry}.
*
* @param alias the alias for the keystore entry to be checked
*
* @return true if the entry identified by the given alias contains a
* trusted certificate, false otherwise.
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
*/
public final boolean isCertificateEntry(String alias)
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineIsCertificateEntry(alias);
}
/**
* Returns the (alias) name of the first keystore entry whose certificate
* matches the given certificate.
*
* <p> This method attempts to match the given certificate with each
* keystore entry. If the entry being considered was
* created by a call to {@code setCertificateEntry},
* or created by a call to {@code setEntry} with a
* {@code TrustedCertificateEntry},
* then the given certificate is compared to that entry's certificate.
*
* <p> If the entry being considered was
* created by a call to {@code setKeyEntry},
* or created by a call to {@code setEntry} with a
* {@code PrivateKeyEntry},
* then the given certificate is compared to the first
* element of that entry's certificate chain.
*
* @param cert the certificate to match with.
*
* @return the alias name of the first entry with a matching certificate,
* or null if no such entry exists in this keystore.
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
*/
public final String getCertificateAlias(Certificate cert)
throws KeyStoreException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineGetCertificateAlias(cert);
}
/**
* Stores this keystore to the given output stream, and protects its
* integrity with the given password.
*
* @param stream the output stream to which this keystore is written.
* @param password the password to generate the keystore integrity check
*
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
* @exception IOException if there was an I/O problem with data
* @exception NoSuchAlgorithmException if the appropriate data integrity
* algorithm could not be found
* @exception CertificateException if any of the certificates included in
* the keystore data could not be stored
*/
public final void store(OutputStream stream, char[] password)
throws KeyStoreException, IOException, NoSuchAlgorithmException,
CertificateException
{
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
keyStoreSpi.engineStore(stream, password);
}
/**
* Stores this keystore using the given {@code LoadStoreParameter}.
*
* @param param the {@code LoadStoreParameter}
* that specifies how to store the keystore,
* which may be {@code null}
*
* @exception IllegalArgumentException if the given
* {@code LoadStoreParameter}
* input is not recognized
* @exception KeyStoreException if the keystore has not been initialized
* (loaded)
* @exception IOException if there was an I/O problem with data
* @exception NoSuchAlgorithmException if the appropriate data integrity
* algorithm could not be found
* @exception CertificateException if any of the certificates included in
* the keystore data could not be stored
*
* @since 1.5
*/
public final void store(LoadStoreParameter param)
throws KeyStoreException, IOException,
NoSuchAlgorithmException, CertificateException {
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
keyStoreSpi.engineStore(param);
}
/**
* Loads this KeyStore from the given input stream.
*
* <p>A password may be given to unlock the keystore
* (e.g. the keystore resides on a hardware token device),
* or to check the integrity of the keystore data.
* If a password is not given for integrity checking,
* then integrity checking is not performed.
*
* <p>In order to create an empty keystore, or if the keystore cannot
* be initialized from a stream, pass {@code null}
* as the {@code stream} argument.
*
* <p> Note that if this keystore has already been loaded, it is
* reinitialized and loaded again from the given input stream.
*
* @param stream the input stream from which the keystore is loaded,
* or {@code null}
* @param password the password used to check the integrity of
* the keystore, the password used to unlock the keystore,
* or {@code null}
*
* @exception IOException if there is an I/O or format problem with the
* keystore data, if a password is required but not given,
* or if the given password was incorrect. If the error is due to a
* wrong password, the {@link Throwable#getCause cause} of the
* {@code IOException} should be an
* {@code UnrecoverableKeyException}
* @exception NoSuchAlgorithmException if the algorithm used to check
* the integrity of the keystore cannot be found
* @exception CertificateException if any of the certificates in the
* keystore could not be loaded
*/
public final void load(InputStream stream, char[] password)
throws IOException, NoSuchAlgorithmException, CertificateException
{
keyStoreSpi.engineLoad(stream, password);
initialized = true;
}
/**
* Loads this keystore using the given {@code LoadStoreParameter}.
*
* <p> Note that if this KeyStore has already been loaded, it is
* reinitialized and loaded again from the given parameter.
*
* @param param the {@code LoadStoreParameter}
* that specifies how to load the keystore,
* which may be {@code null}
*
* @exception IllegalArgumentException if the given
* {@code LoadStoreParameter}
* input is not recognized
* @exception IOException if there is an I/O or format problem with the
* keystore data. If the error is due to an incorrect
* {@code ProtectionParameter} (e.g. wrong password)
* the {@link Throwable#getCause cause} of the
* {@code IOException} should be an
* {@code UnrecoverableKeyException}
* @exception NoSuchAlgorithmException if the algorithm used to check
* the integrity of the keystore cannot be found
* @exception CertificateException if any of the certificates in the
* keystore could not be loaded
*
* @since 1.5
*/
public final void load(LoadStoreParameter param)
throws IOException, NoSuchAlgorithmException,
CertificateException {
keyStoreSpi.engineLoad(param);
initialized = true;
}
/**
* Gets a keystore {@code Entry} for the specified alias
* with the specified protection parameter.
*
* @param alias get the keystore {@code Entry} for this alias
* @param protParam the {@code ProtectionParameter}
* used to protect the {@code Entry},
* which may be {@code null}
*
* @return the keystore {@code Entry} for the specified alias,
* or {@code null} if there is no such entry
*
* @exception NullPointerException if
* {@code alias} is {@code null}
* @exception NoSuchAlgorithmException if the algorithm for recovering the
* entry cannot be found
* @exception UnrecoverableEntryException if the specified
* {@code protParam} were insufficient or invalid
* @exception UnrecoverableKeyException if the entry is a
* {@code PrivateKeyEntry} or {@code SecretKeyEntry}
* and the specified {@code protParam} does not contain
* the information needed to recover the key (e.g. wrong password)
* @exception KeyStoreException if the keystore has not been initialized
* (loaded).
* @see #setEntry(String, KeyStore.Entry, KeyStore.ProtectionParameter)
*
* @since 1.5
*/
public final Entry getEntry(String alias, ProtectionParameter protParam)
throws NoSuchAlgorithmException, UnrecoverableEntryException,
KeyStoreException {
if (alias == null) {
throw new NullPointerException("invalid null input");
}
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineGetEntry(alias, protParam);
}
/**
* Saves a keystore {@code Entry} under the specified alias.
* The protection parameter is used to protect the
* {@code Entry}.
*
* <p> If an entry already exists for the specified alias,
* it is overridden.
*
* @param alias save the keystore {@code Entry} under this alias
* @param entry the {@code Entry} to save
* @param protParam the {@code ProtectionParameter}
* used to protect the {@code Entry},
* which may be {@code null}
*
* @exception NullPointerException if
* {@code alias} or {@code entry}
* is {@code null}
* @exception KeyStoreException if the keystore has not been initialized
* (loaded), or if this operation fails for some other reason
*
* @see #getEntry(String, KeyStore.ProtectionParameter)
*
* @since 1.5
*/
public final void setEntry(String alias, Entry entry,
ProtectionParameter protParam)
throws KeyStoreException {
if (alias == null || entry == null) {
throw new NullPointerException("invalid null input");
}
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
keyStoreSpi.engineSetEntry(alias, entry, protParam);
}
/**
* Determines if the keystore {@code Entry} for the specified
* {@code alias} is an instance or subclass of the specified
* {@code entryClass}.
*
* @param alias the alias name
* @param entryClass the entry class
*
* @return true if the keystore {@code Entry} for the specified
* {@code alias} is an instance or subclass of the
* specified {@code entryClass}, false otherwise
*
* @exception NullPointerException if
* {@code alias} or {@code entryClass}
* is {@code null}
* @exception KeyStoreException if the keystore has not been
* initialized (loaded)
*
* @since 1.5
*/
public final boolean
entryInstanceOf(String alias,
Class<? extends KeyStore.Entry> entryClass)
throws KeyStoreException
{
if (alias == null || entryClass == null) {
throw new NullPointerException("invalid null input");
}
if (!initialized) {
throw new KeyStoreException("Uninitialized keystore");
}
return keyStoreSpi.engineEntryInstanceOf(alias, entryClass);
}
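    // Illustrative sketch (added; ks and alias are assumed to exist): callers
    // typically branch on the entry type before recovering it. Trusted
    // certificate entries need no protection parameter, so null may be passed.
    //   if (ks.entryInstanceOf(alias, KeyStore.TrustedCertificateEntry.class)) {
    //       KeyStore.Entry e = ks.getEntry(alias, null);
    //   }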
/**
* A description of a to-be-instantiated KeyStore object.
*
* <p>An instance of this class encapsulates the information needed to
* instantiate and initialize a KeyStore object. That process is
* triggered when the {@linkplain #getKeyStore} method is called.
*
* <p>This makes it possible to decouple configuration from KeyStore
* object creation and e.g. delay a password prompt until it is
* needed.
*
* @see KeyStore
* @see javax.net.ssl.KeyStoreBuilderParameters
* @since 1.5
*/
public static abstract class Builder {
// maximum times to try the callbackhandler if the password is wrong
static final int MAX_CALLBACK_TRIES = 3;
/**
* Construct a new Builder.
*/
protected Builder() {
// empty
}
/**
* Returns the KeyStore described by this object.
*
* @return the {@code KeyStore} described by this object
* @exception KeyStoreException if an error occurred during the
* operation, for example if the KeyStore could not be
* instantiated or loaded
*/
public abstract KeyStore getKeyStore() throws KeyStoreException;
/**
* Returns the ProtectionParameters that should be used to obtain
* the {@link KeyStore.Entry Entry} with the given alias.
* The {@code getKeyStore} method must be invoked before this
* method may be called.
*
* @return the ProtectionParameters that should be used to obtain
* the {@link KeyStore.Entry Entry} with the given alias.
* @param alias the alias of the KeyStore entry
* @throws NullPointerException if alias is null
* @throws KeyStoreException if an error occurred during the
* operation
* @throws IllegalStateException if the getKeyStore method has
* not been invoked prior to calling this method
*/
public abstract ProtectionParameter getProtectionParameter(String alias)
throws KeyStoreException;
/**
* Returns a new Builder that encapsulates the given KeyStore.
* The {@linkplain #getKeyStore} method of the returned object
* will return {@code keyStore}, the {@linkplain
* #getProtectionParameter getProtectionParameter()} method will
* return {@code protectionParameters}.
*
* <p> This is useful if an existing KeyStore object needs to be
* used with Builder-based APIs.
*
* @return a new Builder object
* @param keyStore the KeyStore to be encapsulated
* @param protectionParameter the ProtectionParameter used to
* protect the KeyStore entries
* @throws NullPointerException if keyStore or
* protectionParameters is null
* @throws IllegalArgumentException if the keyStore has not been
* initialized
*/
public static Builder newInstance(final KeyStore keyStore,
final ProtectionParameter protectionParameter) {
if ((keyStore == null) || (protectionParameter == null)) {
throw new NullPointerException();
}
if (keyStore.initialized == false) {
throw new IllegalArgumentException("KeyStore not initialized");
}
return new Builder() {
private volatile boolean getCalled;
public KeyStore getKeyStore() {
getCalled = true;
return keyStore;
}
public ProtectionParameter getProtectionParameter(String alias)
{
if (alias == null) {
throw new NullPointerException();
}
if (getCalled == false) {
throw new IllegalStateException
("getKeyStore() must be called first");
}
return protectionParameter;
}
};
}
/**
* Returns a new Builder object.
*
* <p>The first call to the {@link #getKeyStore} method on the returned
* builder will create a KeyStore of type {@code type} and call
* its {@link KeyStore#load load()} method.
* The {@code inputStream} argument is constructed from
* {@code file}.
* If {@code protection} is a
* {@code PasswordProtection}, the password is obtained by
* calling the {@code getPassword} method.
* Otherwise, if {@code protection} is a
* {@code CallbackHandlerProtection}, the password is obtained
* by invoking the CallbackHandler.
*
* <p>Subsequent calls to {@link #getKeyStore} return the same object
     * as the initial call. If the initial call failed with a
* KeyStoreException, subsequent calls also throw a
* KeyStoreException.
*
* <p>The KeyStore is instantiated from {@code provider} if
* non-null. Otherwise, all installed providers are searched.
*
* <p>Calls to {@link #getProtectionParameter getProtectionParameter()}
* will return a {@link KeyStore.PasswordProtection PasswordProtection}
* object encapsulating the password that was used to invoke the
* {@code load} method.
*
* <p><em>Note</em> that the {@link #getKeyStore} method is executed
* within the {@link AccessControlContext} of the code invoking this
* method.
*
* @return a new Builder object
* @param type the type of KeyStore to be constructed
* @param provider the provider from which the KeyStore is to
* be instantiated (or null)
* @param file the File that contains the KeyStore data
* @param protection the ProtectionParameter securing the KeyStore data
* @throws NullPointerException if type, file or protection is null
* @throws IllegalArgumentException if protection is not an instance
* of either PasswordProtection or CallbackHandlerProtection; or
* if file does not exist or does not refer to a normal file
*/
public static Builder newInstance(String type, Provider provider,
File file, ProtectionParameter protection) {
if ((type == null) || (file == null) || (protection == null)) {
throw new NullPointerException();
}
if ((protection instanceof PasswordProtection == false) &&
(protection instanceof CallbackHandlerProtection == false)) {
throw new IllegalArgumentException
("Protection must be PasswordProtection or " +
"CallbackHandlerProtection");
}
if (file.isFile() == false) {
throw new IllegalArgumentException
("File does not exist or it does not refer " +
"to a normal file: " + file);
}
return new FileBuilder(type, provider, file, protection,
AccessController.getContext());
}
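        // Illustrative usage sketch (added; the file name and password are
        // placeholders). The actual load is deferred until getKeyStore() is
        // first called, as described above.
        //   KeyStore.Builder builder = KeyStore.Builder.newInstance(
        //           "PKCS12", null, new File("keystore.p12"),
        //           new KeyStore.PasswordProtection(password));
        //   KeyStore ks = builder.getKeyStore();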
private static final class FileBuilder extends Builder {
private final String type;
private final Provider provider;
private final File file;
private ProtectionParameter protection;
private ProtectionParameter keyProtection;
private final AccessControlContext context;
private KeyStore keyStore;
private Throwable oldException;
FileBuilder(String type, Provider provider, File file,
ProtectionParameter protection,
AccessControlContext context) {
this.type = type;
this.provider = provider;
this.file = file;
this.protection = protection;
this.context = context;
}
public synchronized KeyStore getKeyStore() throws KeyStoreException
{
if (keyStore != null) {
return keyStore;
}
if (oldException != null) {
throw new KeyStoreException
("Previous KeyStore instantiation failed",
oldException);
}
PrivilegedExceptionAction<KeyStore> action =
new PrivilegedExceptionAction<KeyStore>() {
public KeyStore run() throws Exception {
if (protection instanceof CallbackHandlerProtection == false) {
return run0();
}
// when using a CallbackHandler,
// reprompt if the password is wrong
int tries = 0;
while (true) {
tries++;
try {
return run0();
} catch (IOException e) {
if ((tries < MAX_CALLBACK_TRIES)
&& (e.getCause() instanceof UnrecoverableKeyException)) {
continue;
}
throw e;
}
}
}
public KeyStore run0() throws Exception {
KeyStore ks;
if (provider == null) {
ks = KeyStore.getInstance(type);
} else {
ks = KeyStore.getInstance(type, provider);
}
InputStream in = null;
char[] password = null;
try {
in = new FileInputStream(file);
if (protection instanceof PasswordProtection) {
password =
((PasswordProtection)protection).getPassword();
keyProtection = protection;
} else {
CallbackHandler handler =
((CallbackHandlerProtection)protection)
.getCallbackHandler();
PasswordCallback callback = new PasswordCallback
("Password for keystore " + file.getName(),
false);
handler.handle(new Callback[] {callback});
password = callback.getPassword();
if (password == null) {
throw new KeyStoreException("No password" +
" provided");
}
callback.clearPassword();
keyProtection = new PasswordProtection(password);
}
ks.load(in, password);
return ks;
} finally {
if (in != null) {
in.close();
}
}
}
};
try {
keyStore = AccessController.doPrivileged(action, context);
return keyStore;
} catch (PrivilegedActionException e) {
oldException = e.getCause();
throw new KeyStoreException
("KeyStore instantiation failed", oldException);
}
}
public synchronized ProtectionParameter
getProtectionParameter(String alias) {
if (alias == null) {
throw new NullPointerException();
}
if (keyStore == null) {
throw new IllegalStateException
("getKeyStore() must be called first");
}
return keyProtection;
}
}
/**
* Returns a new Builder object.
*
* <p>Each call to the {@link #getKeyStore} method on the returned
* builder will return a new KeyStore object of type {@code type}.
* Its {@link KeyStore#load(KeyStore.LoadStoreParameter) load()}
* method is invoked using a
* {@code LoadStoreParameter} that encapsulates
* {@code protection}.
*
* <p>The KeyStore is instantiated from {@code provider} if
* non-null. Otherwise, all installed providers are searched.
*
* <p>Calls to {@link #getProtectionParameter getProtectionParameter()}
* will return {@code protection}.
*
* <p><em>Note</em> that the {@link #getKeyStore} method is executed
* within the {@link AccessControlContext} of the code invoking this
* method.
*
* @return a new Builder object
* @param type the type of KeyStore to be constructed
* @param provider the provider from which the KeyStore is to
* be instantiated (or null)
* @param protection the ProtectionParameter securing the Keystore
* @throws NullPointerException if type or protection is null
*/
public static Builder newInstance(final String type,
final Provider provider, final ProtectionParameter protection) {
if ((type == null) || (protection == null)) {
throw new NullPointerException();
}
final AccessControlContext context = AccessController.getContext();
return new Builder() {
private volatile boolean getCalled;
private IOException oldException;
private final PrivilegedExceptionAction<KeyStore> action
= new PrivilegedExceptionAction<KeyStore>() {
public KeyStore run() throws Exception {
KeyStore ks;
if (provider == null) {
ks = KeyStore.getInstance(type);
} else {
ks = KeyStore.getInstance(type, provider);
}
LoadStoreParameter param = new SimpleLoadStoreParameter(protection);
if (protection instanceof CallbackHandlerProtection == false) {
ks.load(param);
} else {
// when using a CallbackHandler,
// reprompt if the password is wrong
int tries = 0;
while (true) {
tries++;
try {
ks.load(param);
break;
} catch (IOException e) {
if (e.getCause() instanceof UnrecoverableKeyException) {
if (tries < MAX_CALLBACK_TRIES) {
continue;
} else {
oldException = e;
}
}
throw e;
}
}
}
getCalled = true;
return ks;
}
};
public synchronized KeyStore getKeyStore()
throws KeyStoreException {
if (oldException != null) {
throw new KeyStoreException
("Previous KeyStore instantiation failed",
oldException);
}
try {
return AccessController.doPrivileged(action, context);
} catch (PrivilegedActionException e) {
Throwable cause = e.getCause();
throw new KeyStoreException
("KeyStore instantiation failed", cause);
}
}
public ProtectionParameter getProtectionParameter(String alias)
{
if (alias == null) {
throw new NullPointerException();
}
if (getCalled == false) {
throw new IllegalStateException
("getKeyStore() must be called first");
}
return protection;
}
};
}
}
static class SimpleLoadStoreParameter implements LoadStoreParameter {
private final ProtectionParameter protection;
SimpleLoadStoreParameter(ProtectionParameter protection) {
this.protection = protection;
}
public ProtectionParameter getProtectionParameter() {
return protection;
}
}
}<|fim▁end|> | /** |
<|file_name|>scripts.rs<|end_file_name|><|fim▁begin|>//! This integration test runs the test scripts found in
//! the scripts/ directory
//! See image-worker/README.md for a full description of
//! the test script syntax and details about this test
//! runner.
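//!
//! In brief (summarised from `run_test_script` below, as a convenience): each
//! non-empty script line is dispatched on its first character. `%` compares a
//! produced image against an expected image, `-` removes a file, `=` copies a
//! file, `>` checks the worker's previous response, `#` marks a comment, and
//! any other line is sent to the worker process on stdin.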
extern crate image;
use std::fs::{File, read_dir, remove_file, copy};
use std::io::{BufReader, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::Child;
use std::process::{Command, Stdio};
use std::thread;
use std::sync::mpsc;
#[test]
fn run_scripts() {
let scripts_dir = Path::new(file!()).with_file_name("scripts");
let mut threads = Vec::new();
// We use channels here so we can immediately exit if we get
// a failure message
let (tx, rx) = mpsc::channel();
for entry in read_dir(scripts_dir).unwrap() {
let test_script = entry.unwrap().path();
let tx = tx.clone();
let th = (test_script.clone(), thread::spawn(move || {
let result = run_test_script(test_script);
tx.send(result).unwrap();
}));
threads.push(th);
}
for _ in threads.iter() {
let result = rx.recv().unwrap();
match result {
Err(error) => panic!("{}", error),
_ => (),
}
}
for (script, th) in threads {
match th.join() {
Ok(_) => (),
Err(_) => panic!("thread for {} ended in a panic", script.to_str().unwrap()),
}
}
}
fn run_test_script(script: PathBuf) -> Result<(), String> {
let file = File::open(script.clone()).unwrap();
let reader = BufReader::new(file);
let mut child = spawn_worker();
let filename = script.file_name().unwrap().to_str().unwrap();
println!("Starting {}...", filename);
let mut last_response: Option<String> = None;
for (num, line) in reader.lines().enumerate() {
let line = line.unwrap();
let line = line.trim();
if line.is_empty() {
continue;
}
//println!("{}", line);
let (first, arg) = line.split_at(1);
let result = match first {
"%" => check_file_match(arg),
"-" => remove_file(arg.trim()).map_err(|e| format!("{}", e)),
"=" => copy_file(arg),
">" => {
let res = check_output(last_response.as_ref(), arg);
last_response = None;
res
},
"#" => Ok(()),
_ => {
// The test script has until the next command to check its output using the
// > command. If it does not, we check for success here
let response_checked = if last_response.is_some() {
check_success(last_response.as_ref())
}
else {
Ok(())
};
response_checked.and_then(|_| {
send_input(&mut child, line).and_then(|_| {
last_response = Some(read_output(&mut child)?);
Ok(())
})
})
},
};
if let Err(error) = result {
return Err(format!("{}#{}: {}", filename, num + 1, error));
}
}
<|fim▁hole|> }
}
if !child.wait().unwrap().success() {
return Err(
format!("{}: Worker process did not complete successfully after test script", filename)
);
}
println!("Completed {}.", filename);
Ok(())
}
fn spawn_worker() -> Child {
Command::new("cargo")
.args(&["run", "-q"])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap()
}
fn check_file_match(arg: &str) -> Result<(), String> {
let delimiter = arg.find("=>")
.ok_or("Could not find => in % command")?;
let (output_path, expected_path) = arg.split_at(delimiter);
// get rid of the "=>"
let expected_path = expected_path.chars().skip(2).collect::<String>();
// ignore any extra whitespace
let output_path = output_path.trim();
let expected_path = expected_path.trim();
let output = image::open(output_path).map_err(|e| format!("{}", e))?;
let expected = image::open(expected_path).map_err(|e| format!("{}", e))?;
let output = output.raw_pixels();
let expected = expected.raw_pixels();
if output == expected {
remove_file(output_path)
.map_err(|e| format!("Failed to remove output after test passed: {}", e))?;
Ok(())
}
else {
Err(format!("{} did not match {}", output_path, expected_path))
}
}
fn copy_file(arg: &str) -> Result<(), String> {
let args: Vec<_> = arg.trim().split_whitespace().collect();
if args.len() != 2 {
return Err("= command requires only 2 arguments".to_owned());
}
let source = args[0];
let destination = args[1];
copy(source, destination).map_err(|e| format!("{}", e))?;
Ok(())
}
fn check_success(output: Option<&String>) -> Result<(), String> {
if let Some(response) = output {
        // This check is not foolproof and may eventually cause problems.
        // It is good enough for now, though, so we're running with it
if response.starts_with("{\"Success\":") {
Ok(())
}
else {
Err(format!("Worker did not produce Success. Script failed at \
the last input *before* this line. Actual result: {}", response))
}
}
else {
panic!("check_success should have been called only when last_response had a value");
}
}
fn check_output(output: Option<&String>, arg: &str) -> Result<(), String> {
// This is brittle, but it doesn't seem worth it to implement something
// more robust for now. You will need to exactly match the output in your
// test script if you want to test output
if let Some(response) = output {
let arg = arg.trim();
let response = response.trim();
if response == arg {
Ok(())
}
else {
Err(format!("Worker produced output not equal to expected output.\
\nExpected: {:?}\nReceived: {:?}", arg, response))
}
}
else {
panic!("check_output should have been called only when last_response had a value");
}
}
fn send_input(child: &mut Child, line: &str) -> Result<(), String> {
if let Some(ref mut stdin) = child.stdin {
match write!(stdin, "{}\n", line) {
Ok(_) => Ok(()),
Err(error) => Err(format!("{}", error)),
}
}
else {
// should not happen. This panic is just in case.
panic!("stdin was not open for writing".to_string());
}
}
fn read_output(child: &mut Child) -> Result<String, String> {
let mut stdout = BufReader::new(match child.stdout {
Some(ref mut handle) => Ok(handle),
None => Err("Worker child process stdout was never open"),
}?);
let mut response = String::new();
stdout.read_line(&mut response).map_err(|e| format!("{}", e))?;
Ok(response)
}<|fim▁end|> | if last_response.is_some() {
if let Err(error) = check_success(last_response.as_ref()) {
return Err(format!("{}#EOF: {}", filename, error)); |
<|file_name|>cfg.rs<|end_file_name|><|fim▁begin|><|fim▁hole|> println!("You are running linux!")
}
// And this function only gets compiled when the target OS is **not** Linux
#[cfg(not(target_os = "linux"))]
fn are_you_on_linux() {
println!("You are *not* running linux!")
}
fn main() {
are_you_on_linux();
println!("Are you sure?");
if cfg!(target_os = "linux") {
println!("Yes. It's definitely linux!");
} else {
println!("Yes. It's definitely *not* linux!");
}
}<|fim▁end|> | // 这个函数仅当操作系统是 Linux 的时候才会编译
#[cfg(target_os = "linux")]
fn are_you_on_linux() { |
<|file_name|>_jax_backend.py<|end_file_name|><|fim▁begin|>import numbers
import warnings
from functools import wraps, partial
from typing import List, Callable
import logging
import numpy as np
import jax
import jax.numpy as jnp
import jax.scipy as scipy
from jax.core import Tracer
from jax.interpreters.xla import DeviceArray
from jax.scipy.sparse.linalg import cg
from jax import random
from phi.math import SolveInfo, Solve, DType
from ..math.backend._dtype import to_numpy_dtype, from_numpy_dtype
from phi.math.backend import Backend, ComputeDevice
from phi.math.backend._backend import combined_dim, SolveResult
class JaxBackend(Backend):
def __init__(self):
Backend.__init__(self, "Jax", default_device=None)
try:
self.rnd_key = jax.random.PRNGKey(seed=0)
except RuntimeError as err:
warnings.warn(f"{err}")
self.rnd_key = None
def prefers_channels_last(self) -> bool:
return True
def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:
devices = []
for jax_dev in jax.devices():
jax_dev_type = jax_dev.platform.upper()
if device_type is None or device_type == jax_dev_type:
description = f"id={jax_dev.id}"
devices.append(ComputeDevice(self, jax_dev.device_kind, jax_dev_type, -1, -1, description, jax_dev))
return devices
# def set_default_device(self, device: ComputeDevice or str):
# if device == 'CPU':
# jax.config.update('jax_platform_name', 'cpu')
# elif device == 'GPU':
# jax.config.update('jax_platform_name', 'gpu')
# else:
# raise NotImplementedError()
def _check_float64(self):
if self.precision == 64:
if not jax.config.read('jax_enable_x64'):
jax.config.update('jax_enable_x64', True)
assert jax.config.read('jax_enable_x64'), "FP64 is disabled for Jax."
def seed(self, seed: int):
self.rnd_key = jax.random.PRNGKey(seed)
def as_tensor(self, x, convert_external=True):
self._check_float64()
if self.is_tensor(x, only_native=convert_external):
array = x
else:
array = jnp.array(x)
# --- Enforce Precision ---
if not isinstance(array, numbers.Number):
if self.dtype(array).kind == float:
array = self.to_float(array)
elif self.dtype(array).kind == complex:
array = self.to_complex(array)
return array
def is_tensor(self, x, only_native=False):
if isinstance(x, jnp.ndarray) and not isinstance(x, np.ndarray): # NumPy arrays inherit from Jax arrays
return True
# if scipy.sparse.issparse(x): # TODO
# return True
if isinstance(x, jnp.bool_):
return True
# --- Above considered native ---
if only_native:
return False
# --- Non-native types ---
if isinstance(x, np.ndarray):
return True
if isinstance(x, (numbers.Number, bool, str)):
return True
if isinstance(x, (tuple, list)):
return all([self.is_tensor(item, False) for item in x])
return False
def is_available(self, tensor):
return not isinstance(tensor, Tracer)
def numpy(self, x):
return np.array(x)
def to_dlpack(self, tensor):
from jax import dlpack
return dlpack.to_dlpack(tensor)
def from_dlpack(self, capsule):
from jax import dlpack
return dlpack.from_dlpack(capsule)
def copy(self, tensor, only_mutable=False):
return jnp.array(tensor, copy=True)
sqrt = staticmethod(jnp.sqrt)
exp = staticmethod(jnp.exp)
sin = staticmethod(jnp.sin)
cos = staticmethod(jnp.cos)
tan = staticmethod(jnp.tan)
log = staticmethod(jnp.log)
log2 = staticmethod(jnp.log2)
log10 = staticmethod(jnp.log10)
isfinite = staticmethod(jnp.isfinite)
abs = staticmethod(jnp.abs)
sign = staticmethod(jnp.sign)
round = staticmethod(jnp.round)
ceil = staticmethod(jnp.ceil)
floor = staticmethod(jnp.floor)
nonzero = staticmethod(jnp.nonzero)
flip = staticmethod(jnp.flip)
stop_gradient = staticmethod(jax.lax.stop_gradient)
transpose = staticmethod(jnp.transpose)
equal = staticmethod(jnp.equal)
tile = staticmethod(jnp.tile)
stack = staticmethod(jnp.stack)
concat = staticmethod(jnp.concatenate)
zeros_like = staticmethod(jnp.zeros_like)
ones_like = staticmethod(jnp.ones_like)
maximum = staticmethod(jnp.maximum)
minimum = staticmethod(jnp.minimum)
clip = staticmethod(jnp.clip)
shape = staticmethod(jnp.shape)
staticshape = staticmethod(jnp.shape)
imag = staticmethod(jnp.imag)
real = staticmethod(jnp.real)
conj = staticmethod(jnp.conjugate)
einsum = staticmethod(jnp.einsum)
cumsum = staticmethod(jnp.cumsum)
def jit_compile(self, f: Callable) -> Callable:
def run_jit_f(*args):
logging.debug(f"JaxBackend: running jit-compiled '{f.__name__}' with shapes {[arg.shape for arg in args]} and dtypes {[arg.dtype.name for arg in args]}")
return self.as_registered.call(jit_f, *args, name=f"run jit-compiled '{f.__name__}'")
run_jit_f.__name__ = f"Jax-Jit({f.__name__})"
jit_f = jax.jit(f)
return run_jit_f
def block_until_ready(self, values):
if isinstance(values, DeviceArray):
values.block_until_ready()
if isinstance(values, (tuple, list)):
for v in values:
self.block_until_ready(v)
def functional_gradient(self, f, wrt: tuple or list, get_output: bool):
if get_output:
@wraps(f)
def aux_f(*args):
output = f(*args)
if isinstance(output, (tuple, list)) and len(output) == 1:
output = output[0]
result = (output[0], output[1:]) if isinstance(output, (tuple, list)) else (output, None)
if result[0].ndim > 0:
result = jnp.sum(result[0]), result[1]
return result
jax_grad_f = jax.value_and_grad(aux_f, argnums=wrt, has_aux=True)
@wraps(f)
def unwrap_outputs(*args):
(loss, aux), grads = jax_grad_f(*args)
return (loss, *aux, *grads) if aux is not None else (loss, *grads)
return unwrap_outputs
else:
@wraps(f)
def nonaux_f(*args):
output = f(*args)
result = output[0] if isinstance(output, (tuple, list)) else output
if result.ndim > 0:
result = jnp.sum(result)
return result
return jax.grad(nonaux_f, argnums=wrt, has_aux=False)
def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:
jax_fun = jax.custom_vjp(f) # custom vector-Jacobian product (reverse-mode differentiation)
def forward(*x):
y = f(*x)
return y, (x, y)
def backward(x_y, dy):
x, y = x_y
dx = gradient(x, y, dy)
return tuple(dx)
jax_fun.defvjp(forward, backward)
return jax_fun
def divide_no_nan(self, x, y):
return jnp.nan_to_num(x / y, copy=True, nan=0)
def random_uniform(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.uniform(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def random_normal(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.normal(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):
if limit is None:
start, limit = 0, start
return jnp.arange(start, limit, delta, to_numpy_dtype(dtype))
def pad(self, value, pad_width, mode='constant', constant_values=0):
assert mode in ('constant', 'symmetric', 'periodic', 'reflect', 'boundary'), mode
if mode == 'constant':
constant_values = jnp.array(constant_values, dtype=value.dtype)
return jnp.pad(value, pad_width, 'constant', constant_values=constant_values)
else:
if mode in ('periodic', 'boundary'):
mode = {'periodic': 'wrap', 'boundary': 'edge'}[mode]
return jnp.pad(value, pad_width, mode)
def reshape(self, value, shape):
return jnp.reshape(value, shape)
def sum(self, value, axis=None, keepdims=False):
if isinstance(value, (tuple, list)):
assert axis == 0
return sum(value[1:], value[0])
return jnp.sum(value, axis=axis, keepdims=keepdims)
def prod(self, value, axis=None):
if not isinstance(value, jnp.ndarray):
value = jnp.array(value)
if value.dtype == bool:
return jnp.all(value, axis=axis)
return jnp.prod(value, axis=axis)
def where(self, condition, x=None, y=None):
if x is None or y is None:
return jnp.argwhere(condition)
return jnp.where(condition, x, y)
def zeros(self, shape, dtype: DType = None):
self._check_float64()
return jnp.zeros(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def ones(self, shape, dtype: DType = None):
self._check_float64()
return jnp.ones(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def meshgrid(self, *coordinates):
self._check_float64()
coordinates = [self.as_tensor(c) for c in coordinates]
return jnp.meshgrid(*coordinates, indexing='ij')
def linspace(self, start, stop, number):
self._check_float64()
return jnp.linspace(start, stop, number, dtype=to_numpy_dtype(self.float_type))
def mean(self, value, axis=None, keepdims=False):
return jnp.mean(value, axis, keepdims=keepdims)
def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or list):
return jnp.tensordot(a, b, (a_axes, b_axes))
def mul(self, a, b):
# if scipy.sparse.issparse(a): # TODO sparse?
# return a.multiply(b)
# elif scipy.sparse.issparse(b):
# return b.multiply(a)
# else:
return Backend.mul(self, a, b)
def matmul(self, A, b):
return jnp.stack([A.dot(b[i]) for i in range(b.shape[0])])
def while_loop(self, loop: Callable, values: tuple):
if all(self.is_available(t) for t in values):
while jnp.any(values[0]):
values = loop(*values)
return values
else:
cond = lambda vals: jnp.any(vals[0])
body = lambda vals: loop(*vals)
return jax.lax.while_loop(cond, body, values)
def max(self, x, axis=None, keepdims=False):
return jnp.max(x, axis, keepdims=keepdims)
def min(self, x, axis=None, keepdims=False):
return jnp.min(x, axis, keepdims=keepdims)
def conv(self, value, kernel, zero_padding=True):
assert kernel.shape[0] in (1, value.shape[0])
assert value.shape[1] == kernel.shape[2], f"value has {value.shape[1]} channels but kernel has {kernel.shape[2]}"
assert value.ndim + 1 == kernel.ndim
# AutoDiff may require jax.lax.conv_general_dilated
if zero_padding:
result = np.zeros((value.shape[0], kernel.shape[1], *value.shape[2:]), dtype=to_numpy_dtype(self.float_type))
else:
valid = [value.shape[i + 2] - kernel.shape[i + 3] + 1 for i in range(value.ndim - 2)]
result = np.zeros([value.shape[0], kernel.shape[1], *valid], dtype=to_numpy_dtype(self.float_type))
mode = 'same' if zero_padding else 'valid'
for b in range(value.shape[0]):
b_kernel = kernel[min(b, kernel.shape[0] - 1)]
for o in range(kernel.shape[1]):
for i in range(value.shape[1]):
result[b, o, ...] += scipy.signal.correlate(value[b, i, ...], b_kernel[o, i, ...], mode=mode)
return result
<|fim▁hole|> for _i in range(number):
a = jnp.expand_dims(a, axis)
return a
def cast(self, x, dtype: DType):
if self.is_tensor(x, only_native=True) and from_numpy_dtype(x.dtype) == dtype:
return x
else:
return jnp.array(x, to_numpy_dtype(dtype))
def batched_gather_nd(self, values, indices):
assert indices.shape[-1] == self.ndims(values) - 2
batch_size = combined_dim(values.shape[0], indices.shape[0])
results = []
for b in range(batch_size):
b_values = values[min(b, values.shape[0] - 1)]
b_indices = self.unstack(indices[min(b, indices.shape[0] - 1)], -1)
results.append(b_values[b_indices])
return jnp.stack(results)
def std(self, x, axis=None, keepdims=False):
return jnp.std(x, axis, keepdims=keepdims)
def boolean_mask(self, x, mask, axis=0):
slices = [mask if i == axis else slice(None) for i in range(len(x.shape))]
return x[tuple(slices)]
def any(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.any(boolean_tensor, axis=axis, keepdims=keepdims)
def all(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.all(boolean_tensor, axis=axis, keepdims=keepdims)
def scatter(self, base_grid, indices, values, mode: str):
base_grid, values = self.auto_cast(base_grid, values)
batch_size = combined_dim(combined_dim(indices.shape[0], values.shape[0]), base_grid.shape[0])
spatial_dims = tuple(range(base_grid.ndim - 2))
dnums = jax.lax.ScatterDimensionNumbers(update_window_dims=(1,), # channel dim of updates (batch dim removed)
inserted_window_dims=spatial_dims, # no idea what this does but spatial_dims seems to work
scatter_dims_to_operand_dims=spatial_dims) # spatial dims of base_grid (batch dim removed)
scatter = jax.lax.scatter_add if mode == 'add' else jax.lax.scatter
result = []
for b in range(batch_size):
b_grid = base_grid[b, ...]
b_indices = indices[min(b, indices.shape[0] - 1), ...]
b_values = values[min(b, values.shape[0] - 1), ...]
result.append(scatter(b_grid, b_indices, b_values, dnums))
return jnp.stack(result)
def quantile(self, x, quantiles):
return jnp.quantile(x, quantiles, axis=-1)
def fft(self, x, axes: tuple or list):
x = self.to_complex(x)
if not axes:
return x
if len(axes) == 1:
return np.fft.fft(x, axis=axes[0]).astype(x.dtype)
elif len(axes) == 2:
return np.fft.fft2(x, axes=axes).astype(x.dtype)
else:
return np.fft.fftn(x, axes=axes).astype(x.dtype)
def ifft(self, k, axes: tuple or list):
if not axes:
return k
if len(axes) == 1:
return np.fft.ifft(k, axis=axes[0]).astype(k.dtype)
elif len(axes) == 2:
return np.fft.ifft2(k, axes=axes).astype(k.dtype)
else:
return np.fft.ifftn(k, axes=axes).astype(k.dtype)
def dtype(self, array) -> DType:
if isinstance(array, int):
return DType(int, 32)
if isinstance(array, float):
return DType(float, 64)
if isinstance(array, complex):
return DType(complex, 128)
if not isinstance(array, jnp.ndarray):
array = jnp.array(array)
return from_numpy_dtype(array.dtype)
def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
if method == 'auto' and not trj and not self.is_available(y):
return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)
else:
return Backend.linear_solve(self, method, lin, y, x0, rtol, atol, max_iter, trj)<|fim▁end|> | def expand_dims(self, a, axis=0, number=1): |
<|file_name|>iss118-spec.js<|end_file_name|><|fim▁begin|>/** @babel */
/* eslint-env jasmine, atomtest */
/*
This file contains verifying specs for:
https://github.com/sindresorhus/atom-editorconfig/issues/118
*/
import fs from 'fs';
import path from 'path';
const testPrefix = path.basename(__filename).split('-').shift();
const projectRoot = path.join(__dirname, 'fixtures');
const filePath = path.join(projectRoot, `test.${testPrefix}`);
describe('editorconfig', () => {
let textEditor;
const textWithoutTrailingWhitespaces = 'I\nam\nProvidence.';
const textWithManyTrailingWhitespaces = 'I \t \nam \t \nProvidence.';
beforeEach(() => {
waitsForPromise(() =>
Promise.all([
atom.packages.activatePackage('editorconfig'),
atom.workspace.open(filePath)
]).then(results => {
textEditor = results[1];
})
);
});
afterEach(() => {
// remove the created fixture, if it exists
runs(() => {
fs.stat(filePath, (err, stats) => {
if (!err && stats.isFile()) {
fs.unlink(filePath);
}
});
});
waitsFor(() => {
try {
return fs.statSync(filePath).isFile() === false;
} catch (err) {
return true;
}
}, 5000, `removed ${filePath}`);
});
describe('Atom being set to remove trailing whitespaces', () => {
beforeEach(() => {
// eslint-disable-next-line camelcase
textEditor.getBuffer().editorconfig.settings.trim_trailing_whitespace = true;
// eslint-disable-next-line camelcase
textEditor.getBuffer().editorconfig.settings.insert_final_newline = false;
});
it('should strip trailing whitespaces on save.', () => {
textEditor.setText(textWithManyTrailingWhitespaces);
textEditor.save();<|fim▁hole|> });
});
});<|fim▁end|> | expect(textEditor.getText().length).toEqual(textWithoutTrailingWhitespaces.length); |
<|file_name|>StreamParser.hh<|end_file_name|><|fim▁begin|>/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2007 Live Networks, Inc. All rights reserved.
// Abstract class for parsing a byte stream
// C++ header
#ifndef _STREAM_PARSER_HH
#define _STREAM_PARSER_HH
#ifndef _FRAMED_SOURCE_HH
#include "FramedSource.hh"
#endif
class StreamParser {
public:
virtual void flushInput();
protected: // we're a virtual base class
typedef void (clientContinueFunc)(void* clientData,
unsigned char* ptr, unsigned size,
struct timeval presentationTime);
StreamParser(FramedSource* inputSource,
FramedSource::onCloseFunc* onInputCloseFunc,
void* onInputCloseClientData,
clientContinueFunc* clientContinueFunc,
void* clientContinueClientData);<|fim▁hole|> virtual void restoreSavedParserState();
u_int32_t get4Bytes() { // byte-aligned; returned in big-endian order
u_int32_t result = test4Bytes();
fCurParserIndex += 4;
fRemainingUnparsedBits = 0;
return result;
}
u_int32_t test4Bytes() { // as above, but doesn't advance ptr
ensureValidBytes(4);
unsigned char const* ptr = nextToParse();
return (ptr[0]<<24)|(ptr[1]<<16)|(ptr[2]<<8)|ptr[3];
}
u_int16_t get2Bytes() {
ensureValidBytes(2);
unsigned char const* ptr = nextToParse();
u_int16_t result = (ptr[0]<<8)|ptr[1];
fCurParserIndex += 2;
fRemainingUnparsedBits = 0;
return result;
}
u_int8_t get1Byte() { // byte-aligned
ensureValidBytes(1);
fRemainingUnparsedBits = 0;
return curBank()[fCurParserIndex++];
}
void getBytes(u_int8_t* to, unsigned numBytes) {
ensureValidBytes(numBytes);
memmove(to, nextToParse(), numBytes);
fCurParserIndex += numBytes;
fRemainingUnparsedBits = 0;
}
void skipBytes(unsigned numBytes) {
ensureValidBytes(numBytes);
fCurParserIndex += numBytes;
}
void skipBits(unsigned numBits);
unsigned getBits(unsigned numBits);
// numBits <= 32; returns data into low-order bits of result
unsigned curOffset() const { return fCurParserIndex; }
unsigned& totNumValidBytes() { return fTotNumValidBytes; }
private:
unsigned char* curBank() { return fCurBank; }
unsigned char* nextToParse() { return &curBank()[fCurParserIndex]; }
unsigned char* lastParsed() { return &curBank()[fCurParserIndex-1]; }
// makes sure that at least "numBytes" valid bytes remain:
void ensureValidBytes(unsigned numBytesNeeded) {
// common case: inlined:
if (fCurParserIndex + numBytesNeeded <= fTotNumValidBytes) return;
ensureValidBytes1(numBytesNeeded);
}
void ensureValidBytes1(unsigned numBytesNeeded);
static void afterGettingBytes(void* clientData, unsigned numBytesRead,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds);
private:
FramedSource* fInputSource; // should be a byte-stream source??
FramedSource::onCloseFunc* fOnInputCloseFunc;
void* fOnInputCloseClientData;
clientContinueFunc* fClientContinueFunc;
void* fClientContinueClientData;
// Use a pair of 'banks', and swap between them as they fill up:
unsigned char* fBank[2];
unsigned char fCurBankNum;
unsigned char* fCurBank;
// The most recent 'saved' parse position:
unsigned fSavedParserIndex; // <= fCurParserIndex
unsigned char fSavedRemainingUnparsedBits;
// The current position of the parser within the current bank:
unsigned fCurParserIndex; // <= fTotNumValidBytes
unsigned char fRemainingUnparsedBits; // in previous byte: [0,7]
// The total number of valid bytes stored in the current bank:
unsigned fTotNumValidBytes; // <= BANK_SIZE
};
#endif<|fim▁end|> | virtual ~StreamParser();
void saveParserState(); |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># coding=utf-8
"""
Widgets Module
"""
<|fim▁hole|><|fim▁end|> | __author__ = 'Matt Eland' |
<|file_name|>categories.client.service.js<|end_file_name|><|fim▁begin|>//Categories service used to communicate Categories REST endpoints
(function () {
'use strict';
angular
.module('categories')
.factory('CategoriesService', CategoriesService);
CategoriesService.$inject = ['$resource'];
<|fim▁hole|> return $resource('api/categories/:categoryId', {
categoryId: '@_id'
}, {
update: {
method: 'PUT'
}
});
}
})();<|fim▁end|> | function CategoriesService($resource) { |
<|file_name|>macros.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Entry point of task panic, for details, see std::macros
#[macro_export]
macro_rules! panic {
() => (
panic!("explicit panic")
);
($msg:expr) => ({
static _MSG_FILE_LINE: (&'static str, &'static str, u32) = ($msg, file!(), line!());
::core::panicking::panic(&_MSG_FILE_LINE)
});
($fmt:expr, $($arg:tt)*) => ({
// The leading _'s are to avoid dead code warnings if this is
// used inside a dead function. Just `#[allow(dead_code)]` is
// insufficient, since the user may have
        // `#[forbid(dead_code)]`, which cannot be overridden.
static _FILE_LINE: (&'static str, u32) = (file!(), line!());
::core::panicking::panic_fmt(format_args!($fmt, $($arg)*), &_FILE_LINE)
});
}
/// Ensure that a boolean expression is `true` at runtime.
///
/// This will invoke the `panic!` macro if the provided expression cannot be
/// evaluated to `true` at runtime.
///
/// # Examples
///
/// ```
/// // the panic message for these assertions is the stringified value of the
/// // expression given.
/// assert!(true);
///
/// fn some_computation() -> bool { true } // a very simple function
///
/// assert!(some_computation());
///
/// // assert with a custom message
/// let x = true;
/// assert!(x, "x wasn't true!");
///
/// let a = 3; let b = 27;
/// assert!(a + b == 30, "a = {}, b = {}", a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! assert {
($cond:expr) => (
if !$cond {
panic!(concat!("assertion failed: ", stringify!($cond)))
}
);
($cond:expr, $($arg:tt)+) => (
if !$cond {
panic!($($arg)+)
}
);
}
/// Asserts that two expressions are equal to each other.
///
/// On panic, this macro will print the values of the expressions with their
/// debug representations.
///
/// # Examples
///
/// ```
/// let a = 3;
/// let b = 1 + 2;
/// assert_eq!(a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! assert_eq {
($left:expr , $right:expr) => ({
match (&($left), &($right)) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
panic!("assertion failed: `(left == right)` \
(left: `{:?}`, right: `{:?}`)", *left_val, *right_val)
}
}
}
})
}
/// Ensure that a boolean expression is `true` at runtime.
///
/// This will invoke the `panic!` macro if the provided expression cannot be
/// evaluated to `true` at runtime.
///
/// Unlike `assert!`, `debug_assert!` statements are only enabled in non
/// optimized builds by default. An optimized build will omit all
/// `debug_assert!` statements unless `-C debug-assertions` is passed to the
/// compiler. This makes `debug_assert!` useful for checks that are too
/// expensive to be present in a release build but may be helpful during
/// development.
///
/// # Examples
///
/// ```
/// // the panic message for these assertions is the stringified value of the
/// // expression given.
/// debug_assert!(true);
///
/// fn some_expensive_computation() -> bool { true } // a very simple function
/// debug_assert!(some_expensive_computation());
///
/// // assert with a custom message
/// let x = true;
/// debug_assert!(x, "x wasn't true!");
///
/// let a = 3; let b = 27;
/// debug_assert!(a + b == 30, "a = {}, b = {}", a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! debug_assert {
($($arg:tt)*) => (if cfg!(debug_assertions) { assert!($($arg)*); })
}
/// Asserts that two expressions are equal to each other, testing equality in
/// both directions.
///
/// On panic, this macro will print the values of the expressions.
///
/// Unlike `assert_eq!`, `debug_assert_eq!` statements are only enabled in non
/// optimized builds by default. An optimized build will omit all
/// `debug_assert_eq!` statements unless `-C debug-assertions` is passed to the
/// compiler. This makes `debug_assert_eq!` useful for checks that are too
/// expensive to be present in a release build but may be helpful during
/// development.
///
/// # Examples
///
/// ```
/// let a = 3;
/// let b = 1 + 2;
/// debug_assert_eq!(a, b);
/// ```
#[macro_export]
macro_rules! debug_assert_eq {
($($arg:tt)*) => (if cfg!(debug_assertions) { assert_eq!($($arg)*); })<|fim▁hole|>
/// Short circuiting evaluation on Err
///
/// `libstd` contains a more general `try!` macro that uses `From<E>`.
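///
/// # Examples
///
/// A minimal, illustrative sketch (the function below is hypothetical and not part of
/// the original documentation):
///
/// ```
/// fn first_byte(bytes: &[u8]) -> Result<u8, ()> {
///     let byte = try!(bytes.first().ok_or(()));
///     Ok(*byte)
/// }
/// ```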
#[macro_export]
macro_rules! try {
($e:expr) => ({
use $crate::result::Result::{Ok, Err};
match $e {
Ok(e) => e,
Err(e) => return Err(e),
}
})
}
/// Use the `format!` syntax to write data into a buffer of type `&mut Writer`.
/// See `std::fmt` for more information.
///
/// # Examples
///
/// ```
/// # #![allow(unused_must_use)]
/// use std::io::Write;
///
/// let mut w = Vec::new();
/// write!(&mut w, "test");
/// write!(&mut w, "formatted {}", "arguments");
/// ```
#[macro_export]
macro_rules! write {
($dst:expr, $($arg:tt)*) => ($dst.write_fmt(format_args!($($arg)*)))
}
/// Equivalent to the `write!` macro, except that a newline is appended after
/// the message is written.
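///
/// # Examples
///
/// A small illustration, mirroring the `write!` example above:
///
/// ```
/// # #![allow(unused_must_use)]
/// use std::io::Write;
///
/// let mut w = Vec::new();
/// writeln!(&mut w, "formatted {}", "arguments");
/// ```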
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! writeln {
($dst:expr, $fmt:expr) => (
write!($dst, concat!($fmt, "\n"))
);
($dst:expr, $fmt:expr, $($arg:tt)*) => (
write!($dst, concat!($fmt, "\n"), $($arg)*)
);
}
/// A utility macro for indicating unreachable code.
///
/// This is useful any time that the compiler can't determine that some code is unreachable. For
/// example:
///
/// * Match arms with guard conditions.
/// * Loops that dynamically terminate.
/// * Iterators that dynamically terminate.
///
/// # Panics
///
/// This will always panic.
///
/// # Examples
///
/// Match arms:
///
/// ```
/// fn foo(x: Option<i32>) {
/// match x {
/// Some(n) if n >= 0 => println!("Some(Non-negative)"),
/// Some(n) if n < 0 => println!("Some(Negative)"),
/// Some(_) => unreachable!(), // compile error if commented out
/// None => println!("None")
/// }
/// }
/// ```
///
/// Iterators:
///
/// ```
/// fn divide_by_three(x: u32) -> u32 { // one of the poorest implementations of x/3
/// for i in 0.. {
/// if 3*i < i { panic!("u32 overflow"); }
/// if x < 3*i { return i-1; }
/// }
/// unreachable!();
/// }
/// ```
#[macro_export]
#[unstable(feature = "core",
reason = "relationship with panic is unclear")]
macro_rules! unreachable {
() => ({
panic!("internal error: entered unreachable code")
});
($msg:expr) => ({
unreachable!("{}", $msg)
});
($fmt:expr, $($arg:tt)*) => ({
panic!(concat!("internal error: entered unreachable code: ", $fmt), $($arg)*)
});
}
/// A standardised placeholder for marking unfinished code. It panics with the
/// message `"not yet implemented"` when executed.
#[macro_export]
#[unstable(feature = "core",
reason = "relationship with panic is unclear")]
macro_rules! unimplemented {
() => (panic!("not yet implemented"))
}<|fim▁end|> | } |
<|file_name|>DefaultApiTest.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Copyright 2017 Xoriant Corporation.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
/*
* Doctor Appointment
* Appointment
*
* OpenAPI spec version: 1.0.0
*
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*/
package io.swagger.client.api;
import io.swagger.client.ApiException;
import io.swagger.client.model.Payload;
import org.junit.Test;
import org.junit.Ignore;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* API tests for DefaultApi
*/
@Ignore
public class DefaultApiTest {
private final DefaultApi api = new DefaultApi();
/**
* Post new Doctor info
*
* endpoint for posting a newly created Doctor entity to the server
*
* @throws ApiException
* if the Api call fails
*/
@Test
public void createDoctorTest() throws ApiException {
Payload payload = null;
api.createDoctor(payload);
<|fim▁hole|> }
}<|fim▁end|> |
// TODO: test validations
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Parallel testing, supporting arbitrary collection ordering
The Workflow
------------
- Master py.test process starts up, inspects config to decide how many slaves to start, if at all
- env['parallel_base_urls'] is inspected first
- py.test config.option.appliances and the related --appliance cmdline flag are used
if env['parallel_base_urls'] isn't set
- if neither are set, no parallelization happens
- Slaves are started
- Master runs collection, blocks until slaves report their collections
- Slaves each run collection and submit them to the master, then block inside their runtest loop,
waiting for tests to run
- Master diffs slave collections against its own; the test ids are verified to match
across all nodes
- Master enters main runtest loop, uses a generator to build lists of test groups which are then
sent to slaves, one group at a time
- For each phase of each test, the slave serializes test reports, which are then unserialized on
  the master and handed to the normal pytest reporting hooks, which are able to deal with test
reports arriving out of order
- Before running the last test in a group, the slave will request more tests from the master
- If more tests are received, they are run
- If no tests are received, the slave will shut down after running its final test
- After all slaves are shut down, the master will do its end-of-session reporting as usual, and
shut down
"""
import collections
import difflib
import json
import os
import signal
import subprocess
from collections import OrderedDict, defaultdict, deque, namedtuple
from datetime import datetime
from itertools import count
from threading import Lock, RLock, Thread, Timer
from time import sleep, time
from urlparse import urlparse
import pytest
import zmq
from _pytest import runner
from functools32 import wraps
from fixtures import terminalreporter
from fixtures.parallelizer import remote
from fixtures.pytest_store import store
from utils import at_exit, conf
from utils.appliance import IPAppliance, stack as appliance_stack
from utils.log import create_sublogger
from utils.net import random_port
from utils.path import conf_path, project_path
from utils.sprout import SproutClient, SproutException
from utils.wait import wait_for
_appliance_help = '''specify appliance URLs to use for distributed testing.
this option can be specified more than once, and must be specified at least two times'''
env_base_urls = conf.env.get('parallel_base_urls', [])
if env_base_urls:
conf.runtime['env']['base_url'] = env_base_urls[0]
# Initialize slaveid to None, indicating this as the master process
# slaves will set this to a unique string when they're initialized
conf.runtime['env']['slaveid'] = None
# lock for protecting mutation of recv queue
recv_lock = Lock()
# lock for protecting zmq socket access
zmq_lock = Lock()
def pytest_addoption(parser):
group = parser.getgroup("cfme")
group._addoption('--appliance', dest='appliances', action='append',
default=env_base_urls, metavar='base_url', help=_appliance_help)
group._addoption('--use-sprout', dest='use_sprout', action='store_true',
default=False, help="Use Sprout for provisioning appliances.")
group._addoption('--sprout-appliances', dest='sprout_appliances', type=int,
default=1, help="How many Sprout appliances to use?.")
group._addoption('--sprout-timeout', dest='sprout_timeout', type=int,
default=60, help="How many minutes is the lease timeout.")
group._addoption('--sprout-provision-timeout', dest='sprout_provision_timeout', type=int,
default=60, help="How many minutes to wait for appliances provisioned.")
group._addoption(
'--sprout-group', dest='sprout_group', default=None, help="Which stream to use.")
group._addoption(
'--sprout-version', dest='sprout_version', default=None, help="Which version to use.")
group._addoption(
'--sprout-date', dest='sprout_date', default=None, help="Which date to use.")
group._addoption(
'--sprout-desc', dest='sprout_desc', default=None, help="Set description of the pool.")
def pytest_addhooks(pluginmanager):
import hooks
pluginmanager.addhooks(hooks)
@pytest.mark.hookwrapper
def pytest_configure(config):
# configures the parallel session, then fires pytest_parallel_configured
yield
if (config.option.appliances or (config.option.use_sprout and
config.option.sprout_appliances > 1)):
session = ParallelSession(config)
config.pluginmanager.register(session, "parallel_session")
store.parallelizer_role = 'master'
config.hook.pytest_parallel_configured(parallel_session=session)
else:
config.hook.pytest_parallel_configured(parallel_session=None)
def dump_pool_info(printf, pool_data):
printf("Fulfilled: {}".format(pool_data["fulfilled"]))
printf("Progress: {}%".format(pool_data["progress"]))
printf("Appliances:")
for appliance in pool_data["appliances"]:
name = appliance.pop("name")
printf("\t{}:".format(name))
for key in sorted(appliance.keys()):
printf("\t\t{}: {}".format(key, appliance[key]))
def handle_end_session(signal, frame):
# when signaled, end the current test session immediately
if store.parallel_session:
store.parallel_session.session_finished = True
signal.signal(signal.SIGQUIT, handle_end_session)
class SlaveDict(dict):
"""A normal dict, but with a special "add" method that autogenerated slaveids"""
# intentionally in the class scope so all instances share the slave counter
slaveid_generator = ('slave{:02d}'.format(i) for i in count())
lock = RLock()
_instances = []
def __init__(self, *args, **kwargs):
super(SlaveDict, self).__init__(*args, **kwargs)
with self.lock:
SlaveDict._instances.append(self)
# autoincrement the slaveids when something is added
def add(self, value):
self[next(self.slaveid_generator)] = value
# when removing a slave with this method, it is removed from all instances
    # use the normal `del` behavior to only remove from a single instance
def remove(self, key):
with self.lock:
for instance in self._instances:
if key in instance:
del(instance[key])
    # helper that wraps a dict method to generate a version protected by the lock
# like a decorator, but takes a method name instead of wrapping
def _lock_wrap(method_name):
wrapped = getattr(dict, method_name)
@wraps(wrapped)
def wrapper(self, *args, **kwargs):
with self.lock:
return wrapped(self, *args, **kwargs)
return wrapper
# all mutating methods should be wrapped; if one is missing here that isn't intentional
__setitem__ = _lock_wrap('__setitem__')
__delitem__ = _lock_wrap('__delitem__')
# destroy now-useless lock wrapper function
del(_lock_wrap)
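    # Illustrative usage (values are hypothetical): `slaves = SlaveDict(); slaves.add(url)`
    # stores `url` under an auto-generated key ('slave00', 'slave01', ...), while
    # `slaves.remove('slave00')` drops that key from every SlaveDict instance at once.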
class ParallelSession(object):
def __init__(self, config):
self.config = config
self.session = None
self.session_finished = False
self.countfailures = 0
self.collection = OrderedDict()
self.sent_tests = 0
self.log = create_sublogger('master')
self.maxfail = config.getvalue("maxfail")
self._failed_collection_errors = {}
self.terminal = store.terminalreporter
self.trdist = None
self.slaves = SlaveDict()
self.slave_urls = SlaveDict()
self.slave_tests = defaultdict(set)
self.test_groups = self._test_item_generator()
self._pool = []
self.pool_lock = Lock()
from utils.conf import cfme_data
self.provs = sorted(set(cfme_data['management_systems'].keys()),
key=len, reverse=True)
self.slave_allocation = collections.defaultdict(list)
self.used_prov = set()
self.failed_slave_test_groups = deque()
self.slave_spawn_count = 0
self.sprout_client = None
self.sprout_timer = None
self.sprout_pool = None
if not self.config.option.use_sprout:
# Without Sprout
self.appliances = self.config.option.appliances
else:
# Using sprout
self.sprout_client = SproutClient.from_config()
self.terminal.write(
"Requesting {} appliances from Sprout at {}\n".format(
self.config.option.sprout_appliances, self.sprout_client.api_entry))
pool_id = self.sprout_client.request_appliances(
self.config.option.sprout_group,
count=self.config.option.sprout_appliances,
version=self.config.option.sprout_version,
date=self.config.option.sprout_date,
lease_time=self.config.option.sprout_timeout
)
self.terminal.write("Pool {}. Waiting for fulfillment ...\n".format(pool_id))
self.sprout_pool = pool_id
at_exit(self.sprout_client.destroy_pool, self.sprout_pool)
if self.config.option.sprout_desc is not None:
self.sprout_client.set_pool_description(
pool_id, str(self.config.option.sprout_desc))
try:
result = wait_for(
lambda: self.sprout_client.request_check(self.sprout_pool)["fulfilled"],
num_sec=self.config.option.sprout_provision_timeout * 60,
delay=5,
message="requesting appliances was fulfilled"
)
except:
pool = self.sprout_client.request_check(self.sprout_pool)
dump_pool_info(lambda x: self.terminal.write("{}\n".format(x)), pool)
self.terminal.write("Destroying the pool on error.\n")
self.sprout_client.destroy_pool(pool_id)
raise
else:
pool = self.sprout_client.request_check(self.sprout_pool)
dump_pool_info(lambda x: self.terminal.write("{}\n".format(x)), pool)
self.terminal.write("Provisioning took {0:.1f} seconds\n".format(result.duration))
request = self.sprout_client.request_check(self.sprout_pool)
self.appliances = []
# Push an appliance to the stack to have proper reference for test collection
            # FIXME: this is a bad hack based on the need for control of collection partitioning
appliance_stack.push(
IPAppliance(address=request["appliances"][0]["ip_address"]))
self.terminal.write("Appliances were provided:\n")
for appliance in request["appliances"]:
url = "https://{}/".format(appliance["ip_address"])
self.appliances.append(url)
self.terminal.write("- {} is {}\n".format(url, appliance['name']))
map(lambda a: "https://{}/".format(a["ip_address"]), request["appliances"])
self._reset_timer()
# Set the base_url for collection purposes on the first appliance
conf.runtime["env"]["base_url"] = self.appliances[0]
# Retrieve and print the template_name for Jenkins to pick up
template_name = request["appliances"][0]["template_name"]
conf.runtime["cfme_data"]["basic_info"]["appliance_template"] = template_name
self.terminal.write("appliance_template=\"{}\";\n".format(template_name))
with project_path.join('.appliance_template').open('w') as template_file:
template_file.write('export appliance_template="{}"'.format(template_name))
self.terminal.write("Parallelized Sprout setup finished.\n")
self.slave_appliances_data = {}
for appliance in request["appliances"]:
self.slave_appliances_data[appliance["ip_address"]] = (
appliance["template_name"], appliance["provider"]
)
# set up the ipc socket
zmq_endpoint = 'tcp://127.0.0.1:{}'.format(random_port())
ctx = zmq.Context.instance()
self.sock = ctx.socket(zmq.ROUTER)
self.sock.bind('{}'.format(zmq_endpoint))
# clean out old slave config if it exists
slave_config = conf_path.join('slave_config.yaml')
slave_config.check() and slave_config.remove()
# write out the slave config
conf.runtime['slave_config'] = {
'args': self.config.args,
'options': self.config.option.__dict__,
'zmq_endpoint': zmq_endpoint,
'sprout': self.sprout_client is not None and self.sprout_pool is not None,
}
if hasattr(self, "slave_appliances_data"):
conf.runtime['slave_config']["appliance_data"] = self.slave_appliances_data
conf.runtime['slave_config']['options']['use_sprout'] = False # Slaves don't use sprout
conf.save('slave_config')
for i, base_url in enumerate(self.appliances):
self.slave_urls.add(base_url)
for slave in sorted(self.slave_urls):
self.print_message("using appliance {}".format(self.slave_urls[slave]),
slave, green=True)
# Start the recv queue
self._recv_queue = deque()
recv_queuer = Thread(target=_recv_queue, args=(self,))
recv_queuer.daemon = True
recv_queuer.start()
def _slave_audit(self):
# XXX: There is currently no mechanism to add or remove slave_urls, short of
# firing up the debugger and doing it manually. This is making room for
# planned future abilities to dynamically add and remove slaves via automation
# check for unexpected slave shutdowns and redistribute tests
for slaveid, slave in self.slaves.items():
returncode = slave.poll()
if returncode:
del(self.slaves[slaveid])
if returncode == -9:
msg = '{} killed due to error, respawning'.format(slaveid)
else:
msg = '{} terminated unexpectedly with status {}, respawning'.format(
slaveid, returncode)
if self.slave_tests[slaveid]:
num_failed_tests = len(self.slave_tests[slaveid])
self.sent_tests -= num_failed_tests
msg += ' and redistributing {} tests'.format(num_failed_tests)
with SlaveDict.lock:
self.failed_slave_test_groups.append(self.slave_tests.pop(slaveid))
self.print_message(msg, purple=True)
# Make sure we have a slave for every slave_url
for slaveid in list(self.slave_urls):
if slaveid not in self.slaves:
self._start_slave(slaveid)
# If a slave has lost its base_url for any reason, kill that slave
# Losing a base_url means the associated appliance died :(
for slaveid in list(self.slaves):
if slaveid not in self.slave_urls:
self.print_message("{}'s appliance has died, deactivating slave".format(slaveid))
self.interrupt(slaveid)
def _start_slave(self, slaveid):
devnull = open(os.devnull, 'w')
try:
base_url = self.slave_urls[slaveid]
except KeyError:
# race condition: slave was removed from slave_urls when something else decided to
# start it; in this case slave_urls wins and the slave should not start
return
# worker output redirected to null; useful info comes via messages and logs
slave = subprocess.Popen(
['python', remote.__file__, slaveid, base_url],
stdout=devnull, stderr=devnull,
)
self.slaves[slaveid] = slave
self.slave_spawn_count += 1
at_exit(slave.kill)
def _reset_timer(self):
if not (self.sprout_client is not None and self.sprout_pool is not None):
if self.sprout_timer:
self.sprout_timer.cancel() # Cancel it anyway
self.terminal.write("Sprout timer cancelled\n")
return
if self.sprout_timer:
self.sprout_timer.cancel()
self.sprout_timer = Timer(
(self.config.option.sprout_timeout / 2) * 60,
self.sprout_ping_pool)
self.sprout_timer.daemon = True
self.sprout_timer.start()
def sprout_ping_pool(self):
try:
self.sprout_client.prolong_appliance_pool_lease(self.sprout_pool)
except SproutException as e:
self.terminal.write(
"Pool {} does not exist any more, disabling the timer.\n".format(self.sprout_pool))
self.terminal.write(
"This can happen before the tests are shut down "
"(last deleted appliance deleted the pool")
self.terminal.write("> The exception was: {}".format(str(e)))
self.sprout_pool = None # Will disable the timer in next reset call.
self._reset_timer()
def send(self, slaveid, event_data):
"""Send data to slave.
``event_data`` will be serialized as JSON, and so must be JSON serializable
"""
event_json = json.dumps(event_data)
with zmq_lock:
self.sock.send_multipart([slaveid, '', event_json])
def recv(self):
"""Return any unproccesed events from the recv queue"""
try:
with recv_lock:
return self._recv_queue.popleft()
except IndexError:
return None, None, None
def print_message(self, message, prefix='master', **markup):
"""Print a message from a node to the py.test console
Args:
            prefix: Can be a slaveid or any string, e.g. ``'master'`` is also useful here.
message: The message to print
**markup: If set, overrides the default markup when printing the message
"""
# differentiate master and slave messages by default
if not markup:
if prefix == 'master':
markup = {'blue': True}
else:
markup = {'cyan': True}
stamp = datetime.now().strftime("%Y%m%d %H:%M:%S")
self.terminal.write_ensure_prefix('({})[{}] '.format(prefix, stamp), message, **markup)
def ack(self, slaveid, event_name):
"""Acknowledge a slave's message"""
self.send(slaveid, 'ack {}'.format(event_name))
def monitor_shutdown(self, slaveid, respawn=False):
# non-daemon so slaves get every opportunity to shut down cleanly
shutdown_thread = Thread(target=self._monitor_shutdown_t, args=(slaveid, respawn))
shutdown_thread.start()
def _monitor_shutdown_t(self, slaveid, respawn):
# a KeyError here means self.slaves got mangled, indicating a problem elsewhere
try:
slave = self.slaves[slaveid]
except KeyError:
self.log.warning('Slave was missing when trying to monitor shutdown')
return
start_time = time()
# configure the polling logic
polls = 0
# how often to poll
poll_sleep_time = .5
# how often to report (calculated to be around once a minute based on poll_sleep_time)
poll_report_modulo = 60 / poll_sleep_time
# maximum time to wait
poll_num_sec = 300
# time spent waiting
def poll_walltime():
return time() - start_time
# start the poll
while poll_walltime() < poll_num_sec:
polls += 1
ec = slave.poll()
if ec is None:
# process still running, report if needed and continue polling
if polls % poll_report_modulo == 0:
remaining_time = int(poll_num_sec - poll_walltime())
self.print_message('{} still shutting down, '
'will continue polling for {} seconds '
.format(slaveid, remaining_time), blue=True)
else:
if ec == 0:
self.print_message('{} exited'.format(slaveid), green=True)
else:
self.print_message('{} died'.format(slaveid), red=True)
break
sleep(poll_sleep_time)
else:
self.print_message('{} failed to shut down gracefully; killed'.format(slaveid),
red=True)
slave.kill()
if not respawn and slaveid in self.slave_urls:
self.slave_urls.remove(slaveid)
elif slaveid in self.slaves:
del(self.slaves[slaveid])
def interrupt(self, slaveid, **kwargs):
"""Nicely ask a slave to terminate"""
slave = self.slaves.pop(slaveid, None)
if slave and slave.poll() is None:
slave.send_signal(subprocess.signal.SIGINT)
self.monitor_shutdown(slaveid, **kwargs)
def kill(self, slaveid, **kwargs):
"""Rudely kill a slave"""
slave = self.slaves.pop(slaveid, None)
if slave and slave.poll() is None:
slave.kill()
self.monitor_shutdown(slaveid, **kwargs)
def send_tests(self, slaveid):
"""Send a slave a group of tests"""
try:
with SlaveDict.lock:
tests = list(self.failed_slave_test_groups.popleft())
except IndexError:
try:
tests = self.get(slaveid)
# To return to the old parallelizer distributor, remove the line above
# and replace it with the line below.
# tests = self.test_groups.next()
except StopIteration:
tests = []
self.send(slaveid, tests)
self.slave_tests[slaveid] |= set(tests)
collect_len = len(self.collection)
tests_len = len(tests)
self.sent_tests += tests_len
if tests:
self.print_message('sent {} tests to {} ({}/{}, {:.1f}%)'.format(
tests_len, slaveid, self.sent_tests, collect_len,
self.sent_tests * 100. / collect_len
))
return tests
def pytest_sessionstart(self, session):
"""pytest sessionstart hook
- sets up distributed terminal reporter
        - sets up zmq ipc socket for the slaves to use
- writes pytest options and args to slave_config.yaml
- starts the slaves
- register atexit kill hooks to destroy slaves at the end if things go terribly wrong
"""
# If reporter() gave us a fake terminal reporter in __init__, the real
# terminal reporter is registered by now
self.terminal = store.terminalreporter
self.trdist = TerminalDistReporter(self.config, self.terminal)
self.config.pluginmanager.register(self.trdist, "terminaldistreporter")
self.session = session
def pytest_runtestloop(self):
"""pytest runtest loop
- Disable the master terminal reporter hooks, so we can add our own handlers
that include the slaveid in the output
- Send tests to slaves when they ask
- Log the starting of tests and test results, including slave id
- Handle clean slave shutdown when they finish their runtest loops
- Restore the master terminal reporter after testing so we get the final report
"""
# Build master collection for slave diffing and distribution
for item in self.session.items:
self.collection[item.nodeid] = item
# Fire up the workers after master collection is complete
        # master and the first slave share an appliance; this is a workaround to prevent a slave
# from altering an appliance while master collection is still taking place
self._slave_audit()
try:
self.print_message("Waiting for {} slave collections".format(len(self.slaves)),
red=True)
# Turn off the terminal reporter to suppress the builtin logstart printing
terminalreporter.disable()
while True:
# spawn/kill/replace slaves if needed
self._slave_audit()
if not self.slaves:
# All slaves are killed or errored, we're done with tests
self.print_message('all slaves have exited', yellow=True)
self.session_finished = True
if self.session_finished:
break
slaveid, event_data, event_name = self.recv()
if event_name == 'collectionfinish':
slave_collection = event_data['node_ids']
# compare slave collection to the master, all test ids must be the same
self.log.debug('diffing {} collection'.format(slaveid))
diff_err = report_collection_diff(slaveid, self.collection.keys(),
slave_collection)
if diff_err:
self.print_message('collection differs, respawning', slaveid,
purple=True)
self.print_message(diff_err, purple=True)
self.log.error('{}'.format(diff_err))
self.kill(slaveid)
self._start_slave(slaveid)
else:
self.ack(slaveid, event_name)
elif event_name == 'need_tests':
self.send_tests(slaveid)
self.log.info('starting master test distribution')
elif event_name == 'runtest_logstart':
self.ack(slaveid, event_name)
self.trdist.runtest_logstart(slaveid,
event_data['nodeid'], event_data['location'])
elif event_name == 'runtest_logreport':
self.ack(slaveid, event_name)
report = unserialize_report(event_data['report'])
if (report.when in ('call', 'teardown')
and report.nodeid in self.slave_tests[slaveid]):
self.slave_tests[slaveid].remove(report.nodeid)
self.trdist.runtest_logreport(slaveid, report)
elif event_name == 'internalerror':
self.ack(slaveid, event_name)
self.print_message(event_data['message'], slaveid, purple=True)
with SlaveDict.lock:
if slaveid in self.slaves:
# If this slave hasn't already quit, kill it with fire (signal 9)
self.slaves[slaveid].send_signal(9)
elif event_name == 'shutdown':
self.ack(slaveid, event_name)
self.monitor_shutdown(slaveid)
# total slave spawn count * 3, to allow for each slave's initial spawn
# and then each slave (on average) can fail two times
if self.slave_spawn_count >= len(self.appliances) * 3:
self.print_message('too many slave respawns, exiting',
red=True, bold=True)
raise KeyboardInterrupt('Interrupted due to slave failures')
except Exception as ex:
self.log.error('Exception in runtest loop:')
self.log.exception(ex)
raise
finally:
terminalreporter.enable()
# Suppress other runtestloop calls
return True
def _test_item_generator(self):
for tests in self._modscope_item_generator():
yield tests
def _modscope_item_generator(self):
# breaks out tests by module, can work just about any way we want
        # as long as it yields lists of test ids from the master collection
sent_tests = 0
module_items_cache = []
collection_ids = self.collection.keys()
collection_len = len(collection_ids)
for i, item_id in enumerate(collection_ids):
# everything before the first '::' is the module fspath
i_fspath = item_id.split('::')[0]
try:
nextitem_id = collection_ids[i + 1]
ni_fspath = nextitem_id.split('::')[0]
except IndexError:
nextitem_id = ni_fspath = None
module_items_cache.append(item_id)
if i_fspath == ni_fspath:
# This item and the next item are in the same module
# loop to the next item
continue
else:
# This item and the next item are in different modules,
# yield the indices if any items were generated
if not module_items_cache:
continue
for tests in self._modscope_id_splitter(module_items_cache):
tests_len = len(tests)
sent_tests += tests_len
self.log.info('%d tests remaining to send'
% (collection_len - sent_tests))
if tests:
yield tests
# Then clear the cache in-place
module_items_cache[:] = []
def _modscope_id_splitter(self, module_items):
# given a list of item ids from one test module, break up tests into groups with the same id
parametrized_ids = defaultdict(list)
for item in module_items:
try:
                # split on the leftmost bracket, then strip everything after the rightmost bracket
# so 'test_module.py::test_name[parametrized_id]' becomes 'parametrized_id'<|fim▁hole|> parametrized_ids[parametrized_id].append(item)
for id, tests in parametrized_ids.items():
if id is None:
id = 'no params'
self.log.info('sent tests with param {} {!r}'.format(id, tests))
yield tests
def get(self, slave):
with self.pool_lock:
if not self._pool:
for test_group in self.test_groups:
self._pool.append(test_group)
for test in test_group:
if '[' in test:
found_prov = []
for pv in self.provs:
if pv in test:
found_prov.append(pv)
break
provs = list(set(found_prov).intersection(self.provs))
if provs:
self.used_prov = self.used_prov.union(set(provs))
if self.used_prov:
self.ratio = float(len(self.slaves)) / float(len(self.used_prov))
else:
self.ratio = 0.0
if not self._pool:
raise StopIteration
current_allocate = self.slave_allocation.get(slave, None)
# num_provs_list = [len(v) for k, v in self.slave_allocation.iteritems()]
# average_num_provs = sum(num_provs_list) / float(len(self.slaves))
appliance_num_limit = 2
for test_group in self._pool:
for test in test_group:
# If the test is parametrized...
if '[' in test:
found_prov = []
for pv in self.provs:
if pv in test:
found_prov.append(pv)
break
# The line below can probably be removed now, since we compare
# providers in the loop above with self.provs, which is a list
# of all providers.
provs = list(set(found_prov).intersection(self.provs))
# If the parametrization contains a provider...
if provs:
prov = provs[0]
# num_slave_with_prov = len([sl for sl, provs_list
# in self.slave_allocation.iteritems()
# if prov in provs_list])
# If this slave/appliance already has providers then...
if current_allocate:
# If the slave has _our_ provider
if prov in current_allocate:
# provider is already with the slave, so just return the tests
self._pool.remove(test_group)
return test_group
# If the slave doesn't have _our_ provider
else:
# Check to see how many slaves there are with this provider
if len(self.slave_allocation[slave]) >= appliance_num_limit:
continue
else:
# Adding provider to slave since there are not too many
self.slave_allocation[slave].append(prov)
self._pool.remove(test_group)
return test_group
# If this slave doesn't have any providers...
else:
# Adding provider to slave
self.slave_allocation[slave].append(prov)
self._pool.remove(test_group)
return test_group
else:
# No providers - ie, not a provider parametrized test
self._pool.remove(test_group)
return test_group
else:
# No params, so no need to think about providers
self._pool.remove(test_group)
return test_group
# Here means no tests were able to be sent
for test_group in self._pool:
for test in test_group:
# If the test is parametrized...
if '[' in test:
found_prov = []
for pv in self.provs:
if pv in test:
found_prov.append(pv)
break
# The line below can probably be removed now, since we compare
# providers in the loop above with self.provs, which is a list
# of all providers.
provs = list(set(found_prov).intersection(self.provs))
# If the parametrization contains a provider...
if provs:
# Already too many slaves with provider
app_url = self.slave_urls[slave]
app_ip = urlparse(app_url).netloc
app = IPAppliance(app_ip)
self.print_message('cleansing appliance', slave,
purple=True)
try:
app.delete_all_providers()
except:
                            self.print_message('could not cleanse', slave,
red=True)
                        self.slave_allocation[slave] = [provs[0]]
self._pool.remove(test_group)
return test_group
return []
def report_collection_diff(slaveid, from_collection, to_collection):
"""Report differences, if any exist, between master and a slave collection
    Returns a diff report string if the collections differ, otherwise None
Note:
        This function will sort the collections before comparing them.
"""
from_collection, to_collection = sorted(from_collection), sorted(to_collection)
if from_collection == to_collection:
# Well, that was easy.
return
# diff the two, so we get some idea of what's wrong
diff = difflib.unified_diff(
from_collection,
to_collection,
fromfile='master',
tofile=slaveid,
)
# diff is a line generator, stringify it
diff = '\n'.join([line.rstrip() for line in diff])
return '{slaveid} diff:\n{diff}\n'.format(slaveid=slaveid, diff=diff)
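# Illustrative usage (hypothetical collections, not from a real run):
#   report_collection_diff('slave1', ['test_a', 'test_b'], ['test_a', 'test_c'])
# returns a unified-diff string labelled master vs. slave1; identical collections
# (after sorting) return None.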
def _recv_queue(session):
# poll the zmq socket, populate the recv queue deque with responses
while not session.session_finished:
try:
with zmq_lock:
slaveid, empty, event_json = session.sock.recv_multipart(flags=zmq.NOBLOCK)
except zmq.Again:
continue
event_data = json.loads(event_json)
event_name = event_data.pop('_event_name')
if event_name == 'message':
message = event_data.pop('message')
# messages are special, handle them immediately
session.print_message(message, slaveid, **event_data)
session.ack(slaveid, event_name)
else:
with recv_lock:
session._recv_queue.append((slaveid, event_data, event_name))
class TerminalDistReporter(object):
"""Terminal Reporter for Distributed Testing
trdist reporter exists to make sure we get good distributed logging during the runtest loop,
which means the normal terminal reporter should be disabled during the loop
This class is where we make sure the terminal reporter is made aware of whatever state it
needs to report properly once we turn it back on after the runtest loop
It has special versions of pytest reporting hooks that, where possible, try to include a
slave ID. These hooks are called in :py:class:`ParallelSession`'s runtestloop hook.
"""
def __init__(self, config, terminal):
self.config = config
self.tr = terminal
self.outcomes = {}
def runtest_logstart(self, slaveid, nodeid, location):
test = self.tr._locationline(nodeid, *location)
prefix = '({}) {}'.format(slaveid, test)
self.tr.write_ensure_prefix(prefix, 'running', blue=True)
self.config.hook.pytest_runtest_logstart(nodeid=nodeid, location=location)
def runtest_logreport(self, slaveid, report):
# Run all the normal logreport hooks
self.config.hook.pytest_runtest_logreport(report=report)
# Now do what the terminal reporter would normally do, but include parallelizer info
outcome, letter, word = self.config.hook.pytest_report_teststatus(report=report)
# Stash stats on the terminal reporter so it reports properly
# after it's reenabled at the end of runtestloop
self.tr.stats.setdefault(outcome, []).append(report)
test = self.tr._locationline(report.nodeid, *report.location)
prefix = '({}) {}'.format(slaveid, test)
try:
# for some reason, pytest_report_teststatus returns a word, markup tuple
# when the word would be 'XPASS', so unpack it here if that's the case
word, markup = word
except (TypeError, ValueError):
# word wasn't iterable or didn't have enough values, use it as-is
pass
if word in ('PASSED', 'xfail'):
markup = {'green': True}
elif word in ('ERROR', 'FAILED', 'XPASS'):
markup = {'red': True}
elif word:
markup = {'yellow': True}
# For every stage where we can report the outcome, stash it in the outcomes dict
if word:
self.outcomes[test] = Outcome(word, markup)
# Then, when we get to the teardown report, print the last outcome
        # This prevents reporting a test as 'PASSED' if its teardown phase fails, for example
if report.when == 'teardown':
word, markup = self.outcomes.pop(test)
self.tr.write_ensure_prefix(prefix, word, **markup)
Outcome = namedtuple('Outcome', ['word', 'markup'])
def unserialize_report(reportdict):
"""
Generate a :py:class:`TestReport <pytest:_pytest.runner.TestReport>` from a serialized report
"""
return runner.TestReport(**reportdict)<|fim▁end|> | parametrized_id = item.split('[')[1].rsplit(']')[0]
except IndexError:
# splits failed, item has no parametrized id
parametrized_id = None |
<|file_name|>TestObject.java<|end_file_name|><|fim▁begin|>package com.couchbase.lite.testapp.ektorp.tests;
import org.ektorp.support.OpenCouchDbDocument;
import java.util.List;
import java.util.Set;
@SuppressWarnings("serial")
public class TestObject extends OpenCouchDbDocument {
private Integer foo;
private Boolean bar;
private String baz;
private String status;
private String key;
private List<String> stuff;
private Set<String> stuffSet;
public Set<String> getStuffSet() {
return stuffSet;
}
public void setStuffSet(Set<String> stuffSet) {
this.stuffSet = stuffSet;
}
public List<String> getStuff() {
return stuff;
}
public void setStuff(List<String> stuff) {
this.stuff = stuff;
}
public Integer getFoo() {
return foo;
}
public void setFoo(Integer foo) {
this.foo = foo;
}
public Boolean getBar() {
return bar;
}
public void setBar(Boolean bar) {
this.bar = bar;
}
public String getBaz() {
return baz;
}
public void setBaz(String baz) {
this.baz = baz;
}
public String getStatus() {
return status;
}
public void setStatus(String status) {
this.status = status;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public TestObject() {
}
public TestObject(Integer foo, Boolean bar, String baz) {
this.foo = foo;
this.bar = bar;
this.baz = baz;
this.status = null;
}
public TestObject(Integer foo, Boolean bar, String baz, String status) {
this.foo = foo;
this.bar = bar;
this.baz = baz;
this.status = status;
}
public TestObject(String id, String key) {
this.setId(id);
this.key = key;
}
@Override
public boolean equals(Object o) {
if(o instanceof TestObject) {
TestObject other = (TestObject)o;
if(getId() != null && other.getId() != null && getId().equals(other.getId())) {
return true;
}
}<|fim▁hole|>
}<|fim▁end|> | return false;
} |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | from .fixtures import * # noqa, pylint: disable=wildcard-import |
<|file_name|>term_x.go<|end_file_name|><|fim▁begin|>// +build linux darwin freebsd netbsd openbsd solaris
package clif
<|fim▁hole|>import (
"os"
"runtime"
"syscall"
"unsafe"
)
func init() {
TermWidthCall = func() (int, error) {
w := new(termWindow)
tio := syscall.TIOCGWINSZ
if runtime.GOOS == "darwin" {
tio = TERM_TIOCGWINSZ_OSX
}
res, _, err := syscall.Syscall(sys_ioctl,
uintptr(syscall.Stdin),
uintptr(tio),
uintptr(unsafe.Pointer(w)),
)
if err != 0 || int(res) == -1 {
return TERM_DEFAULT_WIDTH, os.NewSyscallError("GetWinsize", err)
}
return int(w.Col) - 4, nil
}
TermWidthCurrent, _ = TermWidthCall()
}<|fim▁end|> | |
<|file_name|>fancy-select.component.ts<|end_file_name|><|fim▁begin|>import { Component } from '@angular/core';
@Component({
selector: 'cs-fancy-select',
templateUrl: 'fancy-select.component.html',
styleUrls: ['fancy-select.component.scss'],<|fim▁hole|><|fim▁end|> | })
export class FancySelectComponent {} |
<|file_name|>EtcdJsonBundle.java<|end_file_name|><|fim▁begin|>/**
* Copyright (C) 2015 meltmedia ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.meltmedia.dropwizard.etcd.json;
import java.util.concurrent.ScheduledExecutorService;
import java.util.function.Function;
import java.util.function.Supplier;
import com.codahale.metrics.MetricRegistry;
import mousio.etcd4j.EtcdClient;
import io.dropwizard.Configuration;
import io.dropwizard.ConfiguredBundle;
import io.dropwizard.setup.Bootstrap;
import io.dropwizard.setup.Environment;
public class EtcdJsonBundle<C extends Configuration> implements ConfiguredBundle<C> {
public static class Builder<C extends Configuration> {
private Supplier<EtcdClient> client;
private Supplier<ScheduledExecutorService> executor;
private Function<C, String> directoryAccessor;
public Builder<C> withClient(Supplier<EtcdClient> client) {
this.client = client;
return this;
}
public Builder<C> withExecutor(Supplier<ScheduledExecutorService> executor) {
this.executor = executor;
return this;<|fim▁hole|> public Builder<C> withDirectory(Function<C, String> directoryAccessor) {
this.directoryAccessor = directoryAccessor;
return this;
}
public EtcdJsonBundle<C> build() {
return new EtcdJsonBundle<C>(client, executor, directoryAccessor);
}
}
public static <C extends Configuration> Builder<C> builder() {
return new Builder<C>();
}
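  // Illustrative usage sketch (MyConfig and its getEtcdDirectory() accessor are hypothetical;
  // typically wired up in an Application's initialize()):
  //   EtcdJsonBundle<MyConfig> bundle = EtcdJsonBundle.<MyConfig>builder()
  //       .withClient(() -> etcdClient)
  //       .withExecutor(() -> scheduledExecutor)
  //       .withDirectory(MyConfig::getEtcdDirectory)
  //       .build();
  //   bootstrap.addBundle(bundle);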
Supplier<EtcdClient> clientSupplier;
EtcdJson factory;
private Supplier<ScheduledExecutorService> executor;
private Function<C, String> directoryAccessor;
public EtcdJsonBundle(Supplier<EtcdClient> client, Supplier<ScheduledExecutorService> executor,
Function<C, String> directoryAccessor) {
this.clientSupplier = client;
this.executor = executor;
this.directoryAccessor = directoryAccessor;
}
@Override
public void initialize(Bootstrap<?> bootstrap) {
}
@Override
public void run(C configuration, Environment environment) throws Exception {
factory =
EtcdJson.builder().withClient(clientSupplier).withExecutor(executor.get())
.withBaseDirectory(directoryAccessor.apply(configuration))
.withMapper(environment.getObjectMapper())
.withMetricRegistry(environment.metrics()).build();
environment.lifecycle().manage(new EtcdJsonManager(factory));
environment.healthChecks().register("etcd-watch", new WatchServiceHealthCheck(factory.getWatchService()));
}
public EtcdJson getFactory() {
return this.factory;
}
}<|fim▁end|> | }
|
<|file_name|>42child.rs<|end_file_name|><|fim▁begin|>use std::process::Command;<|fim▁hole|>// shell
static FILE_CMD :& 'static str = "file";
fn main() {
let output = Command::new(FILE_CMD)
.arg("--version1")
.output().unwrap_or_else( |e|
panic!("failed to run cmd: {} with {}", FILE_CMD, e)
);
if output.status.success() {
println!("stdout: {}", String::from_utf8_lossy(&output.stdout));
} else {
println!("stderr: {}", String::from_utf8_lossy(&output.stderr));
}
}<|fim▁end|> | |
<|file_name|>issue-90113.rs<|end_file_name|><|fim▁begin|>mod list {
pub use self::List::Cons;
pub enum List<T> {
Cons(T, Box<List<T>>),
}
}
mod alias {
use crate::list::List;
pub type Foo = List<String>;
}
fn foo(l: crate::alias::Foo) {
match l {
Cons(..) => {} //~ ERROR: cannot find tuple struct or tuple variant `Cons` in this scope<|fim▁hole|>
fn main() {}<|fim▁end|> | }
} |
<|file_name|>demo2.py<|end_file_name|><|fim▁begin|># Import time (for delay) library (for SmartHome api) and GPIO (for raspberry pi gpio)
from library import SmartHomeApi
import RPi.GPIO as GPIO
import time
from datetime import datetime
# 7 -> LED
# Create the client with pre-existing credentials
api = SmartHomeApi("http://localhost:5000/api/0.1", id=10, api_key="api_eMxSb7n6G10Svojn3PlU5P6srMaDrFxmKAnWvnW6UyzmBG")
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT)
last_status = "UNKNOWN"
while True:
preferences = api.GetUserPrefences(2)['results']
print(preferences)
    preference = next((item for item in preferences if item["key"] == "bedtime"), None)
if not preference:
        print("Could not find 'bedtime' preference!")
api.AddPreference(2, "bedtime", "00:00")
print("Created bedtime preference! Please set it to the correct value in your dashboard")
else:
bedtime = preference['value']
if not bedtime:
print("Unexpected error occured!")
else:<|fim▁hole|> time_str = datetime.now().strftime('%H:%M')
print("time: {}".format(time_str))
bedtime_dt = datetime.strptime(bedtime, "%H:%M")
time_hm = datetime.strptime(time_str, "%H:%M")
if time_hm >= bedtime_dt:
print("Going to bed! Currently: {}, going to bed at {}".format(time_str, bedtime))
GPIO.output(7, GPIO.LOW)
else:
print("Not yet time for bed. Currently: {}, going to bed at {}".format(time_str, bedtime))
GPIO.output(7, GPIO.HIGH)
time.sleep(1)<|fim▁end|> | print(bedtime) |
<|file_name|>async_client.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import (
Dict,
Optional,
AsyncIterable,
Awaitable,
AsyncIterator,
Sequence,
Tuple,
Type,
Union,
)
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.bigquery_storage_v1beta2.types import storage
from google.cloud.bigquery_storage_v1beta2.types import stream
from google.cloud.bigquery_storage_v1beta2.types import table
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import BigQueryWriteTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import BigQueryWriteGrpcAsyncIOTransport
from .client import BigQueryWriteClient
class BigQueryWriteAsyncClient:
"""BigQuery Write API.
The Write API can be used to write data to BigQuery.
"""
_client: BigQueryWriteClient
DEFAULT_ENDPOINT = BigQueryWriteClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = BigQueryWriteClient.DEFAULT_MTLS_ENDPOINT
table_path = staticmethod(BigQueryWriteClient.table_path)
parse_table_path = staticmethod(BigQueryWriteClient.parse_table_path)
write_stream_path = staticmethod(BigQueryWriteClient.write_stream_path)
parse_write_stream_path = staticmethod(BigQueryWriteClient.parse_write_stream_path)
common_billing_account_path = staticmethod(
BigQueryWriteClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
BigQueryWriteClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(BigQueryWriteClient.common_folder_path)
parse_common_folder_path = staticmethod(
BigQueryWriteClient.parse_common_folder_path
)
common_organization_path = staticmethod(
BigQueryWriteClient.common_organization_path
)
parse_common_organization_path = staticmethod(
BigQueryWriteClient.parse_common_organization_path
)
common_project_path = staticmethod(BigQueryWriteClient.common_project_path)
parse_common_project_path = staticmethod(
BigQueryWriteClient.parse_common_project_path
)
common_location_path = staticmethod(BigQueryWriteClient.common_location_path)
parse_common_location_path = staticmethod(
BigQueryWriteClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BigQueryWriteAsyncClient: The constructed client.
"""
return BigQueryWriteClient.from_service_account_info.__func__(BigQueryWriteAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BigQueryWriteAsyncClient: The constructed client.
"""
return BigQueryWriteClient.from_service_account_file.__func__(BigQueryWriteAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return BigQueryWriteClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> BigQueryWriteTransport:
"""Returns the transport used by the client instance.
Returns:
BigQueryWriteTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(BigQueryWriteClient).get_transport_class, type(BigQueryWriteClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, BigQueryWriteTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the big query write client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.BigQueryWriteTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = BigQueryWriteClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_write_stream(
self,
request: Union[storage.CreateWriteStreamRequest, dict] = None,
*,
parent: str = None,
write_stream: stream.WriteStream = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> stream.WriteStream:
r"""Creates a write stream to the given table. Additionally, every
table has a special COMMITTED stream named '_default' to which
data can be written. This stream doesn't need to be created
using CreateWriteStream. It is a stream that can be used
simultaneously by any number of clients. Data written to this
stream is considered committed as soon as an acknowledgement is
received.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_create_write_stream():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.CreateWriteStreamRequest(
parent="parent_value",
)
# Make the request
response = client.create_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1beta2.types.CreateWriteStreamRequest, dict]):
The request object. Request message for
`CreateWriteStream`.
parent (:class:`str`):
Required. Reference to the table to which the stream
belongs, in the format of
``projects/{project}/datasets/{dataset}/tables/{table}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
write_stream (:class:`google.cloud.bigquery_storage_v1beta2.types.WriteStream`):
Required. Stream to be created.
This corresponds to the ``write_stream`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1beta2.types.WriteStream:
Information about a single stream
that gets data inside the storage
system.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, write_stream])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = storage.CreateWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if write_stream is not None:
request.write_stream = write_stream
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_write_stream,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def append_rows(
self,
requests: AsyncIterator[storage.AppendRowsRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[storage.AppendRowsResponse]]:
r"""Appends data to the given stream.
If ``offset`` is specified, the ``offset`` is checked against
the end of stream. The server returns ``OUT_OF_RANGE`` in
``AppendRowsResponse`` if an attempt is made to append to an
offset beyond the current end of the stream or
        ``ALREADY_EXISTS`` if user provides an ``offset`` that has
already been written to. User can retry with adjusted offset
within the same RPC stream. If ``offset`` is not specified,
append happens at the end of the stream.
The response contains the offset at which the append happened.
Responses are received in the same order in which requests are
sent. There will be one response for each successful request. If
the ``offset`` is not set in response, it means append didn't
happen due to some errors. If one request fails, all the
subsequent requests will also fail until a success request is
made again.
If the stream is of ``PENDING`` type, data will only be
available for read operations after the stream is committed.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_append_rows():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.AppendRowsRequest(
write_stream="write_stream_value",
)
# This method expects an iterator which contains
# 'bigquery_storage_v1beta2.AppendRowsRequest' objects
# Here we create a generator that yields a single `request` for
# demonstrative purposes.
requests = [request]
def request_generator():
for request in requests:
yield request
# Make the request
stream = client.append_rows(requests=request_generator())
# Handle the response
for response in stream:
print(response)
Args:
requests (AsyncIterator[`google.cloud.bigquery_storage_v1beta2.types.AppendRowsRequest`]):
The request object AsyncIterator. Request message for `AppendRows`.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
AsyncIterable[google.cloud.bigquery_storage_v1beta2.types.AppendRowsResponse]:
Response message for AppendRows.
"""
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.append_rows,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=86400.0,
),
default_timeout=86400.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(()),)
# Send the request.
response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def get_write_stream(
self,
request: Union[storage.GetWriteStreamRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> stream.WriteStream:
r"""Gets a write stream.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_get_write_stream():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.GetWriteStreamRequest(
name="name_value",
)
# Make the request
response = client.get_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1beta2.types.GetWriteStreamRequest, dict]):
The request object. Request message for
`GetWriteStreamRequest`.
name (:class:`str`):
Required. Name of the stream to get, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1beta2.types.WriteStream:
Information about a single stream
that gets data inside the storage
system.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
<|fim▁hole|> request = storage.GetWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_write_stream,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def finalize_write_stream(
self,
request: Union[storage.FinalizeWriteStreamRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.FinalizeWriteStreamResponse:
r"""Finalize a write stream so that no new data can be appended to
the stream. Finalize is not supported on the '_default' stream.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_finalize_write_stream():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.FinalizeWriteStreamRequest(
name="name_value",
)
# Make the request
response = client.finalize_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamRequest, dict]):
The request object. Request message for invoking
`FinalizeWriteStream`.
name (:class:`str`):
Required. Name of the stream to finalize, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamResponse:
Response message for FinalizeWriteStream.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = storage.FinalizeWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.finalize_write_stream,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def batch_commit_write_streams(
self,
request: Union[storage.BatchCommitWriteStreamsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.BatchCommitWriteStreamsResponse:
r"""Atomically commits a group of ``PENDING`` streams that belong to
the same ``parent`` table. Streams must be finalized before
commit and cannot be committed multiple times. Once a stream is
committed, data in the stream becomes available for read
operations.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_batch_commit_write_streams():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.BatchCommitWriteStreamsRequest(
parent="parent_value",
write_streams=['write_streams_value_1', 'write_streams_value_2'],
)
# Make the request
response = client.batch_commit_write_streams(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsRequest, dict]):
The request object. Request message for
`BatchCommitWriteStreams`.
parent (:class:`str`):
Required. Parent table that all the streams should
belong to, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsResponse:
Response message for BatchCommitWriteStreams.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = storage.BatchCommitWriteStreamsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_commit_write_streams,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def flush_rows(
self,
request: Union[storage.FlushRowsRequest, dict] = None,
*,
write_stream: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.FlushRowsResponse:
r"""Flushes rows to a BUFFERED stream. If users are appending rows
to BUFFERED stream, flush operation is required in order for the
rows to become available for reading. A Flush operation flushes
up to any previously flushed offset in a BUFFERED stream, to the
offset specified in the request. Flush is not supported on the
\_default stream, since it is not BUFFERED.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_flush_rows():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.FlushRowsRequest(
write_stream="write_stream_value",
)
# Make the request
response = client.flush_rows(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1beta2.types.FlushRowsRequest, dict]):
The request object. Request message for `FlushRows`.
write_stream (:class:`str`):
Required. The stream that is the
target of the flush operation.
This corresponds to the ``write_stream`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1beta2.types.FlushRowsResponse:
                Response message for FlushRows.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([write_stream])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = storage.FlushRowsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if write_stream is not None:
request.write_stream = write_stream
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.flush_rows,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("write_stream", request.write_stream),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-bigquery-storage",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("BigQueryWriteAsyncClient",)<|fim▁end|> | |
<|file_name|>pygoogle.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
"""
Google AJAX Search Module
http://code.google.com/apis/ajaxsearch/documentation/reference.html
Needs Python 2.6 or later
"""
try:
import json
except ImportError as e:
import simplejson as json
except ImportError as e:
print(e)
exit()
import sys
import urllib.request, urllib.parse, urllib.error
import logging
import argparse
__author__ = "Kiran Bandla"
__version__ = "0.2"
URL = 'http://ajax.googleapis.com/ajax/services/search/web?'
#Web Search Specific Arguments
#http://code.google.com/apis/ajaxsearch/documentation/reference.html#_fonje_web
#SAFE,FILTER
"""
SAFE
This optional argument supplies the search safety level which may be one of:
* safe=active - enables the highest level of safe search filtering
* safe=moderate - enables moderate safe search filtering (default)
* safe=off - disables safe search filtering
"""<|fim▁hole|>SAFE_ACTIVE = "active"
SAFE_MODERATE = "moderate"
SAFE_OFF = "off"
"""
FILTER
This optional argument controls turning on or off the duplicate content filter:
* filter=0 - Turns off the duplicate content filter
* filter=1 - Turns on the duplicate content filter (default)
"""
FILTER_OFF = 0
FILTER_ON = 1
#Standard URL Arguments
#http://code.google.com/apis/ajaxsearch/documentation/reference.html#_fonje_args
"""
RSZ
This optional argument supplies the number of results that the application would like to receive.
A value of small indicates a small result set size or 4 results.
A value of large indicates a large result set or 8 results. If this argument is not supplied, a value of small is assumed.
"""
RSZ_SMALL = "small"
RSZ_LARGE = "large"
"""
HL
This optional argument supplies the host language of the application making the request.
If this argument is not present then the system will choose a value based on the value of the Accept-Language http header.
If this header is not present, a value of en is assumed.
"""
class pygoogle:
def __init__(self,query,pages=10,hl='en',log_level=logging.INFO):
self.pages = pages #Number of pages. default 10
self.query = query
self.filter = FILTER_ON #Controls turning on or off the duplicate content filter. On = 1.
self.rsz = RSZ_LARGE #Results per page. small = 4 /large = 8
self.safe = SAFE_OFF #SafeBrowsing - active/moderate/off
self.hl = hl #Defaults to English (en)
self.__setup_logging(level=log_level)
def __setup_logging(self, level):
logger = logging.getLogger('pygoogle')
logger.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(module)s %(levelname)s %(funcName)s| %(message)s'))
logger.addHandler(handler)
self.logger = logger
def __search__(self,print_results=False):
'''
        returns a list of raw response dicts; the list is empty if no request succeeded
'''
results = []
for page in range(0,self.pages):
rsz = 8
if self.rsz == RSZ_SMALL:
rsz = 4
args = {'q' : self.query,
'v' : '1.0',
'start' : page*rsz,
'rsz': self.rsz,
'safe' : self.safe,
'filter' : self.filter,
'hl' : self.hl
}
self.logger.debug('search: "%s" page# : %s'%(self.query, page))
q = urllib.parse.urlencode(args)
search_results = urllib.request.urlopen(URL+q)
data = json.loads(search_results.read().decode("utf-8"))
if 'responseStatus' not in data:
self.logger.error('response does not have a responseStatus key')
continue
if data.get('responseStatus') != 200:
self.logger.debug('responseStatus is not 200')
self.logger.error('responseDetails : %s'%(data.get('responseDetails', None)))
continue
if print_results:
if 'responseData' in data and 'results' in data['responseData']:
for result in data['responseData']['results']:
if result:
print('[%s]'%(urllib.parse.unquote(result['titleNoFormatting'])))
print(result['content'].strip("<b>...</b>").replace("<b>",'').replace("</b>",'').replace("'","'").strip())
print(urllib.parse.unquote(result['unescapedUrl'])+'\n')
else:
# no responseData key was found in 'data'
                    self.logger.error('no responseData key found in response. very unusual')
results.append(data)
return results
def search(self):
"""Returns a dict of Title/URLs"""
results = {}
search_results = self.__search__()
if not search_results:
self.logger.info('No results returned')
return results
for data in search_results:
if 'responseData' in data and 'results' in data['responseData']:
for result in data['responseData']['results']:
if result and 'titleNoFormatting' in result:
title = urllib.parse.unquote(result['titleNoFormatting'])
results[title] = urllib.parse.unquote(result['unescapedUrl'])
else:
self.logger.error('no responseData key found in response')
self.logger.error(data)
return results
def search_page_wise(self):
"""Returns a dict of page-wise urls"""
results = {}
for page in range(0,self.pages):
args = {'q' : self.query,
'v' : '1.0',
'start' : page,
'rsz': RSZ_LARGE,
'safe' : SAFE_OFF,
'filter' : FILTER_ON,
}
q = urllib.parse.urlencode(args)
search_results = urllib.request.urlopen(URL+q)
data = json.loads(search_results.read())
urls = []
if 'responseData' in data and 'results' in data['responseData']:
for result in data['responseData']['results']:
if result and 'unescapedUrl' in result:
url = urllib.parse.unquote(result['unescapedUrl'])
urls.append(url)
else:
self.logger.error('no responseData key found in response')
results[page] = urls
return results
def get_urls(self):
"""Returns list of result URLs"""
results = []
search_results = self.__search__()
if not search_results:
self.logger.info('No results returned')
return results
for data in search_results:
if data and 'responseData' in data and data['responseData']['results']:
for result in data['responseData']['results']:
if result:
results.append(urllib.parse.unquote(result['unescapedUrl']))
return results
def get_result_count(self):
"""Returns the number of results"""
temp = self.pages
self.pages = 1
result_count = 0
search_results = self.__search__()
if not search_results:
return 0
try:
result_count = search_results[0]
if not isinstance(result_count, dict):
return 0
result_count = result_count.get('responseData', None)
if result_count:
if 'cursor' in result_count and 'estimatedResultCount' in result_count['cursor']:
return result_count['cursor']['estimatedResultCount']
return 0
except Exception as e:
self.logger.error(e)
finally:
self.pages = temp
return result_count
def display_results(self):
"""Prints results (for command line)"""
self.__search__(True)
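# Illustrative usage of the pygoogle class (query and page count are arbitrary examples):
#   g = pygoogle('quick brown fox', pages=1)
#   print(g.get_result_count())
#   for title, url in g.search().items():
#       print(title, url)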
def main():
parser = argparse.ArgumentParser(description='A simple Google search module for Python')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, help='Verbose mode')
parser.add_argument('-p', '--pages', dest='pages', action='store', default=1, help='Number of pages to return. Max 10')
parser.add_argument('-hl', '--language', dest='language', action='store', default='en', help="language. default is 'en'")
parser.add_argument('query', nargs='*', default=None)
args = parser.parse_args()
query = ' '.join(args.query)
log_level = logging.INFO
if args.verbose:
log_level = logging.DEBUG
if not query:
parser.print_help()
exit()
search = pygoogle( log_level=log_level, query=query, pages=args.pages, hl=args.language)
search.display_results()
if __name__ == "__main__":
main()<|fim▁end|> | |
<|file_name|>h2spec.go<|end_file_name|><|fim▁begin|>package main
import (
"crypto/tls"
"flag"
"fmt"
"github.com/summerwind/h2spec"
"os"
)
func main() {<|fim▁hole|> host := flag.String("h", "127.0.0.1", "Target host")
useTls := flag.Bool("t", false, "Connect over TLS")
insecureSkipVerify := flag.Bool("k", false, "Don't verify server's certificate")
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %s [OPTIONS]\n\n", os.Args[0])
fmt.Println("Options:")
fmt.Println(" -p: Target port. (Default: 80)")
fmt.Println(" -h: Target host. (Default: 127.0.0.1)")
fmt.Println(" -t: Connect over TLS. (Default: false)")
fmt.Println(" -k: Don't verify server's certificate. (Default: false)")
fmt.Println(" --help: Display this help and exit.")
os.Exit(1)
}
flag.Parse()
var ctx h2spec.Context
ctx.Port = *port
ctx.Host = *host
ctx.Tls = *useTls
ctx.TlsConfig = &tls.Config{
InsecureSkipVerify: *insecureSkipVerify,
}
h2spec.Run(&ctx)
}<|fim▁end|> | port := flag.Int("p", 80, "Target port") |
<|file_name|>root_variables.rs<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use fnv::{FnvHashMap, FnvHashSet};
use graphql_ir::{
FragmentDefinition, FragmentSpread, NamedItem, OperationDefinition, Program, Value, Variable,
Visitor,
};
use interner::StringKey;
use std::iter::FromIterator;
type VariableMap = FnvHashMap<StringKey, Variable>;
type Visited = FnvHashMap<StringKey, VariableMap>;
pub struct InferVariablesVisitor<'s> {
/// Cache fragments as they are transformed to avoid duplicate processing.
/// Because @argument values don't matter (only variable names/types),
/// each reachable fragment only has to be checked once.
visited_fragments: Visited,
program: &'s Program<'s>,
}
impl<'s> InferVariablesVisitor<'s> {
pub fn new(program: &'s Program<'s>) -> Self {
Self {
visited_fragments: Default::default(),
program,
}
}
    /// Determine the set of root variables that are transitively referenced by each
    /// fragment, i.e. the union of all root variables used in the fragment and any
    /// fragments it transitively spreads.
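    /// Illustrative example (hypothetical documents): for `query Q { ...F }` with
    /// `fragment F on T { field(arg: $x) ...G }` and `fragment G on T { other(arg: $y) }`,
    /// the inferred root variables are `$x` and `$y`.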
pub fn infer_operation_variables(&mut self, operation: &OperationDefinition) -> VariableMap {
let mut visitor = VaraiblesVisitor::new(
self.program,
&mut self.visited_fragments,
Default::default(),
);
visitor.visit_operation(operation);
visitor.variable_map
}
}
struct VaraiblesVisitor<'s> {
variable_map: VariableMap,
visited_fragments: &'s mut Visited,
program: &'s Program<'s>,
local_variables: FnvHashSet<StringKey>,
}
impl<'s> VaraiblesVisitor<'s> {
fn new(
program: &'s Program<'s>,
visited_fragments: &'s mut Visited,
local_variables: FnvHashSet<StringKey>,
) -> Self {
Self {
variable_map: Default::default(),
visited_fragments,
program,
local_variables,
}
}
}
impl<'s> VaraiblesVisitor<'s> {
/// Determine the set of root variables referenced locally in each
/// fragment. Note that RootArgumentDefinitions in the fragment's
/// argumentDefinitions can contain spurious entries for legacy
/// reasons. Instead of using those the fragment is traversed
/// to reanalyze variable usage.
fn infer_fragment_variables(&mut self, fragment: &FragmentDefinition) -> VariableMap {
if let Some(map) = self.visited_fragments.get(&fragment.name.item) {
map.clone()
} else {
// Break cycles by initially caching a version that is empty.
// If the current fragment is reached again, it won't have any
// root variables to add to its parents. The traversal below will
// find any root variables and update the cached version of the
// fragment.
self.visited_fragments
.insert(fragment.name.item, Default::default());
            // Avoid collecting local variable usages as root variables
let local_variables = FnvHashSet::from_iter(
fragment
.variable_definitions
.iter()
.map(|var| var.name.item),
);
let mut visitor =
VaraiblesVisitor::new(self.program, self.visited_fragments, local_variables);
visitor.visit_fragment(fragment);
let result = visitor.variable_map;
self.visited_fragments
.insert(fragment.name.item, result.clone());
result
}
}
}
impl<'s> Visitor for VaraiblesVisitor<'s> {
const NAME: &'static str = "VaraiblesVisitor";
const VISIT_ARGUMENTS: bool = true;
const VISIT_DIRECTIVES: bool = true;
<|fim▁hole|> let fragment = self
.program
.fragment(spread.fragment.item)
.expect("Expect fragment to exist.");
// Detect root variables being passed as the value of @arguments;
// recover the expected type from the corresponding argument definitions.
if !fragment.variable_definitions.is_empty() {
for arg in spread.arguments.iter() {
if let Value::Variable(var) = &arg.value.item {
if let Some(def) = fragment.variable_definitions.named(var.name.item) {
self.variable_map
.entry(var.name.item)
.or_insert_with(|| Variable {
name: var.name,
type_: def.type_.clone(),
});
}
}
}
}
// Merge any root variables referenced by the spread fragment
// into this (parent) fragment's arguments.
let referenced_fragment_variables = self.infer_fragment_variables(fragment);
for (_, variable) in referenced_fragment_variables.into_iter() {
self.variable_map.insert(variable.name.item, variable);
}
}
fn visit_variable(&mut self, value: &Variable) {
if !self.local_variables.contains(&value.name.item) {
self.variable_map
.entry(value.name.item)
.or_insert_with(|| value.clone());
}
}
}<|fim▁end|> | fn visit_fragment_spread(&mut self, spread: &FragmentSpread) {
self.visit_directives(&spread.directives); |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from models import Connection
from django import forms
class ConnectionForm(forms.ModelForm):
class Meta:<|fim▁hole|> model = Connection
exclude = ('d_object_id',)<|fim▁end|> | |
<|file_name|>table_split_test.go<|end_file_name|><|fim▁begin|>// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"context"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util/testkit"
"github.com/tikv/client-go/v2/tikv"
)
type testDDLTableSplitSuite struct{}
var _ = Suite(&testDDLTableSplitSuite{})
func (s *testDDLTableSplitSuite) TestTableSplit(c *C) {
store, err := mockstore.NewMockStore()
c.Assert(err, IsNil)
defer func() {
err := store.Close()
c.Assert(err, IsNil)
}()
session.SetSchemaLease(100 * time.Millisecond)
session.DisableStats4Test()
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
dom, err := session.BootstrapSession(store)
c.Assert(err, IsNil)
tk := testkit.NewTestKit(c, store)
tk.MustExec("use test")
// Synced split table region.
tk.MustExec("set global tidb_scatter_region = 1")
tk.MustExec(`create table t_part (a int key) partition by range(a) (
partition p0 values less than (10),
partition p1 values less than (20)
)`)
defer dom.Close()
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
infoSchema := dom.InfoSchema()
c.Assert(infoSchema, NotNil)
t, err := infoSchema.TableByName(model.NewCIStr("mysql"), model.NewCIStr("tidb"))
c.Assert(err, IsNil)
checkRegionStartWithTableID(c, t.Meta().ID, store.(kvStore))
t, err = infoSchema.TableByName(model.NewCIStr("test"), model.NewCIStr("t_part"))
c.Assert(err, IsNil)
pi := t.Meta().GetPartitionInfo()
c.Assert(pi, NotNil)
for _, def := range pi.Definitions {
checkRegionStartWithTableID(c, def.ID, store.(kvStore))
}
}
type kvStore interface {<|fim▁hole|>
func checkRegionStartWithTableID(c *C, id int64, store kvStore) {
regionStartKey := tablecodec.EncodeTablePrefix(id)
var loc *tikv.KeyLocation
var err error
cache := store.GetRegionCache()
loc, err = cache.LocateKey(tikv.NewBackoffer(context.Background(), 5000), regionStartKey)
c.Assert(err, IsNil)
// Region cache may be out of date, so we need to drop this expired region and load it again.
cache.InvalidateCachedRegion(loc.Region)
c.Assert(loc.StartKey, BytesEquals, []byte(regionStartKey))
}<|fim▁end|> | GetRegionCache() *tikv.RegionCache
} |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>extern crate bytes;
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;
extern crate tokio_proto;
extern crate tokio_service;
use futures::{future, Future};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::codec::{Decoder, Encoder, Framed};
use tokio_core::net::TcpStream;
use tokio_core::reactor::Handle;
use tokio_proto::TcpClient;
use tokio_proto::multiplex::{ClientProto, ClientService, RequestId, ServerProto};
use tokio_service::Service;
use bytes::{BigEndian, Buf, BufMut, BytesMut};
use std::{io, str};
use std::net::SocketAddr;
pub struct Client {
inner: ClientService<TcpStream, CollatzProto>,
}
impl Client {
    pub fn connect(
addr: &SocketAddr,
handle: &Handle,
) -> Box<Future<Item = Client, Error = io::Error>> {<|fim▁hole|> Box::new(ret)
}
}
impl Service for Client {
type Request = String;
type Response = String;
type Error = io::Error;
    type Future = Box<Future<Item = String, Error = io::Error>>;
fn call(&self, req: String) -> Self::Future {
Box::new(self.inner.call(req).and_then(move |resp| Ok(resp)))
}
}
pub struct CollatzCodec;
pub struct CollatzProto;
type CollatzFrame = (RequestId, String);
impl Decoder for CollatzCodec {
type Item = CollatzFrame;
type Error = io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<CollatzFrame>, io::Error> {
if buf.len() < 5 {
return Ok(None);
}
        let newline = buf[4..].iter().position(|b| *b == b'\n');
if let Some(n) = newline {
let line = buf.split_to(n + 4);
buf.split_to(1);
            let request_id = io::Cursor::new(&line[0..4]).get_u32::<BigEndian>();
return match str::from_utf8(&line.as_ref()[4..]) {
Ok(s) => Ok(Some((u64::from(request_id), s.to_string()))),
Err(_) => Err(io::Error::new(io::ErrorKind::Other, "invalid string")),
};
}
Ok(None)
}
}
impl Encoder for CollatzCodec {
type Item = CollatzFrame;
type Error = io::Error;
    fn encode(&mut self, msg: CollatzFrame, buf: &mut BytesMut) -> io::Result<()> {
let len = 4 + msg.1.len() + 1;
buf.reserve(len);
let (request_id, msg) = msg;
buf.put_u32::<BigEndian>(request_id as u32);
buf.put_slice(msg.as_bytes());
buf.put_u8(b'\n');
Ok(())
}
}
impl<T: AsyncRead + AsyncWrite + 'static> ClientProto<T> for CollatzProto {
type Request = String;
type Response = String;
type Transport = Framed<T, CollatzCodec>;
type BindTransport = Result<Self::Transport, io::Error>;
fn bind_transport(&self, io: T) -> Self::BindTransport {
Ok(io.framed(CollatzCodec))
}
}
impl<T: AsyncRead + AsyncWrite + 'static> ServerProto<T> for CollatzProto {
type Request = String;
type Response = String;
type Transport = Framed<T, CollatzCodec>;
type BindTransport = Result<Self::Transport, io::Error>;
fn bind_transport(&self, io: T) -> Self::BindTransport {
Ok(io.framed(CollatzCodec))
}
}
pub struct CollatzService;
fn get_sequence(mut n: u64) -> Vec<u64> {
let mut result = vec![];
result.push(n);
while n > 1 {
if n % 2 == 0 {
n /= 2;
} else {
n = 3 * n + 1;
}
result.push(n);
}
result
}
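// Worked example: get_sequence(6) walks 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1,
// so the service below would answer a request of "6\n" with "[6, 3, 10, 5, 16, 8, 4, 2, 1]".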
impl Service for CollatzService {
type Request = String;
type Response = String;
type Error = io::Error;
type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;
fn call(&self, req: Self::Request) -> Self::Future {
        match req.trim().parse::<u64>() {
            Ok(num) => {
                let res = get_sequence(num);
                Box::new(future::ok(format!("{:?}", res)))
            }
            Err(_) => {
                Box::new(future::ok("Could not parse input as a u64".to_owned()))
            }
}
}
}<|fim▁end|> | let ret = TcpClient::new(CollatzProto).connect(addr, handle)
.map(|service| Client {
inner: service,
}); |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub mod pic;
pub mod pit;
pub mod port_io;
use driver::vga;
use core::fmt::Write;
use driver::vga::Writer;
pub fn pic_init() {
unsafe {
//start the initialisation of the PICs
port_io::outb(pic::PIC_MASTER_COMMAND, pic::ICW1_INIT | pic::ICW1_ICW4);
port_io::wait();
port_io::outb(pic::PIC_SLAVE_COMMAND, pic::ICW1_INIT | pic::ICW1_ICW4);
port_io::wait();
//provide the PIC vector offsets
port_io::outb(pic::PIC_MASTER_DATA, pic::PIC_OFFSET_MASTER);
port_io::wait();
port_io::outb(pic::PIC_SLAVE_DATA, pic::PIC_OFFSET_SLAVE);
port_io::wait();
//provide slave/master relationship information
port_io::outb(pic::PIC_MASTER_DATA, 4); //inform MASTER there is a SLAVE at IRQ2
port_io::wait();
port_io::outb(pic::PIC_SLAVE_DATA, 2); //inform SLAVE it is a cascade identity
port_io::wait();
//provide additional environment information
port_io::outb(pic::PIC_MASTER_DATA, pic::ICW4_8086); //operate in 8086 mode
port_io::wait();
port_io::outb(pic::PIC_SLAVE_DATA, pic::ICW4_8086); //operate in 8086 mode
port_io::wait();
<|fim▁hole|> pic::irq_set_mask(0, false);
pic::irq_set_mask(1, false);
}
vga::okay();
vga::println("Initialised the PIC, at an offset of 0x20");
}
pub fn pit_init(hz: u32) {
pit::set_phase(hz);
vga::okay();
write!(Writer::new(), "Initialised the PIT, at a phase of {:#} Hz\n", hz)
.expect("Unexpected failure in write!()");
}<|fim▁end|> | //mask all interrupts, since none are currently initialised
port_io::outb(pic::PIC_MASTER_DATA, 0xFF);
port_io::outb(pic::PIC_SLAVE_DATA, 0xFF);
|
<|file_name|>sale.py<|end_file_name|><|fim▁begin|>from api_request import Api
from util import Util
from twocheckout import Twocheckout
class Sale(Twocheckout):
def __init__(self, dict_):
super(self.__class__, self).__init__(dict_)
@classmethod
def find(cls, params=None):
if params is None:
params = dict()
response = cls(Api.call('sales/detail_sale', params))
return response.sale
@classmethod
def list(cls, params=None):
if params is None:
params = dict()
response = cls(Api.call('sales/list_sales', params))
return response.sale_summary
def refund(self, params=None):
if params is None:
params = dict()
if hasattr(self, 'lineitem_id'):
params['lineitem_id'] = self.lineitem_id
url = 'sales/refund_lineitem'
elif hasattr(self, 'invoice_id'):
params['invoice_id'] = self.invoice_id
url = 'sales/refund_invoice'
else:
params['sale_id'] = self.sale_id
url = 'sales/refund_invoice'
return Sale(Api.call(url, params))
def stop(self, params=None):
if params is None:
params = dict()
if hasattr(self, 'lineitem_id'):
params['lineitem_id'] = self.lineitem_id
return Api.call('sales/stop_lineitem_recurring', params)
elif hasattr(self, 'sale_id'):
active_lineitems = Util.active(self)
if dict(active_lineitems):
result = dict()
i = 0
for k, v in active_lineitems.items():
lineitem_id = v
params = {'lineitem_id': lineitem_id}
result[i] = Api.call('sales/stop_lineitem_recurring', params)
i += 1
response = { "response_code": "OK",
"response_message": str(len(result)) + " lineitems stopped successfully"
}
else:
response = {
"response_code": "NOTICE",
"response_message": "No active recurring lineitems"
}
else:
response = { "response_code": "NOTICE",
"response_message": "This method can only be called on a sale or lineitem"
}
return Sale(response)
def active(self):
active_lineitems = Util.active(self)
if dict(active_lineitems):
result = dict()
i = 0
for k, v in active_lineitems.items():
lineitem_id = v
result[i] = lineitem_id
i += 1
response = { "response_code": "ACTIVE",
"response_message": str(len(result)) + " active recurring lineitems"
}
else:
response = {
"response_code": "NOTICE","response_message":<|fim▁hole|> "No active recurring lineitems"
}
return Sale(response)
def comment(self, params=None):
if params is None:
params = dict()
params['sale_id'] = self.sale_id
return Sale(Api.call('sales/create_comment', params))
def ship(self, params=None):
if params is None:
params = dict()
params['sale_id'] = self.sale_id
return Sale(Api.call('sales/mark_shipped', params))<|fim▁end|> | |
<|file_name|>opentsdb.go<|end_file_name|><|fim▁begin|>package ingest
import (
"compress/gzip"
"encoding/json"
"io"
"io/ioutil"
schema "github.com/grafana/metrictank/schema"
"github.com/raintank/tsdb-gw/api/models"
"github.com/raintank/tsdb-gw/publish"
log "github.com/sirupsen/logrus"
)
func OpenTSDBWrite(ctx *models.Context) {
if ctx.Req.Request.Body != nil {
defer ctx.Req.Request.Body.Close()
var reader io.Reader
var err error
if ctx.Req.Header.Get("Content-Encoding") == "gzip" {
reader, err = gzip.NewReader(ctx.Req.Request.Body)
if err != nil {
ctx.JSON(400, err.Error())
log.Errorf("Read Error, %v", err)
return
}
} else {
reader = ctx.Req.Request.Body
}
body, err := ioutil.ReadAll(reader)
if err != nil {
ctx.JSON(400, err.Error())
log.Errorf("Read Error, %v", err)
return
}
var req OpenTSDBPutRequest
err = json.Unmarshal(body, &req)
if err != nil {
ctx.JSON(400, err.Error())
log.Errorf("Read Error, %v", err)
return
}
var buf []*schema.MetricData
for _, ts := range req {
md := MetricPool.Get()
*md = schema.MetricData{
Name: ts.Metric,
Interval: 0,
Value: ts.Value,
Unit: "unknown",
Time: ts.Timestamp,
Mtype: "gauge",
Tags: ts.FormatTags(md.Tags),
OrgId: ctx.ID,
}
md.SetId()
buf = append(buf, md)
}
err = publish.Publish(buf)
for _, m := range buf {
m.Tags = m.Tags[:0]
MetricPool.Put(m)
}
if err != nil {
log.Errorf("failed to publish opentsdb write metrics. %s", err)
ctx.JSON(500, err)
return
}
ctx.JSON(200, "ok")
return<|fim▁hole|> ctx.JSON(400, "no data included in request.")
}
type OpenTSDBMetric struct {
Metric string `json:"metric"`
Timestamp int64 `json:"timestamp"`
Value float64 `json:"value"`
Tags map[string]string `json:"tags"`
}
type OpenTSDBPutRequest []OpenTSDBMetric
func (m OpenTSDBMetric) FormatTags(tagArray []string) []string {
for t, v := range m.Tags {
tagArray = append(tagArray, t+"="+v)
}
return tagArray
}<|fim▁end|> | }
|
<|file_name|>buf_writer.rs<|end_file_name|><|fim▁begin|>use futures_core::task::{Context, Poll};
use futures_io::{AsyncSeek, AsyncWrite, IoSlice, SeekFrom};
use pin_utils::{unsafe_pinned, unsafe_unpinned};
use std::fmt;
use std::io::{self, Write};
use std::pin::Pin;
use super::DEFAULT_BUF_SIZE;
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and
/// writes it to an underlying writer in large, infrequent batches.
///
/// `BufWriter` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// When the `BufWriter` is dropped, the contents of its buffer will be
/// discarded. Creating multiple instances of a `BufWriter` on the same
/// stream can cause data loss. If you need to write out the contents of its
/// buffer, you must manually call flush before the writer is dropped.
///
/// [`AsyncWrite`]: futures_io::AsyncWrite
/// [`flush`]: super::AsyncWriteExt::flush
///
// TODO: Examples
pub struct BufWriter<W> {
inner: W,
buf: Vec<u8>,
written: usize,
}
impl<W: AsyncWrite> BufWriter<W> {
unsafe_pinned!(inner: W);
unsafe_unpinned!(buf: Vec<u8>);
/// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
pub fn new(inner: W) -> Self {
Self::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter` with the specified buffer capacity.
pub fn with_capacity(cap: usize, inner: W) -> Self {
Self {
inner,
buf: Vec::with_capacity(cap),
written: 0,
}
}
fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let Self { inner, buf, written } = unsafe { self.get_unchecked_mut() };
let mut inner = unsafe { Pin::new_unchecked(inner) };
let len = buf.len();
let mut ret = Ok(());
while *written < len {
match ready!(inner.as_mut().poll_write(cx, &buf[*written..])) {
Ok(0) => {
ret = Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write the buffered data",
));
break;
}
Ok(n) => *written += n,
Err(e) => {
ret = Err(e);
break;
}
}
}
if *written > 0 {
buf.drain(..*written);
}
*written = 0;
Poll::Ready(ret)
}
/// Gets a reference to the underlying writer.
pub fn get_ref(&self) -> &W {
&self.inner
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
pub fn get_mut(&mut self) -> &mut W {
&mut self.inner
}
/// Gets a pinned mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> {
self.inner()
}
/// Consumes this `BufWriter`, returning the underlying writer.
///
/// Note that any leftover data in the internal buffer is lost.
pub fn into_inner(self) -> W {
self.inner
}
/// Returns a reference to the internally buffered data.
pub fn buffer(&self) -> &[u8] {
&self.buf
}
}
impl<W: AsyncWrite> AsyncWrite for BufWriter<W> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
if self.buf.len() + buf.len() > self.buf.capacity() {
ready!(self.as_mut().flush_buf(cx))?;
}
if buf.len() >= self.buf.capacity() {
self.inner().poll_write(cx, buf)<|fim▁hole|> }
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[IoSlice<'_>],
) -> Poll<io::Result<usize>> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() {
ready!(self.as_mut().flush_buf(cx))?;
}
if total_len >= self.buf.capacity() {
self.inner().poll_write_vectored(cx, bufs)
} else {
Poll::Ready(self.buf().write_vectored(bufs))
}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_flush(cx)
}
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_close(cx)
}
}
impl<W: fmt::Debug> fmt::Debug for BufWriter<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BufWriter")
.field("writer", &self.inner)
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.field("written", &self.written)
.finish()
}
}
impl<W: AsyncWrite + AsyncSeek> AsyncSeek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn poll_seek(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
pos: SeekFrom,
) -> Poll<io::Result<u64>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_seek(cx, pos)
}
}<|fim▁end|> | } else {
Poll::Ready(self.buf().write(buf)) |
<|file_name|>externalities.rs<|end_file_name|><|fim▁begin|>// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Transaction Execution environment.
use util::*;
use action_params::{ActionParams, ActionValue};
use state::{State, Substate};
use engines::Engine;
use env_info::EnvInfo;
use executive::*;
use evm::{self, Schedule, Ext, ContractCreateResult, MessageCallResult, Factory};
use types::executed::CallType;
use trace::{Tracer, VMTracer};
/// Policy for handling output data on `RETURN` opcode.
pub enum OutputPolicy<'a, 'b> {
/// Return reference to fixed sized output.
/// Used for message calls.
Return(BytesRef<'a>, Option<&'b mut Bytes>),
/// Init new contract as soon as `RETURN` is called.
InitContract(Option<&'b mut Bytes>),
}
/// Transaction properties that externalities need to know about.
pub struct OriginInfo {
address: Address,
origin: Address,
gas_price: U256,
value: U256
}
impl OriginInfo {
/// Populates origin info from action params.
pub fn from(params: &ActionParams) -> Self {
OriginInfo {
address: params.address.clone(),
origin: params.origin.clone(),
gas_price: params.gas_price,
value: match params.value {
ActionValue::Transfer(val) | ActionValue::Apparent(val) => val
}
}
}
}
/// Implementation of evm Externalities.
pub struct Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMTracer {
state: &'a mut State,
env_info: &'a EnvInfo,
engine: &'a Engine,
vm_factory: &'a Factory,
depth: usize,
origin_info: OriginInfo,
substate: &'a mut Substate,
schedule: Schedule,
output: OutputPolicy<'a, 'a>,
tracer: &'a mut T,
vm_tracer: &'a mut V,
}
impl<'a, T, V> Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMTracer {
#[cfg_attr(feature="dev", allow(too_many_arguments))]
/// Basic `Externalities` constructor.
pub fn new(state: &'a mut State,
env_info: &'a EnvInfo,
engine: &'a Engine,
vm_factory: &'a Factory,
depth: usize,
origin_info: OriginInfo,
substate: &'a mut Substate,
output: OutputPolicy<'a, 'a>,
tracer: &'a mut T,
vm_tracer: &'a mut V,
) -> Self {
Externalities {
state: state,
env_info: env_info,
engine: engine,
vm_factory: vm_factory,
depth: depth,
origin_info: origin_info,
substate: substate,
schedule: engine.schedule(env_info),
output: output,
tracer: tracer,
vm_tracer: vm_tracer,
}
}
}
impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMTracer {
fn storage_at(&self, key: &H256) -> H256 {
self.state.storage_at(&self.origin_info.address, key)
}
fn set_storage(&mut self, key: H256, value: H256) {
self.state.set_storage(&self.origin_info.address, key, value)
}
fn exists(&self, address: &Address) -> bool {
self.state.exists(address)
}
fn balance(&self, address: &Address) -> U256 {
self.state.balance(address)
}
fn blockhash(&self, number: &U256) -> H256 {
// TODO: comment out what this function expects from env_info, since it will produce panics if the latter is inconsistent
match *number < U256::from(self.env_info.number) && number.low_u64() >= cmp::max(256, self.env_info.number) - 256 {
true => {
let index = self.env_info.number - number.low_u64() - 1;
assert!(index < self.env_info.last_hashes.len() as u64, format!("Inconsistent env_info, should contain at least {:?} last hashes", index+1));
let r = self.env_info.last_hashes[index as usize].clone();
trace!("ext: blockhash({}) -> {} self.env_info.number={}\n", number, r, self.env_info.number);
r
},
false => {
trace!("ext: blockhash({}) -> null self.env_info.number={}\n", number, self.env_info.number);
H256::zero()
},
}
}
fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult {
// create new contract address
let address = contract_address(&self.origin_info.address, &self.state.nonce(&self.origin_info.address));
// prepare the params
let params = ActionParams {
code_address: address.clone(),
address: address.clone(),
sender: self.origin_info.address.clone(),
origin: self.origin_info.origin.clone(),
gas: *gas,
gas_price: self.origin_info.gas_price,
value: ActionValue::Transfer(*value),
code: Some(Arc::new(code.to_vec())),
code_hash: code.sha3(),
data: None,
call_type: CallType::None,
};
self.state.inc_nonce(&self.origin_info.address);
let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.vm_factory, self.depth);
// TODO: handle internal error separately
match ex.create(params, self.substate, self.tracer, self.vm_tracer) {
Ok(gas_left) => {
self.substate.contracts_created.push(address.clone());
ContractCreateResult::Created(address, gas_left)
},
_ => ContractCreateResult::Failed
}
}
fn call(&mut self,
gas: &U256,
sender_address: &Address,
receive_address: &Address,
value: Option<U256>,
data: &[u8],
code_address: &Address,
output: &mut [u8],
call_type: CallType
) -> MessageCallResult {
trace!(target: "externalities", "call");
let mut params = ActionParams {
sender: sender_address.clone(),
address: receive_address.clone(),
value: ActionValue::Apparent(self.origin_info.value),
code_address: code_address.clone(),
origin: self.origin_info.origin.clone(),
gas: *gas,
gas_price: self.origin_info.gas_price,
code: self.state.code(code_address),
code_hash: self.state.code_hash(code_address),
data: Some(data.to_vec()),
call_type: call_type,
};
if let Some(value) = value {
params.value = ActionValue::Transfer(value);
}
let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.vm_factory, self.depth);
match ex.call(params, self.substate, BytesRef::Fixed(output), self.tracer, self.vm_tracer) {
Ok(gas_left) => MessageCallResult::Success(gas_left),
_ => MessageCallResult::Failed
}
}
fn extcode(&self, address: &Address) -> Arc<Bytes> {
self.state.code(address).unwrap_or_else(|| Arc::new(vec![]))
}
fn extcodesize(&self, address: &Address) -> usize {
self.state.code_size(address).unwrap_or(0)
}
#[cfg_attr(feature="dev", allow(match_ref_pats))]
fn ret(mut self, gas: &U256, data: &[u8]) -> evm::Result<U256>
where Self: Sized {
let handle_copy = |to: &mut Option<&mut Bytes>| {
to.as_mut().map(|b| **b = data.to_owned());
};
match self.output {
OutputPolicy::Return(BytesRef::Fixed(ref mut slice), ref mut copy) => {
handle_copy(copy);
let len = cmp::min(slice.len(), data.len());
(&mut slice[..len]).copy_from_slice(&data[..len]);
Ok(*gas)
},
OutputPolicy::Return(BytesRef::Flexible(ref mut vec), ref mut copy) => {
handle_copy(copy);
vec.clear();
vec.extend_from_slice(data);
Ok(*gas)
},
OutputPolicy::InitContract(ref mut copy) => {
let return_cost = U256::from(data.len()) * U256::from(self.schedule.create_data_gas);
if return_cost > *gas {
return match self.schedule.exceptional_failed_code_deposit {
true => Err(evm::Error::OutOfGas),
false => Ok(*gas)
}
}
handle_copy(copy);
let mut code = vec![];
code.extend_from_slice(data);
self.state.init_code(&self.origin_info.address, code);
Ok(*gas - return_cost)
}
}
}
fn log(&mut self, topics: Vec<H256>, data: &[u8]) {
use log_entry::LogEntry;
let address = self.origin_info.address.clone();
self.substate.logs.push(LogEntry {
address: address,
topics: topics,
data: data.to_vec()
});
}
fn suicide(&mut self, refund_address: &Address) {
let address = self.origin_info.address.clone();
let balance = self.balance(&address);
if &address == refund_address {
// TODO [todr] To be consisted with CPP client we set balance to 0 in that case.
self.state.sub_balance(&address, &balance);
} else {
trace!("Suiciding {} -> {} (xfer: {})", address, refund_address, balance);
self.state.transfer_balance(&address, refund_address, &balance);
}
self.tracer.trace_suicide(address, balance, refund_address.clone());
self.substate.suicides.insert(address);
}
fn schedule(&self) -> &Schedule {
&self.schedule
}
fn env_info(&self) -> &EnvInfo {
self.env_info
}
fn depth(&self) -> usize {
self.depth
}
fn inc_sstore_clears(&mut self) {
self.substate.sstore_clears_count = self.substate.sstore_clears_count + U256::one();
}
fn trace_prepare_execute(&mut self, pc: usize, instruction: u8, gas_cost: &U256) -> bool {
self.vm_tracer.trace_prepare_execute(pc, instruction, gas_cost)
}
fn trace_executed(&mut self, gas_used: U256, stack_push: &[U256], mem_diff: Option<(usize, &[u8])>, store_diff: Option<(U256, U256)>) {
self.vm_tracer.trace_executed(gas_used, stack_push, mem_diff, store_diff)
}
}
#[cfg(test)]
mod tests {
use util::*;
use engines::Engine;
use env_info::EnvInfo;
use evm::Ext;
use state::{State, Substate};
use tests::helpers::*;
use devtools::GuardedTempResult;
use super::*;
use trace::{NoopTracer, NoopVMTracer};
use types::executed::CallType;
fn get_test_origin() -> OriginInfo {
OriginInfo {
address: Address::zero(),
origin: Address::zero(),
gas_price: U256::zero(),
value: U256::zero()
}
}
fn get_test_env_info() -> EnvInfo {
EnvInfo {
number: 100,
author: 0.into(),
timestamp: 0,
difficulty: 0.into(),
last_hashes: Arc::new(vec![]),
gas_used: 0.into(),
gas_limit: 0.into(),
}<|fim▁hole|> engine: Arc<Engine>,
sub_state: Substate,
env_info: EnvInfo
}
impl Default for TestSetup {
fn default() -> Self {
TestSetup::new()
}
}
impl TestSetup {
fn new() -> Self {
TestSetup {
state: get_temp_state(),
engine: get_test_spec().engine,
sub_state: Substate::new(),
env_info: get_test_env_info()
}
}
}
#[test]
fn can_be_created() {
let mut setup = TestSetup::new();
let state = setup.state.reference_mut();
let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer;
let vm_factory = Default::default();
let ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer);
assert_eq!(ext.env_info().number, 100);
}
#[test]
fn can_return_block_hash_no_env() {
let mut setup = TestSetup::new();
let state = setup.state.reference_mut();
let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer;
let vm_factory = Default::default();
let ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer);
let hash = ext.blockhash(&U256::from_str("0000000000000000000000000000000000000000000000000000000000120000").unwrap());
assert_eq!(hash, H256::zero());
}
#[test]
fn can_return_block_hash() {
let test_hash = H256::from("afafafafafafafafafafafbcbcbcbcbcbcbcbcbcbeeeeeeeeeeeeedddddddddd");
let test_env_number = 0x120001;
let mut setup = TestSetup::new();
{
let env_info = &mut setup.env_info;
env_info.number = test_env_number;
let mut last_hashes = (*env_info.last_hashes).clone();
last_hashes.push(test_hash.clone());
env_info.last_hashes = Arc::new(last_hashes);
}
let state = setup.state.reference_mut();
let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer;
let vm_factory = Default::default();
let ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer);
let hash = ext.blockhash(&U256::from_str("0000000000000000000000000000000000000000000000000000000000120000").unwrap());
assert_eq!(test_hash, hash);
}
#[test]
#[should_panic]
fn can_call_fail_empty() {
let mut setup = TestSetup::new();
let state = setup.state.reference_mut();
let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer;
let vm_factory = Default::default();
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer);
let mut output = vec![];
// this should panic because we have no balance on any account
ext.call(
&U256::from_str("0000000000000000000000000000000000000000000000000000000000120000").unwrap(),
&Address::new(),
&Address::new(),
Some(U256::from_str("0000000000000000000000000000000000000000000000000000000000150000").unwrap()),
&[],
&Address::new(),
&mut output,
CallType::Call
);
}
#[test]
fn can_log() {
let log_data = vec![120u8, 110u8];
let log_topics = vec![H256::from("af0fa234a6af46afa23faf23bcbc1c1cb4bcb7bcbe7e7e7ee3ee2edddddddddd")];
let mut setup = TestSetup::new();
let state = setup.state.reference_mut();
let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer;
{
let vm_factory = Default::default();
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer);
ext.log(log_topics, &log_data);
}
assert_eq!(setup.sub_state.logs.len(), 1);
}
#[test]
fn can_suicide() {
let refund_account = &Address::new();
let mut setup = TestSetup::new();
let state = setup.state.reference_mut();
let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer;
{
let vm_factory = Default::default();
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer);
ext.suicide(refund_account);
}
assert_eq!(setup.sub_state.suicides.len(), 1);
}
}<|fim▁end|> | }
struct TestSetup {
state: GuardedTempResult<State>, |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import React, { PropTypes } from 'react'
import { Grid, Row, Col } from 'react-bootstrap'
import Sort from '../../components/Sort'
import ProjectFilterForm from '../../components/ProjectFilterForm'
import Search from '../../containers/Search'
import ProjectsDashboardStatContainer from '../../containers/ProjectsDashboardStatContainer';
import { PROJECTS_SORT } from '../../resources/options'
const ProjectsDashboard = (props) => {
return (
<Grid fluid>
<Row>
<Col xs={12} md={4}>
<ProjectsDashboardStatContainer />
</Col>
<Col xs={12} md={8}>
Latest Updates
</Col>
</Row>
<Row>
<Col md={12}>
<Search
types={['projects']}
searchId='projectsDashboardSearch'
filterElement={<ProjectFilterForm />}
sortElement={<Sort options={PROJECTS_SORT} />}
/>
</Col>
</Row>
</Grid>
)
}
<|fim▁hole|><|fim▁end|> | export default ProjectsDashboard |
<|file_name|>gecko.mako.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// `data` comes from components/style/properties.mako.rs; see build.rs for more details.
<%!
from data import to_rust_ident
from data import Keyword
%>
use app_units::Au;
use custom_properties::ComputedValuesMap;
% for style_struct in data.style_structs:
use gecko_bindings::structs::${style_struct.gecko_ffi_name};
use gecko_bindings::bindings::Gecko_Construct_${style_struct.gecko_ffi_name};
use gecko_bindings::bindings::Gecko_CopyConstruct_${style_struct.gecko_ffi_name};
use gecko_bindings::bindings::Gecko_Destroy_${style_struct.gecko_ffi_name};
% endfor
use gecko_bindings::bindings::{Gecko_CopyMozBindingFrom, Gecko_CopyListStyleTypeFrom};
use gecko_bindings::bindings::{Gecko_SetMozBinding, Gecko_SetListStyleType};
use gecko_bindings::bindings::{Gecko_SetNullImageValue, Gecko_SetGradientImageValue};
use gecko_bindings::bindings::{Gecko_EnsureImageLayersLength, Gecko_CreateGradient};
use gecko_bindings::bindings::{Gecko_CopyImageValueFrom, Gecko_CopyFontFamilyFrom};
use gecko_bindings::bindings::{Gecko_FontFamilyList_AppendGeneric, Gecko_FontFamilyList_AppendNamed};
use gecko_bindings::bindings::{Gecko_FontFamilyList_Clear, Gecko_InitializeImageLayer};
use gecko_bindings::bindings::ServoComputedValuesBorrowedOrNull;
use gecko_bindings::structs;
use gecko_bindings::sugar::ns_style_coord::{CoordDataValue, CoordData, CoordDataMut};
use gecko_bindings::sugar::ownership::HasArcFFI;
use gecko::values::{StyleCoordHelpers, GeckoStyleCoordConvertible, convert_nscolor_to_rgba};
use gecko::values::convert_rgba_to_nscolor;
use gecko::values::round_border_to_device_pixels;
use logical_geometry::WritingMode;
use properties::CascadePropertyFn;
use properties::longhands;
use std::fmt::{self, Debug};
use std::mem::{transmute, zeroed};
use std::ptr;
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering};
use std::sync::Arc;
use std::cmp;
pub mod style_structs {
% for style_struct in data.style_structs:
pub use super::${style_struct.gecko_struct_name} as ${style_struct.name};
% endfor
}
#[derive(Clone, Debug)]
pub struct ComputedValues {
% for style_struct in data.style_structs:
${style_struct.ident}: Arc<style_structs::${style_struct.name}>,
% endfor
custom_properties: Option<Arc<ComputedValuesMap>>,
shareable: bool,
pub writing_mode: WritingMode,
pub root_font_size: Au,
}
impl ComputedValues {
pub fn inherit_from(parent: &Arc<Self>) -> Arc<Self> {
Arc::new(ComputedValues {
custom_properties: parent.custom_properties.clone(),
shareable: parent.shareable,
writing_mode: parent.writing_mode,
root_font_size: parent.root_font_size,
% for style_struct in data.style_structs:
% if style_struct.inherited:
${style_struct.ident}: parent.${style_struct.ident}.clone(),
% else:
${style_struct.ident}: Self::initial_values().${style_struct.ident}.clone(),
% endif
% endfor
})
}
pub fn new(custom_properties: Option<Arc<ComputedValuesMap>>,
shareable: bool,
writing_mode: WritingMode,
root_font_size: Au,
% for style_struct in data.style_structs:
${style_struct.ident}: Arc<style_structs::${style_struct.name}>,
% endfor
) -> Self {
ComputedValues {
custom_properties: custom_properties,
shareable: shareable,
writing_mode: writing_mode,
root_font_size: root_font_size,
% for style_struct in data.style_structs:
${style_struct.ident}: ${style_struct.ident},
% endfor
}
}
pub fn style_for_child_text_node(parent: &Arc<Self>) -> Arc<Self> {
// Gecko expects text nodes to be styled as if they were elements that
// matched no rules (that is, inherited style structs are inherited and
// non-inherited style structs are set to their initial values).
ComputedValues::inherit_from(parent)
}
pub fn initial_values() -> &'static Self {
unsafe {<|fim▁hole|> &*raw_initial_values()
}
}
pub unsafe fn initialize() {
debug_assert!(raw_initial_values().is_null());
set_raw_initial_values(Box::into_raw(Box::new(ComputedValues {
% for style_struct in data.style_structs:
${style_struct.ident}: style_structs::${style_struct.name}::initial(),
% endfor
custom_properties: None,
shareable: true,
writing_mode: WritingMode::empty(),
root_font_size: longhands::font_size::get_initial_value(),
})));
}
pub unsafe fn shutdown() {
debug_assert!(!raw_initial_values().is_null());
let _ = Box::from_raw(raw_initial_values());
set_raw_initial_values(ptr::null_mut());
}
#[inline]
pub fn do_cascade_property<F: FnOnce(&[CascadePropertyFn])>(f: F) {
f(&CASCADE_PROPERTY)
}
% for style_struct in data.style_structs:
#[inline]
pub fn clone_${style_struct.name_lower}(&self) -> Arc<style_structs::${style_struct.name}> {
self.${style_struct.ident}.clone()
}
#[inline]
pub fn get_${style_struct.name_lower}(&self) -> &style_structs::${style_struct.name} {
&self.${style_struct.ident}
}
#[inline]
pub fn mutate_${style_struct.name_lower}(&mut self) -> &mut style_structs::${style_struct.name} {
Arc::make_mut(&mut self.${style_struct.ident})
}
% endfor
pub fn custom_properties(&self) -> Option<Arc<ComputedValuesMap>> {
self.custom_properties.as_ref().map(|x| x.clone())
}
pub fn root_font_size(&self) -> Au { self.root_font_size }
pub fn set_root_font_size(&mut self, s: Au) { self.root_font_size = s; }
pub fn set_writing_mode(&mut self, mode: WritingMode) { self.writing_mode = mode; }
// FIXME(bholley): Implement this properly.
#[inline]
pub fn is_multicol(&self) -> bool { false }
}
<%def name="declare_style_struct(style_struct)">
pub struct ${style_struct.gecko_struct_name} {
gecko: ${style_struct.gecko_ffi_name},
}
</%def>
<%def name="impl_simple_setter(ident, gecko_ffi_name)">
#[allow(non_snake_case)]
pub fn set_${ident}(&mut self, v: longhands::${ident}::computed_value::T) {
${set_gecko_property(gecko_ffi_name, "v")}
}
</%def>
<%def name="impl_simple_clone(ident, gecko_ffi_name)">
#[allow(non_snake_case)]
pub fn clone_${ident}(&self) -> longhands::${ident}::computed_value::T {
self.gecko.${gecko_ffi_name}
}
</%def>
<%def name="impl_simple_copy(ident, gecko_ffi_name, *kwargs)">
#[allow(non_snake_case)]
pub fn copy_${ident}_from(&mut self, other: &Self) {
self.gecko.${gecko_ffi_name} = other.gecko.${gecko_ffi_name};
}
</%def>
<%def name="impl_coord_copy(ident, gecko_ffi_name)">
#[allow(non_snake_case)]
pub fn copy_${ident}_from(&mut self, other: &Self) {
self.gecko.${gecko_ffi_name}.copy_from(&other.gecko.${gecko_ffi_name});
}
</%def>
<%!
def get_gecko_property(ffi_name, self_param = "self"):
if "mBorderColor" in ffi_name:
return ffi_name.replace("mBorderColor",
"unsafe { *%s.gecko.__bindgen_anon_1.mBorderColor.as_ref() }"
% self_param)
return "%s.gecko.%s" % (self_param, ffi_name)
def set_gecko_property(ffi_name, expr):
if ffi_name == "__LIST_STYLE_TYPE__":
return "unsafe { Gecko_SetListStyleType(&mut self.gecko, %s as u32); }" % expr
if "mBorderColor" in ffi_name:
ffi_name = ffi_name.replace("mBorderColor",
"*self.gecko.__bindgen_anon_1.mBorderColor.as_mut()")
return "unsafe { %s = %s };" % (ffi_name, expr)
return "self.gecko.%s = %s;" % (ffi_name, expr)
%>
<%def name="impl_keyword_setter(ident, gecko_ffi_name, keyword, cast_type='u8')">
#[allow(non_snake_case)]
pub fn set_${ident}(&mut self, v: longhands::${ident}::computed_value::T) {
use properties::longhands::${ident}::computed_value::T as Keyword;
// FIXME(bholley): Align binary representations and ditch |match| for cast + static_asserts
let result = match v {
% for value in keyword.values_for('gecko'):
Keyword::${to_rust_ident(value)} =>
structs::${keyword.gecko_constant(value)} ${keyword.maybe_cast(cast_type)},
% endfor
};
${set_gecko_property(gecko_ffi_name, "result")}
}
</%def>
<%def name="impl_keyword_clone(ident, gecko_ffi_name, keyword)">
#[allow(non_snake_case)]
pub fn clone_${ident}(&self) -> longhands::${ident}::computed_value::T {
use properties::longhands::${ident}::computed_value::T as Keyword;
// FIXME(bholley): Align binary representations and ditch |match| for cast + static_asserts
match ${get_gecko_property(gecko_ffi_name)} ${keyword.maybe_cast("u32")} {
% for value in keyword.values_for('gecko'):
structs::${keyword.gecko_constant(value)} => Keyword::${to_rust_ident(value)},
% endfor
x => panic!("Found unexpected value in style struct for ${ident} property: {:?}", x),
}
}
</%def>
<%def name="impl_color_setter(ident, gecko_ffi_name, complex_color=True)">
#[allow(unreachable_code)]
#[allow(non_snake_case)]
pub fn set_${ident}(&mut self, v: longhands::${ident}::computed_value::T) {
% if complex_color:
let result = v.into();
% else:
use cssparser::Color;
let result = match v {
Color::RGBA(rgba) => convert_rgba_to_nscolor(&rgba),
// FIXME #13547
Color::CurrentColor => 0,
};
% endif
${set_gecko_property(gecko_ffi_name, "result")}
}
</%def>
<%def name="impl_color_copy(ident, gecko_ffi_name, complex_color=True)">
#[allow(non_snake_case)]
pub fn copy_${ident}_from(&mut self, other: &Self) {
let color = ${get_gecko_property(gecko_ffi_name, self_param = "other")};
${set_gecko_property(gecko_ffi_name, "color")};
}
</%def>
<%def name="impl_color_clone(ident, gecko_ffi_name, complex_color=True)">
#[allow(non_snake_case)]
pub fn clone_${ident}(&self) -> longhands::${ident}::computed_value::T {
% if complex_color:
${get_gecko_property(gecko_ffi_name)}.into()
% else:
use cssparser::Color;
Color::RGBA(convert_nscolor_to_rgba(${get_gecko_property(gecko_ffi_name)}))
% endif
}
</%def>
<%def name="impl_keyword(ident, gecko_ffi_name, keyword, need_clone, **kwargs)">
<%call expr="impl_keyword_setter(ident, gecko_ffi_name, keyword, **kwargs)"></%call>
<%call expr="impl_simple_copy(ident, gecko_ffi_name)"></%call>
%if need_clone:
<%call expr="impl_keyword_clone(ident, gecko_ffi_name, keyword)"></%call>
% endif
</%def>
<%def name="impl_simple(ident, gecko_ffi_name, need_clone=False)">
<%call expr="impl_simple_setter(ident, gecko_ffi_name)"></%call>
<%call expr="impl_simple_copy(ident, gecko_ffi_name)"></%call>
% if need_clone:
<%call expr="impl_simple_clone(ident, gecko_ffi_name)"></%call>
% endif
</%def>
<%def name="impl_color(ident, gecko_ffi_name, need_clone=False, complex_color=True)">
<%call expr="impl_color_setter(ident, gecko_ffi_name, complex_color)"></%call>
<%call expr="impl_color_copy(ident, gecko_ffi_name, complex_color)"></%call>
% if need_clone:
<%call expr="impl_color_clone(ident, gecko_ffi_name, complex_color)"></%call>
% endif
</%def>
<%def name="impl_app_units(ident, gecko_ffi_name, need_clone, round_to_pixels=False)">
#[allow(non_snake_case)]
pub fn set_${ident}(&mut self, v: longhands::${ident}::computed_value::T) {
% if round_to_pixels:
let au_per_device_px = Au(self.gecko.mTwipsPerPixel);
self.gecko.${gecko_ffi_name} = round_border_to_device_pixels(v, au_per_device_px).0;
% else:
self.gecko.${gecko_ffi_name} = v.0;
% endif
}
<%call expr="impl_simple_copy(ident, gecko_ffi_name)"></%call>
%if need_clone:
#[allow(non_snake_case)]
pub fn clone_${ident}(&self) -> longhands::${ident}::computed_value::T {
Au(self.gecko.${gecko_ffi_name})
}
% endif
</%def>
<%def name="impl_split_style_coord(ident, gecko_ffi_name, index, need_clone=False)">
#[allow(non_snake_case)]
pub fn set_${ident}(&mut self, v: longhands::${ident}::computed_value::T) {
v.to_gecko_style_coord(&mut self.gecko.${gecko_ffi_name}.data_at_mut(${index}));
}
#[allow(non_snake_case)]
pub fn copy_${ident}_from(&mut self, other: &Self) {
self.gecko.${gecko_ffi_name}.data_at_mut(${index}).copy_from(&other.gecko.${gecko_ffi_name}.data_at(${index}));
}
% if need_clone:
#[allow(non_snake_case)]
pub fn clone_${ident}(&self) -> longhands::${ident}::computed_value::T {
use properties::longhands::${ident}::computed_value::T;
T::from_gecko_style_coord(&self.gecko.${gecko_ffi_name}.data_at(${index}))
.expect("clone for ${ident} failed")
}
% endif
</%def>
<%def name="impl_style_coord(ident, gecko_ffi_name, need_clone=False)">
#[allow(non_snake_case)]
pub fn set_${ident}(&mut self, v: longhands::${ident}::computed_value::T) {
v.to_gecko_style_coord(&mut self.gecko.${gecko_ffi_name});
}
#[allow(non_snake_case)]
pub fn copy_${ident}_from(&mut self, other: &Self) {
self.gecko.${gecko_ffi_name}.copy_from(&other.gecko.${gecko_ffi_name});
}
% if need_clone:
#[allow(non_snake_case)]
pub fn clone_${ident}(&self) -> longhands::${ident}::computed_value::T {
use properties::longhands::${ident}::computed_value::T;
T::from_gecko_style_coord(&self.gecko.${gecko_ffi_name})
.expect("clone for ${ident} failed")
}
% endif
</%def>
<%def name="impl_corner_style_coord(ident, gecko_ffi_name, x_index, y_index, need_clone=False)">
#[allow(non_snake_case)]
pub fn set_${ident}(&mut self, v: longhands::${ident}::computed_value::T) {
v.0.width.to_gecko_style_coord(&mut self.gecko.${gecko_ffi_name}.data_at_mut(${x_index}));
v.0.height.to_gecko_style_coord(&mut self.gecko.${gecko_ffi_name}.data_at_mut(${y_index}));
}
#[allow(non_snake_case)]
pub fn copy_${ident}_from(&mut self, other: &Self) {
self.gecko.${gecko_ffi_name}.data_at_mut(${x_index})
.copy_from(&other.gecko.${gecko_ffi_name}.data_at(${x_index}));
self.gecko.${gecko_ffi_name}.data_at_mut(${y_index})
.copy_from(&other.gecko.${gecko_ffi_name}.data_at(${y_index}));
}
% if need_clone:
#[allow(non_snake_case)]
pub fn clone_${ident}(&self) -> longhands::${ident}::computed_value::T {
use properties::longhands::${ident}::computed_value::T;
use euclid::Size2D;
let width = GeckoStyleCoordConvertible::from_gecko_style_coord(
&self.gecko.${gecko_ffi_name}.data_at(${x_index}))
.expect("Failed to clone ${ident}");
let height = GeckoStyleCoordConvertible::from_gecko_style_coord(
&self.gecko.${gecko_ffi_name}.data_at(${y_index}))
.expect("Failed to clone ${ident}");
T(Size2D::new(width, height))
}
% endif
</%def>
<%def name="impl_style_struct(style_struct)">
impl ${style_struct.gecko_struct_name} {
#[allow(dead_code, unused_variables)]
pub fn initial() -> Arc<Self> {
let mut result = Arc::new(${style_struct.gecko_struct_name} { gecko: unsafe { zeroed() } });
unsafe {
Gecko_Construct_${style_struct.gecko_ffi_name}(&mut Arc::get_mut(&mut result).unwrap().gecko);
}
result
}
pub fn get_gecko(&self) -> &${style_struct.gecko_ffi_name} {
&self.gecko
}
}
impl Drop for ${style_struct.gecko_struct_name} {
fn drop(&mut self) {
unsafe {
Gecko_Destroy_${style_struct.gecko_ffi_name}(&mut self.gecko);
}
}
}
impl Clone for ${style_struct.gecko_struct_name} {
fn clone(&self) -> Self {
unsafe {
let mut result = ${style_struct.gecko_struct_name} { gecko: zeroed() };
Gecko_CopyConstruct_${style_struct.gecko_ffi_name}(&mut result.gecko, &self.gecko);
result
}
}
}
// FIXME(bholley): Make bindgen generate Debug for all types.
%if style_struct.gecko_ffi_name in ("nsStyle" + x for x in "Border Display List Background Font SVGReset".split()):
impl Debug for ${style_struct.gecko_struct_name} {
// FIXME(bholley): Generate this.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Gecko style struct: ${style_struct.gecko_struct_name}")
}
}
%else:
impl Debug for ${style_struct.gecko_struct_name} {
// FIXME(bholley): Generate this.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.gecko.fmt(f) }
}
%endif
</%def>
<%def name="raw_impl_trait(style_struct, skip_longhands='', skip_additionals='')">
<%
longhands = [x for x in style_struct.longhands
if not (skip_longhands == "*" or x.name in skip_longhands.split())]
#
# Make a list of types we can't auto-generate.
#
force_stub = [];
# These are currently being shuffled to a different style struct on the gecko side.
force_stub += ["backface-visibility", "transform-box", "transform-style"]
# These live in an nsFont member in Gecko. Should be straightforward to do manually.
force_stub += ["font-kerning", "font-variant"]
# These have unusual representations in gecko.
force_stub += ["list-style-type", "text-overflow"]
# In a nsTArray, have to be done manually, but probably not too much work
# (the "filling them", not the "making them work")
force_stub += ["animation-name", "animation-duration",
"animation-timing-function", "animation-iteration-count",
"animation-direction", "animation-play-state",
"animation-fill-mode", "animation-delay"]
# These are part of shorthands so we must include them in stylo builds,
# but we haven't implemented the stylo glue for the longhand
# so we generate a stub
force_stub += ["list-style-image", # box
"flex-basis", # position
# transition
"transition-duration", "transition-timing-function",
"transition-property", "transition-delay",
"column-count", # column
]
# Types used with predefined_type()-defined properties that we can auto-generate.
predefined_types = {
"LengthOrPercentage": impl_style_coord,
"LengthOrPercentageOrAuto": impl_style_coord,
"LengthOrPercentageOrNone": impl_style_coord,
"Number": impl_simple,
"Opacity": impl_simple,
}
keyword_longhands = [x for x in longhands if x.keyword and not x.name in force_stub]
predefined_longhands = [x for x in longhands
if x.predefined_type in predefined_types and not x.name in force_stub]
stub_longhands = [x for x in longhands if x not in keyword_longhands + predefined_longhands]
# If one of the longhands is not handled
# by either:
# - being a keyword
# - being a predefined longhand
# - being a longhand with manual glue code (i.e. in skip_longhands)
# - being generated as a stub
#
# then we raise an error here.
#
# If you hit this error, please add `product="servo"` to the longhand.
# In case the longhand is used in a shorthand, add it to the force_stub
# list above.
for stub in stub_longhands:
if stub.name not in force_stub:
raise Exception("Don't know what to do with longhand %s in style struct %s"
                            % (stub.name, style_struct.gecko_struct_name))
%>
impl ${style_struct.gecko_struct_name} {
/*
* Manually-Implemented Methods.
*/
${caller.body().strip()}
/*
* Auto-Generated Methods.
*/
<%
for longhand in keyword_longhands:
impl_keyword(longhand.ident, longhand.gecko_ffi_name, longhand.keyword, longhand.need_clone)
for longhand in predefined_longhands:
impl_fn = predefined_types[longhand.predefined_type]
impl_fn(longhand.ident, longhand.gecko_ffi_name, need_clone=longhand.need_clone)
%>
/*
* Stubs.
*/
% for longhand in stub_longhands:
#[allow(non_snake_case)]
pub fn set_${longhand.ident}(&mut self, _: longhands::${longhand.ident}::computed_value::T) {
if cfg!(debug_assertions) {
println!("stylo: Unimplemented property setter: ${longhand.name}");
}
}
#[allow(non_snake_case)]
pub fn copy_${longhand.ident}_from(&mut self, _: &Self) {
if cfg!(debug_assertions) {
println!("stylo: Unimplemented property setter: ${longhand.name}");
}
}
% if longhand.need_clone:
#[allow(non_snake_case)]
pub fn clone_${longhand.ident}(&self) -> longhands::${longhand.ident}::computed_value::T {
unimplemented!()
}
% endif
% if longhand.need_index:
pub fn ${longhand.ident}_count(&self) -> usize { 0 }
pub fn ${longhand.ident}_at(&self, _index: usize)
-> longhands::${longhand.ident}::computed_value::SingleComputedValue {
unimplemented!()
}
% endif
% endfor
<% additionals = [x for x in style_struct.additional_methods
if skip_additionals != "*" and not x.name in skip_additionals.split()] %>
% for additional in additionals:
${additional.stub()}
% endfor
}
</%def>
<% data.manual_style_structs = [] %>
<%def name="impl_trait(style_struct_name, skip_longhands='', skip_additionals='')">
<%self:raw_impl_trait style_struct="${next(x for x in data.style_structs if x.name == style_struct_name)}"
skip_longhands="${skip_longhands}" skip_additionals="${skip_additionals}">
${caller.body()}
</%self:raw_impl_trait>
<% data.manual_style_structs.append(style_struct_name) %>
</%def>
<%!
class Side(object):
def __init__(self, name, index):
self.name = name
self.ident = name.lower()
self.index = index
class Corner(object):
def __init__(self, name, index):
self.x_name = "NS_CORNER_" + name + "_X"
self.y_name = "NS_CORNER_" + name + "_Y"
self.ident = name.lower()
self.x_index = 2 * index
self.y_index = 2 * index + 1
SIDES = [Side("Top", 0), Side("Right", 1), Side("Bottom", 2), Side("Left", 3)]
CORNERS = [Corner("TOP_LEFT", 0), Corner("TOP_RIGHT", 1), Corner("BOTTOM_RIGHT", 2), Corner("BOTTOM_LEFT", 3)]
%>
#[allow(dead_code)]
fn static_assert() {
unsafe {
% for corner in CORNERS:
transmute::<_, [u32; ${corner.x_index}]>([1; structs::${corner.x_name} as usize]);
transmute::<_, [u32; ${corner.y_index}]>([1; structs::${corner.y_name} as usize]);
% endfor
}
// Note: using the above technique with an enum hits a rust bug when |structs| is in a different crate.
% for side in SIDES:
{ const DETAIL: u32 = [0][(structs::Side::eSide${side.name} as usize != ${side.index}) as usize]; let _ = DETAIL; }
% endfor
}
<% border_style_keyword = Keyword("border-style",
"none solid double dotted dashed hidden groove ridge inset outset") %>
<% skip_border_longhands = " ".join(["border-{0}-{1}".format(x.ident, y)
for x in SIDES
for y in ["color", "style", "width"]] +
["border-{0}-radius".format(x.ident.replace("_", "-"))
for x in CORNERS]) %>
<%self:impl_trait style_struct_name="Border"
skip_longhands="${skip_border_longhands}"
skip_additionals="*">
% for side in SIDES:
<% impl_keyword("border_%s_style" % side.ident, "mBorderStyle[%s]" % side.index, border_style_keyword,
need_clone=True) %>
<% impl_color("border_%s_color" % side.ident, "(mBorderColor)[%s]" % side.index, need_clone=True) %>
<% impl_app_units("border_%s_width" % side.ident, "mComputedBorder.%s" % side.ident, need_clone=True,
round_to_pixels=True) %>
pub fn border_${side.ident}_has_nonzero_width(&self) -> bool {
self.gecko.mComputedBorder.${side.ident} != 0
}
% endfor
% for corner in CORNERS:
<% impl_corner_style_coord("border_%s_radius" % corner.ident,
"mBorderRadius",
corner.x_index,
corner.y_index,
need_clone=True) %>
% endfor
</%self:impl_trait>
<% skip_margin_longhands = " ".join(["margin-%s" % x.ident for x in SIDES]) %>
<%self:impl_trait style_struct_name="Margin"
skip_longhands="${skip_margin_longhands}">
% for side in SIDES:
<% impl_split_style_coord("margin_%s" % side.ident,
"mMargin",
side.index,
need_clone=True) %>
% endfor
</%self:impl_trait>
<% skip_padding_longhands = " ".join(["padding-%s" % x.ident for x in SIDES]) %>
<%self:impl_trait style_struct_name="Padding"
skip_longhands="${skip_padding_longhands}">
% for side in SIDES:
<% impl_split_style_coord("padding_%s" % side.ident,
"mPadding",
side.index,
need_clone=True) %>
% endfor
</%self:impl_trait>
<% skip_position_longhands = " ".join(x.ident for x in SIDES) %>
<%self:impl_trait style_struct_name="Position"
skip_longhands="${skip_position_longhands} z-index box-sizing">
% for side in SIDES:
<% impl_split_style_coord("%s" % side.ident,
"mOffset",
side.index,
need_clone=True) %>
% endfor
pub fn set_z_index(&mut self, v: longhands::z_index::computed_value::T) {
use properties::longhands::z_index::computed_value::T;
match v {
T::Auto => self.gecko.mZIndex.set_value(CoordDataValue::Auto),
T::Number(n) => self.gecko.mZIndex.set_value(CoordDataValue::Integer(n)),
}
}
pub fn copy_z_index_from(&mut self, other: &Self) {
use gecko_bindings::structs::nsStyleUnit;
// z-index is never a calc(). If it were, we'd be leaking here, so
// assert that it isn't.
debug_assert!(self.gecko.mZIndex.unit() != nsStyleUnit::eStyleUnit_Calc);
unsafe {
self.gecko.mZIndex.copy_from_unchecked(&other.gecko.mZIndex);
}
}
pub fn clone_z_index(&self) -> longhands::z_index::computed_value::T {
use properties::longhands::z_index::computed_value::T;
return match self.gecko.mZIndex.as_value() {
CoordDataValue::Auto => T::Auto,
CoordDataValue::Integer(n) => T::Number(n),
_ => {
debug_assert!(false);
T::Number(0)
}
}
}
pub fn set_box_sizing(&mut self, v: longhands::box_sizing::computed_value::T) {
use computed_values::box_sizing::T;
use gecko_bindings::structs::StyleBoxSizing;
// TODO: guess what to do with box-sizing: padding-box
self.gecko.mBoxSizing = match v {
T::content_box => StyleBoxSizing::Content,
T::border_box => StyleBoxSizing::Border
}
}
${impl_simple_copy('box_sizing', 'mBoxSizing')}
</%self:impl_trait>
<% skip_outline_longhands = " ".join("outline-color outline-style outline-width".split() +
["-moz-outline-radius-{0}".format(x.ident.replace("_", ""))
for x in CORNERS]) %>
<%self:impl_trait style_struct_name="Outline"
skip_longhands="${skip_outline_longhands}"
skip_additionals="*">
<% impl_keyword("outline_style", "mOutlineStyle", border_style_keyword, need_clone=True) %>
<% impl_color("outline_color", "mOutlineColor", need_clone=True) %>
<% impl_app_units("outline_width", "mActualOutlineWidth", need_clone=True,
round_to_pixels=True) %>
% for corner in CORNERS:
<% impl_corner_style_coord("_moz_outline_radius_%s" % corner.ident.replace("_", ""),
"mOutlineRadius",
corner.x_index,
corner.y_index) %>
% endfor
pub fn outline_has_nonzero_width(&self) -> bool {
self.gecko.mActualOutlineWidth != 0
}
</%self:impl_trait>
<%self:impl_trait style_struct_name="Font"
skip_longhands="font-family font-stretch font-style font-size font-weight"
skip_additionals="*">
pub fn set_font_family(&mut self, v: longhands::font_family::computed_value::T) {
use properties::longhands::font_family::computed_value::FontFamily;
use gecko_bindings::structs::FontFamilyType;
let list = &mut self.gecko.mFont.fontlist;
unsafe { Gecko_FontFamilyList_Clear(list); }
for family in &v.0 {
match *family {
FontFamily::FamilyName(ref name) => {
unsafe { Gecko_FontFamilyList_AppendNamed(list, name.as_ptr()); }
}
FontFamily::Generic(ref name) => {
let family_type =
if name == &atom!("serif") { FontFamilyType::eFamily_serif }
else if name == &atom!("sans-serif") { FontFamilyType::eFamily_sans_serif }
else if name == &atom!("cursive") { FontFamilyType::eFamily_cursive }
else if name == &atom!("fantasy") { FontFamilyType::eFamily_fantasy }
else if name == &atom!("monospace") { FontFamilyType::eFamily_monospace }
else { panic!("Unknown generic font family") };
unsafe { Gecko_FontFamilyList_AppendGeneric(list, family_type); }
}
}
}
}
pub fn copy_font_family_from(&mut self, other: &Self) {
unsafe { Gecko_CopyFontFamilyFrom(&mut self.gecko.mFont, &other.gecko.mFont); }
}
<%call expr="impl_keyword('font_style', 'mFont.style',
data.longhands_by_name['font-style'].keyword, need_clone=False)"></%call>
// FIXME(bholley): Gecko has two different sizes, one of which (mSize) is the
// actual computed size, and the other of which (mFont.size) is the 'display
// size' which takes font zooming into account. We don't handle font zooming yet.
pub fn set_font_size(&mut self, v: longhands::font_size::computed_value::T) {
self.gecko.mFont.size = v.0;
self.gecko.mSize = v.0;
}
pub fn copy_font_size_from(&mut self, other: &Self) {
self.gecko.mFont.size = other.gecko.mFont.size;
self.gecko.mSize = other.gecko.mSize;
}
pub fn clone_font_size(&self) -> longhands::font_size::computed_value::T {
Au(self.gecko.mSize)
}
<% stretch_keyword = Keyword("font-stretch",
"normal ultra-condensed extra-condensed condensed " +
"semi-condensed semi-expanded expanded " +
"extra-expanded ultra-expanded",
gecko_constant_prefix='NS_FONT_STRETCH') %>
${impl_keyword('font_stretch', 'mFont.stretch', stretch_keyword, need_clone=False, cast_type='i16')}
pub fn set_font_weight(&mut self, v: longhands::font_weight::computed_value::T) {
self.gecko.mFont.weight = v as u16;
}
${impl_simple_copy('font_weight', 'mFont.weight')}
pub fn clone_font_weight(&self) -> longhands::font_weight::computed_value::T {
debug_assert!(self.gecko.mFont.weight >= 100);
debug_assert!(self.gecko.mFont.weight <= 900);
debug_assert!(self.gecko.mFont.weight % 10 == 0);
unsafe { transmute(self.gecko.mFont.weight) }
}
// This is used for PartialEq, which we don't implement for gecko style structs.
pub fn compute_font_hash(&mut self) {}
</%self:impl_trait>
<% skip_box_longhands= """display overflow-y vertical-align
-moz-binding page-break-before page-break-after""" %>
<%self:impl_trait style_struct_name="Box" skip_longhands="${skip_box_longhands}">
    // We manually implement the |display| property until we get general
// infrastructure for preffing certain values.
<% display_keyword = Keyword("display", "inline block inline-block table inline-table table-row-group " +
"table-header-group table-footer-group table-row table-column-group " +
"table-column table-cell table-caption list-item flex none " +
"-moz-box -moz-inline-box",
gecko_enum_prefix="StyleDisplay") %>
${impl_keyword('display', 'mDisplay', display_keyword, True)}
// overflow-y is implemented as a newtype of overflow-x, so we need special handling.
// We could generalize this if we run into other newtype keywords.
<% overflow_x = data.longhands_by_name["overflow-x"] %>
pub fn set_overflow_y(&mut self, v: longhands::overflow_y::computed_value::T) {
use properties::longhands::overflow_x::computed_value::T as BaseType;
// FIXME(bholley): Align binary representations and ditch |match| for cast + static_asserts
self.gecko.mOverflowY = match v.0 {
% for value in overflow_x.keyword.values_for('gecko'):
BaseType::${to_rust_ident(value)} => structs::${overflow_x.keyword.gecko_constant(value)} as u8,
% endfor
};
}
${impl_simple_copy('overflow_y', 'mOverflowY')}
pub fn clone_overflow_y(&self) -> longhands::overflow_y::computed_value::T {
use properties::longhands::overflow_x::computed_value::T as BaseType;
use properties::longhands::overflow_y::computed_value::T as NewType;
// FIXME(bholley): Align binary representations and ditch |match| for cast + static_asserts
match self.gecko.mOverflowY as u32 {
% for value in overflow_x.keyword.values_for('gecko'):
structs::${overflow_x.keyword.gecko_constant(value)} => NewType(BaseType::${to_rust_ident(value)}),
% endfor
x => panic!("Found unexpected value in style struct for overflow_y property: {}", x),
}
}
pub fn set_vertical_align(&mut self, v: longhands::vertical_align::computed_value::T) {
<% keyword = data.longhands_by_name["vertical-align"].keyword %>
use properties::longhands::vertical_align::computed_value::T;
// FIXME: Align binary representations and ditch |match| for cast + static_asserts
match v {
% for value in keyword.values_for('gecko'):
T::${to_rust_ident(value)} =>
self.gecko.mVerticalAlign.set_value(
CoordDataValue::Enumerated(structs::${keyword.gecko_constant(value)})),
% endfor
T::LengthOrPercentage(v) => self.gecko.mVerticalAlign.set(v),
}
}
pub fn clone_vertical_align(&self) -> longhands::vertical_align::computed_value::T {
use properties::longhands::vertical_align::computed_value::T;
use values::computed::LengthOrPercentage;
match self.gecko.mVerticalAlign.as_value() {
% for value in keyword.values_for('gecko'):
CoordDataValue::Enumerated(structs::${keyword.gecko_constant(value)}) => T::${to_rust_ident(value)},
% endfor
CoordDataValue::Enumerated(_) => panic!("Unexpected enum variant for vertical-align"),
_ => {
let v = LengthOrPercentage::from_gecko_style_coord(&self.gecko.mVerticalAlign)
.expect("Expected length or percentage for vertical-align");
T::LengthOrPercentage(v)
}
}
}
<%call expr="impl_coord_copy('vertical_align', 'mVerticalAlign')"></%call>
#[allow(non_snake_case)]
pub fn set__moz_binding(&mut self, v: longhands::_moz_binding::computed_value::T) {
use properties::longhands::_moz_binding::SpecifiedValue as BindingValue;
match v {
BindingValue::None => debug_assert!(self.gecko.mBinding.mRawPtr.is_null()),
BindingValue::Url(ref url, ref extra_data) => {
unsafe {
Gecko_SetMozBinding(&mut self.gecko,
url.as_str().as_ptr(),
url.as_str().len() as u32,
extra_data.base.as_raw(),
extra_data.referrer.as_raw(),
extra_data.principal.as_raw());
}
}
}
}
#[allow(non_snake_case)]
pub fn copy__moz_binding_from(&mut self, other: &Self) {
unsafe { Gecko_CopyMozBindingFrom(&mut self.gecko, &other.gecko); }
}
// Temp fix for Bugzilla bug 24000.
// Map 'auto' and 'avoid' to false, and 'always', 'left', and 'right' to true.
// "A conforming user agent may interpret the values 'left' and 'right'
// as 'always'." - CSS2.1, section 13.3.1
pub fn set_page_break_before(&mut self, v: longhands::page_break_before::computed_value::T) {
use computed_values::page_break_before::T;
let result = match v {
T::auto => false,
T::always => true,
T::avoid => false,
T::left => true,
T::right => true
};
self.gecko.mBreakBefore = result;
}
${impl_simple_copy('page_break_before', 'mBreakBefore')}
// Temp fix for Bugzilla bug 24000.
// See set_page_break_before for detail.
pub fn set_page_break_after(&mut self, v: longhands::page_break_after::computed_value::T) {
use computed_values::page_break_after::T;
let result = match v {
T::auto => false,
T::always => true,
T::avoid => false,
T::left => true,
T::right => true
};
        self.gecko.mBreakAfter = result;
}
${impl_simple_copy('page_break_after', 'mBreakAfter')}
</%self:impl_trait>
<%def name="simple_image_array_property(name, shorthand, field_name)">
<%
image_layers_field = "mImage" if shorthand == "background" else "mMask"
%>
pub fn copy_${shorthand}_${name}_from(&mut self, other: &Self) {
unsafe {
Gecko_EnsureImageLayersLength(&mut self.gecko.${image_layers_field},
other.gecko.${image_layers_field}.mLayers.len());
}
for (layer, other) in self.gecko.${image_layers_field}.mLayers.iter_mut()
.zip(other.gecko.${image_layers_field}.mLayers.iter())
.take(other.gecko.${image_layers_field}
.${field_name}Count as usize) {
layer.${field_name} = other.${field_name};
}
self.gecko.${image_layers_field}.${field_name}Count =
other.gecko.${image_layers_field}.${field_name}Count;
}
pub fn set_${shorthand}_${name}(&mut self,
v: longhands::${shorthand}_${name}::computed_value::T) {
unsafe {
Gecko_EnsureImageLayersLength(&mut self.gecko.${image_layers_field}, v.0.len());
}
self.gecko.${image_layers_field}.${field_name}Count = v.0.len() as u32;
for (servo, geckolayer) in v.0.into_iter()
.zip(self.gecko.${image_layers_field}.mLayers.iter_mut()) {
geckolayer.${field_name} = {
${caller.body()}
};
}
}
</%def>
<%def name="impl_common_image_layer_properties(shorthand)">
<%
image_layers_field = "mImage" if shorthand == "background" else "mMask"
%>
<%self:simple_image_array_property name="repeat" shorthand="${shorthand}" field_name="mRepeat">
use properties::longhands::${shorthand}_repeat::single_value::computed_value::T;
use gecko_bindings::structs::nsStyleImageLayers_Repeat;
use gecko_bindings::structs::NS_STYLE_IMAGELAYER_REPEAT_REPEAT;
use gecko_bindings::structs::NS_STYLE_IMAGELAYER_REPEAT_NO_REPEAT;
use gecko_bindings::structs::NS_STYLE_IMAGELAYER_REPEAT_SPACE;
use gecko_bindings::structs::NS_STYLE_IMAGELAYER_REPEAT_ROUND;
let (repeat_x, repeat_y) = match servo {
T::repeat_x => (NS_STYLE_IMAGELAYER_REPEAT_REPEAT,
NS_STYLE_IMAGELAYER_REPEAT_NO_REPEAT),
T::repeat_y => (NS_STYLE_IMAGELAYER_REPEAT_NO_REPEAT,
NS_STYLE_IMAGELAYER_REPEAT_REPEAT),
T::repeat => (NS_STYLE_IMAGELAYER_REPEAT_REPEAT,
NS_STYLE_IMAGELAYER_REPEAT_REPEAT),
T::space => (NS_STYLE_IMAGELAYER_REPEAT_SPACE,
NS_STYLE_IMAGELAYER_REPEAT_SPACE),
T::round => (NS_STYLE_IMAGELAYER_REPEAT_ROUND,
NS_STYLE_IMAGELAYER_REPEAT_ROUND),
T::no_repeat => (NS_STYLE_IMAGELAYER_REPEAT_NO_REPEAT,
NS_STYLE_IMAGELAYER_REPEAT_NO_REPEAT),
};
nsStyleImageLayers_Repeat {
mXRepeat: repeat_x as u8,
mYRepeat: repeat_y as u8,
}
</%self:simple_image_array_property>
<%self:simple_image_array_property name="clip" shorthand="${shorthand}" field_name="mClip">
use properties::longhands::${shorthand}_clip::single_value::computed_value::T;
match servo {
T::border_box => structs::NS_STYLE_IMAGELAYER_CLIP_BORDER as u8,
T::padding_box => structs::NS_STYLE_IMAGELAYER_CLIP_PADDING as u8,
T::content_box => structs::NS_STYLE_IMAGELAYER_CLIP_CONTENT as u8,
}
</%self:simple_image_array_property>
<%self:simple_image_array_property name="origin" shorthand="${shorthand}" field_name="mOrigin">
use properties::longhands::${shorthand}_origin::single_value::computed_value::T;
match servo {
T::border_box => structs::NS_STYLE_IMAGELAYER_ORIGIN_BORDER as u8,
T::padding_box => structs::NS_STYLE_IMAGELAYER_ORIGIN_PADDING as u8,
T::content_box => structs::NS_STYLE_IMAGELAYER_ORIGIN_CONTENT as u8,
}
</%self:simple_image_array_property>
pub fn copy_${shorthand}_position_from(&mut self, other: &Self) {
self.gecko.${image_layers_field}.mPositionXCount
= cmp::min(1, other.gecko.${image_layers_field}.mPositionXCount);
self.gecko.${image_layers_field}.mPositionYCount
= cmp::min(1, other.gecko.${image_layers_field}.mPositionYCount);
self.gecko.${image_layers_field}.mLayers.mFirstElement.mPosition =
other.gecko.${image_layers_field}.mLayers.mFirstElement.mPosition;
unsafe {
Gecko_EnsureImageLayersLength(&mut self.gecko.${image_layers_field},
other.gecko.${image_layers_field}.mLayers.len());
}
for (layer, other) in self.gecko.${image_layers_field}.mLayers.iter_mut()
.zip(other.gecko.${image_layers_field}.mLayers.iter())
.take(other.gecko.${image_layers_field}.mPositionXCount as usize) {
layer.mPosition.mXPosition
= other.mPosition.mXPosition;
}
for (layer, other) in self.gecko.${image_layers_field}.mLayers.iter_mut()
.zip(other.gecko.${image_layers_field}.mLayers.iter())
.take(other.gecko.${image_layers_field}.mPositionYCount as usize) {
layer.mPosition.mYPosition
= other.mPosition.mYPosition;
}
self.gecko.${image_layers_field}.mPositionXCount
= other.gecko.${image_layers_field}.mPositionXCount;
self.gecko.${image_layers_field}.mPositionYCount
= other.gecko.${image_layers_field}.mPositionYCount;
}
pub fn clone_${shorthand}_position(&self)
-> longhands::${shorthand}_position::computed_value::T {
use values::computed::position::Position;
longhands::background_position::computed_value::T(
self.gecko.${image_layers_field}.mLayers.iter()
.take(self.gecko.${image_layers_field}.mPositionXCount as usize)
.take(self.gecko.${image_layers_field}.mPositionYCount as usize)
.map(|position| Position {
horizontal: position.mPosition.mXPosition.into(),
vertical: position.mPosition.mYPosition.into(),
})
.collect()
)
}
pub fn set_${shorthand}_position(&mut self,
v: longhands::${shorthand}_position::computed_value::T) {
unsafe {
Gecko_EnsureImageLayersLength(&mut self.gecko.${image_layers_field}, v.0.len());
}
self.gecko.${image_layers_field}.mPositionXCount = v.0.len() as u32;
self.gecko.${image_layers_field}.mPositionYCount = v.0.len() as u32;
for (servo, geckolayer) in v.0.into_iter().zip(self.gecko.${image_layers_field}
.mLayers.iter_mut()) {
geckolayer.mPosition.mXPosition = servo.horizontal.into();
geckolayer.mPosition.mYPosition = servo.vertical.into();
}
}
<%self:simple_image_array_property name="size" shorthand="${shorthand}" field_name="mSize">
use gecko_bindings::structs::nsStyleImageLayers_Size_Dimension;
use gecko_bindings::structs::nsStyleImageLayers_Size_DimensionType;
use gecko_bindings::structs::{nsStyleCoord_CalcValue, nsStyleImageLayers_Size};
use properties::longhands::background_size::single_value::computed_value::T;
let mut width = nsStyleCoord_CalcValue::new();
let mut height = nsStyleCoord_CalcValue::new();
let (w_type, h_type) = match servo {
T::Explicit(size) => {
let mut w_type = nsStyleImageLayers_Size_DimensionType::eAuto;
let mut h_type = nsStyleImageLayers_Size_DimensionType::eAuto;
if let Some(w) = size.width.to_calc_value() {
width = w;
w_type = nsStyleImageLayers_Size_DimensionType::eLengthPercentage;
}
if let Some(h) = size.height.to_calc_value() {
height = h;
h_type = nsStyleImageLayers_Size_DimensionType::eLengthPercentage;
}
(w_type, h_type)
}
T::Cover => (nsStyleImageLayers_Size_DimensionType::eCover,
nsStyleImageLayers_Size_DimensionType::eCover),
T::Contain => (nsStyleImageLayers_Size_DimensionType::eContain,
nsStyleImageLayers_Size_DimensionType::eContain),
};
nsStyleImageLayers_Size {
mWidth: nsStyleImageLayers_Size_Dimension { _base: width },
mHeight: nsStyleImageLayers_Size_Dimension { _base: height },
mWidthType: w_type as u8,
mHeightType: h_type as u8,
}
</%self:simple_image_array_property>
pub fn clone_${shorthand}_size(&self) -> longhands::background_size::computed_value::T {
use gecko_bindings::structs::nsStyleCoord_CalcValue as CalcValue;
use gecko_bindings::structs::nsStyleImageLayers_Size_DimensionType as DimensionType;
use properties::longhands::background_size::single_value::computed_value::{ExplicitSize, T};
use values::computed::LengthOrPercentageOrAuto;
fn to_servo(value: CalcValue, ty: u8) -> LengthOrPercentageOrAuto {
if ty == DimensionType::eAuto as u8 {
LengthOrPercentageOrAuto::Auto
} else {
debug_assert!(ty == DimensionType::eLengthPercentage as u8);
LengthOrPercentageOrAuto::Calc(value.into())
}
}
longhands::background_size::computed_value::T(
self.gecko.${image_layers_field}.mLayers.iter().map(|ref layer| {
if DimensionType::eCover as u8 == layer.mSize.mWidthType {
debug_assert!(layer.mSize.mHeightType == DimensionType::eCover as u8);
return T::Cover
}
if DimensionType::eContain as u8 == layer.mSize.mWidthType {
debug_assert!(layer.mSize.mHeightType == DimensionType::eContain as u8);
return T::Contain
}
T::Explicit(ExplicitSize {
width: to_servo(layer.mSize.mWidth._base, layer.mSize.mWidthType),
height: to_servo(layer.mSize.mHeight._base, layer.mSize.mHeightType),
})
}).collect()
)
}
pub fn copy_${shorthand}_image_from(&mut self, other: &Self) {
unsafe {
Gecko_CopyImageValueFrom(&mut self.gecko.${image_layers_field}.mLayers.mFirstElement.mImage,
&other.gecko.${image_layers_field}.mLayers.mFirstElement.mImage);
}
}
pub fn set_${shorthand}_image(&mut self,
images: longhands::${shorthand}_image::computed_value::T) {
use gecko_bindings::structs::nsStyleImage;
use gecko_bindings::structs::nsStyleImageLayers_LayerType as LayerType;
use gecko_bindings::structs::{NS_STYLE_GRADIENT_SHAPE_LINEAR, NS_STYLE_GRADIENT_SIZE_FARTHEST_CORNER};
use gecko_bindings::structs::nsStyleCoord;
use values::computed::{Image, LinearGradient};
use values::specified::AngleOrCorner;
use values::specified::{HorizontalDirection, VerticalDirection};
use cssparser::Color as CSSColor;
fn set_linear_gradient(gradient: LinearGradient, geckoimage: &mut nsStyleImage) {
let stop_count = gradient.stops.len();
if stop_count >= ::std::u32::MAX as usize {
warn!("stylo: Prevented overflow due to too many gradient stops");
return;
}
let gecko_gradient = unsafe {
Gecko_CreateGradient(NS_STYLE_GRADIENT_SHAPE_LINEAR as u8,
NS_STYLE_GRADIENT_SIZE_FARTHEST_CORNER as u8,
/* repeating = */ false,
/* legacy_syntax = */ false,
stop_count as u32)
};
match gradient.angle_or_corner {
AngleOrCorner::Angle(angle) => {
unsafe {
(*gecko_gradient).mAngle.set(angle);
(*gecko_gradient).mBgPosX.set_value(CoordDataValue::None);
(*gecko_gradient).mBgPosY.set_value(CoordDataValue::None);
}
}
AngleOrCorner::Corner(horiz, vert) => {
let percent_x = match horiz {
HorizontalDirection::Left => 0.0,
HorizontalDirection::Right => 1.0,
};
let percent_y = match vert {
VerticalDirection::Top => 0.0,
VerticalDirection::Bottom => 1.0,
};
unsafe {
(*gecko_gradient).mAngle.set_value(CoordDataValue::None);
(*gecko_gradient).mBgPosX
.set_value(CoordDataValue::Percent(percent_x));
(*gecko_gradient).mBgPosY
.set_value(CoordDataValue::Percent(percent_y));
}
}
}
let mut coord: nsStyleCoord = nsStyleCoord::null();
for (index, stop) in gradient.stops.iter().enumerate() {
// NB: stops are guaranteed to be none in the gecko side by
// default.
coord.set(stop.position);
let color = match stop.color {
CSSColor::CurrentColor => {
// TODO(emilio): gecko just stores an nscolor,
// and it doesn't seem to support currentColor
// as value in a gradient.
//
// Double-check it and either remove
// currentColor for servo or see how gecko
// handles this.
0
},
CSSColor::RGBA(ref rgba) => convert_rgba_to_nscolor(rgba),
};
let mut stop = unsafe {
&mut (*gecko_gradient).mStops[index]
};
stop.mColor = color;
stop.mIsInterpolationHint = false;
stop.mLocation.copy_from(&coord);
}
unsafe {
Gecko_SetGradientImageValue(geckoimage, gecko_gradient);
}
}
unsafe {
// Prevent leaking of the last elements we did set
for image in &mut self.gecko.${image_layers_field}.mLayers {
Gecko_SetNullImageValue(&mut image.mImage)
}
// XXXManishearth clear mSourceURI for masks
Gecko_EnsureImageLayersLength(&mut self.gecko.${image_layers_field}, images.0.len());
for image in &mut self.gecko.${image_layers_field}.mLayers {
Gecko_InitializeImageLayer(image, LayerType::${shorthand.title()});
}
}
self.gecko.${image_layers_field}.mImageCount = images.0.len() as u32;
for (image, geckoimage) in images.0.into_iter().zip(self.gecko.${image_layers_field}
.mLayers.iter_mut()) {
% if shorthand == "background":
if let Some(image) = image.0 {
match image {
Image::LinearGradient(gradient) => {
set_linear_gradient(gradient, &mut geckoimage.mImage)
},
Image::Url(..) => {
// let utf8_bytes = url.as_bytes();
// Gecko_SetUrlImageValue(&mut self.gecko.mImage.mLayers.mFirstElement,
// utf8_bytes.as_ptr() as *const _,
// utf8_bytes.len());
warn!("stylo: imgRequestProxies are not threadsafe in gecko, \
background-image: url() not yet implemented");
}
}
}
% else:
use properties::longhands::mask_image::single_value::computed_value::T;
match image {
T::Image(image) => match image {
Image::LinearGradient(gradient) => {
set_linear_gradient(gradient, &mut geckoimage.mImage)
}
_ => () // we need to support image values
},
                    _ => () // we need to support url values
}
% endif
}
}
<%
fill_fields = "mRepeat mClip mOrigin mPositionX mPositionY mImage"
if shorthand == "background":
fill_fields += " mAttachment"
else:
# mSourceURI uses mImageCount
fill_fields += " mMaskMode mComposite"
%>
pub fn fill_arrays(&mut self) {
use gecko_bindings::bindings::Gecko_FillAll${shorthand.title()}Lists;
use std::cmp;
let mut max_len = 1;
% for member in fill_fields.split():
max_len = cmp::max(max_len, self.gecko.${image_layers_field}.${member}Count);
% endfor
// XXXManishearth Gecko does an optimization here where it only
// fills things in if any of the properties have been set
unsafe {
// While we could do this manually, we'd need to also manually
// run all the copy constructors, so we just delegate to gecko
Gecko_FillAll${shorthand.title()}Lists(&mut self.gecko.${image_layers_field}, max_len);
}
}
</%def>
// TODO: Gecko accepts lists in most background-related properties. We just use
// the first element (which is the common case), but at some point we want to
// add support for parsing these lists in servo and pushing to nsTArray's.
<% skip_background_longhands = """background-color background-repeat
background-image background-clip
background-origin background-attachment
background-size background-position""" %>
<%self:impl_trait style_struct_name="Background"
skip_longhands="${skip_background_longhands}"
skip_additionals="*">
<% impl_color("background_color", "mBackgroundColor", need_clone=True, complex_color=False) %>
<% impl_common_image_layer_properties("background") %>
<%self:simple_image_array_property name="attachment" shorthand="background" field_name="mAttachment">
use properties::longhands::background_attachment::single_value::computed_value::T;
match servo {
T::scroll => structs::NS_STYLE_IMAGELAYER_ATTACHMENT_SCROLL as u8,
T::fixed => structs::NS_STYLE_IMAGELAYER_ATTACHMENT_FIXED as u8,
T::local => structs::NS_STYLE_IMAGELAYER_ATTACHMENT_LOCAL as u8,
}
</%self:simple_image_array_property>
</%self:impl_trait>
<%self:impl_trait style_struct_name="List" skip_longhands="list-style-type" skip_additionals="*">
${impl_keyword_setter("list_style_type", "__LIST_STYLE_TYPE__",
data.longhands_by_name["list-style-type"].keyword)}
pub fn copy_list_style_type_from(&mut self, other: &Self) {
unsafe {
Gecko_CopyListStyleTypeFrom(&mut self.gecko, &other.gecko);
}
}
</%self:impl_trait>
<%self:impl_trait style_struct_name="Effects"
skip_longhands="box-shadow">
pub fn set_box_shadow(&mut self, v: longhands::box_shadow::computed_value::T) {
use cssparser::Color;
self.gecko.mBoxShadow.replace_with_new(v.0.len() as u32);
for (servo, gecko_shadow) in v.0.into_iter()
.zip(self.gecko.mBoxShadow.iter_mut()) {
gecko_shadow.mXOffset = servo.offset_x.0;
gecko_shadow.mYOffset = servo.offset_y.0;
gecko_shadow.mRadius = servo.blur_radius.0;
            gecko_shadow.mSpread = servo.spread_radius.0;
gecko_shadow.mInset = servo.inset;
gecko_shadow.mColor = match servo.color {
Color::RGBA(rgba) => {
gecko_shadow.mHasColor = true;
convert_rgba_to_nscolor(&rgba)
},
// TODO handle currentColor
// https://bugzilla.mozilla.org/show_bug.cgi?id=760345
Color::CurrentColor => 0,
}
}
}
pub fn copy_box_shadow_from(&mut self, other: &Self) {
self.gecko.mBoxShadow.copy_from(&other.gecko.mBoxShadow);
}
pub fn clone_box_shadow(&self) -> longhands::box_shadow::computed_value::T {
use cssparser::Color;
let buf = self.gecko.mBoxShadow.iter().map(|shadow| {
longhands::box_shadow::single_value::computed_value::T {
offset_x: Au(shadow.mXOffset),
offset_y: Au(shadow.mYOffset),
blur_radius: Au(shadow.mRadius),
spread_radius: Au(shadow.mSpread),
inset: shadow.mInset,
color: Color::RGBA(convert_nscolor_to_rgba(shadow.mColor)),
}
}).collect();
longhands::box_shadow::computed_value::T(buf)
}
</%self:impl_trait>
<%self:impl_trait style_struct_name="InheritedTable"
skip_longhands="border-spacing">
pub fn set_border_spacing(&mut self, v: longhands::border_spacing::computed_value::T) {
self.gecko.mBorderSpacingCol = v.horizontal.0;
self.gecko.mBorderSpacingRow = v.vertical.0;
}
pub fn copy_border_spacing_from(&mut self, other: &Self) {
self.gecko.mBorderSpacingCol = other.gecko.mBorderSpacingCol;
self.gecko.mBorderSpacingRow = other.gecko.mBorderSpacingRow;
}
</%self:impl_trait>
<%self:impl_trait style_struct_name="InheritedBox"
skip_longhands="image-rendering">
<% render_keyword = Keyword("image-rendering",
"auto optimizequality optimizespeed crispedges") %>
${impl_keyword('image_rendering', 'mImageRendering', render_keyword, need_clone=False)}
</%self:impl_trait>
<%self:impl_trait style_struct_name="InheritedText"
skip_longhands="text-align text-shadow line-height letter-spacing word-spacing">
<% text_align_keyword = Keyword("text-align", "start end left right center justify -moz-center -moz-left " +
"-moz-right match-parent") %>
${impl_keyword('text_align', 'mTextAlign', text_align_keyword, need_clone=False)}
pub fn set_text_shadow(&mut self, v: longhands::text_shadow::computed_value::T) {
use cssparser::Color;
self.gecko.mTextShadow.replace_with_new(v.0.len() as u32);
for (servo, gecko_shadow) in v.0.into_iter()
.zip(self.gecko.mTextShadow.iter_mut()) {
gecko_shadow.mXOffset = servo.offset_x.0;
gecko_shadow.mYOffset = servo.offset_y.0;
gecko_shadow.mRadius = servo.blur_radius.0;
gecko_shadow.mHasColor = false;
gecko_shadow.mColor = match servo.color {
Color::RGBA(rgba) => {
gecko_shadow.mHasColor = true;
convert_rgba_to_nscolor(&rgba)
},
// TODO handle currentColor
// https://bugzilla.mozilla.org/show_bug.cgi?id=760345
Color::CurrentColor => 0,
}
}
}
pub fn copy_text_shadow_from(&mut self, other: &Self) {
self.gecko.mTextShadow.copy_from(&other.gecko.mTextShadow);
}
pub fn clone_text_shadow(&self) -> longhands::text_shadow::computed_value::T {
use cssparser::Color;
let buf = self.gecko.mTextShadow.iter().map(|shadow| {
longhands::text_shadow::computed_value::TextShadow {
offset_x: Au(shadow.mXOffset),
offset_y: Au(shadow.mYOffset),
blur_radius: Au(shadow.mRadius),
color: Color::RGBA(convert_nscolor_to_rgba(shadow.mColor)),
}
}).collect();
longhands::text_shadow::computed_value::T(buf)
}
pub fn set_line_height(&mut self, v: longhands::line_height::computed_value::T) {
use properties::longhands::line_height::computed_value::T;
// FIXME: Align binary representations and ditch |match| for cast + static_asserts
let en = match v {
T::Normal => CoordDataValue::Normal,
T::Length(val) => CoordDataValue::Coord(val.0),
T::Number(val) => CoordDataValue::Factor(val),
T::MozBlockHeight =>
CoordDataValue::Enumerated(structs::NS_STYLE_LINE_HEIGHT_BLOCK_HEIGHT),
};
self.gecko.mLineHeight.set_value(en);
}
pub fn clone_line_height(&self) -> longhands::line_height::computed_value::T {
use properties::longhands::line_height::computed_value::T;
return match self.gecko.mLineHeight.as_value() {
CoordDataValue::Normal => T::Normal,
CoordDataValue::Coord(coord) => T::Length(Au(coord)),
CoordDataValue::Factor(n) => T::Number(n),
CoordDataValue::Enumerated(val) if val == structs::NS_STYLE_LINE_HEIGHT_BLOCK_HEIGHT =>
T::MozBlockHeight,
_ => {
debug_assert!(false);
T::MozBlockHeight
}
}
}
<%call expr="impl_coord_copy('line_height', 'mLineHeight')"></%call>
pub fn set_letter_spacing(&mut self, v: longhands::letter_spacing::computed_value::T) {
match v.0 {
Some(au) => self.gecko.mLetterSpacing.set_value(CoordDataValue::Coord(au.0)),
None => self.gecko.mLetterSpacing.set_value(CoordDataValue::Normal)
}
}
<%call expr="impl_coord_copy('letter_spacing', 'mLetterSpacing')"></%call>
pub fn set_word_spacing(&mut self, v: longhands::word_spacing::computed_value::T) {
use values::computed::LengthOrPercentage::*;
match v.0 {
Some(lop) => match lop {
Length(au) => self.gecko.mWordSpacing.set_value(CoordDataValue::Coord(au.0)),
Percentage(f) => self.gecko.mWordSpacing.set_value(CoordDataValue::Percent(f)),
Calc(l_p) => self.gecko.mWordSpacing.set_value(CoordDataValue::Calc(l_p.into())),
},
// https://drafts.csswg.org/css-text-3/#valdef-word-spacing-normal
None => self.gecko.mWordSpacing.set_value(CoordDataValue::Coord(0)),
}
}
<%call expr="impl_coord_copy('word_spacing', 'mWordSpacing')"></%call>
</%self:impl_trait>
<%self:impl_trait style_struct_name="Text"
skip_longhands="text-decoration-color text-decoration-line"
skip_additionals="*">
${impl_color("text_decoration_color", "mTextDecorationColor", need_clone=True)}
pub fn set_text_decoration_line(&mut self, v: longhands::text_decoration_line::computed_value::T) {
let mut bits: u8 = 0;
if v.underline {
bits |= structs::NS_STYLE_TEXT_DECORATION_LINE_UNDERLINE as u8;
}
if v.overline {
bits |= structs::NS_STYLE_TEXT_DECORATION_LINE_OVERLINE as u8;
}
if v.line_through {
bits |= structs::NS_STYLE_TEXT_DECORATION_LINE_LINE_THROUGH as u8;
}
self.gecko.mTextDecorationLine = bits;
}
${impl_simple_copy('text_decoration_line', 'mTextDecorationLine')}
#[inline]
pub fn has_underline(&self) -> bool {
(self.gecko.mTextDecorationLine & (structs::NS_STYLE_TEXT_DECORATION_LINE_UNDERLINE as u8)) != 0
}
#[inline]
pub fn has_overline(&self) -> bool {
(self.gecko.mTextDecorationLine & (structs::NS_STYLE_TEXT_DECORATION_LINE_OVERLINE as u8)) != 0
}
#[inline]
pub fn has_line_through(&self) -> bool {
(self.gecko.mTextDecorationLine & (structs::NS_STYLE_TEXT_DECORATION_LINE_LINE_THROUGH as u8)) != 0
}
</%self:impl_trait>
<% skip_svg_longhands = """
flood-color lighting-color stop-color
mask-mode mask-repeat mask-clip mask-origin mask-composite mask-position mask-size mask-image
clip-path
"""
%>
<%self:impl_trait style_struct_name="SVG"
skip_longhands="${skip_svg_longhands}"
skip_additionals="*">
<% impl_color("flood_color", "mFloodColor", complex_color=False) %>
<% impl_color("lighting_color", "mLightingColor", complex_color=False) %>
<% impl_color("stop_color", "mStopColor", complex_color=False) %>
<% impl_common_image_layer_properties("mask") %>
<%self:simple_image_array_property name="mode" shorthand="mask" field_name="mMaskMode">
use properties::longhands::mask_mode::single_value::computed_value::T;
match servo {
T::alpha => structs::NS_STYLE_MASK_MODE_ALPHA as u8,
T::luminance => structs::NS_STYLE_MASK_MODE_LUMINANCE as u8,
T::match_source => structs::NS_STYLE_MASK_MODE_MATCH_SOURCE as u8,
}
</%self:simple_image_array_property>
<%self:simple_image_array_property name="composite" shorthand="mask" field_name="mComposite">
use properties::longhands::mask_composite::single_value::computed_value::T;
match servo {
T::add => structs::NS_STYLE_MASK_COMPOSITE_ADD as u8,
T::subtract => structs::NS_STYLE_MASK_COMPOSITE_SUBTRACT as u8,
T::intersect => structs::NS_STYLE_MASK_COMPOSITE_INTERSECT as u8,
T::exclude => structs::NS_STYLE_MASK_COMPOSITE_EXCLUDE as u8,
}
</%self:simple_image_array_property>
pub fn set_clip_path(&mut self, v: longhands::clip_path::computed_value::T) {
use gecko_bindings::bindings::{Gecko_NewBasicShape, Gecko_DestroyClipPath};
use gecko_bindings::structs::StyleClipPathGeometryBox;
use gecko_bindings::structs::{StyleBasicShape, StyleBasicShapeType, StyleShapeSourceType};
use gecko_bindings::structs::{StyleClipPath, StyleFillRule};
use gecko::conversions::basic_shape::set_corners_from_radius;
use gecko::values::GeckoStyleCoordConvertible;
use values::computed::basic_shape::*;
let ref mut clip_path = self.gecko.mClipPath;
// clean up existing struct
unsafe { Gecko_DestroyClipPath(clip_path) };
clip_path.mType = StyleShapeSourceType::None;
match v {
ShapeSource::Url(..) => println!("stylo: clip-path: url() not yet implemented"),
ShapeSource::None => {} // don't change the type
ShapeSource::Box(reference) => {
clip_path.mReferenceBox = reference.into();
clip_path.mType = StyleShapeSourceType::Box;
}
ShapeSource::Shape(servo_shape, maybe_box) => {
clip_path.mReferenceBox = maybe_box.map(Into::into)
.unwrap_or(StyleClipPathGeometryBox::NoBox);
clip_path.mType = StyleShapeSourceType::Shape;
fn init_shape(clip_path: &mut StyleClipPath, ty: StyleBasicShapeType) -> &mut StyleBasicShape {
unsafe {
// We have to be very careful to avoid a copy here!
let ref mut union = clip_path.__bindgen_anon_1;
let mut shape: &mut *mut StyleBasicShape = union.mBasicShape.as_mut();
*shape = Gecko_NewBasicShape(ty);
&mut **shape
}
}
match servo_shape {
BasicShape::Inset(rect) => {
let mut shape = init_shape(clip_path, StyleBasicShapeType::Inset);
unsafe { shape.mCoordinates.set_len(4) };
// set_len() can't call constructors, so the coordinates
// can contain any value. set_value() attempts to free
// allocated coordinates, so we don't want to feed it
// garbage values which it may misinterpret.
// Instead, we use leaky_set_value to blindly overwrite
// the garbage data without
// attempting to clean up.
shape.mCoordinates[0].leaky_set_null();
rect.top.to_gecko_style_coord(&mut shape.mCoordinates[0]);
shape.mCoordinates[1].leaky_set_null();
rect.right.to_gecko_style_coord(&mut shape.mCoordinates[1]);
shape.mCoordinates[2].leaky_set_null();
rect.bottom.to_gecko_style_coord(&mut shape.mCoordinates[2]);
shape.mCoordinates[3].leaky_set_null();
rect.left.to_gecko_style_coord(&mut shape.mCoordinates[3]);
set_corners_from_radius(rect.round, &mut shape.mRadius);
}
BasicShape::Circle(circ) => {
let mut shape = init_shape(clip_path, StyleBasicShapeType::Circle);
unsafe { shape.mCoordinates.set_len(1) };
shape.mCoordinates[0].leaky_set_null();
circ.radius.to_gecko_style_coord(&mut shape.mCoordinates[0]);
shape.mPosition = circ.position.into();
}
BasicShape::Ellipse(el) => {
let mut shape = init_shape(clip_path, StyleBasicShapeType::Ellipse);
unsafe { shape.mCoordinates.set_len(2) };
shape.mCoordinates[0].leaky_set_null();
el.semiaxis_x.to_gecko_style_coord(&mut shape.mCoordinates[0]);
shape.mCoordinates[1].leaky_set_null();
el.semiaxis_y.to_gecko_style_coord(&mut shape.mCoordinates[1]);
shape.mPosition = el.position.into();
}
BasicShape::Polygon(poly) => {
let mut shape = init_shape(clip_path, StyleBasicShapeType::Polygon);
unsafe {
shape.mCoordinates.set_len(poly.coordinates.len() as u32 * 2);
}
for (i, coord) in poly.coordinates.iter().enumerate() {
shape.mCoordinates[2 * i].leaky_set_null();
shape.mCoordinates[2 * i + 1].leaky_set_null();
coord.0.to_gecko_style_coord(&mut shape.mCoordinates[2 * i]);
coord.1.to_gecko_style_coord(&mut shape.mCoordinates[2 * i + 1]);
}
shape.mFillRule = if poly.fill == FillRule::EvenOdd {
StyleFillRule::Evenodd
} else {
StyleFillRule::Nonzero
};
}
}
}
}
}
pub fn copy_clip_path_from(&mut self, other: &Self) {
use gecko_bindings::bindings::Gecko_CopyClipPathValueFrom;
unsafe {
Gecko_CopyClipPathValueFrom(&mut self.gecko.mClipPath, &other.gecko.mClipPath);
}
}
pub fn clone_clip_path(&self) -> longhands::clip_path::computed_value::T {
use gecko_bindings::structs::StyleShapeSourceType;
use gecko_bindings::structs::StyleClipPathGeometryBox;
use values::computed::basic_shape::*;
let ref clip_path = self.gecko.mClipPath;
match clip_path.mType {
StyleShapeSourceType::None => ShapeSource::None,
StyleShapeSourceType::Box => {
ShapeSource::Box(clip_path.mReferenceBox.into())
}
StyleShapeSourceType::URL => {
warn!("stylo: clip-path: url() not implemented yet");
Default::default()
}
StyleShapeSourceType::Shape => {
let reference = if let StyleClipPathGeometryBox::NoBox = clip_path.mReferenceBox {
None
} else {
Some(clip_path.mReferenceBox.into())
};
let union = clip_path.__bindgen_anon_1;
let shape = unsafe { &**union.mBasicShape.as_ref() };
ShapeSource::Shape(shape.into(), reference)
}
}
}
</%self:impl_trait>
<%self:impl_trait style_struct_name="Color"
skip_longhands="*">
pub fn set_color(&mut self, v: longhands::color::computed_value::T) {
let result = convert_rgba_to_nscolor(&v);
${set_gecko_property("mColor", "result")}
}
<%call expr="impl_simple_copy('color', 'mColor')"></%call>
pub fn clone_color(&self) -> longhands::color::computed_value::T {
let color = ${get_gecko_property("mColor")} as u32;
convert_nscolor_to_rgba(color)
}
</%self:impl_trait>
<%self:impl_trait style_struct_name="Pointing"
skip_longhands="cursor">
pub fn set_cursor(&mut self, v: longhands::cursor::computed_value::T) {
use properties::longhands::cursor::computed_value::T;
use style_traits::cursor::Cursor;
self.gecko.mCursor = match v {
T::AutoCursor => structs::NS_STYLE_CURSOR_AUTO,
T::SpecifiedCursor(cursor) => match cursor {
Cursor::None => structs::NS_STYLE_CURSOR_NONE,
Cursor::Default => structs::NS_STYLE_CURSOR_DEFAULT,
Cursor::Pointer => structs::NS_STYLE_CURSOR_POINTER,
Cursor::ContextMenu => structs::NS_STYLE_CURSOR_CONTEXT_MENU,
Cursor::Help => structs::NS_STYLE_CURSOR_HELP,
Cursor::Progress => structs::NS_STYLE_CURSOR_DEFAULT, // Gecko doesn't support "progress" yet
Cursor::Wait => structs::NS_STYLE_CURSOR_WAIT,
Cursor::Cell => structs::NS_STYLE_CURSOR_CELL,
Cursor::Crosshair => structs::NS_STYLE_CURSOR_CROSSHAIR,
Cursor::Text => structs::NS_STYLE_CURSOR_TEXT,
Cursor::VerticalText => structs::NS_STYLE_CURSOR_VERTICAL_TEXT,
Cursor::Alias => structs::NS_STYLE_CURSOR_ALIAS,
Cursor::Copy => structs::NS_STYLE_CURSOR_COPY,
Cursor::Move => structs::NS_STYLE_CURSOR_MOVE,
Cursor::NoDrop => structs::NS_STYLE_CURSOR_NO_DROP,
Cursor::NotAllowed => structs::NS_STYLE_CURSOR_NOT_ALLOWED,
Cursor::Grab => structs::NS_STYLE_CURSOR_GRAB,
Cursor::Grabbing => structs::NS_STYLE_CURSOR_GRABBING,
Cursor::EResize => structs::NS_STYLE_CURSOR_E_RESIZE,
Cursor::NResize => structs::NS_STYLE_CURSOR_N_RESIZE,
Cursor::NeResize => structs::NS_STYLE_CURSOR_NE_RESIZE,
Cursor::NwResize => structs::NS_STYLE_CURSOR_NW_RESIZE,
Cursor::SResize => structs::NS_STYLE_CURSOR_S_RESIZE,
Cursor::SeResize => structs::NS_STYLE_CURSOR_SE_RESIZE,
Cursor::SwResize => structs::NS_STYLE_CURSOR_SW_RESIZE,
Cursor::WResize => structs::NS_STYLE_CURSOR_W_RESIZE,
Cursor::EwResize => structs::NS_STYLE_CURSOR_EW_RESIZE,
Cursor::NsResize => structs::NS_STYLE_CURSOR_NS_RESIZE,
Cursor::NeswResize => structs::NS_STYLE_CURSOR_NESW_RESIZE,
Cursor::NwseResize => structs::NS_STYLE_CURSOR_NWSE_RESIZE,
Cursor::ColResize => structs::NS_STYLE_CURSOR_COL_RESIZE,
Cursor::RowResize => structs::NS_STYLE_CURSOR_ROW_RESIZE,
Cursor::AllScroll => structs::NS_STYLE_CURSOR_ALL_SCROLL,
Cursor::ZoomIn => structs::NS_STYLE_CURSOR_ZOOM_IN,
Cursor::ZoomOut => structs::NS_STYLE_CURSOR_ZOOM_OUT,
}
} as u8;
}
${impl_simple_copy('cursor', 'mCursor')}
</%self:impl_trait>
<%self:impl_trait style_struct_name="Column"
skip_longhands="column-width">
pub fn set_column_width(&mut self, v: longhands::column_width::computed_value::T) {
match v.0 {
Some(au) => self.gecko.mColumnWidth.set_value(CoordDataValue::Coord(au.0)),
None => self.gecko.mColumnWidth.set_value(CoordDataValue::Auto),
}
}
${impl_coord_copy('column_width', 'mColumnWidth')}
</%self:impl_trait>
<%self:impl_trait style_struct_name="Counters"
skip_longhands="content">
pub fn set_content(&mut self, v: longhands::content::computed_value::T) {
use properties::longhands::content::computed_value::T;
use properties::longhands::content::computed_value::ContentItem;
use gecko_bindings::structs::nsStyleContentData;
use gecko_bindings::structs::nsStyleContentType::*;
use gecko_bindings::bindings::Gecko_ClearStyleContents;
// Converts a string as utf16, and returns an owned, zero-terminated raw buffer.
fn as_utf16_and_forget(s: &str) -> *mut u16 {
use std::mem;
let mut vec = s.encode_utf16().collect::<Vec<_>>();
vec.push(0u16);
let ptr = vec.as_mut_ptr();
mem::forget(vec);
ptr
}
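        // Editor's note (inferred, not in the original): the UTF-16 buffer leaked above is
        // expected to be freed on the Gecko side (see the "we share allocators" comment
        // further down), which is why `mem::forget` is used instead of keeping ownership.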
#[inline(always)]
#[cfg(debug_assertions)]
fn set_image_tracked(contents: &mut nsStyleContentData, val: bool) {
contents.mImageTracked = val;
}
#[inline(always)]
#[cfg(not(debug_assertions))]
fn set_image_tracked(_contents: &mut nsStyleContentData, _val: bool) {}
// Ensure destructors run, otherwise we could leak.
if !self.gecko.mContents.is_empty() {
unsafe {
Gecko_ClearStyleContents(&mut self.gecko);
}
}
match v {
T::none |
T::normal => {}, // Do nothing, already cleared.
T::Content(items) => {
// NB: set_len also reserves the appropriate space.
unsafe { self.gecko.mContents.set_len(items.len() as u32) }
for (i, item) in items.into_iter().enumerate() {
                    // TODO: Servo lacks support for attr() and URIs.
                    // We don't support images either, but need to remember to
// explicitly initialize mImageTracked in debug builds.
set_image_tracked(&mut self.gecko.mContents[i], false);
// NB: Gecko compares the mString value if type is not image
// or URI independently of whatever gets there. In the quote
// cases, they set it to null, so do the same here.
unsafe {
*self.gecko.mContents[i].mContent.mString.as_mut() = ptr::null_mut();
}
match item {
ContentItem::String(value) => {
self.gecko.mContents[i].mType = eStyleContentType_String;
unsafe {
// NB: we share allocators, so doing this is fine.
*self.gecko.mContents[i].mContent.mString.as_mut() =
as_utf16_and_forget(&value);
}
}
ContentItem::OpenQuote
=> self.gecko.mContents[i].mType = eStyleContentType_OpenQuote,
ContentItem::CloseQuote
=> self.gecko.mContents[i].mType = eStyleContentType_CloseQuote,
ContentItem::NoOpenQuote
=> self.gecko.mContents[i].mType = eStyleContentType_NoOpenQuote,
ContentItem::NoCloseQuote
=> self.gecko.mContents[i].mType = eStyleContentType_NoCloseQuote,
ContentItem::Counter(..) |
ContentItem::Counters(..)
=> self.gecko.mContents[i].mType = eStyleContentType_Uninitialized,
}
}
}
}
}
pub fn copy_content_from(&mut self, other: &Self) {
use gecko_bindings::bindings::Gecko_CopyStyleContentsFrom;
unsafe {
Gecko_CopyStyleContentsFrom(&mut self.gecko, &other.gecko)
}
}
</%self:impl_trait>
<%def name="define_ffi_struct_accessor(style_struct)">
#[no_mangle]
#[allow(non_snake_case, unused_variables)]
pub extern "C" fn Servo_GetStyle${style_struct.gecko_name}(computed_values:
ServoComputedValuesBorrowedOrNull) -> *const ${style_struct.gecko_ffi_name} {
ComputedValues::arc_from_borrowed(&computed_values).unwrap().get_${style_struct.name_lower}().get_gecko()
as *const ${style_struct.gecko_ffi_name}
}
</%def>
% for style_struct in data.style_structs:
${declare_style_struct(style_struct)}
${impl_style_struct(style_struct)}
% if not style_struct.name in data.manual_style_structs:
<%self:raw_impl_trait style_struct="${style_struct}"></%self:raw_impl_trait>
% endif
${define_ffi_struct_accessor(style_struct)}
% endfor
// To avoid UB, we store the initial values as an atomic. It would be nice to
// store them as AtomicPtr, but we can't have static AtomicPtr without const
// fns, which aren't in stable Rust.
static INITIAL_VALUES_STORAGE: AtomicUsize = ATOMIC_USIZE_INIT;
unsafe fn raw_initial_values() -> *mut ComputedValues {
INITIAL_VALUES_STORAGE.load(Ordering::Relaxed) as *mut ComputedValues
}
unsafe fn set_raw_initial_values(v: *mut ComputedValues) {
INITIAL_VALUES_STORAGE.store(v as usize, Ordering::Relaxed);
}
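// Editor's sketch (hypothetical, not part of the original source): the AtomicUsize above is
// normally used by leaking a heap allocation once and recovering the pointer later, roughly:
//
//     let initial: Box<ComputedValues> = build_initial_values(); // assumed helper name
//     unsafe { set_raw_initial_values(Box::into_raw(initial)); }
//     let ptr = unsafe { raw_initial_values() };
//     debug_assert!(!ptr.is_null());
//
// `build_initial_values` is a placeholder; the real initialization lives elsewhere.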
static CASCADE_PROPERTY: [CascadePropertyFn; ${len(data.longhands)}] = [
% for property in data.longhands:
longhands::${property.ident}::cascade_property,
% endfor
];<|fim▁end|> | debug_assert!(!raw_initial_values().is_null()); |
<|file_name|>settings.rs<|end_file_name|><|fim▁begin|>// src/settings.rs
// Copyright (C) 2017 authors and contributors (see AUTHORS file)
//
// This file is released under the MIT License.
// ===========================================================================
// Imports
// ===========================================================================
// Stdlib imports
use std::mem;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::RwLock;
// Third-party imports
// use config::*;
// Local imports
use error::{SasdErrorKind, SasdResult};
// ===========================================================================
// Config Helpers
// ===========================================================================
pub type SettingsHandle = Rc<RwLock<Settings>>;
pub fn new_settings_handle(settings: Settings) -> SettingsHandle
{
Rc::new(RwLock::new(settings))
}
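// Hypothetical usage sketch (editor's addition, not in the original file); the port and
// socket directory values are assumptions:
//
//     let settings = SettingsBuilder::new()
//         .port(8080)?
//         .unix().socket_dir("/var/run/sasd".to_owned())?.unix_done()?
//         .build()?;
//     let handle = new_settings_handle(settings);
//     assert_eq!(handle.read().unwrap().port, 8080);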
// ===========================================================================
// SettingsConfig
// ===========================================================================
#[derive(Debug, Deserialize)]
pub struct WindowsConfig {
token_data_dir: String,
}
#[derive(Debug, Deserialize)]
pub struct UnixConfig {
socket_dir: String,
}
#[derive(Debug, Deserialize)]
pub struct SettingsConfig {
port: u16,
unix: Option<UnixConfig>,
windows: Option<WindowsConfig>,
}
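// Editor's sketch of a config file this struct could deserialize (field names come from the
// structs above; the TOML format and the values are assumptions):
//
//     port = 8080
//
//     [unix]
//     socket_dir = "/var/run/sasd"
//
//     [windows]
//     token_data_dir = "C:\\sasd\\tokens"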
// ===========================================================================
// SettingsBuilder
// ===========================================================================
#[derive(Debug)]
pub struct UnixBuilder {
_builder: SettingsBuilder,
// Settings
socket_dir: Option<PathBuf>,
}
impl UnixBuilder {
pub fn new(builder: SettingsBuilder) -> Self
{
UnixBuilder {
_builder: builder,
socket_dir: None,
}
}
pub fn socket_dir(mut self, dir: String) -> SasdResult<Self>
{
self.socket_dir = Some(self._builder.validate_path(dir)?);
Ok(self)
}
pub fn unix_done(self) -> SasdResult<SettingsBuilder>
{
let mut builder = self._builder;
match self.socket_dir {
Some(s) => {
let unix = UnixSection { socket_dir: s };
builder.unix = Some(unix);
Ok(builder)
}
None => {
if cfg!(unix) {
bail!(SasdErrorKind::SettingsError(
"Missing socket directory".to_owned(),
))
} else {
Ok(builder)
}
}
}
}
}
#[derive(Debug)]
pub struct WindowsBuilder {
_builder: SettingsBuilder,
token_data_dir: Option<PathBuf>,
}
impl WindowsBuilder {
fn new(builder: SettingsBuilder) -> Self
{
WindowsBuilder {
_builder: builder,
token_data_dir: None,
}
}
pub fn token_data_dir(mut self, dir: String) -> SasdResult<Self>
{
self.token_data_dir = Some(self._builder.validate_path(dir)?);
Ok(self)
}
pub fn windows_done(self) -> SasdResult<SettingsBuilder>
{
let mut builder = self._builder;
match self.token_data_dir {
Some(t) => {
let windows = WindowsSection { token_data_dir: t };
builder.windows = Some(windows);
Ok(builder)
}
None => {
if cfg!(windows) {
bail!(SasdErrorKind::SettingsError(
"Missing token data directory".to_owned(),
))
} else {
Ok(builder)
}
}
}
}
}
#[derive(Debug)]
pub struct SettingsBuilder {
port: Option<u16>,
unix: Option<UnixSection>,
windows: Option<WindowsSection>,
}
impl SettingsBuilder {
pub fn new() -> Self
{
SettingsBuilder {
port: None,
unix: None,
windows: None,
}
}
fn from_unix_config(self, config: &mut SettingsConfig) -> SasdResult<Self>
{
let unix_config = mem::replace(&mut config.unix, None);
match unix_config {
Some(c) => self.unix().socket_dir(c.socket_dir)?.unix_done(),
None => {
if cfg!(unix) {
bail!(SasdErrorKind::SettingsError(
"Missing unix configuration".to_owned(),
))
}
Ok(self)
}
}
}
fn from_windows_config(self, config: &mut SettingsConfig)
-> SasdResult<Self>
{
let windows_config = mem::replace(&mut config.windows, None);
match windows_config {
Some(c) => {
self.windows()
.token_data_dir(c.token_data_dir)?
.windows_done()
}
None => {
if cfg!(windows) {
bail!(SasdErrorKind::SettingsError(
"Missing windows configuration".to_owned(),
))
}
Ok(self)
}
}
}
pub fn from_config(mut config: SettingsConfig) -> SasdResult<Settings>
{
let builder = SettingsBuilder::new();
let builder = builder.port(config.port)?;
let builder = builder.from_unix_config(&mut config)?;
let builder = builder.from_windows_config(&mut config)?;
builder.build()
}
pub fn unix(self) -> UnixBuilder
{
UnixBuilder::new(self)
}
pub fn windows(self) -> WindowsBuilder
{
WindowsBuilder::new(self)
}
pub fn port(mut self, port: u16) -> SasdResult<Self>
{
if port < 1024 {
let errmsg =
format!("port: value must not be less than 1024, got {}", port);
bail!(SasdErrorKind::SettingsError(errmsg))
}
self.port = Some(port);
Ok(self)
}
#[cfg(unix)]
pub fn build(self) -> SasdResult<Settings>
{
if self.unix.is_none() {
bail!(SasdErrorKind::SettingsError(
"Missing unix configuration".to_owned(),
))
}
// Must have port configured
let ret = match self.port {
Some(p) => {
Settings {
port: p,
unix: self.unix.unwrap(),
windows: self.windows,
}
}
None => {
bail!(SasdErrorKind::SettingsError(
"Missing config value: port".to_owned(),
))
}
};
Ok(ret)
}
#[cfg(windows)]
pub fn build(self) -> SasdResult<Settings>
{
if self.windows.is_none() {
bail!(SasdErrorKind::SettingsError(
"Missing windows configuration".to_owned(),
))
}
// Must have port configured
let ret = match self.port {
Some(p) => {
Settings {
port: p,
unix: self.unix,
windows: self.windows.unwrap(),
}
}
None => {
bail!(SasdErrorKind::SettingsError(
"Missing config value: port".to_owned(),
))
}
};
Ok(ret)
}
fn validate_path(&self, path: String) -> SasdResult<PathBuf>
{
let p = PathBuf::from(path);
let errmsg = if !p.exists() {
Some(format!("path does not exist: {}", p.display()))
} else if !p.is_dir() {
Some(format!("path is not a directory: {}", p.display()))
} else {
None
};
match errmsg {
Some(msg) => bail!(SasdErrorKind::SettingsError(msg)),
None => Ok(p),
}
}
}
// ===========================================================================
// Settings
// ===========================================================================
#[derive(Debug)]
pub struct WindowsSection {
pub token_data_dir: PathBuf,
}
#[derive(Debug)]
pub struct UnixSection {
pub socket_dir: PathBuf,
}
#[cfg(unix)]
#[derive(Debug)]
pub struct Settings {
pub port: u16,
unix: UnixSection,
windows: Option<WindowsSection>,
}
#[cfg(windows)]
#[derive(Debug)]
pub struct Settings {
pub port: u16,
unix: Option<UnixSection>,
windows: WindowsSection,
}
impl Settings {
#[cfg(unix)]
pub fn unix(&self) -> &UnixSection
{
&self.unix
}
#[cfg(windows)]
pub fn unix(&self) -> Option<&UnixSection>
{
match self.unix {
Some(ref u) => Some(u),
None => None,<|fim▁hole|> }
}
#[cfg(unix)]
pub fn windows(&self) -> Option<&WindowsSection>
{
match self.windows {
Some(ref w) => Some(w),
None => None,
}
}
#[cfg(windows)]
pub fn windows(&self) -> &WindowsSection
{
&self.windows
}
}
// ===========================================================================
// Scratch
// ===========================================================================
#[cfg(test)]
pub mod test {
// Helpers
pub mod helper {
use super::super::{Settings, UnixSection, WindowsSection};
#[cfg(unix)]
pub fn new_settings(port: u16, unix: UnixSection, windows: Option<WindowsSection>)
-> Settings
{
Settings {
port: port,
unix: unix,
windows: windows,
}
}
#[cfg(windows)]
pub fn new_settings(port: u16, unix: Option<UnixSection>, windows: WindowsSection)
-> Settings
{
Settings {
port: port,
unix: unix,
windows: windows,
}
}
}
// See also: test::settings::unixbuilder
mod unixbuilder {
#[cfg(unix)]
mod new {
use settings::{SettingsBuilder, UnixBuilder};
// Default value of socket_dir is None
#[test]
fn socket_dir_default_value()
{
// ----------------------------
// WHEN
// UnixBuilder is instantiated
// ----------------------------
let settings = SettingsBuilder::new();
let builder = UnixBuilder::new(settings);
// --------------------------------------------
// THEN
// the private socket_dir field is set to None
// --------------------------------------------
assert!(builder.socket_dir.is_none())
}
}
mod socket_dir {}
}
// See also: test::settings::windowsbuilder
mod windowsbuilder {
#[cfg(windows)]
mod new {
use settings::{SettingsBuilder, WindowsBuilder};
            // Default value of token_data_dir is None
#[test]
fn token_data_dir_default_value()
{
// ----------------------------
// WHEN
// WindowsBuilder is instantiated
// ----------------------------
let settings = SettingsBuilder::new();
let builder = WindowsBuilder::new(settings);
// --------------------------------------------
// THEN
// the private token_data_dir field is set to None
// --------------------------------------------
assert!(builder.token_data_dir.is_none())
}
}
}
mod settingsbuilder {
// TODO
// this is an integration test
#[cfg(windows)]
mod from_config {
use config::*;
use settings::{SettingsBuilder, SettingsConfig};
use std::fs::remove_dir_all;
use std::path::Path;
use tempdir::TempDir;
#[test]
fn build_settings()
{
let path = Path::new("files/test.toml");
let mut config = Config::new();
config.merge(File::from(path)).unwrap();
// Change token_data_dir to a tempdir
let mut s: SettingsConfig = config.try_into().unwrap();
let tempdir = TempDir::new("sasd").unwrap();
let dirpath = tempdir.into_path();
let dirpath_str =
dirpath.clone().into_os_string().into_string().unwrap();
if let Some(ref mut w) = s.windows {
w.token_data_dir = dirpath_str.clone();
}
let settings = SettingsBuilder::from_config(s).unwrap();
assert_eq!(
settings.windows().token_data_dir.display().to_string(),
dirpath_str
);
// Delete the temp dir
remove_dir_all(dirpath).unwrap();
}
}
}
}
// ===========================================================================
//
// ===========================================================================<|fim▁end|> | |
<|file_name|>collapseDuplicateDeclarations.js<|end_file_name|><|fim▁begin|>export default function collapseDuplicateDeclarations() {
return (root) => {
root.walkRules((node) => {
let seen = new Map()
let droppable = new Set([])
node.walkDecls((decl) => {
// This could happen if we have nested selectors. In that case the
// parent will loop over all its declarations but also the declarations
// of nested rules. With this we ensure that we are shallowly checking
// declarations.
if (decl.parent !== node) {
return
}
if (seen.has(decl.prop)) {
droppable.add(seen.get(decl.prop))
}<|fim▁hole|> seen.set(decl.prop, decl)
})
for (let decl of droppable) {
decl.remove()
}
})
}
}<|fim▁end|> | |
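// Illustrative note (editor's addition; behavior inferred from the code above): given a rule
// such as `.a { color: red; color: blue }`, the last declaration for each property wins, so
// the earlier duplicate `color: red` is removed and only `color: blue` survives.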
<|file_name|>mod_resolver.rs<|end_file_name|><|fim▁begin|>use std::io;
use std::path::PathBuf;
use super::read_config;
use crate::{FileName, Input, Session};
fn verify_mod_resolution(input_file_name: &str, exp_misformatted_files: &[&str]) {
let input_file = PathBuf::from(input_file_name);
let config = read_config(&input_file);
let mut session = Session::<io::Stdout>::new(config, None);
let report = session
.format(Input::File(input_file_name.into()))
.expect("Should not have had any execution errors");
let errors_by_file = &report.internal.borrow().0;
for exp_file in exp_misformatted_files {
assert!(errors_by_file.contains_key(&FileName::Real(PathBuf::from(exp_file))));
}
}
#[test]
fn nested_out_of_line_mods_loaded() {
// See also https://github.com/rust-lang/rustfmt/issues/4874
verify_mod_resolution(
"tests/mod-resolver/issue-4874/main.rs",
&[
"tests/mod-resolver/issue-4874/bar/baz.rs",<|fim▁hole|> "tests/mod-resolver/issue-4874/foo/qux.rs",
],
);
}
#[test]
fn out_of_line_nested_inline_within_out_of_line() {
// See also https://github.com/rust-lang/rustfmt/issues/5063
verify_mod_resolution(
"tests/mod-resolver/issue-5063/main.rs",
&[
"tests/mod-resolver/issue-5063/foo/bar/baz.rs",
"tests/mod-resolver/issue-5063/foo.rs",
],
);
}<|fim▁end|> | |
<|file_name|>economy.py<|end_file_name|><|fim▁begin|>from canvas.exceptions import ServiceError, ValidationError
from canvas.economy import InvalidPurchase
from drawquest import knobs
from drawquest.apps.palettes.models import get_palette_by_name, all_palettes
from drawquest.signals import balance_changed
def balance(user):
return int(user.kv.stickers.currency.get() or 0)
def _adjust_balance(user, amount):
if amount >= 0:
user.kv.stickers.currency.increment(amount)
else:
result = user.kv.stickers.currency.increment_ifsufficient(amount)
if not result['success']:
raise InvalidPurchase("Insufficient balance.")
balance_changed.send(None, user=user)
publish_balance(user)
def publish_balance(user):
user.redis.coin_channel.publish({'balance': balance(user)})
def credit(user, amount):
_adjust_balance(user, amount)
<|fim▁hole|>def debit(user, amount):
_adjust_balance(user, -amount)
def credit_first_quest(user):
credit(user, knobs.REWARDS['first_quest'])
def credit_quest_of_the_day_completion(user):
credit(user, knobs.REWARDS['quest_of_the_day'])
def credit_archived_quest_completion(user):
credit(user, knobs.REWARDS['archived_quest'])
def credit_personal_share(user):
credit(user, knobs.REWARDS['personal_share'])
def credit_streak(user, streak):
credit(user, knobs.REWARDS['streak_{}'.format(streak)])
def credit_star(user):
user.kv.stickers_received.increment(1)
credit(user, knobs.REWARDS['star'])
def purchase_palette(user, palette):
if isinstance(palette, basestring):
        palette = get_palette_by_name(palette)
if palette in user.redis.palettes:
raise InvalidPurchase("You've already bought this palette.")
debit(user, palette.cost)
user.redis.palettes.unlock(palette)<|fim▁end|> | |
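# Editor's sketch (hypothetical, not part of the original module): a typical flow credits
# coins for an action and then spends them on a palette; amounts come from drawquest.knobs.
#
#     credit_star(user)                # +REWARDS['star'] coins, bumps stickers_received
#     purchase_palette(user, palette)  # debits palette.cost, raises InvalidPurchase if short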
<|file_name|>consistencygroups.py<|end_file_name|><|fim▁begin|># Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The consistencygroups api."""
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import consistencygroups as consistencygroup_views
from cinder.api import xmlutil
from cinder import consistencygroup as consistencygroupAPI
from cinder import exception
from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder import utils
LOG = logging.getLogger(__name__)
def make_consistencygroup(elem):
elem.set('id')
elem.set('status')
elem.set('availability_zone')
elem.set('created_at')
elem.set('name')
elem.set('description')
def make_consistencygroup_from_src(elem):
elem.set('id')
elem.set('status')
elem.set('created_at')
elem.set('name')
elem.set('description')
elem.set('cgsnapshot_id')
class ConsistencyGroupTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('consistencygroup',
selector='consistencygroup')
make_consistencygroup(root)
alias = Consistencygroups.alias
namespace = Consistencygroups.namespace
return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class ConsistencyGroupsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('consistencygroups')
elem = xmlutil.SubTemplateElement(root, 'consistencygroup',
selector='consistencygroups')
make_consistencygroup(elem)
alias = Consistencygroups.alias
namespace = Consistencygroups.namespace
return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class ConsistencyGroupFromSrcTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('consistencygroup-from-src',
selector='consistencygroup-from-src')
make_consistencygroup_from_src(root)
alias = Consistencygroups.alias
namespace = Consistencygroups.namespace
return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
<|fim▁hole|> dom = utils.safe_minidom_parse_string(string)
consistencygroup = self._extract_consistencygroup(dom)
return {'body': {'consistencygroup': consistencygroup}}
def _extract_consistencygroup(self, node):
consistencygroup = {}
consistencygroup_node = self.find_first_child_named(
node,
'consistencygroup')
attributes = ['name',
'description']
for attr in attributes:
if consistencygroup_node.getAttribute(attr):
consistencygroup[attr] = consistencygroup_node.\
getAttribute(attr)
return consistencygroup
class CreateFromSrcDeserializer(wsgi.MetadataXMLDeserializer):
def default(self, string):
dom = utils.safe_minidom_parse_string(string)
consistencygroup = self._extract_consistencygroup(dom)
retval = {'body': {'consistencygroup-from-src': consistencygroup}}
return retval
def _extract_consistencygroup(self, node):
consistencygroup = {}
consistencygroup_node = self.find_first_child_named(
node, 'consistencygroup-from-src')
attributes = ['cgsnapshot', 'name', 'description']
for attr in attributes:
if consistencygroup_node.getAttribute(attr):
consistencygroup[attr] = (
consistencygroup_node.getAttribute(attr))
return consistencygroup
class ConsistencyGroupsController(wsgi.Controller):
"""The ConsistencyGroups API controller for the OpenStack API."""
_view_builder_class = consistencygroup_views.ViewBuilder
def __init__(self):
self.consistencygroup_api = consistencygroupAPI.API()
super(ConsistencyGroupsController, self).__init__()
@wsgi.serializers(xml=ConsistencyGroupTemplate)
def show(self, req, id):
"""Return data about the given consistency group."""
LOG.debug('show called for member %s', id)
context = req.environ['cinder.context']
try:
consistencygroup = self.consistencygroup_api.get(
context,
group_id=id)
except exception.ConsistencyGroupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
return self._view_builder.detail(req, consistencygroup)
def delete(self, req, id, body):
"""Delete a consistency group."""
LOG.debug('delete called for member %s', id)
context = req.environ['cinder.context']
force = False
if body:
cg_body = body['consistencygroup']
force = cg_body.get('force', False)
LOG.info(_LI('Delete consistency group with id: %s'), id,
context=context)
try:
group = self.consistencygroup_api.get(context, id)
self.consistencygroup_api.delete(context, group, force)
except exception.ConsistencyGroupNotFound:
msg = _("Consistency group %s could not be found.") % id
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidConsistencyGroup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
return webob.Response(status_int=202)
@wsgi.serializers(xml=ConsistencyGroupsTemplate)
def index(self, req):
"""Returns a summary list of consistency groups."""
return self._get_consistencygroups(req, is_detail=False)
@wsgi.serializers(xml=ConsistencyGroupsTemplate)
def detail(self, req):
"""Returns a detailed list of consistency groups."""
return self._get_consistencygroups(req, is_detail=True)
def _get_consistencygroups(self, req, is_detail):
"""Returns a list of consistency groups through view builder."""
context = req.environ['cinder.context']
consistencygroups = self.consistencygroup_api.get_all(context)
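# common.limited applies the request's 'limit'/'offset' query parameters
# before the list is handed to the view builder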
limited_list = common.limited(consistencygroups, req)
if is_detail:
consistencygroups = self._view_builder.detail_list(req,
limited_list)
else:
consistencygroups = self._view_builder.summary_list(req,
limited_list)
return consistencygroups
@wsgi.response(202)
@wsgi.serializers(xml=ConsistencyGroupTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Create a new consistency group."""
LOG.debug('Creating new consistency group %s', body)
if not self.is_valid_body(body, 'consistencygroup'):
raise exc.HTTPBadRequest()
context = req.environ['cinder.context']
try:
consistencygroup = body['consistencygroup']
except KeyError:
msg = _("Incorrect request body format")
raise exc.HTTPBadRequest(explanation=msg)
name = consistencygroup.get('name', None)
description = consistencygroup.get('description', None)
volume_types = consistencygroup.get('volume_types', None)
if not volume_types:
msg = _("volume_types must be provided to create "
"consistency group %(name)s.") % {'name': name}
raise exc.HTTPBadRequest(explanation=msg)
availability_zone = consistencygroup.get('availability_zone', None)
LOG.info(_LI("Creating consistency group %(name)s."),
{'name': name},
context=context)
try:
new_consistencygroup = self.consistencygroup_api.create(
context, name, description, volume_types,
availability_zone=availability_zone)
except exception.InvalidConsistencyGroup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.InvalidVolumeType as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.ConsistencyGroupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
retval = self._view_builder.summary(
req,
dict(new_consistencygroup.iteritems()))
return retval
@wsgi.response(202)
@wsgi.serializers(xml=ConsistencyGroupFromSrcTemplate)
@wsgi.deserializers(xml=CreateFromSrcDeserializer)
def create_from_src(self, req, body):
"""Create a new consistency group from a source.
The source can be a snapshot. It could be extended
in the future to support other sources. Note that
this does not require volume_types, unlike the "create"
API above.
"""
LOG.debug('Creating new consistency group %s.', body)
if not self.is_valid_body(body, 'consistencygroup-from-src'):
raise exc.HTTPBadRequest()
context = req.environ['cinder.context']
try:
consistencygroup = body['consistencygroup-from-src']
except KeyError:
msg = _("Incorrect request body format.")
raise exc.HTTPBadRequest(explanation=msg)
name = consistencygroup.get('name', None)
description = consistencygroup.get('description', None)
cgsnapshot_id = consistencygroup.get('cgsnapshot_id', None)
if not cgsnapshot_id:
msg = _("Cgsnapshot id must be provided to create "
"consistency group %(name)s from source.") % {'name': name}
raise exc.HTTPBadRequest(explanation=msg)
LOG.info(_LI("Creating consistency group %(name)s from cgsnapshot "
"%(snap)s."),
{'name': name, 'snap': cgsnapshot_id},
context=context)
try:
new_consistencygroup = self.consistencygroup_api.create_from_src(
context, name, description, cgsnapshot_id)
except exception.InvalidConsistencyGroup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.CgSnapshotNotFound as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.ConsistencyGroupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
except exception.CinderException as error:
raise exc.HTTPBadRequest(explanation=error.msg)
retval = self._view_builder.summary(
req,
dict(new_consistencygroup.iteritems()))
return retval
@wsgi.serializers(xml=ConsistencyGroupTemplate)
def update(self, req, id, body):
"""Update the consistency group.
Expected format of the input parameter 'body':
{
"consistencygroup":
{
"name": "my_cg",
"description": "My consistency group",
"add_volumes": "volume-uuid-1,volume-uuid-2,..."
"remove_volumes": "volume-uuid-8,volume-uuid-9,..."
}
}
"""
LOG.debug('Update called for consistency group %s.', id)
if not body:
msg = _("Missing request body.")
raise exc.HTTPBadRequest(explanation=msg)
if not self.is_valid_body(body, 'consistencygroup'):
msg = _("Incorrect request body format.")
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['cinder.context']
consistencygroup = body.get('consistencygroup', None)
name = consistencygroup.get('name', None)
description = consistencygroup.get('description', None)
add_volumes = consistencygroup.get('add_volumes', None)
remove_volumes = consistencygroup.get('remove_volumes', None)
if (not name and not description and not add_volumes
and not remove_volumes):
msg = _("Name, description, add_volumes, and remove_volumes "
"can not be all empty in the request body.")
raise exc.HTTPBadRequest(explanation=msg)
LOG.info(_LI("Updating consistency group %(id)s with name %(name)s "
"description: %(description)s add_volumes: "
"%(add_volumes)s remove_volumes: %(remove_volumes)s."),
{'id': id, 'name': name,
'description': description,
'add_volumes': add_volumes,
'remove_volumes': remove_volumes},
context=context)
try:
group = self.consistencygroup_api.get(context, id)
self.consistencygroup_api.update(
context, group, name, description,
add_volumes, remove_volumes)
except exception.ConsistencyGroupNotFound:
msg = _("Consistency group %s could not be found.") % id
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidConsistencyGroup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
return webob.Response(status_int=202)
class Consistencygroups(extensions.ExtensionDescriptor):
"""consistency groups support."""
name = 'Consistencygroups'
alias = 'consistencygroups'
namespace = 'http://docs.openstack.org/volume/ext/consistencygroups/api/v1'
updated = '2014-08-18T00:00:00+00:00'
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
Consistencygroups.alias, ConsistencyGroupsController(),
collection_actions={'detail': 'GET', 'create_from_src': 'POST'},
member_actions={'delete': 'POST', 'update': 'PUT'})
resources.append(res)
return resources<|fim▁end|> | class CreateDeserializer(wsgi.MetadataXMLDeserializer):
def default(self, string): |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""
Util classes
------------
Classes which represent data types useful for the package pySpatialTools.
<|fim▁hole|>
## Spatial elements collectors
from spatialelements import SpatialElementsCollection, Locations
## Membership relations
from Membership import Membership<|fim▁end|> | """ |
<|file_name|>tailhook-config.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from libqtile.manager import Key, Click, Drag, Screen, Group
from libqtile.command import lazy
from libqtile import layout, bar, widget, hook
from libqtile import xcbq
xcbq.keysyms["XF86AudioRaiseVolume"] = 0x1008ff13
xcbq.keysyms["XF86AudioLowerVolume"] = 0x1008ff11
xcbq.keysyms["XF86AudioMute"] = 0x1008ff12
def window_sorter(win):
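# Assigns windows to TreeTab sections by matching substrings of the window
# title; wired to the TreeTab layout via lazy.layout.sort_windows() on the
# mod+r binding below.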
patterns = (
('Яндекс.Почта', 'E-mail'),
('Gmail', 'E-mail'),
('SquirrelMail', 'E-mail'),
('zeromq', 'Docs'),
('PyYAML', 'Docs'),<|fim▁hole|> ('Guide', 'Docs'),
)
for k, v in patterns:
if k in win.name:
return v
mod = "mod4"
keys = [
Key([mod], "j",
lazy.layout.down()),
Key([mod], "k",
lazy.layout.up()),
Key([mod, "shift"], "j",
lazy.layout.move_down()),
Key([mod, "shift"], "k",
lazy.layout.move_up()),
Key([mod, "control"], "j",
lazy.layout.section_down()),
Key([mod, "control"], "k",
lazy.layout.section_up()),
Key([mod], "h",
lazy.layout.collapse_branch()), # for tree layout
Key([mod], "l",
lazy.layout.expand_branch()), # for tree layout
Key([mod], "r",
lazy.layout.sort_windows(window_sorter)), # for tree layout
Key([mod, "shift"], "h",
lazy.layout.move_left()),
Key([mod, "shift"], "l",
lazy.layout.move_right()),
Key([mod, "control"], "l",
lazy.layout.increase_ratio()),
Key([mod, "control"], "h",
lazy.layout.decrease_ratio()),
Key([mod], "comma",
lazy.layout.increase_nmaster()),
Key([mod], "period",
lazy.layout.decrease_nmaster()),
Key([mod], "Tab",
lazy.group.next_window()),
Key([mod, "shift"], "Tab",
lazy.group.prev_window()),
Key([mod, "shift"], "Return",
lazy.layout.rotate()),
Key([mod, "shift"], "space",
lazy.layout.toggle_split()),
Key([mod], "w",
lazy.to_screen(0)),
Key([mod], "e",
lazy.to_screen(1)),
Key([mod], "space",
lazy.nextlayout()),
Key([mod], "c",
lazy.window.kill()),
Key([mod], "t",
lazy.window.disable_floating()),
Key([mod, "shift"], "t",
lazy.window.enable_floating()),
Key([mod], "p",
lazy.spawn("exec dmenu_run "
"-fn 'Consolas:size=13' -nb '#000000' -nf '#ffffff' -b")),
Key([mod], "b",
lazy.spawn("~/note/conf/uzbl/open_history")),
Key([mod, "shift"], "b",
lazy.spawn("~/note/conf/uzbl/open_bookmark")),
Key([mod], "s",
lazy.spawn("~/note/conf/uzbl/open_ddg")),
Key([mod, "shift"], "s",
lazy.spawn("~/note/conf/uzbl/open_goog")),
Key([mod], "q",
lazy.spawn('xtrlock')),
Key([mod], "y",
lazy.spawn('xclip -o -selection primary | xclip -selection clipboard')),
Key([mod], "u",
lazy.spawn('xclip -o -selection clipboard | xclip -selection primary')),
Key([], "XF86AudioRaiseVolume",
lazy.spawn("amixer sset Master 5%+")),
Key([], "XF86AudioLowerVolume",
lazy.spawn("amixer sset Master 5%-")),
Key([], "XF86AudioMute",
lazy.spawn("amixer sset Master toggle")),
Key(["shift"], "XF86AudioRaiseVolume",
lazy.spawn("mpc volume +5")),
Key(["shift"], "XF86AudioLowerVolume",
lazy.spawn("mpc volume -5")),
Key(["shift"], "XF86AudioMute",
lazy.spawn("mpc toggle")),
Key([mod], "Left",
lazy.prevgroup()),
Key([mod], "Right",
lazy.nextgroup()),
]
mouse = [
Drag([mod], "Button1", lazy.window.set_position_floating(),
start=lazy.window.get_position()),
Drag([mod], "Button3", lazy.window.set_size_floating(),
start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front())
]
border = dict(
border_normal='#808080',
border_width=2,
)
layouts = [
layout.Tile(**border),
layout.Max(),
layout.Stack(**border),
layout.TreeTab(sections=['Surfing', 'E-mail', 'Docs', 'Incognito']),
layout.Slice('left', 320, wmclass='pino',
fallback=layout.Slice('right', 320, role='roster',
fallback=layout.Stack(1, **border))),
layout.Slice('left', 192, role='gimp-toolbox',
fallback=layout.Slice('right', 256, role='gimp-dock',
fallback=layout.Stack(1, **border))),
]
floating_layout = layout.Floating(**border)
groups = [
Group('1'),
Group('2', layout='max'),
Group('3'),
Group('4', layout='treetab'),
Group('5'),
Group('6'),
Group('7'),
Group('8'),
Group('9'),
]
for i in groups:
keys.append(
Key([mod], i.name, lazy.group[i.name].toscreen())
)
keys.append(
Key([mod, "shift"], i.name, lazy.window.togroup(i.name))
)
screens = [
Screen(
top = bar.Bar(
[
widget.GroupBox(borderwidth=2,
font='Consolas',fontsize=18,
padding=1, margin_x=1, margin_y=1),
widget.Sep(),
widget.WindowName(
font='Consolas',fontsize=18, margin_x=6),
widget.Sep(),
widget.Battery(
font='Consolas',fontsize=18, margin_x=6),
widget.Sep(),
widget.CPUGraph(),
widget.MemoryGraph(),
widget.SwapGraph(foreground='C02020'),
widget.Sep(),
widget.Systray(),
widget.Sep(),
widget.Clock('%H:%M:%S %d.%m.%Y',
font='Consolas', fontsize=18, padding=6),
],
24,
),
),
]
@hook.subscribe.client_new
def dialogs(window):
if(window.window.get_wm_type() == 'dialog'
or window.window.get_wm_transient_for()):
window.floating = True<|fim▁end|> | ('documentation', 'Docs'),
('-ietf-', 'Docs'),
('GNOME Live!', 'Docs'), |
<|file_name|>animationToggle.js<|end_file_name|><|fim▁begin|>/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import { useCallback } from '@googleforcreators/react';
import { __ } from '@googleforcreators/i18n';
import { trackEvent } from '@googleforcreators/tracking';
import { Icons } from '@googleforcreators/design-system';
import { STORY_ANIMATION_STATE } from '@googleforcreators/animation';
/**
* Internal dependencies
*/
import { useStory } from '../../../app';
import PageMenuButton from './pageMenuButton';
function AnimationToggle() {
const { animationState, updateAnimationState } = useStory(
({ state: { animationState }, actions: { updateAnimationState } }) => {
return {
animationState,
updateAnimationState,
};
}
);
const isPlaying = [
STORY_ANIMATION_STATE.PLAYING,
STORY_ANIMATION_STATE.PLAYING_SELECTED,
].includes(animationState);
const tooltip = isPlaying
? __('Stop', 'web-stories')
: __('Play', 'web-stories');
const label = isPlaying
? __('Stop Page Animations', 'web-stories')
: __('Play Page Animations', 'web-stories');
const shortcut = 'mod+k';
const Icon = isPlaying ? Icons.StopOutline : Icons.PlayOutline;
const toggleAnimationState = useCallback(() => {
updateAnimationState({
animationState: isPlaying
? STORY_ANIMATION_STATE.RESET
: STORY_ANIMATION_STATE.PLAYING,
});
trackEvent('canvas_play_animations', {
status: isPlaying ? 'stop' : 'play',<|fim▁hole|> });
}, [isPlaying, updateAnimationState]);
return (
<PageMenuButton
title={tooltip}
shortcut={shortcut}
onClick={toggleAnimationState}
aria-label={label}
>
<Icon />
</PageMenuButton>
);
}
export default AnimationToggle;<|fim▁end|> | |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.views.generic import TemplateView
#from apiclient.discovery import build
from googleapiclient.discovery import build
from .utils import SearchResults
from . import *
class SearchView(TemplateView):
template_name = "googlesearch/search_results.html"
def get_context_data(self, **kwargs):
context = super(SearchView, self).get_context_data(**kwargs)
service = build("customsearch", GOOGLE_SEARCH_API_VERSION,
developerKey=GOOGLE_SEARCH_API_KEY)
#add a "try" block to see if googleapiclient throws a 400 error
try:
results = service.cse().list(
q=self.request.GET.get('q', ''),
start=self.page_to_index(),
num=GOOGLE_SEARCH_RESULTS_PER_PAGE,
cx=GOOGLE_SEARCH_ENGINE_ID,
).execute()
results = SearchResults(results)
pages = self.calculate_pages()
#if googleapiclient raises an error, we need to catch it here
except:
#rerun the search from page 1 instead of the user-supplied page
results = service.cse().list(
q=self.request.GET.get('q', ''),
start=1,
num=GOOGLE_SEARCH_RESULTS_PER_PAGE,
cx=GOOGLE_SEARCH_ENGINE_ID,
).execute()
#set some default values used for the context below
page = 1
# previous, current, next pages
pages = [0, 1, 2]
results = SearchResults(results)
""" Set some defaults """
context.update({
'items': [],
'total_results': 0,
'current_page': 0,
'prev_page': 0,
'next_page': 0,
'search_terms': self.request.GET.get('q', ''),
'error': results
})
""" Now parse the results and send back some
useful data """
context.update({
'items': results.items,
'total_results': results.total_results,
'current_page': pages[1],
'prev_page': pages[0],
'next_page': pages[2],
'search_terms': results.search_terms,
})<|fim▁hole|>
return context
def calculate_pages(self):
""" Returns a tuple consisting of
the previous page, the current page,
and the next page """
current_page = int(self.request.GET.get('p', 1))
return (current_page - 1, current_page, current_page + 1)
def page_to_index(self, page=None):
""" Converts a page to the start index """
if page is None:
page = self.request.GET.get('p', 1)
return int(page) * int(GOOGLE_SEARCH_RESULTS_PER_PAGE) + 1 - int(GOOGLE_SEARCH_RESULTS_PER_PAGE)<|fim▁end|> | |
<|file_name|>local_data_table.js<|end_file_name|><|fim▁begin|>import $ from "jquery";
import _ from "underscore";
import "bootstrap";
import "jquery-deparam";
import "./configure_datatables";
import View from "../view";
import Cache from "../cache";
import ColumnManager from "./column_manager";
import SelectionManager from "./selection_manager";
import LockManager from "./lock_manager";
import FilterView from "./filter_view";
import { extractColumnCSSClass, toColumnCSSClass } from "../utils/css";
import cidMap from "./cid_map";
import Config from "./config";
class LocalDataTable extends View {
constructor(options) {
super(options);
this.options = options || {};
// copy over certain properties from options to the table itself
_.extend(this, _.pick(this.options, ["selectedIds", "paginate"]));
_.bindAll(this, "_onRowCreated", "_onBulkHeaderClick", "_onBulkRowClick", "_bulkCheckboxAdjust", "_onDraw",
"_onColumnVisibilityChange", "_onColumnReorder");
this._initKeyWithValueOrDefault('urlPagination', this.options.urlPagination, true);
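// URL-driven pagination is only meaningful when the table paginates at all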
this.urlPagination = this.urlPagination && this.paginate;
this.cache = new Cache();
this.selectionManager = new SelectionManager();
this.rowClass = this.options.rowClass || this._resolveRowClass();
this.config = this.options.config || (this._configFromPlugin && this._configFromPlugin()) || new Config();
this._applyDefaults();
this._columnManager = new ColumnManager(this);
this._lockManager = new LockManager(this);
this.listenTo(this.collection, "add", this._onAdd);
this.listenTo(this.collection, "remove", this._onRemove);
this.listenTo(this.collection, "reset", this._onReset);
}
_initKeyWithValueOrDefault(key, value, defaultValue) {
if (this[key] === undefined) {
if (value !== undefined) {
this[key] = value;
} else {
this[key] = defaultValue;
}
}
}
availableColumnTypes() {
return this.config.columnTypes;
}
// apply filtering
filter(...args) {
this._lockManager.ensureUnlocked("filter");
this.dataTable.fnFilter(...args);
}
// change pagination
page(...args) {
this._lockManager.ensureUnlocked("page");
return this.dataTable.fnPageChange(...args);
}
// sort specific columns
sort(...args) {
this._lockManager.ensureUnlocked("sort");
return this.dataTable.fnSort(...args);
}
selectedModels() {
this._lockManager.ensureUnlocked("bulk");
return this.selectionManager.models();
}
render() {
this.$el.html(this.template);
this._dataTableCreate();
this._initBulkHandling();
this._enableRowHighlight();
this.paginate && this._initPaginationHandling();
this._triggerChangeSelection();
this.urlPagination && this._setupPaginationHistory();
this.trigger("render");
this._afterRender();
return this;
}
renderColumn(id) {
const config = this._columnManager.columnConfigForId(id);
if (!config) {
throw new Error("column not found");
}
this.cache.each(row => {
row.renderColumnByConfig(config);
});
}
selectAllVisible(state) {
this._lockManager.ensureUnlocked("bulk");
this.bulkCheckbox.prop("checked", state);
_.each(this._visibleRowsOnCurrentPage(), function(row) {
this._setRowSelectedState(row.model, row, state);
}, this);
this._triggerChangeSelection({ selectAllVisible: state });
}
selectAllMatching() {
this._lockManager.ensureUnlocked("bulk");
if (!this.paginate) throw new Error("#selectAllMatching can only be used with paginated tables");
_.each(this._allMatchingModels(), function(model) {
this._setRowSelectedState(model, this.cache.get(model), true);
}, this);
this._triggerChangeSelection();
}
matchingCount() {
this._lockManager.ensureUnlocked("bulk");
return this.dataTable.fnSettings().aiDisplay.length;
}
totalRecordsCount() {
this._lockManager.ensureUnlocked("bulk");
return this.dataTable.fnSettings().fnRecordsTotal();
}
pageLimit() {
return this.dataTable.fnSettings()._iDisplayLength;
}
columnRequired(state, id) {
if (!state && this._columnManager.columnConfigForId(id).required) {
throw new Error("can not disable visibility when column is required");
}
}
columnVisibility(attr, state) {
if (arguments.length === 1) {
// getter
return this._columnManager.visibility.get(attr);
} else {
this.columnRequired(state, attr);
this._columnManager.visibility.set(attr, state);
state && this.renderColumn(attr);
}
}
// takes a hash of { columnAttr: columnState, ... }
setColumnVisibilities(columns) {
_.each(columns, this.columnRequired, this);
this._columnManager.visibility.set(columns);
_.each(columns, function(state, attr) {
state && this.renderColumn(attr);
}, this);
}
restoreColumnVisibility() {
_.each(this.columnsConfig(), function(column) {
if (column.id) {
this.columnVisibility(column.id, column.visible);
}
}, this);
}
columnOrder(order) {
if (this._reorderableColumnsEnabled()) {
this._changeColumnOrder(order);
}
}
restoreColumnOrder() {
if (this._reorderableColumnsEnabled()) {
this._changeColumnOrder({ reset: true });
}
}
changeSorting(sorting) {
this._columnManager.changeSorting(sorting);
if (this.dataTable) {
const normalizeSortingColumn = sort => _.first(sort, 2);
sorting = _.map(this._columnManager.dataTableSortingConfig(), normalizeSortingColumn);
let currentSorting = _.map(this.dataTable.fnSettings().aaSorting, normalizeSortingColumn);
if (!_.isEqual(currentSorting, sorting)) {
this.dataTable.fnSort(sorting);
}
}
}
lock(name, state) {
if (arguments.length === 1) {
// getter
return this._lockManager.lock(name);
} else if (arguments.length === 2) {
// setter
this._lockManager.lock(name, state);
} else {
throw new Error("#lock requires a name and/or a state");
}
}
columnsConfig() {
return this._columnManager.columnsConfig();
}
configGenerator() {
return this._columnManager._configGenerator;
}
disableFilters(errorMessage) {
const columns = this.columnsConfig();
for (const c in columns) {
if (!columns[c].filter) continue;
this.child(`filter-${columns[c].id}`).disableFilter(errorMessage);
}
}
enableFilters() {
const columns = this.columnsConfig();
for (const c in columns) {
if (!columns[c].filter) continue;
this.child(`filter-${columns[c].id}`).enableFilter();
}
}
updateAjaxSource() {
// get ajax url
const ajaxURL = this.dataTable.fnSettings().sAjaxSource;
// get the endpoint of ajax url
const splitUrl = ajaxURL.split("?");
const endpoint = splitUrl[0];
// Early exit if no params
if (!splitUrl[1]) {
return;
}
// get parameters of ajax url
const params = $.deparam(splitUrl[1]);
// make ext_filter_json param the same as the current url, now with new filters
params.ext_filter_json = JSON.stringify(this.configGenerator()._getUrlFilterParams());
// Build new url with old endpoint but new params
const newURL = `${endpoint}?${$.param(params)}`;
// Update datatable ajax source
this.dataTable.fnSettings().sAjaxSource = newURL;
// trigger "filter:column" event
this._onColumnFilter();
}
columnElements(selector) {
const selectorString = selector || "";
return this.$("table").find(`thead tr th${selectorString}`);
}
// Private APIs
_enableReorderableColumns() {
const self = this;
self._colReorder = new $.fn.dataTable.ColReorder(this.dataTable, {
fnReorderCallback(fromIndex, toIndex) {
// notify that columns have been externally rearranged
self._columnManager.columnsSwapped(fromIndex, toIndex);
// pass event up
self._onColumnReorder();
},
bAddFixed: false,
bResizeTableWrapper: false,
allowHeaderDoubleClick: false,
allowResize: self.resizableColumns,
// iFixedColumns configures how many columns should be unmovable starting from left
// if the first column is the bulk column we make it unmovable
iFixedColumns: this.$el.find(this.BULK_COLUMN_HEADER_CHECKBOX_SELECTOR).length
});
}
_renderGrandTotalsRow() {
let hasGrandTotalsCell = false;
// if dataTable is available AND we have a totals row
if (this.dataTable && this.totalsRow) {
// If we don't have a footer rendered, render it
if (this.dataTable.find("tfoot").length < 1) {
this.dataTable.append("<tfoot><tr></tr></tfoot>");
}
// Clear the footer
const $grandTotalsRow = this.dataTable.find("tfoot tr");
$grandTotalsRow.html("");
// Iterate over the current columns config
this.columnsConfig().forEach(col => {
if (this.columnVisibility(col.id) || col.bulk) {
const node = $("<td>");
// If column is a non totals column, draw "Grand totals" on the first one and the rest are empty
if (this.isNontotalsColumn && this.isNontotalsColumn(col)) {
if (hasGrandTotalsCell) {
$grandTotalsRow.append(node);
} else {
hasGrandTotalsCell = true;
col.grandTotalRenderer ? col.grandTotalRenderer.apply(this.totalsRow, [node, col]) : node.text("Grand Total");
$grandTotalsRow.append(node);
}
} else {
(col.grandTotalRenderer || col.renderer).apply(this.totalsRow, [node, col]);
$grandTotalsRow.append(node);
}
}
});
}
}
_renderHeaderGroup() {
if (!_.isEmpty(this.rowClass.prototype.columnGroupDefinitions)) {
const columnGroups = this.rowClass.prototype.columnGroupDefinitions;
let tr = this.$("table").find('thead tr.header-groups-row');
if (tr.length === 0) {
tr = $('<tr class="header-groups-row">');
} else {
tr.empty();
}
const uniqueHeaderGroupDataIndex = {};
_.each(this._columnManager._visibilitySummary().visible, col => {
const columnConfig = _.findWhere(this._columnManager.columnsConfig(), { attr: col });
let headerGroupDataIndex = columnConfig.headerGroupDataIndex;
let columnGroupConfig = _.findWhere(columnGroups, { "headerGroupDataIndex": headerGroupDataIndex });
if (!columnGroupConfig || !headerGroupDataIndex) {
console.log(`Unable to find a matching headerGroupDataIndex for ${columnConfig.attr}`);
columnGroupConfig = { colspan: 1, headerName: '' };
headerGroupDataIndex = columnConfig.title;
}
if (columnGroupConfig && !uniqueHeaderGroupDataIndex[headerGroupDataIndex]) {
uniqueHeaderGroupDataIndex[headerGroupDataIndex] = true;
tr.append(`<th colspan="${columnGroupConfig.colspan}" class="header-groups">${columnGroupConfig.headerName}</th>`);
}
});
this.$("table").find('thead').prepend(tr);
}
}
// Changes or resets the column order.
// When called with no args, returns the current order.
// Call with { reset : true } to have it restore column order to initial configuration
// Provide array of indexes as first argument to have it reordered by that
_changeColumnOrder(order) {
if (this._colReorder) {
const columnsOrig = _.clone(this.dataTable.fnSettings().aoColumns);
if (_.isArray(order)) {
this._colReorder.fnOrder(order);
} else if (_.has(order, 'reset') && order.reset) {
this._colReorder.fnReset();
} else {
return this._colReorder.fnOrder();
}
// restore columnsConfig order to match the underlying order from dataTable
const columnsConfig = this.columnsConfig();
const columnsConfigOrig = _.clone(columnsConfig);
// reset config
columnsConfig.splice(0, columnsConfig.length);
// fill in config in correct order
_.each(this.dataTable.fnSettings().aoColumns, tableColumn => {
const oldIndex = columnsOrig.indexOf(tableColumn);
if (oldIndex !== -1) {
columnsConfig.push(columnsConfigOrig[oldIndex]);
}
});
this._columnManager.columnsReordered();
}
}
_allMatchingModels() {
// returns all models matching the current filter criteria, regardless of pagination
// since we are using deferred rendering, the dataTable.$ and dataTable._ methods don't return all
// matching data since some of the rows may not have been rendered yet.
// here we use the aiDisplay property to get indices of the data matching the current filtering
// and return the associated models
return _.map(this.dataTable.fnSettings().aiDisplay, function(index) {
return this.collection.at(index);
}, this);
}
_applyDefaults() {
_.defaults(this, {
sorting: [[0, this.paginate ? "desc" : "asc"]]
});
if (!this.objectName.plural) {
throw new Error("plural object name must be provided");
} else if (!this.objectName.singular) {
throw new Error("singular object name must be provided");
}
}
// returns row objects that have not been filtered out and are on the current page
_visibleRowsOnCurrentPage() {
// non-paginated tables will return all rows, ignoring the page param
const visibleRowsCurrentPageArgs = { filter: "applied", page: "current" };
return this.dataTable.$("tr", visibleRowsCurrentPageArgs).map((index, node) => $(node).data("row"));
}
_setRowSelectedState(model, row, state) {
this.selectionManager.process(model, state);
// the row may not exist yet as we utilize deferred rendering. we track the model as
// selected and make the ui reflect this when the row is finally created
row && row.bulkState(state);
}
_dataTableCreate() {
this.dataTable = this.$("table").dataTable(this._dataTableConfig());
this._setupSelect2PaginationAttributes();
this._installSortInterceptors();
this.filteringEnabled && this._setupFiltering();
if (this._reorderableColumnsEnabled()) {
this._enableReorderableColumns();
}
this._columnManager.on("change:visibility", this._onColumnVisibilityChange);
this._columnManager.applyVisibilityPreferences();
this._renderHeaderGroup();
if (this.collection.length) this._onReset(this.collection);
// if resizeable, add resizeable class
if (this._colReorder && this._colReorder.s.allowResize) {
this.$("table").addClass("dataTable-resizeableColumns");
}
if (this.striped) {
this.$("table").addClass("table-striped");
}
}
_areAllVisibleRowsSelected() {
let allSelected;
const visibleRows = this._visibleRowsOnCurrentPage();
if (visibleRows.length) {
allSelected = _.all(visibleRows, row => row.bulkState() === true);
} else {
// have no selections does not count as having all selected
allSelected = false;
}
return allSelected;
}
// when changing between pages / filters we set the header bulk checkbox state based on whether all newly visible rows are selected or not
// note: we defer execution as the "page" and "filter" events are called before new rows are swapped in
// this allows our code to run after the all the new rows are inserted
_bulkCheckboxAdjust() {
if (!this.bulkCheckbox) return;
_.defer(() => {
this.bulkCheckbox.prop("checked", this._areAllVisibleRowsSelected());
});
}
// Do not enable when columnGroupDefinitions is defined and not empty.
_reorderableColumnsEnabled() {
return this.reorderableColumns && _.isEmpty(this.rowClass.prototype.columnGroupDefinitions);
}
_initPaginationHandling() {
this.dataTable.on("page", this._bulkCheckboxAdjust);
}
_setQueryStringPageFromDataTable() {
let page = this.dataTable.fnPagingInfo().iPage;
if (page !== this._parsePageNumberFromQueryString() - 1) {
history.pushState({}, "pagination", this._createQueryStringWithPageNumber(page + 1));
}
}
_setupPaginationHistory() {
this.dataTable.on("page", () => {
this._setQueryStringPageFromDataTable();
});
this.dataTable.on("pageLengthChange", () => {
this._setQueryStringPageFromDataTable();
});
window.onpopstate = () => {
this._goToPageFromQueryString();
};
}
_afterRender() {
if (this.urlPagination) {
this._goToPageFromQueryString();
}
}
_goToPageFromQueryString() {
let pageNumber = this._parsePageNumberFromQueryString() - 1;
if (pageNumber >= 0) {
this.page(pageNumber);
}
}
_urlParameters() {
return $.deparam(window.location.href.split("?")[1] || "");
}
_createQueryStringWithPageNumber(pageNumber) {
let urlParameters = this._urlParameters();
urlParameters.page = pageNumber;
return "?" + $.param(urlParameters);
}
_parsePageNumberFromQueryString() {
let parameters = this._urlParameters();
let page = parseInt(parameters.page);
if (isNaN(page)) {
return 1;
} else {
return page;
}
}
_initBulkHandling() {
const bulkCheckbox = this.$el.find(this.BULK_COLUMN_HEADER_CHECKBOX_SELECTOR);
if (!bulkCheckbox.length) return;
this.bulkCheckbox = bulkCheckbox;
this.bulkCheckbox.click(this._onBulkHeaderClick);
this.dataTable.on("click", this.BULK_COLUMN_CHECKBOXES_SELECTOR, this._onBulkRowClick);
this.dataTable.on("filter", this._bulkCheckboxAdjust);
}
_enableRowHighlight() {
this.dataTable.on("click", this.ROWS_SELECTOR, this._onRowHighlightClick);
}
_onRowHighlightClick(event) {
const el = $(event.target).closest("tr");
const currentState = el.hasClass("highlighted");
$(event.target).closest("tbody").find('tr').toggleClass('highlighted', false);
el.toggleClass("highlighted", !currentState);
}
_dataTableConfig() {
let displayStart, recordTotal;
if (this.urlPagination) {
displayStart = this._getSafeDisplayStartFromPageNumber();
recordTotal = displayStart + this.paginateLength;
}
return {
sDom: this.layout,
bDeferRender: true,
bPaginate: this.paginate,
aLengthMenu: this.paginateLengthMenu,
iDisplayLength: this.paginateLength,
iDisplayStart: displayStart,
iRecordsTotal: recordTotal,
iRecordsDisplay: recordTotal,
bInfo: true,
fnCreatedRow: this._onRowCreated,
aoColumns: this._columnManager.dataTableColumnsConfig(),
aaSorting: this._columnManager.dataTableSortingConfig(),
fnDrawCallback: this._onDraw,
oLanguage: {
sEmptyTable: this.emptyText
}
};
}
_getSafeDisplayStartFromPageNumber() {
let pageIndex = this._parsePageNumberFromQueryString() - 1;
if (pageIndex < 0) {
return 0;
} else {
return pageIndex * this.paginateLength;
}
}
_triggerChangeSelection(extraData) {
const data = _.extend(extraData || {}, { count: this.selectionManager.count() });
this.trigger("change:selected", data);
}
_setupSelect2PaginationAttributes() {
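// tag the page-length <select> so the app's select2 setup (presumably keyed
// off the data-plugin attribute) picks it up, and give it a usable width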
this.$('select')
.attr('data-plugin', 'select2')
.css('width', '5em');
}
// DataTables does not provide a good way to programmatically disable sorting, so we:
// 1) remove the default sorting event handler that dataTables adds
// 2) Create a div and put the header in it. We need to do this so sorting doesn't conflict with filtering
// on the click events.
// 3) insert our own event handler on the div that stops the event if we are locked
// 4) re-insert the dataTables sort event handler
_installSortInterceptors() {
const self = this;
this.dataTable.find("thead th").each(function(index) {
$(this).off("click.DT");
$(this).off("keypress.DT");
const wrapper = $("<div class='DataTables_sort_wrapper'><div class='DataTables_sort_interceptor'></div></div>");
$(this).contents().appendTo(wrapper.find(".DataTables_sort_interceptor"));
$(this).append(wrapper);
// handle clicking on div as sorting
$('.DataTables_sort_interceptor', this).on("click", event => {
if (self.lock("sort")) {
event.stopImmediatePropagation();
} else {
history.pushState({}, "pagination", self._createQueryStringWithPageNumber(1));
}
});
// default sort handler for column with index
self.dataTable.fnSortListener($('.DataTables_sort_wrapper', this), index);
});
}
// Sets up filtering for the dataTable
_setupFiltering() {
const table = this;
const cg = table.configGenerator();
// Close active filter menu if user clicks on document
$("body").on("click", e => {
$("[data-toggle='filter-popover']").each(function() {
if (!$(this).is(e.target) && $(this).has(e.target).length === 0 && $(".popover").has(e.target).length === 0) {
$(this).popover("hide");
}
});<|fim▁hole|>
// We make a filter for each column header
table.dataTable.find("thead th").each(function(index) {
// here we use the CSS in the header to get the column config by attr
// there isn't a better way to do this currently
let col;
const columnClassName = extractColumnCSSClass(this.className);
if (columnClassName) {
cg.columnsConfig.forEach(currentColConfig => {
if (currentColConfig.id && toColumnCSSClass(currentColConfig.id) === columnClassName) {
col = currentColConfig;
}
});
} else {
// TODO: FAIL!!!
}
if (col) {
// We only make the filter controls if there's a filter element in the column manager
if (col.filter) {
table.child(`filter-${col.id}`, new FilterView({
column: col,
table,
head: this,
className: "dropdown DataTables_filter_wrapper"
}));
$(this).append(table.child(`filter-${col.id}`).render().$el);
}
}
});
}
// events
_onColumnReorder() {
this.trigger("reorder");
this._renderGrandTotalsRow();
}
_onDraw() {
this.trigger("draw", arguments);
this._renderGrandTotalsRow();
this._renderHeaderGroup();
}
_onColumnVisibilityChange(summary) {
this.dataTable.find(".dataTables_empty").attr("colspan", summary.visible.length);
this._renderGrandTotalsRow();
this._renderHeaderGroup();
}
_onBulkHeaderClick(event) {
const state = this.bulkCheckbox.prop("checked");
this.selectAllVisible(state);
// don't let dataTables sort this column on the click of checkbox
event.stopPropagation();
}
_onBulkRowClick(event) {
const checkbox = $(event.target);
const row = checkbox.closest("tr").data("row");
const checked = checkbox.prop("checked");
// ensure that when a single row checkbox is unchecked, we uncheck the header bulk checkbox
if (!checked) this.bulkCheckbox.prop("checked", false);
this._setRowSelectedState(row.model, row, checked);
this._triggerChangeSelection();
event.stopPropagation();
}
_onRowCreated(node, data) {
const model = this.collection.get(data);
// eslint-disable-next-line new-cap
const row = new this.rowClass({
el: node,
model,
columnsConfig: this.columnsConfig()
});
this.cache.set(model, row);
this.child(`child${row.cid}`, row).render();
// due to deferred rendering, the model associated with the row may have already been selected, but not rendered yet.
this.selectionManager.has(model) && row.bulkState(true);
}
_onAdd(model) {
if (!this.dataTable) return;
this.dataTable.fnAddData({ cid: model.cid });
this._triggerChangeSelection();
}
_onRemove(model) {
if (!this.dataTable) return;
const cache = this.cache;
const row = cache.get(model);
this.dataTable.fnDeleteRow(row.el, () => {
cache.unset(model);
row.close();
});
this.selectionManager.process(model, false);
this._triggerChangeSelection();
}
_onReset(collection) {
if (!this.dataTable) return;
this.dataTable.fnClearTable();
this.cache.each(row => {
row.close();
});
this.cache.reset();
// populate with preselected items
this.selectionManager = new SelectionManager();
_.each(this.selectedIds, function(id) {
// it's possible that a selected id is provided for a model that doesn't actually exist in the table, ignore it
const selectedModel = this.collection.get(id);
selectedModel && this._setRowSelectedState(selectedModel, null, true);
}, this);
// add new data
this.dataTable.fnAddData(cidMap(collection));
this._triggerChangeSelection();
}
_onColumnFilter() {
this.trigger("filter:column");
}
}
_.extend(LocalDataTable.prototype, {
BULK_COLUMN_HEADER_CHECKBOX_SELECTOR: "th:first.bulk :checkbox",
BULK_COLUMN_CHECKBOXES_SELECTOR: "td:first-child.bulk :checkbox",
ROWS_SELECTOR: "tbody tr",
template: '<table cellpadding="0" class="table"></table>',
paginate: true,
paginateLengthMenu: [10, 25, 50, 100],
paginateLength: 10,
selectedIds: [],
filteringEnabled: false,
layout: "<'row'<'col-xs-6'l><'col-xs-6'f>r>t<'row'<'col-xs-6'i><'col-xs-6'p>>",
striped: true,
reorderableColumns: true,
resizableColumns: false,
objectName: {
singular: "row",
plural: "rows"
}
});
export default LocalDataTable;<|fim▁end|> | }); |
<|file_name|>filesystem.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python<|fim▁hole|>"""
from lib.core.exception import SqlmapUnsupportedFeatureException
from plugins.generic.filesystem import Filesystem as GenericFilesystem
class Filesystem(GenericFilesystem):
def __init__(self):
GenericFilesystem.__init__(self)
def readFile(self, rFile):
errMsg = "File system read access not yet implemented for "
errMsg += "Oracle"
raise SqlmapUnsupportedFeatureException(errMsg)
def writeFile(self, wFile, dFile, fileType=None, forceCheck=False):
errMsg = "File system write access not yet implemented for "
errMsg += "Oracle"
raise SqlmapUnsupportedFeatureException(errMsg)<|fim▁end|> |
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission |
<|file_name|>0012_auto_20170620_1435.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-20 18:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hamask', '0011_auto_20170615_1520'),
]<|fim▁hole|> model_name='workout',
name='day_of_week',
field=models.CharField(blank=True, choices=[('1', 'Sunday'), ('2', 'Monday'), ('3', 'Tuesday'), ('4', 'Wednesday'), ('5', 'Thursday'), ('6', 'Friday'), ('7', 'Saturday')], max_length=30, null=True),
),
migrations.AlterField(
model_name='program',
name='start_date',
field=models.DateField(blank=True, null=True),
),
]<|fim▁end|> |
operations = [
migrations.AddField( |
<|file_name|>ssml-type.ts<|end_file_name|><|fim▁begin|>/**
Copyright 2020 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* @fileoverview Common interface for SSML type objects
*/
export type SoundLibrary = {sound: string, url: string}[]
/** Raw interface that will be extended by each type */
export interface Data {}
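/**
 * Contract implemented by each SSML block type: timeline markup, SSML
 * serialization and an editor definition for the block.
 */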
export interface SsmlType<T extends Data> {
getTimelineHtml: (data: T) => string<|fim▁hole|> html: string,
onOpen: (block: any, timeline: any, index: number,
soundLibrary: SoundLibrary) => void
}
}<|fim▁end|> | getSsml: (data: T) => string
getOuterSsml: (data: T) => string
getEditor: (data: T, soundLibrary: SoundLibrary) => { |
<|file_name|>filters.js<|end_file_name|><|fim▁begin|>/* global createNS */
/* exported filtersFactory */
var filtersFactory = (function () {
var ob = {};
ob.createFilter = createFilter;
ob.createAlphaToLuminanceFilter = createAlphaToLuminanceFilter;
function createFilter(filId, skipCoordinates) {
var fil = createNS('filter');
fil.setAttribute('id', filId);
if (skipCoordinates !== true) {
fil.setAttribute('filterUnits', 'objectBoundingBox');<|fim▁hole|> fil.setAttribute('y', '0%');
fil.setAttribute('width', '100%');
fil.setAttribute('height', '100%');
}
return fil;
}
function createAlphaToLuminanceFilter() {
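// 4x5 color matrix: R, G and B are replaced by the source alpha and the
// output is forced opaque, turning alpha into luminance (e.g. for luma mattes)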
var feColorMatrix = createNS('feColorMatrix');
feColorMatrix.setAttribute('type', 'matrix');
feColorMatrix.setAttribute('color-interpolation-filters', 'sRGB');
feColorMatrix.setAttribute('values', '0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 1');
return feColorMatrix;
}
return ob;
}());<|fim▁end|> | fil.setAttribute('x', '0%'); |
<|file_name|>do_with.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
@contextmanager
def log(name):
print('[%s] start...' % name)
yield
print('[%s] end.' % name)
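# code before `yield` runs when the `with` block is entered, code after runs
# on exit, so the two prints below are bracketed by the start/end log lines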
with log('DEBUG'):
print('Hello, world!')
print('Hello, Python!')<|fim▁end|> | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from contextlib import contextmanager |
<|file_name|>oxygen.py<|end_file_name|><|fim▁begin|><|fim▁hole|> extra_keys = ['capacity', 'pipe_length', 'is_initial']
def __init__(self, *args, **kwargs):
super(Oxygen, self).__init__(*args, **kwargs)
self.capacity = kwargs.get('capacity', 3000)
self.contained = self.capacity
self.pipe_length = kwargs.get('pipe_length', 5000)
self.is_initial = kwargs.get('is_initial', False)
def tick(self):
if self.contained == 0:
self.world.dispatch_event("on_suffocate")<|fim▁end|> | import actor
class Oxygen(actor.Actor): |
<|file_name|>observable.rs<|end_file_name|><|fim▁begin|>// Rx -- Reactive programming for Rust
// Copyright 2016 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
use observer::Observer;
use observer::{NextObserver, CompletedObserver, ErrorObserver, OptionObserver, ResultObserver};
use std::fmt::Debug;
use transform::{ContinueWithObservable, MapErrorObservable, MapObservable};
/// A stream of values.
///
/// An observable represents a stream of values, much like an iterator,
/// but instead of being “pull-based” like an iterator, it is “push-based”.
/// Multiple observers can subscribe to an observable and when the observable
/// produces a value, all observers get called with this value.
///
/// An observable can be _finite_ or _infinite_. An example of an infinite
/// observable is mouse clicks: you never know if the user is going to click
/// once more. An example of a finite observable is the results of a database
/// query: a database can hold only finitely many records, so one result is
/// the last one.
///
/// A finite observable can end in two ways:
///
/// * **Completed**: when the observable ends normally.
/// For instance, an observable of database query results
/// will complete after the last result has been produced.
/// * **Failed**: when an error occurred.
/// For instance, an observable of database query results
/// may fail if the connection is lost.
///
/// Failures are fatal: after an observable produces an error, it will not
/// produce any new values. If this is not the desired behavior, you can
/// use an observable of `Result`.
pub trait Observable {
/// The value produced by the observable.
type Item: Clone;
/// The error produced if the observable fails.
type Error: Clone;
/// The result of subscribing an observer.
// TODO: This drop bound is not required and it only complicates stuff, remove it.
type Subscription: Drop;
/// Subscribes an observer and returns the subscription.
///
/// After subscription, `on_next` will be called on the observer for every
/// value produced. If the observable completes, `on_completed` is called.
/// If the observable fails with an error, `on_error` is called. It is
/// guaranteed that no methods will be called on the observer after
/// `on_completed` or `on_error` have been called.
///
/// _When_ the observer is called is not part of the observable contract,
/// it depends on the kind of observable. The observer may be called before
/// `subscribe` returns, or it may be called in the future.
///<|fim▁hole|> fn subscribe<O>(&mut self, observer: O) -> Self::Subscription
where O: Observer<Self::Item, Self::Error>;
/// Subscribes a function to handle values produced by the observable.
///
/// For every value produced by the observable, `on_next` is called.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_next<FnNext>(&mut self,
on_next: FnNext)
-> Self::Subscription
where Self::Error: Debug, FnNext: FnMut(Self::Item) {
let observer = NextObserver {
fn_next: on_next,
};
self.subscribe(observer)
}
/// Subscribes functions to handle next and completion.
///
/// For every value produced by the observable, `on_next` is called. If the
/// observable completes, `on_completed` is called. A failure will cause a
/// panic. After `on_completed` has been called, it is guaranteed that neither
/// `on_next` nor `on_completed` is called again.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_completed<FnNext, FnCompleted>(&mut self,
on_next: FnNext,
on_completed: FnCompleted)
-> Self::Subscription
where Self::Error: Debug, FnNext: FnMut(Self::Item), FnCompleted: FnOnce() {
let observer = CompletedObserver {
fn_next: on_next,
fn_completed: on_completed,
};
self.subscribe(observer)
}
/// Subscribes functions to handle next, completion, and error.
///
/// For every value produced by the observable, `on_next` is called. If the
/// observable completes, `on_completed` is called. If it fails, `on_error`
/// is called. After `on_completed` or `on_error` have been called, it is
/// guaranteed that none of the three functions are called again.
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_error<FnNext, FnCompleted, FnError>(&mut self,
on_next: FnNext,
on_completed: FnCompleted,
on_error: FnError)
-> Self::Subscription
where FnNext: FnMut(Self::Item), FnCompleted: FnOnce(), FnError: FnOnce(Self::Error) {
let observer = ErrorObserver {
fn_next: on_next,
fn_completed: on_completed,
fn_error: on_error,
};
self.subscribe(observer)
}
/// Subscribes a function that takes an option.
///
/// The function translates into an observer as follows:
///
/// * `on_next(x)`: calls the functions with `Some(x)`.
/// * `on_completed()`: calls the function with `None`.
/// * `on_error(error)`: panics.
///
/// After the function has been called with `None`,
/// it is guaranteed never to be called again.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_option<FnOption>(&mut self,
on_next_or_completed: FnOption)
-> Self::Subscription
where Self::Error: Debug, FnOption: FnMut(Option<Self::Item>) {
let observer = OptionObserver {
fn_option: on_next_or_completed
};
self.subscribe(observer)
}
/// Subscribes a function that takes a result of an option.
///
/// The function translates into an observer as follows:
///
/// * `on_next(x)`: calls the function with `Ok(Some(x))`.
/// * `on_completed()`: calls the function with `Ok(None)`.
/// * `on_error(error)`: calls the function with `Err(error)`.
///
/// After the function has been called with `Ok(None)` or `Err(error)`,
/// it is guaranteed never to be called again.
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_result<FnResult>(&mut self,
on_next_or_completed_or_error: FnResult)
-> Self::Subscription
where FnResult: FnMut(Result<Option<Self::Item>, Self::Error>) {
let observer = ResultObserver {
fn_result: on_next_or_completed_or_error
};
self.subscribe(observer)
}
/// Transforms an observable by applying f to every value produced.
fn map<'s, U, F>(&'s mut self, f: F) -> MapObservable<'s, Self, F>
where F: Fn(Self::Item) -> U {
MapObservable::new(self, f)
}
/// Transforms an observable by applying f the error in case of failure.
fn map_error<'s, F, G>(&'s mut self, f: G) -> MapErrorObservable<'s, Self, G>
where G: Fn(Self::Error) -> F {
MapErrorObservable::new(self, f)
}
/// Joins two observables sequentially.
///
/// After the current observable completes, an observer will start to
/// receive values from `next` until that observable completes or fails.
/// The `next` observable is only subscribed to after the current observable
/// completes.
fn continue_with<'s, ObNext>(&'s mut self, next: &'s mut ObNext) -> ContinueWithObservable<'s, Self, ObNext>
where ObNext: Observable<Item = Self::Item, Error = Self::Error> {
ContinueWithObservable::new(self, next)
}
}<|fim▁end|> | /// The returned value represents the subscription. Dropping the subscription
/// will prevent further calls on the observer. |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>var crypto = require('crypto')
var multimatch = require('multimatch')
var path = require('path')
var KEY = 'metalsmith'
module.exports = plugin
function plugin(options) {
return function (files, metalsmith, done) {<|fim▁hole|> Object.keys(files)
.filter(function (p) {
return multimatch(p, options.pattern).length > 0
})
.forEach(function (p) {
var hash = crypto.createHmac('md5', KEY).update(files[p].contents).digest('hex')
var ext = path.extname(p)
var fingerprint = [p.substring(0, p.lastIndexOf(ext)), '-', hash, ext].join('').replace(/\\/g, '/')
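// expose the hashed copy alongside the original and record the mapping in
// metalsmith metadata so later plugins/templates can look up hashed paths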
files[fingerprint] = files[p]
metadata.fingerprint[p] = fingerprint
})
return process.nextTick(done)
}
}<|fim▁end|> | var metadata = metalsmith.metadata()
metadata.fingerprint = (metadata.fingerprint || {}) |
<|file_name|>edu_dp_o.py<|end_file_name|><|fim▁begin|>import sys
import ctypes
<|fim▁hole|> if sys.platform.startswith('linux'):
libc = ctypes.cdll.LoadLibrary('libc.so.6')
return libc.__sched_cpucount(ctypes.sizeof(ctypes.c_long), (ctypes.c_long * 1)(N))
elif sys.platform == 'darwin':
libc = ctypes.cdll.LoadLibrary('libSystem.dylib')
return libc.__popcountdi2(N)
else:
assert(False)
def main():
N = int(input())
mod = 10 ** 9 + 7
A = [[int(x) for x in input().split()] for _ in range(N)]
dp = [0] * (1 << N)
dp[0] = 1
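# dp[state] = number of ways to pair the first popcount(state) rows of A with
# the column set encoded by `state`; row i = popcount(state) is matched next,
# so this counts perfect matchings in O(N * 2^N), modulo 1e9+7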
for state in range(1 << N):
dp[state] %= mod
i = popcount(state)
for j in range(N):
if (state >> j & 1) == 0 and A[i][j]:
dp[state | (1 << j)] += dp[state]
print(dp[-1])
if __name__ == '__main__':
main()<|fim▁end|> | def popcount(N): |
<|file_name|>grpc_vtgate_client.py<|end_file_name|><|fim▁begin|># Copyright 2013 Google Inc. All Rights Reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""A simple, direct connection to the vtgate proxy server, using gRPC.
"""
import logging
import re
from urlparse import urlparse
# Import main protobuf library first
# to work around import order issues.
import google.protobuf # pylint: disable=unused-import
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.interfaces.face import face
from vtproto import vtgate_pb2
from vtproto import vtgateservice_pb2
from vtdb import dbexceptions
from vtdb import proto3_encoding
from vtdb import vtdb_logger
from vtdb import vtgate_client
from vtdb import vtgate_cursor
from vtdb import vtgate_utils
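# Regexes for picking errno codes and throttler messages out of server error
# strings; used elsewhere in this module when mapping errors to dbexceptions.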
_errno_pattern = re.compile(r'\(errno (\d+)\)', re.IGNORECASE)
_throttler_err_pattern = re.compile(
r'exceeded (.*) quota, rate limiting', re.IGNORECASE)
class GRPCVTGateConnection(vtgate_client.VTGateClient,
proto3_encoding.Proto3Connection):
"""A direct gRPC connection to the vtgate query service, using proto3.
"""
def __init__(self, addr, timeout,
root_certificates=None, private_key=None, certificate_chain=None,
**kwargs):
"""Creates a new GRPCVTGateConnection.
Args:
addr: address to connect to.
timeout: connection time out.
root_certificates: PEM_encoded root certificates.
private_key: PEM-encoded private key.
certificate_chain: PEM-encoded certificate chain.
**kwargs: passed up.
"""
super(GRPCVTGateConnection, self).__init__(addr, timeout, **kwargs)
self.stub = None
self.root_certificates = root_certificates
self.private_key = private_key
self.certificate_chain = certificate_chain
self.logger_object = vtdb_logger.get_logger()
def dial(self):
if self.stub:
self.stub.close()
p = urlparse('http://' + self.addr)
if self.root_certificates or self.private_key or self.certificate_chain:
creds = implementations.ssl_channel_credentials(
self.root_certificates, self.private_key, self.certificate_chain)
channel = implementations.secure_channel(p.hostname, p.port, creds)
else:
channel = implementations.insecure_channel(p.hostname, p.port)
self.stub = vtgateservice_pb2.beta_create_Vitess_stub(channel)
def close(self):
"""close closes the server connection and frees up associated resources.
The stub object is managed by the gRPC library, removing references
to it will just close the channel.
"""
if self.session and self.session.in_transaction:
self.rollback()
self.stub = None
def is_closed(self):
return self.stub is None
def cursor(self, *pargs, **kwargs):
cursorclass = kwargs.pop('cursorclass', None) or vtgate_cursor.VTGateCursor
return cursorclass(self, *pargs, **kwargs)
def begin(self, effective_caller_id=None):
try:
request = self.begin_request(effective_caller_id)
response = self.stub.Begin(request, self.timeout)
self.update_session(response)
except (face.AbortionError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, 'Begin')
def commit(self):
try:
request = self.commit_request()
self.stub.Commit(request, self.timeout)
except (face.AbortionError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, 'Commit')
finally:
self.session = None
def rollback(self):
try:
request = self.rollback_request()
self.stub.Rollback(request, self.timeout)
except (face.AbortionError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, 'Rollback')
finally:
self.session = None
@vtgate_utils.exponential_backoff_retry((dbexceptions.TransientError))
def _execute(
self, sql, bind_variables, tablet_type, keyspace_name=None,
shards=None, keyspace_ids=None, keyranges=None,
entity_keyspace_id_map=None, entity_column_name=None,
not_in_transaction=False, effective_caller_id=None, **kwargs):
# FIXME(alainjobart): keyspace should be in routing_kwargs,
# as it's not used for v3.
try:
request, routing_kwargs, method_name = self.execute_request_and_name(
sql, bind_variables, tablet_type,
keyspace_name, shards, keyspace_ids, keyranges,
entity_column_name, entity_keyspace_id_map,
not_in_transaction, effective_caller_id)
method = getattr(self.stub, method_name)
response = method(request, self.timeout)
return self.process_execute_response(method_name, response)
except (face.AbortionError, vtgate_utils.VitessError) as e:
self.logger_object.log_private_data(bind_variables)
raise _convert_exception(
e, method_name,
sql=sql, keyspace=keyspace_name, tablet_type=tablet_type,
not_in_transaction=not_in_transaction,
**routing_kwargs)
@vtgate_utils.exponential_backoff_retry((dbexceptions.TransientError))
def _execute_batch(
self, sql_list, bind_variables_list, keyspace_list, keyspace_ids_list,
shards_list, tablet_type, as_transaction, effective_caller_id=None,
**kwargs):
try:
request, method_name = self.execute_batch_request_and_name(
sql_list, bind_variables_list, keyspace_list,
keyspace_ids_list, shards_list,
tablet_type, as_transaction, effective_caller_id)
method = getattr(self.stub, method_name)
response = method(request, self.timeout)
return self.process_execute_batch_response(method_name, response)
except (face.AbortionError, vtgate_utils.VitessError) as e:
self.logger_object.log_private_data(bind_variables_list)
raise _convert_exception(
e, method_name,
sqls=sql_list, tablet_type=tablet_type,
as_transaction=as_transaction)
@vtgate_utils.exponential_backoff_retry((dbexceptions.TransientError))
def _stream_execute(
self, sql, bind_variables, tablet_type, keyspace_name=None,
shards=None, keyspace_ids=None, keyranges=None,
effective_caller_id=None,
**kwargs):
try:
request, routing_kwargs, method_name = self.stream_execute_request_and_name(
sql, bind_variables, tablet_type,
keyspace_name,
shards,
keyspace_ids,
keyranges,
effective_caller_id)
method = getattr(self.stub, method_name)
it = method(request, self.timeout)
first_response = it.next()
except (face.AbortionError, vtgate_utils.VitessError) as e:
self.logger_object.log_private_data(bind_variables)
raise _convert_exception(
e, method_name,
sql=sql, keyspace=keyspace_name, tablet_type=tablet_type,
**routing_kwargs)
fields, convs = self.build_conversions(first_response.result.fields)
def row_generator():
try:
for response in it:
for row in response.result.rows:
yield tuple(proto3_encoding.make_row(row, convs))
except Exception:
logging.exception('gRPC low-level error')
raise
return row_generator(), fields
def get_srv_keyspace(self, name):
try:
request = vtgate_pb2.GetSrvKeyspaceRequest(
keyspace=name,
)
response = self.stub.GetSrvKeyspace(request, self.timeout)
return self.keyspace_from_response(name, response)
except (face.AbortionError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, keyspace=name)
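# A rough usage sketch based only on the methods defined above (the address
# and timeout values are illustrative):
#
#   conn = GRPCVTGateConnection('localhost:15991', 30.0)
#   conn.dial()
#   conn.begin()
#   # ... issue statements through a cursor obtained from conn.cursor(...) ...
#   conn.commit()
#   conn.close()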
def _convert_exception(exc, *args, **kwargs):
"""This parses the protocol exceptions to the api interface exceptions.
This also logs the exception and increments the appropriate error counters.<|fim▁hole|>
Args:
exc: raw protocol exception.
*args: additional args from the raising site.
**kwargs: additional keyword args from the raising site.
They will be converted into a single string, and added as an extra
arg to the exception.
Returns:
Api interface exceptions - dbexceptions with new args.
"""
kwargs_as_str = vtgate_utils.convert_exception_kwargs(kwargs)
exc.args += args
if kwargs_as_str:
exc.args += kwargs_as_str,
new_args = (type(exc).__name__,) + exc.args
if isinstance(exc, vtgate_utils.VitessError):
new_exc = exc.convert_to_dbexception(new_args)
elif isinstance(exc, face.ExpirationError):
# face.ExpirationError is returned by the gRPC library when
# a request times out. Note it is a subclass of face.AbortionError
# so we have to test for it before.
new_exc = dbexceptions.TimeoutError(new_args)
elif isinstance(exc, face.AbortionError):
# face.AbortionError is the toplevel error returned by gRPC for any
# RPC that finishes earlier than expected.
msg = exc.details
if exc.code == interfaces.StatusCode.UNAVAILABLE:
if _throttler_err_pattern.search(msg):
return dbexceptions.ThrottledError(new_args)
else:
return dbexceptions.TransientError(new_args)
elif exc.code == interfaces.StatusCode.ALREADY_EXISTS:
new_exc = _prune_integrity_error(msg, new_args)
else:
# Unhandled RPC application error
new_exc = dbexceptions.DatabaseError(new_args + (msg,))
else:
new_exc = exc
vtgate_utils.log_exception(
new_exc,
keyspace=kwargs.get('keyspace'), tablet_type=kwargs.get('tablet_type'))
return new_exc
def _prune_integrity_error(msg, exc_args):
"""Prunes an integrity error message and returns an IntegrityError."""
parts = _errno_pattern.split(msg)
pruned_msg = msg[:msg.find(parts[2])]
exc_args = (pruned_msg,) + tuple(exc_args[1:])
return dbexceptions.IntegrityError(exc_args)
vtgate_client.register_conn_class('grpc', GRPCVTGateConnection)<|fim▁end|> | |
<|file_name|>analysis.rs<|end_file_name|><|fim▁begin|>/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* *
* Copyright (C) 2015-2022 Christian Krause *
* *
* Christian Krause <[email protected]> *
* *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* *
* This file is part of strace-analyzer. *
* *
* strace-analyzer is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the license, or any *
* later version. *
* *
* strace-analyzer is distributed in the hope that it will be useful, but *
* WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
* General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License along *
* with strace-analyzer. If not, see <http://www.gnu.org/licenses/>. *
* *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
use std::cell::RefCell;
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};
use anyhow::Result;
use lazy_static::lazy_static;
use regex::{Captures, Regex};
use crate::config::Config;
use crate::log;
use crate::output::Output;
use crate::summary::{show_table, Summary};
pub fn run<P>(input: P, config: Config) -> Result<()>
where
P: AsRef<Path> + Copy,
{
let mut analysis = Analysis::new(config);
match config.output {
Output::Continuous => {
analysis.analyze(input, |summary| summary.show(config))
}
Output::Table => {
let summaries = RefCell::new(vec![]);
analysis.analyze(input, |summary| {
summaries.borrow_mut().push(summary);
})?;
show_table(&summaries.into_inner(), config);
Ok(())
}
}
}
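// For example (the trace path is illustrative):
//
//   let config = Config::default();
//   run("dd.strace", config)?;
//
// `run` drives one `Analysis` over the trace and either prints each summary
// as it is finished (`Output::Continuous`) or collects them into one table.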
#[derive(Clone)]
struct Analysis {
fds: HashMap<u32, Summary>,
config: Config,
}
impl Analysis {
fn new(config: Config) -> Self {
let stdin = Summary::new("STDIN");
let stdout = Summary::new("STDOUT");
let stderr = Summary::new("STDERR");
let mut fds: HashMap<u32, Summary> = HashMap::new();
fds.insert(0, stdin);
fds.insert(1, stdout);
fds.insert(2, stderr);
Self { fds, config }
}
fn analyze<F, P>(&mut self, input: P, f: F) -> Result<()>
where
F: Fn(Summary) + Copy,
P: AsRef<Path> + Copy,
{
let file = File::open(input)?;
for line in BufReader::new(file).lines() {
let line = line?;
for cap in RE_CREAT.captures_iter(&line) {
self.syscall_creat(&cap, f);
}
for cap in RE_CLOSE.captures_iter(&line) {
self.syscall_close(&cap, f);
}
for cap in RE_CLONE.captures_iter(&line) {
self.syscall_clone(&cap, input, f)?;
}
for cap in RE_DUP.captures_iter(&line) {
self.syscall_dup(&cap, f);
}
for cap in RE_DUP2.captures_iter(&line) {
self.syscall_dup2(&cap, f);
}
for cap in RE_FCNTL_DUP.captures_iter(&line) {
self.syscall_fcntl_dup(&cap, f);
}
for cap in RE_OPEN.captures_iter(&line) {
self.syscall_open(&cap, f);
}
for cap in RE_OPENAT.captures_iter(&line) {
self.syscall_openat(&cap, f);
}
for cap in RE_PIPE.captures_iter(&line) {
self.syscall_pipe(&cap, f);
}
for cap in RE_PREAD.captures_iter(&line) {
self.syscall_pread(&cap);
}
for cap in RE_PWRITE.captures_iter(&line) {
self.syscall_pwrite(&cap);
}
for cap in RE_READ.captures_iter(&line) {
self.syscall_read(&cap);
}
for cap in RE_SOCKET.captures_iter(&line) {
self.syscall_socket(&cap, f);
}
for cap in RE_WRITE.captures_iter(&line) {
self.syscall_write(&cap);
}
}
for summary in self.fds.values() {
f(summary.clone());
}
Ok(())
}
fn insert<F>(&mut self, fd: u32, summary: Summary, syscall: &str, f: F)
where
F: Fn(Summary) + Copy,
{
if let Some(summary) = self.fds.insert(fd, summary) {
self.debug(format!(
"[{}] dropping {} without explicit close",
syscall, summary.file
));
f(summary);
};
}
fn dup<F>(&mut self, syscall: &str, oldfd: u32, newfd: u32, f: F)
where
F: Fn(Summary) + Copy,
{
let summary = self.fds.get(&oldfd).map_or_else(
|| {
self.debug(format!(
"[{}] couldn't find oldfd {}",
syscall, oldfd
));
Summary::new("DUP")
},
|summary_old| {
let old_file = &summary_old.file;
self.debug(format!(
"[{}] {} -> {} => {}",
syscall, oldfd, &newfd, old_file
));
Summary::new(old_file)
},
);
self.insert(newfd, summary, syscall, f);
}
fn syscall_clone<F, P>(
&mut self,
cap: &Captures,
input: P,
f: F,
) -> Result<()>
where
F: Fn(Summary) + Copy,
P: AsRef<Path>,
{
let pid = &cap[1];
let trace = input.as_ref().with_extension(pid);
self.verbose(format!(
"[clone] tracing pid {} in {:?} ...",
pid, trace
));
        let mut cloned = self.clone();
        for summary in cloned.fds.values_mut() {
            summary.reset();
        }
        cloned.analyze(&trace, f)?;
self.verbose(format!("[clone] tracing pid {} finished", pid));
Ok(())
}
fn syscall_close<F>(&mut self, cap: &Captures, f: F)
where
F: Fn(Summary) + Copy,
{
let fd: u32 = cap[1].parse().unwrap();
let status: i32 = cap[2].parse().unwrap();
let error = &cap[3];
let syscall = "close";
match (status, error) {
(0, _) => {
self.finish(fd, syscall, f);
}
(_, "EBADF") => {
self.debug(format!("[close] {} => bad fd", fd));
}
(_, error) => {
self.verbose(format!("[close] {} => {}", fd, error));
self.finish(fd, syscall, f);
}
}
}
fn syscall_creat<F>(&mut self, cap: &Captures, f: F)
where
F: Fn(Summary) + Copy,
{
let file = &cap[1];
let fd: u32 = cap[2].parse().unwrap();
self.debug(format!("[creat] {} => {}", fd, file));
let syscall = "creat";
self.insert(fd, Summary::new(file), syscall, f);
}
fn syscall_dup<F>(&mut self, cap: &Captures, f: F)
where
F: Fn(Summary) + Copy,
{
let oldfd: u32 = cap[1].parse().unwrap();
let newfd: u32 = cap[2].parse().unwrap();
self.dup("dup", oldfd, newfd, f);
}
fn syscall_dup2<F>(&mut self, cap: &Captures, f: F)
where
F: Fn(Summary) + Copy,
{
let oldfd: u32 = cap[1].parse().unwrap();
let newfd: u32 = cap[2].parse().unwrap();
self.dup("dup2", oldfd, newfd, f);
}
fn syscall_fcntl_dup<F>(&mut self, cap: &Captures, f: F)
where
F: Fn(Summary) + Copy,
{
let oldfd: u32 = cap[1].parse().unwrap();
let newfd: u32 = cap[2].parse().unwrap();
self.dup("fcntl-dup", oldfd, newfd, f);
}
fn syscall_open<F>(&mut self, cap: &Captures, f: F)
where
F: Fn(Summary) + Copy,
{
let file = &cap[1];
let fd: u32 = cap[2].parse().unwrap();
self.debug(format!("[open] {} => {}", fd, file));
let syscall = "open";
self.insert(fd, Summary::new(file), syscall, f);
}
fn syscall_openat<F>(&mut self, cap: &Captures, f: F)
where
F: Fn(Summary) + Copy,
{
let dirfd = &cap[1];
let pathname = &cap[2];
let fd: u32 = cap[3].parse().unwrap();
let file = self.join_paths(dirfd, pathname);
self.debug(format!("[openat] {} => {}", fd, file));
let syscall = "openat";
self.insert(fd, Summary::new(&file), syscall, f);
}
fn syscall_pipe<F>(&mut self, cap: &Captures, f: F)
where
F: Fn(Summary) + Copy,
{
let readend = cap[1].parse().unwrap();
let writeend = cap[2].parse().unwrap();
self.debug(format!("[pipe] {} => {}", readend, writeend));<|fim▁hole|> let syscall = "pipe";
self.insert(readend, Summary::pipe(), syscall, f);
self.insert(writeend, Summary::pipe(), syscall, f);
}
fn syscall_pread(&mut self, cap: &Captures) {
let fd: u32 = cap[1].parse().unwrap();
if let Some(summary) = self.fds.get_mut(&fd) {
let opsize: u64 = cap[2].parse().unwrap();
let bytes: u64 = cap[3].parse().unwrap();
summary.update_read(opsize, bytes);
} else {
self.verbose(format!("[pread] unknown fd {}", fd));
}
}
fn syscall_pwrite(&mut self, cap: &Captures) {
let fd: u32 = cap[1].parse().unwrap();
if let Some(summary) = self.fds.get_mut(&fd) {
let opsize: u64 = cap[2].parse().unwrap();
let bytes: u64 = cap[3].parse().unwrap();
summary.update_write(opsize, bytes);
} else {
self.verbose(format!("[pwrite] unknown fd {}", fd));
}
}
fn syscall_read(&mut self, cap: &Captures) {
let fd: u32 = cap[1].parse().unwrap();
if let Some(summary) = self.fds.get_mut(&fd) {
let opsize: u64 = cap[2].parse().unwrap();
let bytes: u64 = cap[3].parse().unwrap();
summary.update_read(opsize, bytes);
} else {
self.verbose(format!("[read] unknown fd {}", fd));
}
}
fn syscall_socket<F>(&mut self, cap: &Captures, f: F)
where
F: Fn(Summary) + Copy,
{
let fd: u32 = cap[1].parse().unwrap();
self.debug(format!("[socket] {}", fd));
let syscall = "socket";
self.insert(fd, Summary::socket(), syscall, f);
}
fn syscall_write(&mut self, cap: &Captures) {
let fd: u32 = cap[1].parse().unwrap();
if let Some(summary) = self.fds.get_mut(&fd) {
let opsize: u64 = cap[2].parse().unwrap();
let bytes: u64 = cap[3].parse().unwrap();
summary.update_write(opsize, bytes);
} else {
self.verbose(format!("[write] unknown fd {}", fd));
}
}
fn finish<F>(&mut self, fd: u32, syscall: &str, f: F)
where
F: Fn(Summary) + Copy,
{
if let Some(summary) = self.fds.remove(&fd) {
self.debug(format!("[{}] {} => {}", syscall, fd, summary.file));
f(summary);
} else {
self.verbose(format!("[{}] unknown fd {}", syscall, fd));
}
}
fn join_paths(&self, dirfd: &str, pathname: &str) -> String {
if dirfd == "AT_FDCWD" {
String::from(pathname)
} else {
let dirfd: u32 = dirfd.parse().unwrap();
self.fds.get(&dirfd).map_or_else(
|| String::from(pathname),
|dir_summary| {
let mut path = PathBuf::new();
path.push(dir_summary.file.clone());
path.push(pathname);
path.to_str()
.map_or_else(|| String::from(pathname), String::from)
},
)
}
}
fn debug<S: AsRef<str>>(&self, message: S) {
log::debug(message, self.config);
}
fn verbose<S: AsRef<str>>(&self, message: S) {
log::verbose(message, self.config);
}
}
// ----------------------------------------------------------------------------
// regexes
// ----------------------------------------------------------------------------
lazy_static! {
static ref RE_CLONE: Regex =
Regex::new(r#"^clone\(.*\)\s+= (\d+)$"#).unwrap();
}
lazy_static! {
static ref RE_CLOSE: Regex =
Regex::new(r#"^close\((\d+)\)\s+= (-?\d+)\s*([A-Z]*).*$"#).unwrap();
}
lazy_static! {
static ref RE_CREAT: Regex =
Regex::new(r#"^creat\("([^"]+)", .+\)\s+= (\d+)$"#).unwrap();
}
lazy_static! {
static ref RE_DUP: Regex =
Regex::new(r#"^dup\((\d+)\)\s+= (\d+)$"#).unwrap();
}
lazy_static! {
static ref RE_DUP2: Regex =
Regex::new(r#"^dup2\((\d+), \d+\)\s+= (\d+)$"#).unwrap();
}
lazy_static! {
static ref RE_FCNTL_DUP: Regex =
Regex::new(r#"^fcntl\((\d+), F_DUPFD, \d+\)\s+= (\d+)$"#).unwrap();
}
lazy_static! {
static ref RE_OPEN: Regex = Regex::new(
        // we're ignoring failures on purpose because they don't open an fd
r#"^open\("([^"]+)", .+\)\s+= (\d+)$"#
).unwrap();
}
lazy_static! {
static ref RE_OPENAT: Regex =
Regex::new(r#"^openat\((\d+|AT_FDCWD), "([^"]+)", .+\)\s+= (\d+)$"#)
.unwrap();
}
lazy_static! {
static ref RE_PIPE: Regex =
Regex::new(r#"^pipe\(\[(\d+), (\d+)\]\)\s+= (\d+)$"#).unwrap();
}
lazy_static! {
static ref RE_PREAD: Regex =
Regex::new(r#"^pread\((\d+),.*, (\d+), \d+\)\s+= (\d+)$"#).unwrap();
}
lazy_static! {
static ref RE_PWRITE: Regex =
Regex::new(r#"^pwrite\((\d+),.*, (\d+), \d+\)\s+= (\d+)$"#).unwrap();
}
lazy_static! {
static ref RE_READ: Regex =
Regex::new(r#"^read\((\d+),.*, (\d+)\)\s+= (\d+)$"#).unwrap();
}
lazy_static! {
static ref RE_SOCKET: Regex =
Regex::new(r#"^socket\(.*\)\s+= (\d+)$"#).unwrap();
}
lazy_static! {
static ref RE_WRITE: Regex =
Regex::new(r#"^write\((\d+),.*, (\d+)\)\s+= (\d+)$"#).unwrap();
}
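// Illustrative lines in the shape these patterns expect (values are made up):
//
//   open("/etc/hosts", O_RDONLY|O_CLOEXEC) = 3
//   read(3, ""..., 131072)                 = 4096
//   close(3)                               = 0
//
// The patterns are anchored at the start of the line, so they assume plain
// per-process trace files without PID prefixes (e.g. output from
// `strace -ff -o <prefix>`), which also matches how `syscall_clone` above
// looks up a `.<pid>` file per child process.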
// ----------------------------------------------------------------------------
// tests
// ----------------------------------------------------------------------------
#[cfg(test)]
pub mod tests {
use std::process::Command;
use assert_cmd::prelude::*;
use assert_fs::prelude::*;
use predicates::prelude::*;
use super::*;
#[test]
fn analyze_dd() {
let temp = assert_fs::TempDir::new().unwrap();
let trace = temp.child("dd.strace");
trace.assert(predicate::path::missing());
let mut strace_dd = Command::new("strace");
strace_dd.current_dir(&temp);
strace_dd.args(["-s", "0"]);
strace_dd.args(["-o", trace.path().to_string_lossy().as_ref()]);
strace_dd.args([
"dd",
"if=/dev/zero",
"of=/dev/null",
"bs=1M",
"count=1024",
"status=none",
]);
strace_dd.assert().success();
trace.assert(predicate::path::exists());
let config = Config::default();
let summaries = RefCell::new(vec![]);
let mut analysis = Analysis::new(config);
analysis
.analyze(trace.path(), |summary| {
summaries.borrow_mut().push(summary);
})
.unwrap();
let summaries = summaries.into_inner();
assert!(summaries.contains(&Summary {
file: "/dev/zero".into(),
read_freq: HashMap::from([(1_048_576, 1024)]),
write_freq: HashMap::default(),
read_bytes: 1_073_741_824,
write_bytes: 0,
}));
assert!(summaries.contains(&Summary {
file: "/dev/null".into(),
read_freq: HashMap::default(),
write_freq: HashMap::from([(1_048_576, 1024)]),
read_bytes: 0,
write_bytes: 1_073_741_824,
}));
temp.close().unwrap();
}
}<|fim▁end|> | |
<|file_name|>statics-and-consts.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// compile-flags:-Zprint-mono-items=eager
#![deny(dead_code)]
#![feature(start)]
static STATIC1: i64 = {
const STATIC1_CONST1: i64 = 2;
1 + CONST1 as i64 + STATIC1_CONST1
};
const CONST1: i64 = {
const CONST1_1: i64 = {
const CONST1_1_1: i64 = 2;
CONST1_1_1 + 1
};
1 + CONST1_1 as i64
};
fn foo() {
let _ = {
const CONST2: i64 = 0;
static STATIC2: i64 = CONST2;
let x = {
const CONST2: i64 = 1;
static STATIC2: i64 = CONST2;
STATIC2
};
<|fim▁hole|> const CONST2: i64 = 0;
static STATIC2: i64 = CONST2;
STATIC2
};
}
//~ MONO_ITEM fn statics_and_consts::start[0]
#[start]
fn start(_: isize, _: *const *const u8) -> isize {
foo();
let _ = STATIC1;
0
}
//~ MONO_ITEM static statics_and_consts::STATIC1[0]
//~ MONO_ITEM fn statics_and_consts::foo[0]
//~ MONO_ITEM static statics_and_consts::foo[0]::STATIC2[0]
//~ MONO_ITEM static statics_and_consts::foo[0]::STATIC2[1]
//~ MONO_ITEM static statics_and_consts::foo[0]::STATIC2[2]<|fim▁end|> | x + STATIC2
};
let _ = { |
<|file_name|>0011_thread_updated.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations<|fim▁hole|> dependencies = [
('fastapp', '0010_auto_20150910_2010'),
]
operations = [
migrations.AddField(
model_name='thread',
name='updated',
field=models.DateTimeField(auto_now=True, null=True),
preserve_default=True,
),
]<|fim▁end|> |
class Migration(migrations.Migration):
|
<|file_name|>settings.rs<|end_file_name|><|fim▁begin|>use crate::render::object::BodyColor;
use std::fs::File;
use std::path::PathBuf;
#[derive(Deserialize)]
pub struct Car {
pub id: String,
pub color: BodyColor,
pub slots: Vec<String>,
pub pos: Option<(i32, i32)>,
}
#[derive(Copy, Clone, Deserialize)]
pub enum View {
Flat,
Perspective,
}
#[derive(Copy, Clone, Deserialize)]
pub struct Camera {
pub angle: u8,
pub height: f32,
pub target_overhead: f32,
pub speed: f32,<|fim▁hole|>
#[derive(Copy, Clone, Deserialize)]
pub enum SpawnAt {
Player,
Random,
}
#[derive(Copy, Clone, Deserialize)]
pub struct Other {
pub count: usize,
pub spawn_at: SpawnAt,
}
#[derive(Copy, Clone, Deserialize)]
pub struct GpuCollision {
pub max_objects: usize,
pub max_polygons_total: usize,
pub max_raster_size: (u32, u32),
}
#[derive(Copy, Clone, Deserialize)]
pub struct Physics {
pub max_quant: f32,
pub shape_sampling: u8,
pub gpu_collision: Option<GpuCollision>,
}
#[derive(Deserialize)]
pub struct Game {
pub level: String,
pub cycle: String,
pub view: View,
pub camera: Camera,
pub other: Other,
pub physics: Physics,
}
#[derive(Deserialize)]
pub struct Window {
pub title: String,
pub size: [u32; 2],
pub reload_on_focus: bool,
}
#[derive(Copy, Clone, Deserialize)]
pub enum Backend {
Auto,
Metal,
Vulkan,
DX12,
DX11,
GL,
}
impl Backend {
pub fn to_wgpu(&self) -> wgpu::Backends {
match *self {
Backend::Auto => wgpu::Backends::PRIMARY,
Backend::Metal => wgpu::Backends::METAL,
Backend::Vulkan => wgpu::Backends::VULKAN,
Backend::DX12 => wgpu::Backends::DX12,
Backend::DX11 => wgpu::Backends::DX11,
Backend::GL => wgpu::Backends::GL,
}
}
}
#[derive(Copy, Clone, Default, Deserialize)]
pub struct DebugRender {
pub max_vertices: usize,
pub collision_shapes: bool,
pub collision_map: bool,
pub impulses: bool,
}
#[derive(Copy, Clone, Deserialize)]
pub enum ShadowTerrain {
RayTraced,
}
#[derive(Copy, Clone, Deserialize)]
pub struct Shadow {
pub size: u32,
pub terrain: ShadowTerrain,
}
#[derive(Copy, Clone, Deserialize)]
pub struct Light {
pub pos: [f32; 4],
pub color: [f32; 4],
pub shadow: Shadow,
}
#[derive(Copy, Clone, Deserialize)]
pub enum Terrain {
RayTraced,
RayMipTraced {
mip_count: u32,
max_jumps: u32,
max_steps: u32,
debug: bool,
},
Sliced,
Painted,
Scattered {
density: [u32; 3],
},
}
#[derive(Copy, Clone, Deserialize)]
pub struct Water {}
#[derive(Copy, Clone, Deserialize)]
pub struct Fog {
pub color: [f32; 4],
pub depth: f32,
}
#[derive(Clone, Deserialize)]
pub struct Render {
pub wgpu_trace_path: String,
pub light: Light,
pub terrain: Terrain,
pub water: Water,
pub fog: Fog,
pub debug: DebugRender,
}
#[derive(Deserialize)]
pub struct Settings {
pub data_path: PathBuf,
pub car: Car,
pub game: Game,
pub window: Window,
pub backend: Backend,
pub render: Render,
}
impl Settings {
pub fn load(path: &str) -> Self {
use std::io::Read;
const TEMPLATE: &str = "config/settings.template.ron";
const PATH: &str = "config/settings.ron";
let mut string = String::new();
File::open(path)
.unwrap_or_else(|e| panic!("Unable to open the settings file: {:?}.\nPlease copy '{}' to '{}' and adjust 'data_path'",
e, TEMPLATE, PATH))
.read_to_string(&mut string)
.unwrap();
let set: Settings = match ron::de::from_str(&string) {
Ok(set) => set,
Err(e) => panic!(
"Unable to parse settings RON: {:?}.\nPlease check if `{}` has changed and your local config needs to be adjusted.",
e,
TEMPLATE,
),
};
if !set.check_path("options.dat") {
panic!(
"Can't find the resources of the original Vangers game at {:?}, please check your `{}`",
set.data_path, PATH,
);
}
set
}
pub fn open_relative(&self, path: &str) -> File {
File::open(self.data_path.join(path))
.unwrap_or_else(|_| panic!("Unable to open game file: {}", path))
}
pub fn check_path(&self, path: &str) -> bool {
self.data_path.join(path).exists()
}
pub fn open_palette(&self) -> File {
let path = self
.data_path
.join("resource")
.join("pal")
.join("objects.pal");
File::open(path).expect("Unable to open palette")
}
pub fn _open_vehicle_model(&self, name: &str) -> File {
let path = self
.data_path
.join("resource")
.join("m3d")
.join("mechous")
.join(name)
.with_extension("m3d");
File::open(path).unwrap_or_else(|_| panic!("Unable to open vehicle {}", name))
}
}<|fim▁end|> | pub depth_range: (f32, f32),
} |
<|file_name|>git_gc.rs<|end_file_name|><|fim▁begin|>//! Tests for git garbage collection.
use std::env;
use std::ffi::OsStr;
use std::path::PathBuf;
use std::process::Command;
use cargo_test_support::git;
use cargo_test_support::paths;
use cargo_test_support::project;
use cargo_test_support::registry::Package;
use url::Url;
fn find_index() -> PathBuf {
let dir = paths::home().join(".cargo/registry/index");
dir.read_dir().unwrap().next().unwrap().unwrap().path()
}
fn run_test(path_env: Option<&OsStr>) {
const N: usize = 50;
let foo = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
bar = "*"
"#,
)
.file("src/lib.rs", "")
.build();
Package::new("bar", "0.1.0").publish();
foo.cargo("build").run();
let index = find_index();
let path = paths::home().join("tmp");
let url = Url::from_file_path(&path).unwrap().to_string();
let repo = git2::Repository::init(&path).unwrap();
let index = git2::Repository::open(&index).unwrap();
let mut cfg = repo.config().unwrap();
cfg.set_str("user.email", "[email protected]").unwrap();
cfg.set_str("user.name", "Foo Bar").unwrap();<|fim▁hole|> cfg.set_str("user.name", "Foo Bar").unwrap();
for _ in 0..N {
git::commit(&repo);
index
.remote_anonymous(&url)
.unwrap()
.fetch(&["refs/heads/master:refs/remotes/foo/master"], None, None)
.unwrap();
}
drop((repo, index));
Package::new("bar", "0.1.1").publish();
let before = find_index()
.join(".git/objects/pack")
.read_dir()
.unwrap()
.count();
assert!(before > N);
let mut cmd = foo.cargo("update");
cmd.env("__CARGO_PACKFILE_LIMIT", "10");
if let Some(path) = path_env {
cmd.env("PATH", path);
}
cmd.env("CARGO_LOG", "trace");
cmd.run();
let after = find_index()
.join(".git/objects/pack")
.read_dir()
.unwrap()
.count();
assert!(
after < before,
"packfiles before: {}\n\
packfiles after: {}",
before,
after
);
}
#[cargo_test]
fn use_git_gc() {
if Command::new("git").arg("--version").output().is_err() {
return;
}
run_test(None);
}
#[cargo_test]
fn avoid_using_git() {
let path = env::var_os("PATH").unwrap_or_default();
let mut paths = env::split_paths(&path).collect::<Vec<_>>();
let idx = paths
.iter()
.position(|p| p.join("git").exists() || p.join("git.exe").exists());
match idx {
Some(i) => {
paths.remove(i);
}
None => return,
}
run_test(Some(&env::join_paths(&paths).unwrap()));
}<|fim▁end|> | let mut cfg = index.config().unwrap();
cfg.set_str("user.email", "[email protected]").unwrap(); |
<|file_name|>lu.rs<|end_file_name|><|fim▁begin|>#![cfg_attr(rustfmt, rustfmt_skip)]
use na::Matrix3;
#[test]
fn lu_simple() {
let m = Matrix3::new(
2.0, -1.0, 0.0,
-1.0, 2.0, -1.0,
0.0, -1.0, 2.0);
let lu = m.lu();
assert_eq!(lu.determinant(), 4.0);
let (p, l, u) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
assert!(relative_eq!(m, lu, epsilon = 1.0e-7));
}
#[test]
fn lu_simple_with_pivot() {
let m = Matrix3::new(
0.0, -1.0, 2.0,
-1.0, 2.0, -1.0,
2.0, -1.0, 0.0);
let lu = m.lu();
assert_eq!(lu.determinant(), -4.0);
let (p, l, u) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
assert!(relative_eq!(m, lu, epsilon = 1.0e-7));
}
#[cfg(feature = "arbitrary")]
mod quickcheck_tests {
#[allow(unused_imports)]
use crate::core::helper::{RandScalar, RandComplex};
macro_rules! gen_tests(
($module: ident, $scalar: ty) => {
mod $module {
use std::cmp;
use na::{DMatrix, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5, DVector, Vector4};
#[allow(unused_imports)]
use crate::core::helper::{RandScalar, RandComplex};
quickcheck! {
fn lu(m: DMatrix<$scalar>) -> bool {
let mut m = m;
if m.len() == 0 {
m = DMatrix::<$scalar>::new_random(1, 1);
}
let m = m.map(|e| e.0);
let lu = m.clone().lu();
let (p, l, u) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
relative_eq!(m, lu, epsilon = 1.0e-7)
}
fn lu_static_3_5(m: Matrix3x5<$scalar>) -> bool {
let m = m.map(|e| e.0);
let lu = m.lu();
let (p, l, u) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
relative_eq!(m, lu, epsilon = 1.0e-7)
}
fn lu_static_5_3(m: Matrix5x3<$scalar>) -> bool {
let m = m.map(|e| e.0);
let lu = m.lu();
let (p, l, u) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
relative_eq!(m, lu, epsilon = 1.0e-7)
}
fn lu_static_square(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0);
let lu = m.lu();
let (p, l, u) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
relative_eq!(m, lu, epsilon = 1.0e-7)
}
fn lu_solve(n: usize, nb: usize) -> bool {
if n != 0 && nb != 0 {
let n = cmp::min(n, 50); // To avoid slowing down the test too much.
let nb = cmp::min(nb, 50); // To avoid slowing down the test too much.
let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);
let lu = m.clone().lu();
let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0);
let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0);
let sol1 = lu.solve(&b1);
let sol2 = lu.solve(&b2);
return (sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)) &&
(sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6))
}
return true;
}
fn lu_solve_static(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0);
let lu = m.lu();
let b1 = Vector4::<$scalar>::new_random().map(|e| e.0);
let b2 = Matrix4x3::<$scalar>::new_random().map(|e| e.0);
let sol1 = lu.solve(&b1);
let sol2 = lu.solve(&b2);
return (sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)) &&
(sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6))
}
fn lu_inverse(n: usize) -> bool {
let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much.
let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);
let mut l = m.lower_triangle();
let mut u = m.upper_triangle();
// Ensure the matrix is well conditioned for inversion.
l.fill_diagonal(na::one());
u.fill_diagonal(na::one());
let m = l * u;
let m1 = m.clone().lu().try_inverse().unwrap();
let id1 = &m * &m1;
let id2 = &m1 * &m;
return id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5);
}
fn lu_inverse_static(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0);
let lu = m.lu();
if let Some(m1) = lu.try_inverse() {<|fim▁hole|> id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5)
}
else {
true
}
}
}
}
}
);
gen_tests!(complex, RandComplex<f64>);
gen_tests!(f64, RandScalar<f64>);
}<|fim▁end|> | let id1 = &m * &m1;
let id2 = &m1 * &m;
|
<|file_name|>RevokeDriver.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* # Copyright 2015 InfinitiesSoft Solutions Inc.
* #
* # Licensed under the Apache License, Version 2.0 (the "License"); you may
* # not use this file except in compliance with the License. You may obtain
* # a copy of the License at
* #
* # http://www.apache.org/licenses/LICENSE-2.0
* #
* # Unless required by applicable law or agreed to in writing, software
* # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* # License for the specific language governing permissions and limitations
* # under the License.
*******************************************************************************/
package com.infinities.keystone4j.contrib.revoke.driver;
import java.util.Calendar;
import java.util.List;
import com.infinities.keystone4j.contrib.revoke.model.RevokeEvent;
public interface RevokeDriver {
<|fim▁hole|> void revoke(RevokeEvent event);
}<|fim▁end|> | // lastFetch=null
List<RevokeEvent> getEvents(Calendar lastFetch);
|
<|file_name|>default_test.go<|end_file_name|><|fim▁begin|>package test
import (
"net/http"
"net/http/httptest"
"testing"
"runtime"
"path/filepath"
_ "github.com/dvwallin/gopress/routers"
"github.com/astaxie/beego"
. "github.com/smartystreets/goconvey/convey"
)
func init() {
_, file, _, _ := runtime.Caller(1)
apppath, _ := filepath.Abs(filepath.Dir(filepath.Join(file, ".." + string(filepath.Separator))))
beego.TestBeegoInit(apppath)
}
// TestMain is a sample to run an endpoint test
func TestMain(t *testing.T) {
r, _ := http.NewRequest("GET", "/", nil)
w := httptest.NewRecorder()
beego.BeeApp.Handlers.ServeHTTP(w, r)
beego.Trace("testing", "TestMain", "Code[%d]\n%s", w.Code, w.Body.String())
Convey("Subject: Test Station Endpoint\n", t, func() {
Convey("Status Code Should Be 200", func() {
So(w.Code, ShouldEqual, 200)<|fim▁hole|> So(w.Body.Len(), ShouldBeGreaterThan, 0)
})
})
}<|fim▁end|> | })
Convey("The Result Should Not Be Empty", func() { |
<|file_name|>exceptions.cpp<|end_file_name|><|fim▁begin|>// RUN: %clang_cc1 %s -triple=x86_64-apple-darwin10 -emit-llvm -o - -fcxx-exceptions -fexceptions | FileCheck %s
typedef typeof(sizeof(0)) size_t;
// This just shouldn't crash.
namespace test0 {
struct allocator {
allocator();
allocator(const allocator&);
~allocator();
};
void f();
void g(bool b, bool c) {
if (b) {
if (!c)
throw allocator();
return;
}
f();
}
}
namespace test1 {
struct A { A(int); A(int, int); ~A(); void *p; };
A *a() {
// CHECK: define [[A:%.*]]* @_ZN5test11aEv()
// CHECK: [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
// CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
// CHECK-NEXT: invoke void @_ZN5test11AC1Ei([[A]]* [[CAST]], i32 5)
// CHECK: ret [[A]]* [[CAST]]
// CHECK: call void @_ZdlPv(i8* [[NEW]])
return new A(5);
}
A *b() {
// CHECK: define [[A:%.*]]* @_ZN5test11bEv()
// CHECK: [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
// CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
// CHECK-NEXT: [[FOO:%.*]] = invoke i32 @_ZN5test13fooEv()
// CHECK: invoke void @_ZN5test11AC1Ei([[A]]* [[CAST]], i32 [[FOO]])
// CHECK: ret [[A]]* [[CAST]]
// CHECK: call void @_ZdlPv(i8* [[NEW]])
extern int foo();
return new A(foo());
}
struct B { B(); ~B(); operator int(); int x; };
B makeB();
A *c() {
// CHECK: define [[A:%.*]]* @_ZN5test11cEv()
// CHECK: [[ACTIVE:%.*]] = alloca i1
// CHECK-NEXT: [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
// CHECK-NEXT: store i1 true, i1* [[ACTIVE]]
// CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
// CHECK-NEXT: invoke void @_ZN5test11BC1Ev([[B:%.*]]* [[T0:%.*]])
// CHECK: [[T1:%.*]] = getelementptr inbounds [[B]]* [[T0]], i32 0, i32 0
// CHECK-NEXT: [[T2:%.*]] = load i32* [[T1]], align 4
// CHECK-NEXT: invoke void @_ZN5test11AC1Ei([[A]]* [[CAST]], i32 [[T2]])
// CHECK: store i1 false, i1* [[ACTIVE]]
// CHECK-NEXT: invoke void @_ZN5test11BD1Ev([[B]]* [[T0]])
// CHECK: ret [[A]]* [[CAST]]
// CHECK: [[ISACTIVE:%.*]] = load i1* [[ACTIVE]]
// CHECK-NEXT: br i1 [[ISACTIVE]]
// CHECK: call void @_ZdlPv(i8* [[NEW]])
return new A(B().x);
}
// rdar://11904428
// Terminate landing pads should call __cxa_begin_catch first.
// CHECK: define linkonce_odr hidden void @__clang_call_terminate(i8*) [[NI_NR_NUW:#[0-9]+]]
// CHECK-NEXT: [[T0:%.*]] = call i8* @__cxa_begin_catch(i8* %0) [[NUW:#[0-9]+]]
// CHECK-NEXT: call void @_ZSt9terminatev() [[NR_NUW:#[0-9]+]]
// CHECK-NEXT: unreachable
A *d() {
// CHECK: define [[A:%.*]]* @_ZN5test11dEv()
// CHECK: [[ACTIVE:%.*]] = alloca i1
// CHECK-NEXT: [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
// CHECK-NEXT: store i1 true, i1* [[ACTIVE]]
// CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
// CHECK-NEXT: invoke void @_ZN5test11BC1Ev([[B:%.*]]* [[T0:%.*]])
// CHECK: [[T1:%.*]] = invoke i32 @_ZN5test11BcviEv([[B]]* [[T0]])
// CHECK: invoke void @_ZN5test11AC1Ei([[A]]* [[CAST]], i32 [[T1]])
// CHECK: store i1 false, i1* [[ACTIVE]]
// CHECK-NEXT: invoke void @_ZN5test11BD1Ev([[B]]* [[T0]])
// CHECK: ret [[A]]* [[CAST]]
// CHECK: [[ISACTIVE:%.*]] = load i1* [[ACTIVE]]
// CHECK-NEXT: br i1 [[ISACTIVE]]
// CHECK: call void @_ZdlPv(i8* [[NEW]])
return new A(B());
}
A *e() {
// CHECK: define [[A:%.*]]* @_ZN5test11eEv()
// CHECK: [[ACTIVE:%.*]] = alloca i1
// CHECK-NEXT: [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
// CHECK-NEXT: store i1 true, i1* [[ACTIVE]]
// CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
// CHECK-NEXT: invoke void @_ZN5test11BC1Ev([[B:%.*]]* [[T0:%.*]])
// CHECK: [[T1:%.*]] = invoke i32 @_ZN5test11BcviEv([[B]]* [[T0]])
// CHECK: invoke void @_ZN5test11BC1Ev([[B]]* [[T2:%.*]])
// CHECK: [[T3:%.*]] = invoke i32 @_ZN5test11BcviEv([[B]]* [[T2]])
// CHECK: invoke void @_ZN5test11AC1Eii([[A]]* [[CAST]], i32 [[T1]], i32 [[T3]])
// CHECK: store i1 false, i1* [[ACTIVE]]
// CHECK-NEXT: invoke void @_ZN5test11BD1Ev([[B]]* [[T2]])
// CHECK: invoke void @_ZN5test11BD1Ev([[B]]* [[T0]])
// CHECK: ret [[A]]* [[CAST]]
// CHECK: [[ISACTIVE:%.*]] = load i1* [[ACTIVE]]
// CHECK-NEXT: br i1 [[ISACTIVE]]
// CHECK: call void @_ZdlPv(i8* [[NEW]])
return new A(B(), B());
}
A *f() {
return new A(makeB().x);
}
A *g() {
return new A(makeB());
}
A *h() {
return new A(makeB(), makeB());
}
A *i() {
// CHECK: define [[A:%.*]]* @_ZN5test11iEv()
// CHECK: [[X:%.*]] = alloca [[A]]*, align 8
// CHECK: [[ACTIVE:%.*]] = alloca i1
// CHECK: [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
// CHECK-NEXT: store i1 true, i1* [[ACTIVE]]
// CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
// CHECK-NEXT: invoke void @_ZN5test15makeBEv([[B:%.*]]* sret [[T0:%.*]])
// CHECK: [[T1:%.*]] = invoke i32 @_ZN5test11BcviEv([[B]]* [[T0]])
// CHECK: invoke void @_ZN5test11AC1Ei([[A]]* [[CAST]], i32 [[T1]])
// CHECK: store i1 false, i1* [[ACTIVE]]
// CHECK-NEXT: store [[A]]* [[CAST]], [[A]]** [[X]], align 8
// CHECK: invoke void @_ZN5test15makeBEv([[B:%.*]]* sret [[T2:%.*]])
// CHECK: [[RET:%.*]] = load [[A]]** [[X]], align 8
// CHECK: invoke void @_ZN5test11BD1Ev([[B]]* [[T2]])
// CHECK: invoke void @_ZN5test11BD1Ev([[B]]* [[T0]])
// CHECK: ret [[A]]* [[RET]]
// CHECK: [[ISACTIVE:%.*]] = load i1* [[ACTIVE]]
// CHECK-NEXT: br i1 [[ISACTIVE]]
// CHECK: call void @_ZdlPv(i8* [[NEW]])
A *x;
return (x = new A(makeB()), makeB(), x);
}
}
namespace test2 {
struct A {
A(int); A(int, int); ~A();
void *p;
void *operator new(size_t);
void operator delete(void*, size_t);
};
A *a() {
// CHECK: define [[A:%.*]]* @_ZN5test21aEv()
// CHECK: [[NEW:%.*]] = call i8* @_ZN5test21AnwEm(i64 8)
// CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
// CHECK-NEXT: invoke void @_ZN5test21AC1Ei([[A]]* [[CAST]], i32 5)
// CHECK: ret [[A]]* [[CAST]]
// CHECK: invoke void @_ZN5test21AdlEPvm(i8* [[NEW]], i64 8)
// CHECK: call void @__clang_call_terminate(i8* {{%.*}}) [[NR_NUW]]
return new A(5);
}
}
namespace test3 {
struct A {
A(int); A(int, int); A(const A&); ~A();
void *p;
void *operator new(size_t, void*, double);
void operator delete(void*, void*, double);
};
void *foo();
double bar();
A makeA(), *makeAPtr();
A *a() {
// CHECK: define [[A:%.*]]* @_ZN5test31aEv()
// CHECK: [[FOO:%.*]] = call i8* @_ZN5test33fooEv()
// CHECK: [[BAR:%.*]] = call double @_ZN5test33barEv()
// CHECK: [[NEW:%.*]] = call i8* @_ZN5test31AnwEmPvd(i64 8, i8* [[FOO]], double [[BAR]])
// CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
// CHECK-NEXT: invoke void @_ZN5test31AC1Ei([[A]]* [[CAST]], i32 5)
// CHECK: ret [[A]]* [[CAST]]
// CHECK: invoke void @_ZN5test31AdlEPvS1_d(i8* [[NEW]], i8* [[FOO]], double [[BAR]])
// CHECK: call void @__clang_call_terminate(i8* {{%.*}}) [[NR_NUW]]
return new(foo(),bar()) A(5);
}
// rdar://problem/8439196
A *b(bool cond) {
// CHECK: define [[A:%.*]]* @_ZN5test31bEb(i1 zeroext
// CHECK: [[SAVED0:%.*]] = alloca i8*
// CHECK-NEXT: [[SAVED1:%.*]] = alloca i8*
// CHECK-NEXT: [[CLEANUPACTIVE:%.*]] = alloca i1
// CHECK: [[COND:%.*]] = trunc i8 {{.*}} to i1
// CHECK-NEXT: store i1 false, i1* [[CLEANUPACTIVE]]
// CHECK-NEXT: br i1 [[COND]]
return (cond ?
// CHECK: [[FOO:%.*]] = call i8* @_ZN5test33fooEv()
// CHECK-NEXT: [[NEW:%.*]] = call i8* @_ZN5test31AnwEmPvd(i64 8, i8* [[FOO]], double [[CONST:.*]])
// CHECK-NEXT: store i8* [[NEW]], i8** [[SAVED0]]
// CHECK-NEXT: store i8* [[FOO]], i8** [[SAVED1]]
// CHECK-NEXT: store i1 true, i1* [[CLEANUPACTIVE]]
// CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
// CHECK-NEXT: invoke void @_ZN5test35makeAEv([[A]]* sret [[CAST]])
// CHECK: br label
// -> cond.end
new(foo(),10.0) A(makeA()) :
// CHECK: [[MAKE:%.*]] = call [[A]]* @_ZN5test38makeAPtrEv()
// CHECK: br label
// -> cond.end
makeAPtr());
// cond.end:
// CHECK: [[RESULT:%.*]] = phi [[A]]* {{.*}}[[CAST]]{{.*}}[[MAKE]]
// CHECK: ret [[A]]* [[RESULT]]
// in the EH path:
// CHECK: [[ISACTIVE:%.*]] = load i1* [[CLEANUPACTIVE]]
// CHECK-NEXT: br i1 [[ISACTIVE]]
// CHECK: [[V0:%.*]] = load i8** [[SAVED0]]
// CHECK-NEXT: [[V1:%.*]] = load i8** [[SAVED1]]
// CHECK-NEXT: invoke void @_ZN5test31AdlEPvS1_d(i8* [[V0]], i8* [[V1]], double [[CONST]])
}
}
namespace test4 {
struct A {
A(int); A(int, int); ~A();
void *p;
void *operator new(size_t, void*, void*);
void operator delete(void*, size_t, void*, void*); // not a match
};
A *a() {
// CHECK: define [[A:%.*]]* @_ZN5test41aEv()
// CHECK: [[FOO:%.*]] = call i8* @_ZN5test43fooEv()
// CHECK-NEXT: [[BAR:%.*]] = call i8* @_ZN5test43barEv()
// CHECK-NEXT: [[NEW:%.*]] = call i8* @_ZN5test41AnwEmPvS1_(i64 8, i8* [[FOO]], i8* [[BAR]])
// CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
// CHECK-NEXT: call void @_ZN5test41AC1Ei([[A]]* [[CAST]], i32 5)
// CHECK-NEXT: ret [[A]]* [[CAST]]
extern void *foo(), *bar();
return new(foo(),bar()) A(5);
}
}
// PR7908
namespace test5 {
struct T { T(); ~T(); };
struct A {
A(const A &x, const T &t = T());
~A();
};
void foo();
// CHECK: define void @_ZN5test54testEv()
// CHECK: [[EXNSLOT:%.*]] = alloca i8*
// CHECK-NEXT: [[SELECTORSLOT:%.*]] = alloca i32
// CHECK-NEXT: [[A:%.*]] = alloca [[A_T:%.*]], align 1
// CHECK-NEXT: [[T:%.*]] = alloca [[T_T:%.*]], align 1
// CHECK-NEXT: invoke void @_ZN5test53fooEv()
// CHECK: [[EXN:%.*]] = load i8** [[EXNSLOT]]
// CHECK-NEXT: [[ADJ:%.*]] = call i8* @__cxa_get_exception_ptr(i8* [[EXN]])
// CHECK-NEXT: [[SRC:%.*]] = bitcast i8* [[ADJ]] to [[A_T]]*
// CHECK-NEXT: invoke void @_ZN5test51TC1Ev([[T_T]]* [[T]])
// CHECK: invoke void @_ZN5test51AC1ERKS0_RKNS_1TE([[A_T]]* [[A]], [[A_T]]* [[SRC]], [[T_T]]* [[T]])
// CHECK: invoke void @_ZN5test51TD1Ev([[T_T]]* [[T]])
// CHECK: call i8* @__cxa_begin_catch(i8* [[EXN]]) [[NUW]]
// CHECK-NEXT: invoke void @_ZN5test51AD1Ev([[A_T]]* [[A]])
// CHECK: call void @__cxa_end_catch()
void test() {
try {
foo();
} catch (A a) {
}
}
}
// PR9303: invalid assert on this
namespace test6 {
bool cond();
void test() {
try {
lbl:
if (cond()) goto lbl;
} catch (...) {
}
}
}
// PR9298
namespace test7 {
struct A { A(); ~A(); };
struct B {
// The throw() operator means that a bad allocation is signalled
// with a null return, which means that the initializer is
// evaluated conditionally.
static void *operator new(size_t size) throw();
B(const A&, B*);
~B();
};
B *test() {
// CHECK: define [[B:%.*]]* @_ZN5test74testEv()
// CHECK: [[OUTER_NEW:%.*]] = alloca i1
// CHECK-NEXT: alloca [[A:%.*]],
// CHECK-NEXT: alloca i8*
// CHECK-NEXT: alloca i32
// CHECK-NEXT: [[OUTER_A:%.*]] = alloca i1
// CHECK-NEXT: alloca i8*
// CHECK-NEXT: [[INNER_NEW:%.*]] = alloca i1
// CHECK-NEXT: alloca [[A]]
// CHECK-NEXT: [[INNER_A:%.*]] = alloca i1
// Allocate the outer object.
// CHECK-NEXT: [[NEW:%.*]] = call i8* @_ZN5test71BnwEm(
// CHECK-NEXT: icmp eq i8* [[NEW]], null
// These stores, emitted before the outermost conditional branch,
// deactivate the temporary cleanups.
// CHECK-NEXT: store i1 false, i1* [[OUTER_NEW]]
// CHECK-NEXT: store i1 false, i1* [[OUTER_A]]
// CHECK-NEXT: store i1 false, i1* [[INNER_NEW]]
// CHECK-NEXT: store i1 false, i1* [[INNER_A]]
// CHECK-NEXT: br i1
// We passed the first null check; activate that cleanup and continue.
// CHECK: store i1 true, i1* [[OUTER_NEW]]
// CHECK-NEXT: bitcast
// Create the first A temporary and activate that cleanup.
// CHECK-NEXT: invoke void @_ZN5test71AC1Ev(
// CHECK: store i1 true, i1* [[OUTER_A]]
// Allocate the inner object.
// CHECK-NEXT: [[NEW:%.*]] = call i8* @_ZN5test71BnwEm(
// CHECK-NEXT: icmp eq i8* [[NEW]], null
// CHECK-NEXT: br i1
// We passed the second null check; save that pointer, activate
// that cleanup, and continue.
// CHECK: store i8* [[NEW]]
// CHECK-NEXT: store i1 true, i1* [[INNER_NEW]]
// CHECK-NEXT: bitcast
// Build the second A temporary and activate that cleanup.
// CHECK-NEXT: invoke void @_ZN5test71AC1Ev(
// CHECK: store i1 true, i1* [[INNER_A]]
// Build the inner B object and deactivate the inner delete cleanup.
// CHECK-NEXT: invoke void @_ZN5test71BC1ERKNS_1AEPS0_(
// CHECK: store i1 false, i1* [[INNER_NEW]]
// CHECK: phi
// Build the outer B object and deactivate the outer delete cleanup.
// CHECK-NEXT: invoke void @_ZN5test71BC1ERKNS_1AEPS0_(
// CHECK: store i1 false, i1* [[OUTER_NEW]]
// CHECK: phi
// CHECK-NEXT: store [[B]]*
// Destroy the inner A object.
// CHECK-NEXT: load i1* [[INNER_A]]
// CHECK-NEXT: br i1
// CHECK: invoke void @_ZN5test71AD1Ev(
// Destroy the outer A object.
// CHECK: load i1* [[OUTER_A]]
// CHECK-NEXT: br i1
// CHECK: invoke void @_ZN5test71AD1Ev(
return new B(A(), new B(A(), 0));
}
}
// Just don't crash.
namespace test8 {
struct A {
// Having both of these is required to trigger the assert we're
// trying to avoid.
A(const A&);
A&operator=(const A&);
~A();
};
A makeA();
void test() {
throw makeA();
}
// CHECK: define void @_ZN5test84testEv
}
<|fim▁hole|>// happens if A::A() throws. (We were previously calling delete[] on
// a pointer to the first array element, not the pointer returned by new[].)
// PR10870
namespace test9 {
struct A {
A();
~A();
};
A* test() {
return new A[10];
}
// CHECK: define {{%.*}}* @_ZN5test94testEv
// CHECK: [[TEST9_NEW:%.*]] = call noalias i8* @_Znam
// CHECK: call void @_ZdaPv(i8* [[TEST9_NEW]])
}
// In a destructor with a function-try-block, a return statement in a
// catch handler behaves differently from running off the end of the
// catch handler. PR13102.
namespace test10 {
extern void cleanup();
extern bool suppress;
struct A { ~A(); };
A::~A() try { cleanup(); } catch (...) { return; }
// CHECK: define void @_ZN6test101AD1Ev(
// CHECK: invoke void @_ZN6test107cleanupEv()
// CHECK-NOT: rethrow
// CHECK: ret void
struct B { ~B(); };
B::~B() try { cleanup(); } catch (...) {}
// CHECK: define void @_ZN6test101BD1Ev(
// CHECK: invoke void @_ZN6test107cleanupEv()
// CHECK: call i8* @__cxa_begin_catch
// CHECK-NEXT: invoke void @__cxa_rethrow()
// CHECK: unreachable
struct C { ~C(); };
C::~C() try { cleanup(); } catch (...) { if (suppress) return; }
// CHECK: define void @_ZN6test101CD1Ev(
// CHECK: invoke void @_ZN6test107cleanupEv()
// CHECK: call i8* @__cxa_begin_catch
// CHECK-NEXT: load i8* @_ZN6test108suppressE, align 1
// CHECK-NEXT: trunc
// CHECK-NEXT: br i1
// CHECK: call void @__cxa_end_catch()
// CHECK-NEXT: br label
// CHECK: invoke void @__cxa_rethrow()
// CHECK: unreachable
}
// Ensure that an exception in a constructor destroys
// already-constructed array members. PR14514
namespace test11 {
struct A {
A();
~A() {}
};
struct C {
A single;
A array[2][3];
C();
};
C::C() {
throw 0;
}
// CHECK: define void @_ZN6test111CC2Ev(
// CHECK: [[THIS:%.*]] = load [[C:%.*]]** {{%.*}}
// Construct single.
// CHECK-NEXT: [[SINGLE:%.*]] = getelementptr inbounds [[C]]* [[THIS]], i32 0, i32 0
// CHECK-NEXT: call void @_ZN6test111AC1Ev([[A:%.*]]* [[SINGLE]])
// Construct array.
// CHECK-NEXT: [[ARRAY:%.*]] = getelementptr inbounds [[C]]* [[THIS]], i32 0, i32 1
// CHECK-NEXT: [[ARRAYBEGIN:%.*]] = getelementptr inbounds [2 x [3 x [[A]]]]* [[ARRAY]], i32 0, i32 0, i32 0
// CHECK-NEXT: [[ARRAYEND:%.*]] = getelementptr inbounds [[A]]* [[ARRAYBEGIN]], i64 6
// CHECK-NEXT: br label
// CHECK: [[CUR:%.*]] = phi [[A]]* [ [[ARRAYBEGIN]], {{%.*}} ], [ [[NEXT:%.*]], {{%.*}} ]
// CHECK-NEXT: invoke void @_ZN6test111AC1Ev([[A:%.*]]* [[CUR]])
// CHECK: [[NEXT]] = getelementptr inbounds [[A]]* [[CUR]], i64 1
// CHECK-NEXT: [[DONE:%.*]] = icmp eq [[A]]* [[NEXT]], [[ARRAYEND]]
// CHECK-NEXT: br i1 [[DONE]],
// throw 0;
// CHECK: invoke void @__cxa_throw(
// Landing pad 1, from constructor in array-initialization loop:
// CHECK: landingpad
// - First, destroy already-constructed bits of array.
// CHECK: [[EMPTY:%.*]] = icmp eq [[A]]* [[ARRAYBEGIN]], [[CUR]]
// CHECK-NEXT: br i1 [[EMPTY]]
// CHECK: [[AFTER:%.*]] = phi [[A]]* [ [[CUR]], {{%.*}} ], [ [[ELT:%.*]], {{%.*}} ]
// CHECK-NEXT: [[ELT]] = getelementptr inbounds [[A]]* [[AFTER]], i64 -1
// CHECK-NEXT: invoke void @_ZN6test111AD1Ev([[A]]* [[ELT]])
// CHECK: [[DONE:%.*]] = icmp eq [[A]]* [[ELT]], [[ARRAYBEGIN]]
// CHECK-NEXT: br i1 [[DONE]],
// - Next, chain to cleanup for single.
// CHECK: br label
// Landing pad 2, from throw site.
// CHECK: landingpad
// - First, destroy all of array.
// CHECK: [[ARRAYBEGIN:%.*]] = getelementptr inbounds [2 x [3 x [[A]]]]* [[ARRAY]], i32 0, i32 0, i32 0
// CHECK-NEXT: [[ARRAYEND:%.*]] = getelementptr inbounds [[A]]* [[ARRAYBEGIN]], i64 6
// CHECK-NEXT: br label
// CHECK: [[AFTER:%.*]] = phi [[A]]* [ [[ARRAYEND]], {{%.*}} ], [ [[ELT:%.*]], {{%.*}} ]
// CHECK-NEXT: [[ELT]] = getelementptr inbounds [[A]]* [[AFTER]], i64 -1
// CHECK-NEXT: invoke void @_ZN6test111AD1Ev([[A]]* [[ELT]])
// CHECK: [[DONE:%.*]] = icmp eq [[A]]* [[ELT]], [[ARRAYBEGIN]]
// CHECK-NEXT: br i1 [[DONE]],
// - Next, chain to cleanup for single.
// CHECK: br label
// Finally, the cleanup for single.
// CHECK: invoke void @_ZN6test111AD1Ev([[A]]* [[SINGLE]])
// CHECK: br label
// CHECK: resume
// (After this is a terminate landingpad.)
}
// CHECK: attributes [[NI_NR_NUW]] = { noinline noreturn nounwind }<|fim▁end|> | // Make sure we generate the correct code for the delete[] call which |
<|file_name|>Proxy.js<|end_file_name|><|fim▁begin|>/*--------------------------------------------------------------------------------+
- Dateiname: app/proxy/Proxy.js
- Beschreibung: JsonP Proxy
- Autor(en): Andreas Gärtner <[email protected]>
+--------------------------------------------------------------------------------+
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 3 of the License, or any later version.
+--------------------------------------------------------------------------------+
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, see <http://www.gnu.org/licenses/>.
+--------------------------------------------------------------------------------+
Inoffizielle deutsche Übersetzung (http://www.gnu.de/documents/gpl.de.html):
Dieses Programm ist freie Software. Sie können es unter den Bedingungen der
GNU General Public License, wie von der Free Software Foundation veröffentlicht,
weitergeben und/oder modifizieren, entweder gemäß Version 3 der Lizenz oder
jeder späteren Version.
+--------------------------------------------------------------------------------+
Die Veröffentlichung dieses Programms erfolgt in der Hoffnung, daß es Ihnen von
Nutzen sein wird, aber OHNE IRGENDEINE GARANTIE, sogar ohne die implizite Garantie
der MARKTREIFE oder der VERWENDBARKEIT FÜR EINEN BESTIMMTEN ZWECK. Details finden
Sie in der GNU General Public License.
Sie sollten ein Exemplar der GNU General Public License zusammen mit diesem Programm
erhalten haben. Falls nicht, siehe <http://www.gnu.org/licenses/>.
+--------------------------------------------------------------------------------+
*/
Ext.define('LearningApp.proxy.Proxy', {<|fim▁hole|>
config: {
url: "http://ilias-staging.mni.thm.de:8080/connector/ilias/",
//url: "http://localhost:8080/connector-service/ilias/",
//url: "https://quizapp.uni-giessen.de/connector/ilias/",
useDefaultXhrHeader: false,
withCredentials: true,
disableCaching: false,
method: 'GET'
},
/**
* removes custom headers from requests
*/
resetDefaultRequestHeaders: function() {
this.setDefaultHeaders(null);
},
/**
* checks online status of service
*/
check: function(callback) {
this.request({
url: this.getUrl() + "check",
success: function(response) {
                if(response.responseText === 'OK') {
callback.success.call(this, arguments);
}
},
failure: function(response) {
callback.failure.apply(this, arguments);
},
callback: function(response) {
callback.callback.apply(this, arguments);
},
scope: this
})
},
/**
* check login state
*/
checkLogin: function(callback) {
var me = this;
LearningApp.app.storageController.getLoggedInUserObj(function(loginObj) {
if(loginObj != null) me.setDefaultHeaders(loginObj.authObj);
});
},
/**
* perform login through basic authentication
* @param uname: username
* @param upass: password
*/
login: function(uname, upass, callback) {
this.request({
url: this.getUrl() + "login",
method : 'POST',
params: {
uname: uname,
upass: upass
},
success: function(response) {
callback.success.call(this, Ext.decode(response.responseText));
},
failure: function(response) {
callback.failure.apply(this, arguments);
},
scope: this
});
},
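    // Rough usage sketch for the login method above (credentials and handler
    // bodies are illustrative only):
    //
    //   var proxy = Ext.create('LearningApp.proxy.Proxy');
    //   proxy.login('student', 'secret', {
    //       success: function(loginObj) { console.log(loginObj); },
    //       failure: function() { console.log('login failed'); }
    //   });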
/**
* Gets the card index tree
* @param object with success-callback
* @return cardindex-objects, if found
* @return false, if nothing found
*/
getCardIndexTree: function(callback) {
this.request({
url: this.getUrl() + "1",
success: function(response) {
if (response.status === 204) {
callback.success.call(this, []);
} else {
callback.success.call(this, Ext.decode(response.responseText));
}
},
failure: function(response) {
Ext.Viewport.setMasked(false);
if (response.status === 401) {
Ext.Msg.alert('Login', 'Ihre Logindaten sind abgelaufen. Bitte erneut einloggen.', function() {
LearningApp.app.getController('LoginController').logout();
});
} else {
Ext.Msg.alert('Offline-Modus', 'Das Programm wird im Offline-Modus ausgeführt.');
callback.failure.apply(this, arguments);
}
},
scope: this
});
},
/**
     * Gets randomly chosen questions from a test
* @param object with success-callback
* @return cardindex-objects, if found
* @return false, if nothing found
*/
getRandomQuestions: function(refId, callbacks) {
this.request({
url: this.getUrl() + "question/" + refId,
success: function(response) {
if (response.status === 204) {
callbacks.success.call(this, {});
} else {
callbacks.success.call(this, {
refId: refId,
data: Ext.decode(response.responseText)
});
}
},
failure: function(response) {
if (response.status === 401) {
callbacks.unauthorized.apply(this, arguments);
} else if (response.status === 404) {
callbacks.notFound.apply(this, arguments);
                } else if (response.status === 403) {
callbacks.forbidden.apply(this, arguments);
} else {
callbacks.failure.apply(this, arguments);
}
},
scope: this
});
},
/**
* Gets all questions from a test
* @param object with success-callback
* @return cardindex-objects, if found
* @return false, if nothing found
*/
getAllQuestions: function(refId, callbacks) {
this.request({
url: this.getUrl() + "question/" + refId + "?source=ALL",
success: function(response) {
if (response.status === 204) {
callbacks.success.call(this, {});
} else {
callbacks.success.call(this, {
refId: refId,
data: Ext.decode(response.responseText)
});
}
},
failure: function(response) {
if (response.status === 401) {
callbacks.unauthorized.apply(this, arguments);
} else if (response.status === 404) {
callbacks.notFound.apply(this, arguments);
                } else if (response.status === 403) {
callbacks.forbidden.apply(this, arguments);
} else {
callbacks.failure.apply(this, arguments);
}
},
scope: this
});
}
});<|fim▁end|> | extend: 'Ext.data.Connection',
xtype: 'proxy', |
<|file_name|>ConfigParser.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import with_statement
from time import sleep
from os.path import exists, join
from shutil import copy
from traceback import print_exc
from utils import chmod
# ignore these plugin configs, mainly because plugins were wiped out
IGNORE = (
"FreakshareNet", "SpeedManager", "ArchiveTo", "ShareCx", ('hooks', 'UnRar'),
'EasyShareCom', 'FlyshareCz'
)
CONF_VERSION = 1
class ConfigParser:
"""
    holds and manages the configuration
    current dict layout:
    {
        section : {
            option : {
                value:
                type:
                desc:
            }
            desc:
        }
    }
"""
def __init__(self):
"""Constructor"""
self.config = {} # the config values
self.plugin = {} # the config for plugins
self.oldRemoteData = {}
self.pluginCB = None # callback when plugin config value is changed
self.checkVersion()
self.readConfig()
self.deleteOldPlugins()
def checkVersion(self, n=0):
"""determines if config need to be copied"""
try:
if not exists("pyload.conf"):
copy(join(pypath, "module", "config", "default.conf"), "pyload.conf")
if not exists("plugin.conf"):
f = open("plugin.conf", "wb")
f.write("version: " + str(CONF_VERSION))
f.close()
f = open("pyload.conf", "rb")
v = f.readline()
f.close()
v = v[v.find(":") + 1:].strip()
if not v or int(v) < CONF_VERSION:
copy(join(pypath, "module", "config", "default.conf"), "pyload.conf")
print "Old version of config was replaced"
f = open("plugin.conf", "rb")
v = f.readline()
f.close()
v = v[v.find(":") + 1:].strip()
if not v or int(v) < CONF_VERSION:
f = open("plugin.conf", "wb")
f.write("version: " + str(CONF_VERSION))
f.close()
print "Old version of plugin-config replaced"
except:
if n < 3:
sleep(0.3)
self.checkVersion(n + 1)
else:
raise
def readConfig(self):
"""reads the config file"""
self.config = self.parseConfig(join(pypath, "module", "config", "default.conf"))
self.plugin = self.parseConfig("plugin.conf")
try:
homeconf = self.parseConfig("pyload.conf")
if "username" in homeconf["remote"]:
if "password" in homeconf["remote"]:
self.oldRemoteData = {"username": homeconf["remote"]["username"]["value"],
"password": homeconf["remote"]["username"]["value"]}
del homeconf["remote"]["password"]
del homeconf["remote"]["username"]
self.updateValues(homeconf, self.config)
except Exception, e:
print "Config Warning"
print_exc()
def parseConfig(self, config):
"""parses a given configfile"""
f = open(config)
config = f.read()
config = config.splitlines()[1:]
conf = {}
section, option, value, typ, desc = "", "", "", "", ""
listmode = False
for line in config:
comment = line.rfind("#")
if line.find(":", comment) < 0 > line.find("=", comment) and comment > 0 and line[comment - 1].isspace():
line = line.rpartition("#") # removes comments
if line[1]:
line = line[0]
else:
line = line[2]
line = line.strip()
try:
if line == "":
continue
elif line.endswith(":"):
section, none, desc = line[:-1].partition('-')
section = section.strip()
desc = desc.replace('"', "").strip()
conf[section] = {"desc": desc}
else:
if listmode:
if line.endswith("]"):
listmode = False
line = line.replace("]", "")
value += [self.cast(typ, x.strip()) for x in line.split(",") if x]
if not listmode:
conf[section][option] = {"desc": desc,
"type": typ,
"value": value}
else:
content, none, value = line.partition("=")
content, none, desc = content.partition(":")
desc = desc.replace('"', "").strip()
typ, none, option = content.strip().rpartition(" ")
value = value.strip()
if value.startswith("["):
if value.endswith("]"):
listmode = False
value = value[:-1]
else:
listmode = True
value = [self.cast(typ, x.strip()) for x in value[1:].split(",") if x]
else:
value = self.cast(typ, value)
if not listmode:
conf[section][option] = {"desc": desc,
"type": typ,
"value": value}
except Exception, e:
print "Config Warning"
print_exc()
f.close()
return conf
def updateValues(self, config, dest):
"""sets the config values from a parsed config file to values in destination"""
for section in config.iterkeys():
if section in dest:
for option in config[section].iterkeys():
if option in ("desc", "outline"): continue
if option in dest[section]:
dest[section][option]["value"] = config[section][option]["value"]
#else:
# dest[section][option] = config[section][option]
#else:
# dest[section] = config[section]
def saveConfig(self, config, filename):
"""saves config to filename"""
with open(filename, "wb") as f:
chmod(filename, 0600)
f.write("version: %i \n" % CONF_VERSION)
for section in config.iterkeys():
f.write('\n%s - "%s":\n' % (section, config[section]["desc"]))
for option, data in config[section].iteritems():
if option in ("desc", "outline"): continue
if isinstance(data["value"], list):
value = "[ \n"
for x in data["value"]:
value += "\t\t" + str(x) + ",\n"
value += "\t\t]\n"
else:
if type(data["value"]) in (str, unicode):
value = data["value"] + "\n"
else:
value = str(data["value"]) + "\n"
try:
f.write('\t%s %s : "%s" = %s' % (data["type"], option, data["desc"], value))
except UnicodeEncodeError:
f.write('\t%s %s : "%s" = %s' % (data["type"], option, data["desc"], value.encode("utf8")))
def cast(self, typ, value):
"""cast value to given format"""
if type(value) not in (str, unicode):
return value
elif typ == "int":
return int(value)
elif typ == "bool":
return True if value.lower() in ("1", "true", "on", "an", "yes") else False
elif typ == "time":
if not value: value = "0:00"
if not ":" in value: value += ":00"
return value
elif typ in ("str", "file", "folder"):
try:
return value.encode("utf8")
except:
return value
else:
return value
def save(self):
"""saves the configs to disk"""
self.saveConfig(self.config, "pyload.conf")
self.saveConfig(self.plugin, "plugin.conf")
def __getitem__(self, section):
"""provides dictonary like access: c['section']['option']"""
return Section(self, section)
def get(self, section, option):
"""get value"""
val = self.config[section][option]["value"]
try:
if type(val) in (str, unicode):
return val.decode("utf8")
else:
return val
except:
return val
def set(self, section, option, value):
"""set value"""
value = self.cast(self.config[section][option]["type"], value)
self.config[section][option]["value"] = value
self.save()
def getPlugin(self, plugin, option):
"""gets a value for a plugin"""
val = self.plugin[plugin][option]["value"]
try:
if type(val) in (str, unicode):
return val.decode("utf8")
else:
return val
except:
return val
def setPlugin(self, plugin, option, value):
"""sets a value for a plugin"""
value = self.cast(self.plugin[plugin][option]["type"], value)
if self.pluginCB: self.pluginCB(plugin, option, value)
self.plugin[plugin][option]["value"] = value
self.save()
def getMetaData(self, section, option):
""" get all config data for an option """
return self.config[section][option]
def addPluginConfig(self, name, config, outline=""):
"""adds config options with tuples (name, type, desc, default)"""
if name not in self.plugin:
conf = {"desc": name,
"outline": outline}
self.plugin[name] = conf
else:
conf = self.plugin[name]
conf["outline"] = outline
for item in config:
if item[0] in conf:
conf[item[0]]["type"] = item[1]
conf[item[0]]["desc"] = item[2]
else:
conf[item[0]] = {
"desc": item[2],
"type": item[1],
"value": self.cast(item[1], item[3])
}
values = [x[0] for x in config] + ["desc", "outline"]
#delete old values
for item in conf.keys():
if item not in values:
del conf[item]
def deleteConfig(self, name):
"""Removes a plugin config"""
if name in self.plugin:
del self.plugin[name]
def deleteOldPlugins(self):
""" remove old plugins from config """
for name in IGNORE:
if name in self.plugin:
del self.plugin[name]
class Section:
"""provides dictionary like access for configparser"""<|fim▁hole|> self.parser = parser
self.section = section
def __getitem__(self, item):
"""getitem"""
return self.parser.get(self.section, item)
def __setitem__(self, item, value):
"""setitem"""
self.parser.set(self.section, item, value)
if __name__ == "__main__":
pypath = ""
from time import time
a = time()
c = ConfigParser()
b = time()
print "sec", b - a
print c.config
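    # example lookup, assuming default.conf contains a 'general' section with a 'language' option:
    # print c['general']['language']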
c.saveConfig(c.config, "user.conf")<|fim▁end|> |
def __init__(self, parser, section):
"""Constructor""" |
<|file_name|>MllpComponent.java<|end_file_name|><|fim▁begin|>/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.mllp;
import java.util.Map;
import org.apache.camel.CamelContext;
import org.apache.camel.Endpoint;
import org.apache.camel.impl.UriEndpointComponent;
/**
* Represents the component that manages {@link MllpEndpoint}.
*/
public class MllpComponent extends UriEndpointComponent {
public static final String MLLP_LOG_PHI_PROPERTY = "org.apache.camel.component.mllp.logPHI";
public MllpComponent() {
super(MllpEndpoint.class);
}
public MllpComponent(CamelContext context) {
super(context, MllpEndpoint.class);
}
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
MllpEndpoint endpoint = new MllpEndpoint(uri, this);
setProperties(endpoint, parameters);
// mllp://hostname:port
String hostPort;
// look for options
int optionsStartIndex = uri.indexOf('?');
if (-1 == optionsStartIndex) {
// No options - just get the host/port stuff
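            // the scheme prefix "mllp://" is 7 characters long, so the host/port part starts at index 7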
hostPort = uri.substring(7);
} else {
hostPort = uri.substring(7, optionsStartIndex);
}
// Make sure it has a host - may just be a port
int colonIndex = hostPort.indexOf(':');<|fim▁hole|> endpoint.setPort(Integer.parseInt(hostPort.substring(colonIndex + 1)));
} else {
// No host specified - leave the default host and set the port
endpoint.setPort(Integer.parseInt(hostPort.substring(colonIndex + 1)));
}
return endpoint;
}
}<|fim▁end|> | if (-1 != colonIndex) {
endpoint.setHostname(hostPort.substring(0, colonIndex)); |
<|file_name|>scoping_rules_lifetimes.rs<|end_file_name|><|fim▁begin|>// Lifetimes are annotated below with lines denoting the creation
// and destruction of each variable.
// `i` has the longest lifetime because its scope entirely encloses
// both `borrow1` and `borrow2`. The duration of `borrow1` compared
// to `borrow2` is irrelevant since they are disjoint.
pub fn main() {
let i = 3; // Lifetime for `i` starts. ────────────────┐
// │
{ // │
let borrow1 = &i; // `borrow1` lifetime starts. ──┐│
// ││
println!("borrow1: {}", borrow1); // ││
    } // `borrow1` ends. ─────────────────────────────────┘│
// │
// │
{ // │<|fim▁hole|> // │
} // Lifetime ends. ─────────────────────────────────────┘<|fim▁end|> | let borrow2 = &i; // `borrow2` lifetime starts. ──┐│
// ││
println!("borrow2: {}", borrow2); // ││
} // `borrow2` ends. ─────────────────────────────────┘│ |
<|file_name|>test_UrbanRunoffLiter.py<|end_file_name|><|fim▁begin|>import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Runoff import UrbRunoffLiter
class TestUrbanRunoffLiter(VariableUnitTest):
def test_UrbanRunoffLiter(self):
z = self.z
np.testing.assert_array_almost_equal(<|fim▁hole|> decimal=7)<|fim▁end|> | UrbRunoffLiter.UrbRunoffLiter_f(z.NYrs, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA),
UrbRunoffLiter.UrbRunoffLiter(z.NYrs, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA), |
<|file_name|>mysql_impl.py<|end_file_name|><|fim▁begin|># Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import glob
import os
import pexpect
import re
import tempfile
from trove.guestagent.strategies.restore import base
from trove.openstack.common import log as logging
from trove.common import exception
from trove.common import utils
import trove.guestagent.datastore.mysql.service as dbaas
from trove.common.i18n import _ # noqa
LOG = logging.getLogger(__name__)
class MySQLRestoreMixin(object):
"""Common utils for restoring MySQL databases."""
RESET_ROOT_RETRY_TIMEOUT = 100
RESET_ROOT_SLEEP_INTERVAL = 10
# Reset the root password in a single transaction with 'FLUSH PRIVILEGES'
    # to ensure we never leave the database wide open without 'grant tables'.
RESET_ROOT_MYSQL_COMMANDS = ("START TRANSACTION;",
"UPDATE `mysql`.`user` SET"
" `password`=PASSWORD('')"
" WHERE `user`='root';",
"FLUSH PRIVILEGES;",
"COMMIT;")
# This is a suffix MySQL appends to the file name given in
# the '--log-error' startup parameter.
_ERROR_LOG_SUFFIX = '.err'
_ERROR_MESSAGE_PATTERN = re.compile("^ERROR:\s+.+$")
def mysql_is_running(self):
try:
utils.execute_with_timeout("/usr/bin/mysqladmin", "ping")
LOG.debug("MySQL is up and running.")
return True
except exception.ProcessExecutionError:<|fim▁hole|> LOG.debug("MySQL is not running.")
return False
def mysql_is_not_running(self):
try:
utils.execute_with_timeout("/usr/bin/pgrep", "mysqld")
LOG.info("MySQL is still running.")
return False
except exception.ProcessExecutionError:
LOG.debug("MySQL is not running.")
return True
def poll_until_then_raise(self, event, exc):
try:
utils.poll_until(event,
sleep_time=self.RESET_ROOT_SLEEP_INTERVAL,
time_out=self.RESET_ROOT_RETRY_TIMEOUT)
except exception.PollTimeOut:
raise exc
def _start_mysqld_safe_with_init_file(self, init_file, err_log_file):
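        # Launch a temporary mysqld_safe with grant tables and networking disabled so the
        # init file's password-reset statements can run without authentication.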
child = pexpect.spawn("sudo mysqld_safe"
" --skip-grant-tables"
" --skip-networking"
" --init-file='%s'"
" --log-error='%s'" %
(init_file.name, err_log_file.name)
)
try:
i = child.expect(['Starting mysqld daemon'])
if i == 0:
LOG.info(_("Starting MySQL"))
except pexpect.TIMEOUT:
LOG.exception(_("Got a timeout launching mysqld_safe"))
finally:
# There is a race condition here where we kill mysqld before
# the init file been executed. We need to ensure mysqld is up.
#
# mysqld_safe will start even if init-file statement(s) fail.
# We therefore also check for errors in the log file.
self.poll_until_then_raise(
self.mysql_is_running,
base.RestoreError("Reset root password failed:"
" mysqld did not start!"))
first_err_message = self._find_first_error_message(err_log_file)
if first_err_message:
raise base.RestoreError("Reset root password failed: %s"
% first_err_message)
LOG.info(_("Root password reset successfully."))
LOG.debug("Cleaning up the temp mysqld process.")
utils.execute_with_timeout("mysqladmin", "-uroot", "shutdown")
LOG.debug("Polling for shutdown to complete.")
try:
utils.poll_until(self.mysql_is_not_running,
sleep_time=self.RESET_ROOT_SLEEP_INTERVAL,
time_out=self.RESET_ROOT_RETRY_TIMEOUT)
LOG.debug("Database successfully shutdown")
except exception.PollTimeOut:
LOG.debug("Timeout shutting down database "
"- performing killall on mysqld_safe.")
utils.execute_with_timeout("killall", "mysqld_safe",
root_helper="sudo",
run_as_root=True)
self.poll_until_then_raise(
self.mysql_is_not_running,
base.RestoreError("Reset root password failed: "
"mysqld did not stop!"))
def reset_root_password(self):
with tempfile.NamedTemporaryFile() as init_file:
utils.execute_with_timeout("sudo", "chmod", "a+r", init_file.name)
self._writelines_one_per_line(init_file,
self.RESET_ROOT_MYSQL_COMMANDS)
# Do not attempt to delete the file as the 'trove' user.
# The process writing into it may have assumed its ownership.
# Only owners can delete temporary
# files (restricted deletion).
err_log_file = tempfile.NamedTemporaryFile(
suffix=self._ERROR_LOG_SUFFIX,
delete=False)
try:
self._start_mysqld_safe_with_init_file(init_file, err_log_file)
finally:
err_log_file.close()
MySQLRestoreMixin._delete_file(err_log_file.name)
def _writelines_one_per_line(self, fp, lines):
fp.write(os.linesep.join(lines))
fp.flush()
def _find_first_error_message(self, fp):
if MySQLRestoreMixin._is_non_zero_file(fp):
return MySQLRestoreMixin._find_first_pattern_match(
fp,
self._ERROR_MESSAGE_PATTERN
)
return None
@classmethod
    def _delete_file(cls, file_path):
"""Force-remove a given file as root.
Do not raise an exception on failure.
"""
if os.path.isfile(file_path):
try:
utils.execute_with_timeout("rm", "-f", file_path,
run_as_root=True,
root_helper="sudo")
except Exception:
LOG.exception("Could not remove file: '%s'" % file_path)
@classmethod
    def _is_non_zero_file(cls, fp):
file_path = fp.name
return os.path.isfile(file_path) and (os.path.getsize(file_path) > 0)
@classmethod
    def _find_first_pattern_match(cls, fp, pattern):
for line in fp:
if pattern.match(line):
return line
return None
class MySQLDump(base.RestoreRunner, MySQLRestoreMixin):
"""Implementation of Restore Strategy for MySQLDump."""
__strategy_name__ = 'mysqldump'
base_restore_cmd = 'sudo mysql'
class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin):
"""Implementation of Restore Strategy for InnoBackupEx."""
__strategy_name__ = 'innobackupex'
base_restore_cmd = 'sudo xbstream -x -C %(restore_location)s'
base_prepare_cmd = ('sudo innobackupex --apply-log %(restore_location)s'
' --defaults-file=%(restore_location)s/backup-my.cnf'
' --ibbackup xtrabackup 2>/tmp/innoprepare.log')
def __init__(self, *args, **kwargs):
super(InnoBackupEx, self).__init__(*args, **kwargs)
self.prepare_cmd = self.base_prepare_cmd % kwargs
self.prep_retcode = None
def pre_restore(self):
app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
app.stop_db()
LOG.info(_("Cleaning out restore location: %s."),
self.restore_location)
utils.execute_with_timeout("chmod", "-R", "0777",
self.restore_location,
root_helper="sudo",
run_as_root=True)
utils.clean_out(self.restore_location)
def _run_prepare(self):
LOG.debug("Running innobackupex prepare: %s.", self.prepare_cmd)
self.prep_retcode = utils.execute(self.prepare_cmd, shell=True)
LOG.info(_("Innobackupex prepare finished successfully."))
def post_restore(self):
self._run_prepare()
utils.execute_with_timeout("chown", "-R", "-f", "mysql",
self.restore_location,
root_helper="sudo",
run_as_root=True)
self._delete_old_binlogs()
self.reset_root_password()
app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
app.start_mysql()
def _delete_old_binlogs(self):
files = glob.glob(os.path.join(self.restore_location, "ib_logfile*"))
for f in files:
os.unlink(f)
class InnoBackupExIncremental(InnoBackupEx):
__strategy_name__ = 'innobackupexincremental'
incremental_prep = ('sudo innobackupex'
' --apply-log'
' --redo-only'
' %(restore_location)s'
' --defaults-file=%(restore_location)s/backup-my.cnf'
' --ibbackup xtrabackup'
' %(incremental_args)s'
' 2>/tmp/innoprepare.log')
def __init__(self, *args, **kwargs):
super(InnoBackupExIncremental, self).__init__(*args, **kwargs)
self.restore_location = kwargs.get('restore_location')
self.content_length = 0
def _incremental_restore_cmd(self, incremental_dir):
"""Return a command for a restore with a incremental location."""
args = {'restore_location': incremental_dir}
return (self.decrypt_cmd +
self.unzip_cmd +
(self.base_restore_cmd % args))
def _incremental_prepare_cmd(self, incremental_dir):
if incremental_dir is not None:
incremental_arg = '--incremental-dir=%s' % incremental_dir
else:
incremental_arg = ''
args = {
'restore_location': self.restore_location,
'incremental_args': incremental_arg,
}
return self.incremental_prep % args
def _incremental_prepare(self, incremental_dir):
prepare_cmd = self._incremental_prepare_cmd(incremental_dir)
LOG.debug("Running innobackupex prepare: %s.", prepare_cmd)
utils.execute(prepare_cmd, shell=True)
LOG.info(_("Innobackupex prepare finished successfully."))
def _incremental_restore(self, location, checksum):
"""Recursively apply backups from all parents.
If we are the parent then we restore to the restore_location and
we apply the logs to the restore_location only.
Otherwise if we are an incremental we restore to a subfolder to
prevent stomping on the full restore data. Then we run apply log
with the '--incremental-dir' flag
"""
metadata = self.storage.load_metadata(location, checksum)
incremental_dir = None
if 'parent_location' in metadata:
LOG.info(_("Restoring parent: %(parent_location)s"
" checksum: %(parent_checksum)s.") % metadata)
parent_location = metadata['parent_location']
parent_checksum = metadata['parent_checksum']
# Restore parents recursively so backup are applied sequentially
self._incremental_restore(parent_location, parent_checksum)
# for *this* backup set the incremental_dir
# just use the checksum for the incremental path as it is
# sufficiently unique /var/lib/mysql/<checksum>
incremental_dir = os.path.join(self.restore_location, checksum)
utils.execute("mkdir", "-p", incremental_dir,
root_helper="sudo",
run_as_root=True)
command = self._incremental_restore_cmd(incremental_dir)
else:
# The parent (full backup) use the same command from InnobackupEx
# super class and do not set an incremental_dir.
command = self.restore_cmd
self.content_length += self._unpack(location, checksum, command)
self._incremental_prepare(incremental_dir)
# Delete unpacked incremental backup metadata
if incremental_dir:
utils.execute("rm", "-fr", incremental_dir, root_helper="sudo",
run_as_root=True)
def _run_restore(self):
"""Run incremental restore.
First grab all parents and prepare them with '--redo-only'. After
all backups are restored the super class InnoBackupEx post_restore
method is called to do the final prepare with '--apply-log'
"""
self._incremental_restore(self.location, self.checksum)
return self.content_length<|fim▁end|> | |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! An adapter providing access to IP cameras. Currently only the following IP cameras are
//! supported: DLink DCS-5010L, DLink DCS-5020L and DLink DCS-5025.
//!
extern crate serde_json;
mod api;
mod upnp_listener;
use config_store::ConfigService;
use foxbox_taxonomy::api::{Error, InternalError, User};
use foxbox_taxonomy::manager::*;
use foxbox_taxonomy::selector::*;
use foxbox_taxonomy::services::*;
use foxbox_taxonomy::values::{ Value, Json, Binary, Type, TypeError};
use traits::Controller;
use transformable_channels::mpsc::*;
use self::api::*;
use self::upnp_listener::IpCameraUpnpListener;
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, Mutex};
const CUSTOM_PROPERTY_MANUFACTURER: &'static str = "manufacturer";
const CUSTOM_PROPERTY_MODEL: &'static str = "model";
const CUSTOM_PROPERTY_NAME: &'static str = "name";
const CUSTOM_PROPERTY_URL: &'static str = "url";
const CUSTOM_PROPERTY_UDN: &'static str = "udn";
static ADAPTER_NAME: &'static str = "IP Camera adapter";
static ADAPTER_VENDOR: &'static str = "[email protected]";
static ADAPTER_VERSION: [u32; 4] = [0, 0, 0, 0];
static SNAPSHOT_DIR: &'static str = "snapshots";
pub type IpCameraServiceMap = Arc<Mutex<IpCameraServiceMapInternal>>;
pub struct IpCameraServiceMapInternal {
getters: HashMap<Id<Getter>, Arc<IpCamera>>,
setters: HashMap<Id<Setter>, Arc<IpCamera>>,
snapshot_root: String,
}
pub struct IPCameraAdapter {
services: IpCameraServiceMap,
}
impl IPCameraAdapter {
pub fn id() -> Id<AdapterId> {
Id::new("[email protected]")
}
pub fn init<C>(adapt: &Arc<AdapterManager>, controller: C) -> Result<(), Error>
where C: Controller<|fim▁hole|> {
let services = Arc::new(Mutex::new(IpCameraServiceMapInternal {
getters: HashMap::new(),
setters: HashMap::new(),
snapshot_root: controller.get_profile().path_for(SNAPSHOT_DIR),
}));
let ip_camera_adapter = Arc::new(IPCameraAdapter {
services: services.clone(),
});
try!(adapt.add_adapter(ip_camera_adapter));
// The UPNP listener will add camera service for discovered cameras
let upnp = controller.get_upnp_manager();
let listener = IpCameraUpnpListener::new(adapt, services, &controller.get_config());
upnp.add_listener("IpCameraTaxonomy".to_owned(), listener);
// The UPNP service searches for ssdp:all which the D-Link cameras
// don't seem to respond to. So we search for this instead, which
// they do respond to.
upnp.search(Some("urn:cellvision:service:Null:1".to_owned())).unwrap();
Ok(())
}
pub fn init_service(adapt: &Arc<AdapterManager>, services: IpCameraServiceMap, config: &Arc<ConfigService>,
udn: &str, url: &str, name: &str, manufacturer: &str, model_name: &str) -> Result<(), Error>
{
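        // The camera's UPnP UDN is used to derive the service and channel ids, so a
        // rediscovered camera maps back onto the same service.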
let service_id = create_service_id(udn);
let adapter_id = Self::id();
let mut service = Service::empty(service_id.clone(), adapter_id.clone());
service.properties.insert(CUSTOM_PROPERTY_MANUFACTURER.to_owned(),
manufacturer.to_owned());
service.properties.insert(CUSTOM_PROPERTY_MODEL.to_owned(), model_name.to_owned());
service.properties.insert(CUSTOM_PROPERTY_NAME.to_owned(), name.to_owned());
service.properties.insert(CUSTOM_PROPERTY_URL.to_owned(), url.to_owned());
service.properties.insert(CUSTOM_PROPERTY_UDN.to_owned(), udn.to_owned());
service.tags.insert(tag_id!(&format!("name:{}", name)));
        // Since the upnp_discover will be called about once every 3 minutes we want to ignore
// discoveries if the camera is already registered.
if let Err(error) = adapt.add_service(service) {
if let Error::InternalError(ref internal_error) = error {
if let InternalError::DuplicateService(_) = *internal_error {
debug!("Found {} @ {} UDN {} (ignoring since it already exists)",
model_name,
url,
udn);
return Ok(());
}
}
panic!(error);
}
info!("Adding IpCamera {} Manufacturer: {} Model: {} Name: {}",
udn,
manufacturer,
model_name,
name);
let getter_image_list_id = create_getter_id("image_list", udn);
try!(adapt.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: getter_image_list_id.clone(),
last_seen: None,
service: service_id.clone(),
mechanism: Getter {
kind: ChannelKind::Extension {
vendor: Id::new("[email protected]"),
adapter: Id::new("IPCam Adapter"),
kind: Id::new("image_list"),
typ: Type::Json,
},
updated: None,
},
}));
let getter_image_newest_id = create_getter_id("image_newest", udn);
try!(adapt.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: getter_image_newest_id.clone(),
last_seen: None,
service: service_id.clone(),
mechanism: Getter {
kind: ChannelKind::Extension {
vendor: Id::new("[email protected]"),
adapter: Id::new("IPCam Adapter"),
kind: Id::new("latest image"),
typ: Type::Binary,
},
updated: None,
},
}));
let setter_snapshot_id = create_setter_id("snapshot", udn);
try!(adapt.add_setter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: setter_snapshot_id.clone(),
last_seen: None,
service: service_id.clone(),
mechanism: Setter {
kind: ChannelKind::TakeSnapshot,
updated: None,
},
}));
let getter_username_id = create_getter_id("username", udn);
try!(adapt.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: getter_username_id.clone(),
last_seen: None,
service: service_id.clone(),
mechanism: Getter {
kind: ChannelKind::Username,
updated: None,
},
}));
let setter_username_id = create_setter_id("username", udn);
try!(adapt.add_setter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: setter_username_id.clone(),
last_seen: None,
service: service_id.clone(),
mechanism: Setter {
kind: ChannelKind::Username,
updated: None,
},
}));
let getter_password_id = create_getter_id("password", udn);
try!(adapt.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: getter_password_id.clone(),
last_seen: None,
service: service_id.clone(),
mechanism: Getter {
kind: ChannelKind::Password,
updated: None,
},
}));
let setter_password_id = create_setter_id("password", udn);
try!(adapt.add_setter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: setter_password_id.clone(),
last_seen: None,
service: service_id.clone(),
mechanism: Setter {
kind: ChannelKind::Password,
updated: None,
},
}));
let mut serv = services.lock().unwrap();
let camera_obj = try!(IpCamera::new(udn, url, name, &serv.snapshot_root, config));
let camera = Arc::new(camera_obj);
serv.getters.insert(getter_image_list_id, camera.clone());
serv.getters.insert(getter_image_newest_id, camera.clone());
serv.setters.insert(setter_snapshot_id, camera.clone());
serv.getters.insert(getter_username_id, camera.clone());
serv.setters.insert(setter_username_id, camera.clone());
serv.getters.insert(getter_password_id, camera.clone());
serv.setters.insert(setter_password_id, camera.clone());
Ok(())
}
}
impl Adapter for IPCameraAdapter {
fn id(&self) -> Id<AdapterId> {
Self::id()
}
fn name(&self) -> &str {
ADAPTER_NAME
}
fn vendor(&self) -> &str {
ADAPTER_VENDOR
}
fn version(&self) -> &[u32; 4] {
&ADAPTER_VERSION
}
fn fetch_values(&self,
mut set: Vec<Id<Getter>>,
_: User)
-> ResultMap<Id<Getter>, Option<Value>, Error> {
set.drain(..).map(|id| {
let camera = match self.services.lock().unwrap().getters.get(&id) {
Some(camera) => camera.clone(),
None => return (id.clone(), Err(Error::InternalError(InternalError::NoSuchGetter(id))))
};
if id == camera.get_username_id {
let rsp = camera.get_username();
return (id, Ok(Some(Value::String(Arc::new(rsp)))));
}
if id == camera.get_password_id {
let rsp = camera.get_password();
return (id, Ok(Some(Value::String(Arc::new(rsp)))));
}
if id == camera.image_list_id {
let rsp = camera.get_image_list();
return (id, Ok(Some(Value::Json(Arc::new(Json(serde_json::to_value(&rsp)))))));
}
if id == camera.image_newest_id {
return match camera.get_newest_image() {
Ok(rsp) => (id, Ok(Some(Value::Binary(Binary {
data: Arc::new(rsp),
mimetype: Id::new("image/jpeg")
})))),
Err(err) => (id, Err(err))
};
}
(id.clone(), Err(Error::InternalError(InternalError::NoSuchGetter(id))))
}).collect()
}
fn send_values(&self, mut values: HashMap<Id<Setter>, Value>, _: User) -> ResultMap<Id<Setter>, (), Error> {
values.drain().map(|(id, value)| {
let camera = match self.services.lock().unwrap().setters.get(&id) {
Some(camera) => camera.clone(),
None => { return (id, Err(Error::InternalError(InternalError::InvalidInitialService))); }
};
if id == camera.set_username_id {
if let Value::String(ref username) = value {
camera.set_username(username);
return (id, Ok(()));
}
return (id, Err(Error::TypeError(TypeError {
got:value.get_type(),
expected: Type::String
})))
}
if id == camera.set_password_id {
if let Value::String(ref password) = value {
camera.set_password(password);
return (id, Ok(()));
}
return (id, Err(Error::TypeError(TypeError {
got:value.get_type(),
expected: Type::String
})))
}
if id == camera.snapshot_id {
return match camera.take_snapshot() {
Ok(_) => (id, Ok(())),
Err(err) => (id, Err(err))
};
}
(id.clone(), Err(Error::InternalError(InternalError::NoSuchSetter(id))))
}).collect()
}
fn register_watch(&self, mut watch: Vec<WatchTarget>) -> WatchResult
{
watch.drain(..).map(|(id, _, _)| {
(id.clone(), Err(Error::GetterDoesNotSupportWatching(id)))
}).collect()
}
}<|fim▁end|> | |
<|file_name|>DataMapStoreManager.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.core.datamap;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.carbondata.common.annotations.InterfaceAudience;
import org.apache.carbondata.common.exceptions.MetadataProcessException;
import org.apache.carbondata.common.exceptions.sql.MalformedDataMapCommandException;
import org.apache.carbondata.common.exceptions.sql.NoSuchDataMapException;
import org.apache.carbondata.common.logging.LogService;
import org.apache.carbondata.common.logging.LogServiceFactory;
import org.apache.carbondata.core.constants.CarbonCommonConstants;
import org.apache.carbondata.core.datamap.dev.DataMapFactory;
import org.apache.carbondata.core.indexstore.BlockletDetailsFetcher;
import org.apache.carbondata.core.indexstore.SegmentPropertiesFetcher;
import org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMapFactory;
import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
import org.apache.carbondata.core.metadata.CarbonMetadata;
import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
import org.apache.carbondata.core.metadata.schema.table.DataMapSchema;
import org.apache.carbondata.core.metadata.schema.table.DataMapSchemaStorageProvider;
import org.apache.carbondata.core.metadata.schema.table.DiskBasedDMSchemaStorageProvider;
import org.apache.carbondata.core.metadata.schema.table.RelationIdentifier;
import org.apache.carbondata.core.mutate.SegmentUpdateDetails;
import org.apache.carbondata.core.mutate.UpdateVO;
import org.apache.carbondata.core.statusmanager.SegmentRefreshInfo;
import org.apache.carbondata.core.statusmanager.SegmentUpdateStatusManager;
import org.apache.carbondata.core.util.CarbonProperties;
import org.apache.carbondata.core.util.CarbonSessionInfo;
import org.apache.carbondata.core.util.ThreadLocalSessionInfo;
import static org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider.MV;
import static org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider.PREAGGREGATE;
/**
* It maintains all the DataMaps in it.
*/
@InterfaceAudience.Internal
public final class DataMapStoreManager {
private static DataMapStoreManager instance = new DataMapStoreManager();
public Map<String, List<TableDataMap>> getAllDataMaps() {
return allDataMaps;
}
/**
* Contains the list of datamaps for each table.
*/
private Map<String, List<TableDataMap>> allDataMaps = new ConcurrentHashMap<>();
/**
* Contains the datamap catalog for each datamap provider.
*/
private Map<String, DataMapCatalog> dataMapCatalogs = null;
private Map<String, TableSegmentRefresher> segmentRefreshMap = new ConcurrentHashMap<>();
private DataMapSchemaStorageProvider provider = new DiskBasedDMSchemaStorageProvider(
CarbonProperties.getInstance().getSystemFolderLocation());
private static final LogService LOGGER =
LogServiceFactory.getLogService(DataMapStoreManager.class.getName());
private DataMapStoreManager() {
}
/**
* It only gives the visible datamaps
*/
List<TableDataMap> getAllVisibleDataMap(CarbonTable carbonTable) throws IOException {
CarbonSessionInfo sessionInfo = ThreadLocalSessionInfo.getCarbonSessionInfo();
List<TableDataMap> allDataMaps = getAllDataMap(carbonTable);
Iterator<TableDataMap> dataMapIterator = allDataMaps.iterator();
while (dataMapIterator.hasNext()) {
TableDataMap dataMap = dataMapIterator.next();
String dbName = carbonTable.getDatabaseName();
String tableName = carbonTable.getTableName();
String dmName = dataMap.getDataMapSchema().getDataMapName();
      // TODO: support getting the visible status of a datamap without sessionInfo in the future
if (sessionInfo != null) {
boolean isDmVisible = sessionInfo.getSessionParams().getProperty(
String.format("%s%s.%s.%s", CarbonCommonConstants.CARBON_DATAMAP_VISIBLE,
dbName, tableName, dmName), "true").trim().equalsIgnoreCase("true");
if (!isDmVisible) {
LOGGER.warn(String.format("Ignore invisible datamap %s on table %s.%s",
dmName, dbName, tableName));
dataMapIterator.remove();
}
} else {
String message = "Carbon session info is null";
LOGGER.info(message);
}
}
return allDataMaps;
}
/**
* It gives all datamaps except the default datamap.
*
* @return
*/
public List<TableDataMap> getAllDataMap(CarbonTable carbonTable) throws IOException {
List<DataMapSchema> dataMapSchemas = getDataMapSchemasOfTable(carbonTable);
List<TableDataMap> dataMaps = new ArrayList<>();
if (dataMapSchemas != null) {
for (DataMapSchema dataMapSchema : dataMapSchemas) {
RelationIdentifier identifier = dataMapSchema.getParentTables().get(0);
if (dataMapSchema.isIndexDataMap() && identifier.getTableId()
.equals(carbonTable.getTableId())) {
dataMaps.add(getDataMap(carbonTable, dataMapSchema));
}
}
}
return dataMaps;
}
/**
* It gives all datamap schemas of a given table.
*
*/
public List<DataMapSchema> getDataMapSchemasOfTable(CarbonTable carbonTable) throws IOException {
return provider.retrieveSchemas(carbonTable);
}
/**
* It gives all datamap schemas from store.
*/
public List<DataMapSchema> getAllDataMapSchemas() throws IOException {
return provider.retrieveAllSchemas();
}
public DataMapSchema getDataMapSchema(String dataMapName)
throws NoSuchDataMapException, IOException {
return provider.retrieveSchema(dataMapName);
}
/**
* Saves the datamap schema to storage
* @param dataMapSchema
*/
public void saveDataMapSchema(DataMapSchema dataMapSchema) throws IOException {
provider.saveSchema(dataMapSchema);
}
/**
* Drops the datamap schema from storage
* @param dataMapName
*/
public void dropDataMapSchema(String dataMapName) throws IOException {
provider.dropSchema(dataMapName);
}
/**
* Update the datamap schema after table rename
* This should be invoked after changing table name
* @param dataMapSchemaList
* @param newTableName
*/
public void updateDataMapSchema(List<DataMapSchema> dataMapSchemaList,
String newTableName) throws IOException {
List<DataMapSchema> newDataMapSchemas = new ArrayList<>();
for (DataMapSchema dataMapSchema : dataMapSchemaList) {
RelationIdentifier relationIdentifier = dataMapSchema.getRelationIdentifier();
String dataBaseName = relationIdentifier.getDatabaseName();
String tableId = relationIdentifier.getTableId();
String providerName = dataMapSchema.getProviderName();
      // for a preaggregate datamap, do not modify the schema
if (providerName.equalsIgnoreCase(PREAGGREGATE.toString())) {
continue;
}
      // for an mv datamap, do not modify the relationIdentifier
if (!providerName.equalsIgnoreCase(MV.toString())) {
RelationIdentifier newRelationIdentifier = new RelationIdentifier(dataBaseName,
newTableName, tableId);
dataMapSchema.setRelationIdentifier(newRelationIdentifier);
}
List<RelationIdentifier> newParentTables = new ArrayList<>();
List<RelationIdentifier> parentTables = dataMapSchema.getParentTables();
for (RelationIdentifier identifier : parentTables) {
RelationIdentifier newParentTableIdentifier = new RelationIdentifier(
identifier.getDatabaseName(), newTableName, identifier.getTableId());
newParentTables.add(newParentTableIdentifier);
}
dataMapSchema.setParentTables(newParentTables);
newDataMapSchemas.add(dataMapSchema);
      // first drop the old schema
String dataMapName = dataMapSchema.getDataMapName();
dropDataMapSchema(dataMapName);
}
// save new datamap schema to storage
for (DataMapSchema newDataMapSchema : newDataMapSchemas) {
saveDataMapSchema(newDataMapSchema);
}
}
/**
* Register datamap catalog for the datamap provider
* @param dataMapProvider
* @param dataMapSchema
*/
public synchronized void registerDataMapCatalog(DataMapProvider dataMapProvider,
DataMapSchema dataMapSchema) throws IOException {
initializeDataMapCatalogs(dataMapProvider);
String name = dataMapSchema.getProviderName();
DataMapCatalog dataMapCatalog = dataMapCatalogs.get(name);
if (dataMapCatalog == null) {
dataMapCatalog = dataMapProvider.createDataMapCatalog();
if (dataMapCatalog != null) {
dataMapCatalogs.put(name, dataMapCatalog);
dataMapCatalog.registerSchema(dataMapSchema);
}
} else {
dataMapCatalog.registerSchema(dataMapSchema);
}
}
/**
* Unregister datamap catalog.
* @param dataMapSchema
*/
public synchronized void unRegisterDataMapCatalog(DataMapSchema dataMapSchema) {
if (dataMapCatalogs == null) {
return;
}
String name = dataMapSchema.getProviderName();
DataMapCatalog dataMapCatalog = dataMapCatalogs.get(name);
if (dataMapCatalog != null) {
dataMapCatalog.unregisterSchema(dataMapSchema.getDataMapName());
}
}
/**
* Get the datamap catalog for provider.
* @param providerName
* @return
*/
public synchronized DataMapCatalog getDataMapCatalog(DataMapProvider dataMapProvider,
String providerName) throws IOException {
initializeDataMapCatalogs(dataMapProvider);
return dataMapCatalogs.get(providerName);
}
/**
* Initialize by reading all datamaps from store and re register it
* @param dataMapProvider
*/
private void initializeDataMapCatalogs(DataMapProvider dataMapProvider) throws IOException {
if (dataMapCatalogs == null) {
dataMapCatalogs = new ConcurrentHashMap<>();
List<DataMapSchema> dataMapSchemas = getAllDataMapSchemas();
for (DataMapSchema schema : dataMapSchemas) {
DataMapCatalog dataMapCatalog = dataMapCatalogs.get(schema.getProviderName());
if (dataMapCatalog == null) {
dataMapCatalog = dataMapProvider.createDataMapCatalog();
if (null == dataMapCatalog) {
throw new RuntimeException("Internal Error.");
}
dataMapCatalogs.put(schema.getProviderName(), dataMapCatalog);
}
try {
dataMapCatalog.registerSchema(schema);
} catch (Exception e) {
// Ignore the schema
LOGGER.error(e, "Error while registering schema");
}
}
}
}
/**
* It gives the default datamap of the table. Default datamap of any table is BlockletDataMap
*
* @param table
* @return
*/
public TableDataMap getDefaultDataMap(CarbonTable table) {
return getDataMap(table, BlockletDataMapFactory.DATA_MAP_SCHEMA);
}
/**
* Get the datamap for reading data.
*/
public TableDataMap getDataMap(CarbonTable table, DataMapSchema dataMapSchema) {
String tableUniqueName =
table.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableUniqueName();
List<TableDataMap> tableIndices = allDataMaps.get(tableUniqueName);
TableDataMap dataMap = null;
if (tableIndices != null) {
dataMap = getTableDataMap(dataMapSchema.getDataMapName(), tableIndices);
}
if (dataMap == null) {
synchronized (tableUniqueName.intern()) {
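        // double-checked locking: re-read the map inside the lock in case another thread
        // created and registered the datamap first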
tableIndices = allDataMaps.get(tableUniqueName);
if (tableIndices != null) {
dataMap = getTableDataMap(dataMapSchema.getDataMapName(), tableIndices);
}
if (dataMap == null) {
try {
dataMap = createAndRegisterDataMap(table, dataMapSchema);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
}
if (dataMap == null) {
throw new RuntimeException("Datamap does not exist");
}
return dataMap;
}
/**
   * Create and return the DataMapFactory for the given table and datamap schema; the provider
   * is resolved by reflection on its class name or by its registered short name.
*/
public DataMapFactory getDataMapFactoryClass(CarbonTable table, DataMapSchema dataMapSchema)
throws MalformedDataMapCommandException {
try {
// try to create datamap by reflection to test whether it is a valid DataMapFactory class
return (DataMapFactory)
Class.forName(dataMapSchema.getProviderName()).getConstructors()[0]
.newInstance(table, dataMapSchema);
} catch (ClassNotFoundException e) {
// try to create DataMapClassProvider instance by taking providerName as short name
return DataMapRegistry.getDataMapFactoryByShortName(table, dataMapSchema);
} catch (Throwable e) {
throw new MetadataProcessException(
"failed to get DataMap factory for'" + dataMapSchema.getProviderName() + "'", e);
}
}
/**
   * Create a new datamap instance and register it in the store manager.
* The datamap is created using datamap name, datamap factory class and table identifier.
*/
// TODO: make it private
public TableDataMap createAndRegisterDataMap(CarbonTable table,
DataMapSchema dataMapSchema) throws MalformedDataMapCommandException {
DataMapFactory dataMapFactory = getDataMapFactoryClass(table, dataMapSchema);
return registerDataMap(table, dataMapSchema, dataMapFactory);
}
public TableDataMap registerDataMap(CarbonTable table,
DataMapSchema dataMapSchema, DataMapFactory dataMapFactory) {
String tableUniqueName = table.getCarbonTableIdentifier().getTableUniqueName();
// Just update the segmentRefreshMap with the table if not added.
getTableSegmentRefresher(table);
List<TableDataMap> tableIndices = allDataMaps.get(tableUniqueName);
if (tableIndices == null) {
tableIndices = new ArrayList<>();
}
BlockletDetailsFetcher blockletDetailsFetcher;
SegmentPropertiesFetcher segmentPropertiesFetcher = null;
if (dataMapFactory instanceof BlockletDetailsFetcher) {
blockletDetailsFetcher = (BlockletDetailsFetcher) dataMapFactory;
} else {
blockletDetailsFetcher = getBlockletDetailsFetcher(table);
}
segmentPropertiesFetcher = (SegmentPropertiesFetcher) blockletDetailsFetcher;
TableDataMap dataMap = new TableDataMap(table.getAbsoluteTableIdentifier(),
dataMapSchema, dataMapFactory, blockletDetailsFetcher, segmentPropertiesFetcher);
tableIndices.add(dataMap);
allDataMaps.put(tableUniqueName, tableIndices);
return dataMap;
}
private TableDataMap getTableDataMap(String dataMapName, List<TableDataMap> tableIndices) {
TableDataMap dataMap = null;
for (TableDataMap tableDataMap : tableIndices) {
if (tableDataMap.getDataMapSchema().getDataMapName().equals(dataMapName)) {
dataMap = tableDataMap;
break;
}
}
return dataMap;
}
/**
* Clear the invalid segments from all the datamaps of the table
* @param carbonTable
* @param segments
*/
public void clearInvalidSegments(CarbonTable carbonTable, List<Segment> segments)
throws IOException {
getDefaultDataMap(carbonTable).clear(segments);
List<TableDataMap> allDataMap = getAllDataMap(carbonTable);
for (TableDataMap dataMap: allDataMap) {
dataMap.clear(segments);
}
}
/**
* Clear the datamap/datamaps of a table from memory
*
* @param identifier Table identifier
*/
public void clearDataMaps(AbsoluteTableIdentifier identifier) {
CarbonTable carbonTable = getCarbonTable(identifier);
String tableUniqueName = identifier.getCarbonTableIdentifier().getTableUniqueName();
List<TableDataMap> tableIndices = allDataMaps.get(tableUniqueName);
if (null != carbonTable && tableIndices != null) {
try {
DataMapUtil.executeDataMapJobForClearingDataMaps(carbonTable);
} catch (IOException e) {
LOGGER.error(e, "clear dataMap job failed");
// ignoring the exception
}
}
segmentRefreshMap.remove(identifier.uniqueName());
clearDataMaps(tableUniqueName);
allDataMaps.remove(tableUniqueName);
}
/**
* This method returns the carbonTable from identifier
* @param identifier
* @return
*/
public CarbonTable getCarbonTable(AbsoluteTableIdentifier identifier) {
CarbonTable carbonTable = null;
carbonTable = CarbonMetadata.getInstance()
.getCarbonTable(identifier.getDatabaseName(), identifier.getTableName());
if (carbonTable == null) {
try {
carbonTable = CarbonTable
.buildFromTablePath(identifier.getTableName(), identifier.getDatabaseName(),
identifier.getTablePath(), identifier.getCarbonTableIdentifier().getTableId());
} catch (IOException e) {
LOGGER.error("failed to get carbon table from table Path");
// ignoring exception
}
}
return carbonTable;
}
/**
* this methods clears the datamap of table from memory
*/
public void clearDataMaps(String tableUniqName) {
List<TableDataMap> tableIndices = allDataMaps.get(tableUniqName);
if (tableIndices != null) {
for (TableDataMap tableDataMap : tableIndices) {
if (tableDataMap != null) {
          // clear the segmentMap in BlockletDetailsFetcher, otherwise the Segment will remain in
          // the executor and the query fails when checking whether the blocklet is in the index
tableDataMap.getBlockletDetailsFetcher().clear();
tableDataMap.clear();
}
}
}
allDataMaps.remove(tableUniqName);
}
/**
* Clear the datamap/datamaps of a table from memory and disk
*
* @param identifier Table identifier
*/
public void clearDataMap(AbsoluteTableIdentifier identifier, String dataMapName) {
CarbonTable carbonTable = getCarbonTable(identifier);
String tableUniqueName = identifier.getCarbonTableIdentifier().getTableUniqueName();
List<TableDataMap> tableIndices = allDataMaps.get(tableUniqueName);
if (tableIndices != null) {
int i = 0;
for (TableDataMap tableDataMap : tableIndices) {
if (carbonTable != null && tableDataMap != null && dataMapName
.equalsIgnoreCase(tableDataMap.getDataMapSchema().getDataMapName())) {
try {
DataMapUtil.executeDataMapJobForClearingDataMaps(carbonTable);
tableDataMap.clear();
} catch (IOException e) {
LOGGER.error(e, "clear dataMap job failed");
// ignoring the exception
}
tableDataMap.deleteDatamapData();
tableIndices.remove(i);
break;
}
i++;
}
allDataMaps.put(tableUniqueName, tableIndices);
}
}
/**
* is datamap exist
* @return true if exist, else return false
*/
public boolean isDataMapExist(String dbName, String tableName, String dmName) {
List<TableDataMap> tableDataMaps = allDataMaps.get(dbName + '_' + tableName);
if (tableDataMaps != null) {
for (TableDataMap dm : tableDataMaps) {
if (dm != null && dmName.equalsIgnoreCase(dm.getDataMapSchema().getDataMapName())) {
return true;
}
}
}
return false;
}
/**
* Get the blocklet datamap factory to get the detail information of blocklets
*
* @param table
* @return
*/
private BlockletDetailsFetcher getBlockletDetailsFetcher(CarbonTable table) {
TableDataMap blockletMap = getDataMap(table, BlockletDataMapFactory.DATA_MAP_SCHEMA);
return (BlockletDetailsFetcher) blockletMap.getDataMapFactory();
}
/**
* Returns the singleton instance
*
* @return
*/
public static DataMapStoreManager getInstance() {
return instance;
}
/**
* Get the TableSegmentRefresher for the table. If not existed then add one and return.
*/
public TableSegmentRefresher getTableSegmentRefresher(CarbonTable table) {
String uniqueName = table.getAbsoluteTableIdentifier().uniqueName();
if (segmentRefreshMap.get(uniqueName) == null) {
segmentRefreshMap.put(uniqueName, new TableSegmentRefresher(table));
}
return segmentRefreshMap.get(uniqueName);
}
/**
* Keep track of the segment refresh time.
*/
public static class TableSegmentRefresher {
    // This map stores the latest segment refresh time. So in case of update/delete we check the
// time against this map.
private Map<String, SegmentRefreshInfo> segmentRefreshTime = new HashMap<>();
// This map keeps the manual refresh entries from users. It is mainly used for partition
// altering.
private Map<String, Boolean> manualSegmentRefresh = new HashMap<>();
TableSegmentRefresher(CarbonTable table) {
SegmentUpdateStatusManager statusManager = new SegmentUpdateStatusManager(table);
SegmentUpdateDetails[] updateStatusDetails = statusManager.getUpdateStatusDetails();
for (SegmentUpdateDetails updateDetails : updateStatusDetails) {
UpdateVO updateVO = statusManager.getInvalidTimestampRange(updateDetails.getSegmentName());
segmentRefreshTime.put(updateVO.getSegmentId(),
new SegmentRefreshInfo(updateVO.getCreatedOrUpdatedTimeStamp(), 0));
}
}
public boolean isRefreshNeeded(Segment seg, UpdateVO updateVo) throws IOException {
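      // refresh is needed when an updated segment is seen for the first time, when a manual
      // refresh is pending, or when the segment's refresh info is newer than the cached one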
SegmentRefreshInfo segmentRefreshInfo =
seg.getSegmentRefreshInfo(updateVo);
String segmentId = seg.getSegmentNo();
if (segmentRefreshTime.get(segmentId) == null
&& segmentRefreshInfo.getSegmentUpdatedTimestamp() != null) {
segmentRefreshTime.put(segmentId, segmentRefreshInfo);
return true;
}
if (manualSegmentRefresh.get(segmentId) != null && manualSegmentRefresh.get(segmentId)) {
manualSegmentRefresh.put(segmentId, false);
return true;
}
boolean isRefresh = segmentRefreshInfo.compare(segmentRefreshTime.get(segmentId));
if (isRefresh) {
segmentRefreshTime.remove(segmentId);
}
return isRefresh;
}
public void refreshSegments(List<String> segmentIds) {
for (String segmentId : segmentIds) {
manualSegmentRefresh.put(segmentId, true);
}
}
public boolean isRefreshNeeded(String segmentId) {
if (manualSegmentRefresh.get(segmentId) != null && manualSegmentRefresh.get(segmentId)) {<|fim▁hole|> return false;
}
}
}
}<|fim▁end|> | manualSegmentRefresh.put(segmentId, false);
return true;
} else { |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
class Group(models.Model):
BASE_URL = "https://www.facebook.com/groups/%s"
def __unicode__(self):
return self.name
id = models.BigIntegerField(primary_key=True)
name = models.CharField(max_length = 100)
school = models.CharField(max_length = 100)
class User(models.Model):
def __unicode__(self):
return self.name
id = models.BigIntegerField(primary_key=True)
name = models.CharField(max_length = 100)
class Listing(models.Model):
BASE_URL = "https://www.facebook.com/%s/posts/%s"
def __unicode__(self):
return self.message or u'No Text'
id = models.BigIntegerField(primary_key=True)
created_time = models.DateTimeField(null=False)
updated_time = models.DateTimeField(null=False)
type = models.CharField(max_length = 6)
message = models.TextField(null=False, blank=True, default="")
picture = models.TextField(null=False, blank=True, default="")
parsed = models.BooleanField(default=False)
user = models.ForeignKey(User)
approved = models.BooleanField(default=False)
buy_or_sell = models.CharField(max_length = 4, null=True)
category = models.CharField(max_length = 15, null=True)
object_id = models.BigIntegerField(null=True)
group = models.ForeignKey(Group)
likers = models.ManyToManyField(User, related_name="listings")
sold = models.BooleanField(default=False)
def url(self): return self.BASE_URL % (self.group.id, self.id)
class Comment(models.Model):<|fim▁hole|> return self.message or u'No Text'
id = models.BigIntegerField(primary_key=True)
message = models.TextField(null=False, blank=True, default="")
created_time = models.DateTimeField('date published')
user = models.ForeignKey(User)
listing = models.ForeignKey(Listing, related_name="comments")<|fim▁end|> |
def __unicode__(self): |
<|file_name|>database.cc<|end_file_name|><|fim▁begin|>#include <map>
#include <assert.h>
#include <exception>
#include "pf/base/string.h"
#include "pf/file/database.h"
namespace pf_file {
Database::Database(uint32_t id) {
__ENTER_FUNCTION
id_ = id;
string_buffer_ = NULL;
index_column_ = -1;
record_number_ = 0;
__LEAVE_FUNCTION
}
Database::~Database() {
__ENTER_FUNCTION
SAFE_DELETE_ARRAY(string_buffer_);
__LEAVE_FUNCTION
}
bool Database::open_from_txt(const char *filename) {
__ENTER_FUNCTION
assert(filename);
FILE* fp = fopen(filename, "rb");
if (NULL == fp) return false;
fseek(fp, 0, SEEK_END);
int32_t filesize = ftell(fp);
fseek(fp, 0, SEEK_SET);
//read in memory
char *memory = new char[filesize + 1];
  memset(memory, 0, filesize + 1); // zero the whole buffer, including the trailing terminator
fread(memory, 1, filesize, fp);
fclose(fp);
memory[filesize] = '\0';
bool result = open_from_memory(memory, memory + filesize + 1, filename);
SAFE_DELETE_ARRAY(memory);
return result;
__LEAVE_FUNCTION
return false;
}
bool Database::open_from_memory(const char *memory,
const char *end,
const char *filename) {
__ENTER_FUNCTION
bool result = true;
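  // a buffer that holds at least a file head and starts with FILE_DATABASE_INDENTIFY is the
  // binary format; anything else is parsed as tab-separated text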
if (end - memory >= static_cast<int32_t>(sizeof(file_head_t)) &&
*((uint32_t*)memory) == FILE_DATABASE_INDENTIFY) {
result = open_from_memory_binary(memory, end, filename);
} else {
result = open_from_memory_text(memory, end, filename);
}
return result;
__LEAVE_FUNCTION
return false;
}
const Database::field_data* Database::search_index_equal(int32_t index) const {
__ENTER_FUNCTION
field_hashmap::const_iterator it_find = hash_index_.find(index);
if (it_find == hash_index_.end()) return NULL;
return it_find->second;
__LEAVE_FUNCTION
return NULL;
}
const char *Database::get_fieldname(int32_t index) {
__ENTER_FUNCTION
const char *name = NULL;
Assert(index >= 0 && index <= field_number_);
name = fieldnames_[index].c_str();
return name;
__LEAVE_FUNCTION
return NULL;
}
int32_t Database::get_fieldindex(const char *name) {
__ENTER_FUNCTION
int32_t result = -1;
uint32_t i;
for (i = 0; i < fieldnames_.size(); ++i) {
if (0 == strcmp(name, fieldnames_[i].c_str())) {
result = i;
break;
}
}
return result;
__LEAVE_FUNCTION
return -1;
}
uint8_t Database::get_fieldtype(int32_t index) {
__ENTER_FUNCTION
Assert(index >= 0 && index <= field_number_);
uint8_t result = static_cast<uint8_t>(type_[index]);
return result;
__LEAVE_FUNCTION
return kTypeString;
}
const Database::field_data *Database::search_position(int32_t line,
int32_t column) const {
__ENTER_FUNCTION
int32_t position = line * get_field_number() + column;
if (line < 0 || position > static_cast<int32_t>(data_buffer_.size())) {
char temp[256];
memset(temp, '\0', sizeof(temp));
snprintf(temp,
sizeof(temp) - 1,
"pf_file::Database::search_position is failed,"
" position out for range[line:%d, column:%d] position:%d",
line,
column,
position);
#ifdef _PF_THROW_EXCEPTION_AS_STD_STRING
throw std::string(temp);
#else
AssertEx(false, temp);
#endif
return NULL;
}
return &(data_buffer_[position]);
__LEAVE_FUNCTION
return NULL;
}
const Database::field_data* Database::search_first_column_equal(
int32_t column,
const field_data &value) const {
__ENTER_FUNCTION
if (column < 0 || column > field_number_) return NULL;
field_type_enum type = type_[column];
register int32_t i;
for (i = 0; i < record_number_; ++i) {
const field_data &_field_data =
data_buffer_[(field_number_ * i) + column];
bool result;
if (kTypeInt == type) {
result = field_equal(kTypeInt, _field_data, value);
} else if (kTypeFloat == type) {
result = field_equal(kTypeFloat, _field_data, value);
} else {
result = field_equal(kTypeString, _field_data, value);
}
if (result) {
return &(data_buffer_[field_number_ * i]);
}
}
return NULL;
__LEAVE_FUNCTION
return NULL;
}
uint32_t Database::get_id() const {
__ENTER_FUNCTION
return id_;
__LEAVE_FUNCTION
return 0;
}
int32_t Database::get_field_number() const {
__ENTER_FUNCTION
return field_number_;
__LEAVE_FUNCTION
return -1;
}
int32_t Database::get_record_number() const {
__ENTER_FUNCTION
return record_number_;
__LEAVE_FUNCTION
return -1;
}
void Database::create_index(int32_t column, const char *filename) {
__ENTER_FUNCTION
if (column < 0 || column > field_number_ || index_column_ == column) return;
hash_index_.clear();
int32_t i;
for (i = 0; i < record_number_; ++i) {
field_data* _field_data = &(data_buffer_[i * field_number_]);
field_hashmap::iterator it_find = hash_index_.find(_field_data->int_value);
if (it_find != hash_index_.end()) {
char temp[256];
memset(temp, '\0', sizeof(temp));
snprintf(temp,
sizeof(temp) - 1,
"[%s]multi index at line: %d(smae value: %d)",
filename,
i + 1,
_field_data->int_value);
#ifdef _PF_THROW_EXCEPTION_AS_STD_STRING
throw std::string(temp);
#else
AssertEx(false, temp);
#endif
}
hash_index_.insert(std::make_pair(_field_data->int_value, _field_data));
}
__LEAVE_FUNCTION
}
const char *Database::get_line_from_memory(char *str,
int32_t size,
const char *memory,
const char *end) {
__ENTER_FUNCTION
register const char *_memory = memory;
if (_memory >= end || 0 == *_memory) return NULL;
while (_memory < end &&
_memory - memory + 1 < size &&
*_memory != 0 &&
*_memory != '\n' &&
*_memory != '\r') {
*(str++) = *(_memory++);
}
*str = 0;
while (_memory < end &&
*_memory != 0 &&
(*_memory == '\r' || *_memory == '\n')) ++_memory;
return _memory;
__LEAVE_FUNCTION
return NULL;
}
bool Database::field_equal(field_type_enum type,
const field_data &a,
const field_data &b) {
__ENTER_FUNCTION
bool result = false;
if (kTypeInt == type) {
result = a.int_value == b.int_value;
} else if (kTypeFloat == type) {
result = a.float_value == b.float_value;
} else {
try {
result = 0 == strcmp(a.string_value, b.string_value);
} catch(...) {
//do nothing
}
}
return result;
__LEAVE_FUNCTION
return false;
}
bool Database::open_from_memory_text(const char *memory,
const char *end,
const char *filename) {
__ENTER_FUNCTION
using namespace pf_base;
char line[(1024 * 10) + 1]; //long string
memset(line, '\0', sizeof(line));
register const char *_memory = memory;
_memory = get_line_from_memory(line, sizeof(line) - 1, _memory, end);
if (!_memory) return false;
std::vector<std::string> result;
string::explode(line, result, "\t", true, true);
if (result.empty()) return false;
field_type _field_type;
_field_type.resize(result.size());
int32_t i;
uint32_t result_size = static_cast<uint32_t>(result.size());
for (i = 0; i < static_cast<int32_t>(result_size); ++i) {
if ("INT" == result[i]) {
_field_type[i] = kTypeInt;
} else if("FLOAT" == result[i]) {
_field_type[i] = kTypeFloat;
} else if("STRING" == result[i]) {
_field_type[i] = kTypeString;
} else {
return false;
}
}
//init
int32_t record_number = 0;
int32_t field_number = static_cast<int32_t>(_field_type.size());
std::vector<std::pair<std::string, int32_t> > string_buffer;
std::map<std::string, int32_t> map_string_buffer;
_memory = get_line_from_memory(line, sizeof(line) - 1, _memory, end);
  //the second line holds the column names (the database field names); English names are recommended
string::explode(line, fieldnames_, "\t", true, true);
if (!_memory) return false;
int32_t string_buffer_size = 0;
bool loop = true;
do {
    //read the data line by line
_memory = get_line_from_memory(line, sizeof(line) - 1, _memory, end);
if (!_memory) break;
    if ('#' == line[0]) continue; //comment line
string::explode(line, result, "\t", true, false);
    if (result.empty()) continue; //empty line
    if (static_cast<int32_t>(result.size()) != field_number) { //wrong column count
int32_t left_number =
field_number - static_cast<int32_t>(result.size());
for (i = 0; i < left_number; ++i) {
result.push_back("");
}
}
if (result[0].empty()) continue;
for (i = 0; i < field_number; ++i) {
field_data _field_data;
switch(_field_type[i]) {
case kTypeInt: {
_field_data.int_value = atoi(result[i].c_str());
data_buffer_.push_back(_field_data);
break;
}
case kTypeFloat: {
_field_data.float_value = static_cast<float>(atof(result[i].c_str()));
data_buffer_.push_back(_field_data);
break;
}
case kTypeString: {
#ifdef FILE_DATABASE_CONVERT_GBK_TO_UTF8
const char *value = result[i].c_str();
//convert charset
        //UTF-8 output can be up to ~1.5x longer than the GBK input, so allocate twice the length
int32_t convert_strlength = static_cast<int32_t>(strlen(value) * 2);
char *convert_str = new char[convert_strlength];
memset(convert_str, 0, convert_strlength);
int32_t convert_result =
string::charset_convert("GBK",
"UTF-8",
convert_str,
convert_strlength,
value,
static_cast<int32_t>(strlen(value)));
if (convert_result > 0) {
value = convert_str;
result[i] = convert_str;
}
SAFE_DELETE_ARRAY(convert_str);
#endif
std::map<std::string, int32_t>::iterator it =
map_string_buffer.find(result[i]);
if (it == map_string_buffer.end()) {
string_buffer.push_back(
std::make_pair(result[i], string_buffer_size));
map_string_buffer.insert(
std::make_pair(result[i],
static_cast<int32_t>(string_buffer.size()) - 1));
_field_data.int_value = string_buffer_size + 1;
string_buffer_size +=
static_cast<int32_t>(strlen(result[i].c_str())) + 1;
} else {
_field_data.int_value = string_buffer[it->second].second + 1;
}
data_buffer_.push_back(_field_data);
break;
}
default: {
return false;
}
}
}
++record_number;
} while (loop);
//database init
record_number_ = record_number;
field_number_ = field_number;
string_buffer_size_ = string_buffer_size + 1;
string_buffer_ = new char[string_buffer_size_];
type_ = _field_type;
unsigned char blank = '\0';
USE_PARAM(blank);
string_buffer_[0] = '\0';
register char *temp = string_buffer_ + 1;
for (i = 0; i < static_cast<int32_t>(string_buffer.size()); ++i) {
memcpy(temp,
string_buffer[i].first.c_str(),
string_buffer[i].first.size());
temp += string_buffer[i].first.size();
*(temp++) = '\0';
}
//relocate string block
register uint16_t m, n;
for (m = 0; m < field_number; ++m) {
if (type_[m] != kTypeString) continue;
for (n = 0; n < record_number; ++n) {
field_data &_field_data1 = data_buffer_[(n * field_number) + m];
_field_data1.string_value = string_buffer_ + _field_data1.int_value;
}
}
create_index(0, filename);
return true;
__LEAVE_FUNCTION
return false;
}
bool Database::open_from_memory_binary(const char *memory,
const char *end,
const char *filename) {
__ENTER_FUNCTION
register const char *_memory = memory;
file_head_t file_head;
memcpy(&file_head, _memory, sizeof(file_head_t));
if (file_head.identify != FILE_DATABASE_INDENTIFY) return false;
//check memory size
if (sizeof(file_head) +
sizeof(uint32_t) * file_head.field_number +
sizeof(field_data) * file_head.record_number * file_head.field_number +
+ file_head.string_block_size > static_cast<uint64_t>(end - memory)) {
return false;
}
_memory += sizeof(file_head);
//init
record_number_ = file_head.record_number;
field_number_ = file_head.field_number;
string_buffer_size_= file_head.string_block_size;
  //create string block
string_buffer_ = new char[string_buffer_size_];
if (!string_buffer_) return false;
std::vector<uint32_t> field_type;
field_type.resize(field_number_);
  memcpy(&(field_type[0]), _memory, sizeof(uint32_t) * field_number_);
  _memory += sizeof(uint32_t) * field_number_;
//check it
type_.resize(field_number_);
int32_t i;
for (i = 0; i < field_number_; ++i) {
switch(field_type[i]) {
case kTypeInt: {
//do nothing
}
case kTypeFloat: {
//do nothing
}
case kTypeString: {
type_[i] = static_cast<field_type_enum>(field_type[i]);
break;
}
default: {
SAFE_DELETE_ARRAY(string_buffer_);
return false;
}
}
}
//read all field
data_buffer_.resize(field_number_ * record_number_);
memcpy(&(data_buffer_[0]),
_memory,
sizeof(field_data) * field_number_ * record_number_);
_memory += sizeof(field_data) * field_number_ * record_number_;
memcpy(string_buffer_, _memory, string_buffer_size_);
string_buffer_[string_buffer_size_ - 1] = '\0';
//runtime address
for (i = 0; i < field_number_; ++i) {
if (field_type[i] != kTypeString) continue;
std::string str;
int32_t j;
for (j = 0; j < record_number_; ++j) {
      data_buffer_[(j * field_number_) + i].string_value +=
reinterpret_cast<uint64_t>(string_buffer_);
}
}
  create_index(0, filename);
  return true;
__LEAVE_FUNCTION
return false;
}
bool Database::save_tobinary(const char *filename) {
__ENTER_FUNCTION
file_head_t filehead;
filehead.field_number = field_number_;
filehead.record_number = record_number_;
filehead.string_block_size = string_buffer_size_;
FILE *fp = fopen(filename, "wb");
if (NULL == fp) return false;
fwrite(&filehead, sizeof(filehead), 1, fp);
fwrite(&(type_[0]), sizeof(field_type_enum) *filehead.field_number, 1, fp);
fwrite(&(data_buffer_[0]),
filehead.field_number * filehead.record_number,
1,
fp);
fwrite(string_buffer_, filehead.string_block_size, 1, fp);
fclose(fp);
return true;
__LEAVE_FUNCTION
return false;
}
bool Database::save_totext(const char *filename) {
__ENTER_FUNCTION
FILE *fp = fopen(filename, "wb");
if (NULL == fp) return false;
for(int i=0; i< static_cast<int>(type_.size()); ++i){
if(type_[i] == kTypeInt){
char temp[64] = {0};
snprintf(temp, sizeof(temp) - 1, "INT");
fwrite(temp, strlen(temp), 1, fp);
} else if(type_[i] == kTypeFloat){
char temp[64] = {0};
snprintf(temp, sizeof(temp) - 1, "FLOAT");
fwrite(temp, strlen(temp), 1, fp);
} else if(type_[i] == kTypeString){
char temp[64] = {0};
snprintf(temp, sizeof(temp) - 1, "STRING");
fwrite(temp, strlen(temp), 1, fp);
}
if(i == static_cast<int>(type_.size()-1)){
fwrite("\n", 1, 1, fp);
} else{
fwrite("\t", 1, 1, fp);
}
}
for(int i=0; i<static_cast<int>(fieldnames_.size()); ++i){
char temp[64] = {0};
    snprintf(temp, sizeof(temp) - 1, "%s", fieldnames_[i].c_str());
fwrite(temp, strlen(temp), 1, fp);
if(i == static_cast<int>(fieldnames_.size()-1)){
fwrite("\n", 1, 1, fp);
} else{
fwrite("\t", 1, 1, fp);
}
}
for (int32_t line = 0; line < record_number_; ++line) {
for (int32_t column = 0; column < field_number_; ++column) {
const field_data *_field_data = search_position(line, column);
switch (type_[column]) {
case kTypeInt: {
char temp[64] = {0};
snprintf(temp, sizeof(temp) - 1, "%d", _field_data->int_value);
fwrite(temp, strlen(temp), 1, fp);
break;
}
case kTypeFloat: {
char temp[64] = {0};
snprintf(temp, sizeof(temp) - 1, "%.3f", _field_data->float_value);
fwrite(temp, strlen(temp), 1, fp);
break;
}
case kTypeString: {
fwrite(_field_data->string_value,
strlen(_field_data->string_value),
1,
fp);
break;
}
default:
break;
}
fwrite("\t", 1, 1, fp);
} //for
fwrite(LF, strlen(LF), 1, fp);
} //for
fclose(fp);
return true;
__LEAVE_FUNCTION
return false;<|fim▁hole|> const char *name) {
__ENTER_FUNCTION
const field_data *_field_data = NULL;
int32_t column = get_fieldindex(name);
_field_data = search_position(line, column);
return _field_data;
__LEAVE_FUNCTION
return NULL;
}
bool Database::save_totext_line(std::vector<std::string> _data){
__ENTER_FUNCTION
if(static_cast<int32_t>(_data.size()) != field_number_)
return false;
for(int32_t column = 0; column < field_number_; ++column){
switch (type_[column]) {
case kTypeInt: {
break;
}
case kTypeFloat: {
break;
}
case kTypeString: {
break;
}
default:
break;
}
}
return true;
__LEAVE_FUNCTION
return false;
}
} //namespace pf_file<|fim▁end|> | }
const Database::field_data *Database::get_fielddata(int32_t line, |
<|file_name|>WdLayoutMode.java<|end_file_name|><|fim▁begin|>package com.charlesdream.office.word.enums;
import com.charlesdream.office.BaseEnum;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
/**
* Specifies how text is laid out in the layout mode for the current document.
* <p>
*
* @author Charles Cui on 3/4/16.
* @since 1.0
*/
public enum WdLayoutMode implements BaseEnum {
/**
* No grid is used to lay out text.
*
* @since 1.0
*/
wdLayoutModeDefault(0),
/**
* Text is laid out on a grid; the user specifies the number of lines and the number of characters per line. As the user types, Microsoft Word automatically aligns characters with gridlines.
*
* @since 1.0
*/
wdLayoutModeGenko(3),
/**
* Text is laid out on a grid; the user specifies the number of lines and the number of characters per line. As the user types, Microsoft Word doesn't automatically align characters with gridlines.
*
* @since 1.0
*/
wdLayoutModeGrid(1),
/**
* Text is laid out on a grid; the user specifies the number of lines, but not the number of characters per line.
*
* @since 1.0
*/
wdLayoutModeLineGrid(2);
private static final Map<Integer, WdLayoutMode> lookup;
static {
lookup = new HashMap<>();<|fim▁hole|> }
}
private final int value;
WdLayoutMode(int value) {
this.value = value;
}
/**
* Find the enum type by its value.
*
* @param value The enum value.
 * @return The enum type, or null if this enum value does not exist.
* @since 1.0
*/
public static WdLayoutMode find(int value) {
WdLayoutMode result = lookup.get(value);
return result;
}
/**
* Find the enum type by its value, with the default value.
*
* @param value The enum value.
 * @param defaultValue The default return value if the enum value does not exist.
 * @return The enum type, or the default value if this enum value does not exist.
* @since 1.0
*/
public static WdLayoutMode find(int value, WdLayoutMode defaultValue) {
WdLayoutMode result = WdLayoutMode.find(value);
if (result == null) {
result = defaultValue;
}
return result;
}
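// Usage sketch (illustrative only): resolving a raw layout-mode value read from a
// document, falling back to the default layout when the value is unknown:
//   WdLayoutMode mode = WdLayoutMode.find(3, WdLayoutMode.wdLayoutModeDefault);
//   // mode == WdLayoutMode.wdLayoutModeGenko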
/**
 * Get the value of an enum type.
 *
 * @return The value of an enum type.
* @since 1.0
*/
public int value() {
return this.value;
}
}<|fim▁end|> |
for (WdLayoutMode e : EnumSet.allOf(WdLayoutMode.class)) {
lookup.put(e.value(), e); |
<|file_name|>RedisOutputStream.java<|end_file_name|><|fim▁begin|>package redis.clients.jedis.util;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
/**
 * The class implements a buffered output stream without synchronization. There are also special
 * operations like in-place string encoding. This stream fully ignores mark/reset and should not be
 * used outside Jedis.
*/
public final class RedisOutputStream extends FilterOutputStream {
protected final byte[] buf;
protected int count;
private final static int[] sizeTable = { 9, 99, 999, 9999, 99999, 999999, 9999999, 99999999,
999999999, Integer.MAX_VALUE };
private final static byte[] DigitTens = { '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '1',
'1', '1', '1', '1', '1', '1', '1', '1', '1', '2', '2', '2', '2', '2', '2', '2', '2', '2',
'2', '3', '3', '3', '3', '3', '3', '3', '3', '3', '3', '4', '4', '4', '4', '4', '4', '4',
'4', '4', '4', '5', '5', '5', '5', '5', '5', '5', '5', '5', '5', '6', '6', '6', '6', '6',
'6', '6', '6', '6', '6', '7', '7', '7', '7', '7', '7', '7', '7', '7', '7', '8', '8', '8',
'8', '8', '8', '8', '8', '8', '8', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', };
private final static byte[] DigitOnes = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0',
'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8',
'9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6',
'7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4',
'5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2',
'3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', };
private final static byte[] digits = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a',
'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z' };
public RedisOutputStream(final OutputStream out) {
this(out, 8192);
}
public RedisOutputStream(final OutputStream out, final int size) {
super(out);
if (size <= 0) {
throw new IllegalArgumentException("Buffer size <= 0");
}
buf = new byte[size];
}
private void flushBuffer() throws IOException {
if (count > 0) {
out.write(buf, 0, count);
count = 0;
}
}
public void write(final byte b) throws IOException {<|fim▁hole|> if (count == buf.length) {
flushBuffer();
}
buf[count++] = b;
}
@Override
public void write(final byte[] b) throws IOException {
write(b, 0, b.length);
}
@Override
public void write(final byte[] b, final int off, final int len) throws IOException {
if (len >= buf.length) {
flushBuffer();
out.write(b, off, len);
} else {
if (len >= buf.length - count) {
flushBuffer();
}
System.arraycopy(b, off, buf, count, len);
count += len;
}
}
public void writeCrLf() throws IOException {
if (2 >= buf.length - count) {
flushBuffer();
}
buf[count++] = '\r';
buf[count++] = '\n';
}
public void writeIntCrLf(int value) throws IOException {
if (value < 0) {
write((byte) '-');
value = -value;
}
int size = 0;
while (value > sizeTable[size])
size++;
size++;
if (size >= buf.length - count) {
flushBuffer();
}
int q, r;
int charPos = count + size;
while (value >= 65536) {
q = value / 100;
r = value - ((q << 6) + (q << 5) + (q << 2));
value = q;
buf[--charPos] = DigitOnes[r];
buf[--charPos] = DigitTens[r];
}
for (;;) {
q = (value * 52429) >>> (16 + 3);
r = value - ((q << 3) + (q << 1));
buf[--charPos] = digits[r];
value = q;
if (value == 0) break;
}
count += size;
writeCrLf();
}
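// Usage sketch (illustrative only): writeIntCrLf(-345) writes the ASCII bytes
// '-', '3', '4', '5' followed by "\r\n" into the buffer, flushing first if the
// digits would not fit; this is how integer lengths are typically emitted when
// writing the Redis protocol.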
@Override
public void flush() throws IOException {
flushBuffer();
out.flush();
}
}<|fim▁end|> | |
<|file_name|>init.js<|end_file_name|><|fim▁begin|>angular.module('SpamExpertsApp')
.run(['$rootScope', '$state', 'uiService', 'AuthService', 'API_EVENTS',
function ($rootScope, $state, uiService, AuthService, API_EVENTS) {
$rootScope.$on('$stateChangeStart', function (event, next) {
if (next.name !== 'login') {
$rootScope.username = AuthService.getUsername();
$rootScope.role = AuthService.getRole();
if (!AuthService.isAuthenticated()) {
event.preventDefault();
$state.go('login');
} else if ('data' in next && 'authorizedRoles' in next.data) {
var authorizedRoles = next.data.authorizedRoles;
if (!AuthService.isAuthorized(authorizedRoles)) {
event.preventDefault();
$state.go($state.current, {}, {reload: true});
$rootScope.$broadcast(API_EVENTS.notAuthorized);
}
}
} else {
if (AuthService.isAuthenticated()) {
event.preventDefault();
$state.go('main.dash');
}
}
});
$rootScope.$on('$logout', function () {
uiService.confirm({
title: 'Confirm logout',
template: 'Are you sure you want to log out?'
}, function() {
AuthService.logout();
$state.go('login');
});
});
$rootScope.$on(API_EVENTS.notAuthorized, function() {
uiService.alert({
title: 'Unauthorized!',<|fim▁hole|> template: 'You are not allowed to access this resource.'
});
});
$rootScope.$on(API_EVENTS.userNotAllowed, function() {
uiService.alert({
title: 'Error logging in!',
template: 'Sorry, admin users are not able to use this app yet. Please log in as a domain or email user.'
});
});
$rootScope.$on(API_EVENTS.notAuthenticated, function() {
AuthService.logout();
AuthService.clearPassword();
$state.go('login');
uiService.alert({
title: 'Authentication expired',
template: 'Sorry, you have to login again.'
});
});
}
]);<|fim▁end|> | |
<|file_name|>rforest_on_patch_lean.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Sat Dec 6 17:01:05 2014
@author: remi
@TODO :
in the function train_RForest_with_kfold we should keep all resulting class probabilities; this could be very interesting.
"""
import numpy as np ; #efficient arrays
import pandas as pd; # data frame to do sql like operation
import sklearn
reload(sklearn)
from sklearn.ensemble import RandomForestClassifier ; #base lib
from sklearn import cross_validation, preprocessing ; #normalizing data, creating kfold validation
def create_test_data(feature_number, data_size, class_list):
"""simple function to emulate input, gid is a unique int, other are features"""
import random ; #used to chose a class randomly
#create test vector
feature = np.random.random_sample((data_size,feature_number)) * 10 ;
gid = np.arange(13,data_size+13) ;
#create ground truth class vector : a 1,N vector containing randomly one of the possible class
ground_truth_class = np.zeros(data_size);
for i,(not_used) in enumerate(ground_truth_class):
ground_truth_class[i] = np.random.choice(class_list) ;
return gid, feature, ground_truth_class ;
def create_label_equivalency(labels_name, labels_number):
"""we create an equivalency list between class name and class number"""
import numpy as np;
labels = np.zeros(len(labels_name), dtype={'names':['class_id', 'class_name']\
, 'formats':['i4','a10']}) ;
for i in np.arange(0,len(labels_name)):
labels['class_id'][i] = labels_number[i]
labels['class_name'][i] = labels_name[i]
return labels;
def preprocess_data(X):
from sklearn import preprocessing ;
scaler = preprocessing.StandardScaler(copy=False,with_std=False);
scaler.fit_transform(X) ;
#scaler.transform(Y);
#scaler.transform(X);
return scaler;
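# Usage sketch (illustrative only): preprocess_data centers the feature matrix in
# place (mean removal only, since with_std=False) and returns the fitted scaler so
# the same shift could later be applied to new data, e.g.:
#   scaler = preprocess_data(X_train)
#   # X_train is now centered column-wise; scaler.transform(X_new) would reuse the fit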
def train_RForest_with_kfold(i,train, test, gid,X,Y,weight,scaler,clf,result,feature_importances,learning_time,predicting_time ):
import datetime;
import time;
# creating data for train and test
X_train, X_test, Y_train, Y_test, Weight_train, Weight_test = X[train],X[test], Y[train], Y[test], weight[train], weight[test] ;
#learning
time_temp = time.clock();
print ' starting learning at \n\t\t\t\t%s' % datetime.datetime.now() ;
clf.fit(X_train,Y_train,Weight_train) ;
learning_time = learning_time+ time.clock() - time_temp;
#predicting
print ' learning finished, starting prediction at \n\t\t\t\t%s' % datetime.datetime.now() ;
time_temp = time.clock();
tmp_prob = clf.predict(X_test) ;
predicting_time += time.clock() - time_temp;
print ' prediction finished at \n\t\t\t\t%s' % datetime.datetime.now() ;
#grouping for score per class
proba_class_chosen = np.column_stack( \
(np.array(gid)[test],tmp_prob, Y_test,Weight_test ) ) ;
    #constructing the result data frame
df = pd.DataFrame(proba_class_chosen, columns = ("gid","class_chosen","ground_truth_class" ,"weight")) ;
if (i==0):
result = result.append(df, ignore_index=True) ;
else:
#print 'entering here, df is : ', df
result = result.append( df,ignore_index=True) ;
#plpy.notice("feature used, by importcy");
#plpy.notice(clf.feature_importances_)
#storing how important was each feature to make the prediction
feature_importances.append(clf.feature_importances_) ;
return learning_time,predicting_time,result
def Rforest_learn_predict(gid, X, Y,weight, labels, k_folds, random_forest_trees ,plot_directory):
from sklearn.metrics import classification_report
import datetime;
scaler = preprocess_data(X);
#creating the random forest object
clf = RandomForestClassifier(random_forest_trees, criterion="entropy" ,min_samples_leaf=20) ;
    #cutting the set into k_folds pieces, then proposing k_folds partitions of (k_folds - 1) training parts + 1 test part
kf_total = cross_validation.KFold(len(X), n_folds = k_folds, shuffle = True, random_state = 4) ;<|fim▁hole|> predicting_time = 0.0 ;
for i ,(train, test) in enumerate(kf_total) :
        print ' working on kfold %s , %s' % (i+1,datetime.datetime.now())
learning_time,predicting_time, result = train_RForest_with_kfold(i,train, test,gid,X,Y,weight,scaler,clf,result,feature_importances,learning_time,predicting_time) ;
report = classification_report( result['ground_truth_class'],result['class_chosen'],target_names = labels)#,sample_weight=result['weight']) ;
return np.column_stack((result['gid']
,result['ground_truth_class'].astype(int)
, result['class_chosen'].astype(int)
, np.zeros(len(result['ground_truth_class'])) )),report,feature_importances,learning_time,predicting_time;
def RForest_learn_predict_pg(gids,feature_iar,gt_classes,weight,labels_name,class_list, k_folds,random_forest_ntree, plot_directory):
"""Compute random forest classifiers using feature_iar and gt_classes ground trhuth. Divide the data set into kfolds to perform the operation K times
    @param gids is an int[n]
    @param feature_iar is a float[m x n], where m is the number of features, and the matrix is written row by row
    @param gt_classes is an int[n] giving the ground truth class for each observation
    @param k_folds is an int describing into how many parts we should split the data set
    @param random_forest_ntree how many trees in the forest
@param plot_directory is a string like '/tmp', describing the directory where to write the figures generated
"""
#reshape input feature vector into feature matrix
feature_iar = np.array( feature_iar, dtype=np.float)
feature = np.reshape(feature_iar,( len(gids),len(feature_iar)/len(gids) ) ) ;
gids = np.array(gids);
gt_classes = np.array(gt_classes)
#plpy.notice('toto')
feature[np.isnan(feature)]=0 ;
labels = create_label_equivalency(labels_name,class_list )
weight_iar = np.array(weight)
return Rforest_learn_predict(gids
,feature
,gt_classes
,weight_iar
,labels
, k_folds
, random_forest_ntree
,plot_directory) ;
def RForest_learn_predict_pg_test():
#param
nfeature = 3
n_obs = 1000 ;
class_list = [1,2,3,4,5,6,7]
labels = ['FF1', 'FF2', 'FF3', 'FO2', 'FO3', 'LA6', 'NoC']
k_folds = 10
random_forest_ntree = 10;
plot_directory = '/media/sf_E_RemiCura/PROJETS/point_cloud/PC_in_DB/LOD_ordering_for_patches_of_points/result_rforest/vosges';
#creating input of function
gids = np.arange(13,n_obs+13);
feature_iar = np.random.rand(nfeature*n_obs)*10 ;
gt_classes = np.zeros(n_obs);
for i,(not_used) in enumerate(gt_classes):
gt_classes[i] = np.random.choice(class_list) ;
#
gids= [8736, 8737, 8738, 8739, 8742, 8743, 8744, 8746, 8748, 8749]
feature_iar = [0.0, 0.0, 0.0, 0.0, 1.0, 28.0, 2.0, 593.17, 0.0, 2.0, 4.0, 0.0, 0.0, 1.0, 36.511, 1.0, 592.176, 7.52, 0.0, 0.0, 0.0, 0.0, 1.0, 46.0, 1.0, 598.33, 0.0, 4.0, 23.0, 91.0, 347.0, 1.0, 33.2, 1.0, 585.271, 22.89, 6.0, 36.0, 189.0, 517.0, 1.0, 15.42, 2.0, 616.146, 39.41, 7.0, 37.0, 171.0, 497.0, 1.0, 13.532, 2.0, 607.817, 46.73, 6.0, 33.0, 155.0, 360.0, 1.0, 14.62, 2.0, 596.008, 42.09, 3.0, 29.0, 99.0, 255.0, 1.0, 11.295, 2.0, 572.784, 45.55, 3.0, 30.0, 118.0, 274.0, 1.0, 12.154, 2.0, 517.455, 49.62, 3.0, 28.0, 110.0, 278.0, 0.99, 11.016, 2.0, 495.071, 50.03] ;
gt_classes =[4, 4, 4, 4, 3, 3, 3, 2, 1, 1]
labels_name = ['FF1', 'FF2', 'FF3', 'NoC']
class_list =[1, 2, 3, 4]
weight = [0.25, 0.25, 0.25, 0.25, 0.3333, 0.3333, 0.3333, 1.0, 0.5, 0.5]
random_forest_ntree = 10 ;
#launching function
result = RForest_learn_predict_pg(gids,feature_iar,gt_classes,weight,labels_name,class_list,k_folds,random_forest_ntree, plot_directory)
return result ;
#print RForest_learn_predict_pg_test()<|fim▁end|> | result = pd.DataFrame() ;
feature_importances = [] ;
learning_time = 0.0 ; |
<|file_name|>LocationModel.java<|end_file_name|><|fim▁begin|>package com.sva.model;
import java.math.BigDecimal;
public class LocationModel
{
private String idType;
private BigDecimal timestamp;
private String dataType;
private BigDecimal x;
private BigDecimal y;
private BigDecimal z;
private String userID;
private String path;
private String xo;
private String yo;
private String scale;
public String getXo()
{
return xo;
}
public void setXo(String xo)
{
this.xo = xo;
}
public String getYo()
{
return yo;
}
public void setYo(String yo)
{
this.yo = yo;
}
public String getScale()
{
return scale;
}
public void setScale(String scale)
{
this.scale = scale;
}
public String getPath()
{
return path;
}<|fim▁hole|> }
public String getIdType()
{
return idType;
}
public void setIdType(String idType)
{
this.idType = idType;
}
public BigDecimal getTimestamp()
{
return timestamp;
}
public void setTimestamp(BigDecimal timestamp)
{
this.timestamp = timestamp;
}
public String getDataType()
{
return dataType;
}
public void setDataType(String dataType)
{
this.dataType = dataType;
}
public BigDecimal getX()
{
return x;
}
public void setX(BigDecimal x)
{
this.x = x;
}
public BigDecimal getY()
{
return y;
}
public void setY(BigDecimal y)
{
this.y = y;
}
public BigDecimal getZ()
{
return z;
}
public void setZ(BigDecimal z)
{
this.z = z;
}
public String getUserID()
{
return userID;
}
public void setUserID(String userID)
{
this.userID = userID;
}
}<|fim▁end|> |
public void setPath(String path)
{
this.path = path; |
<|file_name|>liveness-use-after-move.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
<|fim▁hole|> y.clone();
}<|fim▁end|> | fn main() {
let x = ~5;
let y = x;
info!("{:?}", *x); //~ ERROR use of moved value: `x` |
<|file_name|>threads.cpp<|end_file_name|><|fim▁begin|>/****************************************************************************
**
** Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
** All rights reserved.
** Contact: Nokia Corporation ([email protected])
**
** This file is part of the documentation of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** Commercial Usage
** Licensees holding valid Qt Commercial licenses may use this file in
** accordance with the Qt Commercial License Agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Nokia.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 2.1 requirements
** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Nokia gives you certain additional
** rights. These rights are described in the Nokia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3.0 as published by the Free Software
** Foundation and appearing in the file LICENSE.GPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU General Public License version 3.0 requirements will be
** met: http://www.gnu.org/copyleft/gpl.html.<|fim▁hole|>** $QT_END_LICENSE$
**
****************************************************************************/
#include <QCache>
#include <QMutex>
#include <QThreadStorage>
#include "threads.h"
//! [0]
void MyThread::run()
//! [0] //! [1]
{
//! [1] //! [2]
}
//! [2]
#define Counter ReentrantCounter
//! [3]
class Counter
//! [3] //! [4]
{
public:
Counter() { n = 0; }
void increment() { ++n; }
void decrement() { --n; }
int value() const { return n; }
private:
int n;
};
//! [4]
#undef Counter
#define Counter ThreadSafeCounter
//! [5]
class Counter
//! [5] //! [6]
{
public:
Counter() { n = 0; }
void increment() { QMutexLocker locker(&mutex); ++n; }
void decrement() { QMutexLocker locker(&mutex); --n; }
int value() const { QMutexLocker locker(&mutex); return n; }
private:
mutable QMutex mutex;
int n;
};
//! [6]
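// Usage sketch (not part of the Qt documentation snippets above): the mutex-protected
// counter can be shared safely between threads, e.g.
//   Counter counter;                       // ThreadSafeCounter via the #define above
//   // worker threads may call counter.increment() concurrently;
//   // counter.value() then reflects every increment exactly once.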
typedef int SomeClass;
//! [7]
QThreadStorage<QCache<QString, SomeClass> *> caches;
void cacheObject(const QString &key, SomeClass *object)
//! [7] //! [8]
{
if (!caches.hasLocalData())
caches.setLocalData(new QCache<QString, SomeClass>);
caches.localData()->insert(key, object);
}
void removeFromCache(const QString &key)
//! [8] //! [9]
{
if (!caches.hasLocalData())
return;
caches.localData()->remove(key);
}
//! [9]
int main()
{
return 0;
}<|fim▁end|> | **
** If you have questions regarding the use of this file, please contact
** Nokia at [email protected]. |
<|file_name|>scalar_unittest.py<|end_file_name|><|fim▁begin|># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import story
from telemetry import page as page_module
from telemetry import value
from telemetry.value import improvement_direction
from telemetry.value import none_values
from telemetry.value import scalar
class TestBase(unittest.TestCase):
def setUp(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page_module.Page('http://www.bar.com/', story_set, story_set.base_dir,
name='http://www.bar.com/'))
story_set.AddStory(
page_module.Page('http://www.baz.com/', story_set, story_set.base_dir,
name='http://www.baz.com/'))
story_set.AddStory(
page_module.Page('http://www.foo.com/', story_set, story_set.base_dir,
name='http://www.foo.com/'))
self.story_set = story_set
@property
def pages(self):
return self.story_set.stories
class ValueTest(TestBase):
def testRepr(self):
page0 = self.pages[0]
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True,
description='desc', tir_label='my_ir',
improvement_direction=improvement_direction.DOWN)
expected = ('ScalarValue(http://www.bar.com/, x, unit, 3, important=True, '
'description=desc, tir_label=my_ir, '
'improvement_direction=down, grouping_keys={}')
self.assertEquals(expected, str(v))
def testBuildbotValueType(self):
page0 = self.pages[0]
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True,
improvement_direction=improvement_direction.DOWN)
self.assertEquals('default', v.GetBuildbotDataType(
value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
self.assertEquals([3], v.GetBuildbotValue())
self.assertEquals(('x', page0.name),
v.GetChartAndTraceNameForPerPageResult())
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=False,
improvement_direction=improvement_direction.DOWN)
self.assertEquals(
'unimportant',
v.GetBuildbotDataType(value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
def testScalarSamePageMerging(self):
page0 = self.pages[0]
v0 = scalar.ScalarValue(page0, 'x', 'unit', 1,
description='important metric',
improvement_direction=improvement_direction.UP)
v1 = scalar.ScalarValue(page0, 'x', 'unit', 2,
description='important metric',
improvement_direction=improvement_direction.UP)
self.assertTrue(v1.IsMergableWith(v0))
vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
self.assertEquals(page0, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)<|fim▁hole|> self.assertEquals(improvement_direction.UP, vM.improvement_direction)
def testScalarDifferentPageMerging(self):
page0 = self.pages[0]
page1 = self.pages[1]
v0 = scalar.ScalarValue(page0, 'x', 'unit', 1,
description='important metric',
improvement_direction=improvement_direction.UP)
v1 = scalar.ScalarValue(page1, 'x', 'unit', 2,
description='important metric',
improvement_direction=improvement_direction.UP)
vM = scalar.ScalarValue.MergeLikeValuesFromDifferentPages([v0, v1])
self.assertEquals(None, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals('important metric', vM.description)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
self.assertEquals(improvement_direction.UP, vM.improvement_direction)
def testScalarWithNoneValueMerging(self):
page0 = self.pages[0]
v0 = scalar.ScalarValue(
page0, 'x', 'unit', 1, improvement_direction=improvement_direction.DOWN)
v1 = scalar.ScalarValue(page0, 'x', 'unit', None, none_value_reason='n',
improvement_direction=improvement_direction.DOWN)
self.assertTrue(v1.IsMergableWith(v0))
vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
self.assertEquals(None, vM.values)
expected_none_value_reason = (
'Merging values containing a None value results in a None value. '
'None values: [ScalarValue(http://www.bar.com/, x, unit, None, '
'important=True, description=None, tir_label=None, '
'improvement_direction=down, grouping_keys={}]')
self.assertEquals(expected_none_value_reason, vM.none_value_reason)
def testScalarWithNoneValueMustHaveNoneReason(self):
page0 = self.pages[0]
self.assertRaises(none_values.NoneValueMissingReason,
lambda: scalar.ScalarValue(
page0, 'x', 'unit', None,
improvement_direction=improvement_direction.UP))
def testScalarWithNoneReasonMustHaveNoneValue(self):
page0 = self.pages[0]
self.assertRaises(none_values.ValueMustHaveNoneValue,
lambda: scalar.ScalarValue(
page0, 'x', 'unit', 1, none_value_reason='n',
improvement_direction=improvement_direction.UP))
def testAsDict(self):
v = scalar.ScalarValue(None, 'x', 'unit', 42, important=False,
improvement_direction=improvement_direction.DOWN)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {'value': 42})
def testNoneValueAsDict(self):
v = scalar.ScalarValue(None, 'x', 'unit', None, important=False,
none_value_reason='n',
improvement_direction=improvement_direction.DOWN)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {'value': None, 'none_value_reason': 'n'})
def testFromDictInt(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42,
'improvement_direction': improvement_direction.DOWN,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, 42)
self.assertEquals(v.improvement_direction, improvement_direction.DOWN)
def testFromDictFloat(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42.4,
'improvement_direction': improvement_direction.UP,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, 42.4)
def testFromDictWithoutImprovementDirection(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertIsNone(v.improvement_direction)
def testFromDictNoneValue(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': None,
'none_value_reason': 'n',
'improvement_direction': improvement_direction.UP,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, None)
self.assertEquals(v.none_value_reason, 'n')<|fim▁end|> | self.assertEquals('important metric', vM.description)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values) |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3
#from __future__ import print_function
from setuptools import setup, Extension
import sys
import os
import psutil
# monkey-patch for parallel compilation
def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
# those lines are copied from distutils.ccompiler.CCompiler directly
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
# parallel code
N = psutil.cpu_count(logical=False) # number of parallel compilations
import multiprocessing.pool
def _single_compile(obj):
try: src, ext = build[obj]
except KeyError: return
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# convert to list, imap is evaluated on-demand
list(multiprocessing.pool.ThreadPool(N).imap(_single_compile,objects))
return objects
#import distutils.ccompiler
#distutils.ccompiler.CCompiler.compile=parallelCCompile
''' Note:
to build Boost.Python on Windows with mingw
bjam target-os=windows/python=3.4 toolset=gcc variant=debug,release link=static,shared threading=multi runtime-link=shared cxxflags="-include cmath "
also insert this on top of boost/python.hpp :
#include <cmath> //fix cmath:1096:11: error: '::hypot' has not been declared
'''
def getExtensions():
platform = sys.platform
extensionsList = []
sources = ['src/Genome.cpp',
'src/Innovation.cpp',
'src/NeuralNetwork.cpp',
'src/Parameters.cpp',
'src/PhenotypeBehavior.cpp',
'src/Population.cpp',
'src/Random.cpp',
'src/Species.cpp',
'src/Substrate.cpp',
'src/Utils.cpp']
extra = ['-march=native',
'-mtune=native',
'-g',
]
if platform == 'darwin':
extra += ['-stdlib=libc++',
'-std=c++11',]
else:
extra += ['-std=gnu++11']
is_windows = 'win' in platform and platform != 'darwin'
if is_windows:
extra.append('/EHsc')
else:
extra.append('-w')
prefix = os.getenv('PREFIX')
if prefix and len(prefix) > 0:
extra += ["-I{}/include".format(prefix)]
build_sys = os.getenv('MN_BUILD')
if build_sys is None:
if os.path.exists('_MultiNEAT.cpp'):
sources.insert(0, '_MultiNEAT.cpp')
extra.append('-O3')
extensionsList.extend([Extension('MultiNEAT._MultiNEAT',
sources,
extra_compile_args=extra)],
)
else:
print('Source file is missing and MN_BUILD environment variable is not set.\n'
'Specify either \'cython\' or \'boost\'. Example to build in Linux with Cython:\n'
'\t$ export MN_BUILD=cython')
exit(1)
elif build_sys == 'cython':
from Cython.Build import cythonize
sources.insert(0, '_MultiNEAT.pyx')
extra.append('-O3')
extensionsList.extend(cythonize([Extension('MultiNEAT._MultiNEAT',
sources,
extra_compile_args=extra)],
))
elif build_sys == 'boost':
is_python_2 = sys.version_info[0] < 3
sources.insert(0, 'src/PythonBindings.cpp')
if is_windows:
if is_python_2:
raise RuntimeError("Python prior to version 3 is not supported on Windows due to limits of VC++ compiler version")
libs = ['boost_system', 'boost_serialization']
if is_python_2:
libs += ['boost_python', "boost_numpy"]
else:
# with boost 1.67 you need boost_python3x and boost_numpy3x where x is python version 3.x
libs += ['boost_python36', "boost_numpy36"] # in Ubuntu 14 there is only 'boost_python-py34'
# for Windows with mingw
# libraries= ['libboost_python-mgw48-mt-1_58',
# 'libboost_serialization-mgw48-mt-1_58'],
# include_dirs = ['C:/MinGW/include', 'C:/Users/Peter/Desktop/boost_1_58_0'],
# library_dirs = ['C:/MinGW/lib', 'C:/Users/Peter/Desktop/boost_1_58_0/stage/lib'],
extra.extend(['-DUSE_BOOST_PYTHON', '-DUSE_BOOST_RANDOM', #'-O0',
#'-DVDEBUG',
])
exx = Extension('MultiNEAT._MultiNEAT',
sources,
libraries=libs,
extra_compile_args=extra)
print(dir(exx))
print(exx)
print(exx.extra_compile_args)
extensionsList.append(exx)
else:
raise AttributeError('Unknown tool: {}'.format(build_sys))<|fim▁hole|>
setup(name='multineat',
version='0.5', # Update version in conda/meta.yaml as well
packages=['MultiNEAT'],
ext_modules=getExtensions())<|fim▁end|> |
return extensionsList |
<|file_name|>TruncatedLorentzian.cpp<|end_file_name|><|fim▁begin|>//
// $Id: TruncatedLorentzian.cpp 2051 2010-06-15 18:39:13Z chambm $
//
//
// Original author: Darren Kessner <[email protected]>
//
// Copyright 2006 Louis Warschaw Prostate Cancer Center
// Cedars Sinai Medical Center, Los Angeles, California 90048
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#define PWIZ_SOURCE
#include "TruncatedLorentzian.hpp"
#include "pwiz/utility/misc/Std.hpp"
#define i_ (complex<double>(0,1))
#define pi_ M_PI
namespace pwiz {
namespace frequency {
struct TruncatedLorentzian::Impl
{
public:
Impl(double T)
: T_(T),
cacheLevel_(-1),
f_(0), alpha_(0,0), tau_(0), f0_(0), x_(0), L_(0),
dLdx_(0), dxdt_(0), dxdf_(0), dLdt_(0), dLdf_(0),
d2Ldx2_(0), d2xdt2_(0), d2Ldt2_(0), d2Ldf2_(0), d2Ldtdf_(0)
{}
complex<double> value(double f, const ublas::vector<double>& p);
void d1(double f,
const ublas::vector<double>& p,
ublas::vector< complex<double> >& result);
void d2(double f,
const ublas::vector<double>& p,
ublas::matrix< complex<double> >& result);
double fwhm(const ublas::vector<double>& p) const;
private:
double T_;
int cacheLevel_; // (-1 == invalid)
// function values (valid if cacheLevel >= 0)
double f_;
complex<double> alpha_;
double tau_;
double f0_;
complex<double> x_;
complex<double> L_;
// first derivatives (valid if cacheLevel >= 1)
complex<double> dLdx_;
complex<double> dxdt_;
complex<double> dxdf_;
complex<double> dLdt_;
complex<double> dLdf_;
// second derivatives (valid if cacheLevel >= 2)
complex<double> d2Ldx2_;
complex<double> d2xdt2_;
complex<double> d2Ldt2_;
complex<double> d2Ldf2_;
complex<double> d2Ldtdf_;
void calculate(double f, const ublas::vector<double>& p, int cacheLevel);
};
complex<double> TruncatedLorentzian::Impl::value(double f, const ublas::vector<double>& p)
{
calculate(f, p, 0);
return alpha_ * L_;
}
void TruncatedLorentzian::Impl::d1(double f,
const ublas::vector<double>& p,
ublas::vector< complex<double> >& result)
{
calculate(f, p, 1);
result.resize(4);
result.clear();
result(0) = L_;
result(1) = i_ * L_;
result(2) = alpha_ * dLdt_;
result(3) = alpha_ * dLdf_;
}
void TruncatedLorentzian::Impl::d2(double f,
const ublas::vector<double>& p,
ublas::matrix< complex<double> >& result)
{
calculate(f, p, 2);
result.resize(4,4);
result.clear();
result(0,0) = result(0,1) = result(1,0) = result(1,1) = 0;
result(0,2) = result(2,0) = dLdt_;
result(0,3) = result(3,0) = dLdf_;
result(1,2) = result(2,1) = i_ * dLdt_;
result(1,3) = result(3,1) = i_ * dLdf_;
result(2,2) = alpha_ * d2Ldt2_;
result(2,3) = result(3,2) = alpha_ * d2Ldtdf_;
result(3,3) = alpha_ * d2Ldf2_;
}
double TruncatedLorentzian::Impl::fwhm(const ublas::vector<double>& p) const
{
return sqrt(T_*T_+p(Tau)*p(Tau))/(T_*p(Tau));
}
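// Note (informal): for an observation window T and decay constant tau, the full
// width at half maximum returned above is sqrt(T^2 + tau^2) / (T * tau), i.e. the
// peak broadens as either the window length or the decay time shrinks.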
void TruncatedLorentzian::Impl::calculate(double f, const ublas::vector<double>& p, int cacheLevel)
{
// cache with key <f,p>
if (f != f_ ||
p(AlphaR) != alpha_.real() ||
p(AlphaI) != alpha_.imag() ||
p(Tau) != tau_ ||
p(F0) != f0_)
{
// recache
*this = Impl(T_); // zero out everything except T_
f_ = f;
alpha_ = complex<double>(p(AlphaR), p(AlphaI));
tau_ = p(Tau);
f0_ = p(F0);
}
else
{
// cache hit
//cout << "cache hit!\n";
}
if (cacheLevel>=0 && cacheLevel_<0)
{
x_ = 1/tau_ + 2*pi_*i_*(f_-f0_);
L_ = (1.-exp(-x_*T_))/x_;
cacheLevel_ = 0;
}
if (cacheLevel>=1 && cacheLevel_<1)
{
dLdx_ = ((T_*x_+1.)*exp(-x_*T_) - 1.) / (x_*x_);
dxdt_ = -1/(tau_*tau_);
dxdf_ = -2*pi_*i_;
dLdt_ = dLdx_ * dxdt_;
dLdf_ = dLdx_ * dxdf_;
cacheLevel_ = 1;
}
if (cacheLevel>=2 && cacheLevel_<2)
{
d2Ldx2_ = (2. - (pow(T_*x_+1.,2)+1.)*exp(-x_*T_)) / pow(x_,3);
d2xdt2_ = 2/pow(tau_,3);
d2Ldt2_ = d2Ldx2_*pow(dxdt_,2) + dLdx_*d2xdt2_;
d2Ldf2_ = d2Ldx2_*pow(dxdf_,2);
d2Ldtdf_ = d2Ldx2_ * dxdt_ * dxdf_;
cacheLevel_ = 2;
}
}
PWIZ_API_DECL TruncatedLorentzian::TruncatedLorentzian(double T)
: impl_(new Impl(T))
{}
PWIZ_API_DECL TruncatedLorentzian::~TruncatedLorentzian()
{} // this must be here to delete Impl properly
PWIZ_API_DECL complex<double> TruncatedLorentzian::operator()(double f, const ublas::vector<double>& p) const
{
return impl_->value(f, p);
}
PWIZ_API_DECL ublas::vector< complex<double> > TruncatedLorentzian::dp(double f, const ublas::vector<double>& p) const
{
ublas::vector< complex<double> > result;
impl_->d1(f, p, result);
return result;
}
PWIZ_API_DECL ublas::matrix< complex<double> > TruncatedLorentzian::dp2(double f, const ublas::vector<double>& p) const
{
ublas::matrix< complex<double> > result;
impl_->d2(f, p, result);
return result;
}
<|fim▁hole|>
PWIZ_API_DECL void TruncatedLorentzian::outputSamples(const string& filename, const ublas::vector<double>& p, double shift, double scale) const
{
cout << "[TruncatedLorentzian] Writing file " << filename << endl;
ofstream os(filename.c_str());
if (!os)
{
cout << "[TruncatedLorentzian::outputSamples()] Unable to write to file " << filename << endl;
return;
}
os.precision(8);
double fwhm = impl_->fwhm(p);
for (double f=p(F0)-5*fwhm; f<p(F0)+5*fwhm; f+=.01*fwhm)
{
complex<double> value = impl_->value(f, p);
os << f+shift << " 0 " << value.real()*scale << ' ' << value.imag()*scale << ' ' << sqrt(norm(value))*scale << endl;
}
}
} // namespace frequency
} // namespace pwiz<|fim▁end|> | |
<|file_name|>Canalave Gym Set.tsx<|end_file_name|><|fim▁begin|><?xml version="1.0" encoding="UTF-8"?>
<tileset name="Canalave Gym Set" firstgid="1" tilewidth="16" tileheight="16"><|fim▁hole|><|fim▁end|> | <image source="Canalave Gym Set.png" trans="000000"/>
</tileset> |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#![allow(non_camel_case_types)]
extern crate libc;
use libc::{c_char, c_float, c_int, c_uint, c_void};
use libc::{size_t, uint64_t};
/// manually define unsigned int
pub type mx_uint = c_uint;
/// manually define float
pub type mx_float = c_float;
pub enum OpaqueNDArrayHandle {}
pub enum OpaqueFunctionHandle {}
pub enum OpaqueAtomicSymbolCreator {}
pub enum OpaqueSymbolHandle {}
pub enum OpaqueAtomicSymbolHandle {}
pub enum OpaqueExecutorHandle {}
pub enum OpaqueDataIterCreator {}
pub enum OpaqueDataIterHandle {}
pub enum OpaqueKVStoreHandle {}
pub enum OpaqueRecordIOHandle {}
pub enum OpaqueRtcHandle {}
pub enum OpaqueOptimizerCreator {}
pub enum OpaqueOptimizerHandle {}
/// handle to NDArray
pub type NDArrayHandle = *mut OpaqueNDArrayHandle;
/// handle to a mxnet narray function that changes NDArray
pub type FunctionHandle = *const OpaqueFunctionHandle;
/// handle to a function that takes param and creates symbol
pub type AtomicSymbolCreator = *mut OpaqueAtomicSymbolCreator;
/// handle to a symbol that can be bind as operator
pub type SymbolHandle = *mut OpaqueSymbolHandle;
/// handle to a AtomicSymbol
pub type AtomicSymbolHandle = *mut OpaqueAtomicSymbolHandle;
/// handle to an Executor
pub type ExecutorHandle = *mut OpaqueExecutorHandle;
/// handle a dataiter creator
pub type DataIterCreator = *mut OpaqueDataIterCreator;
/// handle to a DataIterator
pub type DataIterHandle = *mut OpaqueDataIterHandle;
/// handle to KVStore
pub type KVStoreHandle = *mut OpaqueKVStoreHandle;
/// handle to RecordIO
pub type RecordIOHandle = *mut OpaqueRecordIOHandle;
/// handle to MXRtc
pub type RtcHandle = *mut OpaqueRtcHandle;
/// handle to a function that takes param and creates optimizer
pub type OptimizerCreator = *mut OpaqueOptimizerCreator;
/// handle to Optimizer
pub type OptimizerHandle = *mut OpaqueOptimizerHandle;
pub type ExecutorMonitorCallback = extern "C" fn(*const c_char, NDArrayHandle, c_void);
#[repr(C)]
pub struct NativeOpInfo {
pub forward: extern "C" fn(size: c_int,
ptrs: *mut *mut c_float,
ndims: *mut c_int,
shapes: *mut *mut c_uint,
tags: *mut c_int,
state: *mut c_void),
pub backward: extern "C" fn(size: c_int,
ptrs: *mut *mut c_float,
ndims: *mut c_int,
shapes: *mut *mut c_uint,
tags: *mut c_int,
state: *mut c_void),
pub infer_shape: extern "C" fn(size: c_int,
ndims: *mut c_int,
shapes: *mut *mut c_uint,
state: *mut c_void),
pub list_outputs: extern "C" fn(args: *mut *mut *mut c_char, state: *mut c_void),
pub list_arguments: extern "C" fn(args: *mut *mut *mut c_char, state: *mut c_void),
// all functions also pass a payload void* pointer
pub p_forward: *mut c_void,
pub p_backward: *mut c_void,
pub p_infer_shape: *mut c_void,
pub p_list_outputs: *mut c_void,
pub p_list_arguments: *mut c_void,
}
#[repr(C)]
pub struct NDArrayOpInfo {
pub forward: extern "C" fn(size: c_int,
ptrs: *mut *mut c_void,
tags: *mut c_int,
state: *mut c_void)
-> bool,
pub backward: extern "C" fn(size: c_int,
ptrs: *mut *mut c_void,
tags: *mut c_int,
state: *mut c_void)
-> bool,
pub infer_shape: extern "C" fn(num_input: c_int,
ndims: *mut c_int,
shapes: *mut *mut c_uint,
state: *mut c_void)
-> bool,
pub list_outputs: extern "C" fn(outputs: *mut *mut *mut c_char, state: *mut c_void) -> bool,
pub list_arguments: extern "C" fn(outputs: *mut *mut *mut c_char, state: *mut c_void) -> bool,
pub declare_backward_dependency: extern "C" fn(out_grad: *const c_int,
in_data: *const c_int,
out_data: *const c_int,
num_deps: *mut c_int,
rdeps: *mut *mut c_int,
state: *mut c_void)
-> bool,
// all functions also pass a payload void* pointer
pub p_forward: *mut c_void,
pub p_backward: *mut c_void,
pub p_infer_shape: *mut c_void,
pub p_list_outputs: *mut c_void,
pub p_list_arguments: *mut c_void,
pub p_declare_backward_dependency: *mut c_void,
}
#[repr(C)]
pub struct CustomOpInfo {
pub forward: extern "C" fn(size: c_int,
ptrs: *mut *mut c_void,
tags: *mut c_int,
reqs: *const c_int,
is_train: bool,
state: *mut c_void)
-> bool,
pub backward: extern "C" fn(size: c_int,
ptrs: *mut *mut c_void,
tags: *mut c_int,
reqs: *const c_int,
is_train: bool,
state: *mut c_void)
-> bool,
pub del: extern "C" fn(*mut c_void /* state */) -> bool,
// all functions also pass a payload void* pointer
pub p_forward: *mut c_void,
pub p_backward: *mut c_void,
pub p_del: *mut c_void,
}
#[repr(C)]
pub struct CustomOpPropInfo {
pub list_arguments: extern "C" fn(args: *mut *mut *mut c_char, state: *mut c_void) -> bool,
pub list_outputs: extern "C" fn(outputs: *mut *mut *mut c_char, state: *mut c_void) -> bool,
pub infer_shape: extern "C" fn(num_input: c_int,
ndims: *mut c_int,
shapes: *mut *mut c_uint,
state: *mut c_void)
-> bool,
pub declare_backward_dependency: extern "C" fn(out_grad: *const c_int,
in_data: *const c_int,
out_data: *const c_int,
num_deps: *mut c_int,
rdeps: *mut *mut c_int,
state: *mut c_void)
-> bool,
pub create_operator: extern "C" fn(*const c_char, // ctx
c_int, // num_inputs
*mut *mut c_uint, // shapes
*mut c_int, // ndims
*mut c_int, // dtypes
*mut CustomOpInfo, // ret
*mut c_void /* state */)
-> bool,
pub list_auxiliary_states: extern "C" fn(*mut *mut *mut c_char, // aux
*mut c_void /* state */)
-> bool,
pub del: extern "C" fn(state: *mut c_void) -> bool,
// all functions also pass a payload void* pointer
pub p_list_arguments: *mut c_void,
pub p_list_outputs: *mut c_void,
pub p_infer_shape: *mut c_void,
pub p_declare_backward_dependency: *mut c_void,
pub p_create_operator: *mut c_void,
pub p_list_auxiliary_states: *mut c_void,
pub p_del: *mut c_void,
}
pub type CustomOpPropCreator = extern "C" fn(op_type: *const c_char,
num_kwargs: c_int,
keys: *const c_char,
values: *const *const c_char,
ret: *mut CustomOpPropInfo)
-> bool;
/// user-defined updater for the kvstore
///
/// It's this updater's responsibility to delete recv and local
///
/// - param: the key
/// - param: recv the pushed value on this key
/// - param: local the value stored on local on this key
/// - param: handle The additional handle to the updater
pub type MXKVStoreUpdater = extern "C" fn(key: c_int,
recv: NDArrayHandle,
local: NDArrayHandle,
handle: *mut c_void);
/// the prototype of a server controller
///
/// - param: head the head of the command
/// - param: body the body of the command
/// - param: controller_handle helper handle for implementing controller
pub type MXKVStoreServerController = extern "C" fn(head: c_int,
body: *const c_char,
controller_handle: *mut c_void);
#[link(name = "mxnet")]
extern "C" {
/// return str message of the last error
///
/// all functions in this file return 0 on success
/// and -1 when an error occurs;
/// MXGetLastError can be called to retrieve the error
///
/// this function is threadsafe and can be called by different threads
///
/// - return: error info
pub fn MXGetLastError() -> *const c_char;
// -------------------------------------
// Part 0: Global State setups
// -------------------------------------
/// Seed the global random number generators in mxnet.
///
/// - param seed the random number seed.
/// - return: 0 when success, -1 when failure happens.
pub fn MXRandomSeed(seed: c_int) -> c_int;
/// Notify the engine about a shutdown,
///
/// This can help the engine print fewer messages to the display.
///
/// Users do not have to call this function.
///
/// - return: 0 when success, -1 when failure happens.
pub fn MXNotifyShutdown() -> c_int;
// -------------------------------------
// Part 1: NDArray creation and deletion
// -------------------------------------
/// create a NDArray handle that is not initialized
/// can be used to pass in as mutate variables
/// to hold the result of NDArray
///
/// - param: out the returning handle
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayCreateNone(out: *mut NDArrayHandle) -> c_int;
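// Usage sketch (illustrative only, not part of the original bindings): calling
// this from Rust goes through an unsafe block with an out-parameter, e.g.
//   let mut handle: NDArrayHandle = std::ptr::null_mut();
//   let ret = unsafe { MXNDArrayCreateNone(&mut handle) };
//   if ret != 0 { /* inspect MXGetLastError() for details */ }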
/// create a NDArray with specified shape
///
/// - param: shape the pointer to the shape
/// - param: ndim the dimension of the shape
/// - param: dev_type device type, specify device we want to take
/// - param: dev_id the device id of the specific device
/// - param: delay_alloc whether to delay allocation until
/// the narray is first mutated
/// - param: out the returning handle
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayCreate(shape: *const mx_uint,
ndim: mx_uint,
dev_type: c_int,
dev_id: c_int,
delay_alloc: c_int,
out: *mut NDArrayHandle)
-> c_int;
/// create a NDArray with specified shape and data type
///
/// - param: shape the pointer to the shape
/// - param: ndim the dimension of the shape
/// - param: dev_type device type, specify device we want to take
/// - param: dev_id the device id of the specific device
/// - param: delay_alloc whether to delay allocation until
/// the narray is first mutated
/// - param: dtype data type of created array
/// - param: out the returning handle
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayCreateEx(shape: *const mx_uint,
ndim: mx_uint,
dev_type: c_int,
dev_id: c_int,
delay_alloc: c_int,
dtype: c_int,
out: *mut NDArrayHandle)
-> c_int;
/// create a NDArray handle that is loaded from raw bytes.
///
/// - param: buf the head of the raw bytes
/// - param: size size of the raw bytes
/// - param: out the returning handle
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayLoadFromRawBytes(buf: *const c_void,
size: size_t,
out: *mut NDArrayHandle)
-> c_int;
/// save the NDArray into raw bytes.
///
/// - param: handle the NDArray handle
/// - param: out_size size of the raw bytes
/// - param: out_buf the head of returning memory bytes.
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArraySaveRawBytes(handle: NDArrayHandle,
out_size: *mut size_t,
out_buf: *mut *const c_char)
-> c_int;
/// Save list of narray into the file.
///
/// - param: fname name of the file.
/// - param: num_args number of arguments to save.
/// - param: args the array of NDArrayHandles to be saved.
/// - param: keys the name of the NDArray, optional, can be NULL
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArraySave(fname: *const c_char,
num_args: mx_uint,
args: *mut NDArrayHandle,
keys: *const *const c_char)
-> c_int;
/// Load list of narray from the file.
///
/// - param: fname name of the file.
/// - param: out_size number of narray loaded.
/// - param: out_arr head of the returning narray handles.
    /// - param: out_name_size size of output name array.
/// - param: out_names the names of returning NDArrays, can be NULL
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayLoad(fname: *const c_char,
out_size: *mut mx_uint,
out_arr: *mut *mut NDArrayHandle,
out_name_size: *mut mx_uint,
out_names: *mut *const *const c_char)
-> c_int;
    /// Perform a synchronous copy from a contiguous CPU memory region.
    ///
    /// This function will call WaitToWrite before the copy is performed.
    /// This is useful for copying data from an existing memory region that is
    /// not wrapped by an NDArray (thus the dependency is not tracked).
///
/// - param: handle the NDArray handle
/// - param: data the data source to copy from.
/// - param: size the memory size we want to copy from.
pub fn MXNDArraySyncCopyFromCPU(handle: NDArrayHandle,
data: *const c_void,
size: size_t)
-> c_int;
    /// Perform a synchronous copy to a contiguous CPU memory region.
    ///
    /// This function will call WaitToRead before the copy is performed.
    /// This is useful for copying data into an existing memory region that is
    /// not wrapped by an NDArray (thus the dependency is not tracked).
///
/// - param: handle the NDArray handle
/// - param: data the data source to copy into.
/// - param: size the memory size we want to copy into.
pub fn MXNDArraySyncCopyToCPU(handle: NDArrayHandle, data: *mut c_void, size: size_t) -> c_int;
    /// Wait until all the pending writes with respect to the NDArray are finished.
    /// Always call this before reading data out synchronously.
///
/// - param: handle the NDArray handle
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayWaitToRead(handle: NDArrayHandle) -> c_int;
    /// Wait until all the pending reads/writes with respect to the NDArray are finished.
    /// Always call this before writing data into the NDArray synchronously.
///
/// - param: handle the NDArray handle
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayWaitToWrite(handle: NDArrayHandle) -> c_int;
/// wait until all delayed operations in
    /// the system are completed
///
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayWaitAll() -> c_int;
/// free the narray handle
///
/// - param: handle the handle to be freed
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayFree(handle: NDArrayHandle) -> c_int;
/// Slice the NDArray along axis 0.
///
    /// - param: handle the handle to the narray
/// - param: slice_begin The beginning index of slice
/// - param: slice_end The ending index of slice
/// - param: out The NDArrayHandle of sliced NDArray
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArraySlice(handle: NDArrayHandle,
slice_begin: mx_uint,
slice_end: mx_uint,
out: *mut NDArrayHandle)
-> c_int;
/// Index the NDArray along axis 0.
///
    /// - param: handle the handle to the narray
/// - param: idx the index
/// - param: out The NDArrayHandle of sliced NDArray
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayAt(handle: NDArrayHandle, idx: mx_uint, out: *mut NDArrayHandle) -> c_int;
/// Reshape the NDArray.
///
/// - param: handle the handle to the narray
/// - param: ndim number of dimensions of new shape
/// - param: dims new shape
/// - param: out the NDArrayHandle of reshaped NDArray
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayReshape(handle: NDArrayHandle,
ndim: c_int,
dims: *const c_int,
out: *mut NDArrayHandle)
-> c_int;
<|fim▁hole|> /// - param: handle the handle to the narray
/// - param: out_dim the output dimension
/// - param: out_pdata pointer holder to get data pointer of the shape
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayGetShape(handle: NDArrayHandle,
out_dim: *mut mx_uint,
out_pdata: *mut *const mx_uint)
-> c_int;
/// get the content of the data in NDArray
///
/// - param: handle the handle to the narray
/// - param: out_pdata pointer holder to get pointer of data
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayGetData(handle: NDArrayHandle, out_pdata: *mut *mut mx_float) -> c_int;
/// get the type of the data in NDArray
///
/// - param: handle the handle to the narray
/// - param: out_dtype pointer holder to get type of data
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayGetDType(handle: NDArrayHandle, out_dtype: *mut c_int) -> c_int;
/// get the context of the NDArray
///
/// - param: handle the handle to the narray
/// - param: out_dev_type the output device type
/// - param: out_dev_id the output device id
/// - return: 0 when success, -1 when failure happens
pub fn MXNDArrayGetContext(handle: NDArrayHandle,
out_dev_type: *mut c_int,
out_dev_id: *mut c_int)
-> c_int;
// --------------------------------
// Part 2: functions on NDArray
// --------------------------------
/// list all the available functions handles
/// most user can use it to list all the needed functions
///
/// - param: out_size the size of returned array
/// - param: out_array the output function array
/// - return: 0 when success, -1 when failure happens
pub fn MXListFunctions(out_size: *mut mx_uint, out_array: *mut *mut FunctionHandle) -> c_int;
/// get the function handle by name
///
/// - param: name the name of the function
/// - param: out the corresponding function handle
/// - return: 0 when success, -1 when failure happens
pub fn MXGetFunction(name: *const c_char, out: *mut FunctionHandle) -> c_int;
/// Get the information of the function handle.
///
/// - param: fun The function handle.
/// - param: name The returned name of the function.
/// - param: description The returned description of the function.
/// - param: num_args Number of arguments.
/// - param: arg_names Name of the arguments.
/// - param: arg_type_infos Type informations about the arguments.
/// - param: arg_descriptions Description information about the arguments.
/// - param: return_type Return type of the function.
/// - return: 0 when success, -1 when failure happens
pub fn MXFuncGetInfo(fun: FunctionHandle,
name: *mut *const c_char,
description: *mut *const c_char,
num_args: *mut mx_uint,
arg_names: *mut *const *const c_char,
arg_type_infos: *mut *const *const c_char,
arg_descriptions: *mut *const *const c_char,
return_type: *mut *const c_char)
-> c_int;
/// get the argument requirements of the function
///
/// - param: fun input function handle
/// - param: num_use_vars how many NDArrays to be passed in as used_vars
    /// - param: num_scalars how many scalar arguments are needed
/// - param: num_mutate_vars how many NDArrays to be passed in as mutate_vars
/// - param: type_mask the type mask of this function
/// - return: 0 when success, -1 when failure happens
/// - see: MXFuncInvoke
pub fn MXFuncDescribe(fun: FunctionHandle,
num_use_vars: *mut mx_uint,
num_scalars: *mut mx_uint,
num_mutate_vars: *mut mx_uint,
type_mask: *mut c_int)
-> c_int;
    /// invoke a function; the array sizes of the passed-in arguments
    /// must match the counts reported by MXFuncDescribe
///
/// - param: fun the function
/// - param: use_vars the normal arguments passed to function
    /// - param: scalar_args the scalar arguments
/// - param: mutate_vars the mutate arguments
/// - return: 0 when success, -1 when failure happens
/// - see: MXFuncDescribeArgs
pub fn MXFuncInvoke(fun: FunctionHandle,
use_vars: *mut NDArrayHandle,
scalar_args: *mut mx_float,
mutate_vars: *mut NDArrayHandle)
-> c_int;
    /// invoke a function; the array sizes of the passed-in arguments
    /// must match the counts reported by MXFuncDescribe
///
/// - param: fun the function
/// - param: use_vars the normal arguments passed to function
    /// - param: scalar_args the scalar arguments
/// - param: mutate_vars the mutate arguments
/// - param: num_params number of keyword parameters
/// - param: param_keys keys for keyword parameters
/// - param: param_vals values for keyword parameters
/// - return: 0 when success, -1 when failure happens
/// - see: MXFuncDescribeArgs
pub fn MXFuncInvokeEx(fun: FunctionHandle,
use_vars: *mut NDArrayHandle,
scalar_args: *mut mx_float,
mutate_vars: *mut NDArrayHandle,
num_params: c_int,
param_keys: *mut *mut c_char,
param_vals: *mut *mut c_char)
-> c_int;
// --------------------------------------------
// Part 3: symbolic configuration generation
// --------------------------------------------
/// list all the available AtomicSymbolEntry
///
/// - param: out_size the size of returned array
/// - param: out_array the output AtomicSymbolCreator array
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolListAtomicSymbolCreators(out_size: *mut mx_uint,
out_array: *mut *mut AtomicSymbolCreator)
-> c_int;
/// Get the name of an atomic symbol.
///
/// - param: creator the AtomicSymbolCreator.
/// - param: name The returned name of the creator.
pub fn MXSymbolGetAtomicSymbolName(creator: AtomicSymbolCreator,
name: *mut *const c_char)
-> c_int;
/// Get the detailed information about atomic symbol.
///
/// - param: creator the AtomicSymbolCreator.
/// - param: name The returned name of the creator.
/// - param: description The returned description of the symbol.
/// - param: num_args Number of arguments.
/// - param: arg_names Name of the arguments.
/// - param: arg_type_infos Type informations about the arguments.
/// - param: arg_descriptions Description information about the arguments.
    /// - param: key_var_num_args The keyword argument for specifying a variable number of arguments.
    /// When this parameter has non-zero length, the function allows a variable number
    /// of positional arguments, and the caller needs to pass it in to
    /// MXSymbolCreateAtomicSymbol
    /// with key = key_var_num_args and value = the number of positional arguments.
/// - param: return_type Return type of the function, can be Symbol or Symbol[]
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolGetAtomicSymbolInfo(creator: AtomicSymbolCreator,
name: *mut *const c_char,
description: *mut *const c_char,
num_args: *mut mx_uint,
arg_names: *mut *const *const c_char,
arg_type_infos: *mut *const *const c_char,
arg_descriptions: *mut *const *const c_char,
key_var_num_args: *mut *const c_char,
return_type: *mut *const c_char)
-> c_int;
/// Create an AtomicSymbol.
///
/// - param: creator the AtomicSymbolCreator
/// - param: num_param the number of parameters
/// - param: keys the keys to the params
/// - param: vals the vals of the params
/// - param: out pointer to the created symbol handle
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolCreateAtomicSymbol(creator: AtomicSymbolCreator,
num_param: mx_uint,
keys: *const *const c_char,
vals: *const *const c_char,
out: *mut SymbolHandle)
-> c_int;
/// Create a Variable Symbol.
///
/// - param: name name of the variable
/// - param: out pointer to the created symbol handle
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolCreateVariable(name: *const c_char, out: *mut SymbolHandle) -> c_int;
/// Create a Symbol by grouping list of symbols together
///
/// - param: num_symbols number of symbols to be grouped
/// - param: symbols array of symbol handles
/// - param: out pointer to the created symbol handle
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolCreateGroup(num_symbols: mx_uint,
symbols: *mut SymbolHandle,
out: *mut SymbolHandle)
-> c_int;
/// Load a symbol from a json file.
///
/// - param: fname the file name.
/// - param: out the output symbol.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolCreateFromFile(fname: *const c_char, out: *mut SymbolHandle) -> c_int;
/// Load a symbol from a json string.
///
/// - param: json the json string.
/// - param: out the output symbol.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolCreateFromJSON(json: *const c_char, out: *mut SymbolHandle) -> c_int;
/// Save a symbol into a json file.
///
/// - param: symbol the input symbol.
/// - param: fname the file name.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolSaveToFile(symbol: SymbolHandle, fname: *const c_char) -> c_int;
/// Save a symbol into a json string
///
/// - param: symbol the input symbol.
/// - param: out_json output json string.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolSaveToJSON(symbol: SymbolHandle, out_json: *mut *const c_char) -> c_int;
/// Free the symbol handle.
///
/// - param: symbol the symbol
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolFree(symbol: SymbolHandle) -> c_int;
/// Copy the symbol to another handle
///
/// - param: symbol the source symbol
/// - param: out used to hold the result of copy
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolCopy(symbol: SymbolHandle, out: *mut SymbolHandle) -> c_int;
/// Print the content of symbol, used for debug.
///
/// - param: symbol the symbol
/// - param: out_str pointer to hold the output string of the printing.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolPrint(symbol: SymbolHandle, out_str: *mut *const c_char) -> c_int;
/// Get string name from symbol
///
/// - param: symbol the source symbol
/// - param: out The result name.
/// - param: success Whether the result is contained in out.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolGetName(symbol: SymbolHandle,
out: *mut *const c_char,
success: *mut c_int)
-> c_int;
/// Get string attribute from symbol
///
/// - param: symbol the source symbol
/// - param: key The key of the symbol.
/// - param: out The result attribute, can be NULL if the attribute do not exist.
/// - param: success Whether the result is contained in out.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolGetAttr(symbol: SymbolHandle,
key: *const c_char,
out: *mut *const c_char,
success: *mut c_int)
-> c_int;
/// Set string attribute from symbol.
///
/// NOTE: Setting attribute to a symbol can affect the semantics
/// (mutable/immutable) of symbolic graph.
///
    /// Safe recommendation: use an immutable graph
/// - Only allow set attributes during creation of new symbol as optional parameter
///
/// Mutable graph (be careful about the semantics):
/// - Allow set attr at any point.
    /// - Mutating an attribute of a node shared by two graphs can cause confusion for users.
///
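    /// For example (illustrative): setting the key "ctx_group" is how nodes are annotated
    /// for the group2ctx mapping consumed by MXExecutorBindX later in this file.
    ///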
/// - param: symbol the source symbol
/// - param: key The key of the symbol.
/// - param: value The value to be saved.
/// - return: 0 when success, -1 when failure happens
    pub fn MXSymbolSetAttr(symbol: SymbolHandle, key: *const c_char, value: *const c_char) -> c_int;
/// Get all attributes from symbol, including all descendents.
///
/// - param: symbol the source symbol
/// - param: out_size The number of output attributes
/// - param: out 2*out_size strings representing key value pairs.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolListAttr(symbol: SymbolHandle,
out_size: *mut mx_uint,
out: *mut *const *const c_char)
-> c_int;
/// Get all attributes from symbol, excluding descendents.
///
/// - param: symbol the source symbol
/// - param: out_size The number of output attributes
/// - param: out 2*out_size strings representing key value pairs.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolListAttrShallow(symbol: SymbolHandle,
out_size: *mut mx_uint,
out: *mut *const *const c_char)
-> c_int;
/// List arguments in the symbol.
///
/// - param: symbol the symbol
/// - param: out_size output size
/// - param: out_str_array pointer to hold the output string array
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolListArguments(symbol: SymbolHandle,
out_size: *mut mx_uint,
out_str_array: *mut *const *const c_char)
-> c_int;
/// List returns in the symbol.
///
/// - param: symbol the symbol
/// - param: out_size output size
/// - param: out_str_array pointer to hold the output string array
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolListOutputs(symbol: SymbolHandle,
out_size: *mut mx_uint,
out_str_array: *mut *const *const c_char)
-> c_int;
/// Get a symbol that contains all the internals.
///
/// - param: symbol The symbol
/// - param: out The output symbol whose outputs are all the internals.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolGetInternals(symbol: SymbolHandle, out: *mut SymbolHandle) -> c_int;
/// Get index-th outputs of the symbol.
///
/// - param: symbol The symbol
/// - param: index the Index of the output.
/// - param: out The output symbol whose outputs are the index-th symbol.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolGetOutput(symbol: SymbolHandle,
index: mx_uint,
out: *mut SymbolHandle)
-> c_int;
/// List auxiliary states in the symbol.
///
/// - param: symbol the symbol
/// - param: out_size output size
/// - param: out_str_array pointer to hold the output string array
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolListAuxiliaryStates(symbol: SymbolHandle,
out_size: *mut mx_uint,
out_str_array: *mut *const *const c_char)
-> c_int;
/// Compose the symbol on other symbols.
///
    /// This function will change the sym handle.
    /// To achieve function apply behavior, copy the symbol first
    /// before applying.
///
/// - param: sym the symbol to apply
/// - param: name the name of symbol
/// - param: num_args number of arguments
/// - param: keys the key of keyword args (optional)
/// - param: args arguments to sym
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolCompose(sym: SymbolHandle,
name: *const c_char,
num_args: mx_uint,
keys: *const *const c_char,
args: *const SymbolHandle)
-> c_int;
/// Get the gradient graph of the symbol
///
/// - param: sym the symbol to get gradient
/// - param: num_wrt number of arguments to get gradient
/// - param: wrt the name of the arguments to get gradient
/// - param: out the returned symbol that has gradient
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolGrad(sym: SymbolHandle,
num_wrt: mx_uint,
wrt: *const *const c_char,
out: *const SymbolHandle)
-> c_int;
/// infer shape of unknown input shapes given the known one.
///
/// The shapes are packed into a CSR matrix represented by arg_ind_ptr
/// and arg_shape_data
///
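    /// For example (illustrative values only): two input arguments with shapes (2, 3)
    /// and (4,) would be packed as arg_ind_ptr = [0, 2, 3] and arg_shape_data = [2, 3, 4].
    ///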
/// The call will be treated as a kwargs call if key != nullptr or
/// num_args==0, otherwise it is positional.
///
/// - param: sym symbol handle
    /// - param: num_args number of input arguments.
/// - param: keys the key of keyword args (optional)
/// - param: arg_ind_ptr the head pointer of the rows in CSR
/// - param: arg_shape_data the content of the CSR
    /// - param: in_shape_size size of the returning array of in_shapes
    /// - param: in_shape_ndim returning array of shape dimensions of each input shape.
    /// - param: in_shape_data returning array of pointers to the head of each input shape.
    /// - param: out_shape_size size of the returning array of out_shapes
    /// - param: out_shape_ndim returning array of shape dimensions of each output shape.
    /// - param: out_shape_data returning array of pointers to the head of each output shape.
    /// - param: aux_shape_size size of the returning array of aux_shapes
    /// - param: aux_shape_ndim returning array of shape dimensions of each auxiliary shape.
    /// - param: aux_shape_data returning array of pointers to the head of each auxiliary shape.
/// - param: complete whether infer shape completes or more information is needed.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolInferShape(sym: SymbolHandle,
num_args: mx_uint,
keys: *const *const c_char,
arg_ind_ptr: *const mx_uint,
arg_shape_data: *const mx_uint,
in_shape_size: *mut mx_uint,
in_shape_ndim: *const *const mx_uint,
in_shape_data: *const *const *const mx_uint,
out_shape_size: *mut mx_uint,
out_shape_ndim: *const *const mx_uint,
out_shape_data: *const *const *const mx_uint,
aux_shape_size: *mut mx_uint,
aux_shape_ndim: *const *const mx_uint,
aux_shape_data: *const *const *const mx_uint,
                              complete: *mut c_int)
                              -> c_int;
/// partially infer shape of unknown input shapes given the known one.
///
/// Return partially inferred results if not all shapes could be inferred.
///
/// The shapes are packed into a CSR matrix represented by arg_ind_ptr and arg_shape_data
///
/// The call will be treated as a kwargs call if key != nullptr or num_args==0,
/// otherwise it is positional.
///
/// - param: sym symbol handle
    /// - param: num_args number of input arguments.
/// - param: keys the key of keyword args (optional)
/// - param: arg_ind_ptr the head pointer of the rows in CSR
/// - param: arg_shape_data the content of the CSR
    /// - param: in_shape_size size of the returning array of in_shapes
    /// - param: in_shape_ndim returning array of shape dimensions of each input shape.
    /// - param: in_shape_data returning array of pointers to the head of each input shape.
    /// - param: out_shape_size size of the returning array of out_shapes
    /// - param: out_shape_ndim returning array of shape dimensions of each output shape.
    /// - param: out_shape_data returning array of pointers to the head of each output shape.
    /// - param: aux_shape_size size of the returning array of aux_shapes
    /// - param: aux_shape_ndim returning array of shape dimensions of each auxiliary shape.
    /// - param: aux_shape_data returning array of pointers to the head of each auxiliary shape.
/// - param: complete whether infer shape completes or more information is needed.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolInferShapePartial(sym: SymbolHandle,
num_args: mx_uint,
                                     keys: *const *const c_char,
                                     arg_ind_ptr: *const mx_uint,
                                     arg_shape_data: *const mx_uint,
                                     in_shape_size: *mut mx_uint,
                                     in_shape_ndim: *const *const mx_uint,
                                     in_shape_data: *const *const *const mx_uint,
                                     out_shape_size: *mut mx_uint,
                                     out_shape_ndim: *const *const mx_uint,
                                     out_shape_data: *const *const *const mx_uint,
                                     aux_shape_size: *mut mx_uint,
                                     aux_shape_ndim: *const *const mx_uint,
                                     aux_shape_data: *const *const *const mx_uint,
                                     complete: *mut c_int)
                                     -> c_int;
/// infer type of unknown input types given the known one.
///
/// The types are packed into a CSR matrix represented by arg_ind_ptr and arg_type_data
///
/// The call will be treated as a kwargs call if key != nullptr or num_args==0,
/// otherwise it is positional.
///
/// - param: sym symbol handle
    /// - param: num_args number of input arguments.
/// - param: keys the key of keyword args (optional)
/// - param: arg_type_data the content of the CSR
    /// - param: in_type_size size of the returning array of in_types
    /// - param: in_type_data returning array of pointers to the head of each input type.
    /// - param: out_type_size size of the returning array of out_types
    /// - param: out_type_data returning array of pointers to the head of each output type.
    /// - param: aux_type_size size of the returning array of aux_types
    /// - param: aux_type_data returning array of pointers to the head of each auxiliary type.
/// - param: complete whether infer type completes or more information is needed.
/// - return: 0 when success, -1 when failure happens
pub fn MXSymbolInferType(sym: SymbolHandle,
num_args: mx_uint,
keys: *const *const c_char,
arg_type_data: *const c_int,
in_type_size: *mut mx_uint,
in_type_data: *const *const c_int,
out_type_size: *mut mx_uint,
out_type_data: *const *const c_int,
aux_type_size: *mut mx_uint,
aux_type_data: *const *const c_int,
complete: *mut c_int)
-> c_int;
// --------------------------------------------
// Part 4: Executor interface
// --------------------------------------------
/// Delete the executor
///
/// - param: handle the executor.
/// - return: 0 when success, -1 when failure happens
pub fn MXExecutorFree(handle: ExecutorHandle) -> c_int;
/// Print the content of execution plan, used for debug.
///
/// - param: handle the executor.
/// - param: out_str pointer to hold the output string of the printing.
/// - return: 0 when success, -1 when failure happens
pub fn MXExecutorPrint(handle: ExecutorHandle, out_str: *mut *const c_char) -> c_int;
/// Executor forward method
///
/// - param: handle executor handle
/// - param: is_train bool value to indicate whether the forward pass is for evaluation
/// - return: 0 when success, -1 when failure happens
pub fn MXExecutorForward(handle: ExecutorHandle, is_train: c_int) -> c_int;
    /// Executor run backward
///
/// - param: handle execute handle
    /// - param: len length
/// - param: head_grads NDArray handle for heads' gradient
///
/// - return: 0 when success, -1 when failure happens
pub fn MXExecutorBackward(handle: ExecutorHandle,
len: mx_uint,
head_grads: *mut NDArrayHandle)
-> c_int;
/// Get executor's head NDArray
///
/// - param: handle executor handle
/// - param: out_size output narray vector size
    /// - param: out output narray handles
/// - return: 0 when success, -1 when failure happens
pub fn MXExecutorOutputs(handle: ExecutorHandle,
out_size: *mut mx_uint,
out: *mut *mut NDArrayHandle)
-> c_int;
/// Generate Executor from symbol
///
/// - param: symbol_handle symbol handle
/// - param: dev_type device type
/// - param: dev_id device id
/// - param: len length
/// - param: in_args in args array
/// - param: arg_grad_store arg grads handle array
/// - param: grad_req_type grad req array
/// - param: aux_states_len length of auxiliary states
/// - param: aux_states auxiliary states array
/// - param: out output executor handle
/// - return: 0 when success, -1 when failure happens
pub fn MXExecutorBind(symbol_handle: SymbolHandle,
dev_type: c_int,
dev_id: c_int,
len: mx_uint,
in_args: *mut NDArrayHandle,
arg_grad_store: *mut NDArrayHandle,
grad_req_type: *mut mx_uint,
aux_states_len: mx_uint,
aux_states: *mut NDArrayHandle,
out: *mut ExecutorHandle)
-> c_int;
/// Generate Executor from symbol,
///
    /// This is an advanced function that allows specifying the group2ctx map.
    /// The user can annotate the "ctx_group" attribute to name each group.
///
/// - param: symbol_handle symbol handle
/// - param: dev_type device type of default context
/// - param: dev_id device id of default context
/// - param: num_map_keys size of group2ctx map
/// - param: map_keys keys of group2ctx map
/// - param: map_dev_types device type of group2ctx map
/// - param: map_dev_ids device id of group2ctx map
/// - param: len length
/// - param: in_args in args array
/// - param: arg_grad_store arg grads handle array
/// - param: grad_req_type grad req array
/// - param: aux_states_len length of auxiliary states
/// - param: aux_states auxiliary states array
/// - param: out output executor handle
/// - return: 0 when success, -1 when failure happens
pub fn MXExecutorBindX(symbol_handle: SymbolHandle,
dev_type: c_int,
dev_id: c_int,
num_map_keys: mx_uint,
map_keys: *const *const c_char,
map_dev_types: *const c_int,
map_dev_ids: *const c_int,
len: mx_uint,
in_args: *mut NDArrayHandle,
arg_grad_store: *mut NDArrayHandle,
grad_req_type: *mut mx_uint,
aux_states_len: mx_uint,
aux_states: *mut NDArrayHandle,
out: *mut ExecutorHandle)
-> c_int;
/// Generate Executor from symbol,
///
    /// This is an advanced function that allows specifying the group2ctx map.
    /// The user can annotate the "ctx_group" attribute to name each group.
///
/// - param: symbol_handle symbol handle
/// - param: dev_type device type of default context
/// - param: dev_id device id of default context
/// - param: num_map_keys size of group2ctx map
/// - param: map_keys keys of group2ctx map
/// - param: map_dev_types device type of group2ctx map
/// - param: map_dev_ids device id of group2ctx map
/// - param: len length
/// - param: in_args in args array
/// - param: arg_grad_store arg grads handle array
/// - param: grad_req_type grad req array
/// - param: aux_states_len length of auxiliary states
/// - param: aux_states auxiliary states array
/// - param: shared_exec input executor handle for memory sharing
/// - param: out output executor handle
/// - return: 0 when success, -1 when failure happens
pub fn MXExecutorBindEX(symbol_handle: SymbolHandle,
dev_type: c_int,
dev_id: c_int,
num_map_keys: mx_uint,
map_keys: *const *const c_char,
map_dev_types: *const c_int,
map_dev_ids: *const c_int,
len: mx_uint,
in_args: *mut NDArrayHandle,
arg_grad_store: *mut NDArrayHandle,
grad_req_type: *mut mx_uint,
aux_states_len: mx_uint,
aux_states: *mut NDArrayHandle,
shared_exec: ExecutorHandle,
out: *mut ExecutorHandle)
-> c_int;
/// set a call back to notify the completion of operation
pub fn MXExecutorSetMonitorCallback(handle: ExecutorHandle,
callback: ExecutorMonitorCallback,
callback_handle: *mut c_void)
-> c_int;
// --------------------------------------------
// Part 5: IO Interface
// --------------------------------------------
/// List all the available iterator entries
///
/// - param: out_size the size of returned iterators
    /// - param: out_array the output iterator entries
/// - return: 0 when success, -1 when failure happens
pub fn MXListDataIters(out_size: *mut mx_uint, out_array: *mut *mut DataIterCreator) -> c_int;
    /// Init an iterator with the given parameters;
    /// the array sizes of the passed-in arguments must match num_param
///
/// - param: handle of the iterator creator
/// - param: num_param number of parameter
/// - param: keys parameter keys
/// - param: vals parameter values
/// - param: out resulting iterator
/// - return: 0 when success, -1 when failure happens
pub fn MXDataIterCreateIter(handle: DataIterCreator,
num_param: mx_uint,
keys: *const *const c_char,
vals: *const *const c_char,
out: *mut DataIterHandle)
-> c_int;
/// Get the detailed information about data iterator.
///
/// - param: creator the DataIterCreator.
/// - param: name The returned name of the creator.
/// - param: description The returned description of the symbol.
/// - param: num_args Number of arguments.
/// - param: arg_names Name of the arguments.
/// - param: arg_type_infos Type informations about the arguments.
/// - param: arg_descriptions Description information about the arguments.
/// - return: 0 when success, -1 when failure happens
pub fn MXDataIterGetIterInfo(creator: DataIterCreator,
name: *mut *const c_char,
description: *mut *const c_char,
num_args: *mut mx_uint,
arg_names: *mut *const *const c_char,
arg_type_infos: *mut *const *const c_char,
arg_descriptions: *mut *const *const c_char)
-> c_int;
/// Free the handle to the IO module
///
/// - param: handle the handle pointer to the data iterator
/// - return: 0 when success, -1 when failure happens
pub fn MXDataIterFree(handle: DataIterHandle) -> c_int;
/// Move iterator to next position
///
/// - param: handle the handle to iterator
/// - param: out return value of next
/// - return: 0 when success, -1 when failure happens
pub fn MXDataIterNext(handle: DataIterHandle, out: *mut c_int) -> c_int;
/// Call iterator.Reset
///
/// - param: handle the handle to iterator
/// - return: 0 when success, -1 when failure happens
pub fn MXDataIterBeforeFirst(handle: DataIterHandle) -> c_int;
/// Get the handle to the NDArray of underlying data
///
/// - param: handle the handle pointer to the data iterator
/// - param: out handle to underlying data NDArray
/// - return: 0 when success, -1 when failure happens
pub fn MXDataIterGetData(handle: DataIterHandle, out: *mut NDArrayHandle) -> c_int;
/// Get the image index by array.
///
/// - param: handle the handle pointer to the data iterator
/// - param: out_index output index of the array.
/// - param: out_size output size of the array.
/// - return: 0 when success, -1 when failure happens
pub fn MXDataIterGetIndex(handle: DataIterHandle,
out_index: *mut *mut uint64_t,
out_size: *mut uint64_t)
-> c_int;
/// Get the padding number in current data batch
///
/// - param: handle the handle pointer to the data iterator
/// - param: pad pad number ptr
/// - return: 0 when success, -1 when failure happens
pub fn MXDataIterGetPadNum(handle: DataIterHandle, pad: *const c_int) -> c_int;
/// Get the handle to the NDArray of underlying label
///
/// - param: handle the handle pointer to the data iterator
/// - param: out the handle to underlying label NDArray
/// - return: 0 when success, -1 when failure happens
pub fn MXDataIterGetLabel(handle: DataIterHandle, out: *mut NDArrayHandle) -> c_int;
// --------------------------------------------
// Part 6: basic KVStore interface
// --------------------------------------------
    /// Initialize ps-lite environment variables
///
/// - param: num_vars number of variables to initialize
/// - param: keys environment keys
/// - param: vals environment values
pub fn MXInitPSEnv(num_vars: mx_uint,
keys: *mut *const c_char,
vals: *mut *const c_char)
-> c_int;
/// Create a kvstore
///
/// - param: type the type of KVStore
    /// - param: out the created KVStore handle
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreCreate(kvtype: *const c_char, out: *mut KVStoreHandle) -> c_int;
/// Delete a KVStore handle.
///
/// - param: handle handle to the kvstore
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreFree(handle: KVStoreHandle) -> c_int;
/// Init a list of (key,value) pairs in kvstore
///
/// - param: handle handle to the kvstore
/// - param: num the number of key-value pairs
/// - param: keys the list of keys
/// - param: vals the list of values
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreInit(handle: KVStoreHandle,
num: mx_uint,
keys: *const c_int,
vals: *mut NDArrayHandle)
-> c_int;
/// Push a list of (key,value) pairs to kvstore
///
/// - param: handle handle to the kvstore
/// - param: num the number of key-value pairs
/// - param: keys the list of keys
/// - param: vals the list of values
/// - param: priority the priority of the action
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStorePush(handle: KVStoreHandle,
num: mx_uint,
keys: *const c_int,
vals: *mut NDArrayHandle,
priority: c_int)
-> c_int;
/// pull a list of (key, value) pairs from the kvstore
///
/// - param: handle handle to the kvstore
/// - param: num the number of key-value pairs
/// - param: keys the list of keys
/// - param: vals the list of values
/// - param: priority the priority of the action
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStorePull(handle: KVStoreHandle,
num: mx_uint,
                         keys: *const c_int,
vals: *mut NDArrayHandle,
priority: c_int)
-> c_int;
    /// register a push updater
///
/// - param: handle handle to the KVStore
    /// - param: updater updater function
/// - param: updater_handle The additional handle used to invoke the updater
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreSetUpdater(handle: KVStoreHandle,
updater: MXKVStoreUpdater,
updater_handle: *mut c_void)
-> c_int;
/// get the type of the kvstore
///
/// - param: handle handle to the KVStore
/// - param: type a string type
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreGetType(handle: KVStoreHandle, kvtype: *mut *const c_char) -> c_int;
// --------------------------------------------
// Part 6: advanced KVStore for multi-machines
// --------------------------------------------
/// return The rank of this node in its group, which is in [0, GroupSize).
///
/// - param: handle handle to the KVStore
/// - param: ret the node rank
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreGetRank(handle: KVStoreHandle, ret: *mut c_int) -> c_int;
/// return The number of nodes in this group, which is
///
    /// - number of workers if `IsWorkerNode() == true`,
    /// - number of servers if `IsServerNode() == true`,
/// - 1 if `IsSchedulerNode() == true`,
/// - param: handle handle to the KVStore
/// - param: ret the group size
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreGetGroupSize(handle: KVStoreHandle, ret: *mut c_int) -> c_int;
/// return whether or not this process is a worker node.
///
/// - param: ret 1 for yes, 0 for no
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreIsWorkerNode(ret: *mut c_int) -> c_int;
/// return whether or not this process is a server node.
///
/// - param: ret 1 for yes, 0 for no
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreIsServerNode(ret: *mut c_int) -> c_int;
/// return whether or not this process is a scheduler node.
///
/// - param: ret 1 for yes, 0 for no
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreIsSchedulerNode(ret: *mut c_int) -> c_int;
/// global barrier among all worker machines
///
/// - param: handle handle to the KVStore
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreBarrier(handle: KVStoreHandle) -> c_int;
    /// whether to do a barrier when finalizing
///
/// - param: handle handle to the KVStore
    /// - param: barrier_before_exit whether to do a barrier when the kvstore finalizes
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreSetBarrierBeforeExit(handle: KVStoreHandle,
barrier_before_exit: c_int)
-> c_int;
/// Run as server (or scheduler)
///
/// - param: handle handle to the KVStore
/// - param: controller the user-defined server controller
/// - param: controller_handle helper handle for implementing controller
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreRunServer(handle: KVStoreHandle,
controller: MXKVStoreServerController,
controller_handle: *mut c_void)
-> c_int;
/// Send a command to all server nodes
///
/// - param: handle handle to the KVStore
/// - param: cmd_id the head of the command
/// - param: cmd_body the body of the command
/// - return: 0 when success, -1 when failure happens
pub fn MXKVStoreSendCommmandToServers(handle: KVStoreHandle,
cmd_id: c_int,
cmd_body: *const c_char)
-> c_int;
/// Get the number of ps dead node(s) specified by {node_id}
///
/// - param: handle handle to the KVStore
/// - param: node_id Can be a node group or a single node.
/// kScheduler = 1, kServerGroup = 2, kWorkerGroup = 4
    /// - param: number Output number of dead nodes
    /// - param: timeout_sec A node that fails to send a heartbeat within {timeout_sec} seconds
    /// will be presumed 'dead'. Default is 60 seconds.
pub fn MXKVStoreGetNumDeadNode(handle: KVStoreHandle,
node_id: c_int,
number: *mut c_int,
timeout_sec: c_int)
-> c_int;
/// Create a RecordIO writer object
///
/// - param: uri path to file
/// - param: out handle pointer to the created object
/// - return: 0 when success, -1 when failure happens
pub fn MXRecordIOWriterCreate(uri: *const c_char, out: *mut RecordIOHandle) -> c_int;
/// Delete a RecordIO writer object
///
/// - param: handle handle to RecordIO object
/// - return: 0 when success, -1 when failure happens
pub fn MXRecordIOWriterFree(handle: RecordIOHandle) -> c_int;
/// Write a record to a RecordIO object
///
/// - param: handle handle to RecordIO object
/// - param: buf buffer to write
/// - param: size size of buffer
/// - return: 0 when success, -1 when failure happens
pub fn MXRecordIOWriterWriteRecord(handle: *mut RecordIOHandle,
buf: *const c_char,
size: size_t)
-> c_int;
/// Get the current writer pointer position
///
/// - param: handle handle to RecordIO object
/// - param: pos handle to output position
/// - return: 0 when success, -1 when failure happens
pub fn MXRecordIOWriterTell(handle: *mut RecordIOHandle, pos: *mut size_t) -> c_int;
/// Create a RecordIO reader object
///
/// - param: uri path to file
/// - param: out handle pointer to the created object
/// - return: 0 when success, -1 when failure happens
pub fn MXRecordIOReaderCreate(uri: *const c_char, out: *mut RecordIOHandle) -> c_int;
/// Delete a RecordIO reader object
///
/// - param: handle handle to RecordIO object
/// - return: 0 when success, -1 when failure happens
pub fn MXRecordIOReaderFree(handle: *mut RecordIOHandle) -> c_int;
    /// Read a record from a RecordIO object
///
/// - param: handle handle to RecordIO object
/// - param: buf pointer to return buffer
/// - param: size point to size of buffer
/// - return: 0 when success, -1 when failure happens
pub fn MXRecordIOReaderReadRecord(handle: *mut RecordIOHandle,
buf: *mut *const c_char,
size: *mut size_t)
-> c_int;
/// Set the current reader pointer position
///
/// - param: handle handle to RecordIO object
/// - param: pos target position
/// - return: 0 when success, -1 when failure happens
pub fn MXRecordIOReaderSeek(handle: *mut RecordIOHandle, pos: size_t) -> c_int;
/// Create a MXRtc object
pub fn MXRtcCreate(name: *mut c_char,
num_input: mx_uint,
num_output: mx_uint,
input_names: *mut *mut c_char,
output_names: *mut *mut c_char,
inputs: *mut NDArrayHandle,
outputs: *mut NDArrayHandle,
kernel: *mut c_char,
out: *mut RtcHandle)
-> c_int;
/// Run cuda kernel
pub fn MXRtcPush(handle: RtcHandle,
num_input: mx_uint,
num_output: mx_uint,
inputs: *mut NDArrayHandle,
outputs: *mut NDArrayHandle,
gridDimX: mx_uint,
gridDimY: mx_uint,
gridDimZ: mx_uint,
blockDimX: mx_uint,
blockDimY: mx_uint,
blockDimZ: mx_uint)
-> c_int;
/// Delete a MXRtc object
pub fn MXRtcFree(handle: RtcHandle) -> c_int;
pub fn MXOptimizerFindCreator(key: *const c_char, out: *mut OptimizerCreator) -> c_int;
pub fn MXOptimizerCreateOptimizer(creator: OptimizerCreator,
num_param: mx_uint,
keys: *const *const c_char,
vals: *const *const c_char,
out: *mut OptimizerHandle)
-> c_int;
pub fn MXOptimizerFree(handle: OptimizerHandle) -> c_int;
pub fn MXOptimizerUpdate(handle: OptimizerHandle,
index: c_int,
weight: NDArrayHandle,
grad: NDArrayHandle,
lr: mx_float,
wd: mx_float)
-> c_int;
pub fn MXCustomOpRegister(op_type: *const c_char, creator: CustomOpPropCreator) -> c_int;
}
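// A minimal usage sketch (illustrative only, not part of the bindings proper): wraps the raw
// create call above and surfaces errors through `MXGetLastError`, following the 0-success /
// -1-failure convention documented throughout this file. It assumes `mx_uint` and
// `NDArrayHandle` are the plain type aliases defined earlier in the crate.
#[allow(dead_code)]
unsafe fn example_create_cpu_ndarray(shape: &[mx_uint]) -> Result<NDArrayHandle, String> {
    let mut handle: NDArrayHandle = ::std::ptr::null_mut();
    // dev_type 1 == CPU, dev_id 0, delay_alloc 0 (allocate immediately).
    if MXNDArrayCreate(shape.as_ptr(), shape.len() as mx_uint, 1, 0, 0, &mut handle) != 0 {
        let msg = ::std::ffi::CStr::from_ptr(MXGetLastError())
            .to_string_lossy()
            .into_owned();
        return Err(msg);
    }
    Ok(handle)
}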
#[cfg(test)]
mod tests {
#[test]
fn it_works() {}
}<|fim▁end|> | /// get the shape of the array
/// |
<|file_name|>provider.go<|end_file_name|><|fim▁begin|>package test
import (
"github.com/tidepool-org/platform/config"
"github.com/tidepool-org/platform/log"
"github.com/tidepool-org/platform/version"
)
type Provider struct {
VersionReporterInvocations int
VersionReporterStub func() version.Reporter
VersionReporterOutputs []version.Reporter
VersionReporterOutput *version.Reporter
ConfigReporterInvocations int
ConfigReporterStub func() config.Reporter
ConfigReporterOutputs []config.Reporter
ConfigReporterOutput *config.Reporter
LoggerInvocations int
LoggerStub func() log.Logger
LoggerOutputs []log.Logger
LoggerOutput *log.Logger
PrefixInvocations int
PrefixStub func() string
PrefixOutputs []string
PrefixOutput *string
NameInvocations int
NameStub func() string
NameOutputs []string
NameOutput *string
UserAgentInvocations int
UserAgentStub func() string
UserAgentOutputs []string
UserAgentOutput *string
}
func NewProvider() *Provider {
return &Provider{}
}
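// Illustrative usage only (not exercised by this package itself): a test would typically
// prime the outputs it expects to be consumed and assert none are left over afterwards.
// The concrete values below are hypothetical placeholders.
//
//	provider := NewProvider()
//	provider.NameOutputs = []string{"example-service"}
//	provider.PrefixOutputs = []string{"EXAMPLE"}
//	defer provider.AssertOutputsEmpty()
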
func (p *Provider) VersionReporter() version.Reporter {
p.VersionReporterInvocations++
if p.VersionReporterStub != nil {
return p.VersionReporterStub()
}
if len(p.VersionReporterOutputs) > 0 {
output := p.VersionReporterOutputs[0]
p.VersionReporterOutputs = p.VersionReporterOutputs[1:]
return output
}
if p.VersionReporterOutput != nil {
return *p.VersionReporterOutput
}
panic("VersionReporter has no output")<|fim▁hole|>func (p *Provider) ConfigReporter() config.Reporter {
p.ConfigReporterInvocations++
if p.ConfigReporterStub != nil {
return p.ConfigReporterStub()
}
if len(p.ConfigReporterOutputs) > 0 {
output := p.ConfigReporterOutputs[0]
p.ConfigReporterOutputs = p.ConfigReporterOutputs[1:]
return output
}
if p.ConfigReporterOutput != nil {
return *p.ConfigReporterOutput
}
panic("ConfigReporter has no output")
}
func (p *Provider) Logger() log.Logger {
p.LoggerInvocations++
if p.LoggerStub != nil {
return p.LoggerStub()
}
if len(p.LoggerOutputs) > 0 {
output := p.LoggerOutputs[0]
p.LoggerOutputs = p.LoggerOutputs[1:]
return output
}
if p.LoggerOutput != nil {
return *p.LoggerOutput
}
panic("Logger has no output")
}
func (p *Provider) Prefix() string {
p.PrefixInvocations++
if p.PrefixStub != nil {
return p.PrefixStub()
}
if len(p.PrefixOutputs) > 0 {
output := p.PrefixOutputs[0]
p.PrefixOutputs = p.PrefixOutputs[1:]
return output
}
if p.PrefixOutput != nil {
return *p.PrefixOutput
}
panic("Prefix has no output")
}
func (p *Provider) Name() string {
p.NameInvocations++
if p.NameStub != nil {
return p.NameStub()
}
if len(p.NameOutputs) > 0 {
output := p.NameOutputs[0]
p.NameOutputs = p.NameOutputs[1:]
return output
}
if p.NameOutput != nil {
return *p.NameOutput
}
panic("Name has no output")
}
func (p *Provider) UserAgent() string {
p.UserAgentInvocations++
if p.UserAgentStub != nil {
return p.UserAgentStub()
}
if len(p.UserAgentOutputs) > 0 {
output := p.UserAgentOutputs[0]
p.UserAgentOutputs = p.UserAgentOutputs[1:]
return output
}
if p.UserAgentOutput != nil {
return *p.UserAgentOutput
}
panic("UserAgent has no output")
}
func (p *Provider) AssertOutputsEmpty() {
if len(p.VersionReporterOutputs) > 0 {
panic("VersionReporterOutputs is not empty")
}
if len(p.ConfigReporterOutputs) > 0 {
panic("ConfigReporterOutputs is not empty")
}
if len(p.LoggerOutputs) > 0 {
panic("LoggerOutputs is not empty")
}
if len(p.PrefixOutputs) > 0 {
panic("PrefixOutputs is not empty")
}
if len(p.NameOutputs) > 0 {
panic("NameOutputs is not empty")
}
if len(p.UserAgentOutputs) > 0 {
panic("UserAgentOutputs is not empty")
}
}<|fim▁end|> | }
|
<|file_name|>cookieeditor.js<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright The Closure Library Authors.
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @fileoverview Displays and edits the value of a cookie.
* Intended only for debugging.
*/
goog.provide('goog.ui.CookieEditor');
goog.require('goog.asserts');
goog.require('goog.dom');
goog.require('goog.dom.TagName');
goog.require('goog.events.EventType');
goog.require('goog.net.Cookies');
goog.require('goog.string');
goog.require('goog.style');
goog.require('goog.ui.Component');
goog.requireType('goog.events.Event');
/**
* Displays and edits the value of a cookie.
* @final
* @unrestricted
*/
goog.ui.CookieEditor = class extends goog.ui.Component {
/**
* @param {goog.dom.DomHelper=} opt_domHelper Optional DOM helper.
*/
constructor(opt_domHelper) {
'use strict';
super(opt_domHelper);
}
/**
* Sets the cookie which this component will edit.
* @param {string} cookieKey Cookie key.
*/
selectCookie(cookieKey) {
'use strict';
goog.asserts.assert(goog.net.Cookies.getInstance().isValidName(cookieKey));
this.cookieKey_ = cookieKey;
if (this.textAreaElem_) {
this.textAreaElem_.value =
goog.net.Cookies.getInstance().get(cookieKey) || '';
}
}
/** @override */
canDecorate() {
'use strict';
return false;
}
/** @override */
createDom() {
'use strict';
// Debug-only, so we don't need i18n.
this.clearButtonElem_ = goog.dom.createDom(
goog.dom.TagName.BUTTON, /* attributes */ null, 'Clear');
this.updateButtonElem_ = goog.dom.createDom(
goog.dom.TagName.BUTTON, /* attributes */ null, 'Update');
var value =
this.cookieKey_ && goog.net.Cookies.getInstance().get(this.cookieKey_);
this.textAreaElem_ = goog.dom.createDom(
        goog.dom.TagName.TEXTAREA, /* attributes */ null, value || '');
this.valueWarningElem_ = goog.dom.createDom(
goog.dom.TagName.SPAN,
        /* attributes */ {'style': 'display:none;color:red'},
'Invalid cookie value.');
this.setElementInternal(goog.dom.createDom(
goog.dom.TagName.DIV,
        /* attributes */ null, this.valueWarningElem_,
goog.dom.createDom(goog.dom.TagName.BR), this.textAreaElem_,
goog.dom.createDom(goog.dom.TagName.BR), this.clearButtonElem_,
this.updateButtonElem_));
}
/** @override */
enterDocument() {
'use strict';
super.enterDocument();
this.getHandler().listen(
this.clearButtonElem_, goog.events.EventType.CLICK, this.handleClear_);
this.getHandler().listen(
this.updateButtonElem_, goog.events.EventType.CLICK,
this.handleUpdate_);
}
/**
* Handles user clicking clear button.
* @param {!goog.events.Event} e The click event.
* @private
*/
handleClear_(e) {
'use strict';
if (this.cookieKey_) {<|fim▁hole|> }
/**
* Handles user clicking update button.
* @param {!goog.events.Event} e The click event.
* @private
*/
handleUpdate_(e) {
'use strict';
if (this.cookieKey_) {
var value = this.textAreaElem_.value;
if (value) {
// Strip line breaks.
value = goog.string.stripNewlines(value);
}
if (goog.net.Cookies.getInstance().isValidValue(value)) {
goog.net.Cookies.getInstance().set(this.cookieKey_, value);
goog.style.setElementShown(this.valueWarningElem_, false);
} else {
goog.style.setElementShown(this.valueWarningElem_, true);
}
}
}
/** @override */
disposeInternal() {
'use strict';
this.clearButtonElem_ = null;
this.cookieKey_ = null;
this.textAreaElem_ = null;
this.updateButtonElem_ = null;
this.valueWarningElem_ = null;
}
};
/**
* Cookie key.
* @type {?string}
* @private
*/
goog.ui.CookieEditor.prototype.cookieKey_;
/**
* Text area.
* @type {HTMLTextAreaElement}
* @private
*/
goog.ui.CookieEditor.prototype.textAreaElem_;
/**
* Clear button.
* @type {HTMLButtonElement}
* @private
*/
goog.ui.CookieEditor.prototype.clearButtonElem_;
/**
* Invalid value warning text.
* @type {HTMLSpanElement}
* @private
*/
goog.ui.CookieEditor.prototype.valueWarningElem_;
/**
* Update button.
* @type {HTMLButtonElement}
* @private
*/
goog.ui.CookieEditor.prototype.updateButtonElem_;
// TODO(user): add combobox for user to select different cookies<|fim▁end|> | goog.net.Cookies.getInstance().remove(this.cookieKey_);
}
this.textAreaElem_.value = ''; |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>module.exports = {
getOne: require('./get-one'),
getAllByUser: require('./get-all-by-user'),<|fim▁hole|> delete: require('./delete'),
testProjects: require('./test-projects')
};<|fim▁end|> |
create: require('./create'),
update: require('./update'), |