prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k)
---|---|
<|file_name|>13-For your lists.py<|end_file_name|><|fim▁begin|>numbers = [7, 9, 12, 54, 99]
print "This list contains: "<|fim▁hole|> print num
# Add your loop below!
for num in numbers:
print num ** 2<|fim▁end|> |
for num in numbers: |
<|file_name|>standarditem.cpp<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2009-10 Qtrac Ltd. All rights reserved.
This program or module is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version. It is provided
for educational purposes and is distributed in the hope that it will
be useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
the GNU General Public License for more details.
*/
#include "standarditem.hpp"
StandardItem::StandardItem(const QString &text, bool done)
: QStandardItem(text)
{
setCheckable(true);
<|fim▁hole|> setFlags(Qt::ItemIsSelectable|Qt::ItemIsEnabled|
Qt::ItemIsEditable|Qt::ItemIsUserCheckable);
m_today = new QStandardItem;
m_today->setFlags(Qt::ItemIsSelectable|Qt::ItemIsEnabled);
m_today->setTextAlignment(Qt::AlignVCenter|Qt::AlignRight);
m_total = new QStandardItem;
m_total->setFlags(Qt::ItemIsSelectable|Qt::ItemIsEnabled);
m_total->setTextAlignment(Qt::AlignVCenter|Qt::AlignRight);
}
void StandardItem::incrementLastEndTime(int msec)
{
Q_ASSERT(!m_dateTimes.isEmpty());
QDateTime &endTime = m_dateTimes.last().second;
endTime.setTime(endTime.time().addMSecs(msec));
}
QString StandardItem::todaysTime() const
{
int minutes = minutesForTask(true);
return QString("%1:%2").arg(minutes / 60)
.arg(minutes % 60, 2, 10, QChar('0'));
}
QString StandardItem::totalTime() const
{
int minutes = minutesForTask(false);
return QString("%1:%2").arg(minutes / 60)
.arg(minutes % 60, 2, 10, QChar('0'));
}
int StandardItem::minutesForTask(bool onlyForToday) const
{
int minutes = 0;
QListIterator<QPair<QDateTime, QDateTime> > i(m_dateTimes);
while (i.hasNext()) {
const QPair<QDateTime, QDateTime> &dateTime = i.next();
if (onlyForToday &&
dateTime.first.date() != QDate::currentDate())
continue;
minutes += (dateTime.first.secsTo(dateTime.second) / 60);
}
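// Recursively add the time recorded against this task's subtasks.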
for (int row = 0; row < rowCount(); ++row) {
StandardItem *item = static_cast<StandardItem*>(child(row,
0));
Q_ASSERT(item);
minutes += item->minutesForTask(onlyForToday);
}
return minutes;
}<|fim▁end|> | setCheckState(done ? Qt::Checked : Qt::Unchecked);
|
<|file_name|>generate_test_mask_image.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
***************************************************************************
generate_test_mask_image.py
---------------------
Date : February 2015
Copyright : (C) 2015 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'February 2015'
__copyright__ = '(C) 2015, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
# Generates (or updates) a unit test image mask, which is used to specify whether
# a pixel in the control image should be checked (black pixel in mask) or not (white
# pixel in mask). For non-black or white pixels, the pixel's lightness is used to
# specify a maximum delta for each color component
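# (e.g. a gray mask pixel with value 20 allows each of R, G, B and A to differ by up to 20)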
import os
import sys
import argparse
from PyQt5.QtGui import QImage, QColor, qRed, qBlue, qGreen, qAlpha, qRgb
import struct
import urllib.request, urllib.error, urllib.parse
import glob
def error(msg):
print(msg)<|fim▁hole|>def colorDiff(c1, c2):
redDiff = abs(qRed(c1) - qRed(c2))
greenDiff = abs(qGreen(c1) - qGreen(c2))
blueDiff = abs(qBlue(c1) - qBlue(c2))
alphaDiff = abs(qAlpha(c1) - qAlpha(c2))
return max(redDiff, greenDiff, blueDiff, alphaDiff)
def imageFromPath(path):
if (path[:7] == 'http://' or path[:7] == 'file://' or path[:8] == 'https://'):
#fetch remote image
data = urllib.request.urlopen(path).read()
image = QImage()
image.loadFromData(data)
else:
image = QImage(path)
return image
def getControlImagePath(path):
if os.path.isfile(path):
return path
#else try and find matching test image
script_folder = os.path.dirname(os.path.realpath(sys.argv[0]))
control_images_folder = os.path.join(script_folder, '../tests/testdata/control_images')
matching_control_images = [x[0] for x in os.walk(control_images_folder) if path in x[0]]
if len(matching_control_images) > 1:
error('Found multiple matching control images for {}'.format(path))
elif len(matching_control_images) == 0:
error('No matching control images found for {}'.format(path))
found_control_image_path = matching_control_images[0]
#check for a single matching expected image
images = glob.glob(os.path.join(found_control_image_path, '*.png'))
filtered_images = [i for i in images if not i[-9:] == '_mask.png']
if len(filtered_images) > 1:
error('Found multiple matching control images for {}'.format(path))
elif len(filtered_images) == 0:
error('No matching control images found for {}'.format(path))
found_image = filtered_images[0]
print('Found matching control image: {}'.format(found_image))
return found_image
def updateMask(control_image_path, rendered_image_path, mask_image_path):
control_image = imageFromPath(control_image_path)
if not control_image:
error('Could not read control image {}'.format(control_image_path))
rendered_image = imageFromPath(rendered_image_path)
if not rendered_image:
error('Could not read rendered image {}'.format(rendered_image_path))
if not rendered_image.width() == control_image.width() or not rendered_image.height() == control_image.height():
print(('Size mismatch - control image is {}x{}, rendered image is {}x{}'.format(control_image.width(),
control_image.height(),
rendered_image.width(),
rendered_image.height())))
max_width = min(rendered_image.width(), control_image.width())
max_height = min(rendered_image.height(), control_image.height())
#read current mask, if it exists
mask_image = imageFromPath(mask_image_path)
if mask_image.isNull():
print('Mask image does not exist, creating {}'.format(mask_image_path))
mask_image = QImage(control_image.width(), control_image.height(), QImage.Format_ARGB32)
mask_image.fill(QColor(0, 0, 0))
#loop through pixels in rendered image and compare
mismatch_count = 0
linebytes = max_width * 4
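# each ARGB32 pixel occupies 4 bytes, so the scanlines are compared pixel by pixel below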
for y in range(max_height):
control_scanline = control_image.constScanLine(y).asstring(linebytes)
rendered_scanline = rendered_image.constScanLine(y).asstring(linebytes)
mask_scanline = mask_image.scanLine(y).asstring(linebytes)
for x in range(max_width):
currentTolerance = qRed(struct.unpack('I', mask_scanline[x * 4:x * 4 + 4])[0])
if currentTolerance == 255:
#ignore pixel
continue
expected_rgb = struct.unpack('I', control_scanline[x * 4:x * 4 + 4])[0]
rendered_rgb = struct.unpack('I', rendered_scanline[x * 4:x * 4 + 4])[0]
difference = colorDiff(expected_rgb, rendered_rgb)
if difference > currentTolerance:
#update mask image
mask_image.setPixel(x, y, qRgb(difference, difference, difference))
mismatch_count += 1
if mismatch_count:
#update mask
mask_image.save(mask_image_path, "png")
print('Updated {} pixels in {}'.format(mismatch_count, mask_image_path))
else:
print('No mismatches in {}'.format(mask_image_path))
parser = argparse.ArgumentParser() # OptionParser("usage: %prog control_image rendered_image mask_image")
parser.add_argument('control_image')
parser.add_argument('rendered_image')
parser.add_argument('mask_image', nargs='?', default=None)
args = parser.parse_args()
args.control_image = getControlImagePath(args.control_image)
if not args.mask_image:
args.mask_image = args.control_image[:-4] + '_mask.png'
updateMask(args.control_image, args.rendered_image, args.mask_image)<|fim▁end|> | sys.exit(1)
|
<|file_name|>StreamRDFLib.java<|end_file_name|><|fim▁begin|>/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.riot.system;
import java.io.OutputStream ;
import java.io.Writer ;
import org.apache.jena.atlas.io.AWriter ;
import org.apache.jena.atlas.io.IO ;
import org.apache.jena.atlas.lib.CharSpace ;
import org.apache.jena.atlas.lib.Sink ;
import org.apache.jena.graph.Graph ;
import org.apache.jena.graph.Node ;
import org.apache.jena.graph.Triple ;
import org.apache.jena.riot.lang.StreamRDFCounting ;
import org.apache.jena.riot.writer.WriterStreamRDFPlain ;
import org.apache.jena.shared.JenaException ;
import org.apache.jena.shared.PrefixMapping ;
import org.apache.jena.sparql.core.DatasetGraph ;
import org.apache.jena.sparql.core.Quad ;
/** Various Common StreamRDF setups */
public class StreamRDFLib
{
/** Send everything to nowhere ... efficiently */
public static StreamRDF sinkNull() { return new StreamRDFBase() ; }
public static StreamRDF writer(OutputStream out) { return new WriterStreamRDFPlain(IO.wrapUTF8(out)) ; }
public static StreamRDF writer(AWriter out) { return new WriterStreamRDFPlain(out) ; }
public static StreamRDF writer(Writer out) { return new WriterStreamRDFPlain(IO.wrap(out)) ; }
public static StreamRDF writer(OutputStream out, CharSpace charSpace)
{
switch (charSpace) {
case ASCII:
return new WriterStreamRDFPlain(IO.wrapASCII(out), charSpace);
case UTF8:
default:
return writer(out);
}
}
public static StreamRDF writer(AWriter out, CharSpace charSpace)
{
return new WriterStreamRDFPlain(out, charSpace);
}
public static StreamRDF writer(Writer out, CharSpace charSpace)
{
return new WriterStreamRDFPlain(IO.wrap(out), charSpace);
}
public static StreamRDF graph(Graph graph) { return new ParserOutputGraph(graph) ; }
public static StreamRDF dataset(DatasetGraph dataset) { return new ParserOutputDataset(dataset) ; }
/**
* Output to a sink; prefix and base handled only within the parser.
* Unfortunately, Java needs different names for the triples and
* quads versions because of type erasure.
*/
public static StreamRDF sinkTriples(Sink<Triple> sink) { return new ParserOutputSinkTriples(sink) ; }
/**
* Output to a sink; prefix and base handled only within the parser.
* Unfortunately, Java needs different names for the triples and
* quads versions because of type erasure.
*/
public static StreamRDF sinkQuads(Sink<Quad> sink) { return new ParserOutputSinkQuads(sink) ; }
/** Convert any triples seen to quads, adding a graph node of {@link Quad#tripleInQuad} */
public static StreamRDF extendTriplesToQuads(StreamRDF base)
{ return extendTriplesToQuads(Quad.tripleInQuad, base) ; }
/** Convert any triples seen to quads, adding the specified graph node */
public static StreamRDF extendTriplesToQuads(Node graphNode, StreamRDF base)
{ return new ParserOutputSinkTriplesToQuads(graphNode, base) ; }
public static StreamRDFCounting count()
{ return new StreamRDFCountingBase(sinkNull()) ; }
public static StreamRDFCounting count(StreamRDF other)<|fim▁hole|> {
private final Node gn ;
ParserOutputSinkTriplesToQuads(Node gn, StreamRDF base)
{ super(base) ; this.gn = gn ; }
@Override public void triple(Triple triple)
{ other.quad(new Quad(gn, triple)) ; }
}
private static class ParserOutputSinkTriples extends StreamRDFBase
{
private final Sink<Triple> sink ;
public ParserOutputSinkTriples(Sink<Triple> sink)
{ this.sink = sink ; }
@Override
public void triple(Triple triple)
{ sink.send(triple) ; }
@Override
public void finish()
{ sink.flush() ; }
}
private static class ParserOutputSinkQuads extends StreamRDFBase
{
private final Sink<Quad> sink ;
public ParserOutputSinkQuads(Sink<Quad> sink)
{ this.sink = sink ; }
@Override
public void quad(Quad quad)
{ sink.send(quad) ; }
@Override
public void finish()
{ sink.flush() ; }
}
private static class ParserOutputGraph extends StreamRDFBase
{
protected final Graph graph ;
protected boolean warningIssued = false ;
public ParserOutputGraph(Graph graph) { this.graph = graph ; }
@Override public void triple(Triple triple) { graph.add(triple) ; }
@Override public void quad(Quad quad)
{
if ( quad.isTriple() || quad.isDefaultGraph() )
graph.add(quad.asTriple()) ;
else
{
if ( ! warningIssued )
{
//SysRIOT.getLogger().warn("Only triples or default graph data expected : named graph data ignored") ;
// Not ideal - assumes the global default.
ErrorHandlerFactory.getDefaultErrorHandler().warning("Only triples or default graph data expected : named graph data ignored", -1, -1) ;
}
warningIssued = true ;
}
//throw new IllegalStateException("Quad passed to graph parsing") ;
}
@Override public void base(String base)
{ }
@Override public void prefix(String prefix, String uri)
{
try { // Jena applies XML rules to prefixes.
graph.getPrefixMapping().setNsPrefix(prefix, uri) ;
} catch (JenaException ex) {}
}
}
private static class ParserOutputDataset extends StreamRDFBase
{
protected final DatasetGraph dsg ;
protected final PrefixMapping prefixMapping ;
public ParserOutputDataset(DatasetGraph dsg)
{
this.dsg = dsg ;
this.prefixMapping = dsg.getDefaultGraph().getPrefixMapping() ;
// = dsg.getPrefixMapping().setNsPrefix(prefix, uri) ;
}
@Override public void triple(Triple triple)
{
dsg.add(Quad.defaultGraphNodeGenerated, triple.getSubject(), triple.getPredicate(), triple.getObject()) ;
//throw new IllegalStateException("Triple passed to dataset parsing") ;
}
@Override public void quad(Quad quad)
{
if ( quad.isTriple() )
dsg.add(Quad.defaultGraphNodeGenerated, quad.getSubject(), quad.getPredicate(), quad.getObject()) ;
else
dsg.add(quad) ;
}
@Override public void base(String base)
{ }
@Override public void prefix(String prefix, String uri)
{
try { // Jena applies XML rules to prefixes.
prefixMapping.setNsPrefix(prefix, uri) ;
} catch (JenaException ex) {}
}
}
}<|fim▁end|> | { return new StreamRDFCountingBase(other) ; }
private static class ParserOutputSinkTriplesToQuads extends StreamRDFWrapper |
<|file_name|>watching.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# - * - mode: python; coding: utf-8 - * -<|fim▁hole|># Copyright (C) 2013 Andrey Degtyarev <[email protected]>
# This program is distributed licensed under the GNU General Public License v.3
# as published by the Free Software Foundation.
import manage_pressure.constants, manage_pressure.work_device, time
def control(motor_id, pressure_1_id, pressure_2_id):
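# Endless control loop: check the devices, apply any needed action, then wait before polling again.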
devices = manage_pressure.work_device.WorkDevice(motor_id, pressure_1_id, pressure_2_id)
while 1:
devices.check()
devices.action()
time.sleep(manage_pressure.constants.TIME_REQUEST_DEVICE)<|fim▁end|> | |
<|file_name|>TableSpeedSearch.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2000-2010 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ui;
import com.intellij.util.PairFunction;
import com.intellij.util.containers.Convertor;
import javax.swing.*;
import javax.swing.table.TableModel;
import java.util.ListIterator;
public class TableSpeedSearch extends SpeedSearchBase<JTable> {
private static final PairFunction<Object, Cell, String> TO_STRING = new PairFunction<Object, Cell, String>() {
public String fun(Object o, Cell cell) {
return o == null ? "" : o.toString();
}
};
private final PairFunction<Object, Cell, String> myToStringConvertor;
public TableSpeedSearch(JTable table) {<|fim▁hole|> this(table, new PairFunction<Object, Cell, String>() {
@Override
public String fun(Object o, Cell c) {
return toStringConvertor.convert(o);
}
});
}
public TableSpeedSearch(JTable table, final PairFunction<Object, Cell, String> toStringConvertor) {
super(table);
myToStringConvertor = toStringConvertor;
}
protected boolean isSpeedSearchEnabled() {
return !getComponent().isEditing() && super.isSpeedSearchEnabled();
}
@Override
protected ListIterator<Object> getElementIterator(int startingIndex) {
return new MyListIterator(startingIndex);
}
protected int getElementCount() {
final TableModel tableModel = myComponent.getModel();
return tableModel.getRowCount() * tableModel.getColumnCount();
}
protected void selectElement(Object element, String selectedText) {
final int index = ((Integer)element).intValue();
final TableModel model = myComponent.getModel();
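// Each element is a flat cell index: row * columnCount + column.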
final int row = index / model.getColumnCount();
final int col = index % model.getColumnCount();
myComponent.getSelectionModel().setSelectionInterval(row, row);
myComponent.getColumnModel().getSelectionModel().setSelectionInterval(col, col);
TableUtil.scrollSelectionToVisible(myComponent);
}
protected int getSelectedIndex() {
final int row = myComponent.getSelectedRow();
final int col = myComponent.getSelectedColumn();
// selected row is not enough as we want to select specific cell in a large multi-column table
return row > -1 && col > -1 ? row * myComponent.getModel().getColumnCount() + col : -1;
}
protected Object[] getAllElements() {
throw new UnsupportedOperationException("Not implemented");
}
protected String getElementText(Object element) {
final int index = ((Integer)element).intValue();
final TableModel model = myComponent.getModel();
int row = myComponent.convertRowIndexToModel(index / model.getColumnCount());
int col = myComponent.convertColumnIndexToModel(index % model.getColumnCount());
Object value = model.getValueAt(row, col);
return myToStringConvertor.fun(value, new Cell(row, col));
}
private class MyListIterator implements ListIterator<Object> {
private int myCursor;
public MyListIterator(int startingIndex) {
final int total = getElementCount();
myCursor = startingIndex < 0 ? total : startingIndex;
}
public boolean hasNext() {
return myCursor < getElementCount();
}
public Object next() {
return myCursor++;
}
public boolean hasPrevious() {
return myCursor > 0;
}
public Object previous() {
return (myCursor--) - 1;
}
public int nextIndex() {
return myCursor;
}
public int previousIndex() {
return myCursor - 1;
}
public void remove() {
throw new AssertionError("Not Implemented");
}
public void set(Object o) {
throw new AssertionError("Not Implemented");
}
public void add(Object o) {
throw new AssertionError("Not Implemented");
}
}
}<|fim▁end|> | this(table, TO_STRING);
}
public TableSpeedSearch(JTable table, final Convertor<Object, String> toStringConvertor) { |
<|file_name|>RequestPlaybackDialog.ts<|end_file_name|><|fim▁begin|>import { Component, Inject } from '@angular/core';
import { FormControl, FormGroup, Validators } from '@angular/forms';
import { MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog';
import { BehaviorSubject } from 'rxjs';
import { Gap, PlaybackRange } from '../client';
import { YamcsService } from '../core/services/YamcsService';
import { Option } from '../shared/forms/Select';
import * as utils from '../shared/utils';
@Component({
selector: 'app-request-playback-dialog',
templateUrl: './RequestPlaybackDialog.html',
})
export class RequestPlaybackDialog {
gaps: Gap[];
linkOptions$ = new BehaviorSubject<Option[]>([]);
form = new FormGroup({
mergeTolerance: new FormControl(30),
link: new FormControl('', Validators.required)
});
constructor(
private dialogRef: MatDialogRef<RequestPlaybackDialog>,
private yamcs: YamcsService,
@Inject(MAT_DIALOG_DATA) readonly data: any,
) {
this.gaps = this.data.gaps;
this.yamcs.yamcsClient.getLinks(yamcs.instance!).then(links => {
const linkOptions = [];
for (const link of links) {
if (link.type.indexOf('DassPlaybackPacketProvider') !== -1) {
linkOptions.push({
id: link.name,
label: link.name,
});
}
}
this.linkOptions$.next(linkOptions);
if (linkOptions.length) {
this.form.get('link')!.setValue(linkOptions[0].id);
}
});<|fim▁hole|> const ranges = [];
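// Merge per-APID gaps that fall within the tolerance into a single playback range.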
const rangeCache = new Map<number, PlaybackRange>();
const tolerance = this.form.value['mergeTolerance'] * 60 * 1000;
for (const gap of this.gaps) {
const prev = rangeCache.get(gap.apid);
if (prev && (this.toMillis(gap.start) - this.toMillis(prev.stop)) < tolerance) {
prev.stop = gap.stop;
} else {
const range = {
apid: gap.apid,
start: gap.start,
stop: gap.stop,
};
ranges.push(range);
rangeCache.set(gap.apid, range);
}
}
this.dialogRef.close({
link: this.form.value['link'],
ranges,
});
}
private toMillis(dateString: string) {
return utils.toDate(dateString).getTime();
}
}<|fim▁end|> | }
sendRequest() { |
<|file_name|>li_boost_shared_ptr_runme.py<|end_file_name|><|fim▁begin|>import li_boost_shared_ptr
import gc
debug = False
# simple shared_ptr usage - created in C++
class li_boost_shared_ptr_runme:
def main(self):
if (debug):
print "Started"
li_boost_shared_ptr.cvar.debug_shared = debug
# Change loop count to run for a long time to monitor memory
loopCount = 1 #5000
for i in range (0,loopCount):
self.runtest()
# Expect 1 instance - the one global variable (GlobalValue)
if (li_boost_shared_ptr.Klass.getTotal_count() != 1):
raise RuntimeError("Klass.total_count=%s" % li_boost_shared_ptr.Klass.getTotal_count())
wrapper_count = li_boost_shared_ptr.shared_ptr_wrapper_count()
if (wrapper_count != li_boost_shared_ptr.NOT_COUNTING):
# Expect 1 instance - the one global variable (GlobalSmartValue)
if (wrapper_count != 1):
raise RuntimeError("shared_ptr wrapper count=%s" % wrapper_count)
if (debug):
print "Finished"
def runtest(self):
# simple shared_ptr usage - created in C++
k = li_boost_shared_ptr.Klass("me oh my")
val = k.getValue()
self.verifyValue("me oh my", val)
self.verifyCount(1, k)
# simple shared_ptr usage - not created in C++
k = li_boost_shared_ptr.factorycreate()
val = k.getValue()
self.verifyValue("factorycreate", val)
self.verifyCount(1, k)
# pass by shared_ptr
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.smartpointertest(k)
val = kret.getValue()<|fim▁hole|> self.verifyCount(2, kret)
# pass by shared_ptr pointer
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.smartpointerpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerpointertest", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by shared_ptr reference
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.smartpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerreftest", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by shared_ptr pointer reference
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.smartpointerpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerpointerreftest", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# const pass by shared_ptr
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.constsmartpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# const pass by shared_ptr pointer
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.constsmartpointerpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# const pass by shared_ptr reference
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.constsmartpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by value
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.valuetest(k)
val = kret.getValue()
self.verifyValue("me oh my valuetest", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# pass by pointer
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.pointertest(k)
val = kret.getValue()
self.verifyValue("me oh my pointertest", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# pass by reference
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.reftest(k)
val = kret.getValue()
self.verifyValue("me oh my reftest", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# pass by pointer reference
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.pointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my pointerreftest", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# null tests
k = None
if (li_boost_shared_ptr.smartpointertest(k) != None):
raise RuntimeError("return was not null")
if (li_boost_shared_ptr.smartpointerpointertest(k) != None):
raise RuntimeError("return was not null")
if (li_boost_shared_ptr.smartpointerreftest(k) != None):
raise RuntimeError("return was not null")
if (li_boost_shared_ptr.smartpointerpointerreftest(k) != None):
raise RuntimeError("return was not null")
if (li_boost_shared_ptr.nullsmartpointerpointertest(None) != "null pointer"):
raise RuntimeError("not null smartpointer pointer")
try:
li_boost_shared_ptr.valuetest(k)
raise RuntimeError("Failed to catch null pointer")
except ValueError:
pass
if (li_boost_shared_ptr.pointertest(k) != None):
raise RuntimeError("return was not null")
try:
li_boost_shared_ptr.reftest(k)
raise RuntimeError("Failed to catch null pointer")
except ValueError:
pass
# $owner
k = li_boost_shared_ptr.pointerownertest()
val = k.getValue()
self.verifyValue("pointerownertest", val)
self.verifyCount(1, k)
k = li_boost_shared_ptr.smartpointerpointerownertest()
val = k.getValue()
self.verifyValue("smartpointerpointerownertest", val)
self.verifyCount(1, k)
# //////////////////////////////// Derived class ////////////////////////////////////////
# derived pass by shared_ptr
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedsmartptrtest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedsmartptrtest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# derived pass by shared_ptr pointer
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedsmartptrpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedsmartptrpointertest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# derived pass by shared_ptr ref
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedsmartptrreftest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedsmartptrreftest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# derived pass by shared_ptr pointer ref
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedsmartptrpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedsmartptrpointerreftest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# derived pass by pointer
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedpointertest-Derived", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# derived pass by ref
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedreftest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedreftest-Derived", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# //////////////////////////////// Derived and base class mixed ////////////////////////////////////////
# pass by shared_ptr (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.smartpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointertest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by shared_ptr pointer (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.smartpointerpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerpointertest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by shared_ptr reference (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.smartpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerreftest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by shared_ptr pointer reference (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.smartpointerpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerpointerreftest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by value (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.valuetest(k)
val = kret.getValue()
self.verifyValue("me oh my valuetest", val) # note slicing
self.verifyCount(1, k)
self.verifyCount(1, kret)
# pass by pointer (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.pointertest(k)
val = kret.getValue()
self.verifyValue("me oh my pointertest-Derived", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# pass by ref (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.reftest(k)
val = kret.getValue()
self.verifyValue("me oh my reftest-Derived", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# //////////////////////////////// Overloading tests ////////////////////////////////////////
# Base class
k = li_boost_shared_ptr.Klass("me oh my")
self.verifyValue(li_boost_shared_ptr.overload_rawbyval(k), "rawbyval")
self.verifyValue(li_boost_shared_ptr.overload_rawbyref(k), "rawbyref")
self.verifyValue(li_boost_shared_ptr.overload_rawbyptr(k), "rawbyptr")
self.verifyValue(li_boost_shared_ptr.overload_rawbyptrref(k), "rawbyptrref")
self.verifyValue(li_boost_shared_ptr.overload_smartbyval(k), "smartbyval")
self.verifyValue(li_boost_shared_ptr.overload_smartbyref(k), "smartbyref")
self.verifyValue(li_boost_shared_ptr.overload_smartbyptr(k), "smartbyptr")
self.verifyValue(li_boost_shared_ptr.overload_smartbyptrref(k), "smartbyptrref")
# Derived class
k = li_boost_shared_ptr.KlassDerived("me oh my")
self.verifyValue(li_boost_shared_ptr.overload_rawbyval(k), "rawbyval")
self.verifyValue(li_boost_shared_ptr.overload_rawbyref(k), "rawbyref")
self.verifyValue(li_boost_shared_ptr.overload_rawbyptr(k), "rawbyptr")
self.verifyValue(li_boost_shared_ptr.overload_rawbyptrref(k), "rawbyptrref")
self.verifyValue(li_boost_shared_ptr.overload_smartbyval(k), "smartbyval")
self.verifyValue(li_boost_shared_ptr.overload_smartbyref(k), "smartbyref")
self.verifyValue(li_boost_shared_ptr.overload_smartbyptr(k), "smartbyptr")
self.verifyValue(li_boost_shared_ptr.overload_smartbyptrref(k), "smartbyptrref")
# 3rd derived class
k = li_boost_shared_ptr.Klass3rdDerived("me oh my")
val = k.getValue()
self.verifyValue("me oh my-3rdDerived", val)
self.verifyCount(1, k)
val = li_boost_shared_ptr.test3rdupcast(k)
self.verifyValue("me oh my-3rdDerived", val)
self.verifyCount(1, k)
# //////////////////////////////// Member variables ////////////////////////////////////////
# smart pointer by value
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("smart member value")
m.SmartMemberValue = k
val = k.getValue()
self.verifyValue("smart member value", val)
self.verifyCount(2, k)
kmember = m.SmartMemberValue
val = kmember.getValue()
self.verifyValue("smart member value", val)
self.verifyCount(3, kmember)
self.verifyCount(3, k)
del m
self.verifyCount(2, kmember)
self.verifyCount(2, k)
# smart pointer by pointer
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("smart member pointer")
m.SmartMemberPointer = k
val = k.getValue()
self.verifyValue("smart member pointer", val)
self.verifyCount(1, k)
kmember = m.SmartMemberPointer
val = kmember.getValue()
self.verifyValue("smart member pointer", val)
self.verifyCount(2, kmember)
self.verifyCount(2, k)
del m
self.verifyCount(2, kmember)
self.verifyCount(2, k)
# smart pointer by reference
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("smart member reference")
m.SmartMemberReference = k
val = k.getValue()
self.verifyValue("smart member reference", val)
self.verifyCount(2, k)
kmember = m.SmartMemberReference
val = kmember.getValue()
self.verifyValue("smart member reference", val)
self.verifyCount(3, kmember)
self.verifyCount(3, k)
# The C++ reference refers to SmartMemberValue...
kmemberVal = m.SmartMemberValue
val = kmember.getValue()
self.verifyValue("smart member reference", val)
self.verifyCount(4, kmemberVal)
self.verifyCount(4, kmember)
self.verifyCount(4, k)
del m
self.verifyCount(3, kmemberVal)
self.verifyCount(3, kmember)
self.verifyCount(3, k)
# plain by value
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("plain member value")
m.MemberValue = k
val = k.getValue()
self.verifyValue("plain member value", val)
self.verifyCount(1, k)
kmember = m.MemberValue
val = kmember.getValue()
self.verifyValue("plain member value", val)
self.verifyCount(1, kmember)
self.verifyCount(1, k)
del m
self.verifyCount(1, kmember)
self.verifyCount(1, k)
# plain by pointer
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("plain member pointer")
m.MemberPointer = k
val = k.getValue()
self.verifyValue("plain member pointer", val)
self.verifyCount(1, k)
kmember = m.MemberPointer
val = kmember.getValue()
self.verifyValue("plain member pointer", val)
self.verifyCount(1, kmember)
self.verifyCount(1, k)
del m
self.verifyCount(1, kmember)
self.verifyCount(1, k)
# plain by reference
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("plain member reference")
m.MemberReference = k
val = k.getValue()
self.verifyValue("plain member reference", val)
self.verifyCount(1, k)
kmember = m.MemberReference
val = kmember.getValue()
self.verifyValue("plain member reference", val)
self.verifyCount(1, kmember)
self.verifyCount(1, k)
del m
self.verifyCount(1, kmember)
self.verifyCount(1, k)
# null member variables
m = li_boost_shared_ptr.MemberVariables()
# shared_ptr by value
k = m.SmartMemberValue
if (k != None):
raise RuntimeError("expected null")
m.SmartMemberValue = None
k = m.SmartMemberValue
if (k != None):
raise RuntimeError("expected null")
self.verifyCount(0, k)
# plain by value
try:
m.MemberValue = None
raise RuntimeError("Failed to catch null pointer")
except ValueError:
pass
# ////////////////////////////////// Global variables ////////////////////////////////////////
# smart pointer
kglobal = li_boost_shared_ptr.cvar.GlobalSmartValue
if (kglobal != None):
raise RuntimeError("expected null")
k = li_boost_shared_ptr.Klass("smart global value")
li_boost_shared_ptr.cvar.GlobalSmartValue = k
self.verifyCount(2, k)
kglobal = li_boost_shared_ptr.cvar.GlobalSmartValue
val = kglobal.getValue()
self.verifyValue("smart global value", val)
self.verifyCount(3, kglobal)
self.verifyCount(3, k)
self.verifyValue("smart global value", li_boost_shared_ptr.cvar.GlobalSmartValue.getValue())
li_boost_shared_ptr.cvar.GlobalSmartValue = None
# plain value
k = li_boost_shared_ptr.Klass("global value")
li_boost_shared_ptr.cvar.GlobalValue = k
self.verifyCount(1, k)
kglobal = li_boost_shared_ptr.cvar.GlobalValue
val = kglobal.getValue()
self.verifyValue("global value", val)
self.verifyCount(1, kglobal)
self.verifyCount(1, k)
self.verifyValue("global value", li_boost_shared_ptr.cvar.GlobalValue.getValue())
try:
li_boost_shared_ptr.cvar.GlobalValue = None
raise RuntimeError("Failed to catch null pointer")
except ValueError:
pass
# plain pointer
kglobal = li_boost_shared_ptr.cvar.GlobalPointer
if (kglobal != None):
raise RuntimeError("expected null")
k = li_boost_shared_ptr.Klass("global pointer")
li_boost_shared_ptr.cvar.GlobalPointer = k
self.verifyCount(1, k)
kglobal = li_boost_shared_ptr.cvar.GlobalPointer
val = kglobal.getValue()
self.verifyValue("global pointer", val)
self.verifyCount(1, kglobal)
self.verifyCount(1, k)
li_boost_shared_ptr.cvar.GlobalPointer = None
# plain reference
kglobal
k = li_boost_shared_ptr.Klass("global reference")
li_boost_shared_ptr.cvar.GlobalReference = k
self.verifyCount(1, k)
kglobal = li_boost_shared_ptr.cvar.GlobalReference
val = kglobal.getValue()
self.verifyValue("global reference", val)
self.verifyCount(1, kglobal)
self.verifyCount(1, k)
try:
li_boost_shared_ptr.cvar.GlobalReference = None
raise RuntimeError("Failed to catch null pointer")
except ValueError:
pass
# ////////////////////////////////// Templates ////////////////////////////////////////
pid = li_boost_shared_ptr.PairIntDouble(10, 20.2)
if (pid.baseVal1 != 20 or pid.baseVal2 != 40.4):
raise RuntimeError("Base values wrong")
if (pid.val1 != 10 or pid.val2 != 20.2):
raise RuntimeError("Derived Values wrong")
def verifyValue(self, expected, got):
if (expected != got):
raise RuntimeError("verify value failed. Expected: ", expected, " Got: ", got)
def verifyCount(self, expected, k):
got = li_boost_shared_ptr.use_count(k)
if (expected != got):
raise RuntimeError("verify use_count failed. Expected: ", expected, " Got: ", got)
runme = li_boost_shared_ptr_runme()
runme.main()<|fim▁end|> | self.verifyValue("me oh my smartpointertest", val)
self.verifyCount(2, k) |
<|file_name|>query-builder.no.js<|end_file_name|><|fim▁begin|>/*!
* jQuery QueryBuilder 2.3.0
* Locale: Norwegian (no)
* Author: Jna Borup Coyle, [email protected]
* Licensed under MIT (http://opensource.org/licenses/MIT)
*/
(function(root, factory) {
if (typeof define === 'function' && define.amd) {
define(['jquery', 'query-builder'], factory);
}
else {
factory(root.jQuery);
}
}(this, function($) {
"use strict";
var QueryBuilder = $.fn.queryBuilder;
QueryBuilder.regional['no'] = {
"__locale": "Norwegian (no)",
"__author": "Jna Borup Coyle, [email protected]",
"add_rule": "Legg til regel",
"add_group": "Legg til gruppe",
"delete_rule": "Slett regel",
"delete_group": "Slett gruppe",
"conditions": {
"AND": "OG",
"OR": "ELLER"<|fim▁hole|> "not_equal": "er ikke lik",
"in": "finnes i",
"not_in": "finnes ikke i",
"less": "er mindre enn",
"less_or_equal": "er mindre eller lik",
"greater": "er større enn",
"greater_or_equal": "er større eller lik",
"begins_with": "begynner med",
"not_begins_with": "begynner ikke med",
"contains": "inneholder",
"not_contains": "inneholder ikke",
"ends_with": "slutter med",
"not_ends_with": "slutter ikke med",
"is_empty": "er tom",
"is_not_empty": "er ikke tom",
"is_null": "er null",
"is_not_null": "er ikke null"
}
};
QueryBuilder.defaults({ lang_code: 'no' });
}));<|fim▁end|> | },
"operators": {
"equal": "er lik", |
<|file_name|>cast-enum-with-dtor.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT<|fim▁hole|>// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
#![feature(const_fn)]
// check dtor calling order when casting enums.
use std::sync::atomic;
use std::sync::atomic::Ordering;
use std::mem;
enum E {
A = 0,
B = 1,
C = 2
}
static FLAG: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
impl Drop for E {
fn drop(&mut self) {
// avoid dtor loop
unsafe { mem::forget(mem::replace(self, E::B)) };
FLAG.store(FLAG.load(Ordering::SeqCst)+1, Ordering::SeqCst);
}
}
fn main() {
assert_eq!(FLAG.load(Ordering::SeqCst), 0);
{
let e = E::C;
assert_eq!(e as u32, 2);
assert_eq!(FLAG.load(Ordering::SeqCst), 0);
}
assert_eq!(FLAG.load(Ordering::SeqCst), 1);
}<|fim▁end|> | // file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or |
<|file_name|>default_actions.py<|end_file_name|><|fim▁begin|>from django.utils.translation import ugettext_lazy as _
from reviewboard.admin.read_only import is_site_read_only_for
from reviewboard.reviews.actions import (BaseReviewRequestAction,
BaseReviewRequestMenuAction)
from reviewboard.reviews.features import general_comments_feature
from reviewboard.reviews.models import ReviewRequest
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.urls import diffviewer_url_names
class CloseMenuAction(BaseReviewRequestMenuAction):
"""A menu action for closing the corresponding review request."""
action_id = 'close-review-request-action'
label = _('Close')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
review_request = context['review_request']
user = context['request'].user
return (review_request.status == ReviewRequest.PENDING_REVIEW and
not is_site_read_only_for(user) and
(context['request'].user.pk == review_request.submitter_id or
(context['perms']['reviews']['can_change_status'] and
review_request.public)))
class SubmitAction(BaseReviewRequestAction):
"""An action for submitting the review request."""
action_id = 'submit-review-request-action'
label = _('Submitted')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
return (context['review_request'].public and
not is_site_read_only_for(context['request'].user))
class DiscardAction(BaseReviewRequestAction):
"""An action for discarding the review request."""
action_id = 'discard-review-request-action'
label = _('Discarded')
class DeleteAction(BaseReviewRequestAction):
"""An action for permanently deleting the review request."""
action_id = 'delete-review-request-action'
label = _('Delete Permanently')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
return (context['perms']['reviews']['delete_reviewrequest'] and
not is_site_read_only_for(context['request'].user))
class UpdateMenuAction(BaseReviewRequestMenuAction):
"""A menu action for updating the corresponding review request."""
action_id = 'update-review-request-action'
label = _('Update')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
review_request = context['review_request']
user = context['request'].user
return (review_request.status == ReviewRequest.PENDING_REVIEW and
not is_site_read_only_for(user) and
(user.pk == review_request.submitter_id or
context['perms']['reviews']['can_edit_reviewrequest']))
class UploadDiffAction(BaseReviewRequestAction):
"""An action for updating/uploading a diff for the review request."""
action_id = 'upload-diff-action'
def get_label(self, context):
"""Return this action's label.
The label will change depending on whether or not the corresponding
review request already has a diff.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
unicode: The label that displays this action to the user.
"""
review_request = context['review_request']
draft = review_request.get_draft(context['request'].user)
if (draft and draft.diffset) or review_request.get_diffsets():
return _('Update Diff')
return _('Upload Diff')
def should_render(self, context):
"""Return whether or not this action should render.
If the corresponding review request has a repository, then an upload
diff form exists, so we should render this UploadDiffAction.
Args:
context (django.template.Context):
The collection of key-value pairs available in the template
just before this action is to be rendered.
Returns:
bool: Determines if this action should render.
"""
return (context['review_request'].repository_id is not None and
not is_site_read_only_for(context['request'].user))
class UploadFileAction(BaseReviewRequestAction):
"""An action for uploading a file for the review request."""
action_id = 'upload-file-action'
label = _('Add File')
class DownloadDiffAction(BaseReviewRequestAction):
"""An action for downloading a diff from the review request."""
action_id = 'download-diff-action'
label = _('Download Diff')
def get_url(self, context):
"""Return this action's URL.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
unicode: The URL to invoke if this action is clicked.
"""
match = context['request'].resolver_match
# We want to use a relative URL in the diff viewer as we will not be
# re-rendering the page when switching between revisions.
if match.url_name in diffviewer_url_names:
return 'raw/'
return local_site_reverse('raw-diff', context['request'], kwargs={
'review_request_id': context['review_request'].display_id,
})
def get_hidden(self, context):
"""Return whether this action should be initially hidden to the user.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
bool: Whether this action should be initially hidden to the user.
"""
match = context['request'].resolver_match
if match.url_name in diffviewer_url_names:
return match.url_name == 'view-interdiff'
return super(DownloadDiffAction, self).get_hidden(context)
def should_render(self, context):
"""Return whether or not this action should render.
Args:
context (django.template.Context):
The collection of key-value pairs available in the template
just before this action is to be rendered.
Returns:
bool: Determines if this action should render.
"""
review_request = context['review_request']
request = context['request']
match = request.resolver_match
# If we're on a diff viewer page, then this DownloadDiffAction should
# initially be rendered, but possibly hidden.
if match.url_name in diffviewer_url_names:
return True
return review_request.repository_id is not None
class EditReviewAction(BaseReviewRequestAction):
"""An action for editing a review intended for the review request."""
action_id = 'review-action'
label = _('Review')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
user = context['request'].user
return (user.is_authenticated and
not is_site_read_only_for(user))
class AddGeneralCommentAction(BaseReviewRequestAction):
"""An action for adding a new general comment to a review."""
action_id = 'general-comment-action'
label = _('Add General Comment')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
request = context['request']
user = request.user
return (user.is_authenticated and
not is_site_read_only_for(user) and
general_comments_feature.is_enabled(request=request))
class ShipItAction(BaseReviewRequestAction):
"""An action for quickly approving the review request without comments."""
action_id = 'ship-it-action'<|fim▁hole|> def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
user = context['request'].user
return (user.is_authenticated and
not is_site_read_only_for(user))
def get_default_actions():
"""Return a copy of all the default actions.
Returns:
list of BaseReviewRequestAction: A copy of all the default actions.
"""
return [
CloseMenuAction([
SubmitAction(),
DiscardAction(),
DeleteAction(),
]),
UpdateMenuAction([
UploadDiffAction(),
UploadFileAction(),
]),
DownloadDiffAction(),
EditReviewAction(),
AddGeneralCommentAction(),
ShipItAction(),
]<|fim▁end|> | label = _('Ship It!')
|
<|file_name|>stdafx.cpp<|end_file_name|><|fim▁begin|>// stdafx.cpp : source file that includes just the standard includes
// TestConsoleW32.pch will be the pre-compiled header
// stdafx.obj will contain the pre-compiled type information
<|fim▁hole|>// TODO: reference any additional headers you need in STDAFX.H
// and not in this file<|fim▁end|> |
#include "stdafx.h"
|
<|file_name|>SubAccountIdentification11.go<|end_file_name|><|fim▁begin|>package iso20022
// Account to or from which a securities entry is made.
type SubAccountIdentification11 struct {
// Party that legally owns the account.
AccountOwner *PartyIdentification13Choice `xml:"AcctOwnr,omitempty"`
// Account to or from which a securities entry is made.
SafekeepingAccount *SecuritiesAccount14 `xml:"SfkpgAcct"`
// Indicates whether there is activity or information update reported in the statement.
ActivityIndicator *YesNoIndicator `xml:"ActvtyInd"`
// Net position of a segregated holding, in a single security, within the overall position held in a securities subaccount.
BalanceForSubAccount []*AggregateBalanceInformation9 `xml:"BalForSubAcct,omitempty"`
}<|fim▁hole|> s.AccountOwner = new(PartyIdentification13Choice)
return s.AccountOwner
}
func (s *SubAccountIdentification11) AddSafekeepingAccount() *SecuritiesAccount14 {
s.SafekeepingAccount = new(SecuritiesAccount14)
return s.SafekeepingAccount
}
func (s *SubAccountIdentification11) SetActivityIndicator(value string) {
s.ActivityIndicator = (*YesNoIndicator)(&value)
}
func (s *SubAccountIdentification11) AddBalanceForSubAccount() *AggregateBalanceInformation9 {
newValue := new(AggregateBalanceInformation9)
s.BalanceForSubAccount = append(s.BalanceForSubAccount, newValue)
return newValue
}<|fim▁end|> |
func (s *SubAccountIdentification11) AddAccountOwner() *PartyIdentification13Choice { |
<|file_name|>formpost.go<|end_file_name|><|fim▁begin|>// Copyright (c) 2017 Rackspace
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package middleware
import (
"context"
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"html"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"time"
"github.com/troubling/hummingbird/common"
"github.com/troubling/hummingbird/common/conf"
"github.com/troubling/hummingbird/common/srv"
"github.com/uber-go/tally"
)
const (
FP_INVALID = iota
FP_ERROR
FP_EXPIRED
FP_SCOPE_ACCOUNT
FP_SCOPE_CONTAINER
)
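// fpLimitReader counts the bytes read through it; overRead reports whether the
// underlying part exceeded the allowed limit l (the form's max_file_size).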
type fpLimitReader struct {
io.Reader
l int64
r int64
}
func (o *fpLimitReader) overRead() bool {
return o.r > o.l
}
func (o *fpLimitReader) Read(p []byte) (int, error) {
i, err := o.Reader.Read(p)
o.r += int64(i)
if o.r > o.l {
return 0, errors.New("Read over limit")
}
return i, err
}
func authenticateFormpost(ctx context.Context, proxyCtx *ProxyContext, account, container, path string, attrs map[string]string) int {
if expires, err := common.ParseDate(attrs["expires"]); err != nil {
return FP_ERROR
} else if time.Now().After(expires) {
return FP_EXPIRED
}
sigb, err := hex.DecodeString(attrs["signature"])
if err != nil || len(sigb) == 0 {
return FP_ERROR
}
checkhmac := func(key []byte) bool {
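// Recompute the HMAC-SHA1 over path, redirect, max_file_size, max_file_count
// and expires, and compare it to the submitted signature in constant time.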
mac := hmac.New(sha1.New, key)
fmt.Fprintf(mac, "%s\n%s\n%s\n%s\n%s", path, attrs["redirect"],
attrs["max_file_size"], attrs["max_file_count"], attrs["expires"])
return hmac.Equal(sigb, mac.Sum(nil))
}
if ai, err := proxyCtx.GetAccountInfo(ctx, account); err == nil {
if key, ok := ai.Metadata["Temp-Url-Key"]; ok && checkhmac([]byte(key)) {
return FP_SCOPE_ACCOUNT
} else if key, ok := ai.Metadata["Temp-Url-Key-2"]; ok && checkhmac([]byte(key)) {
return FP_SCOPE_ACCOUNT
} else if ci, err := proxyCtx.C.GetContainerInfo(ctx, account, container); err == nil {
if key, ok := ci.Metadata["Temp-Url-Key"]; ok && checkhmac([]byte(key)) {
return FP_SCOPE_CONTAINER
} else if key, ok := ci.Metadata["Temp-Url-Key-2"]; ok && checkhmac([]byte(key)) {
return FP_SCOPE_CONTAINER
}
}
}
return FP_INVALID
}
func formpostRespond(writer http.ResponseWriter, status int, message, redirect string) {
if redirect == "" {
body := fmt.Sprintf("<h1>%d %s</h1>FormPost: %s", status, http.StatusText(status), message)
writer.Header().Set("Content-Type", "text/html")
writer.Header().Set("Content-Length", strconv.FormatInt(int64(len(body)), 10))
writer.WriteHeader(status)
writer.Write([]byte(body))
} else {
glue := "?"
if strings.Contains(redirect, "?") {
glue = "&"
}
redir := fmt.Sprintf("%s%sstatus=%d&message=%s", redirect, glue, status, common.Urlencode(message))
body := fmt.Sprintf("<html><body><p><a href=\"%s\">Click to continue...</a></p></body></html>",
html.EscapeString(redir))
writer.Header().Set("Location", redir)
writer.Header().Set("Content-Length", strconv.Itoa(len(body)))
writer.WriteHeader(303)
io.WriteString(writer, body)
}
}
func formpostAuthorizer(scope int, account, container string) func(r *http.Request) (bool, int) {
return func(r *http.Request) (bool, int) {
ar, a, c, _ := getPathParts(r)
if scope == FP_SCOPE_ACCOUNT {
if ar && a == account {
return true, http.StatusOK
}
} else if scope == FP_SCOPE_CONTAINER {
if ar && a == account && c == container {
return true, http.StatusOK
}
}
return false, http.StatusForbidden
}
}
func formpost(formpostRequestsMetric tally.Counter) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
if request.Method != "POST" {
next.ServeHTTP(writer, request)
return
}
contentType, params, err := mime.ParseMediaType(request.Header.Get("Content-Type"))
if err != nil || contentType != "multipart/form-data" || params["boundary"] == "" {
next.ServeHTTP(writer, request)
return
}
apiReq, account, container, _ := getPathParts(request)
if !apiReq || account == "" || container == "" {
srv.StandardResponse(writer, 401)
return
}
ctx := GetProxyContext(request)
if ctx.Authorize != nil {
next.ServeHTTP(writer, request)
return
}
validated := false
attrs := map[string]string{
"redirect": "",
"max_file_size": "0",
"max_file_count": "0",
"expires": "0",
}
mr := multipart.NewReader(request.Body, params["boundary"])
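// Walk the multipart body in order: plain form fields fill attrs; once the
// signature validates, each file part is streamed to an internal PUT subrequest.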
var maxFileCount, fileCount, maxFileSize int64
for {
p, err := mr.NextPart()
if err == io.EOF {
break
} else if err != nil {
formpostRespond(writer, 400, "invalid request", attrs["redirect"])
return
}
if fn := p.FileName(); fn == "" {
data, err := ioutil.ReadAll(&io.LimitedReader{R: p, N: 8192})
if err != nil {
formpostRespond(writer, 400, "error reading form value", attrs["redirect"])
return
}
if len(attrs) > 64 {
formpostRespond(writer, 400, "too many form post values", attrs["redirect"])
return
}
attrs[p.FormName()] = string(data)
} else {
if !validated {
if maxFileCount, err = strconv.ParseInt(attrs["max_file_count"], 10, 64); err != nil || maxFileCount <= 0 {
formpostRespond(writer, 400, "max_file_count not valid", attrs["redirect"])
return
}
if maxFileSize, err = strconv.ParseInt(attrs["max_file_size"], 10, 64); err != nil || maxFileSize < 0 {
formpostRespond(writer, 400, "max_file_size not valid", attrs["redirect"])
return
}
scope := authenticateFormpost(request.Context(), ctx, account, container, request.URL.Path, attrs)
switch scope {
case FP_EXPIRED:
formpostRespond(writer, 401, "Form Expired", attrs["redirect"])
return
case FP_INVALID:
formpostRespond(writer, 401, "Invalid Signature", attrs["redirect"])
return
case FP_ERROR:
formpostRespond(writer, 400, "invalid request", attrs["redirect"])
return
default:
ctx.RemoteUsers = []string{".formpost"}
ctx.Authorize = formpostAuthorizer(scope, account, container)
validated = true
}
}
fileCount++
if fileCount > maxFileCount {
formpostRespond(writer, 400, "max file count exceeded", attrs["redirect"])
return
}
path := request.URL.Path
if !strings.HasSuffix(path, "/") && strings.Count(path, "/") < 4 {
path += "/"
}
path += fn
neww := httptest.NewRecorder()
flr := &fpLimitReader{Reader: p, l: maxFileSize}
newreq, err := ctx.newSubrequest("PUT", path, flr, request, "formpost")
if err != nil {
formpostRespond(writer, 500, "internal server error", attrs["redirect"])
return
}
newreq.Header.Set("X-Delete-At", attrs["x_delete_at"])
newreq.Header.Set("X-Delete-After", attrs["x_delete_after"])
newreq.TransferEncoding = []string{"chunked"}
if attrs["content-type"] != "" {
newreq.Header.Set("Content-Type", attrs["content-type"])
} else {
newreq.Header.Set("Content-Type", "application/octet-stream")
}
ctx.serveHTTPSubrequest(neww, newreq)
if flr.overRead() {
formpostRespond(writer, 400, "max_file_size exceeded", attrs["redirect"])
return
}
if neww.Code/100 != 2 {
formpostRespond(writer, neww.Code, "upload error", attrs["redirect"])
return<|fim▁hole|> formpostRespond(writer, 201, "Success.", attrs["redirect"])
})
}
}
func NewFormPost(config conf.Section, metricsScope tally.Scope) (func(http.Handler) http.Handler, error) {
RegisterInfo("formpost", map[string]interface{}{})
return formpost(metricsScope.Counter("formpost_requests")), nil
}<|fim▁end|> | }
}
p.Close()
} |
<|file_name|>yacc.py<|end_file_name|><|fim▁begin|>"""SCons.Tool.yacc
Tool-specific initialization for yacc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/yacc.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os.path
import SCons.Defaults
import SCons.Tool
import SCons.Util
YaccAction = SCons.Action.Action("$YACCCOM", "$YACCCOMSTR")
def _yaccEmitter(target, source, env, ysuf, hsuf):
yaccflags = env.subst("$YACCFLAGS", target=target, source=source)
flags = SCons.Util.CLVar(yaccflags)
targetBase, targetExt = os.path.splitext(SCons.Util.to_String(target[0]))
<|fim▁hole|>
# If -d is specified on the command line, yacc will emit a .h
# or .hpp file with the same name as the .c or .cpp output file.
if '-d' in flags:
target.append(targetBase + env.subst(hsuf, target=target, source=source))
# If -g is specified on the command line, yacc will emit a .vcg
# file with the same base name as the .y, .yacc, .ym or .yy file.
if "-g" in flags:
base, ext = os.path.splitext(SCons.Util.to_String(source[0]))
target.append(base + env.subst("$YACCVCGFILESUFFIX"))
# If -v is specified yacc will create the output debug file
# which is not really source for any process, but should
# be noted and also be cleaned
# Bug #2558
if "-v" in flags:
env.SideEffect(targetBase+'.output',target[0])
env.Clean(target[0],targetBase+'.output')
# With --defines and --graph, the name of the file is totally defined
# in the options.
fileGenOptions = ["--defines=", "--graph="]
for option in flags:
for fileGenOption in fileGenOptions:
l = len(fileGenOption)
if option[:l] == fileGenOption:
# A file generating option is present, so add the file
# name to the list of targets.
fileName = option[l:].strip()
target.append(fileName)
return (target, source)
def yEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.y', '.yacc'], '$YACCHFILESUFFIX')
def ymEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.ym'], '$YACCHFILESUFFIX')
def yyEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.yy'], '$YACCHXXFILESUFFIX')
def generate(env):
"""Add Builders and construction variables for yacc to an Environment."""
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
# C
c_file.add_action('.y', YaccAction)
c_file.add_emitter('.y', yEmitter)
c_file.add_action('.yacc', YaccAction)
c_file.add_emitter('.yacc', yEmitter)
# Objective-C
c_file.add_action('.ym', YaccAction)
c_file.add_emitter('.ym', ymEmitter)
# C++
cxx_file.add_action('.yy', YaccAction)
cxx_file.add_emitter('.yy', yyEmitter)
env['YACC'] = env.Detect('bison') or 'yacc'
env['YACCFLAGS'] = SCons.Util.CLVar('')
env['YACCCOM'] = '$YACC $YACCFLAGS -o $TARGET $SOURCES'
env['YACCHFILESUFFIX'] = '.h'
env['YACCHXXFILESUFFIX'] = '.hpp'
env['YACCVCGFILESUFFIX'] = '.vcg'
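# Editor's sketch (not part of this module): a hypothetical SConstruct showing
# how the emitters above expand targets; the file names and flags are
# assumptions, not taken from the SCons sources.
#
#   env = Environment(tools=['yacc', 'cc'])
#   env.Append(YACCFLAGS='-d')          # yEmitter then also registers parser.h
#   env.CFile(target='parser.c', source='parser.y')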
def exists(env):
return env.Detect(['bison', 'yacc'])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:<|fim▁end|> | if '.ym' in ysuf: # If using Objective-C
target = [targetBase + ".m"] # the extension is ".m". |
<|file_name|>retrieve-map.6.x.py<|end_file_name|><|fim▁begin|># Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)<|fim▁hole|> .fetch()
print(map_instance.sid)<|fim▁end|> |
map_instance = client.sync \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_maps("Players") \ |
<|file_name|>volumes_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2018 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"testing"
v1 "k8s.io/api/core/v1"
)
func TestVolumeExists(t *testing.T) {
vols := []v1.Volume{{Name: "a"}, {Name: "b"}, {Name: "d"}}
type args struct {
volumeName string
volumes []v1.Volume
}
tests := []struct {
name string
args args
wantErr bool
}{
{"exists", args{"d", vols}, false},
{"does-not-exist", args{"c", vols}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := VolumeExists(tt.args.volumeName, tt.args.volumes); (err != nil) != tt.wantErr {
t.Errorf("VolumeExists() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func vols(v ...v1.Volume) []v1.Volume {
return v
}
func TestVolumeIsEmptyDir(t *testing.T) {
emptyVolume := v1.Volume{Name: "e", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}
emptyAndHostVolume := v1.Volume{Name: "e&hp", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}
emptyAndHostVolume.VolumeSource.HostPath = &v1.HostPathVolumeSource{Path: "/dev/sdx"}
hostVolume := v1.Volume{Name: "h", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/dev/vdh"}}}
type args struct {
volumeName string
volumes []v1.Volume
}
tests := []struct {
name string
args args
wantErr bool
}{
{"is EmptyDir", args{"e", vols(emptyVolume)}, false},
{"is HostPath", args{"h", vols(hostVolume)}, true},
{"EmptyDir and HostPath", args{"e&hp", vols(emptyAndHostVolume)}, true},
{"not found", args{"e", vols(hostVolume)}, true},
{"many ; ok", args{"e", vols(emptyVolume, hostVolume, emptyAndHostVolume)}, false},
{"many ; nf", args{"e", vols(hostVolume, emptyAndHostVolume)}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {<|fim▁hole|> t.Errorf("VolumeIsEmptyDir() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestVolumeIsHostPath(t *testing.T) {
emptyVolume := v1.Volume{Name: "e", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}
emptyAndHostVolume := v1.Volume{Name: "e&hp", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}
emptyAndHostVolume.VolumeSource.HostPath = &v1.HostPathVolumeSource{Path: "/dev/sdx"}
hostVolume := v1.Volume{Name: "h", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/dev/vdh"}}}
type args struct {
volumeName string
path string
volumes []v1.Volume
}
tests := []struct {
name string
args args
wantErr bool
}{
{"is EmptyDir", args{"e", "/dev/sdx", vols(emptyVolume)}, true},
{"is HostPath", args{"h", "/dev/vdh", vols(hostVolume)}, false},
{"wrong HostPath", args{"h", "/dev/sdx", vols(hostVolume)}, true},
{"EmptyDir and HostPath", args{"e&hp", "/dev/sdx", vols(emptyAndHostVolume)}, true},
{"not found", args{"e", "/dev/sdx", vols(hostVolume)}, true},
{"many ; ok", args{"h", "/dev/vdh", vols(emptyVolume, hostVolume, emptyAndHostVolume)}, false},
{"many ; nf", args{"h", "/dev/vdh", vols(emptyVolume, emptyAndHostVolume)}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := VolumeIsHostPath(tt.args.volumeName, tt.args.path, tt.args.volumes); (err != nil) != tt.wantErr {
t.Errorf("VolumeIsHostPath() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestVolumeMountExists(t *testing.T) {
mounts := []v1.VolumeMount{{Name: "a"}, {Name: "b"}, {Name: "d"}}
type args struct {
mountName string
mounts []v1.VolumeMount
}
tests := []struct {
name string
args args
wantErr bool
}{
{"exists", args{"d", mounts}, false},
{"does-not-exist", args{"c", mounts}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := VolumeMountExists(tt.args.mountName, tt.args.mounts); (err != nil) != tt.wantErr {
t.Errorf("VolumeMountExists() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
// Don't test human readables since they aren't critical to function<|fim▁end|> | if err := VolumeIsEmptyDir(tt.args.volumeName, tt.args.volumes); (err != nil) != tt.wantErr { |
<|file_name|>shell.rs<|end_file_name|><|fim▁begin|>// Copyright 2014-2015 Jonathan Eyolfson
use raw;
use super::ShellSurface;
use super::Surface;
pub struct Shell {
ptr: *mut raw::wl_shell
}
impl Shell {
pub unsafe fn from_ptr(ptr: *mut raw::wl_shell) -> Shell {
Shell { ptr: ptr }
}
pub fn get_shell_surface(&mut self, surface: &mut Surface) -> ShellSurface {
unsafe {
let ptr = raw::wl_shell_get_shell_surface(
self.ptr,
surface.to_ptr()
);
ShellSurface::from_ptr(ptr)
}
}
}
impl Drop for Shell {<|fim▁hole|> raw::wl_shell_destroy(self.ptr)
}
}
}<|fim▁end|> | fn drop(&mut self) {
unsafe { |
<|file_name|>about_regex.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import re
class AboutRegex(Koan):
"""
    These koans are based on Ben's book: Regular Expressions in 10 Minutes.
    I found this book very useful, so I decided to write these koans in order to practice everything I had learned from it.
http://www.forta.com/books/0672325667/
"""
def test_matching_literal_text(self):
"""
Lesson 1 Matching Literal String
"""
string = "Hello, my name is Felix and this koans are based on the Ben's book: Regular Expressions in 10 minutes."
m = re.search(r'Felix', string)
self.assertTrue(m and m.group(0) and m.group(0)== 'Felix', "I want my name")
def test_matching_literal_text_how_many(self):
"""
Lesson 1 How many matches?
The default behaviour of most regular expression engines is to return just the first match.
        In Python you have the following options:
match() --> Determine if the RE matches at the beginning of the string.
search() --> Scan through a string, looking for any location where this RE matches.
findall() --> Find all substrings where the RE matches, and returns them as a list.
finditer() --> Find all substrings where the RE matches, and returns them as an iterator.
"""
string = "Hello, my name is Felix and this koans are based on the Ben's book: Regular Expressions in 10 minutes. Repeat My name is Felix"
m = len(re.findall('Felix', string)) #TIP: Maybe match it's not the best option
# I want to know how many times appears my name
self.assertEqual(m, 2)
def test_matching_literal_text_not_case_sensitivity(self):
"""
        Lesson 1 Matching Literal Strings without case sensitivity.
        Most regex implementations also support matches that are not case sensitive. In Python you can use re.IGNORECASE; in
        JavaScript you can specify the optional i flag.
In Ben's book you can see more languages.
"""
string = "Hello, my name is Felix or felix and this koans is based on the Ben's book: Regular Expressions in 10 minutes."
self.assertEqual(re.findall("felix", string), ['felix'])
self.assertEqual(re.findall("felix", string, re.IGNORECASE), ['Felix', 'felix'])
def test_matching_any_character(self):
"""
Lesson 1 Matching any character
. matches any character, alphabetic characters, digits and .<|fim▁hole|> + "apec1.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls"
# TIP: remember the name of this lesson
        change_this_search_string = r'a.\.xl.' # <-- I want to find the na/sa files listed above
self.assertEquals(len(re.findall(change_this_search_string, string)),3)
def test_matching_set_character(self):
"""
Lesson 2 Matching sets of characters
A set of characters is defined using the metacharacters [ and ]. Everything between them is part of the set and
any one of the set members must match (but not all).
"""
string = "sales.xlx\n" \
+ "sales1.xls\n" \
+ "orders3.xls\n" \
+ "apac1.xls\n" \
+ "sales2.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls\n" \
+ "ca1.xls"
# I want to find all files for North America(na) or South America(sa), but not (ca)
        # TIP: the pattern .a. from the test above would match here too, but it matches more than you want
change_this_search_string = '[ns]a[0-9].xls'
self.assertEquals(len(re.findall(change_this_search_string, string)),3)
def test_anything_but_matching(self):
"""
Lesson 2 Using character set ranges
Occasionally, you'll want a list of characters that you don't want to match.
Character sets can be negated using the ^ metacharacter.
"""
string = "sales.xlx\n" \
+ "sales1.xls\n" \
+ "orders3.xls\n" \
+ "apac1.xls\n" \
+ "sales2.xls\n" \
+ "sales3.xls\n" \
+ "europe2.xls\n" \
+ "sam.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls\n" \
+ "ca1.xls"
# I want to find the name sam
change_this_search_string = '[^nc]am.xls'
self.assertEquals(re.findall(change_this_search_string, string), ['sam.xls'])<|fim▁end|> | """
string = "pecks.xlx\n" \
+ "orders1.xls\n" \ |
<|file_name|>applayerframetype.rs<|end_file_name|><|fim▁begin|>/* Copyright (C) 2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
extern crate proc_macro;
use proc_macro::TokenStream;
use quote::quote;
use syn::{self, parse_macro_input, DeriveInput};
pub fn derive_app_layer_frame_type(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let name = input.ident;
let mut fields = Vec::new();
let mut vals = Vec::new();
let mut cstrings = Vec::new();
let mut names = Vec::new();
match input.data {
syn::Data::Enum(ref data) => {
for (i, v) in (&data.variants).into_iter().enumerate() {
fields.push(v.ident.clone());
let name = transform_name(&v.ident.to_string());
let cname = format!("{}\0", name);
names.push(name);
cstrings.push(cname);
vals.push(i as u8);
}
}
_ => panic!("AppLayerFrameType can only be derived for enums"),
}
let expanded = quote! {
impl crate::applayer::AppLayerFrameType for #name {
fn from_u8(val: u8) -> Option<Self> {
match val {
#( #vals => Some(#name::#fields) ,)*
_ => None,
}
}
fn as_u8(&self) -> u8 {
match *self {
#( #name::#fields => #vals ,)*
}
}
fn to_cstring(&self) -> *const std::os::raw::c_char {
let s = match *self {
#( #name::#fields => #cstrings ,)*
};
s.as_ptr() as *const std::os::raw::c_char
}
fn from_str(s: &str) -> Option<#name> {
match s {
#( #names => Some(#name::#fields) ,)*
_ => None
}
}
}
};
proc_macro::TokenStream::from(expanded)
}
fn transform_name(name: &str) -> String {
let mut xname = String::new();
let chars: Vec<char> = name.chars().collect();
for i in 0..chars.len() {
if i > 0 && i < chars.len() - 1 && chars[i].is_uppercase() && chars[i + 1].is_lowercase() {
xname.push('.');
}
xname.push_str(&chars[i].to_lowercase().to_string());
}
xname
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_transform_name() {
assert_eq!(transform_name("One"), "one");
assert_eq!(transform_name("OneTwo"), "one.two");
assert_eq!(transform_name("OneTwoThree"), "one.two.three");<|fim▁hole|> assert_eq!(transform_name("SMB3Data"), "smb3.data");
}
}<|fim▁end|> | assert_eq!(transform_name("NBSS"), "nbss");
assert_eq!(transform_name("NBSSHdr"), "nbss.hdr"); |
<|file_name|>ExportLists.py<|end_file_name|><|fim▁begin|># Author: John Elkins <[email protected]>
# License: MIT <LICENSE>
from common import *
if len(sys.argv) < 2:
log('ERROR output directory is required')
time.sleep(3)
exit()
# setup the output directory, create it if needed
output_dir = sys.argv[1]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# log in and load personal library
api = open_api()
library = load_personal_library()
def playlist_handler(playlist_name, playlist_description, playlist_tracks):
# skip empty and no-name playlists
if not playlist_name: return
if len(playlist_tracks) == 0: return
# setup output files
playlist_name = playlist_name.replace('/', '')
open_log(os.path.join(output_dir,playlist_name+u'.log'))
outfile = codecs.open(os.path.join(output_dir,playlist_name+u'.csv'),
encoding='utf-8',mode='w')
# keep track of stats
stats = create_stats()
export_skipped = 0
# keep track of songids incase we need to skip duplicates
song_ids = []
log('')
log('============================================================')
log(u'Exporting '+ unicode(len(playlist_tracks)) +u' tracks from '
+playlist_name)
log('============================================================')
# add the playlist description as a "comment"
if playlist_description:
outfile.write(tsep)
outfile.write(playlist_description)
outfile.write(os.linesep)
for tnum, pl_track in enumerate(playlist_tracks):
track = pl_track.get('track')
# we need to look up these track in the library
if not track:
library_track = [
item for item in library if item.get('id')
in pl_track.get('trackId')]
if len(library_track) == 0:
log(u'!! '+str(tnum+1)+repr(pl_track))
export_skipped += 1
continue
track = library_track[0]
result_details = create_result_details(track)
if not allow_duplicates and result_details['songid'] in song_ids:
log('{D} '+str(tnum+1)+'. '+create_details_string(result_details,True))
export_skipped += 1
continue
# update the stats
update_stats(track,stats)
# export the track
song_ids.append(result_details['songid'])
outfile.write(create_details_string(result_details))
outfile.write(os.linesep)
# calculate the stats
stats_results = calculate_stats_results(stats,len(playlist_tracks))
# output the stats to the log
log('')
log_stats(stats_results)
log(u'export skipped: '+unicode(export_skipped))
# close the files
close_log()
outfile.close()
# the personal library is used so we can look up tracks that fail to return
# info from the ...playlist_contents() call
playlist_contents = api.get_all_user_playlist_contents()
for playlist in playlist_contents:
playlist_name = playlist.get('name')
playlist_description = playlist.get('description')
playlist_tracks = playlist.get('tracks')
playlist_handler(playlist_name, playlist_description, playlist_tracks)
if export_thumbs_up:
# get thumbs up playlist
thumbs_up_tracks = []<|fim▁hole|> for track in library:
if track.get('rating') is not None and int(track.get('rating')) > 1:
thumbs_up_tracks.append(track)
# modify format of each dictionary to match the data type
# of the other playlists
thumbs_up_tracks_formatted = []
for t in thumbs_up_tracks:
thumbs_up_tracks_formatted.append({'track': t})
playlist_handler('Thumbs up', 'Thumbs up tracks', thumbs_up_tracks_formatted)
if export_all:
all_tracks_formatted = []
for t in library:
all_tracks_formatted.append({'track': t})
playlist_handler('All', 'All tracks', all_tracks_formatted)
close_api()<|fim▁end|> | |
<|file_name|>watson-wechat.py<|end_file_name|><|fim▁begin|>import itchat, time, re
from itchat.content import *
import urllib2, urllib
import json
from watson_developer_cloud import ConversationV1
response={'context':{}}
@itchat.msg_register([TEXT])
def text_reply(msg):
global response
request_text = msg['Text'].encode('UTF-8')
conversation = ConversationV1(
username='9c359fba-0692-4afa-afb1-bd5bf4d7e367',
password='5Id2zfapBV6e',
version='2017-04-21')
# replace with your own workspace_id<|fim▁hole|> print "request ==>", request_text
    # The original try/eval check always raised (eval() expects a string, not
    # a dict), so both branches made the same call; a single call with the
    # stored context (initially an empty dict) handles first and later turns.
    response = conversation.message(workspace_id=workspace_id, message_input={
        'text': request_text}, context=response['context'])
    if len(response['output']['text']) > 0:
response_text = response['output']['text'][0]
else:
response_text = "No message"
itchat.send( response_text, msg['FromUserName'])
itchat.auto_login()
itchat.run(debug=True)<|fim▁end|> | workspace_id = 'd3e50587-f36a-4bdf-bf3e-38c382e8d63a'
|
<|file_name|>RULE_9_2_D_use_reentrant_function.py<|end_file_name|><|fim▁begin|>"""
Use reentrant functions. Do not use non-reentrant functions (ctime, strtok, toupper).
== Violation ==
void A() {
    k = ctime(); <== Violation. ctime() is not a reentrant function.
    j = strtok(blar blar); <== Violation. strtok() is not a reentrant function.
}
== Good ==
void A() {
    k = t.ctime(); <== Correct. It may be a reentrant function.
}
void A() {
    k = ctime; <== Correct. It may be a reentrant function.
}
"""
from nsiqunittest.nsiqcppstyle_unittestbase import *
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
no_reenterant_functions = (
'ctime',
'strtok',
'toupper',
)
def RunRule(lexer, contextStack):
"""
    Flag uses of non-reentrant functions.
"""
t = lexer.GetCurToken()
if t.type == "ID":
<|fim▁hole|> if t2 is not None and t2.type == "LPAREN":
if t3 is None or t3.type != "PERIOD":
if t.value == "toupper" and nsiqcppstyle_state._nsiqcppstyle_state.GetVar(
"ignore_toupper", "false") == "true":
return
nsiqcppstyle_reporter.Error(t, __name__,
"Do not use not reentrant function(%s)." % t.value)
ruleManager.AddFunctionScopeRule(RunRule)
##########################################################################
# Unit Test
##########################################################################
class testRule(nct):
def setUpRule(self):
ruleManager.AddFunctionScopeRule(RunRule)
def test1(self):
self.Analyze("thisfile.c",
"""
void func1()
{
k = ctime()
}
""")
self.ExpectError(__name__)
def test2(self):
self.Analyze("thisfile.c",
"""
void func1() {
#define ctime() k
}
""")
self.ExpectSuccess(__name__)
def test3(self):
self.Analyze("thisfile.c",
"""
void ctime() {
}
""")
self.ExpectSuccess(__name__)
def test4(self):
self.Analyze("thisfile.c",
"""
void ctime () {
}
""")
self.ExpectSuccess(__name__)
def test5(self):
self.Analyze("thisfile.c",
"""
void func1()
{
k = help.ctime ()
}
""")
self.ExpectSuccess(__name__)
def test6(self):
self.Analyze("thisfile.c",
"""
void func1()
{
k = toupper()
}
""")
self.ExpectError(__name__)
def test7(self):
nsiqcppstyle_state._nsiqcppstyle_state.varMap["ignore_toupper"] = "true"
self.Analyze("thisfile.c",
"""
void func1()
{
k = toupper()
}
""")
self.ExpectSuccess(__name__)<|fim▁end|> | if t.value in no_reenterant_functions:
t2 = lexer.PeekNextTokenSkipWhiteSpaceAndComment()
t3 = lexer.PeekPrevTokenSkipWhiteSpaceAndComment()
|
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># Generated by Django 2.2.14 on 2020-07-09 10:37<|fim▁hole|>
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import olympia.amo.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('versions', '0008_auto_20200625_1114'),
('addons', '0014_remove_addon_view_source'),
]
operations = [
migrations.CreateModel(
name='PromotedApproval',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('group_id', models.SmallIntegerField(choices=[(1, 'Recommended'), (2, 'Verified - Tier 1'), (3, 'Verified - Tier 2'), (4, 'Line'), (5, 'Spotlight'), (6, 'Strategic')], null=True)),
('version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='promoted_approvals', to='versions.Version')),
],
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.CreateModel(
name='PromotedAddon',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('group_id', models.SmallIntegerField(choices=[(0, 'Not Promoted'), (1, 'Recommended'), (2, 'Verified - Tier 1'), (3, 'Verified - Tier 2'), (4, 'Line'), (5, 'Spotlight'), (6, 'Strategic')], default=0)),
('addon', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='addons.Addon')),
],
options={
'get_latest_by': 'created',
'abstract': False,
'base_manager_name': 'objects',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.AddConstraint(
model_name='promotedapproval',
constraint=models.UniqueConstraint(fields=('group_id', 'version'), name='unique_promoted_version'),
),
]<|fim▁end|> | |
<|file_name|>brute_searcher.py<|end_file_name|><|fim▁begin|># pylint: disable=no-else-return
"""Adapted from an exercise in "Coding the Matrix". This script ingests
a text file, produces an index of the words in that text file, and allows
a user to search that index for one or more words. Validation is minimal.
"""
from string import punctuation
def make_inverse_index(strlist):
"""Take a list containing lines of text and build a dictionary that
maps every word of every line to a list of the line numbers on which
it appears.
"""
index = {}
for i in range(1, len(strlist)):
line = sanitize(strlist[i])
words = line.split(' ')
for word in words:
if word == '':
continue
if word in index:
index[word].add(i)
else:
index[word] = {i}
return index
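# Editor's sketch (not in the original): for a toy input the index looks like
#   make_inverse_index(['header', 'the cat', 'the dog'])
#     -> {'the': {1, 2}, 'cat': {1}, 'dog': {2}}
# Note that line 0 ('header') is skipped by the range(1, ...) loop above.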
def sanitize(str_to_sanitize):<|fim▁hole|>
def search(index, query, operator='OR'):
"""Take a list of search terms and return a set containing the line
numbers where all (output='AND') or some (output='OR') of the terms
can be found.
"""
values = []
sanitized_input = []
for term in query:
sanitized_input.append(sanitize(term))
for term in sanitized_input:
if term in index:
values.append(index[term])
elif operator == 'AND':
            return []  # Short-circuit: AND can't be satisfied.
if len(values) < 1:
return []
elif len(values) == 1:
return values[0]
if operator.upper() == 'AND':
return values[0].intersection(*values[1:])
else:
return values[0].union(*values[1:])
def sorted_search(index, query, operator='OR'):
"""Convert search() results to a list and return it sorted."""
values = search(index, query, operator)
values = [x for x in values]
values.sort()
return values
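# Editor's sketch (not in the original): AND vs OR over a hand-built index.
#   index = {'cat': {1, 3}, 'dog': {2, 3}}
#   search(index, ['cat', 'dog'], 'OR')   -> {1, 2, 3}
#   search(index, ['cat', 'dog'], 'AND')  -> {3}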
def try_brute_search():
"""Provide a way to interact with brute_searcher from the CL."""
query = []
while True:
term = str(raw_input("Add a search term (if you're done just hit enter): "))
if term == '':
break
query.append(term)
text = open('../src/leaves.txt')
strlist = text.readlines()
index = make_inverse_index(strlist)
union = sorted_search(index, query, 'OR')
if union:
print "\nLINES THAT CONTAIN AT LEAST ONE SEARCH TERM:\n"
for line_number in union:
print "Line %d: %s" % (line_number, strlist[line_number])
else:
print "\nTHE SEARCH TERM(S) DON'T APPEAR ANYWHERE IN THE INPUT TEXT.\n"
quit() # If no lines contained ANY term, no lines can contain EVERY term.
intersection = sorted_search(index, query, 'AND')
if intersection:
print "\nLINES THAT CONTAIN EVERY SEARCH TERM:\n"
for line_number in intersection:
print "Line %d: %s" % (line_number, strlist[line_number])
else:
print "\nNO LINES CONTAINED EVERY SEARCH TERM.\n"
try_brute_search()<|fim▁end|> | """Remove punctuation and line endings. Standardize case."""
return str_to_sanitize.translate(None, punctuation + '\n').lower()
|
<|file_name|>gash.rs<|end_file_name|><|fim▁begin|>//
// gash.rs
//
// Reference solution for PS2
// Running on Rust 0.8
//
// Special thanks to Kiet Tran for porting code from Rust 0.7 to Rust 0.8.
//
// University of Virginia - cs4414 Fall 2013
// Weilin Xu, Purnam Jantrania, David Evans
// Version 0.2
//
// Modified
use std::{run, os, libc};
use std::task;
fn get_fd(fpath: &str, mode: &str) -> libc::c_int {
#[fixed_stack_segment]; #[inline(never)];
unsafe {
let fpathbuf = fpath.to_c_str().unwrap();
let modebuf = mode.to_c_str().unwrap();
return libc::fileno(libc::fopen(fpathbuf, modebuf));
}
}
fn exit(status: libc::c_int) {
#[fixed_stack_segment]; #[inline(never)];
unsafe { libc::exit(status); }
}
fn _handle_cmd(cmd_line: &str, pipe_in: libc::c_int,
pipe_out: libc::c_int, pipe_err: libc::c_int, output: bool) -> Option<run::ProcessOutput> {
let out_fd = pipe_out;
let in_fd = pipe_in;
let err_fd = pipe_err;
let mut argv: ~[~str] =
cmd_line.split_iter(' ').filter_map(|x| if x != "" { Some(x.to_owned()) } else { None }).to_owned_vec();
if argv.len() > 0 {
let program = argv.remove(0);
let (out_opt, err_opt) = if output { (None, None) } else { (Some(out_fd), Some(err_fd))};
let mut prog = run::Process::new(program, argv, run::ProcessOptions {
env: None,
dir: None,
in_fd: Some(in_fd),
out_fd: out_opt,
err_fd: err_opt
});
let output_opt = if output { Some(prog.finish_with_output()) }
else { prog.finish(); None };
// close the pipes after process terminates.
if in_fd != 0 {os::close(in_fd);}
if out_fd != 1 {os::close(out_fd);}
if err_fd != 2 {os::close(err_fd);}
return output_opt;
}
return None;
}
fn handle_cmd(cmd_line: &str, pipe_in: libc::c_int, pipe_out: libc::c_int, pipe_err: libc::c_int) {
_handle_cmd(cmd_line, pipe_in, pipe_out, pipe_err, false);
}
fn handle_cmd_with_output(cmd_line: &str, pipe_in: libc::c_int) -> Option<run::ProcessOutput> {
return _handle_cmd(cmd_line, pipe_in, -1, -1, true);
}
pub fn handle_cmdline(cmd_line: &str) -> Option<run::ProcessOutput> {
// handle pipes
let progs: ~[~str] = cmd_line.split_iter('|').map(|x| x.trim().to_owned()).collect();
let mut pipes = ~[];
for _ in range(0, progs.len()-1) {
pipes.push(os::pipe());
}
if progs.len() == 1 {
return handle_cmd_with_output(progs[0], 0);
} else {
let mut output_opt = None;
for i in range(0, progs.len()) {
let prog = progs[i].to_owned();
if i == 0 {
let pipe_i = pipes[i];
task::spawn_sched(task::SingleThreaded, ||{handle_cmd(prog, 0, pipe_i.out, 2)});
} else if i == progs.len() - 1 {
let pipe_i_1 = pipes[i-1];
output_opt = handle_cmd_with_output(prog, pipe_i_1.input);
} else {
let pipe_i = pipes[i];
let pipe_i_1 = pipes[i-1];
task::spawn_sched(task::SingleThreaded, ||{handle_cmd(prog, pipe_i_1.input, pipe_i.out, 2)});<|fim▁hole|> }
return output_opt;
}
}<|fim▁end|> | } |
<|file_name|>test_hamster.py<|end_file_name|><|fim▁begin|>import sys
sys.path.append("..\\..")
import os
import time
from api.providers.torec.hamster import TorecHashCodesHamster
from api.requestsmanager import RequestsManager
<|fim▁hole|> self.hamster = TorecHashCodesHamster(RequestsManager())
def test_remove_after_max_time_passed(self):
self.hamster.add_sub_id("23703")
self.hamster.add_sub_id("2638")
self.assertEquals(len(self.hamster._records), 2)
time.sleep(10)
self.assertEquals(len(self.hamster._records), 2)
time.sleep(120)
self.assertEquals(len(self.hamster._records), 0)
def test_remove_after_after_request(self):
self.hamster.add_sub_id("23703")
self.hamster.add_sub_id("2638")
self.assertEquals(len(self.hamster._records), 2)
self.hamster.remove_sub_id("2638")
self.assertEquals(len(self.hamster._records), 1)
self.assertEquals(self.hamster._records.keys()[0], "23703")
def run_tests():
test_runner = unittest.TextTestRunner(verbosity=0)
tests = unittest.defaultTestLoader.loadTestsFromTestCase(
TestTorecHashCodeHamster)
test_runner.run(tests)<|fim▁end|> | import unittest
class TestTorecHashCodeHamster(unittest.TestCase):
def setUp(self):
|
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
import django_filters
from rest_framework import viewsets
from rest_framework.response import Response
from api.serializers import UserSerializer, AddressSerializer, DestinationSerializer, GuestSerializer, MessageSerializer, OpenHourSerializer
from portal.models import Guest, Message
from building.models import Address
from transportation.models import Destination, OpenHour
class UserViewSet(viewsets.ModelViewSet):
lookup_field = 'username'
serializer_class = UserSerializer
queryset = User.objects.none() # critical
def list(self, request):
queryset = User.objects.filter(username = request.user.username) # critical
serializer = UserSerializer(queryset, many=True, context={'request': request})
return Response(serializer.data)
def retrieve(self, request, username=None):
queryset = User.objects.filter(username = request.user.username) # critical
guest = get_object_or_404(queryset, username=username)
serializer = UserSerializer(guest, context={'request': request})
return Response(serializer.data)
# portal models
class AddressFilter(django_filters.FilterSet):
street = django_filters.CharFilter(name="street",lookup_type="icontains")
city = django_filters.CharFilter(name="city",lookup_type="icontains")
class Meta:
model = Address
fields = ('street', 'city')
class AddressViewSet(viewsets.ModelViewSet):
queryset = Address.objects.all()
serializer_class = AddressSerializer
filter_class = AddressFilter
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
search_fields = ('street', 'city')
class DestinationViewSet(viewsets.ModelViewSet):
queryset = Destination.objects.all()
serializer_class = DestinationSerializer
class GuestViewSet(viewsets.GenericViewSet):
queryset = Guest.objects.none() # critical
def list(self, request):
queryset = Guest.objects.filter(user = request.user) # critical
serializer = GuestSerializer(queryset, many=True, context={'request': request})
return Response(serializer.data)
<|fim▁hole|> queryset = Guest.objects.filter(user = request.user) # critical
guest = get_object_or_404(queryset, pk=pk)
serializer = GuestSerializer(guest, context={'request': request})
return Response(serializer.data)
class MessageViewSet(viewsets.GenericViewSet):
queryset = Message.objects.none() # critical
def list(self, request):
queryset = Message.objects.filter(user = request.user) # critical
serializer = MessageSerializer(queryset, many=True, context={'request': request})
return Response(serializer.data)
def retrieve(self, request, pk=None):
queryset = Message.objects.filter(user = request.user) # critical
message = get_object_or_404(queryset, pk=pk)
serializer = MessageSerializer(message, context={'request': request})
return Response(serializer.data)
class OpenHourViewSet(viewsets.ModelViewSet):
queryset = OpenHour.objects.all()
serializer_class = OpenHourSerializer
# other applications
from sensors.models import Sensor, SensorValue
from api.serializers import SensorSerializer, SensorValueSerializer
class SensorViewSet(viewsets.ModelViewSet):
queryset = Sensor.objects.all()
serializer_class = SensorSerializer
class SensorValueViewSet(viewsets.ModelViewSet):
queryset = SensorValue.objects.all()
serializer_class = SensorValueSerializer
# security application
from security.models import Camera, SafetyIncidentSource, SafetyIncident, SafetyIncidentAlert, SafetyIncidentAlertBoundary
from api.serializers import CameraSerializer, SafetyIncidentSourceSerializer, SafetyIncidentSerializer, SafetyIncidentAlertSerializer, SafetyIncidentAlertBoundarySerializer
class CameraViewSet(viewsets.ModelViewSet):
queryset = Camera.objects.all()
serializer_class = CameraSerializer
class SafetyIncidentSourceViewSet(viewsets.ModelViewSet):
lookup_field = 'name'
queryset = SafetyIncidentSource.objects.all()
serializer_class = SafetyIncidentSourceSerializer
class SafetyIncidentFilter(django_filters.FilterSet):
"""
source is a ForeignKey in SafetyIncident. We look it up by "name" in the query string.
"""
source = django_filters.CharFilter(method='filter_source')
location = django_filters.CharFilter(name="location",lookup_type="icontains")
type = django_filters.CharFilter(name="type",lookup_type="icontains")
    def filter_source(self, qs, name, value):
        return qs.filter(source=SafetyIncidentSource.objects.filter(name=value))
class Meta:
model = SafetyIncident
fields = ('source', 'location', 'type')
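# Editor's note (hypothetical request, not in the original): with the filter
# above, GET /safetyincidents/?source=police&type=fire would return incidents
# whose source is named "police" and whose type contains "fire" (case-insensitive).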
class SafetyIncidentViewSet(viewsets.ModelViewSet):
queryset = SafetyIncident.objects.all()
serializer_class = SafetyIncidentSerializer
filter_class = SafetyIncidentFilter
class SafetyIncidentAlertViewSet(viewsets.ModelViewSet):
queryset = SafetyIncidentAlert.objects.all()
serializer_class = SafetyIncidentAlertSerializer
class SafetyIncidentAlertBoundaryViewSet(viewsets.ModelViewSet):
queryset = SafetyIncidentAlertBoundary.objects.all()
serializer_class = SafetyIncidentAlertBoundarySerializer
from dataview.models import Attribute, Node
from api.serializers import AttributeSerializer, NodeSerializer
class AttributeFilter(django_filters.FilterSet):
node = django_filters.CharFilter(method='filter_node')
def filter_node(self, qs, name, value):
return qs.filter(node=Node.objects.filter(name=value))
class Meta:
model = Attribute
fields = ('node', 'name')
class NodeViewSet(viewsets.ModelViewSet):
queryset = Node.objects.all()
serializer_class = NodeSerializer
class AttributeViewSet(viewsets.ModelViewSet):
queryset = Attribute.objects.all()
serializer_class = AttributeSerializer
filter_class = AttributeFilter<|fim▁end|> | def retrieve(self, request, pk=None): |
<|file_name|>util.rs<|end_file_name|><|fim▁begin|>//! Contains utility things used in glium's internals
use std::default::Default;
use std::hash::Hasher;
/// A speedy hash algorithm for node ids and def ids. The hashmap in
/// libcollections by default uses SipHash which isn't quite as speedy as we
/// want. In the compiler we're not really worried about DOS attempts, so we<|fim▁hole|>///
/// Copy-pasted from rustc/util/nodemap.rs
pub struct FnvHasher(u64);
impl Default for FnvHasher {
fn default() -> FnvHasher { FnvHasher(0xcbf29ce484222325) }
}
impl Hasher for FnvHasher {
fn write(&mut self, bytes: &[u8]) {
let FnvHasher(mut hash) = *self;
for byte in bytes {
hash = hash ^ (*byte as u64);
hash = hash * 0x100000001b3;
}
*self = FnvHasher(hash);
}
fn finish(&self) -> u64 { self.0 }
}<|fim▁end|> | /// just default to a non-cryptographic hash.
///
/// This uses FNV hashing, as described here:
/// http://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function |
<|file_name|>log_handler.py<|end_file_name|><|fim▁begin|>import logging
from .model import LogEntry, LogLevels
class NGWLogHandler(logging.Handler):
"""
Simple standard log handler for nextgisweb_log
"""
def __init__(self, level=LogLevels.default_value, component=None, group=None):
logging.Handler.__init__(self, level=level)
self.component = component
self.group = group<|fim▁hole|>
def emit(self, record):
self.format(record)
if record.exc_info:
record.exc_text = logging._defaultFormatter.formatException(record.exc_info)
else:
record.exc_text = None
# Insert log record:
log_entry = LogEntry()
log_entry.component = self.component
log_entry.group = self.group
log_entry.message_level = record.levelno
log_entry.message_level_name = record.levelname
log_entry.message_name = record.name
log_entry.message_text = record.msg
log_entry.exc_info = record.exc_text
log_entry.persist()<|fim▁end|> | |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django.forms import *
from django.forms.formsets import BaseFormSet
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
from tradeschool.models import *
class DefaultBranchForm(Form):
def __init__(self, user, redirect_to, *args, **kwargs):
super(DefaultBranchForm, self).__init__(*args, **kwargs)
if user.is_superuser:
branches = Branch.objects.all()
else:
branches = Branch.objects.filter(pk__in=user.branches_organized.all)
choices = [(o.id, unicode(o.title)) for o in branches]
self.fields['default_branch'] = forms.ChoiceField(choices=choices)
if user.default_branch:
self.initial['default_branch'] = user.default_branch.pk
self.initial['organizer_id'] = user.pk
self.initial['redirect_to'] = redirect_to
default_branch = forms.ChoiceField()
organizer_id = forms.IntegerField(widget=forms.HiddenInput)
redirect_to = forms.CharField(widget=forms.HiddenInput)
class TimeModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
from django.utils import timezone
current_tz = timezone.get_current_timezone()
date = obj.start_time.astimezone(current_tz).strftime('%A, %b %d')
time = obj.start_time.astimezone(current_tz).strftime(
'%I:%M%p').lstrip('0').lower()
if obj.venue is not None:
return "%s %s at %s" % (date, time, obj.venue)
return "%s %s" % (date, time)
class TimeSelectionForm(Form):
"""
A simple dropdown menu for teachers to select an available time
when submitting a class. Uses the Time model
"""
time = TimeModelChoiceField(
queryset=Time.objects.all(),
error_messages={'required': _('Please select a time'), }
)
class BranchForm(ModelForm):
def __init__(self, *args, **kwargs):
super(BranchForm, self).__init__(*args, **kwargs)
self.fields['city'].error_messages['required'] = _(
"Please enter a city")
self.fields['country'].error_messages['required'] = _(
"Please enter a country")
self.initial['site'] = Site.objects.get_current()
class Meta:
model = Branch
fields = (
'city',
'state',<|fim▁hole|> )
class TeacherForm (ModelForm):
def __init__(self, *args, **kwargs):
"Sets custom meta data to the form's fields"
super(ModelForm, self).__init__(*args, **kwargs)
self.fields['fullname'].error_messages['required'] = _(
"Please enter your name")
self.fields['email'].error_messages['required'] = _(
"Please enter your email")
self.fields['bio'].error_messages['required'] = _(
"Please tell us about yourself")
self.fields['phone'].error_messages['required'] = _(
"Please enter phone number")
class Meta:
model = Person
fields = ('fullname', 'email', 'phone', 'bio', 'website')
# since bio is set to blank=True in the Person model
# to accommodate students, we're setting it here manually.
bio = forms.CharField(
required=True,
label=_("A few sentences about you"),
help_text=_("For prospective students to see on the website"),
widget=forms.Textarea
)
class OrganizerForm(TeacherForm):
"""
"""
def __init__(self, *args, **kwargs):
"Sets custom meta data to the form's fields"
super(TeacherForm, self).__init__(*args, **kwargs)
self.fields['fullname'].error_messages['required'] = _(
"Please enter your name")
self.fields['email'].error_messages['required'] = _(
"Please enter your email")
self.fields['names_of_co_organizers'].error_messages['required'] = _(
"Please enter the names of at least one or two more organizers")
self.fields['bio'].error_messages['required'] = _(
"Please tell us about why you would like to open a Trade School in your area")
class Meta:
model = Person
fields = (
'fullname',
'names_of_co_organizers',
'email',
'bio',
)
# since names_of_co_organizers is set to blank=True in the Person model
# to accommodate students and teachers, we're setting it here manually.
names_of_co_organizers = forms.CharField(
required=True,
label=_("Names of Co-Organizers"),
)
bio = forms.CharField(
required=True,
label=_("A few sentences about why your group wants to open a Trade School"),
widget=forms.Textarea
)
class CourseForm (ModelForm):
def __init__(self, *args, **kwargs):
"Sets custom meta data to the form's fields"
super(ModelForm, self).__init__(*args, **kwargs)
self.fields['title'].error_messages['required'] = _(
"Please enter a class title")
self.fields['description'].error_messages['required'] = _(
"Please enter a class description")
self.fields['max_students'].error_messages['required'] = _(
"Please enter the maximum number of students in your class")
class Meta:
model = Course
fields = ('title', 'description', 'max_students')
class BarterItemForm (ModelForm):
def __init__(self, *args, **kwargs):
"Sets custom meta data to the form's fields"
super(ModelForm, self).__init__(*args, **kwargs)
self.fields['title'].widget.attrs['class'] = 'barter_item'
self.fields['title'].error_messages['required'] = _(
"Barter item cannot be blank")
class Meta:
model = BarterItem
fields = ('title',)
class BaseBarterItemFormSet(BaseFormSet):
def __init__(self, branch, *args, **kwargs):
""
self.branch = branch
super(BaseBarterItemFormSet, self).__init__(*args, **kwargs)
def clean(self):
"Checks that at least 5 barter items form are filled"
count = 0
required = self.branch.min_barteritems
if any(self.errors):
return
for form in self.forms:
if form.is_bound:
if form['title'].data:
count += 1
if count < required:
raise forms.ValidationError(
_("Please add at least %i barter items" % required)
)
class RegistrationForm(ModelForm):
def __init__(self, course, *args, **kwargs):
super(RegistrationForm, self).__init__(*args, **kwargs)
self.fields['items'].queryset = BarterItem.objects.filter(
course=course)
self.fields['items'].error_messages['required'] = _(
"Please select at least one item")
self.fields['items'].empty_label = None
class Meta:
model = Registration
fields = ('items', )
widgets = {'items': CheckboxSelectMultiple(), }
class StudentForm(ModelForm):
def __init__(self, *args, **kwargs):
super(StudentForm, self).__init__(*args, **kwargs)
self.fields['fullname'].error_messages['required'] = _(
"Please enter your name")
self.fields['email'].error_messages['required'] = _(
"Please enter your email")
self.fields['phone'].error_messages['required'] = _(
"Please enter your phone number")
class Meta:
model = Person
fields = ('fullname', 'email', 'phone')
class FeedbackForm(ModelForm):
def __init__(self, *args, **kwargs):
super(FeedbackForm, self).__init__(*args, **kwargs)
self.fields['content'].error_messages['required'] = _(
"Please enter your feedback")
class Meta:
model = Feedback
fields = ('content',)<|fim▁end|> | 'country', |
<|file_name|>content_templates_test.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import pytz
from datetime import datetime, timedelta
from apps.rules.routing_rules import Weekdays
from apps.templates.content_templates import get_next_run, get_item_from_template, render_content_template
from superdesk.metadata.item import ITEM_STATE, CONTENT_STATE
from superdesk.tests import TestCase
from superdesk.utc import utcnow
class TemplatesTestCase(TestCase):
def setUp(self):
# now is today at 09:05:03
self.now = datetime.utcnow().replace(hour=9, minute=5, second=3)
self.weekdays = [day.name for day in Weekdays]<|fim▁hole|> def get_delta(self, create_at, weekdays, time_zone=None, now=None, cron_list=None):
schedule = {
'day_of_week': weekdays,
'create_at': create_at,
'is_active': True,
'time_zone': time_zone or 'UTC'
}
if cron_list:
schedule['cron_list'] = cron_list
schedule.pop('create_at', None)
next_run = get_next_run(schedule, now or self.now)
return next_run - (now or self.now).replace(second=0)
def test_inactive_schedule(self):
self.assertEqual(None,
get_next_run({'is_active': False, 'day_of_week': self.weekdays, 'create_at': '09:15:00'}))
def test_next_run_same_day_later(self):
delta = self.get_delta('09:08:00', self.weekdays)
self.assertEqual(delta.days, 0)
self.assertEqual(delta.seconds, 179)
def test_next_run_same_day_later_cron_list(self):
cron_list = ['30 07 * * *', '08 09 * * *']
delta = self.get_delta('09:08:00', self.weekdays, cron_list=cron_list)
self.assertEqual(delta.days, 0)
self.assertEqual(delta.seconds, 179)
def test_next_run_next_day(self):
delta = self.get_delta('09:03:00', self.weekdays)
self.assertEqual(delta.days, 0)
self.assertEqual(delta.seconds, 3600 * 24 - 121)
def test_next_run_next_week(self):
delta = self.get_delta('09:03:00', [self.now.strftime('%a').upper()])
self.assertEqual(delta.days, 6)
def test_next_run_now(self):
delta = self.get_delta('09:05:00', self.weekdays)
self.assertEqual(delta.seconds, 24 * 60 * 60 - 1)
def test_get_item_from_template(self):
template = {'_id': 'foo', 'name': 'test',
'schedule_desk': 'sports', 'schedule_stage': 'schedule',
'data': {
'headline': 'Foo',
'dateline': {
'located': {
'city': 'Sydney',
'city_code': 'Sydney',
'tz': 'Australia/Sydney'
},
'date': '2015-10-10T10:10:10',
}
}}
now = utcnow()
with self.app.app_context():
item = get_item_from_template(template)
self.assertNotIn('_id', item)
self.assertEqual('foo', item.get('template'))
self.assertEqual('Foo', item.get('headline'))
self.assertEqual(CONTENT_STATE.SUBMITTED, item.get(ITEM_STATE))
self.assertEqual({'desk': 'sports', 'stage': 'schedule'}, item.get('task'))
dateline = item.get('dateline')
self.assertEqual('Sydney', dateline['located']['city'])
self.assertEqual(now, dateline.get('date'))
self.assertIn('SYDNEY', dateline.get('text'))
def test_next_run_for_timezone(self):
# UTC time Zero hours
now = datetime(2018, 6, 30, 19, 0, 0, 0, tzinfo=pytz.utc)
current_now = now + timedelta(seconds=5)
# schedule at 06:00 AM
delta = self.get_delta('06:00:00',
self.weekdays,
time_zone='Australia/Sydney',
now=current_now
)
self.assertEqual(delta.days, 0)
self.assertEqual(delta.seconds, 3600)
# 30 minutes before schedule
current_now = now + timedelta(minutes=30)
delta = self.get_delta('06:00:00',
self.weekdays,
time_zone='Australia/Sydney',
now=current_now
)
self.assertEqual(delta.days, 0)
self.assertEqual(delta.seconds, 1800)
# hour after schedule
current_now = now + timedelta(hours=1, seconds=5)
delta = self.get_delta('06:00:00',
self.weekdays,
time_zone='Australia/Sydney',
now=current_now
)
self.assertEqual(delta.days, 1)
class RenderTemplateTestCase(TestCase):
def test_render_content_template(self):
template = {
'_id': 'foo',
'template_name': 'test',
'template_desks': ['sports'],
'data': {
'headline': 'Foo Template: {{item.headline}}',
'body_html': 'This article has slugline: {{item.slugline}} and dateline: {{item.dateline["text"]}} '
'at {{item.versioncreated | format_datetime("Australia/Sydney", "%d %b %Y %H:%S %Z")}}',
'urgency': 1, 'priority': 3,
'dateline': {},
'anpa_take_key': 'this is test',
'place': ['Australia']
}
}
item = {
'_id': '123', 'headline': 'Test Template',
'slugline': 'Testing', 'body_html': 'This is test story',
'dateline': {
'text': 'hello world'
},
'urgency': 4, 'priority': 6,
'versioncreated': '2015-06-01T22:54:53+0000',
'place': ['NSW']
}
updates = render_content_template(item, template)
self.assertEqual(updates['headline'], 'Foo Template: Test Template')
self.assertEqual(updates['urgency'], 1)
self.assertEqual(updates['priority'], 3)
self.assertEqual(updates['body_html'], 'This article has slugline: Testing and dateline: '
'hello world at 02 Jun 2015 08:53 AEST')
self.assertListEqual(updates['place'], ['Australia'])
def test_headline_strip_tags(self):
template = {'data': {'headline': ' test\nit<br>'}}
updates = render_content_template({}, template)
self.assertEqual('test it', updates['headline'])
item = get_item_from_template(template)
self.assertEqual('test it', item['headline'])<|fim▁end|> | |
<|file_name|>extrude.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
Copyright (C) 2007
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
# local library
import inkex
import simplepath
import simpletransform
import cubicsuperpath
inkex.localize()
class Extrude(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
opts = [('-m', '--mode', 'string', 'mode', 'Lines',
'Join paths with lines or polygons'),
]
for o in opts:
self.OptionParser.add_option(o[0], o[1], action="store", type=o[2],
dest=o[3], default=o[4], help=o[5])
def effect(self):
paths = []
for id, node in self.selected.iteritems():
if node.tag == '{http://www.w3.org/2000/svg}path':
paths.append(node)
if len(paths) < 2:
inkex.errormsg(_('Need at least 2 paths selected'))
return
pts = [cubicsuperpath.parsePath(paths[i].get('d'))
for i in range(len(paths))]
for i in range(len(paths)):
if 'transform' in paths[i].keys():
trans = paths[i].get('transform')
trans = simpletransform.parseTransform(trans)
simpletransform.applyTransformToPath(trans, pts[i])
for n1 in range(0, len(paths)):
for n2 in range(n1 + 1, len(paths)):
verts = []
for i in range(0, min(map(len, pts))):
comp = []
for j in range(0, min(len(pts[n1][i]), len(pts[n2][i]))):
comp.append([pts[n1][i][j][1][-2:], pts[n2][i][j][1][-2:]])
verts.append(comp)
if self.options.mode.lower() == 'lines':
line = []
for comp in verts:
for n,v in enumerate(comp):
line += [('M', v[0])]
line += [('L', v[1])]
ele = inkex.etree.Element('{http://www.w3.org/2000/svg}path')
paths[0].xpath('..')[0].append(ele)
ele.set('d', simplepath.formatPath(line))
ele.set('style', 'fill:none;stroke:#000000;stroke-opacity:1;stroke-width:1;')
elif self.options.mode.lower() == 'polygons':
g = inkex.etree.Element('{http://www.w3.org/2000/svg}g')
g.set('style', 'fill:#000000;stroke:#000000;fill-opacity:0.3;stroke-width:2;stroke-opacity:0.6;')
paths[0].xpath('..')[0].append(g)<|fim▁hole|> for n,v in enumerate(comp):
nn = n+1
if nn == len(comp): nn = 0
line = []
line += [('M', comp[n][0])]
line += [('L', comp[n][1])]
line += [('L', comp[nn][1])]
line += [('L', comp[nn][0])]
line += [('L', comp[n][0])]
ele = inkex.etree.Element('{http://www.w3.org/2000/svg}path')
g.append(ele)
ele.set('d', simplepath.formatPath(line))
if __name__ == '__main__': #pragma: no cover
e = Extrude()
e.affect()<|fim▁end|> | for comp in verts: |
<|file_name|>debug.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
## Copyright (C) 2008 Red Hat, Inc.
## Copyright (C) 2008 Tim Waugh <[email protected]>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sys
import traceback
_debug=False
def debugprint (x):
if _debug:
try:
print x
except:
pass
def get_debugging ():
return _debug
def set_debugging (d):
global _debug
_debug = d
def fatalException (exitcode=1):
nonfatalException (type="fatal", end="Exiting")
sys.exit (exitcode)
def nonfatalException (type="non-fatal", end="Continuing anyway.."):
d = get_debugging ()
set_debugging (True)<|fim▁hole|> debugprint ("Caught %s exception. Traceback:" % type)
(type, value, tb) = sys.exc_info ()
tblast = traceback.extract_tb (tb, limit=None)
if len (tblast):
tblast = tblast[:len (tblast) - 1]
extxt = traceback.format_exception_only (type, value)
for line in traceback.format_tb(tb):
debugprint (line.strip ())
debugprint (extxt[0].strip ())
debugprint (end)
set_debugging (d)<|fim▁end|> | |
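# Usage sketch (illustrative; something_risky is a placeholder):
#   set_debugging (True)
#   try:
#       something_risky ()
#   except:
#       nonfatalException ()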
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup<|fim▁hole|>
d = generate_distutils_setup(
packages=['jsk_apc2016_common'],
package_dir={'': 'python'},
)
setup(**d)<|fim▁end|> | |
<|file_name|>modularity.py<|end_file_name|><|fim▁begin|>'''
Construct and manipulate multilayer representations of configuration vectors
Created by: Ankit Khambhati
Change Log
----------
2016/02/03 - Implement functions to construct multilayer networks
'''
import numpy as np
import scipy.sparse as sp
from ....Common import errors
from ...Transforms import configuration
def ml_modularity_matr(conn_matr, gamma, omega, null):
"""
    Find the multilayer modularity matrix of a network and an associated
    null model. Method assumes sequential linking between layers with
    homogeneous weights.
Parameters
----------
conn_matr: numpy.ndarray
Connection matrix over multiple layers
Has shape: [n_layers x n_conns]
gamma: float
Intra-layer resolution parameter, typical values around 1.0
omega: float
Inter-layer resolution parameter, typical values around 1.0
null: str
        Choose a null model type: ['None', 'temporal',
'connectional', 'nodal']
Returns
-------
ml_mod_matr: numpy.ndarray
Multilayer modularity matrix
Has shape: [n_nodes*n_layers x n_nodes*n_layers]
twomu: float
Total edge weight in the network
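
    Examples
    --------
    Hypothetical call on random data (shapes only, for illustration):

    >>> conn_matr = np.random.rand(4, 45)  # 4 layers of a 10-node network
    >>> B, twomu = ml_modularity_matr(conn_matr, 1.0, 1.0, 'none')
    >>> B.shape
    (40, 40)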
"""
# Standard param checks
errors.check_type(conn_matr, np.ndarray)
errors.check_type(gamma, float)
errors.check_type(omega, float)
errors.check_type(null, str)
# Check conn_matr dimensions
if not len(conn_matr.shape) == 2:
        raise ValueError('%r does not have two dimensions' % conn_matr)
n_layers = conn_matr.shape[0]
n_conns = conn_matr.shape[1]<|fim▁hole|> null = null.lower()
if null not in valid_null_types:
        raise ValueError('%r is not one of %r' % (null, valid_null_types))
# Initialize multilayer matrix
B = np.zeros((n_nodes*n_layers, n_nodes*n_layers))
twomu = 0
if null == 'temporal':
rnd_layer_ix = np.random.permutation(n_layers)
conn_matr = conn_matr[rnd_layer_ix, :]
if null == 'connectional':
rnd_node_ix = np.random.permutation(n_nodes)
rnd_node_iy = np.random.permutation(n_nodes)
ix, iy = np.mgrid[0:n_nodes, 0:n_nodes]
for ll, conn_vec in enumerate(conn_matr):
A = configuration.convert_conn_vec_to_adj_matr(conn_vec)
if null == 'connectional':
A = A[rnd_node_ix[ix], rnd_node_iy[iy]]
A = np.triu(A, k=1)
A += A.T
# Compute node degree
k = np.sum(A, axis=0)
twom = np.sum(k) # Intra-layer average node degree
twomu += twom # Inter-layer accumulated node degree
# NG Null-model
if twom < 1e-6:
P = np.dot(k.reshape(-1, 1), k.reshape(1, -1)) / 1.0
else:
P = np.dot(k.reshape(-1, 1), k.reshape(1, -1)) / twom
# Multi-slice modularity matrix
start_ix = ll*n_nodes
end_ix = (ll+1)*n_nodes
B[start_ix:end_ix, start_ix:end_ix] = A - gamma*P
# Add inter-slice degree
twomu += twomu + 2*omega*n_nodes*(n_layers-1)
# Add the sequential inter-layer model
interlayer = sp.spdiags(np.ones((2, n_nodes*n_layers)),
[-n_nodes, n_nodes],
n_nodes*n_layers, n_nodes*n_layers).toarray()
if null == 'nodal':
null_layer = np.random.permutation(np.diag(np.ones(n_nodes)))
for ll in xrange(n_layers-1):
interlayer[ll*n_nodes:(ll+1)*n_nodes,
(ll+1)*n_nodes:(ll+2)*n_nodes] = null_layer
interlayer = np.triu(interlayer, k=1)
interlayer += interlayer.T
B = B + omega*interlayer
B = np.triu(B, k=1)
B += B.T
ml_mod_matr = B
return ml_mod_matr, twomu<|fim▁end|> | n_nodes = int(np.floor(np.sqrt(2*n_conns))+1)
    # Check null model specification
valid_null_types = ['none', 'temporal', 'connectional', 'nodal'] |
<|file_name|>cli_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import string
import subprocess
import sys
import re
from cli_test_parameters import CLITestParameters
class CLITest:
def __init__(self):
pass
@staticmethod
def check_description(test_case, cli):
parameters = CLITestParameters()
test_case.assertEqual(parameters.get_value(cli.__class__.__name__, 'description'), cli.get_description())
@staticmethod
def check_curl(test_case, cli, output):
parameters = CLITestParameters()
p = re.compile(r'-u ".*?"\s')
a = p.findall(output)
output = output.replace(a[0], '')
test_case.assertEqual(parameters.get_value(cli.__class__.__name__, 'curl').encode('utf-8'), output.encode('utf-8'))
@staticmethod
def get_cli_name_from_class(i):
name = i.__class__.__name__
m = re.findall("([A-Z][a-z]+)", name)
m = [a.lower() for a in m]
cli_name = str.join('-', m)
return cli_name
@staticmethod
def check_cli_help(test_case, cli):
parameters = CLITestParameters()
name = cli.__class__.__name__
expected_output = parameters.get_cli_help(name)
m = re.findall("([A-Z][a-z]+)", name)
m = [a.lower() for a in m]
command = str.join('-', m)
try:
output = subprocess.check_output([command, '-h'])
test_case.assertEqual(expected_output, output)
except subprocess.CalledProcessError as e:
sys.stderr.write("{0}: {1}\n".format(e.output, e.returncode))
<|fim▁hole|> output = None
try:
command = CLITest.get_cli_name_from_class(cli)
args.insert(0, command)
output = subprocess.check_output(args=args)
except subprocess.CalledProcessError as e:
sys.stderr.write("{0}: {1}\n".format(e.output, e.returncode))
return output
@staticmethod
def random_string(n):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(n))
@staticmethod
def is_int(s):
try:
int(s)
return True
except ValueError:
return False<|fim▁end|> | @staticmethod
def get_cli_output(cli, args): |
<|file_name|>buffer_manager.cc<|end_file_name|><|fim▁begin|>#include "buffer_manager.hh"
#include "assert.hh"
#include "buffer.hh"
#include "client_manager.hh"
#include "exception.hh"
#include "file.hh"
#include "ranges.hh"
#include "string.hh"
namespace Kakoune
{
BufferManager::~BufferManager()
{
// Move buffers to avoid running BufClose with buffers remaining in that list
BufferList buffers = std::move(m_buffers);
for (auto& buffer : buffers)
buffer->on_unregistered();
// Make sure not clients exists
if (ClientManager::has_instance())
ClientManager::instance().clear();
}
Buffer* BufferManager::create_buffer(String name, Buffer::Flags flags,
StringView data, timespec fs_timestamp)
{
auto path = real_path(parse_filename(name));
for (auto& buf : m_buffers)
{
if (buf->name() == name or
(buf->flags() & Buffer::Flags::File and buf->name() == path))
throw runtime_error{"buffer name is already in use"};
}
m_buffers.push_back(std::make_unique<Buffer>(std::move(name), flags, data, fs_timestamp));
auto* buffer = m_buffers.back().get();
buffer->on_registered();
if (contains(m_buffer_trash, buffer))
throw runtime_error{"buffer got removed during its creation"};
return buffer;
}
void BufferManager::delete_buffer(Buffer& buffer)
{
auto it = find_if(m_buffers, [&](auto& p) { return p.get() == &buffer; });
kak_assert(it != m_buffers.end());
m_buffer_trash.emplace_back(std::move(*it));
m_buffers.erase(it);
if (ClientManager::has_instance())
ClientManager::instance().ensure_no_client_uses_buffer(buffer);
buffer.on_unregistered();
}
Buffer* BufferManager::get_buffer_ifp(StringView name)
{
auto path = real_path(parse_filename(name));
for (auto& buf : m_buffers)
{
if (buf->name() == name or
(buf->flags() & Buffer::Flags::File and buf->name() == path))
return buf.get();
}
return nullptr;
}<|fim▁hole|>
Buffer& BufferManager::get_buffer(StringView name)
{
Buffer* res = get_buffer_ifp(name);
if (not res)
throw runtime_error{format("no such buffer '{}'", name)};
return *res;
}
Buffer& BufferManager::get_first_buffer()
{
if (all_of(m_buffers, [](auto& b) { return (b->flags() & Buffer::Flags::Debug); }))
create_buffer("*scratch*", Buffer::Flags::None);
return *m_buffers.back();
}
void BufferManager::backup_modified_buffers()
{
for (auto& buf : m_buffers)
{
if ((buf->flags() & Buffer::Flags::File) and buf->is_modified()
and not (buf->flags() & Buffer::Flags::ReadOnly))
write_buffer_to_backup_file(*buf);
}
}
void BufferManager::clear_buffer_trash()
{
for (auto& buffer : m_buffer_trash)
{
// Do that again, to be tolerant in some corner cases, where a buffer is
// deleted during its creation
if (ClientManager::has_instance())
{
ClientManager::instance().ensure_no_client_uses_buffer(*buffer);
ClientManager::instance().clear_window_trash();
}
buffer.reset();
}
m_buffer_trash.clear();
}
}<|fim▁end|> | |
<|file_name|>X11_MenuDialog.py<|end_file_name|><|fim▁begin|>import sys,os,string
def GFX_MenuDialog(filename,*items):
file=open(filename,'w')
file.writelines(map(lambda x:x+"\n", items))
file.close()
    os.system("python X11_MenuDialog.py "+filename)
if __name__=="__main__":
import qt,string
class WidgetView ( qt.QWidget ):
def __init__( self, *args ):<|fim▁hole|> self.grid = qt.QGridLayout( 0, 0 )
self.topLayout.addLayout( self.grid, 10 )
# Create a list box
self.lb = qt.QListBox( self, "listBox" )
file=open(sys.argv[1],'r')
self.dasitems=map(lambda x:string.rstrip(x),file.readlines())
file.close()
self.setCaption(self.dasitems.pop(0))
for item in self.dasitems:
self.lb.insertItem(item)
self.grid.addMultiCellWidget( self.lb, 0, 0, 0, 0 )
self.connect( self.lb, qt.SIGNAL("selected(int)"), self.listBoxItemSelected )
self.topLayout.activate()
def listBoxItemSelected( self, index ):
txt = qt.QString()
txt = "List box item %d selected" % index
print txt
file=open(sys.argv[1],'w')
file.write(self.dasitems[index])
file.close();
a.quit()
a = qt.QApplication( sys.argv )
w = WidgetView()
a.setMainWidget( w )
w.show()
a.exec_loop()<|fim▁end|> | apply( qt.QWidget.__init__, (self,) + args )
self.topLayout = qt.QVBoxLayout( self, 10 ) |
<|file_name|>icmp.rs<|end_file_name|><|fim▁begin|>use core::mem::size_of;
use core::option::Option;
use common::debug::*;
use common::vec::*;
use network::common::*;
#[derive(Copy, Clone)]
#[repr(packed)]
pub struct ICMPHeader {
pub _type: u8,
pub code: u8,
pub checksum: Checksum,
pub data: [u8; 4]
}
pub struct ICMP {
pub header: ICMPHeader,
pub data: Vec<u8>
}
impl FromBytes for ICMP {
fn from_bytes(bytes: Vec<u8>) -> Option<ICMP> {
if bytes.len() >= size_of::<ICMPHeader>() {
unsafe {
return Option::Some(ICMP {
header: *(bytes.as_ptr() as *const ICMPHeader),
data: bytes.sub(size_of::<ICMPHeader>(), bytes.len() - size_of::<ICMPHeader>())
});
}
}
return Option::None;
}
}
impl ToBytes for ICMP {
fn to_bytes(&self) -> Vec<u8> {
unsafe{
let header_ptr: *const ICMPHeader = &self.header;
let mut ret = Vec::from_raw_buf(header_ptr as *const u8, size_of::<ICMPHeader>());
ret.push_all(&self.data);
return ret;
}
}
}
impl ICMP {
pub fn d(&self){
d("ICMP ");
dbh(self.header._type);
d(" code ");
dbh(self.header.code);
d(" data ");<|fim▁hole|>}<|fim▁end|> | dd(self.data.len());
} |
<|file_name|>raw_to_wav.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy.io.wavfile import write
a = np.fromfile('/tmp/file.raw', dtype='int16')<|fim▁hole|><|fim▁end|> | write('/tmp/file.wav', 16000, a) |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
//! This crate provides aggregate functions for batch executors.
#![allow(incomplete_features)]
#![feature(proc_macro_hygiene)]
#![feature(specialization)]
#[macro_use(box_try)]
extern crate tikv_util;
#[macro_use(other_err)]
extern crate tidb_query_common;
mod impl_avg;
mod impl_bit_op;
mod impl_count;
mod impl_first;
mod impl_max_min;
mod impl_sum;
mod parser;
mod summable;
mod util;
pub use self::parser::{AggrDefinitionParser, AllAggrDefinitionParser};
use tidb_query_common::Result;
use tidb_query_datatype::codec::data_type::*;
use tidb_query_datatype::expr::EvalContext;
/// A trait for all single parameter aggregate functions.
///
/// Unlike ordinary function, aggregate function calculates a summary value over multiple rows. To
/// save memory, this functionality is provided via an incremental update model:
///
/// 1. Each aggregate function associates a state structure, storing partially computed aggregate
/// results.
///
/// 2. The caller calls `update()` or `update_vector()` for each row to update the state.
///
/// 3. The caller finally calls `push_result()` to aggregate a summary value and push it into the
/// given data container.
///
/// This trait can be auto derived by using `tidb_query_codegen::AggrFunction`.
pub trait AggrFunction: std::fmt::Debug + Send + 'static {
/// The display name of the function.
fn name(&self) -> &'static str;
/// Creates a new state instance. Different states aggregate independently.
fn create_state(&self) -> Box<dyn AggrFunctionState>;
}
/// A trait for all single parameter aggregate function states.
///
/// Aggregate function states are created by corresponding aggregate functions. For each state,
/// it can be updated or aggregated (to finalize a result) independently.
///
/// Note that aggregate function states are strongly typed, that is, the caller must provide the
/// parameter in the correct data type for an aggregate function state that calculates over this
/// data type. To be safely boxed and placed in a vector, interfaces are provided in a form that
/// accepts all kinds of data types. However, unmatched types will result in panics at runtime.
pub trait AggrFunctionState:
std::fmt::Debug
+ Send
+ 'static
+ AggrFunctionStateUpdatePartial<&'static Int>
+ AggrFunctionStateUpdatePartial<&'static Real>
+ AggrFunctionStateUpdatePartial<&'static Decimal>
+ AggrFunctionStateUpdatePartial<BytesRef<'static>>
+ AggrFunctionStateUpdatePartial<&'static DateTime>
+ AggrFunctionStateUpdatePartial<&'static Duration>
+ AggrFunctionStateUpdatePartial<JsonRef<'static>>
+ AggrFunctionStateUpdatePartial<EnumRef<'static>>
+ AggrFunctionStateUpdatePartial<SetRef<'static>>
{
// TODO: A better implementation is to specialize different push result targets. However
// current aggregation executor cannot utilize it.
fn push_result(&self, ctx: &mut EvalContext, target: &mut [VectorValue]) -> Result<()>;
}
/// A helper trait for single parameter aggregate function states that only work over concrete eval
/// types. This is the actual and only trait that normal aggregate function states will implement.
///
/// Unlike `AggrFunctionState`, this trait only provides specialized `update()` and `push_result()`
/// functions according to the associated type. `update()` and `push_result()` functions that accept
/// any eval types (but will panic when eval type does not match expectation) will be generated via
/// implementations over this trait.
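///
/// See `AggrFnStateFoo` in the test module below for a small end-to-end
/// implementation of this trait.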
pub trait ConcreteAggrFunctionState: std::fmt::Debug + Send + 'static {
type ParameterType: EvaluableRef<'static>;
/// # Safety
///
/// This function should be called with `update_concrete` macro.
unsafe fn update_concrete_unsafe(
&mut self,
ctx: &mut EvalContext,
value: Option<Self::ParameterType>,
) -> Result<()>;
fn push_result(&self, ctx: &mut EvalContext, target: &mut [VectorValue]) -> Result<()>;
}
#[macro_export]
macro_rules! update_concrete {
( $state:expr, $ctx:expr, $value:expr ) => {
unsafe { $state.update_concrete_unsafe($ctx, $value.unsafe_into()) }
};
}
#[macro_export]
macro_rules! update_vector {
( $state:expr, $ctx:expr, $physical_values:expr, $logical_rows:expr ) => {
unsafe {
$state.update_vector_unsafe(
$ctx,
$physical_values.phantom_data().unsafe_into(),
$physical_values.unsafe_into(),
$logical_rows,
)
}
};
}
#[macro_export]
macro_rules! update_repeat {
( $state:expr, $ctx:expr, $value:expr, $repeat_times:expr ) => {
unsafe { $state.update_repeat_unsafe($ctx, $value.unsafe_into(), $repeat_times) }
};
}
#[macro_export]
macro_rules! update {
( $state:expr, $ctx:expr, $value:expr ) => {
unsafe { $state.update_unsafe($ctx, $value.unsafe_into()) }
};
}
#[macro_export]
macro_rules! impl_state_update_partial {
( $ty:tt ) => {
#[inline]
unsafe fn update_unsafe(
&mut self,
ctx: &mut EvalContext,
value: Option<$ty>,
) -> Result<()> {
self.update(ctx, value)
}
#[inline]
unsafe fn update_repeat_unsafe(
&mut self,
ctx: &mut EvalContext,
value: Option<$ty>,
repeat_times: usize,
) -> Result<()> {
self.update_repeat(ctx, value, repeat_times)
}
#[inline]
unsafe fn update_vector_unsafe(
&mut self,
ctx: &mut EvalContext,
phantom_data: Option<$ty>,
physical_values: $ty::ChunkedType,
logical_rows: &[usize],
) -> Result<()> {
self.update_vector(ctx, phantom_data, physical_values, logical_rows)
}
};
}
#[macro_export]
macro_rules! impl_concrete_state {
( $ty:ty ) => {
#[inline]
unsafe fn update_concrete_unsafe(
&mut self,
ctx: &mut EvalContext,
value: Option<$ty>,
) -> Result<()> {
self.update_concrete(ctx, value)
}
};
}
#[macro_export]
macro_rules! impl_unmatched_function_state {
( $ty:ty ) => {
impl<T1, T> super::AggrFunctionStateUpdatePartial<T1> for $ty
where
T1: EvaluableRef<'static> + 'static,
T: EvaluableRef<'static> + 'static,
VectorValue: VectorValueExt<T::EvaluableType>,
{
#[inline]
default unsafe fn update_unsafe(
&mut self,
_ctx: &mut EvalContext,
_value: Option<T1>,
) -> Result<()> {
panic!("Unmatched parameter type")
}
#[inline]
default unsafe fn update_repeat_unsafe(
&mut self,
_ctx: &mut EvalContext,
_value: Option<T1>,
_repeat_times: usize,
) -> Result<()> {
panic!("Unmatched parameter type")
}
#[inline]
default unsafe fn update_vector_unsafe(
&mut self,
_ctx: &mut EvalContext,
_phantom_data: Option<T1>,
_physical_values: T1::ChunkedType,
_logical_rows: &[usize],
) -> Result<()> {
panic!("Unmatched parameter type")
}
}
};
}
/// A helper trait that provides `update()` and `update_vector()` over a concrete type, which will
/// be relied on by `AggrFunctionState`.
pub trait AggrFunctionStateUpdatePartial<TT: EvaluableRef<'static>> {
/// Updates the internal state giving one row data.
///
/// # Panics
///
/// Panics if the aggregate function does not support the supplied concrete data type as its
/// parameter.
///
/// # Safety
///
/// This function should be called with `update` macro.
unsafe fn update_unsafe(&mut self, ctx: &mut EvalContext, value: Option<TT>) -> Result<()>;
/// Repeatedly updates the internal state giving one row data.
///
/// # Panics
///
/// Panics if the aggregate function does not support the supplied concrete data type as its
/// parameter.
///
/// # Safety
///
/// This function should be called with `update_repeat_unsafe` macro.
unsafe fn update_repeat_unsafe(
&mut self,
ctx: &mut EvalContext,
value: Option<TT>,
repeat_times: usize,
) -> Result<()>;
/// Updates the internal state giving multiple rows data.
///
/// # Panics
///
/// Panics if the aggregate function does not support the supplied concrete data type as its
/// parameter.
///
/// # Safety
///
/// This function should be called with `update_vector` macro.
unsafe fn update_vector_unsafe(
&mut self,<|fim▁hole|> phantom_data: Option<TT>,
physical_values: TT::ChunkedType,
logical_rows: &[usize],
) -> Result<()>;
}
impl<T: EvaluableRef<'static>, State> AggrFunctionStateUpdatePartial<T> for State
where
State: ConcreteAggrFunctionState,
{
// All `ConcreteAggrFunctionState` implement `AggrFunctionStateUpdatePartial<T>`, which is
// one of the trait bound that `AggrFunctionState` requires.
#[inline]
default unsafe fn update_unsafe(
&mut self,
_ctx: &mut EvalContext,
_value: Option<T>,
) -> Result<()> {
panic!("Unmatched parameter type")
}
#[inline]
default unsafe fn update_repeat_unsafe(
&mut self,
_ctx: &mut EvalContext,
_value: Option<T>,
_repeat_times: usize,
) -> Result<()> {
panic!("Unmatched parameter type")
}
#[inline]
default unsafe fn update_vector_unsafe(
&mut self,
_ctx: &mut EvalContext,
_phantom_data: Option<T>,
_physical_values: T::ChunkedType,
_logical_rows: &[usize],
) -> Result<()> {
panic!("Unmatched parameter type")
}
}
impl<T: EvaluableRef<'static>, State> AggrFunctionStateUpdatePartial<T> for State
where
State: ConcreteAggrFunctionState<ParameterType = T>,
{
#[inline]
unsafe fn update_unsafe(&mut self, ctx: &mut EvalContext, value: Option<T>) -> Result<()> {
self.update_concrete_unsafe(ctx, value)
}
#[inline]
unsafe fn update_repeat_unsafe(
&mut self,
ctx: &mut EvalContext,
value: Option<T>,
repeat_times: usize,
) -> Result<()> {
for _ in 0..repeat_times {
self.update_concrete_unsafe(ctx, value.clone())?;
}
Ok(())
}
#[inline]
unsafe fn update_vector_unsafe(
&mut self,
ctx: &mut EvalContext,
_phantom_data: Option<T>,
physical_values: T::ChunkedType,
logical_rows: &[usize],
) -> Result<()> {
for physical_index in logical_rows {
self.update_concrete_unsafe(ctx, physical_values.get_option_ref(*physical_index))?;
}
Ok(())
}
}
impl<F> AggrFunctionState for F
where
F: ConcreteAggrFunctionState,
{
fn push_result(&self, ctx: &mut EvalContext, target: &mut [VectorValue]) -> Result<()> {
<Self as ConcreteAggrFunctionState>::push_result(self, ctx, target)
}
}
#[cfg(test)]
mod tests {
use super::*;
use tidb_query_datatype::EvalType;
#[test]
fn test_type_match() {
/// A state that accepts Int and outputs Real.
#[derive(Clone, Debug)]
struct AggrFnStateFoo {
sum: i64,
}
impl AggrFnStateFoo {
fn new() -> Self {
Self { sum: 0 }
}
}
impl ConcreteAggrFunctionState for AggrFnStateFoo {
type ParameterType = &'static Int;
unsafe fn update_concrete_unsafe(
&mut self,
_ctx: &mut EvalContext,
value: Option<&'static Int>,
) -> Result<()> {
if let Some(v) = value {
self.sum += *v;
}
Ok(())
}
fn push_result(
&self,
_ctx: &mut EvalContext,
target: &mut [VectorValue],
) -> Result<()> {
target[0].push_real(Real::new(self.sum as f64).ok());
Ok(())
}
}
let mut ctx = EvalContext::default();
let mut s = AggrFnStateFoo::new();
// Update using `Int` should success.
assert!(update!(
&mut s as &mut dyn AggrFunctionStateUpdatePartial<_>,
&mut ctx,
Some(&1)
)
.is_ok());
assert!(update!(
&mut s as &mut dyn AggrFunctionStateUpdatePartial<_>,
&mut ctx,
Some(&3)
)
.is_ok());
// Update using other data type should panic.
let result = panic_hook::recover_safe(|| {
let mut s = s.clone();
let _ = update!(
&mut s as &mut dyn AggrFunctionStateUpdatePartial<_>,
&mut ctx,
Real::new(1.0).ok().as_ref()
);
});
assert!(result.is_err());
let result = panic_hook::recover_safe(|| {
let mut s = s.clone();
let _ = update!(
&mut s as &mut dyn AggrFunctionStateUpdatePartial<_>,
&mut ctx,
Some(&[1u8] as BytesRef)
);
});
assert!(result.is_err());
// Push result to Real VectorValue should success.
let mut target = vec![VectorValue::with_capacity(0, EvalType::Real)];
assert!((&mut s as &mut dyn AggrFunctionState)
.push_result(&mut ctx, &mut target)
.is_ok());
assert_eq!(target[0].to_real_vec(), &[Real::new(4.0).ok()]);
// Calling push result multiple times should also success.
assert!(update!(
&mut s as &mut dyn AggrFunctionStateUpdatePartial<_>,
&mut ctx,
Some(&1)
)
.is_ok());
assert!((&mut s as &mut dyn AggrFunctionState)
.push_result(&mut ctx, &mut target)
.is_ok());
assert_eq!(
target[0].to_real_vec(),
&[Real::new(4.0).ok(), Real::new(5.0).ok()]
);
// Push result into other VectorValue should panic.
let result = panic_hook::recover_safe(|| {
let mut s = s.clone();
let mut target: Vec<VectorValue> = Vec::new();
let _ = (&mut s as &mut dyn AggrFunctionState).push_result(&mut ctx, &mut target[..]);
});
assert!(result.is_err());
let result = panic_hook::recover_safe(|| {
let mut s = s.clone();
let mut target: Vec<VectorValue> = vec![VectorValue::with_capacity(0, EvalType::Int)];
let _ = (&mut s as &mut dyn AggrFunctionState).push_result(&mut ctx, &mut target[..]);
});
assert!(result.is_err());
}
}<|fim▁end|> | ctx: &mut EvalContext, |
<|file_name|>audiofiles.cpp<|end_file_name|><|fim▁begin|>/* Copyright © 2007 Apple Inc. All Rights Reserved.
Disclaimer: IMPORTANT: This Apple software is supplied to you by
Apple Inc. ("Apple") in consideration of your agreement to the
following terms, and your use, installation, modification or
redistribution of this Apple software constitutes acceptance of these
terms. If you do not agree with these terms, please do not use,
install, modify or redistribute this Apple software.
In consideration of your agreement to abide by the following terms, and
subject to these terms, Apple grants you a personal, non-exclusive
license, under Apple's copyrights in this original Apple software (the
"Apple Software"), to use, reproduce, modify and redistribute the Apple
Software, with or without modifications, in source and/or binary forms;
provided that if you redistribute the Apple Software in its entirety and
without modifications, you must retain this notice and the following
text and disclaimers in all such redistributions of the Apple Software.
Neither the name, trademarks, service marks or logos of Apple Inc.
may be used to endorse or promote products derived from the Apple
Software without specific prior written permission from Apple. Except
as expressly stated in this notice, no other rights or licenses, express
or implied, are granted by Apple herein, including but not limited to
any patent rights that may be infringed by your derivative works or by
other works in which the Apple Software may be incorporated.
The Apple Software is provided by Apple on an "AS IS" basis. APPLE
MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND
OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS.
IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION,
MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED
AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE),
STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "audiofiles.h"
#include "CAChannelLayouts.h"
#include <sys/stat.h>
#include <algorithm>
#include "CAXException.h"
#include "CAFilePathUtils.h"
#if !defined(__COREAUDIO_USE_FLAT_INCLUDES__)
#include <AudioUnit/AudioCodec.h>
#else
#include <AudioCodec.h>
#endif
nynex::FileReaderWriter::ConversionParameters::ConversionParameters() :
flags(0)
{
memset(this, 0, sizeof(nynex::FileReaderWriter::ConversionParameters));
output.channels = -1;
output.bitRate = -1;
output.codecQuality = -1;
output.srcQuality = -1;
output.srcComplexity = 0;
output.strategy = -1;
output.primeMethod = -1;
output.creationFlags = 0;
}
nynex::FileReaderWriter::FileReaderWriter() :
mOpenedSourceFile(false),
mCreatedOutputFile(false),
mEncoding(false),
mDecoding(false)
{
mOutName[0] = '\0';
}
nynex::FileReaderWriter::~FileReaderWriter()
{
}
void nynex::FileReaderWriter::GenerateOutputFileName(const char *inputFilePath,
const CAStreamBasicDescription &inputFormat,
const CAStreamBasicDescription &outputFormat,
OSType outputFileType,
char *outName)
{
struct stat sb;
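    // Build a collision-free output name: try "<dir>/<basename>.<ext>" first,
    // then append sample-rate / bit-depth hints, then numeric suffixes.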
char inputDir[256];
char inputBasename[256];
strcpy(inputDir, dirname((char*)inputFilePath));
const char *infname = basename((char*)inputFilePath);
const char *inext = strrchr(infname, '.');
if (inext == NULL) strcpy(inputBasename, infname);
else {
int n;
memcpy(inputBasename, infname, n = inext - infname);
inputBasename[n] = '\0';
}
CFArrayRef exts;
UInt32 propSize = sizeof(exts);
XThrowIfError(AudioFileGetGlobalInfo(kAudioFileGlobalInfo_ExtensionsForType,
sizeof(OSType), &outputFileType, &propSize, &exts), "generate output file name");
char outputExt[32];
CFStringRef cfext = (CFStringRef)CFArrayGetValueAtIndex(exts, 0);
CFStringGetCString(cfext, outputExt, sizeof(outputExt), kCFStringEncodingUTF8);
CFRelease(exts);
// 1. olddir + oldname + newext
sprintf(outName, "%s/%s.%s", inputDir, inputBasename, outputExt);
#if TARGET_OS_MAC
if (lstat(outName, &sb)) return;
#else
if (stat(outName, &sb)) return;
#endif
if (outputFormat.IsPCM()) {
// If sample rate changed:
// 2. olddir + oldname + "-SR" + newext
if (inputFormat.mSampleRate != outputFormat.mSampleRate && outputFormat.mSampleRate != 0.) {
sprintf(outName, "%s/%s-%.0fk.%s", inputDir, inputBasename, outputFormat.mSampleRate/1000., outputExt);
#if TARGET_OS_MAC
if (lstat(outName, &sb)) return;
#else
if (stat(outName, &sb)) return;
#endif
}
// If bit depth changed:
// 3. olddir + oldname + "-bit" + newext
if (inputFormat.mBitsPerChannel != outputFormat.mBitsPerChannel) {
sprintf(outName, "%s/%s-%ldbit.%s", inputDir, inputBasename, outputFormat.mBitsPerChannel, outputExt);
#if TARGET_OS_MAC
if (lstat(outName, &sb)) return;
#else
if (stat(outName, &sb)) return;
#endif
}
}
// maybe more with channels/layouts? $$$
// now just append digits
for (int i = 1; ; ++i) {
sprintf(outName, "%s/%s-%d.%s", inputDir, inputBasename, i, outputExt);
#if TARGET_OS_MAC
if (lstat(outName, &sb)) return;
#else
if (stat(outName, &sb)) return;
#endif
}
}
void nynex::FileReaderWriter::OpenInputFile()
{
AudioFileID fileid = mParams.input.audioFileID;
if (mParams.input.trackIndex != 0) {
if (fileid == 0) {
CACFURL url = CFURLCreateFromFileSystemRepresentation(NULL, (const Byte *)mParams.input.filePath, strlen(mParams.input.filePath), false);
XThrowIfError(AudioFileOpenURL(url.GetCFObject(), fsRdPerm, 0, &fileid), "Couldn't open input file");
}
XThrowIfError(AudioFileSetProperty(fileid, 'uatk' /*kAudioFilePropertyUseAudioTrack*/, sizeof(mParams.input.trackIndex), &mParams.input.trackIndex), "Couldn't set input file's track index");
}
if (fileid)
mSrcFile.WrapAudioFileID(fileid, false);
else
mSrcFile.Open(mParams.input.filePath);
}
void nynex::FileReaderWriter::OpenOutputFile(const CAStreamBasicDescription &destFormat, CAAudioChannelLayout &destFileLayout)
{
const ConversionParameters ¶ms = mParams;
// output file - need to get this from somewhere else
strcpy(mOutName, params.output.filePath);
// deal with pre-existing output file
struct stat st;
if (stat(mOutName, &st) == 0) {
XThrowIf(!(params.flags & kOpt_OverwriteOutputFile), 1, "overwrite output file");
// not allowed to overwrite
// output file exists - delete it
XThrowIfError(unlink(mOutName), "delete output file");
}
// create the output file
mDestFile.Create(mOutName, params.output.fileType, destFormat, &destFileLayout.Layout(), params.output.creationFlags);
}
void nynex::FileReaderWriter::Input(const char * filename) {
CloseInputFile();
mParams.input.filePath = filename; // object never owns this memory. don't free or malloc
PrepareConverters();
}
void nynex::FileReaderWriter::CloseInputFile() {
try { mSrcFile.Close(); } catch (...) { }
mOpenedSourceFile = false;
}
void nynex::FileReaderWriter::Output(const char * filename) {
CloseOutputFile();
XThrowIf(!mReady, -1, "Output: not prepared");
OpenOutputFile(mDestFormat, mDstFileLayout);
mCreatedOutputFile = true;
mDestFile.SetClientFormat(mDestClientFormat);
mDestFile.SetClientChannelLayout(mDstClientLayout); // must succeed
if (!mDestFormat.IsPCM()) {
// set the codec quality
if (mParams.output.codecQuality != -1) {
if (mParams.flags & kOpt_Verbose)
printf("codec quality = %ld\n", mParams.output.codecQuality);
mDestFile.SetConverterProperty(kAudioConverterCodecQuality, sizeof(UInt32), &mParams.output.codecQuality);
}
// set the bitrate strategy -- called bitrate format in the codecs since it had already shipped
if (mParams.output.strategy != -1) {
if (mParams.flags & kOpt_Verbose)
printf("strategy = %ld\n", mParams.output.strategy);
mDestFile.SetConverterProperty(kAudioCodecBitRateFormat, sizeof(UInt32), &mParams.output.strategy);
}
// set any user defined properties
UInt32 i = 0;
while (mParams.output.userProperty[i].propertyID != 0 && i < kMaxUserProps)
{
if (mParams.flags & kOpt_Verbose)
printf("user property '%4.4s' = %ld\n", (char *)(&mParams.output.userProperty[i].propertyID), mParams.output.userProperty[i].propertyValue);
mDestFile.SetConverterProperty(mParams.output.userProperty[i].propertyID, sizeof(SInt32), &mParams.output.userProperty[i].propertyValue);
++i;
}
// set the bitrate
if (mParams.output.bitRate != -1) {
if (mParams.flags & kOpt_Verbose)
printf("bitrate = %ld\n", mParams.output.bitRate);
mDestFile.SetConverterProperty(kAudioConverterEncodeBitRate, sizeof(UInt32), &mParams.output.bitRate);
}
if (mParams.output.srcComplexity) {
mDestFile.SetConverterProperty('srca'/*kAudioConverterSampleRateConverterComplexity*/, sizeof(UInt32), &mParams.output.srcComplexity, true);
}
// set the SRC quality
if (mParams.output.srcQuality != -1) {
if (mParams.flags & kOpt_Verbose)
printf("SRC quality = %ld\n", mParams.output.srcQuality);
mDestFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
}
}
}
void nynex::FileReaderWriter::CloseOutputFile() {
try { mDestFile.Close(); } catch (...) { }
mCreatedOutputFile = false;
}
void nynex::FileReaderWriter::Prepare(const nynex::FileReaderWriter::ConversionParameters &_params) {
CloseFiles();
mReady = false;
mParams = _params;
PrepareConverters();
mReady = true;
}
void nynex::FileReaderWriter::PrepareConverters()
{
try {
if (TaggedDecodingFromCAF())
ReadCAFInfo();
OpenInputFile();
mOpenedSourceFile = true;
// get input file's format and channel layout
const CAStreamBasicDescription &srcFormat = mSrcFile.GetFileDataFormat();
mSrcFileLayout = mSrcFile.GetFileChannelLayout();
if (mParams.flags & kOpt_Verbose) {
printf("Input file: %s, %qd frames", mParams.input.filePath ? basename((char*)mParams.input.filePath) : "?",
mSrcFile.GetNumberFrames());
if (mSrcFileLayout.IsValid()) {
printf(", %s", CAChannelLayouts::ConstantToString(mSrcFileLayout.Tag()));
}
printf("\n");
}
mSrcFormat = srcFormat;
// prepare output file's format
mDestFormat = mParams.output.dataFormat;
bool encoding = !mDestFormat.IsPCM();
bool decoding = !srcFormat.IsPCM();
// Don't want to allow this
XThrowIf((!encoding && mDestFormat.mSampleRate == 0.), -1, "destination sample rate not defined");
// on encode, it's OK to have a 0 sample rate; ExtAudioFile will get the SR from the converter and set it on the file.
// on decode or PCM->PCM, a sample rate of 0 is interpreted as using the source sample rate
if (mParams.input.channelLayoutTag != 0) {
XThrowIf(AudioChannelLayoutTag_GetNumberOfChannels(mParams.input.channelLayoutTag)
!= srcFormat.mChannelsPerFrame, -1, "input channel layout has wrong number of channels for file");
mSrcFileLayout = CAAudioChannelLayout(mParams.input.channelLayoutTag);
mSrcFile.SetFileChannelLayout(mSrcFileLayout); // happens
}
// destination channel layout
int outChannels = mParams.output.channels;
if (mParams.output.channelLayoutTag != 0) {
// use the one specified by caller, if any
mDstFileLayout = CAAudioChannelLayout(mParams.output.channelLayoutTag);
} /*else if (srcFileLayout.IsValid()) { // shouldn't allow this
// otherwise, assume the same as the source, if any
destFileLayout = srcFileLayout;
destLayoutDefaulted = true;
}*/
if (mDstFileLayout.IsValid()) {
// the output channel layout specifies the number of output channels
if (outChannels != -1)
XThrowIf((unsigned)outChannels != mDstFileLayout.NumberChannels(), -1,
"output channel layout has wrong number of channels");
else
outChannels = mDstFileLayout.NumberChannels();
}
/* this is totally wrong if (!(mParams.flags & kOpt_NoSanitizeOutputFormat)) {
// adjust the output format's channels; output.channels overrides the channels
// can't rely on source because we have so many possible files
XThrowIf( (outChannels == -1) , -1, "output channels never specified");
if (outChannels > 0) {
mDestFormat.mChannelsPerFrame = outChannels;
mDestFormat.mBytesPerPacket *= outChannels;
mDestFormat.mBytesPerFrame *= outChannels;
}
} */
CAStreamBasicDescription srcClientFormat, destClientFormat;
if (encoding) {
// encoding
mSrcClientFormat = mSrcFormat;
// if we're removing channels, do it on the source file
// (solves problems with mono-only codecs like AMR and is more efficient)
// this bit belongs in Input() only
// destformat needs to be specified in input parameters
if (mSrcFormat.mChannelsPerFrame > mDestFormat.mChannelsPerFrame) {
mSrcClientFormat.ChangeNumberChannels(mDestFormat.mChannelsPerFrame, true);
mSrcFile.SetClientFormat(mSrcClientFormat, NULL);
}
mDestClientFormat = mSrcClientFormat;
// by here, destClientFormat will have a valid sample rate
mDstClientLayout = mSrcFileLayout.IsValid() ? mSrcFileLayout : mDstFileLayout;
} else {
// decoding or PCM->PCM
XThrowIf((mDestFormat.mSampleRate == 0.), -1, "No sample rate defined for output");
mDestClientFormat = mDestFormat;
mSrcClientFormat = mDestFormat;
mSrcClientLayout = mDstFileLayout;
mSrcFile.SetClientFormat(mSrcClientFormat, &mSrcClientLayout);
}
XThrowIf(mSrcClientFormat.mBytesPerPacket == 0, -1, "source client format not PCM");
XThrowIf(mDestClientFormat.mBytesPerPacket == 0, -1, "dest client format not PCM");
// separate, must happen on every new input file
if (mSrcFormat.mSampleRate != 0. && mDestFormat.mSampleRate != 0. && mSrcFormat.mSampleRate != mDestFormat.mSampleRate)
{
// set the SRC quality
if (mParams.output.srcQuality != -1) {
if (mParams.flags & kOpt_Verbose)
printf("SRC quality = %ld\n", mParams.output.srcQuality);
if (!encoding)
mSrcFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
}
// set the SRC complexity
if (mParams.output.srcComplexity) {
if (mParams.flags & kOpt_Verbose)
printf("SRC complexity = '%4.4s'\n", (char *)(&mParams.output.srcComplexity));
if (!encoding)
mSrcFile.SetConverterProperty('srca'/*kAudioConverterSampleRateConverterComplexity*/, sizeof(UInt32), &mParams.output.srcComplexity, true);
}
}
if (decoding) {
if (mParams.output.primeMethod != -1)
mSrcFile.SetConverterProperty(kAudioConverterPrimeMethod, sizeof(UInt32), &mParams.output.primeMethod);
// set any user defined properties
UInt32 i = 0;
while (mParams.output.userProperty[i].propertyID != 0 && i < kMaxUserProps)
{
if (mParams.flags & kOpt_Verbose)
printf("user property '%4.4s' = %ld\n", (char *)(&mParams.output.userProperty[i].propertyID), mParams.output.userProperty[i].propertyValue);
mSrcFile.SetConverterProperty(mParams.output.userProperty[i].propertyID, sizeof(SInt32), &mParams.output.userProperty[i].propertyValue);
++i;
}
}
if (mParams.output.creationFlags & kAudioFileFlags_DontPageAlignAudioData) {
if (mParams.flags & kOpt_Verbose) {
printf("no filler chunks\n");
}
}
} catch (...) {
mReady = false;
try { mSrcFile.Close(); } catch (...) { } // close in Output() before doing anything else
try { mDestFile.Close(); } catch (...) { } // close in Output() before doing anything else
if (mCreatedOutputFile)
unlink(mOutName); // don't do this ever
throw;
}
}
nynex::FileReaderWriter::ReadBuffer * nynex::FileReaderWriter::GetNextReadBuffer() throw (CAXException) {
// prepare I/O buffers
UInt32 bytesToRead = 0x10000;
UInt32 framesToRead = bytesToRead; // OK, ReadPackets will limit as appropriate
ReadBuffer * out = new ReadBuffer();
out->readBuffer = CABufferList::New("readbuf", mSrcClientFormat);
out->readBuffer->AllocateBuffers(bytesToRead);
out->readPtrs = CABufferList::New("readptrs", mSrcClientFormat);
out->nFrames = framesToRead;
out->readPtrs->SetFrom(out->readBuffer);
mSrcFile.Read(out->nFrames, &out->readPtrs->GetModifiableBufferList());
return out;
}
void nynex::FileReaderWriter::WriteFromBuffer(nynex::FileReaderWriter::ReadBuffer * in) throw (CAXException) {
XThrowIf(in == NULL, -1, "NULL passed into WriteFromBuffer");
mDestFile.Write(in->nFrames, &in->readPtrs->GetModifiableBufferList());
}
#define kMaxFilename 64
struct CAFSourceInfo {
// our private user data chunk -- careful about compiler laying this out!
// big endian
char asbd[40];
UInt32 filetype;
char filename[kMaxFilename];
};
static void ASBD_NtoB(const AudioStreamBasicDescription *infmt, AudioStreamBasicDescription *outfmt)
{
*(UInt64 *)&outfmt->mSampleRate = EndianU64_NtoB(*(UInt64 *)&infmt->mSampleRate);<|fim▁hole|> outfmt->mFormatID = EndianU32_NtoB(infmt->mFormatID);
outfmt->mFormatFlags = EndianU32_NtoB(infmt->mFormatFlags);
outfmt->mBytesPerPacket = EndianU32_NtoB(infmt->mBytesPerPacket);
outfmt->mFramesPerPacket = EndianU32_NtoB(infmt->mFramesPerPacket);
outfmt->mBytesPerFrame = EndianU32_NtoB(infmt->mBytesPerFrame);
outfmt->mChannelsPerFrame = EndianU32_NtoB(infmt->mChannelsPerFrame);
outfmt->mBitsPerChannel = EndianU32_NtoB(infmt->mBitsPerChannel);
}
static void ASBD_BtoN(const AudioStreamBasicDescription *infmt, AudioStreamBasicDescription *outfmt)
{
*(UInt64 *)&outfmt->mSampleRate = EndianU64_BtoN(*(UInt64 *)&infmt->mSampleRate);
outfmt->mFormatID = EndianU32_BtoN(infmt->mFormatID);
outfmt->mFormatFlags = EndianU32_BtoN(infmt->mFormatFlags);
outfmt->mBytesPerPacket = EndianU32_BtoN(infmt->mBytesPerPacket);
outfmt->mFramesPerPacket = EndianU32_BtoN(infmt->mFramesPerPacket);
outfmt->mBytesPerFrame = EndianU32_BtoN(infmt->mBytesPerFrame);
outfmt->mChannelsPerFrame = EndianU32_BtoN(infmt->mChannelsPerFrame);
outfmt->mBitsPerChannel = EndianU32_BtoN(infmt->mBitsPerChannel);
}
void nynex::FileReaderWriter::WriteCAFInfo()
{
AudioFileID afid = 0;
CAFSourceInfo info;
UInt32 size;
try {
CACFURL url = CFURLCreateFromFileSystemRepresentation(NULL, (UInt8 *)mParams.input.filePath, strlen(mParams.input.filePath), false);
XThrowIf(url.GetCFObject() == NULL, -1, "couldn't locate input file");
XThrowIfError(AudioFileOpenURL(url.GetCFObject(), fsRdPerm, 0, &afid), "couldn't open input file");
size = sizeof(AudioFileTypeID);
XThrowIfError(AudioFileGetProperty(afid, kAudioFilePropertyFileFormat, &size, &info.filetype), "couldn't get input file's format");
AudioFileClose(afid);
afid = 0;
url = CFURLCreateFromFileSystemRepresentation(NULL, (UInt8 *)mOutName, strlen(mOutName), false);
XThrowIf(url.GetCFObject() == NULL, -1, "couldn't locate output file");
XThrowIfError(AudioFileOpenURL(url.GetCFObject(), fsRdWrPerm, 0, &afid), "couldn't open output file");
const char *srcFilename = strrchr(mParams.input.filePath, '/');
if (srcFilename++ == NULL) srcFilename = mParams.input.filePath;
ASBD_NtoB(&mSrcFormat, (AudioStreamBasicDescription *)info.asbd);
int namelen = std::min(kMaxFilename-1, (int)strlen(srcFilename));
memcpy(info.filename, srcFilename, namelen);
info.filename[namelen++] = 0;
info.filetype = EndianU32_NtoB(info.filetype);
XThrowIfError(AudioFileSetUserData(afid, 'srcI', 0, offsetof(CAFSourceInfo, filename) + namelen, &info), "couldn't set CAF file's source info chunk");
AudioFileClose(afid);
}
catch (...) {
if (afid)
AudioFileClose(afid);
throw;
}
}
void nynex::FileReaderWriter::ReadCAFInfo()
{
AudioFileID afid = 0;
CAFSourceInfo info;
UInt32 size;
OSStatus err;
try {
CACFURL url = CFURLCreateFromFileSystemRepresentation(NULL, (UInt8 *)mParams.input.filePath, strlen(mParams.input.filePath), false);
XThrowIf(!url.IsValid(), -1, "couldn't locate input file");
XThrowIfError(AudioFileOpenURL(url.GetCFObject(), fsRdPerm, 0, &afid), "couldn't open input file");
size = sizeof(AudioFileTypeID);
XThrowIfError(AudioFileGetProperty(afid, kAudioFilePropertyFileFormat, &size, &info.filetype), "couldn't get input file's format");
if (info.filetype == kAudioFileCAFType) {
size = sizeof(info);
err = AudioFileGetUserData(afid, 'srcI', 0, &size, &info);
if (!err) {
// restore the following from the original file info:
// filetype
// data format
// filename
AudioStreamBasicDescription destfmt;
ASBD_BtoN((AudioStreamBasicDescription *)info.asbd, &destfmt);
mParams.output.dataFormat = destfmt;
mParams.output.fileType = EndianU32_BtoN(info.filetype);
if (mParams.output.filePath == NULL) {
int len = strlen(mParams.input.filePath) + strlen(info.filename) + 2;
char *newname = (char *)malloc(len); // $$$ leaked
const char *dir = dirname((char*)mParams.input.filePath);
if (dir && (dir[0] !='.' && dir[1] != '/'))
sprintf(newname, "%s/%s", dir, info.filename);
else
strcpy(newname, info.filename);
mParams.output.filePath = newname;
mParams.flags = (mParams.flags & ~kOpt_OverwriteOutputFile) | kOpt_NoSanitizeOutputFormat;
}
}
}
AudioFileClose(afid);
}
catch (...) {
if (afid)
AudioFileClose(afid);
throw;
}
}<|fim▁end|> | |
<|file_name|>single_thread_download.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# single_thread_download.py
import os
import urllib.request
import urllib.error<|fim▁hole|>import shutil
# Single-threaded download
def single_thread_download(url, file_name=None, overwrite=False):
    # If file_name is empty, derive the file name from the URL
if file_name is None:
file_name = url.rpartition('/')[-1]
    # Potential bug: if overwrite is False and an existing file is incomplete (e.g. a partial download), it is kept as-is
if os.path.exists(file_name) and (not overwrite):
return
try:
with urllib.request.urlopen(url) as response, open(file_name, 'wb') as out_stream:
shutil.copyfileobj(response, out_stream)
except urllib.error.URLError as e:
print(e.errno, '\n', e.reason, '\n')
# single_thread_download("http://screencasts.b0.upaiyun.com/podcasts/nil_podcast_1.m4a")<|fim▁end|> | |
<|file_name|>zz_generated_operations_client.go<|end_file_name|><|fim▁begin|>//go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package armmariadb
import (
"context"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"net/http"
)
// OperationsClient contains the methods for the Operations group.
// Don't use this type directly, use NewOperationsClient() instead.
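// Example (illustrative, using azidentity for the credential):
//
//	cred, err := azidentity.NewDefaultAzureCredential(nil)
//	if err != nil { /* handle error */ }
//	client := NewOperationsClient(cred, nil)
//	resp, err := client.List(context.Background(), nil)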
type OperationsClient struct {
host string<|fim▁hole|>}
// NewOperationsClient creates a new instance of OperationsClient with the specified values.
// credential - used to authorize requests. Usually a credential from azidentity.
// options - pass nil to accept the default values.
func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientOptions) *OperationsClient {
cp := arm.ClientOptions{}
if options != nil {
cp = *options
}
if len(cp.Endpoint) == 0 {
cp.Endpoint = arm.AzurePublicCloud
}
client := &OperationsClient{
host: string(cp.Endpoint),
pl: armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, &cp),
}
return client
}
// List - Lists all of the available REST API operations.
// If the operation fails it returns an *azcore.ResponseError type.
// options - OperationsClientListOptions contains the optional parameters for the OperationsClient.List method.
func (client *OperationsClient) List(ctx context.Context, options *OperationsClientListOptions) (OperationsClientListResponse, error) {
req, err := client.listCreateRequest(ctx, options)
if err != nil {
return OperationsClientListResponse{}, err
}
resp, err := client.pl.Do(req)
if err != nil {
return OperationsClientListResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return OperationsClientListResponse{}, runtime.NewResponseError(resp)
}
return client.listHandleResponse(resp)
}
// listCreateRequest creates the List request.
func (client *OperationsClient) listCreateRequest(ctx context.Context, options *OperationsClientListOptions) (*policy.Request, error) {
urlPath := "/providers/Microsoft.DBforMariaDB/operations"
req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("api-version", "2018-06-01")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("Accept", "application/json")
return req, nil
}
// listHandleResponse handles the List response.
func (client *OperationsClient) listHandleResponse(resp *http.Response) (OperationsClientListResponse, error) {
result := OperationsClientListResponse{RawResponse: resp}
if err := runtime.UnmarshalAsJSON(resp, &result.OperationListResult); err != nil {
return OperationsClientListResponse{}, err
}
return result, nil
}<|fim▁end|> | pl runtime.Pipeline |
<|file_name|>NodeSerializationCodes.py<|end_file_name|><|fim▁begin|>from Node import error
SYNTAX_NODE_SERIALIZATION_CODES = {
# 0 is 'Token'. Needs to be defined manually
# 1 is 'Unknown'. Needs to be defined manually
'UnknownDecl': 2,
'TypealiasDecl': 3,
'AssociatedtypeDecl': 4,
'IfConfigDecl': 5,
'PoundErrorDecl': 6,
'PoundWarningDecl': 7,
'PoundSourceLocation': 8,
'ClassDecl': 9,
'StructDecl': 10,
'ProtocolDecl': 11,
'ExtensionDecl': 12,
'FunctionDecl': 13,
'InitializerDecl': 14,
'DeinitializerDecl': 15,
'SubscriptDecl': 16,
'ImportDecl': 17,
'AccessorDecl': 18,
'VariableDecl': 19,
'EnumCaseDecl': 20,
'EnumDecl': 21,
'OperatorDecl': 22,
'PrecedenceGroupDecl': 23,
'UnknownExpr': 24,
'InOutExpr': 25,
'PoundColumnExpr': 26,
'TryExpr': 27,
'IdentifierExpr': 28,
'SuperRefExpr': 29,
'NilLiteralExpr': 30,
'DiscardAssignmentExpr': 31,
'AssignmentExpr': 32,
'SequenceExpr': 33,<|fim▁hole|> 'PoundLineExpr': 34,
'PoundFileExpr': 35,
'PoundFunctionExpr': 36,
'PoundDsohandleExpr': 37,
'SymbolicReferenceExpr': 38,
'PrefixOperatorExpr': 39,
'BinaryOperatorExpr': 40,
'ArrowExpr': 41,
'FloatLiteralExpr': 42,
'TupleExpr': 43,
'ArrayExpr': 44,
'DictionaryExpr': 45,
'ImplicitMemberExpr': 46,
'IntegerLiteralExpr': 47,
'StringLiteralExpr': 48,
'BooleanLiteralExpr': 49,
'TernaryExpr': 50,
'MemberAccessExpr': 51,
'DotSelfExpr': 52,
'IsExpr': 53,
'AsExpr': 54,
'TypeExpr': 55,
'ClosureExpr': 56,
'UnresolvedPatternExpr': 57,
'FunctionCallExpr': 58,
'SubscriptExpr': 59,
'OptionalChainingExpr': 60,
'ForcedValueExpr': 61,
'PostfixUnaryExpr': 62,
'SpecializeExpr': 63,
'StringInterpolationExpr': 64,
'KeyPathExpr': 65,
'KeyPathBaseExpr': 66,
'ObjcKeyPathExpr': 67,
'ObjcSelectorExpr': 68,
'EditorPlaceholderExpr': 69,
'ObjectLiteralExpr': 70,
'UnknownStmt': 71,
'ContinueStmt': 72,
'WhileStmt': 73,
'DeferStmt': 74,
'ExpressionStmt': 75,
'RepeatWhileStmt': 76,
'GuardStmt': 77,
'ForInStmt': 78,
'SwitchStmt': 79,
'DoStmt': 80,
'ReturnStmt': 81,
'FallthroughStmt': 82,
'BreakStmt': 83,
'DeclarationStmt': 84,
'ThrowStmt': 85,
'IfStmt': 86,
'Decl': 87,
'Expr': 88,
'Stmt': 89,
'Type': 90,
'Pattern': 91,
'CodeBlockItem': 92,
'CodeBlock': 93,
'DeclNameArgument': 94,
'DeclNameArguments': 95,
'FunctionCallArgument': 96,
'TupleElement': 97,
'ArrayElement': 98,
'DictionaryElement': 99,
'ClosureCaptureItem': 100,
'ClosureCaptureSignature': 101,
'ClosureParam': 102,
'ClosureSignature': 103,
'StringSegment': 104,
'ExpressionSegment': 105,
'ObjcNamePiece': 106,
'TypeInitializerClause': 107,
'ParameterClause': 108,
'ReturnClause': 109,
'FunctionSignature': 110,
'IfConfigClause': 111,
'PoundSourceLocationArgs': 112,
'DeclModifier': 113,
'InheritedType': 114,
'TypeInheritanceClause': 115,
'MemberDeclBlock': 116,
'MemberDeclListItem': 117,
'SourceFile': 118,
'InitializerClause': 119,
'FunctionParameter': 120,
'AccessLevelModifier': 121,
'AccessPathComponent': 122,
'AccessorParameter': 123,
'AccessorBlock': 124,
'PatternBinding': 125,
'EnumCaseElement': 126,
'OperatorPrecedenceAndTypes': 127,
'PrecedenceGroupRelation': 128,
'PrecedenceGroupNameElement': 129,
'PrecedenceGroupAssignment': 130,
'PrecedenceGroupAssociativity': 131,
'Attribute': 132,
'LabeledSpecializeEntry': 133,
'ImplementsAttributeArguments': 134,
'ObjCSelectorPiece': 135,
'WhereClause': 136,
'ConditionElement': 137,
'AvailabilityCondition': 138,
'MatchingPatternCondition': 139,
'OptionalBindingCondition': 140,
'ElseIfContinuation': 141,
'ElseBlock': 142,
'SwitchCase': 143,
'SwitchDefaultLabel': 144,
'CaseItem': 145,
'SwitchCaseLabel': 146,
'CatchClause': 147,
'GenericWhereClause': 148,
'SameTypeRequirement': 149,
'GenericParameter': 150,
'GenericParameterClause': 151,
'ConformanceRequirement': 152,
'CompositionTypeElement': 153,
'TupleTypeElement': 154,
'GenericArgument': 155,
'GenericArgumentClause': 156,
'TypeAnnotation': 157,
'TuplePatternElement': 158,
'AvailabilityArgument': 159,
'AvailabilityLabeledArgument': 160,
'AvailabilityVersionRestriction': 161,
'VersionTuple': 162,
'CodeBlockItemList': 163,
'FunctionCallArgumentList': 164,
'TupleElementList': 165,
'ArrayElementList': 166,
'DictionaryElementList': 167,
'StringInterpolationSegments': 168,
'DeclNameArgumentList': 169,
'ExprList': 170,
'ClosureCaptureItemList': 171,
'ClosureParamList': 172,
'ObjcName': 173,
'FunctionParameterList': 174,
'IfConfigClauseList': 175,
'InheritedTypeList': 176,
'MemberDeclList': 177,
'ModifierList': 178,
'AccessPath': 179,
'AccessorList': 180,
'PatternBindingList': 181,
'EnumCaseElementList': 182,
'PrecedenceGroupAttributeList': 183,
'PrecedenceGroupNameList': 184,
'TokenList': 185,
'NonEmptyTokenList': 186,
'AttributeList': 187,
'SpecializeAttributeSpecList': 188,
'ObjCSelector': 189,
'SwitchCaseList': 190,
'CatchClauseList': 191,
'CaseItemList': 192,
'ConditionElementList': 193,
'GenericRequirementList': 194,
'GenericParameterList': 195,
'CompositionTypeElementList': 196,
'TupleTypeElementList': 197,
'GenericArgumentList': 198,
'TuplePatternElementList': 199,
'AvailabilitySpecList': 200,
'UnknownPattern': 201,
'EnumCasePattern': 202,
'IsTypePattern': 203,
'OptionalPattern': 204,
'IdentifierPattern': 205,
'AsTypePattern': 206,
'TuplePattern': 207,
'WildcardPattern': 208,
'ExpressionPattern': 209,
'ValueBindingPattern': 210,
'UnknownType': 211,
'SimpleTypeIdentifier': 212,
'MemberTypeIdentifier': 213,
'ClassRestrictionType': 214,
'ArrayType': 215,
'DictionaryType': 216,
'MetatypeType': 217,
'OptionalType': 218,
'ImplicitlyUnwrappedOptionalType': 219,
'CompositionType': 220,
'TupleType': 221,
'FunctionType': 222,
'AttributedType': 223,
'YieldStmt': 224,
'YieldList': 225,
'IdentifierList': 226,
'NamedAttributeStringArgument': 227,
'DeclName': 228,
'PoundAssertStmt': 229,
}
def verify_syntax_node_serialization_codes(nodes, serialization_codes):
# Verify that all nodes have serialization codes
for node in nodes:
if not node.is_base() and node.syntax_kind not in serialization_codes:
error('Node %s has no serialization code' % node.syntax_kind)
# Verify that no serialization code is used twice
used_codes = set()
for serialization_code in serialization_codes.values():
if serialization_code in used_codes:
error("Serialization code %d used twice" % serialization_code)
used_codes.add(serialization_code)
def get_serialization_code(syntax_kind):
return SYNTAX_NODE_SERIALIZATION_CODES[syntax_kind]<|fim▁end|> | |
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('slug', models.SlugField(max_length=255, blank=True)),<|fim▁hole|> ('members', models.ManyToManyField(related_name='member', to=settings.AUTH_USER_MODEL)),
('owner', models.ForeignKey(related_name='owner', to=settings.AUTH_USER_MODEL)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
]<|fim▁end|> | ('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)), |
<|file_name|>socket.js<|end_file_name|><|fim▁begin|>'use strict';
var phonetic = require('phonetic');
var socketio = require('socket.io');
var _ = require('underscore');
var load = function(http) {
var io = socketio(http);
var ioNamespace = '/';
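    // Pick a pronounceable room id (via phonetic) that is not already in use.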
var getEmptyRoomId = function() {
var roomId = null;
do {
            roomId = phonetic.generate().toLowerCase();
} while (io.nsps[ioNamespace].adapter.rooms[roomId]);
return roomId;
};
var sendRoomInfo = function(socket, info) {
if (!info.roomId) {
return;
}
var clients = io.nsps[ioNamespace].adapter.rooms[info.roomId];
io.sockets.in(info.roomId).emit('room.info', {
id: info.roomId,<|fim▁hole|>
var onJoin = function(socket, info, data) {
if (info.roomId) {
return;
}
info.roomId = data && data.roomId ? data.roomId : null;
if (!info.roomId || !io.nsps[ioNamespace].adapter.rooms[data.roomId]) {
info.roomId = getEmptyRoomId(socket);
console.log('[Socket] Assigning room id ' + info.roomId + ' to ip ' + socket.handshake.address);
} else {
console.log('[Socket] Assigning room id ' + info.roomId + ' to ip ' + socket.handshake.address + ' (from client)');
}
socket.join(info.roomId);
socket.emit('join', {
roomId: info.roomId
});
sendRoomInfo(socket, info);
};
var onEvent = function(socket, info, event, data) {
if (!info.roomId) {
return;
}
socket.broadcast.to(info.roomId).emit(event, data);
};
var onChunk = function(socket, info, data) {
socket.emit('file.ack', {
guid: data.guid
});
onEvent(socket, info, 'file.chunk', data);
};
var onConnection = function(socket) {
console.log('[Socket] New connection from ip ' + socket.handshake.address);
var info = {
roomId: null
};
socket.on('disconnect', function() {
console.log('[Socket] Connection from ip ' + socket.handshake.address + ' disconnected');
sendRoomInfo(socket, info);
});
socket.on('join', _.partial(onJoin, socket, info));
socket.on('file.start', _.partial(onEvent, socket, info, 'file.start'));
socket.on('file.chunk', _.partial(onChunk, socket, info));
}
io.on('connection', onConnection);
};
module.exports = {
load: load
};<|fim▁end|> | count: clients ? Object.keys(clients).length : 0
});
}; |
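// A minimal client-side sketch (hypothetical snippet, not part of this module)
// exercising the protocol implemented above; it assumes a server on
// localhost:3000 and the socket.io-client package:
//
//   var client = require('socket.io-client')('http://localhost:3000');
//   client.emit('join', {});   // no roomId given, so the server assigns one
//   client.on('join', function(data) { console.log('joined', data.roomId); });
//   client.on('room.info', function(info) { console.log(info.id, info.count); });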
<|file_name|>Pool.ts<|end_file_name|><|fim▁begin|>import { assert, tostring, wipe, LuaArray } from "@wowts/lua";
import { insert, remove } from "@wowts/table";
import { format } from "@wowts/string";
export class OvalePool<T> {
pool:LuaArray<T> = {};
size = 0;
unused = 0;
name: string;
constructor(name: string) {
this.name = name || "OvalePool";
}
Get() {
// OvalePool.StartProfiling(this.name);
assert(this.pool);
let item = remove(this.pool);
if (item) {
this.unused = this.unused - 1;
} else {
this.size = this.size + 1;
item = <T>{}
}
// OvalePool.StopProfiling(this.name);
return item;
}
Release(item:T):void {
// OvalePool.StartProfiling(this.name);
assert(this.pool);
this.Clean(item);
wipe(item);
insert(this.pool, item);
this.unused = this.unused + 1;
// OvalePool.StopProfiling(this.name);
}
Drain():void {
//OvalePool.StartProfiling(this.name);
this.pool = {}
this.size = this.size - this.unused;
this.unused = 0;
//OvalePool.StopProfiling(this.name);<|fim▁hole|> DebuggingInfo():string {
return format("Pool %s has size %d with %d item(s).", tostring(this.name), this.size, this.unused);
}
Clean(item: T): void {
    // Intentionally empty hook: subclasses override this to tear down item
    // internals before Release() wipes the object and returns it to the pool.
}
}<|fim▁end|> | } |
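// A minimal usage sketch (illustrative, not part of the original module):
// Get() reuses a previously released object when one is available, otherwise
// it grows the pool; Release() cleans, wipes and repools the item.
//
//   const pool = new OvalePool<{ value?: number }>("examplePool");
//   const item = pool.Get();
//   item.value = 42;
//   pool.Release(item);                 // item is wiped and kept for reuse
//   console.log(pool.DebuggingInfo());  // "Pool examplePool has size 1 with 1 item(s)."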
<|file_name|>simple.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import math
def area(a, b, c):
A1 = ((4*math.pi)*a**2)
A2 = ((4*math.pi)*b**2)
A3 = ((4*math.pi)*c**2)
Avg = (A1+A2+A3)/3
return Avg
def output(a, b, c, d, e):
return """
Hello there, {}!
equation: ((4*math.pi)*{}**2)((4*math.pi)*{}**2)((4*math.pi)*{}**2)
Calculating average area of three spheres...
the answer is: {}
""".format(a, b, c, d, e)
def main():
Name = raw_input("Name: ")
    Radius1 = raw_input("Radius of 1st sphere: ")
    Radius2 = raw_input("Radius of 2nd sphere: ")
    Radius3 = raw_input("Radius of 3rd sphere: ")
    e = area(int(Radius1), int(Radius2), int(Radius3))
    print output(Name, Radius1, Radius2, Radius3, e)
main()<|fim▁end|> | |
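# Worked example (a sketch, not part of the original script): for radii 1, 2
# and 3 the surface areas are 4*pi*1, 4*pi*4 and 4*pi*9, so area(1, 2, 3)
# returns 4*pi*(1 + 4 + 9)/3 = 56*pi/3, roughly 58.64.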
<|file_name|>Html5Exporter.ts<|end_file_name|><|fim▁begin|>import * as fs from 'fs-extra';
import * as path from 'path';
import {KhaExporter} from './KhaExporter';
import {convert} from '../Converter';
import {executeHaxe} from '../Haxe';
import {Options} from '../Options';
import {exportImage} from '../ImageTool';
import {Library} from '../Project';
import {VrApi} from '../VrApi';
export class Html5Exporter extends KhaExporter {
width: number;
height: number;
constructor(options: Options) {
super(options);
}
backend(): string {
return 'HTML5';
}
isDebugHtml5() {
return this.sysdir() === 'debug-html5';
}
isNode() {
return this.sysdir() === 'node';
}
isHtml5Worker() {
return this.sysdir() === 'html5worker';
}
haxeOptions(name: string, targetOptions: any, defines: Array<string>) {
defines.push('sys_g1');
defines.push('sys_g2');
defines.push('sys_g3');
defines.push('sys_a1');
defines.push('sys_a2');
defines.push('kha_js');
defines.push('kha_g1');
defines.push('kha_g2');
defines.push('kha_g3');
defines.push('kha_a1');
defines.push('kha_a2');
if (targetOptions.html5.noKeyboard) {
defines.push('kha_no_keyboard');<|fim▁hole|> if (this.options.vr === VrApi.WebVR) {
defines.push('kha_webvr');
}
let canvasId = targetOptions.html5.canvasId == null ? 'khanvas' : targetOptions.html5.canvasId;
defines.push('canvas_id=' + canvasId);
let scriptName = this.isHtml5Worker() ? 'khaworker' : 'kha';
if (targetOptions.html5.scriptName != null && !(this.isNode() || this.isDebugHtml5())) {
scriptName = targetOptions.html5.scriptName;
}
defines.push('script_name=' + scriptName);
let webgl = targetOptions.html5.webgl == null ? true : targetOptions.html5.webgl;
if (webgl) {
defines.push('sys_g4');
defines.push('kha_g4');
defines.push('kha_webgl');
} else {
defines.push('kha_html5_canvas');
}
if (this.isNode()) {
defines.push('nodejs');
defines.push('sys_node');
defines.push('sys_server');
defines.push('kha_node');
defines.push('kha_server');
}
else {
defines.push('sys_' + this.options.target);
defines.push('kha_' + this.options.target);
defines.push('kha_' + this.options.target + '_js');
}
if (this.isDebugHtml5()) {
this.parameters.push('-debug');
defines.push('sys_debug_html5');
defines.push('kha_debug_html5');
defines.push('kha_html5');
}
if (this.isHtml5Worker()) {
defines.push('js-classic');
}
return {
from: this.options.from.toString(),
to: path.join(this.sysdir(), scriptName + '.js'),
sources: this.sources,
libraries: this.libraries,
defines: defines,
parameters: this.parameters,
haxeDirectory: this.options.haxe,
system: this.sysdir(),
language: 'js',
width: this.width,
height: this.height,
name: name,
main: this.options.main,
};
}
async export(name: string, _targetOptions: any, haxeOptions: any): Promise<void> {
let targetOptions = {
canvasId: 'khanvas',
scriptName: this.isHtml5Worker() ? 'khaworker' : 'kha'
};
if (_targetOptions != null && _targetOptions.html5 != null) {
let userOptions = _targetOptions.html5;
if (userOptions.canvasId != null) targetOptions.canvasId = userOptions.canvasId;
if (userOptions.scriptName != null) targetOptions.scriptName = userOptions.scriptName;
}
fs.ensureDirSync(path.join(this.options.to, this.sysdir()));
if (this.isDebugHtml5()) {
let index = path.join(this.options.to, this.sysdir(), 'index.html');
let protoindex = fs.readFileSync(path.join(__dirname, '..', '..', 'Data', 'debug-html5', 'index.html'), {encoding: 'utf8'});
protoindex = protoindex.replace(/{Name}/g, name);
protoindex = protoindex.replace(/{Width}/g, '' + this.width);
protoindex = protoindex.replace(/{Height}/g, '' + this.height);
protoindex = protoindex.replace(/{CanvasId}/g, '' + targetOptions.canvasId);
protoindex = protoindex.replace(/{ScriptName}/g, '' + targetOptions.scriptName);
fs.writeFileSync(index.toString(), protoindex);
let pack = path.join(this.options.to, this.sysdir(), 'package.json');
let protopackage = fs.readFileSync(path.join(__dirname, '..', '..', 'Data', 'debug-html5', 'package.json'), {encoding: 'utf8'});
protopackage = protopackage.replace(/{Name}/g, name);
fs.writeFileSync(pack.toString(), protopackage);
let electron = path.join(this.options.to, this.sysdir(), 'electron.js');
let protoelectron = fs.readFileSync(path.join(__dirname, '..', '..', 'Data', 'debug-html5', 'electron.js'), {encoding: 'utf8'});
protoelectron = protoelectron.replace(/{Width}/g, '' + this.width);
protoelectron = protoelectron.replace(/{Height}/g, '' + this.height);
fs.writeFileSync(electron.toString(), protoelectron);
}
else if (this.isNode()) {
let pack = path.join(this.options.to, this.sysdir(), 'package.json');
let protopackage = fs.readFileSync(path.join(__dirname, '..', '..', 'Data', 'node', 'package.json'), 'utf8');
protopackage = protopackage.replace(/{Name}/g, name);
fs.writeFileSync(pack, protopackage);
let protoserver = fs.readFileSync(path.join(__dirname, '..', '..', 'Data', 'node', 'server.js'), 'utf8');
fs.writeFileSync(path.join(this.options.to, this.sysdir(), 'server.js'), protoserver);
}
else if (!this.isHtml5Worker()) {
let index = path.join(this.options.to, this.sysdir(), 'index.html');
if (!fs.existsSync(index)) {
let protoindex = fs.readFileSync(path.join(__dirname, '..', '..', 'Data', 'html5', 'index.html'), {encoding: 'utf8'});
protoindex = protoindex.replace(/{Name}/g, name);
protoindex = protoindex.replace(/{Width}/g, '' + this.width);
protoindex = protoindex.replace(/{Height}/g, '' + this.height);
protoindex = protoindex.replace(/{CanvasId}/g, '' + targetOptions.canvasId);
protoindex = protoindex.replace(/{ScriptName}/g, '' + targetOptions.scriptName);
fs.writeFileSync(index.toString(), protoindex);
}
}
}
/*copyMusic(platform, from, to, encoders, callback) {
Files.createDirectories(this.directory.resolve(this.sysdir()).resolve(to).parent());
Converter.convert(from, this.directory.resolve(this.sysdir()).resolve(to + '.ogg'), encoders.oggEncoder, (ogg) => {
Converter.convert(from, this.directory.resolve(this.sysdir()).resolve(to + '.mp4'), encoders.aacEncoder, (mp4) => {
var files = [];
if (ogg) files.push(to + '.ogg');
if (mp4) files.push(to + '.mp4');
callback(files);
});
});
}*/
async copySound(platform: string, from: string, to: string, options: any) {
fs.ensureDirSync(path.join(this.options.to, this.sysdir(), path.dirname(to)));
let ogg = await convert(from, path.join(this.options.to, this.sysdir(), to + '.ogg'), this.options.ogg);
let mp4 = false;
let mp3 = false;
if (!this.isDebugHtml5()) {
mp4 = await convert(from, path.join(this.options.to, this.sysdir(), to + '.mp4'), this.options.aac);
if (!mp4) {
mp3 = await convert(from, path.join(this.options.to, this.sysdir(), to + '.mp3'), this.options.mp3);
}
}
let files: string[] = [];
if (ogg) files.push(to + '.ogg');
if (mp4) files.push(to + '.mp4');
if (mp3) files.push(to + '.mp3');
return files;
}
async copyImage(platform: string, from: string, to: string, options: any, cache: any) {
let format = await exportImage(this.options.kha, from, path.join(this.options.to, this.sysdir(), to), options, undefined, false, false, cache);
return [to + '.' + format];
}
async copyBlob(platform: string, from: string, to: string, options: any) {
fs.copySync(from.toString(), path.join(this.options.to, this.sysdir(), to), { overwrite: true });
return [to];
}
async copyVideo(platform: string, from: string, to: string, options: any) {
fs.ensureDirSync(path.join(this.options.to, this.sysdir(), path.dirname(to)));
let mp4 = false;
if (!this.isDebugHtml5()) {
mp4 = await convert(from, path.join(this.options.to, this.sysdir(), to + '.mp4'), this.options.h264);
}
let webm = await convert(from, path.join(this.options.to, this.sysdir(), to + '.webm'), this.options.webm);
let files: string[] = [];
if (mp4) files.push(to + '.mp4');
if (webm) files.push(to + '.webm');
return files;
}
}<|fim▁end|> | }
|
<|file_name|>PermalinkProvider.js<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2008-2015 The Open Source Geospatial Foundation
*
* Published under the BSD license.
* See https://github.com/geoext/geoext2/blob/master/license.txt for the full
* text of the license.
*/
/*
* @include OpenLayers/Util.js
* @requires GeoExt/Version.js
*/
/**
* The permalink provider.
*
* Sample code displaying a new permalink each time the map is moved:
*
* // create permalink provider
* var permalinkProvider = Ext.create('GeoExt.state.PermalinkProvider', {});
* // set it in the state manager
* Ext.state.Manager.setProvider(permalinkProvider);
* // create a map panel, and make it stateful
* var mapPanel = Ext.create('GeoExt.panel.Map', {
* renderTo: "map",
* layers: [
* new OpenLayers.Layer.WMS(
* "Global Imagery",
* "http://maps.opengeo.org/geowebcache/service/wms",
* {layers: "bluemarble"}
* )
* ],
* stateId: "map",
* prettyStateKeys: true // for pretty permalinks
* });
* // display permalink each time state is changed
* permalinkProvider.on({
* statechange: function(provider, name, value) {
* alert(provider.getLink());
* }
* });
*
* @class GeoExt.state.PermalinkProvider
*/
Ext.define('GeoExt.state.PermalinkProvider', {
extend : 'Ext.state.Provider',
requires : [
'GeoExt.Version'
],
alias : 'widget.gx_permalinkprovider',
/**
* Creates the provider and reads the initial state from config.url.
*/
constructor: function(config){
this.callParent(arguments);
config = config || {};
var url = config.url;
delete config.url;
Ext.apply(this, config);
this.state = this.readURL(url);
},
/**
* Specifies whether type of state values should be encoded and decoded.
* Set it to `false` if you work with components that don't require
* encoding types, and want pretty permalinks.
*
* @property {Boolean}
* @private
*/
encodeType: true,
/**
* Create a state object from a URL.
*
* @param url {String} The URL to get the state from.
* @return {Object} The state object.
* @private
*/
readURL: function(url) {
var state = {};
var params = OpenLayers.Util.getParameters(url);
var k, split, stateId;
for(k in params) {
if(params.hasOwnProperty(k)) {
split = k.split("_");
if(split.length > 1) {
stateId = split[0];
state[stateId] = state[stateId] || {};
state[stateId][split.slice(1).join("_")] = this.encodeType ?
this.decodeValue(params[k]) : params[k];
}
}
}
return state;
},
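// Illustrative example (not from the original source): with encodeType on,
// a query string like "?map_zoom=n%3A5" decodes to the state {map: {zoom: 5}},
// since "n:5" is Ext's typed encoding of the number 5.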
/**
* Returns the permalink corresponding to the current state.
*
* @param base {String} The base URL, optional.
* @return {String} The permalink.
*/
getLink: function(base) {
base = base || document.location.href;
var params = {};
var id, k, state = this.state;
for(id in state) {
if(state.hasOwnProperty(id)) {
for(k in state[id]) {<|fim▁hole|> }
}
// merge params in the URL into the state params
OpenLayers.Util.applyDefaults(
params, OpenLayers.Util.getParameters(base));
var paramsStr = OpenLayers.Util.getParameterString(params);
var qMark = base.indexOf("?");
if(qMark > 0) {
base = base.substring(0, qMark);
}
return Ext.urlAppend(base, paramsStr);
}
});<|fim▁end|> | params[id + "_" + k] = this.encodeType ?
unescape(this.encodeValue(state[id][k])) : state[id][k];
} |
<|file_name|>video.rs<|end_file_name|><|fim▁begin|>pub const WIDTH: u8 = 64;
pub const HEIGHT: u8 = 32;
pub const BYTES_WIDTH: u8 = WIDTH / 8;
/// CHIP-8 glyphs, see: mattmik.com/chip8.html
pub static GLYPHS: [u8; 16*5] = [
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80 // F
];
pub struct Video {
pub data: [u8; BYTES_WIDTH as usize * HEIGHT as usize],
pub screen_modified: bool,
}
impl Video {
pub fn new() -> Video {
Video {
data: [0x00; BYTES_WIDTH as usize * HEIGHT as usize],
screen_modified: true,
}
}
pub fn clear(&mut self) {
self.screen_modified = true;
for px in self.data.iter_mut() {
*px = 0x0;
}
}
pub fn draw(&mut self, x: u8, y: u8, val: u8) -> u8 {
self.screen_modified = true;
let x = x % WIDTH;
let y = y % HEIGHT;
let i = (x / 8 + y * BYTES_WIDTH) as usize;
let shift = x % 8;
// This draw command was not byte aligned, so we need xor over 2 bytes
if shift != 0 {
let i2 = ((x / 8 + 1) % BYTES_WIDTH + y * BYTES_WIDTH) as usize;
let lval = val >> shift as usize;
let rval = val << (8 - shift as usize);
let lold = self.data[i];
self.data[i] ^= lval;
let rold = self.data[i2];
self.data[i2] ^= rval;
<|fim▁hole|> if flipped(lold, self.data[i]) || flipped(rold, self.data[i2]) { 1 } else { 0 }
}
else {
let old = self.data[i];
self.data[i] ^= val;
// If any bits were flipped as a result of drawing the sprite then return 1
if flipped(old, self.data[i]) { 1 } else { 0 }
}
}
}
/// Returns true if any of the bits have been flipped from set to unset
fn flipped(v1: u8, v2: u8) -> bool {
v1 & !v2 != 0
}
#[test]
fn test_flipped() {
assert!(flipped(0b_0000_0000, 0b_0001_0000) == false);
assert!(flipped(0b_0001_0000, 0b_0000_0000) == true);
assert!(flipped(0b_1010_0101, 0b_1111_1111) == false);
assert!(flipped(0b_1010_0101, 0b_0000_0000) == true);
assert!(flipped(0b_1010_0101, 0b_1010_0100) == true);
assert!(flipped(0b_1111_0000, 0b_1111_1111) == false);
assert!(flipped(0b_1111_0000, 0b_0000_1111) == true);
}<|fim▁end|> | // If any bits were flipped as a result of drawing the sprite then return 1 |
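// A minimal additional test sketch (not part of the original file) showing the
// XOR draw semantics: redrawing the same sprite byte erases it and reports the
// flip as a collision (return value 1).
#[test]
fn test_draw_xor_collision() {
    let mut video = Video::new();
    assert!(video.draw(0, 0, 0xF0) == 0); // first draw sets pixels, nothing flips
    assert!(video.draw(0, 0, 0xF0) == 1); // identical draw erases them -> collision
    assert!(video.data[0] == 0x00);       // the screen byte is cleared again
}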
<|file_name|>5_2.rs<|end_file_name|><|fim▁begin|>/*
rush supports various brace styles,
and also supports omitting the braces around control structures, as in Python;
note that when an over-long expression wraps onto the next line, the parentheses cannot be omitted
*/
void main()
{
if(1||
2)
printl "true"
if 1
printl 2
printl 3
if 1 {
printl 2
printl 3
}
if(1)
{
printl(2);
printl(3);
<|fim▁hole|>
if 1 {printl(2);}
func(1,
2)
func(1,2
)
}
void func(int a,
int b)
{
printl a+b
}<|fim▁end|> | }
|
<|file_name|>component.js<|end_file_name|><|fim▁begin|>import Ember from 'ember';
const $ = Ember.$;
import layout from './template';
import styles from './styles';
/*
* Turn header transparent after scrolling
* down a few pixels:<|fim▁hole|>const HEADER_OFFSET = 60;
export default Ember.Component.extend({
layout,
styles,
tagName: 'nav',
localClassNames: 'nav',
localClassNameBindings: [
'isSmall',
'invert',
'showComponents',
],
isSmall: false,
componentsTitle: "More components",
showComponents: false,
invertLogoColors: Ember.computed('isSmall', 'invert', function() {
return !this.get('isSmall') && !this.get('invert');
}),
githubURL: Ember.computed('config', function() {
return this.get('config.githubURL');
}),
_scrollListener: null,
didInsertElement() {
this._scrollListener = Ember.run.bind(this, this.didScroll);
$(window).on('scroll', this._scrollListener);
},
willDestroyElement() {
if (this._scrollListener) {
$(window).off('scroll', this._scrollListener);
}
},
didScroll() {
let scrollPos = $(window).scrollTop();
let reachedOffset = (scrollPos > HEADER_OFFSET);
this.set('isSmall', reachedOffset);
},
/*
* Determine if the user is on a touch device:
*/
hasTouch: Ember.computed(function() {
return (('ontouchstart' in window) || window.DocumentTouch);
}),
actions: {
toggleComponents(value, e) {
if (e && e.stopPropagation) {
e.stopPropagation();
}
if (value !== null && value !== undefined) {
this.set('showComponents', value);
} else {
this.toggleProperty('showComponents');
}
},
},
});<|fim▁end|> | */ |
<|file_name|>test.js<|end_file_name|><|fim▁begin|>var assert = require("should");
var exec = require('child_process').exec;
var restify = require('restify');
var boplishHost = exec(__dirname + '/../run.js --bootstrap ws://chris.ac:5000 --port 10000',
function (error, stdout, stderr) {
console.log('stdout: ' + stdout);
console.log('stderr: ' + stderr);
if (error !== null) {
console.log('exec error: ' + error);
}
});
describe('BOPlish Emulation Host test', function() {
this.timeout(5000);
var restClient;
it('should create client', function() {
restClient = restify.createJsonClient({
url: 'http://localhost:10000',
version: '*'
});
});
var peerId;
it('should start Peer', function(done) {
restClient.post('/peer', function(err, req, res, obj) {<|fim▁hole|> done();
});
});
it('should list Peer Ids Peer', function(done) {
restClient.get('/peers', function(err, req, res, obj) {
assert.ifError(err);
obj.should.containEql(peerId);
done();
});
});
it('should get Peer status', function(done) {
restClient.get('/peer' + '/' + peerId, function(err, req, res, obj) {
assert.ifError(err);
obj.id.should.not.be.empty;
obj.started.should.not.be.empty;
obj.bootstrapNode.should.not.be.empty;
done();
});
});
it('should stop Peer', function(done) {
restClient.del('/peer' + '/' + peerId, function(err, req, res, obj) {
assert.ifError(err);
obj.id.should.not.be.empty;
obj.status.should.equal('killed');
done();
});
});
it('should stop all Peers', function(done) {
var peerId1, peerId2;
restClient.post('/peer', function(err, req, res, obj) {
assert.ifError(err);
peerId1 = obj.id;
restClient.post('/peer', function(err, req, res, obj) {
assert.ifError(err);
peerId2 = obj.id;
restClient.del('/killAll', function(err, req, res, obj) {
restClient.get('/listAllIds', function(err, req, res, obj) {
assert.ifError(err);
obj.should.be.empty;
done();
});
});
});
});
});
it('should get Host status', function(done) {
restClient.get('/status', function(err, req, res, obj) {
assert.ifError(err);
obj.startDate.should.not.be.empty;
obj.bootstrapNode.should.not.be.empty;
obj.numberOfPeers.should.equal(0);
done();
});
});
it('should request log handler');
after(function() {
boplishHost.kill();
});
});<|fim▁end|> | assert.ifError(err);
peerId = obj.id;
peerId.should.not.be.empty; |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
def build_from_state(state):
if state.scad_type is OPENSCAD:
build_with_openscad(state)
if state.scad_type is COFFEESCAD:
build_with_coffeescad(state)<|fim▁end|> | from tailorscad.builder.openscad import build_with_openscad
from tailorscad.builder.coffeescad import build_with_coffeescad
from tailorscad.constants import OPENSCAD
from tailorscad.constants import COFFEESCAD |
<|file_name|>classes.ts<|end_file_name|><|fim▁begin|>/**
* Anonymous class
*/
class A {
a: any;
constructor(a: number) {
this.a = a;
}
}
/**
* Named class
*/
class B {
a: any;
b: any;
constructor(a, b) {
this.a = a;
this.b = b;
}
}
/**
* Named class extension
*/<|fim▁hole|> this.b = b;
}
}
/**
* Anonymous class extension
*/
class D extends B {
c: any;
constructor(a, b, c) {
super(a, b);
this.c = c;
}
}
/**
* goog.defineClass based classes
*/
class E extends C {
constructor(a, b) {
super(a, b);
}
}
let nested: any = {};
nested.klass = class {};
class F {
// inline comment
/**
* block comment
*/
constructor() {}
}
class G {
/**
* ES6 method short hand.
*/
method() {}
}<|fim▁end|> | class C extends A {
b: any;
constructor(a, b) {
super(a); |
<|file_name|>FormPost.js<|end_file_name|><|fim▁begin|>import React, { Component } from 'react';
export default React.createClass({
getInitialState: function () {
return { title: '', body: '' };
},
handleChangeTitle: function (e) {
this.setState({ title: e.target.value });
},
handleChangeBody: function (e) {<|fim▁hole|>
handleSubmit: function (e) {
e.preventDefault();
this.props.addPost(this.state);
},
render() {
return (
<div>
<h3>New post</h3>
<form onSubmit={this.handleSubmit}>
<input type="text"
       value={this.state.title}
       placeholder="title"
       onChange={this.handleChangeTitle} />
<br />
<textarea placeholder="body"
          value={this.state.body}
          onChange={this.handleChangeBody} >
</textarea>
<button>Submit</button>
</form>
</div>
);
},
});<|fim▁end|> | this.setState({ body: e.target.value });
}, |
<|file_name|>fields.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import json
from odoo import fields
def monkey_patch(cls):
""" Return a method decorator to monkey-patch the given class. """
def decorate(func):
name = func.__name__
func.super = getattr(cls, name, None)
setattr(cls, name, func)
return func
return decorate
#
# Implement sparse fields by monkey-patching fields.Field
#
fields.Field.__doc__ += """
.. _field-sparse:
.. rubric:: Sparse fields
Sparse fields have a very small probability of being not null. Therefore
many such fields can be serialized compactly into a common location, the
latter being a so-called "serialized" field.
:param sparse: the name of the field where the value of this field must
be stored.
"""
@monkey_patch(fields.Field)
def _get_attrs(self, model, name):
attrs = _get_attrs.super(self, model, name)
if attrs.get('sparse'):
# by default, sparse fields are not stored and not copied
attrs['store'] = False
attrs['copy'] = attrs.get('copy', False)
attrs['compute'] = self._compute_sparse
if not attrs.get('readonly'):
attrs['inverse'] = self._inverse_sparse
return attrs
@monkey_patch(fields.Field)
def _compute_sparse(self, records):
for record in records:
values = record[self.sparse]
record[self.name] = values.get(self.name)
if self.relational:
for record in records:
record[self.name] = record[self.name].exists()
@monkey_patch(fields.Field)
def _inverse_sparse(self, records):
for record in records:
values = record[self.sparse]
value = self.convert_to_read(record[self.name], record, use_name_get=False)
if value:
if values.get(self.name) != value:
values[self.name] = value
record[self.sparse] = values
else:
if self.name in values:
values.pop(self.name)
record[self.sparse] = values
#
# Definition and implementation of serialized fields
#
class Serialized(fields.Field):
""" Serialized fields provide the storage for sparse fields. """
type = 'serialized'
_slots = {
'prefetch': False, # not prefetched by default
}<|fim▁hole|>
def convert_to_cache(self, value, record, validate=True):
# cache format: dict
value = value or {}
return value if isinstance(value, dict) else json.loads(value)
fields.Serialized = Serialized<|fim▁end|> | column_type = ('text', 'text')
def convert_to_column(self, value, record, values=None):
return json.dumps(value) |
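# A minimal usage sketch (hypothetical model, not part of this module): a
# Serialized field supplies the storage column, and any field declaring
# sparse='data' is packed into it by the monkey-patched compute/inverse above.
#
#   from odoo import models, fields
#
#   class ResPartner(models.Model):
#       _inherit = 'res.partner'
#
#       data = fields.Serialized()             # common JSON text column
#       nickname = fields.Char(sparse='data')  # serialized into `data`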
<|file_name|>constants.py<|end_file_name|><|fim▁begin|>"""Let's Encrypt constants."""
import logging
from acme import challenges
SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
"""Setuptools entry point group name for plugins."""
CLI_DEFAULTS = dict(
config_files=["/etc/letsencrypt/cli.ini"],
verbose_count=-(logging.WARNING / 10),
server="https://www.letsencrypt-demo.org/acme/new-reg",
rsa_key_size=2048,
rollback_checkpoints=0,
config_dir="/etc/letsencrypt",
work_dir="/var/lib/letsencrypt",
backup_dir="/var/lib/letsencrypt/backups",
key_dir="/etc/letsencrypt/keys",
certs_dir="/etc/letsencrypt/certs",
cert_path="/etc/letsencrypt/certs/cert-letsencrypt.pem",
chain_path="/etc/letsencrypt/certs/chain-letsencrypt.pem",
renewer_config_file="/etc/letsencrypt/renewer.conf",
no_verify_ssl=False,
dvsni_port=challenges.DVSNI.PORT,
)<|fim▁hole|> renewer_config_file="/etc/letsencrypt/renewer.conf",
renewal_configs_dir="/etc/letsencrypt/configs",
archive_dir="/etc/letsencrypt/archive",
live_dir="/etc/letsencrypt/live",
renewer_enabled="yes",
renew_before_expiry="30 days",
deploy_before_expiry="20 days",
)
"""Defaults for renewer script."""
EXCLUSIVE_CHALLENGES = frozenset([frozenset([
challenges.DVSNI, challenges.SimpleHTTP])])
"""Mutually exclusive challenges."""
ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"]
"""List of possible :class:`letsencrypt.interfaces.IInstaller`
enhancements.
List of expected options parameters:
- redirect: None
- http-header: TODO
- ocsp-stapling: TODO
- spdy: TODO
"""
CONFIG_DIRS_MODE = 0o755
"""Directory mode for ``.IConfig.config_dir`` et al."""
TEMP_CHECKPOINT_DIR = "temp_checkpoint"
"""Temporary checkpoint directory (relative to IConfig.work_dir)."""
IN_PROGRESS_DIR = "IN_PROGRESS"
"""Directory used before a permanent checkpoint is finalized (relative to
IConfig.work_dir)."""
CERT_KEY_BACKUP_DIR = "keys-certs"
"""Directory where all certificates and keys are stored (relative to
IConfig.work_dir. Used for easy revocation."""
ACCOUNTS_DIR = "accounts"
"""Directory where all accounts are saved."""
ACCOUNT_KEYS_DIR = "keys"
"""Directory where account keys are saved. Relative to ACCOUNTS_DIR."""
REC_TOKEN_DIR = "recovery_tokens"
"""Directory where all recovery tokens are saved (relative to
IConfig.work_dir)."""<|fim▁end|> | """Defaults for CLI flags and `.IConfig` attributes."""
RENEWER_DEFAULTS = dict( |
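# A short consumption sketch (illustrative, not part of this module):
#
#   from letsencrypt import constants
#   key_size = constants.CLI_DEFAULTS["rsa_key_size"]   # 2048
#   dirs_mode = constants.CONFIG_DIRS_MODE              # 0o755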
<|file_name|>error_sp_hom.py<|end_file_name|><|fim▁begin|>'''
Hom family of models based on: [Drukker2013]_
Following: [Anselin2011]_
'''
__author__ = "Luc Anselin [email protected], Daniel Arribas-Bel [email protected]"
from scipy import sparse as SP
import numpy as np
from numpy import linalg as la
import ols as OLS
from pysal import lag_spatial
from utils import power_expansion, set_endog, iter_msg, sp_att
from utils import get_A1_hom, get_A2_hom, get_A1_het, optim_moments
from utils import get_spFilter, get_lags, _moments2eqs
from utils import spdot, RegressionPropsY, set_warn
import twosls as TSLS
import user_output as USER
import summary_output as SUMMARY
__all__ = ["GM_Error_Hom", "GM_Endog_Error_Hom", "GM_Combo_Hom"]
class BaseGM_Error_Hom(RegressionPropsY):
'''
GMM method for a spatial error model with homoskedasticity (note: no
consistency checks, diagnostics or constant added); based on
Drukker et al. (2013) [Drukker2013]_, following Anselin (2011) [Anselin2011]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : Sparse matrix
Spatial weights sparse matrix
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
xtx : float
X'X
Examples
--------
>>> import numpy as np
>>> import pysal
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("CRIME"))
>>> X = np.array(X).T
>>> X = np.hstack((np.ones(y.shape),X))
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> w.transform = 'r'
Model commands
>>> reg = BaseGM_Error_Hom(y, X, w=w.sparse, A1='hom_sc')
>>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
[[ 47.9479 12.3021]
[ 0.7063 0.4967]
[ -0.556 0.179 ]
[ 0.4129 0.1835]]
>>> print np.around(reg.vm, 4) #doctest: +SKIP
[[ 1.51340700e+02 -5.29060000e+00 -1.85650000e+00 -2.40000000e-03]
[ -5.29060000e+00 2.46700000e-01 5.14000000e-02 3.00000000e-04]
[ -1.85650000e+00 5.14000000e-02 3.21000000e-02 -1.00000000e-04]
[ -2.40000000e-03 3.00000000e-04 -1.00000000e-04 3.37000000e-02]]
'''
def __init__(self, y, x, w,
max_iter=1, epsilon=0.00001, A1='hom_sc'):
if A1 == 'hom':
wA1 = get_A1_hom(w)
elif A1 == 'hom_sc':
wA1 = get_A1_hom(w, scalarKP=True)
elif A1 == 'het':
wA1 = get_A1_het(w)
wA2 = get_A2_hom(w)
# 1a. OLS --> \tilde{\delta}
ols = OLS.BaseOLS(y=y, x=x)
self.x, self.y, self.n, self.k, self.xtx = ols.x, ols.y, ols.n, ols.k, ols.xtx
# 1b. GM --> \tilde{\rho}
moments = moments_hom(w, wA1, wA2, ols.u)
lambda1 = optim_moments(moments)
lambda_old = lambda1
self.iteration, eps = 0, 1
while self.iteration < max_iter and eps > epsilon:
# 2a. SWLS --> \hat{\delta}
x_s = get_spFilter(w, lambda_old, self.x)
y_s = get_spFilter(w, lambda_old, self.y)
ols_s = OLS.BaseOLS(y=y_s, x=x_s)
self.predy = spdot(self.x, ols_s.betas)
self.u = self.y - self.predy
# 2b. GM 2nd iteration --> \hat{\rho}
moments = moments_hom(w, wA1, wA2, self.u)
psi = get_vc_hom(w, wA1, wA2, self, lambda_old)[0]
lambda2 = optim_moments(moments, psi)
eps = abs(lambda2 - lambda_old)
lambda_old = lambda2
self.iteration += 1
self.iter_stop = iter_msg(self.iteration, max_iter)
# Output
self.betas = np.vstack((ols_s.betas, lambda2))
self.vm, self.sig2 = get_omega_hom_ols(
w, wA1, wA2, self, lambda2, moments[0])
self.e_filtered = self.u - lambda2 * w * self.u
self._cache = {}
class GM_Error_Hom(BaseGM_Error_Hom):
'''
GMM method for a spatial error model with homoskedasticity, with results
and diagnostics; based on Drukker et al. (2013) [Drukker2013]_, following Anselin
(2011) [Anselin2011]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : pysal W object
Spatial weights object
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
xtx : float
X'X
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Extract the HOVAL column (home values) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) and CRIME (crime) vectors from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("CRIME"))
>>> X = np.array(X).T
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that incorporates the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows us to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We are all set with the preliminaries, so we are good to run the model. In this
case, we will need the variables and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> reg = GM_Error_Hom(y, X, w=w, A1='hom_sc', name_y='home value', name_x=['income', 'crime'], name_ds='columbus')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. This class offers an error model that assumes
homoskedasticity but that, unlike the models from
``pysal.spreg.error_sp``, allows for inference on the spatial
parameter. This is why you obtain as many coefficient estimates as
standard errors, which you calculate taking the square root of the
diagonal of the variance-covariance matrix of the parameters:
>>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
[[ 47.9479 12.3021]
[ 0.7063 0.4967]
[ -0.556 0.179 ]
[ 0.4129 0.1835]]
'''
def __init__(self, y, x, w,
max_iter=1, epsilon=0.00001, A1='hom_sc',
vm=False, name_y=None, name_x=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
x_constant = USER.check_constant(x)
BaseGM_Error_Hom.__init__(self, y=y, x=x_constant, w=w.sparse, A1=A1,
max_iter=max_iter, epsilon=epsilon)
self.title = "SPATIALLY WEIGHTED LEAST SQUARES (HOM)"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_x.append('lambda')
self.name_w = USER.set_name_w(name_w, w)
SUMMARY.GM_Error_Hom(reg=self, w=w, vm=vm)
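# A short follow-up sketch (attribute access only; numeric output omitted so
# nothing is fabricated) continuing the doctest in the class docstring:
#
#   reg.betas          # kx1 coefficient vector, with lambda listed last
#   reg.std_err        # standard errors of the betas
#   print reg.summary  # formatted regression results and diagnostics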
class BaseGM_Endog_Error_Hom(RegressionPropsY):
'''
GMM method for a spatial error model with homoskedasticity and
endogenous variables (note: no consistency checks, diagnostics or constant
added); based on Drukker et al. (2013) [Drukker2013]_, following Anselin (2011)
[Anselin2011]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : Sparse matrix
Spatial weights sparse matrix
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
hth : float
H'H
Examples
--------
>>> import numpy as np
>>> import pysal
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
>>> X = np.hstack((np.ones(y.shape),X))
>>> yd = []
>>> yd.append(db.by_col("CRIME"))
>>> yd = np.array(yd).T
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> w.transform = 'r'
>>> reg = BaseGM_Endog_Error_Hom(y, X, yd, q, w=w.sparse, A1='hom_sc')
>>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
[[ 55.3658 23.496 ]
[ 0.4643 0.7382]
[ -0.669 0.3943]
[ 0.4321 0.1927]]
'''
def __init__(self, y, x, yend, q, w,
max_iter=1, epsilon=0.00001, A1='hom_sc'):
if A1 == 'hom':
wA1 = get_A1_hom(w)
elif A1 == 'hom_sc':
wA1 = get_A1_hom(w, scalarKP=True)
elif A1 == 'het':
wA1 = get_A1_het(w)
wA2 = get_A2_hom(w)
# 1a. S2SLS --> \tilde{\delta}
tsls = TSLS.BaseTSLS(y=y, x=x, yend=yend, q=q)
self.x, self.z, self.h, self.y, self.hth = tsls.x, tsls.z, tsls.h, tsls.y, tsls.hth
self.yend, self.q, self.n, self.k = tsls.yend, tsls.q, tsls.n, tsls.k
# 1b. GM --> \tilde{\rho}
moments = moments_hom(w, wA1, wA2, tsls.u)
lambda1 = optim_moments(moments)
lambda_old = lambda1
self.iteration, eps = 0, 1
while self.iteration < max_iter and eps > epsilon:
# 2a. GS2SLS --> \hat{\delta}
x_s = get_spFilter(w, lambda_old, self.x)
y_s = get_spFilter(w, lambda_old, self.y)
yend_s = get_spFilter(w, lambda_old, self.yend)
tsls_s = TSLS.BaseTSLS(y=y_s, x=x_s, yend=yend_s, h=self.h)
self.predy = spdot(self.z, tsls_s.betas)
self.u = self.y - self.predy
# 2b. GM 2nd iteration --> \hat{\rho}
moments = moments_hom(w, wA1, wA2, self.u)
psi = get_vc_hom(w, wA1, wA2, self, lambda_old, tsls_s.z)[0]
lambda2 = optim_moments(moments, psi)
eps = abs(lambda2 - lambda_old)
lambda_old = lambda2
self.iteration += 1
self.iter_stop = iter_msg(self.iteration, max_iter)
# Output
self.betas = np.vstack((tsls_s.betas, lambda2))
self.vm, self.sig2 = get_omega_hom(
w, wA1, wA2, self, lambda2, moments[0])
self.e_filtered = self.u - lambda2 * w * self.u
self._cache = {}
class GM_Endog_Error_Hom(BaseGM_Endog_Error_Hom):
'''
GMM method for a spatial error model with homoskedasticity and endogenous
variables, with results and diagnostics; based on Drukker et al. (2013)
[Drukker2013]_, following Anselin (2011) [Anselin2011]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : pysal W object
Spatial weights object
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
sig2 : float
Sigma squared used in computations
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
Names of all instruments used in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
hth : float
H'H
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Extract the HOVAL column (home values) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
Extract the INC (income) vector from the DBF to be used as an
independent variable in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
In this case we consider CRIME (crime rates) is an endogenous regressor.
We tell the model that this is so by passing it in a different parameter
from the exogenous variables (x).
>>> yd = []
>>> yd.append(db.by_col("CRIME"))
>>> yd = np.array(yd).T
Because we have endogenous variables, to obtain a correct estimate of the
model, we need to instrument for CRIME. We use DISCBD (distance to the
CBD) for this and hence put it in the instruments parameter, 'q'.
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that incorporates the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows us to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We are all set with the preliminaries, so we are good to run the model. In this
case, we will need the variables (exogenous and endogenous), the
instruments and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> reg = GM_Endog_Error_Hom(y, X, yd, q, w=w, A1='hom_sc', name_x=['inc'], name_y='hoval', name_yend=['crime'], name_q=['discbd'], name_ds='columbus')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. This class offers an error model that assumes
homoskedasticity but that, unlike the models from
``pysal.spreg.error_sp``, allows for inference on the spatial
parameter. Hence, we find the same number of betas as of standard errors,
which we calculate taking the square root of the diagonal of the
variance-covariance matrix:
>>> print reg.name_z
['CONSTANT', 'inc', 'crime', 'lambda']
>>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
[[ 55.3658 23.496 ]
[ 0.4643 0.7382]
[ -0.669 0.3943]
[ 0.4321 0.1927]]
'''
def __init__(self, y, x, yend, q, w,
max_iter=1, epsilon=0.00001, A1='hom_sc',
vm=False, name_y=None, name_x=None,
name_yend=None, name_q=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x, yend, q)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
x_constant = USER.check_constant(x)
BaseGM_Endog_Error_Hom.__init__(
self, y=y, x=x_constant, w=w.sparse, yend=yend, q=q,
A1=A1, max_iter=max_iter, epsilon=epsilon)
self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HOM)"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_yend = USER.set_name_yend(name_yend, yend)
self.name_z = self.name_x + self.name_yend
self.name_z.append('lambda') # listing lambda last
self.name_q = USER.set_name_q(name_q, q)
self.name_h = USER.set_name_h(self.name_x, self.name_q)
self.name_w = USER.set_name_w(name_w, w)
SUMMARY.GM_Endog_Error_Hom(reg=self, w=w, vm=vm)
class BaseGM_Combo_Hom(BaseGM_Endog_Error_Hom):
'''
GMM method for a spatial lag and error model with homoskedasticity and
endogenous variables (note: no consistency checks, diagnostics or constant
added); based on Drukker et al. (2013) [Drukker2013]_, following Anselin (2011)
[Anselin2011]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : Sparse matrix
Spatial weights sparse matrix
w_lags : integer
Orders of W to include as instruments for the spatially
lagged dependent variable. For example, w_lags=1, then
instruments are WX; if w_lags=2, then WX, WWX; and so on.
lag_q : boolean
If True, then include spatial lags of the additional
instruments (q).
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
hth : float
H'H
Examples
--------
>>> import numpy as np
>>> import pysal
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> w.transform = 'r'
>>> w_lags = 1
>>> yd2, q2 = pysal.spreg.utils.set_endog(y, X, w, None, None, w_lags, True)
>>> X = np.hstack((np.ones(y.shape),X))
Example only with spatial lag
>>> reg = BaseGM_Combo_Hom(y, X, yend=yd2, q=q2, w=w.sparse, A1='hom_sc')
>>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
[[ 10.1254 15.2871]
[ 1.5683 0.4407]
[ 0.1513 0.4048]
[ 0.2103 0.4226]]
Example with both spatial lag and other endogenous variables
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
>>> yd = []
>>> yd.append(db.by_col("CRIME"))
>>> yd = np.array(yd).T
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
>>> yd2, q2 = pysal.spreg.utils.set_endog(y, X, w, yd, q, w_lags, True)
>>> X = np.hstack((np.ones(y.shape),X))
>>> reg = BaseGM_Combo_Hom(y, X, yd2, q2, w=w.sparse, A1='hom_sc')
>>> betas = np.array([['CONSTANT'],['inc'],['crime'],['W_hoval'],['lambda']])
>>> print np.hstack((betas, np.around(np.hstack((reg.betas, np.sqrt(reg.vm.diagonal()).reshape(5,1))),5)))
[['CONSTANT' '111.7705' '67.75191']
['inc' '-0.30974' '1.16656']
['crime' '-1.36043' '0.6841']
['W_hoval' '-0.52908' '0.84428']
['lambda' '0.60116' '0.18605']]
'''
def __init__(self, y, x, yend=None, q=None,
w=None, w_lags=1, lag_q=True,
max_iter=1, epsilon=0.00001, A1='hom_sc'):
BaseGM_Endog_Error_Hom.__init__(
self, y=y, x=x, w=w, yend=yend, q=q, A1=A1,
max_iter=max_iter, epsilon=epsilon)
class GM_Combo_Hom(BaseGM_Combo_Hom):
'''
GMM method for a spatial lag and error model with homoskedasticity and
endogenous variables, with results and diagnostics; based on Drukker et
al. (2013) [Drukker2013]_, following Anselin (2011) [Anselin2011]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : pysal W object
Spatial weights object (always necessary)
w_lags : integer
Orders of W to include as instruments for the spatially
lagged dependent variable. For example, if w_lags=1, then
instruments are WX; if w_lags=2, then WX, WWX; and so on.
lag_q : boolean
If True, then include spatial lags of the additional
instruments (q).
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
e_pred : array
nx1 array of residuals (using reduced form)
predy : array
nx1 array of predicted y values
predy_e : array
nx1 array of predicted y values (using reduced form)
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each<|fim▁hole|> Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
pr2_e : float
Pseudo R squared (squared correlation between y and ypred_e
(using reduced form))
sig2 : float
Sigma squared used in computations (based on filtered
residuals)
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
Names of all instruments used in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
hth : float
H'H
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Extract the HOVAL column (home values) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) vector from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows one to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
Example only with spatial lag
The Combo class runs an SARAR model, that is a spatial lag+error model.
In this case we will run a simple version of that, where we have the
spatial effects as well as exogenous variables. Since it is a spatial
model, we have to pass in the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> reg = GM_Combo_Hom(y, X, w=w, A1='hom_sc', name_x=['inc'],\
name_y='hoval', name_yend=['crime'], name_q=['discbd'],\
name_ds='columbus')
>>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
[[ 10.1254 15.2871]
[ 1.5683 0.4407]
[ 0.1513 0.4048]
[ 0.2103 0.4226]]
This class also allows the user to run a spatial lag+error model with the
extra feature of including non-spatial endogenous regressors. This means
that, in addition to the spatial lag and error, we consider some of the
variables on the right-hand side of the equation as endogenous and we
instrument for this. As an example, we will include CRIME (crime rates) as
endogenous and will instrument with DISCBD (distance to the CBD). We first
need to read in the variables:
>>> yd = []
>>> yd.append(db.by_col("CRIME"))
>>> yd = np.array(yd).T
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
And then we can run and explore the model analogously to the previous combo:
>>> reg = GM_Combo_Hom(y, X, yd, q, w=w, A1='hom_sc', \
name_ds='columbus')
>>> betas = np.array([['CONSTANT'],['inc'],['crime'],['W_hoval'],['lambda']])
>>> print np.hstack((betas, np.around(np.hstack((reg.betas, np.sqrt(reg.vm.diagonal()).reshape(5,1))),5)))
[['CONSTANT' '111.7705' '67.75191']
['inc' '-0.30974' '1.16656']
['crime' '-1.36043' '0.6841']
['W_hoval' '-0.52908' '0.84428']
['lambda' '0.60116' '0.18605']]
'''
def __init__(self, y, x, yend=None, q=None,
w=None, w_lags=1, lag_q=True,
max_iter=1, epsilon=0.00001, A1='hom_sc',
vm=False, name_y=None, name_x=None,
name_yend=None, name_q=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x, yend, q)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
yend2, q2 = set_endog(y, x, w, yend, q, w_lags, lag_q)
x_constant = USER.check_constant(x)
BaseGM_Combo_Hom.__init__(
self, y=y, x=x_constant, w=w.sparse, yend=yend2, q=q2,
w_lags=w_lags, A1=A1, lag_q=lag_q,
max_iter=max_iter, epsilon=epsilon)
self.rho = self.betas[-2]
self.predy_e, self.e_pred, warn = sp_att(w, self.y, self.predy,
yend2[:, -1].reshape(self.n, 1), self.rho)
set_warn(self, warn)
self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HOM)"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_yend = USER.set_name_yend(name_yend, yend)
self.name_yend.append(USER.set_name_yend_sp(self.name_y))
self.name_z = self.name_x + self.name_yend
self.name_z.append('lambda') # listing lambda last
self.name_q = USER.set_name_q(name_q, q)
self.name_q.extend(
USER.set_name_q_sp(self.name_x, w_lags, self.name_q, lag_q))
self.name_h = USER.set_name_h(self.name_x, self.name_q)
self.name_w = USER.set_name_w(name_w, w)
SUMMARY.GM_Combo_Hom(reg=self, w=w, vm=vm)
# Functions
def moments_hom(w, wA1, wA2, u):
'''
Compute G and g matrices for the spatial error model with homoscedasticity
as in Anselin (2011) [Anselin2011]_.
...
Parameters
----------
w : Sparse matrix
Spatial weights sparse matrix
wA1 : Sparse matrix
A1 matrix built from w, as selected by the A1 option of the estimators
wA2 : Sparse matrix
A2 matrix built from w
u : array
Residuals. nx1 array assumed to be aligned with w
Returns
-------
moments : list
List of two arrays corresponding to the matrices 'G' and
'g', respectively.
'''
n = w.shape[0]
A1u = wA1 * u
A2u = wA2 * u
wu = w * u
g1 = np.dot(u.T, A1u)
g2 = np.dot(u.T, A2u)
g = np.array([[g1][0][0], [g2][0][0]]) / n
G11 = 2 * np.dot(wu.T * wA1, u)
G12 = -np.dot(wu.T * wA1, wu)
G21 = 2 * np.dot(wu.T * wA2, u)
G22 = -np.dot(wu.T * wA2, wu)
G = np.array([[G11[0][0], G12[0][0]], [G21[0][0], G22[0][0]]]) / n
return [G, g]
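# Hedged usage sketch (variable names illustrative): the [G, g] pair produced
# here is minimized over lambda in steps 2a/2b of the estimators above, e.g.
#   moments = moments_hom(w, wA1, wA2, reg.u)
#   lambda1 = optim_moments(moments)  # assumes spreg.utils.optim_moments accepts the [G, g] pair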
def get_vc_hom(w, wA1, wA2, reg, lambdapar, z_s=None, for_omegaOLS=False):
'''
VC matrix \psi of the spatial error model with homoscedasticity. As in
Anselin (2011) [Anselin2011]_ (p. 20)
...
Parameters
----------
w : Sparse matrix
Spatial weights sparse matrix
wA1 : Sparse matrix
A1 matrix built from w
wA2 : Sparse matrix
A2 matrix built from w
reg : reg
Regression object
lambdapar : float
Spatial parameter estimated in previous step of the
procedure
z_s : array
optional argument for spatially filtered Z (to be
passed only if endogenous variables are present)
for_omegaOLS : boolean
If True (default=False), it also returns P, needed
only in the computation of Omega
Returns
-------
psi : array
2x2 VC matrix
a1 : array
nx1 vector a1. If z_s=None, a1 = 0.
a2 : array
nx1 vector a2. If z_s=None, a2 = 0.
p : array
P matrix. If z_s=None or for_omegaOLS=False, p=0.
'''
u_s = get_spFilter(w, lambdapar, reg.u)
n = float(w.shape[0])
sig2 = np.dot(u_s.T, u_s) / n
mu3 = np.sum(u_s ** 3) / n
mu4 = np.sum(u_s ** 4) / n
tr11 = wA1 * wA1
tr11 = np.sum(tr11.diagonal())
tr12 = wA1 * (wA2 * 2)
tr12 = np.sum(tr12.diagonal())
tr22 = wA2 * wA2 * 2
tr22 = np.sum(tr22.diagonal())
vecd1 = np.array([wA1.diagonal()]).T
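# The traces tr11/tr12/tr22 of products of the A matrices and the diagonal of
# A1 (vecd1) are the building blocks of the 2x2 psi matrix below; mu3 and mu4
# correct for possible non-normality of the filtered residuals.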
psi11 = 2 * sig2 ** 2 * tr11 + \
(mu4 - 3 * sig2 ** 2) * np.dot(vecd1.T, vecd1)
psi12 = sig2 ** 2 * tr12
psi22 = sig2 ** 2 * tr22
a1, a2, p = 0., 0., 0.
if for_omegaOLS:
x_s = get_spFilter(w, lambdapar, reg.x)
p = la.inv(spdot(x_s.T, x_s) / n)
if issubclass(type(z_s), np.ndarray) or \
issubclass(type(z_s), SP.csr.csr_matrix) or \
issubclass(type(z_s), SP.csc.csc_matrix):
alpha1 = (-2 / n) * spdot(z_s.T, wA1 * u_s)
alpha2 = (-2 / n) * spdot(z_s.T, wA2 * u_s)
hth = spdot(reg.h.T, reg.h)
hthni = la.inv(hth / n)
htzsn = spdot(reg.h.T, z_s) / n
p = spdot(hthni, htzsn)
p = spdot(p, la.inv(spdot(htzsn.T, p)))
hp = spdot(reg.h, p)
a1 = spdot(hp, alpha1)
a2 = spdot(hp, alpha2)
psi11 = psi11 + \
sig2 * spdot(a1.T, a1) + \
2 * mu3 * spdot(a1.T, vecd1)
psi12 = psi12 + \
sig2 * spdot(a1.T, a2) + \
mu3 * spdot(a2.T, vecd1) # 3rd term=0
psi22 = psi22 + \
sig2 * spdot(a2.T, a2) # 3rd&4th terms=0 bc vecd2=0
psi = np.array(
[[psi11[0][0], psi12[0][0]], [psi12[0][0], psi22[0][0]]]) / n
return psi, a1, a2, p
def get_omega_hom(w, wA1, wA2, reg, lamb, G):
'''
Omega VC matrix for Hom models with endogenous variables computed as in
Anselin (2011) [Anselin2011]_ (p. 21).
...
Parameters
----------
w : Sparse matrix
Spatial weights sparse matrix
wA1 : Sparse matrix
A1 matrix built from w
wA2 : Sparse matrix
A2 matrix built from w
reg : reg
Regression object
lamb : float
Spatial parameter estimated in previous step of the
procedure
G : array
Matrix 'G' of the moment equation
Returns
-------
omega : array
Omega matrix of VC of the model
sig2 : float
Sigma squared of the spatially filtered residuals
'''
n = float(w.shape[0])
z_s = get_spFilter(w, lamb, reg.z)
u_s = get_spFilter(w, lamb, reg.u)
sig2 = np.dot(u_s.T, u_s) / n
mu3 = np.sum(u_s ** 3) / n
vecdA1 = np.array([wA1.diagonal()]).T
psi, a1, a2, p = get_vc_hom(w, wA1, wA2, reg, lamb, z_s)
j = np.dot(G, np.array([[1.], [2 * lamb]]))
psii = la.inv(psi)
t2 = spdot(reg.h.T, np.hstack((a1, a2)))
psiDL = (mu3 * spdot(reg.h.T, np.hstack((vecdA1, np.zeros((int(n), 1))))) +
sig2 * spdot(reg.h.T, np.hstack((a1, a2)))) / n
oDD = spdot(la.inv(spdot(reg.h.T, reg.h)), spdot(reg.h.T, z_s))
oDD = sig2 * la.inv(spdot(z_s.T, spdot(reg.h, oDD)))
oLL = la.inv(spdot(j.T, spdot(psii, j))) / n
oDL = spdot(spdot(spdot(p.T, psiDL), spdot(psii, j)), oLL)
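# Omega is assembled as the symmetric block matrix [[oDD, oDL], [oDL.T, oLL]]:
# the delta-delta, delta-lambda and lambda-lambda blocks of the joint VC matrix.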
o_upper = np.hstack((oDD, oDL))
o_lower = np.hstack((oDL.T, oLL))
return np.vstack((o_upper, o_lower)), float(sig2)
def get_omega_hom_ols(w, wA1, wA2, reg, lamb, G):
'''
Omega VC matrix for Hom models without endogenous variables (OLS) computed
as in Anselin (2011) [Anselin2011]_.
...
Parameters
----------
w : Sparse matrix
Spatial weights sparse matrix
wA1 : Sparse matrix
A1 matrix built from w
wA2 : Sparse matrix
A2 matrix built from w
reg : reg
Regression object
lamb : float
Spatial parameter estimated in previous step of the
procedure
G : array
Matrix 'G' of the moment equation
Returns
-------
omega : array
Omega matrix of VC of the model
sig2 : float
Sigma squared of the spatially filtered residuals
'''
n = float(w.shape[0])
x_s = get_spFilter(w, lamb, reg.x)
u_s = get_spFilter(w, lamb, reg.u)
sig2 = np.dot(u_s.T, u_s) / n
vecdA1 = np.array([wA1.diagonal()]).T
psi, a1, a2, p = get_vc_hom(w, wA1, wA2, reg, lamb, for_omegaOLS=True)
j = np.dot(G, np.array([[1.], [2 * lamb]]))
psii = la.inv(psi)
oDD = sig2 * la.inv(spdot(x_s.T, x_s))
oLL = la.inv(spdot(j.T, spdot(psii, j))) / n
#oDL = np.zeros((oDD.shape[0], oLL.shape[1]))
mu3 = np.sum(u_s ** 3) / n
psiDL = (mu3 * spdot(reg.x.T, np.hstack((vecdA1, np.zeros((int(n), 1)))))) / n
oDL = spdot(spdot(spdot(p.T, psiDL), spdot(psii, j)), oLL)
o_upper = np.hstack((oDD, oDL))
o_lower = np.hstack((oDL.T, oLL))
return np.vstack((o_upper, o_lower)), float(sig2)
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()<|fim▁end|> | independent (exogenous) variable, including the constant
yend : array |
<|file_name|>alert_checkpoint_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the<|fim▁hole|> http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import urllib2
import ambari_simplejson as json  # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
import logging
import traceback
from resource_management.libraries.functions.namenode_ha_utils import get_all_namenode_addresses
from resource_management.libraries.functions.curl_krb_request import curl_krb_request
from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
from resource_management.core.environment import Environment
LABEL = 'Last Checkpoint: [{h} hours, {m} minutes, {tx} transactions]'
HDFS_SITE_KEY = '{{hdfs-site}}'
RESULT_STATE_UNKNOWN = 'UNKNOWN'
RESULT_STATE_SKIPPED = 'SKIPPED'
NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
NN_CHECKPOINT_TX_KEY = '{{hdfs-site/dfs.namenode.checkpoint.txns}}'
NN_CHECKPOINT_PERIOD_KEY = '{{hdfs-site/dfs.namenode.checkpoint.period}}'
PERCENT_WARNING_KEY = 'checkpoint.time.warning.threshold'
PERCENT_WARNING_DEFAULT = 200
PERCENT_CRITICAL_KEY = 'checkpoint.time.critical.threshold'
PERCENT_CRITICAL_DEFAULT = 200
CHECKPOINT_TX_MULTIPLIER_WARNING_KEY = 'checkpoint.txns.multiplier.warning.threshold'
CHECKPOINT_TX_MULTIPLIER_WARNING_DEFAULT = 2
CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY = 'checkpoint.txns.multiplier.critical.threshold'
CHECKPOINT_TX_MULTIPLIER_CRITICAL_DEFAULT = 4
CHECKPOINT_TX_DEFAULT = 1000000
CHECKPOINT_PERIOD_DEFAULT = 21600
CONNECTION_TIMEOUT_KEY = 'connection.timeout'
CONNECTION_TIMEOUT_DEFAULT = 5.0
KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
SMOKEUSER_KEY = "{{cluster-env/smokeuser}}"
EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
logger = logging.getLogger('ambari_alerts')
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
return (HDFS_SITE_KEY, NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, EXECUTABLE_SEARCH_PATHS,
NN_CHECKPOINT_TX_KEY, NN_CHECKPOINT_PERIOD_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY, SMOKEUSER_KEY)
def execute(configurations={}, parameters={}, host_name=None):
"""
Returns a tuple containing the result code and a pre-formatted result label
Keyword arguments:
configurations (dictionary): a mapping of configuration key to value
parameters (dictionary): a mapping of script parameter key to value
host_name (string): the name of this host where the alert is running
"""
if configurations is None:
return (('UNKNOWN', ['There were no configurations supplied to the script.']))
uri = None
scheme = 'http'
http_uri = None
https_uri = None
http_policy = 'HTTP_ONLY'
checkpoint_tx = CHECKPOINT_TX_DEFAULT
checkpoint_period = CHECKPOINT_PERIOD_DEFAULT
# hdfs-site is required
if not HDFS_SITE_KEY in configurations:
return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
if NN_HTTP_POLICY_KEY in configurations:
http_policy = configurations[NN_HTTP_POLICY_KEY]
if NN_CHECKPOINT_TX_KEY in configurations:
checkpoint_tx = configurations[NN_CHECKPOINT_TX_KEY]
if NN_CHECKPOINT_PERIOD_KEY in configurations:
checkpoint_period = configurations[NN_CHECKPOINT_PERIOD_KEY]
smokeuser = None
if SMOKEUSER_KEY in configurations:
smokeuser = configurations[SMOKEUSER_KEY]
executable_paths = None
if EXECUTABLE_SEARCH_PATHS in configurations:
executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
security_enabled = False
if SECURITY_ENABLED_KEY in configurations:
security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
kerberos_keytab = None
if KERBEROS_KEYTAB in configurations:
kerberos_keytab = configurations[KERBEROS_KEYTAB]
kerberos_principal = None
if KERBEROS_PRINCIPAL in configurations:
kerberos_principal = configurations[KERBEROS_PRINCIPAL]
kerberos_principal = kerberos_principal.replace('_HOST', host_name)
# parse script arguments
connection_timeout = CONNECTION_TIMEOUT_DEFAULT
if CONNECTION_TIMEOUT_KEY in parameters:
connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
percent_warning = PERCENT_WARNING_DEFAULT
if PERCENT_WARNING_KEY in parameters:
percent_warning = float(parameters[PERCENT_WARNING_KEY])
percent_critical = PERCENT_CRITICAL_DEFAULT
if PERCENT_CRITICAL_KEY in parameters:
percent_critical = float(parameters[PERCENT_CRITICAL_KEY])
checkpoint_txn_multiplier_warning = CHECKPOINT_TX_MULTIPLIER_WARNING_DEFAULT
if CHECKPOINT_TX_MULTIPLIER_WARNING_KEY in parameters:
checkpoint_txn_multiplier_warning = float(parameters[CHECKPOINT_TX_MULTIPLIER_WARNING_KEY])
checkpoint_txn_multiplier_critical = CHECKPOINT_TX_MULTIPLIER_CRITICAL_DEFAULT
if CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY in parameters:
checkpoint_txn_multiplier_critical = float(parameters[CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY])
kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
# determine the right URI and whether to use SSL
hdfs_site = configurations[HDFS_SITE_KEY]
scheme = "https" if http_policy == "HTTPS_ONLY" else "http"
nn_addresses = get_all_namenode_addresses(hdfs_site)
for nn_address in nn_addresses:
if nn_address.startswith(host_name + ":"):
uri = nn_address
break
if not uri:
return (RESULT_STATE_SKIPPED, ['NameNode on host {0} not found (namenode addresses = {1})'.format(host_name, ', '.join(nn_addresses))])
current_time = int(round(time.time() * 1000))
last_checkpoint_time_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(scheme,uri)
journal_transaction_info_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme,uri)
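# Both URLs hit the NameNode JMX servlet: the FSNamesystem bean exposes
# LastCheckpointTime, while the NameNodeInfo bean carries JournalTransactionInfo
# (last applied vs. most recently checkpointed transaction ids), parsed below.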
# start out assuming an OK status
label = None
result_code = "OK"
try:
if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
env = Environment.get_instance()
# curl requires an integer timeout
curl_connection_timeout = int(connection_timeout)
last_checkpoint_time_response, error_msg, time_millis = curl_krb_request(env.tmp_dir, kerberos_keytab,
kerberos_principal, last_checkpoint_time_qry,"checkpoint_time_alert", executable_paths, False,
"NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout,
kinit_timer_ms = kinit_timer_ms)
last_checkpoint_time_response_json = json.loads(last_checkpoint_time_response)
last_checkpoint_time = int(last_checkpoint_time_response_json["beans"][0]["LastCheckpointTime"])
journal_transaction_info_response, error_msg, time_millis = curl_krb_request(env.tmp_dir, kerberos_keytab,
kerberos_principal, journal_transaction_info_qry,"checkpoint_time_alert", executable_paths,
False, "NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout,
kinit_timer_ms = kinit_timer_ms)
journal_transaction_info_response_json = json.loads(journal_transaction_info_response)
journal_transaction_info = journal_transaction_info_response_json["beans"][0]["JournalTransactionInfo"]
else:
last_checkpoint_time = int(get_value_from_jmx(last_checkpoint_time_qry,
"LastCheckpointTime", connection_timeout))
journal_transaction_info = get_value_from_jmx(journal_transaction_info_qry,
"JournalTransactionInfo", connection_timeout)
journal_transaction_info_dict = json.loads(journal_transaction_info)
last_tx = int(journal_transaction_info_dict['LastAppliedOrWrittenTxId'])
most_recent_tx = int(journal_transaction_info_dict['MostRecentCheckpointTxId'])
transaction_difference = last_tx - most_recent_tx
delta = (current_time - last_checkpoint_time)/1000
label = LABEL.format(h=get_time(delta)['h'], m=get_time(delta)['m'], tx=transaction_difference)
is_checkpoint_txn_warning = transaction_difference > checkpoint_txn_multiplier_warning * int(checkpoint_tx)
is_checkpoint_txn_critical = transaction_difference > checkpoint_txn_multiplier_critical * int(checkpoint_tx)
# Raise an alert when either there are too many uncommitted transactions or
# check-pointing has been missed for too long, as decided by the thresholds
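# For example, with the defaults above: CRITICAL fires once the transaction gap
# exceeds 4 * 1,000,000 or the elapsed time reaches 200% of the 21600s period (12h).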
if is_checkpoint_txn_critical or (float(delta) / int(checkpoint_period)*100 >= int(percent_critical)):
logger.debug('Raising critical alert: transaction_difference = {0}, checkpoint_tx = {1}'.format(transaction_difference, checkpoint_tx))
result_code = 'CRITICAL'
elif is_checkpoint_txn_warning or (float(delta) / int(checkpoint_period)*100 >= int(percent_warning)):
logger.debug('Raising warning alert: transaction_difference = {0}, checkpoint_tx = {1}'.format(transaction_difference, checkpoint_tx))
result_code = 'WARNING'
except:
label = traceback.format_exc()
result_code = 'UNKNOWN'
return ((result_code, [label]))
def get_time(delta):
h = int(delta/3600)
m = int((delta % 3600)/60)
return {'h':h, 'm':m}
def get_value_from_jmx(query, jmx_property, connection_timeout):
response = None
try:
response = urllib2.urlopen(query, timeout=connection_timeout)
data = response.read()
data_dict = json.loads(data)
return data_dict["beans"][0][jmx_property]
finally:
if response is not None:
try:
response.close()
except:
pass<|fim▁end|> | "License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
|
<|file_name|>ConfigTriggerMakerPP2016.C<|end_file_name|><|fim▁begin|><|fim▁hole|>void ConfigTriggerMakerPP2016(){
gROOT->LoadMacro("$ALICE_PHYSICS/PWG/EMCAL/macros/AddTaskEmcalTriggerMakerNew.C");
AliEmcalTriggerMakerTask *triggermaker = AddTaskEmcalTriggerMakerNew("EmcalTriggers", "", "", kTRUE);
triggermaker->SetUseL0Amplitudes(kFALSE);
triggermaker->SelectCollisionCandidates(AliVEvent::kAny);
//triggermaker->SelectCollisionCandidates(AliVEvent::kAnyINT | AliVEvent::kEMCEGA | AliVEvent::kEMCEJE);
triggermaker->GetTriggerMaker()->ConfigureForPP2015();
}<|fim▁end|> | |
<|file_name|>DefaultBlockWorkerClient.java<|end_file_name|><|fim▁begin|>/*
* The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
* (the "License"). You may not use this work except in compliance with the License, which is
* available at www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied, as more fully set forth in the License.
*
* See the NOTICE file distributed with this work for information regarding copyright ownership.
*/
package alluxio.client.block.stream;
import alluxio.conf.AlluxioConfiguration;
import alluxio.conf.PropertyKey;
import alluxio.exception.status.AlluxioStatusException;
import alluxio.exception.status.UnauthenticatedException;
import alluxio.grpc.BlockWorkerGrpc;
import alluxio.grpc.CacheRequest;
import alluxio.grpc.ClearMetricsRequest;
import alluxio.grpc.ClearMetricsResponse;
import alluxio.grpc.CreateLocalBlockRequest;
import alluxio.grpc.CreateLocalBlockResponse;
import alluxio.grpc.DataMessageMarshaller;
import alluxio.grpc.DataMessageMarshallerProvider;
import alluxio.grpc.GrpcChannel;
import alluxio.grpc.GrpcChannelBuilder;
import alluxio.grpc.GrpcNetworkGroup;
import alluxio.grpc.GrpcSerializationUtils;
import alluxio.grpc.GrpcServerAddress;
import alluxio.grpc.MoveBlockRequest;
import alluxio.grpc.MoveBlockResponse;
import alluxio.grpc.OpenLocalBlockRequest;
import alluxio.grpc.OpenLocalBlockResponse;
import alluxio.grpc.ReadRequest;
import alluxio.grpc.ReadResponse;
import alluxio.grpc.RemoveBlockRequest;
import alluxio.grpc.RemoveBlockResponse;
import alluxio.grpc.WriteRequest;
import alluxio.grpc.WriteResponse;
import alluxio.resource.AlluxioResourceLeakDetectorFactory;
import alluxio.retry.RetryPolicy;
import alluxio.retry.RetryUtils;
import alluxio.security.user.UserState;
import com.google.common.base.Preconditions;
import com.google.common.io.Closer;
import io.grpc.StatusRuntimeException;
import io.grpc.stub.StreamObserver;
import io.netty.util.ResourceLeakDetector;
import io.netty.util.ResourceLeakTracker;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
/**
* Default implementation of {@link BlockWorkerClient}.
*/
public class DefaultBlockWorkerClient implements BlockWorkerClient {
private static final Logger LOG =
LoggerFactory.getLogger(DefaultBlockWorkerClient.class.getName());
private static final ResourceLeakDetector<DefaultBlockWorkerClient> DETECTOR =
AlluxioResourceLeakDetectorFactory.instance()
.newResourceLeakDetector(DefaultBlockWorkerClient.class);
private GrpcChannel mStreamingChannel;
private GrpcChannel mRpcChannel;
private GrpcServerAddress mAddress;
private final long mRpcTimeoutMs;
private BlockWorkerGrpc.BlockWorkerStub mStreamingAsyncStub;
private BlockWorkerGrpc.BlockWorkerBlockingStub mRpcBlockingStub;
private BlockWorkerGrpc.BlockWorkerStub mRpcAsyncStub;
@Nullable
private final ResourceLeakTracker<DefaultBlockWorkerClient> mTracker;
/**
* Creates a client instance for communicating with block worker.
*
* @param userState the user state
* @param address the address of the worker
* @param alluxioConf Alluxio configuration
*/
public DefaultBlockWorkerClient(UserState userState, GrpcServerAddress address,
AlluxioConfiguration alluxioConf) throws IOException {
RetryPolicy retryPolicy = RetryUtils.defaultClientRetry(
alluxioConf.getDuration(PropertyKey.USER_RPC_RETRY_MAX_DURATION),
alluxioConf.getDuration(PropertyKey.USER_RPC_RETRY_BASE_SLEEP_MS),
alluxioConf.getDuration(PropertyKey.USER_RPC_RETRY_MAX_SLEEP_MS));
UnauthenticatedException lastException = null;
// TODO(feng): unify worker client with AbstractClient
while (retryPolicy.attempt()) {
try {
// Disables channel pooling for data streaming to achieve better throughput.
// Channel is still reused due to client pooling.
mStreamingChannel = GrpcChannelBuilder.newBuilder(address, alluxioConf)
.setSubject(userState.getSubject())
.setNetworkGroup(GrpcNetworkGroup.STREAMING)
.setClientType("DefaultBlockWorkerClient-Stream")
.build();
mStreamingChannel.intercept(new StreamSerializationClientInterceptor());
// Uses default pooling strategy for RPC calls for better scalability.
mRpcChannel = GrpcChannelBuilder.newBuilder(address, alluxioConf)
.setSubject(userState.getSubject())
.setNetworkGroup(GrpcNetworkGroup.RPC)
.setClientType("DefaultBlockWorkerClient-Rpc")
.build();
lastException = null;
break;
} catch (StatusRuntimeException e) {
close();
throw AlluxioStatusException.fromStatusRuntimeException(e);
} catch (UnauthenticatedException e) {
close();
userState.relogin();
lastException = e;
}
}
if (lastException != null) {
throw lastException;
}
mStreamingAsyncStub = BlockWorkerGrpc.newStub(mStreamingChannel);
mRpcBlockingStub = BlockWorkerGrpc.newBlockingStub(mRpcChannel);
mRpcAsyncStub = BlockWorkerGrpc.newStub(mRpcChannel);
mAddress = address;
mRpcTimeoutMs = alluxioConf.getMs(PropertyKey.USER_RPC_RETRY_MAX_DURATION);
mTracker = DETECTOR.track(this);
}
@Override
public boolean isShutdown() {
return mStreamingChannel.isShutdown() || mRpcChannel.isShutdown();
}
@Override
public boolean isHealthy() {
return !isShutdown() && mStreamingChannel.isHealthy() && mRpcChannel.isHealthy();
}
@Override
public void close() throws IOException {
try (Closer closer = Closer.create()) {
closer.register(() -> {
if (mStreamingChannel != null) {
mStreamingChannel.shutdown();
}
});
closer.register(() -> {
if (mRpcChannel != null) {
mRpcChannel.shutdown();
}
});
closer.register(() -> {
if (mTracker != null) {
mTracker.close(this);
}
});
}
}
@Override
public StreamObserver<WriteRequest> writeBlock(StreamObserver<WriteResponse> responseObserver) {
if (responseObserver instanceof DataMessageMarshallerProvider) {
DataMessageMarshaller<WriteRequest> marshaller =
((DataMessageMarshallerProvider<WriteRequest, WriteResponse>) responseObserver)
.getRequestMarshaller();
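// Overriding the method descriptor's request marshaller lets the supplied
// DataMessageMarshaller serialize the payload itself, presumably so data
// buffers avoid an extra copy on the write path.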
Preconditions.checkNotNull(marshaller, "marshaller");
return mStreamingAsyncStub
.withOption(GrpcSerializationUtils.OVERRIDDEN_METHOD_DESCRIPTOR,
BlockWorkerGrpc.getWriteBlockMethod().toBuilder()<|fim▁hole|> .setRequestMarshaller(marshaller)
.build())
.writeBlock(responseObserver);
} else {
return mStreamingAsyncStub.writeBlock(responseObserver);
}
}
@Override
public StreamObserver<ReadRequest> readBlock(StreamObserver<ReadResponse> responseObserver) {
if (responseObserver instanceof DataMessageMarshallerProvider) {
DataMessageMarshaller<ReadResponse> marshaller =
((DataMessageMarshallerProvider<ReadRequest, ReadResponse>) responseObserver)
.getResponseMarshaller();
Preconditions.checkNotNull(marshaller);
return mStreamingAsyncStub
.withOption(GrpcSerializationUtils.OVERRIDDEN_METHOD_DESCRIPTOR,
BlockWorkerGrpc.getReadBlockMethod().toBuilder()
.setResponseMarshaller(marshaller)
.build())
.readBlock(responseObserver);
} else {
return mStreamingAsyncStub.readBlock(responseObserver);
}
}
@Override
public StreamObserver<CreateLocalBlockRequest> createLocalBlock(
StreamObserver<CreateLocalBlockResponse> responseObserver) {
return mStreamingAsyncStub.createLocalBlock(responseObserver);
}
@Override
public StreamObserver<OpenLocalBlockRequest> openLocalBlock(
StreamObserver<OpenLocalBlockResponse> responseObserver) {
return mStreamingAsyncStub.openLocalBlock(responseObserver);
}
@Override
public RemoveBlockResponse removeBlock(final RemoveBlockRequest request) {
return mRpcBlockingStub.withDeadlineAfter(mRpcTimeoutMs, TimeUnit.MILLISECONDS)
.removeBlock(request);
}
@Override
public MoveBlockResponse moveBlock(MoveBlockRequest request) {
return mRpcBlockingStub.withDeadlineAfter(mRpcTimeoutMs, TimeUnit.MILLISECONDS)
.moveBlock(request);
}
@Override
public ClearMetricsResponse clearMetrics(ClearMetricsRequest request) {
return mRpcBlockingStub.withDeadlineAfter(mRpcTimeoutMs, TimeUnit.MILLISECONDS)
.clearMetrics(request);
}
@Override
public void cache(CacheRequest request) {
boolean async = request.getAsync();
try {
mRpcBlockingStub.withDeadlineAfter(mRpcTimeoutMs, TimeUnit.MILLISECONDS).cache(request);
} catch (Exception e) {
if (!async) {
throw e;
}
LOG.warn("Error sending async cache request {} to worker {}.", request, mAddress, e);
}
}
}<|fim▁end|> | |
<|file_name|>beatbox.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import pygame
from tools import singleton
@singleton
class Audio(object):
def __init__(self, initial_musics={}, initial_sounds={}):
if pygame.mixer.get_init() is None:
pygame.mixer.init()
self.__mute = False
self.__sounds = initial_sounds
self.__musics = initial_musics
def register_sound(self, sound_id, sound_object):
self.__sounds[sound_id] = sound_object
def register_music(self, music_id, music_object):<|fim▁hole|> return False
del(self.__sounds[sound_id])
def unregister_music(self, music_id):
if music_id not in self.__musics.keys():
return False
del(self.__musics[music_id])
@property
def sounds(self):
return self.__sounds.keys()
@property
def musics(self):
return self.__musics.keys()
@property
def is_muted(self):
return self.__mute
def mute(self):
if self.is_muted:
return
pygame.mixer.music.stop()
self.__mute = True
def unmute(self):
if not self.is_muted:
return
self.__mute = False
pygame.mixer.music.play(-1)
def set_mute(self, new_state=True):
if new_state:
self.mute()
else:
self.unmute()
def set_bgm_music(self, music_id):
if music_id not in self.musics:
return False
pygame.mixer.music.load(self.__musics[music_id])
if not self.is_muted:
pygame.mixer.music.play(-1)
return True
def play_sound(self, sound_id):
if self.is_muted:
return True
if sound_id not in self.sounds:
return False
self.__sounds[sound_id].play()
return True
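# Hypothetical usage sketch (ids and paths are illustrative, not part of the API):
#   AUDIO.register_music('bgm', 'assets/theme.ogg')  # path handed to pygame.mixer.music.load
#   AUDIO.set_bgm_music('bgm')                       # loads and, unless muted, loops it
#   AUDIO.play_sound('jump')                         # 'jump' must map to a registered pygame Sound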
# Create default instance
AUDIO = Audio()<|fim▁end|> | self.__musics[music_id] = music_object
def unregister_sound(self, sound_id):
if sound_id not in self.__sounds.keys(): |
<|file_name|>views.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
from django.conf import settings as dsettings
from django.contrib.auth import models as authModels
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import HttpResponse, Http404
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import slugify
from microblog import models, settings
from taggit.models import Tag, TaggedItem
from decorator import decorator
try:
import json
except ImportError:
import simplejson as json
def render_json(f):
"""
Decorator to apply to a view to serialize its result as JSON.
"""
if dsettings.DEBUG:
ct = 'text/plain'
j = lambda d: json.dumps(d, indent=2)
else:
ct = 'application/json'
j = json.dumps
def wrapper(func, *args, **kw):
try:
result = func(*args, **kw)
except Exception, e:
result = j(str(e))
status = 500
else:
if isinstance(result, HttpResponse):
return result
else:
result = j(result)
status = 200
return HttpResponse(content=result, content_type=ct, status=status)
return decorator(wrapper, f)
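# Sketch of intended use (hypothetical view function):
#   @render_json
#   def my_view(request):
#       return {'ok': True}  # serialized to JSON, pretty-printed text/plain when DEBUG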
def post_list(request):
return render(request, 'microblog/post_list.html', {})<|fim▁hole|> category = get_object_or_404(models.Category, name=category)
return render_to_response(
'microblog/category.html',
{
'category': category,
},
context_instance=RequestContext(request)
)
def post_list_by_year(request, year, month=None):
return render_to_response(
'microblog/list_by_year.html',
{
'year': year,
'month': month,
},
context_instance=RequestContext(request)
)
def tag(request, tag):
tag = get_object_or_404(Tag, name=tag)
return render_to_response(
'microblog/tag.html',
{
'tag': tag,
},
context_instance=RequestContext(request)
)
def author(request, author):
user = [
u for u in authModels.User.objects.all()
if slugify('%s-%s' % (u.first_name, u.last_name)) == author
]
if not user:
raise Http404()
else:
user = user[0]
return render_to_response(
'microblog/author.html',
{
'author': user,
},
context_instance=RequestContext(request)
)
def _paginate_posts(post_list, request):
if settings.MICROBLOG_POST_LIST_PAGINATION:
paginator = Paginator(post_list, settings.MICROBLOG_POST_PER_PAGE)
try:
page = int(request.GET.get("page", "1"))
except ValueError:
page = 1
try:
posts = paginator.page(page)
except (EmptyPage, InvalidPage):
posts = paginator.page(1)
else:
paginator = Paginator(post_list, len(post_list) or 1)
posts = paginator.page(1)
return posts
def _posts_list(request, featured=False):
if settings.MICROBLOG_LANGUAGE_FALLBACK_ON_POST_LIST:
lang = None
else:
lang = request.LANGUAGE_CODE
return models.Post.objects\
.byLanguage(lang)\
.byFeatured(featured)\
.published()
def _post_detail(request, content):
if not settings.MICROBLOG_POST_FILTER([content.post], request.user):
raise Http404()
return render_to_response(
'microblog/post_detail.html',
{
'post': content.post,
'content': content
},
context_instance=RequestContext(request)
)
def _trackback_ping(request, content):
def success():
x = ('<?xml version="1.0" encoding="utf-8"?>\n'
'<response><error>0</error></response>')
return HttpResponse(content=x, content_type='text/xml')
def failure(message=''):
x = ('<?xml version="1.0" encoding="utf-8"?>\n'
'<response><error>1</error><message>%s</message></response>') % message
return HttpResponse(content=x, content_type='text/xml', status=400)
if request.method != 'POST':
return failure('only POST method is supported')
if not request.POST.get('url'):
return failure('url argument is mandatory')
t = {
'url': request.POST['url'],
'blog_name': request.POST.get('blog_name', ''),
'title': request.POST.get('title', ''),
'excerpt': request.POST.get('excerpt', ''),
}
from microblog.moderation import moderate
if not moderate(request, 'trackback', t['title'], url=t['url']):
return failure('moderated')
content.new_trackback(**t)
return success()
@render_json
def _comment_count(request, content):
post = content.post
if settings.MICROBLOG_COMMENT == 'comment':
import django_comments as comments
from django.contrib.contenttypes.models import ContentType
model = comments.get_model()
q = model.objects.filter(
content_type=ContentType.objects.get_for_model(post),
object_pk=post.id,
is_public=True
)
return q.count()
else:
import httplib2
from urllib import quote
h = httplib2.Http()
params = {
'forum_api_key': settings.MICROBLOG_COMMENT_DISQUS_FORUM_KEY,
'url': content.get_url(),
}
args = '&'.join('%s=%s' % (k, quote(v)) for k, v in params.items())
url = settings.MICROBLOG_COMMENT_DISQUS_API_URL + 'get_thread_by_url?%s' % args
resp, page = h.request(url)
if resp.status != 200:
return -1
page = json.loads(page)
if not page['succeeded']:
return -1
elif page['message'] is None:
return 0
else:
return page['message']['num_comments']
def _post404(f):
def wrapper(*args, **kw):
try:
return f(*args, **kw)
except models.PostContent.DoesNotExist:
raise Http404()
return wrapper
if settings.MICROBLOG_URL_STYLE == 'date':
def _get(slug, year, month, day):
return models.PostContent.objects\
.select_related('post')\
.getBySlugAndDate(slug, year, month, day)
@_post404
def post_detail(request, year, month, day, slug):
return _post_detail(
request,
content=_get(slug, year, month, day)
)
@_post404
def trackback_ping(request, year, month, day, slug):
return _trackback_ping(
request,
content=_get(slug, year, month, day)
)
@_post404
def comment_count(request, year, month, day, slug):
return _comment_count(
request,
content = _get(slug, year, month, day)
)
elif settings.MICROBLOG_URL_STYLE == 'category':
def _get(slug, category):
return models.PostContent.objects\
.select_related('post')\
.getBySlugAndCategory(slug, category)
@_post404
def post_detail(request, category, slug):
return _post_detail(
request,
content=_get(slug, category),
)
@_post404
def trackback_ping(request, category, slug):
return _trackback_ping(
request,
content=_get(slug, category),
)
@_post404
def comment_count(request, category, slug):
return _comment_count(
request,
content=_get(slug, category),
)<|fim▁end|> |
def category(request, category): |
<|file_name|>dbRoutes.js<|end_file_name|><|fim▁begin|>import db from './db';
const authenticatedOnly = (req, res, next) => {
if (req.isAuthenticated()) { return next(); }
return res.status(401).send('Authentication required');
};
const dbRoutes = (app) => {
app.post('/users', authenticatedOnly, (req, res) => {
db.update(req.body, (doc) => {
if (process.env.NODE_ENV !== 'production') {
console.log('Saved:', doc);
}
res.send(doc.data);
});
});
};
<|fim▁hole|>export default dbRoutes;<|fim▁end|> | |
<|file_name|>linter.js<|end_file_name|><|fim▁begin|>/**
* A wrapper around JSLint to drop things into the console
*
* Copyright (C) 2011 Nikolay Nemshilov
*/
var RightJS = require('./right-server.js');
var JSLint = require('./jslint').JSLINT;
var fs = require('fs');
exports.Linter = new RightJS.Class({
extend: {
Options: {
debug: false, // no debug
devel: false, // no console.log s
evil: false, // no evals
passfail: false, // don't stop on errors
onevar: false, // allow more than one 'var' definition
forin: true , // allow for in without ownershipt checks
indent: 2 , // enforce 2 spaces indent
maxerr: 12 , // max number of errors
},
Okays: [
"Move 'var' declarations to the top of the function.",
"Do not use 'new' for side effects.",
"The Function constructor is eval."
]
},
/**
* Basic constructor
*
* @param {String} the source
* @param {String} the linter options
* @return void
*/
initialize: function(src, options) {
this.source = src;
this.options = options;
},<|fim▁hole|> * Runs the linter
*
* @return {Linter} this
*/
run: function() {
var options = {}, okays = [], patches = '';
// extracting the additional options
try { // skipping non-existing patch files
patches = fs.readFileSync(this.options).toString();
} catch(e) {}
eval(patches);
JSLint.okays = this.constructor.Okays.concat(okays);
JSLint(
fs.readFileSync(this.source).toString(),
Object.merge(this.constructor.Options, options)
);
this.errors = JSLint.errors.compact();
this.failed = this.errors.length > 0;
return this;
},
/**
* Prints out the check report
*
* @return {Linter} this
*/
report: function() {
if (this.errors.empty()) {
console.log("\u001B[32m - JSLint check successfully passed\u001B[0m");
} else {
console.log("\u001B[31m - JSLint check failed in: "+ this.source + "\u001B[0m");
this.errors.each(function(error) {
var report = "\n", j=0, pointer='';
for (; j < error.character-1; j++) { pointer += '-'; }
report += " \u001B[35m"+ error.reason +"\u001B[0m ";
if (error.evidence) {
report += "Line: "+ error.line + ", Char: "+ error.character + "\n";
report += " "+ error.evidence + "\n";
report += " \u001B[33m"+ pointer + "^\u001B[0m";
}
console.log(report);
});
console.log("\n")
}
return this;
}
});<|fim▁end|> |
/** |
<|file_name|>intersectdialog.py<|end_file_name|><|fim▁begin|># emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from froi.algorithm import imtool
class IntersectDialog(QDialog):
"""A dialog for action of intersection."""
def __init__(self, model, parent=None):
super(IntersectDialog, self).__init__(parent)
self._model = model
self._init_gui()
self._create_actions()
def _init_gui(self):<|fim▁hole|>
# initialize widgets
source_label = QLabel("Source")
self.source_combo = QComboBox()
mask_label = QLabel("Mask")
self.mask_combo = QComboBox()
vol_list = self._model.getItemList()
self.source_combo.addItems(QStringList(vol_list))
row = self._model.currentIndex().row()
self.source_combo.setCurrentIndex(row)
self.mask_combo.addItems(QStringList(vol_list))
out_label = QLabel("Output volume name")
self.out_edit = QLineEdit()
# layout config
grid_layout = QGridLayout()
#grid_layout.addWidget(source_label, 0, 0)
#grid_layout.addWidget(self.source_combo, 0, 1)
grid_layout.addWidget(mask_label, 0, 0)
grid_layout.addWidget(self.mask_combo, 0, 1)
grid_layout.addWidget(out_label, 1, 0)
grid_layout.addWidget(self.out_edit, 1, 1)
# button config
self.run_button = QPushButton("Run")
self.cancel_button = QPushButton("Cancel")
hbox_layout = QHBoxLayout()
hbox_layout.addWidget(self.run_button)
hbox_layout.addWidget(self.cancel_button)
vbox_layout = QVBoxLayout()
vbox_layout.addLayout(grid_layout)
vbox_layout.addLayout(hbox_layout)
self.setLayout(vbox_layout)
def _create_actions(self):
self.source_combo.currentIndexChanged.connect(self._create_output)
self.mask_combo.currentIndexChanged.connect(self._create_output)
self.run_button.clicked.connect(self._run_intersect)
self.cancel_button.clicked.connect(self.done)
def _create_output(self):
source_name = self.source_combo.currentText()
mask_name = self.mask_combo.currentText()
output_name = '_'.join([str(source_name), str(mask_name)])
self.out_edit.setText(output_name)
def _run_intersect(self):
"""Run an intersecting processing."""
vol_name = str(self.out_edit.text())
if not vol_name:
QMessageBox.critical(self, "No output volume name",
"Please specify output volume's name!")
return
source_row = self.source_combo.currentIndex()
mask_row = self.mask_combo.currentIndex()
source_data = self._model.data(self._model.index(source_row),
Qt.UserRole + 4)
mask_data = self._model.data(self._model.index(mask_row),
Qt.UserRole + 4)
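# Qt.UserRole + 4 is assumed to be the custom model role that hands back the
# raw volume array, which imtool.intersect then combines with the mask.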
new_vol = imtool.intersect(source_data, mask_data)
self._model.addItem(new_vol,
None,
vol_name,
self._model._data[0].get_header(),
0, 100, 255, 'rainbow')
self.done(0)<|fim▁end|> | """Initialize GUI."""
# set dialog title
self.setWindowTitle("Intersect") |
<|file_name|>RadioInput.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# RadioInput.py
#
# Copyright (C) 2015 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
from gi.repository import Gtk, GObject
class RadioInput(Gtk.Box):
__gsignals__ = {
'radio-changed': (GObject.SIGNAL_RUN_FIRST, None, ())
}
def __init__(self, values):
super(RadioInput, self).__init__(orientation=Gtk.Orientation.VERTICAL)
buttons = []
# First value is treated differently
first_value = values[0]
self._first_radio = Gtk.RadioButton.new_with_label(None, first_value)
self._first_radio.connect(
'toggled',
self._emit_value_changed,
first_value
)
self.pack_start(self._first_radio, False, False, 20)
values.pop(0)
for v in values:
radio = Gtk.RadioButton.new_with_label_from_widget(
self._first_radio, v
)
self.pack_start(radio, False, False, 10)
buttons.append(radio)
def _emit_value_changed(self, widget, value):
self.emit('radio-changed')
def get_selected_text(self):
group = self._first_radio.get_group()
for button in group:
if button.get_active():
return str(button.get_label())
print "no selected text for radiobuttons"
def get_focusable_widget(self):
'''
:returns: tuple (bool, widget)<|fim▁hole|> '''
return (False, None)<|fim▁end|> | The first argument is whether there is a widget
that should be focused on, the second is the
widget in question |
<|file_name|>Size.java<|end_file_name|><|fim▁begin|>package xyw.ning.juicer.poco.model;
import xyw.ning.juicer.base.NoProguard;
/**
* Created by Ning-win on 2016/7/30.
*/
public class Size implements NoProguard {
public String url;
public long width;<|fim▁hole|><|fim▁end|> | public long height;
} |
<|file_name|>add.py<|end_file_name|><|fim▁begin|>"""
project and subproject adding
# @@ needs tests
"""
from Acquisition import aq_inner
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ZopeTwoPageTemplateFile
from opencore.browser.formhandler import OctopoLite, action
from opencore.i18n import _
from opencore.interfaces import IHomePage
from opencore.interfaces.event import AfterProjectAddedEvent
from opencore.browser.naming import get_view_names
from opencore.project.browser.base import ProjectBaseView
from topp.featurelets.interfaces import IFeatureletSupporter, IFeaturelet
from topp.utils.text import valid_title, valid_id, strip_extra_whitespace
from zope import event
from zope.component import getAdapters, getMultiAdapter
from zope.interface import implements
import logging
log = logging.getLogger('opencore.project.browser.add')
class ProjectAddView(ProjectBaseView, OctopoLite):
template = ZopeTwoPageTemplateFile('create.pt')
def reserved_names(self):
return list(get_view_names(self.context)) + ['people', 'projects', 'unique', 'summary', 'pending']
@action('validate')
def validate(self, target=None, fields=None):
putils = getToolByName(self.context, 'plone_utils')
errors = {}
id_ = self.request.form.get('projid')
id_ = putils.normalizeString(id_)
if (self.context.has_key(id_)
or id_ in self.reserved_names()):
errors['oc-id-error'] = {
'html': 'The requested url is already taken.',
'action': 'copy',
'effects': 'highlight'
}
else:
errors['oc-id-error'] = {
'html': '',
'action': 'copy',
'effects': ''
}
return errors
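# Each entry in `errors` doubles as an OctopoLite DOM instruction: the key is
# assumed to name the target node, while 'html'/'action'/'effects' describe
# the client-side update to perform.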
def check_logo(self, project, logo):
try:
project.setLogo(logo)
except ValueError: # must have tried to upload an unsupported filetype
self.addPortalStatusMessage('Please choose an image in gif, jpeg, png, or bmp format.')
return False
return True
@action('add')
def handle_request(self, target=None, fields=None):
#XXX all of the errors that are reported back here are not going
# through the translation machinery
putils = getToolByName(self.context, 'plone_utils')
self.request.set('__initialize_project__', True)
self.errors = {}
title = self.request.form.get('project_title')
title = strip_extra_whitespace(title)
if not isinstance(title, unicode):
title = unicode(title, 'utf-8')
self.request.form['project_title'] = title
if not valid_title(title):
self.errors['project_title'] = 'The name must contain 2 or more characters.'
id_ = self.request.form.get('projid')
if not valid_id(id_):
self.errors['id'] = 'The url must contain 2 or more characters; ' + \
'only A-Z, 0-9 and "-" are valid characters.'
else:
id_ = putils.normalizeString(id_)
if self.context.has_key(id_):
self.errors['id'] = 'The requested url is already taken.'
# Give plugin viewlets a chance to validate. We don't have a
# project yet, so they'll have to tolerate validating with the
# project container as the context.
viewlet_mgr = getMultiAdapter((self.context, self.request, self),
name='opencore.proj_prefs')
if not hasattr(viewlet_mgr, 'viewlets'):
viewlet_mgr.update()
viewlets = viewlet_mgr.viewlets
for viewlet in viewlets:
if hasattr(viewlet, 'validate'):
self.errors.update(viewlet.validate())
# XXX TO DO: handle featurelets, just like in preferences.py
if self.errors:
self.add_status_message(_(u'psm_correct_errors_below', u'Please correct the errors indicated below.'))
return
self.request.form['featurelets'] = [f['id'] for f in self.featurelets()]
# Aarrgghh!! #*!&% plone snoops into the request, and reads the form variables directly,
# so we have to set the form variables with the same names as the schema
self.request.form['title'] = title
proj = self.context.restrictedTraverse('portal_factory/OpenProject/%s' %id_)
# not calling validate because it explodes on "'" for project titles
# XXX is no validation better than an occasional ugly error?
#proj.validate(REQUEST=self.request, errors=self.errors, data=1, metadata=0)
if self.errors:
self.add_status_message(_(u'psm_correct_errors_below', u'Please correct the errors indicated below.'))
return
if id_ in self.reserved_names():
self.errors['id'] = 'Name reserved'
self.add_status_message(_(u'psm_project_name_reserved', u'The name "${project_name}" is reserved. Please try a different name.',
mapping={u'project_name':id_}))
return<|fim▁hole|>
self.context.portal_factory.doCreate(proj, id_)
proj = aq_inner(self.context)._getOb(id_)
self.notify(proj)
logo = self.request.form.get('logo')
if logo:
if not self.check_logo(proj, logo):
return
del self.request.form['logo']
hpcontext = IHomePage(proj)
hpcontext.home_page = 'summary'
# We have to look up the viewlets again, now that we have
# a project for them to use as the context to save to.
viewlet_mgr = getMultiAdapter((proj, self.request, self),
name='opencore.proj_prefs')
if not hasattr(viewlet_mgr, 'viewlets'):
viewlet_mgr.update()
for viewlet in viewlet_mgr.viewlets:
if hasattr(viewlet, 'save'):
viewlet.save()
self.template = None # Don't render anything before redirect.
site_url = getToolByName(self.context, 'portal_url')()
proj_edit_url = '%s/projects/%s/project-home/edit' % (site_url, id_)
s_message_mapping = {'title': title, 'proj_edit_url': proj_edit_url,
'project_noun': self.project_noun,}
s_message = _(u'project_created',
u'"${title}" has been created. Create a team by searching for other members to invite to your ${project_noun}, then <a href="${proj_edit_url}">edit your ${project_noun} home page</a>.',
mapping=s_message_mapping)
# self.add_status_message(s_message)
self.redirect('%s/tour' % proj.absolute_url())
def notify(self, project):
event.notify(AfterProjectAddedEvent(project, self.request))
def featurelets(self):
# create a stub object that provides IFeatureletSupporter
# is there a better way to get the list of adapters without having
# the "for" object?
# @@ dwm: look at the adapter reg or uses the apidoc api which
# featurelet to display is a policy decision on the portal
# (like opencore_properties). Might work best to build the ui
# around a policy abstraction
obj = DummyFeatureletSupporter()
flets = getAdapters((obj,), IFeaturelet)
flet_data = [dict(id=f.id,
title=f.title,
url=f._info['menu_items'][0]['action'],
checked=False,
)
for name, f in flets]
return flet_data
def homepages(self):
flet_data = self.intrinsic_homepages() + self.featurelets()
return flet_data
class DummyFeatureletSupporter(object):
implements(IFeatureletSupporter)<|fim▁end|> | |
<|file_name|>mail.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3
import sys, getpass, urllib.request, urllib.error, json
def mgmt(cmd, data=None, is_json=False):
# The base URL for the management daemon. (Listens on IPv4 only.)
mgmt_uri = 'http://127.0.0.1:10222'
setup_key_auth(mgmt_uri)
req = urllib.request.Request(mgmt_uri + cmd, urllib.parse.urlencode(data).encode("utf8") if data else None)
try:
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
if e.code == 401:
try:
print(e.read().decode("utf8"))
except:
pass
print("The management daemon refused access. The API key file may be out of sync. Try 'service mailinabox restart'.", file=sys.stderr)
elif hasattr(e, 'read'):
print(e.read().decode('utf8'), file=sys.stderr)
else:
print(e, file=sys.stderr)
sys.exit(1)
resp = response.read().decode('utf8')
if is_json: resp = json.loads(resp)
return resp
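# Example round trip (hypothetical): mgmt("/mail/users?format=json", is_json=True)
# issues an authenticated GET; supplying a `data` dict turns the request into a POST.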
def read_password():
first = getpass.getpass('password: ')
second = getpass.getpass(' (again): ')
while first != second:
print('Passwords not the same. Try again.')
first = getpass.getpass('password: ')
second = getpass.getpass(' (again): ')
return first
def setup_key_auth(mgmt_uri):
key = open('/var/lib/mailinabox/api.key').read().strip()
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(
realm='Mail-in-a-Box Management Server',
uri=mgmt_uri,<|fim▁hole|> user=key,
passwd='')
opener = urllib.request.build_opener(auth_handler)
urllib.request.install_opener(opener)
if len(sys.argv) < 2:
print("Usage: ")
print(" tools/mail.py user (lists users)")
print(" tools/mail.py user add [email protected] [password]")
print(" tools/mail.py user password [email protected] [password]")
print(" tools/mail.py user remove [email protected]")
print(" tools/mail.py user make-admin [email protected]")
print(" tools/mail.py user remove-admin [email protected]")
print(" tools/mail.py user admins (lists admins)")
print(" tools/mail.py alias (lists aliases)")
print(" tools/mail.py alias add [email protected] [email protected]")
print(" tools/mail.py alias add [email protected] '[email protected], [email protected]'")
print(" tools/mail.py alias remove [email protected]")
print()
print("Removing a mail user does not delete their mail folders on disk. It only prevents IMAP/SMTP login.")
print()
elif sys.argv[1] == "user" and len(sys.argv) == 2:
# Dump a list of users, one per line. Mark admins with an asterisk.
users = mgmt("/mail/users?format=json", is_json=True)
for domain in users:
for user in domain["users"]:
if user['status'] == 'inactive': continue
print(user['email'], end='')
if "admin" in user['privileges']:
print("*", end='')
print()
elif sys.argv[1] == "user" and sys.argv[2] in ("add", "password"):
if len(sys.argv) < 5:
if len(sys.argv) < 4:
email = input("email: ")
else:
email = sys.argv[3]
pw = read_password()
else:
email, pw = sys.argv[3:5]
if sys.argv[2] == "add":
print(mgmt("/mail/users/add", { "email": email, "password": pw }))
elif sys.argv[2] == "password":
print(mgmt("/mail/users/password", { "email": email, "password": pw }))
elif sys.argv[1] == "user" and sys.argv[2] == "remove" and len(sys.argv) == 4:
print(mgmt("/mail/users/remove", { "email": sys.argv[3] }))
elif sys.argv[1] == "user" and sys.argv[2] in ("make-admin", "remove-admin") and len(sys.argv) == 4:
if sys.argv[2] == "make-admin":
action = "add"
else:
action = "remove"
print(mgmt("/mail/users/privileges/" + action, { "email": sys.argv[3], "privilege": "admin" }))
elif sys.argv[1] == "user" and sys.argv[2] == "admins":
# Dump a list of admin users.
users = mgmt("/mail/users?format=json", is_json=True)
for domain in users:
for user in domain["users"]:
if "admin" in user['privileges']:
print(user['email'])
elif sys.argv[1] == "alias" and len(sys.argv) == 2:
print(mgmt("/mail/aliases"))
elif sys.argv[1] == "alias" and sys.argv[2] == "add" and len(sys.argv) == 5:
print(mgmt("/mail/aliases/add", { "source": sys.argv[3], "destination": sys.argv[4] }))
elif sys.argv[1] == "alias" and sys.argv[2] == "remove" and len(sys.argv) == 4:
print(mgmt("/mail/aliases/remove", { "source": sys.argv[3] }))
else:
print("Invalid command-line arguments.")
sys.exit(1)<|fim▁end|> | |
<|file_name|>app.js<|end_file_name|><|fim▁begin|>const Discord = require("discord.js");
const client = new Discord.Client();
const settings = require("./settings.json");
const chalk = require("chalk");
const fs = require("fs");
const moment = require("moment");
require("./util/eventLoader")(client);
const log = message => {
console.log(`[${moment().format("YYYY-MM-DD HH:mm:ss")}] ${message}`);
};
client.commands = new Discord.Collection();
client.aliases = new Discord.Collection();
fs.readdir('./commands/', (err, files) => {
if (err) console.error(err);
log(`Loading a total of ${files.length} commands.`);
files.forEach(f => {
let props = require(`./commands/${f}`);
log(`Loading Command: ${props.help.name}. 👌`);
client.commands.set(props.help.name, props);
props.conf.aliases.forEach(alias => {
client.aliases.set(alias, props.help.name);
});
});
});
client.reload = command => {
return new Promise((resolve, reject) => {
try {
delete require.cache[require.resolve(`./commands/${command}`)];
let cmd = require(`./commands/${command}`);
client.commands.delete(command);
client.aliases.forEach((cmd, alias) => {
if (cmd === command) client.aliases.delete(alias);
});
client.commands.set(command, cmd);
cmd.conf.aliases.forEach(alias => {
client.aliases.set(alias, cmd.help.name);
});
resolve();
} catch (e) {
reject(e);
}
});
};
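// Illustrative only: a reload command could hot-swap a module like so
// (filenames assumed to match those under ./commands/):
//   client.reload("ping.js")
//     .then(() => message.channel.send("Reloaded ping.js"))
//     .catch(err => message.channel.send(`Reload failed: ${err}`));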
client.on("ready", () => {
const games = ["Not a Game", "The Joker Game Returns", "The Coven", "Nintendo: Choose Your Own Character 2!", "PokéDonalds"];
setInterval(() => {
const playingGame = games[~~(Math.random() * games.length)];
console.log(`Changing playing game to ${playingGame} now`);
client.user.setGame(playingGame);
}, 1800000);
client.channels.get("339257481740156928").fetchMessages({
limit: 30
})
.then(messages => console.log(`Received ${messages.size} messages`))
.catch(console.error);
});
client.elevation = message => {
  /* This function should resolve to an ELEVATION level which
     is then sent to the command handler for verification. */
let permlvl = 0;
let mod_role = message.guild.roles.find("name", settings.modrolename);
if (mod_role && message.member.roles.has(mod_role.id)) permlvl = 2;
let admin_role = message.guild.roles.find("name", settings.adminrolename);
if (admin_role && message.member.roles.has(admin_role.id)) permlvl = 3;
if (message.author.id === settings.ownerid) permlvl = 4;
return permlvl;
};
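// Illustrative only: elevation resolves to 0 (everyone), 2 (mod role),
// 3 (admin role) or 4 (bot owner), so a handler can gate itself with e.g.:
//   if (client.elevation(message) < 3) return;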
let autoResponse = {
"ayy": "lmao",
"ayyy": "lmao",
"ayyyy": "lmao",
"that's hot": "eso es caliente",
"lenny": "( ͡° ͜ʖ ͡°)",
"eso es caliente": "that's hot",
"drewbie": "!kick drewbie"
};
client.on("message", message => {
if (message.content === "lala") {
    console.log(message.guild.members.find("nickname", "asd"));
}
if (message.author.bot) return;
let msg = message.content.toLowerCase();
if (autoResponse[msg]) {
message.channel.send(autoResponse[msg]);
}
});
var regToken = /[\w\d]{24}\.[\w\d]{6}\.[\w\d-_]{27}/g;
// client.on('debug', e => {
// console.log(chalk.bgBlue.green(e.replace(regToken, 'that was redacted')));
// });
client.on("warn", e => {
console.log(chalk.bgYellow(e.replace(regToken, "that was redacted")));
});<|fim▁hole|>
client.login(process.env.TOKEN);<|fim▁end|> |
client.on("error", e => {
console.log(chalk.bgRed(e.replace(regToken, "that was redacted")));
}); |
<|file_name|>TripInfo.java<|end_file_name|><|fim▁begin|>package com.atlach.trafficdataloader;
import java.util.ArrayList;
/* Short Desc: Storage object for Trip info */
/* Trip Info > Trips > Routes */
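/* Illustrative usage sketch (all literals made up):
 *   TripInfo info = new TripInfo();
 *   int tripId = info.addTrip();
 *   info.addRouteToTrip(tripId, "Route 1", "bus", "4.2", "CityBus",
 *                       "Stop A", "Stop B", "<encoded polyline>");
 */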
public class TripInfo {
private int tripCount = 0;
public ArrayList<Trip> trips = null;
public String date;
public String origin;
public String destination;
public TripInfo() {
trips = new ArrayList<Trip>();
}
public int addTrip() {
Trip temp = new Trip();
        if (!trips.add(temp)) {
/* Failed */
return -1;
}
tripCount++;
return trips.indexOf(temp);<|fim▁hole|> Trip temp = trips.get(tripId);
if (temp != null) {
result = temp.addRoute(routeName, mode, dist, agency, start, end, points);
}
return result;
}
public int getTripCount() {
return tripCount;
}
public static class Trip {
public double totalDist = 0.0;
public double totalCost = 0.0;
public int totalTraffic = 0;
private int transfers = 0;
public ArrayList<Route> routes = null;
public Trip() {
routes = new ArrayList<Route>();
        }
public int addRoute(String routeName, String mode, String dist, String agency, String start, String end, String points) {
Route temp = new Route();
temp.name = routeName;
temp.mode = mode;
temp.dist = dist;
temp.agency = agency;
temp.start = start;
temp.end = end;
temp.points = points;
            if (!routes.add(temp)) {
/* Failed */
return -1;
}
transfers++;
return routes.indexOf(temp);
}
public int getTransfers() {
return transfers;
}
}
public static class Route {
/* Object fields */
public String name = "";
public String mode = "";
public String dist = "0.0";
public String agency = "";
public String start = "";
public String end = "";
public String points = "";
public String cond = "";
//public String cost = "0.0";
        public double[] costMatrix = {0.0, 0.0, 0.0, 0.0};
public double getRegularCost(boolean isDiscounted) {
if (isDiscounted) {
return costMatrix[1];
}
return costMatrix[0];
}
public double getSpecialCost(boolean isDiscounted) {
if (isDiscounted) {
return costMatrix[2];
}
return costMatrix[3];
}
}
}<|fim▁end|> | }
public int addRouteToTrip(int tripId, String routeName, String mode, String dist, String agency, String start, String end, String points) {
int result = -1; |
<|file_name|>tests.rs<|end_file_name|><|fim▁begin|>//! Module for parsing ISO Base Media Format aka video/mp4 streams.
//! Internal unit tests.
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use super::fallible::TryRead as _;
use super::read_mp4;
use super::Error;
use super::MediaContext;
#[cfg(feature = "mp4parse_fallible")]
use std::convert::TryInto as _;
use std::io::Cursor;
use std::io::Read as _;
extern crate test_assembler;
use self::test_assembler::*;
use boxes::{BoxType, FourCC};
enum BoxSize {
Short(u32),
Long(u64),
UncheckedShort(u32),
UncheckedLong(u64),
Auto,
}
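// Editorial note: in make_box() below, Short/Long write the declared size and
// assert it matches the bytes actually produced, the Unchecked* variants skip
// that assertion so tests can build deliberately malformed boxes, and Auto
// back-patches the real size via a test_assembler Label.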
#[allow(clippy::trivially_copy_pass_by_ref)] // TODO: Consider reworking to a copy
fn make_box<F>(size: BoxSize, name: &[u8; 4], func: F) -> Cursor<Vec<u8>>
where
F: Fn(Section) -> Section,
{
let mut section = Section::new();
let box_size = Label::new();
section = match size {
BoxSize::Short(size) | BoxSize::UncheckedShort(size) => section.B32(size),
BoxSize::Long(_) | BoxSize::UncheckedLong(_) => section.B32(1),
BoxSize::Auto => section.B32(&box_size),
};
section = section.append_bytes(name);
section = match size {
// The spec allows the 32-bit size to be 0 to indicate unknown
// length streams. It's not clear if this is valid when using a
// 64-bit size, so prohibit it for now.
BoxSize::Long(size) => {
assert!(size > 0);
section.B64(size)
}
BoxSize::UncheckedLong(size) => section.B64(size),
_ => section,
};
section = func(section);
match size {
BoxSize::Short(size) => {
if size > 0 {
assert_eq!(u64::from(size), section.size())
}
}
BoxSize::Long(size) => assert_eq!(size, section.size()),
BoxSize::Auto => {
assert!(
section.size() <= u64::from(u32::max_value()),
"Tried to use a long box with BoxSize::Auto"
);
box_size.set_const(section.size());
}
// Skip checking BoxSize::Unchecked* cases.
_ => (),
}
Cursor::new(section.get_contents().unwrap())
}
fn make_uuid_box<F>(size: BoxSize, uuid: &[u8; 16], func: F) -> Cursor<Vec<u8>>
where
F: Fn(Section) -> Section,
{
make_box(size, b"uuid", |mut s| {
for b in uuid {
s = s.B8(*b);
}
func(s)
})
}
#[allow(clippy::trivially_copy_pass_by_ref)] // TODO: Consider reworking to a copy
fn make_fullbox<F>(size: BoxSize, name: &[u8; 4], version: u8, func: F) -> Cursor<Vec<u8>>
where
F: Fn(Section) -> Section,
{
make_box(size, name, |s| func(s.B8(version).B8(0).B8(0).B8(0)))
}
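// A minimal sketch (not part of the original suite): an empty box built with
// BoxSize::Auto is exactly 8 bytes -- the 32-bit size plus the fourcc.
#[test]
fn read_box_header_auto() {
    let mut stream = make_box(BoxSize::Auto, b"test", |s| s);
    let header = super::read_box_header(&mut stream).unwrap();
    assert_eq!(header.name, BoxType::UnknownBox(0x7465_7374)); // "test"
    assert_eq!(header.size, 8);
}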
#[test]
fn read_box_header_short() {
let mut stream = make_box(BoxSize::Short(8), b"test", |s| s);
let header = super::read_box_header(&mut stream).unwrap();
assert_eq!(header.name, BoxType::UnknownBox(0x7465_7374)); // "test"
assert_eq!(header.size, 8);
assert!(header.uuid.is_none());
}
#[test]
fn read_box_header_long() {
let mut stream = make_box(BoxSize::Long(16), b"test", |s| s);
let header = super::read_box_header(&mut stream).unwrap();
assert_eq!(header.name, BoxType::UnknownBox(0x7465_7374)); // "test"
assert_eq!(header.size, 16);
assert!(header.uuid.is_none());
}
#[test]
fn read_box_header_short_unknown_size() {
let mut stream = make_box(BoxSize::Short(0), b"test", |s| s);
match super::read_box_header(&mut stream) {
Err(Error::Unsupported(s)) => assert_eq!(s, "unknown sized box"),
_ => panic!("unexpected result reading box with unknown size"),
};
}
#[test]
fn read_box_header_short_invalid_size() {
let mut stream = make_box(BoxSize::UncheckedShort(2), b"test", |s| s);
match super::read_box_header(&mut stream) {
Err(Error::InvalidData(s)) => assert_eq!(s, "malformed size"),
_ => panic!("unexpected result reading box with invalid size"),
};
}
#[test]
fn read_box_header_long_invalid_size() {
let mut stream = make_box(BoxSize::UncheckedLong(2), b"test", |s| s);<|fim▁hole|> };
}
#[test]
fn read_box_header_uuid() {
const HEADER_UUID: [u8; 16] = [
0x85, 0xc0, 0xb6, 0x87, 0x82, 0x0f, 0x11, 0xe0, 0x81, 0x11, 0xf4, 0xce, 0x46, 0x2b, 0x6a,
0x48,
];
let mut stream = make_uuid_box(BoxSize::Short(24), &HEADER_UUID, |s| s);
let mut iter = super::BoxIter::new(&mut stream);
let stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::UuidBox);
assert_eq!(stream.head.size, 24);
assert!(stream.head.uuid.is_some());
assert_eq!(stream.head.uuid.unwrap(), HEADER_UUID);
}
#[test]
fn read_box_header_truncated_uuid() {
const HEADER_UUID: [u8; 16] = [
0x85, 0xc0, 0xb6, 0x87, 0x82, 0x0f, 0x11, 0xe0, 0x81, 0x11, 0xf4, 0xce, 0x46, 0x2b, 0x6a,
0x48,
];
let mut stream = make_uuid_box(BoxSize::UncheckedShort(23), &HEADER_UUID, |s| s);
let mut iter = super::BoxIter::new(&mut stream);
let stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::UuidBox);
assert_eq!(stream.head.size, 23);
assert!(stream.head.uuid.is_none());
}
#[test]
fn read_ftyp() {
let mut stream = make_box(BoxSize::Short(24), b"ftyp", |s| {
s.append_bytes(b"mp42")
.B32(0) // minor version
.append_bytes(b"isom")
.append_bytes(b"mp42")
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::FileTypeBox);
assert_eq!(stream.head.size, 24);
let parsed = super::read_ftyp(&mut stream).unwrap();
assert_eq!(parsed.major_brand, FourCC::from(*b"mp42")); // mp42
assert_eq!(parsed.minor_version, 0);
assert_eq!(parsed.compatible_brands.len(), 2);
assert_eq!(parsed.compatible_brands[0], FourCC::from(*b"isom")); // isom
assert_eq!(parsed.compatible_brands[1], FourCC::from(*b"mp42")); // mp42
}
#[test]
fn read_truncated_ftyp() {
// We declare a 24 byte box, but only write 20 bytes.
let mut stream = make_box(BoxSize::UncheckedShort(24), b"ftyp", |s| {
s.append_bytes(b"mp42")
.B32(0) // minor version
.append_bytes(b"isom")
});
let mut context = MediaContext::new();
match read_mp4(&mut stream, &mut context) {
Err(Error::UnexpectedEOF) => (),
Ok(_) => panic!("expected an error result"),
_ => panic!("expected a different error result"),
}
}
#[test]
fn read_ftyp_case() {
// Brands in BMFF are represented as a u32, so it would seem clear that
// 0x6d703432 ("mp42") is not equal to 0x4d503432 ("MP42"), but some
// demuxers treat these as case-insensitive strings, e.g. street.mp4's
// major brand is "MP42". I haven't seen case-insensitive
// compatible_brands (which we also test here), but it doesn't seem
// unlikely given the major_brand behaviour.
let mut stream = make_box(BoxSize::Auto, b"ftyp", |s| {
s.append_bytes(b"MP42")
.B32(0) // minor version
.append_bytes(b"ISOM")
.append_bytes(b"MP42")
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::FileTypeBox);
assert_eq!(stream.head.size, 24);
let parsed = super::read_ftyp(&mut stream).unwrap();
assert_eq!(parsed.major_brand, FourCC::from(*b"MP42"));
assert_eq!(parsed.minor_version, 0);
assert_eq!(parsed.compatible_brands.len(), 2);
assert_eq!(parsed.compatible_brands[0], FourCC::from(*b"ISOM")); // ISOM
assert_eq!(parsed.compatible_brands[1], FourCC::from(*b"MP42")); // MP42
}
#[test]
fn read_elst_v0() {
let mut stream = make_fullbox(BoxSize::Short(28), b"elst", 0, |s| {
s.B32(1) // list count
// first entry
.B32(1234) // duration
.B32(5678) // time
.B16(12) // rate integer
.B16(34) // rate fraction
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::EditListBox);
assert_eq!(stream.head.size, 28);
let parsed = super::read_elst(&mut stream).unwrap();
assert_eq!(parsed.edits.len(), 1);
assert_eq!(parsed.edits[0].segment_duration, 1234);
assert_eq!(parsed.edits[0].media_time, 5678);
assert_eq!(parsed.edits[0].media_rate_integer, 12);
assert_eq!(parsed.edits[0].media_rate_fraction, 34);
}
#[test]
fn read_elst_v1() {
let mut stream = make_fullbox(BoxSize::Short(56), b"elst", 1, |s| {
s.B32(2) // list count
// first entry
.B64(1234) // duration
.B64(5678) // time
.B16(12) // rate integer
.B16(34) // rate fraction
// second entry
.B64(1234) // duration
.B64(5678) // time
.B16(12) // rate integer
.B16(34) // rate fraction
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::EditListBox);
assert_eq!(stream.head.size, 56);
let parsed = super::read_elst(&mut stream).unwrap();
assert_eq!(parsed.edits.len(), 2);
assert_eq!(parsed.edits[1].segment_duration, 1234);
assert_eq!(parsed.edits[1].media_time, 5678);
assert_eq!(parsed.edits[1].media_rate_integer, 12);
assert_eq!(parsed.edits[1].media_rate_fraction, 34);
}
#[test]
fn read_mdhd_v0() {
let mut stream = make_fullbox(BoxSize::Short(32), b"mdhd", 0, |s| {
s.B32(0)
.B32(0)
.B32(1234) // timescale
.B32(5678) // duration
.B32(0)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::MediaHeaderBox);
assert_eq!(stream.head.size, 32);
let parsed = super::read_mdhd(&mut stream).unwrap();
assert_eq!(parsed.timescale, 1234);
assert_eq!(parsed.duration, 5678);
}
#[test]
fn read_mdhd_v1() {
let mut stream = make_fullbox(BoxSize::Short(44), b"mdhd", 1, |s| {
s.B64(0)
.B64(0)
.B32(1234) // timescale
.B64(5678) // duration
.B32(0)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::MediaHeaderBox);
assert_eq!(stream.head.size, 44);
let parsed = super::read_mdhd(&mut stream).unwrap();
assert_eq!(parsed.timescale, 1234);
assert_eq!(parsed.duration, 5678);
}
#[test]
fn read_mdhd_unknown_duration() {
let mut stream = make_fullbox(BoxSize::Short(32), b"mdhd", 0, |s| {
s.B32(0)
.B32(0)
.B32(1234) // timescale
.B32(::std::u32::MAX) // duration
.B32(0)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::MediaHeaderBox);
assert_eq!(stream.head.size, 32);
let parsed = super::read_mdhd(&mut stream).unwrap();
assert_eq!(parsed.timescale, 1234);
assert_eq!(parsed.duration, ::std::u64::MAX);
}
#[test]
fn read_mdhd_invalid_timescale() {
let mut stream = make_fullbox(BoxSize::Short(44), b"mdhd", 1, |s| {
s.B64(0)
.B64(0)
.B32(0) // timescale
.B64(5678) // duration
.B32(0)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::MediaHeaderBox);
assert_eq!(stream.head.size, 44);
let r = super::parse_mdhd(&mut stream, &mut super::Track::new(0));
assert_eq!(r.is_err(), true);
}
#[test]
fn read_mvhd_v0() {
let mut stream = make_fullbox(BoxSize::Short(108), b"mvhd", 0, |s| {
s.B32(0).B32(0).B32(1234).B32(5678).append_repeated(0, 80)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::MovieHeaderBox);
assert_eq!(stream.head.size, 108);
let parsed = super::read_mvhd(&mut stream).unwrap();
assert_eq!(parsed.timescale, 1234);
assert_eq!(parsed.duration, 5678);
}
#[test]
fn read_mvhd_v1() {
let mut stream = make_fullbox(BoxSize::Short(120), b"mvhd", 1, |s| {
s.B64(0).B64(0).B32(1234).B64(5678).append_repeated(0, 80)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::MovieHeaderBox);
assert_eq!(stream.head.size, 120);
let parsed = super::read_mvhd(&mut stream).unwrap();
assert_eq!(parsed.timescale, 1234);
assert_eq!(parsed.duration, 5678);
}
#[test]
fn read_mvhd_invalid_timescale() {
let mut stream = make_fullbox(BoxSize::Short(120), b"mvhd", 1, |s| {
s.B64(0).B64(0).B32(0).B64(5678).append_repeated(0, 80)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::MovieHeaderBox);
assert_eq!(stream.head.size, 120);
let r = super::parse_mvhd(&mut stream);
assert_eq!(r.is_err(), true);
}
#[test]
fn read_mvhd_unknown_duration() {
let mut stream = make_fullbox(BoxSize::Short(108), b"mvhd", 0, |s| {
s.B32(0)
.B32(0)
.B32(1234)
.B32(::std::u32::MAX)
.append_repeated(0, 80)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::MovieHeaderBox);
assert_eq!(stream.head.size, 108);
let parsed = super::read_mvhd(&mut stream).unwrap();
assert_eq!(parsed.timescale, 1234);
assert_eq!(parsed.duration, ::std::u64::MAX);
}
#[test]
fn read_vpcc_version_0() {
let data_length = 12u16;
let mut stream = make_fullbox(BoxSize::Auto, b"vpcC", 0, |s| {
s.B8(2)
.B8(0)
.B8(0x82)
.B8(0)
.B16(data_length)
.append_repeated(42, data_length as usize)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::VPCodecConfigurationBox);
let r = super::read_vpcc(&mut stream);
assert!(r.is_ok());
}
// TODO: it'd be better to find a real sample here.
#[test]
#[allow(clippy::inconsistent_digit_grouping)] // Allow odd grouping for test readability.
fn read_vpcc_version_1() {
let data_length = 12u16;
let mut stream = make_fullbox(BoxSize::Auto, b"vpcC", 1, |s| {
s.B8(2) // profile
.B8(0) // level
.B8(0b1000_011_0) // bitdepth (4 bits), chroma (3 bits), video full range (1 bit)
.B8(1) // color primaries
.B8(1) // transfer characteristics
.B8(1) // matrix
.B16(data_length)
.append_repeated(42, data_length as usize)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::VPCodecConfigurationBox);
let r = super::read_vpcc(&mut stream);
match r {
Ok(vpcc) => {
assert_eq!(vpcc.bit_depth, 8);
assert_eq!(vpcc.chroma_subsampling, 3);
assert_eq!(vpcc.video_full_range_flag, false);
assert_eq!(vpcc.matrix_coefficients.unwrap(), 1);
}
_ => panic!("vpcc parsing error"),
}
}
#[test]
fn read_hdlr() {
let mut stream = make_fullbox(BoxSize::Short(45), b"hdlr", 0, |s| {
s.B32(0)
.append_bytes(b"vide")
.B32(0)
.B32(0)
.B32(0)
.append_bytes(b"VideoHandler")
.B8(0) // null-terminate string
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::HandlerBox);
assert_eq!(stream.head.size, 45);
let parsed = super::read_hdlr(&mut stream).unwrap();
assert_eq!(parsed.handler_type, FourCC::from(*b"vide"));
}
#[test]
fn read_hdlr_short_name() {
let mut stream = make_fullbox(BoxSize::Short(33), b"hdlr", 0, |s| {
s.B32(0).append_bytes(b"vide").B32(0).B32(0).B32(0).B8(0) // null-terminate string
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::HandlerBox);
assert_eq!(stream.head.size, 33);
let parsed = super::read_hdlr(&mut stream).unwrap();
assert_eq!(parsed.handler_type, FourCC::from(*b"vide"));
}
#[test]
fn read_hdlr_zero_length_name() {
let mut stream = make_fullbox(BoxSize::Short(32), b"hdlr", 0, |s| {
s.B32(0).append_bytes(b"vide").B32(0).B32(0).B32(0)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::HandlerBox);
assert_eq!(stream.head.size, 32);
let parsed = super::read_hdlr(&mut stream).unwrap();
assert_eq!(parsed.handler_type, FourCC::from(*b"vide"));
}
fn flac_streaminfo() -> Vec<u8> {
vec![
0x10, 0x00, 0x10, 0x00, 0x00, 0x0a, 0x11, 0x00, 0x38, 0x32, 0x0a, 0xc4, 0x42, 0xf0, 0x00,
0xc9, 0xdf, 0xae, 0xb5, 0x66, 0xfc, 0x02, 0x15, 0xa3, 0xb1, 0x54, 0x61, 0x47, 0x0f, 0xfb,
0x05, 0x00, 0x33, 0xad,
]
}
#[test]
fn read_flac() {
let mut stream = make_box(BoxSize::Auto, b"fLaC", |s| {
s.append_repeated(0, 6) // reserved
.B16(1) // data reference index
.B32(0) // reserved
.B32(0) // reserved
.B16(2) // channel count
.B16(16) // bits per sample
.B16(0) // pre_defined
.B16(0) // reserved
.B32(44100 << 16) // Sample rate
.append_bytes(
&make_dfla(
FlacBlockType::StreamInfo,
true,
&flac_streaminfo(),
FlacBlockLength::Correct,
)
.into_inner(),
)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let r = super::read_audio_sample_entry(&mut stream);
assert!(r.is_ok());
}
#[derive(Clone, Copy)]
enum FlacBlockType {
StreamInfo = 0,
_Padding = 1,
_Application = 2,
_Seektable = 3,
_Comment = 4,
_Cuesheet = 5,
_Picture = 6,
_Reserved,
_Invalid = 127,
}
enum FlacBlockLength {
Correct,
Incorrect(usize),
}
fn make_dfla(
block_type: FlacBlockType,
last: bool,
data: &[u8],
data_length: FlacBlockLength,
) -> Cursor<Vec<u8>> {
assert!(data.len() < 1 << 24);
make_fullbox(BoxSize::Auto, b"dfLa", 0, |s| {
let flag = if last { 1 } else { 0 };
let size = match data_length {
FlacBlockLength::Correct => (data.len() as u32) & 0x00ff_ffff,
FlacBlockLength::Incorrect(size) => {
assert!(size < 1 << 24);
(size as u32) & 0x00ff_ffff
}
};
let block_type = (block_type as u32) & 0x7f;
s.B32(flag << 31 | block_type << 24 | size)
.append_bytes(data)
})
}
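// Worked example (editorial): for the 34-byte StreamInfo block used below with
// last == true and block_type == 0, the packed METADATA_BLOCK_HEADER word is
// 1 << 31 | 0 << 24 | 34 = 0x8000_0022.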
#[test]
fn read_dfla() {
let mut stream = make_dfla(
FlacBlockType::StreamInfo,
true,
&flac_streaminfo(),
FlacBlockLength::Correct,
);
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::FLACSpecificBox);
let dfla = super::read_dfla(&mut stream).unwrap();
assert_eq!(dfla.version, 0);
}
#[test]
fn long_flac_metadata() {
let streaminfo = flac_streaminfo();
let mut stream = make_dfla(
FlacBlockType::StreamInfo,
true,
&streaminfo,
FlacBlockLength::Incorrect(streaminfo.len() + 4),
);
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::FLACSpecificBox);
let r = super::read_dfla(&mut stream);
assert!(r.is_err());
}
#[test]
fn read_opus() {
let mut stream = make_box(BoxSize::Auto, b"Opus", |s| {
s.append_repeated(0, 6)
.B16(1) // data reference index
.B32(0)
.B32(0)
.B16(2) // channel count
.B16(16) // bits per sample
.B16(0)
.B16(0)
.B32(48000 << 16) // Sample rate is always 48 kHz for Opus.
.append_bytes(&make_dops().into_inner())
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let r = super::read_audio_sample_entry(&mut stream);
assert!(r.is_ok());
}
fn make_dops() -> Cursor<Vec<u8>> {
make_box(BoxSize::Auto, b"dOps", |s| {
s.B8(0) // version
.B8(2) // channel count
.B16(348) // pre-skip
.B32(44100) // original sample rate
.B16(0) // gain
.B8(0) // channel mapping
})
}
#[test]
fn read_dops() {
let mut stream = make_dops();
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
assert_eq!(stream.head.name, BoxType::OpusSpecificBox);
let r = super::read_dops(&mut stream);
assert!(r.is_ok());
}
#[test]
fn serialize_opus_header() {
let opus = super::OpusSpecificBox {
version: 0,
output_channel_count: 1,
pre_skip: 342,
input_sample_rate: 24000,
output_gain: 0,
channel_mapping_family: 0,
channel_mapping_table: None,
};
let mut v = Vec::<u8>::new();
super::serialize_opus_header(&opus, &mut v).unwrap();
assert_eq!(v.len(), 19);
assert_eq!(
v,
vec![
0x4f, 0x70, 0x75, 0x73, 0x48, 0x65, 0x61, 0x64, 0x01, 0x01, 0x56, 0x01, 0xc0, 0x5d,
0x00, 0x00, 0x00, 0x00, 0x00,
]
);
let opus = super::OpusSpecificBox {
version: 0,
output_channel_count: 6,
pre_skip: 152,
input_sample_rate: 48000,
output_gain: 0,
channel_mapping_family: 1,
channel_mapping_table: Some(super::ChannelMappingTable {
stream_count: 4,
coupled_count: 2,
channel_mapping: vec![0, 4, 1, 2, 3, 5].into(),
}),
};
let mut v = Vec::<u8>::new();
super::serialize_opus_header(&opus, &mut v).unwrap();
assert_eq!(v.len(), 27);
assert_eq!(
v,
vec![
0x4f, 0x70, 0x75, 0x73, 0x48, 0x65, 0x61, 0x64, 0x01, 0x06, 0x98, 0x00, 0x80, 0xbb,
0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x02, 0x00, 0x04, 0x01, 0x02, 0x03, 0x05,
]
);
}
#[test]
fn read_alac() {
let mut stream = make_box(BoxSize::Auto, b"alac", |s| {
s.append_repeated(0, 6) // reserved
.B16(1) // data reference index
.B32(0) // reserved
.B32(0) // reserved
.B16(2) // channel count
.B16(16) // bits per sample
.B16(0) // pre_defined
.B16(0) // reserved
.B32(44100 << 16) // Sample rate
.append_bytes(
&make_fullbox(BoxSize::Auto, b"alac", 0, |s| s.append_bytes(&[0xfa; 24]))
.into_inner(),
)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let r = super::read_audio_sample_entry(&mut stream);
assert!(r.is_ok());
}
#[test]
fn avcc_limit() {
let mut stream = make_box(BoxSize::Auto, b"avc1", |s| {
s.append_repeated(0, 6)
.B16(1)
.append_repeated(0, 16)
.B16(320)
.B16(240)
.append_repeated(0, 14)
.append_repeated(0, 32)
.append_repeated(0, 4)
.B32(0xffff_ffff)
.append_bytes(b"avcC")
.append_repeated(0, 100)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
match super::read_video_sample_entry(&mut stream, &FourCC::from(*b"isom")) {
Err(Error::InvalidData(s)) => assert_eq!(s, "read_buf size exceeds BUF_SIZE_LIMIT"),
Ok(_) => panic!("expected an error result"),
_ => panic!("expected a different error result"),
}
}
#[test]
fn esds_limit() {
let mut stream = make_box(BoxSize::Auto, b"mp4a", |s| {
s.append_repeated(0, 6)
.B16(1)
.B32(0)
.B32(0)
.B16(2)
.B16(16)
.B16(0)
.B16(0)
.B32(48000 << 16)
.B32(0xffff_ffff)
.append_bytes(b"esds")
.append_repeated(0, 100)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
match super::read_audio_sample_entry(&mut stream) {
Err(Error::InvalidData(s)) => assert_eq!(s, "read_buf size exceeds BUF_SIZE_LIMIT"),
Ok(_) => panic!("expected an error result"),
_ => panic!("expected a different error result"),
}
}
#[test]
fn esds_limit_2() {
let mut stream = make_box(BoxSize::Auto, b"mp4a", |s| {
s.append_repeated(0, 6)
.B16(1)
.B32(0)
.B32(0)
.B16(2)
.B16(16)
.B16(0)
.B16(0)
.B32(48000 << 16)
.B32(8)
.append_bytes(b"esds")
.append_repeated(0, 4)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
match super::read_audio_sample_entry(&mut stream) {
Err(Error::UnexpectedEOF) => (),
Ok(_) => panic!("expected an error result"),
_ => panic!("expected a different error result"),
}
}
#[test]
fn read_elst_zero_entries() {
let mut stream = make_fullbox(BoxSize::Auto, b"elst", 0, |s| s.B32(0).B16(12).B16(34));
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
match super::read_elst(&mut stream) {
Ok(elst) => assert_eq!(elst.edits.len(), 0),
_ => panic!("expected no error"),
}
}
fn make_elst() -> Cursor<Vec<u8>> {
make_fullbox(BoxSize::Auto, b"elst", 1, |s| {
s.B32(1)
// first entry
.B64(1234) // duration
.B64(0xffff_ffff_ffff_ffff) // time
.B16(12) // rate integer
.B16(34) // rate fraction
})
}
#[test]
fn read_edts_bogus() {
// First edit list entry has a media_time of -1, so we expect a second
// edit list entry to be present to provide a valid media_time.
// Bogus edts are ignored.
let mut stream = make_box(BoxSize::Auto, b"edts", |s| {
s.append_bytes(&make_elst().into_inner())
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let mut track = super::Track::new(0);
match super::read_edts(&mut stream, &mut track) {
Ok(_) => {
assert_eq!(track.media_time, None);
assert_eq!(track.empty_duration, None);
}
_ => panic!("expected no error"),
}
}
#[test]
fn skip_padding_in_boxes() {
// Padding data could be added in the end of these boxes. Parser needs to skip
// them instead of returning error.
let box_names = vec![b"stts", b"stsc", b"stsz", b"stco", b"co64", b"stss"];
for name in box_names {
let mut stream = make_fullbox(BoxSize::Auto, name, 1, |s| {
s.append_repeated(0, 100) // add padding data
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
match name {
b"stts" => {
super::read_stts(&mut stream).expect("fail to skip padding: stts");
}
b"stsc" => {
super::read_stsc(&mut stream).expect("fail to skip padding: stsc");
}
b"stsz" => {
super::read_stsz(&mut stream).expect("fail to skip padding: stsz");
}
b"stco" => {
super::read_stco(&mut stream).expect("fail to skip padding: stco");
}
b"co64" => {
super::read_co64(&mut stream).expect("fail to skip padding: co64");
}
b"stss" => {
super::read_stss(&mut stream).expect("fail to skip padding: stss");
}
_ => (),
}
}
}
#[test]
fn skip_padding_in_stsd() {
// Padding data could be added in the end of stsd boxes. Parser needs to skip
// them instead of returning error.
let avc = make_box(BoxSize::Auto, b"avc1", |s| {
s.append_repeated(0, 6)
.B16(1)
.append_repeated(0, 16)
.B16(320)
.B16(240)
.append_repeated(0, 14)
.append_repeated(0, 32)
.append_repeated(0, 4)
.B32(0xffff_ffff)
.append_bytes(b"avcC")
.append_repeated(0, 100)
})
.into_inner();
let mut stream = make_fullbox(BoxSize::Auto, b"stsd", 0, |s| {
s.B32(1)
.append_bytes(avc.as_slice())
.append_repeated(0, 100) // add padding data
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
super::read_stsd(
&mut stream,
&mut super::Track::new(0),
&FourCC::from(*b"isom"),
)
.expect("fail to skip padding: stsd");
}
#[test]
fn read_qt_wave_atom() {
let esds = make_fullbox(BoxSize::Auto, b"esds", 0, |s| {
s.B8(0x03) // elementary stream descriptor tag
.B8(0x12) // esds length
.append_repeated(0, 2)
.B8(0x00) // flags
.B8(0x04) // decoder config descriptor tag
.B8(0x0d) // dcds length
.B8(0x6b) // mp3
.append_repeated(0, 12)
})
.into_inner();
let chan = make_box(BoxSize::Auto, b"chan", |s| {
s.append_repeated(0, 10) // we don't care its data.
})
.into_inner();
let wave = make_box(BoxSize::Auto, b"wave", |s| s.append_bytes(esds.as_slice())).into_inner();
let mut stream = make_box(BoxSize::Auto, b"mp4a", |s| {
s.append_repeated(0, 6)
.B16(1) // data_reference_count
            .B16(1) // version: qt -> 1
.append_repeated(0, 6)
.B16(2)
.B16(16)
.append_repeated(0, 4)
.B32(48000 << 16)
.append_repeated(0, 16)
.append_bytes(wave.as_slice())
.append_bytes(chan.as_slice())
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let sample_entry =
super::read_audio_sample_entry(&mut stream).expect("fail to read qt wave atom");
match sample_entry {
super::SampleEntry::Audio(sample_entry) => {
assert_eq!(sample_entry.codec_type, super::CodecType::MP3)
}
_ => panic!("fail to read audio sample enctry"),
}
}
#[test]
fn read_descriptor_80() {
let aac_esds = vec![
0x03, 0x80, 0x80, 0x80, 0x22, 0x00, 0x02, 0x00, 0x04, 0x80, 0x80, 0x80, 0x17, 0x40, 0x15,
0x00, 0x00, 0x00, 0x00, 0x03, 0x22, 0xBC, 0x00, 0x01, 0xF5, 0x83, 0x05, 0x80, 0x80, 0x80,
0x02, 0x11, 0x90, 0x06, 0x80, 0x80, 0x80, 0x01, 0x02,
];
let aac_dc_descriptor = &aac_esds[31..33];
let mut stream = make_box(BoxSize::Auto, b"esds", |s| {
s.B32(0) // reserved
.append_bytes(aac_esds.as_slice())
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let es = super::read_esds(&mut stream).unwrap();
assert_eq!(es.audio_codec, super::CodecType::AAC);
assert_eq!(es.audio_object_type, Some(2));
assert_eq!(es.extended_audio_object_type, None);
assert_eq!(es.audio_sample_rate, Some(48000));
assert_eq!(es.audio_channel_count, Some(2));
assert_eq!(es.codec_esds, aac_esds);
assert_eq!(es.decoder_specific_data, aac_dc_descriptor);
}
#[test]
fn read_esds() {
let aac_esds = vec![
0x03, 0x24, 0x00, 0x00, 0x00, 0x04, 0x1c, 0x40, 0x15, 0x00, 0x12, 0x00, 0x00, 0x01, 0xf4,
0x00, 0x00, 0x01, 0xf4, 0x00, 0x05, 0x0d, 0x13, 0x00, 0x05, 0x88, 0x05, 0x00, 0x48, 0x21,
0x10, 0x00, 0x56, 0xe5, 0x98, 0x06, 0x01, 0x02,
];
let aac_dc_descriptor = &aac_esds[22..35];
let mut stream = make_box(BoxSize::Auto, b"esds", |s| {
s.B32(0) // reserved
.append_bytes(aac_esds.as_slice())
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let es = super::read_esds(&mut stream).unwrap();
assert_eq!(es.audio_codec, super::CodecType::AAC);
assert_eq!(es.audio_object_type, Some(2));
assert_eq!(es.extended_audio_object_type, None);
assert_eq!(es.audio_sample_rate, Some(24000));
assert_eq!(es.audio_channel_count, Some(6));
assert_eq!(es.codec_esds, aac_esds);
assert_eq!(es.decoder_specific_data, aac_dc_descriptor);
}
#[test]
fn read_esds_aac_type5() {
let aac_esds = vec![
0x03, 0x80, 0x80, 0x80, 0x2F, 0x00, 0x00, 0x00, 0x04, 0x80, 0x80, 0x80, 0x21, 0x40, 0x15,
0x00, 0x15, 0x00, 0x00, 0x03, 0xED, 0xAA, 0x00, 0x03, 0x6B, 0x00, 0x05, 0x80, 0x80, 0x80,
0x0F, 0x2B, 0x01, 0x88, 0x02, 0xC4, 0x04, 0x90, 0x2C, 0x10, 0x8C, 0x80, 0x00, 0x00, 0xED,
0x40, 0x06, 0x80, 0x80, 0x80, 0x01, 0x02,
];
let aac_dc_descriptor = &aac_esds[31..46];
let mut stream = make_box(BoxSize::Auto, b"esds", |s| {
s.B32(0) // reserved
.append_bytes(aac_esds.as_slice())
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let es = super::read_esds(&mut stream).unwrap();
assert_eq!(es.audio_codec, super::CodecType::AAC);
assert_eq!(es.audio_object_type, Some(2));
assert_eq!(es.extended_audio_object_type, Some(5));
assert_eq!(es.audio_sample_rate, Some(24000));
assert_eq!(es.audio_channel_count, Some(8));
assert_eq!(es.codec_esds, aac_esds);
assert_eq!(es.decoder_specific_data, aac_dc_descriptor);
}
#[test]
fn read_stsd_mp4v() {
let mp4v = vec![
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xd0, 0x01, 0xe0, 0x00, 0x48,
0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00,
0x18, 0xff, 0xff, 0x00, 0x00, 0x00, 0x4c, 0x65, 0x73, 0x64, 0x73, 0x00, 0x00, 0x00, 0x00,
0x03, 0x3e, 0x00, 0x00, 0x1f, 0x04, 0x36, 0x20, 0x11, 0x01, 0x77, 0x00, 0x00, 0x03, 0xe8,
0x00, 0x00, 0x03, 0xe8, 0x00, 0x05, 0x27, 0x00, 0x00, 0x01, 0xb0, 0x05, 0x00, 0x00, 0x01,
0xb5, 0x0e, 0xcf, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20, 0x00, 0x86, 0xe0, 0x00,
0x2e, 0xa6, 0x60, 0x16, 0xf4, 0x01, 0xf4, 0x24, 0xc8, 0x01, 0xe5, 0x16, 0x84, 0x3c, 0x14,
0x63, 0x06, 0x01, 0x02,
];
let esds_specific_data = &mp4v[90..];
println!("esds_specific_data {:?}", esds_specific_data);
let mut stream = make_box(BoxSize::Auto, b"mp4v", |s| s.append_bytes(mp4v.as_slice()));
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let sample_entry =
super::read_video_sample_entry(&mut stream, &FourCC::from(*b"isom")).unwrap();
match sample_entry {
super::SampleEntry::Video(v) => {
assert_eq!(v.codec_type, super::CodecType::MP4V);
assert_eq!(v.width, 720);
assert_eq!(v.height, 480);
match v.codec_specific {
super::VideoCodecSpecific::ESDSConfig(esds_data) => {
assert_eq!(esds_data.as_slice(), esds_specific_data);
}
_ => panic!("it should be ESDSConfig!"),
}
}
_ => panic!("it should be a video sample entry!"),
}
}
#[test]
fn read_esds_one_byte_extension_descriptor() {
let esds = vec![
0x00, 0x03, 0x80, 0x1b, 0x00, 0x00, 0x00, 0x04, 0x80, 0x12, 0x40, 0x15, 0x00, 0x06, 0x00,
0x00, 0x01, 0xfe, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x05, 0x80, 0x02, 0x11, 0x90, 0x06, 0x01,
0x02,
];
let mut stream = make_box(BoxSize::Auto, b"esds", |s| {
s.B32(0) // reserved
.append_bytes(esds.as_slice())
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let es = super::read_esds(&mut stream).unwrap();
assert_eq!(es.audio_codec, super::CodecType::AAC);
assert_eq!(es.audio_object_type, Some(2));
assert_eq!(es.extended_audio_object_type, None);
assert_eq!(es.audio_sample_rate, Some(48000));
assert_eq!(es.audio_channel_count, Some(2));
}
#[test]
fn read_esds_byte_extension_descriptor() {
let mut stream = make_box(BoxSize::Auto, b"esds", |s| {
s.B32(0) // reserved
.B16(0x0003)
.B16(0x8181) // extension byte length 0x81
.append_repeated(0, 0x81)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
match super::read_esds(&mut stream) {
Ok(_) => (),
_ => panic!("fail to parse descriptor extension byte length"),
}
}
#[test]
fn read_f4v_stsd() {
let mut stream = make_box(BoxSize::Auto, b".mp3", |s| {
s.append_repeated(0, 6)
.B16(1)
.B16(0)
.append_repeated(0, 6)
.B16(2)
.B16(16)
.append_repeated(0, 4)
.B32(48000 << 16)
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let sample_entry =
super::read_audio_sample_entry(&mut stream).expect("failed to read f4v stsd atom");
match sample_entry {
super::SampleEntry::Audio(sample_entry) => {
assert_eq!(sample_entry.codec_type, super::CodecType::MP3)
}
_ => panic!("fail to read audio sample enctry"),
}
}
#[test]
fn max_table_limit() {
let elst = make_fullbox(BoxSize::Auto, b"elst", 1, |s| {
s.B32(super::TABLE_SIZE_LIMIT + 1)
})
.into_inner();
let mut stream = make_box(BoxSize::Auto, b"edts", |s| s.append_bytes(elst.as_slice()));
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let mut track = super::Track::new(0);
match super::read_edts(&mut stream, &mut track) {
Err(Error::OutOfMemory) => (),
Ok(_) => panic!("expected an error result"),
_ => panic!("expected a different error result"),
}
}
#[test]
fn unknown_video_sample_entry() {
let unknown_codec = make_box(BoxSize::Auto, b"yyyy", |s| s.append_repeated(0, 16)).into_inner();
let mut stream = make_box(BoxSize::Auto, b"xxxx", |s| {
s.append_repeated(0, 6)
.B16(1)
.append_repeated(0, 16)
.B16(0)
.B16(0)
.append_repeated(0, 14)
.append_repeated(0, 32)
.append_repeated(0, 4)
.append_bytes(unknown_codec.as_slice())
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
match super::read_video_sample_entry(&mut stream, &FourCC::from(*b"isom")) {
Ok(super::SampleEntry::Unknown) => (),
_ => panic!("expected a different error result"),
}
}
#[test]
fn unknown_audio_sample_entry() {
let unknown_codec = make_box(BoxSize::Auto, b"yyyy", |s| s.append_repeated(0, 16)).into_inner();
let mut stream = make_box(BoxSize::Auto, b"xxxx", |s| {
s.append_repeated(0, 6)
.B16(1)
.B32(0)
.B32(0)
.B16(2)
.B16(16)
.B16(0)
.B16(0)
.B32(48000 << 16)
.append_bytes(unknown_codec.as_slice())
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
match super::read_audio_sample_entry(&mut stream) {
Ok(super::SampleEntry::Unknown) => (),
_ => panic!("expected a different error result"),
}
}
#[test]
fn read_esds_invalid_descriptor() {
// tag 0x06, 0xff, 0x7f is incorrect.
let esds = vec![
0x03, 0x80, 0x80, 0x80, 0x22, 0x00, 0x00, 0x00, 0x04, 0x80, 0x80, 0x80, 0x14, 0x40, 0x01,
0x00, 0x04, 0x00, 0x00, 0x00, 0xfa, 0x00, 0x00, 0x00, 0xfa, 0x00, 0x05, 0x80, 0x80, 0x80,
0x02, 0xe8, 0x35, 0x06, 0xff, 0x7f, 0x00, 0x00,
];
let mut stream = make_box(BoxSize::Auto, b"esds", |s| {
s.B32(0) // reserved
.append_bytes(esds.as_slice())
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
match super::read_esds(&mut stream) {
Err(Error::InvalidData(s)) => assert_eq!(s, "Invalid descriptor."),
_ => panic!("unexpected result with invalid descriptor"),
}
}
#[test]
fn read_esds_redundant_descriptor() {
// the '2' at the end is redundant data.
let esds = vec![
3, 25, 0, 1, 0, 4, 19, 64, 21, 0, 0, 0, 0, 0, 0, 0, 0, 1, 119, 0, 5, 2, 18, 16, 6, 1, 2,
];
let mut stream = make_box(BoxSize::Auto, b"esds", |s| {
s.B32(0) // reserved
.append_bytes(esds.as_slice())
});
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
match super::read_esds(&mut stream) {
Ok(esds) => assert_eq!(esds.audio_codec, super::CodecType::AAC),
_ => panic!("unexpected result with invalid descriptor"),
}
}
#[test]
fn read_invalid_pssh() {
// invalid pssh header length
let pssh = vec![
0x00, 0x00, 0x00, 0x01, 0x70, 0x73, 0x73, 0x68, 0x01, 0x00, 0x00, 0x00, 0x10, 0x77, 0xef,
0xec, 0xc0, 0xb2, 0x4d, 0x02, 0xac, 0xe3, 0x3c, 0x1e, 0x52, 0xe2, 0xfb, 0x4b, 0x00, 0x00,
0x00, 0x02, 0x7e, 0x57, 0x1d, 0x01, 0x7e,
];
let mut stream = make_box(BoxSize::Auto, b"moov", |s| s.append_bytes(pssh.as_slice()));
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let mut context = super::MediaContext::new();
match super::read_moov(&mut stream, &mut context) {
Err(Error::InvalidData(s)) => assert_eq!(s, "read_buf size exceeds BUF_SIZE_LIMIT"),
_ => panic!("unexpected result with invalid descriptor"),
}
}
#[test]
fn read_stsd_lpcm() {
// Extract from sample converted by ffmpeg.
// "ffmpeg -i ./gizmo-short.mp4 -acodec pcm_s16le -ar 96000 -vcodec copy -f mov gizmo-short.mov"
let lpcm = vec![
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x03, 0x00, 0x10, 0xff, 0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x48, 0x40, 0xf7, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x18, 0x63, 0x68, 0x61, 0x6e, 0x00, 0x00, 0x00,
0x00, 0x00, 0x64, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
];
let mut stream = make_box(BoxSize::Auto, b"lpcm", |s| s.append_bytes(lpcm.as_slice()));
let mut iter = super::BoxIter::new(&mut stream);
let mut stream = iter.next_box().unwrap().unwrap();
let sample_entry = super::read_audio_sample_entry(&mut stream).unwrap();
match sample_entry {
#[allow(clippy::float_cmp)] // The float comparison below is valid and intended.
super::SampleEntry::Audio(a) => {
assert_eq!(a.codec_type, super::CodecType::LPCM);
assert_eq!(a.samplerate, 96000.0);
assert_eq!(a.channelcount, 1);
match a.codec_specific {
super::AudioCodecSpecific::LPCM => (),
_ => panic!("it should be LPCM!"),
}
}
_ => panic!("it should be a audio sample entry!"),
}
}
#[test]
fn read_to_end_() {
let mut src = b"1234567890".take(5);
let buf = src.read_into_try_vec().unwrap();
assert_eq!(buf.len(), 5);
assert_eq!(buf.into_inner(), b"12345");
}
#[test]
#[cfg(feature = "mp4parse_fallible")]
fn read_to_end_oom() {
let mut src = b"1234567890".take(std::usize::MAX.try_into().expect("usize < u64"));
assert!(src.read_into_try_vec().is_err());
}<|fim▁end|> | match super::read_box_header(&mut stream) {
Err(Error::InvalidData(s)) => assert_eq!(s, "malformed wide size"),
_ => panic!("unexpected result reading box with invalid size"), |
<|file_name|>TestAccessControl.cpp<|end_file_name|><|fim▁begin|>/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "access/AccessControl.h"
#include "access/examples/ExampleAccessControlDelegate.h"
#include <lib/core/CHIPCore.h>
#include <lib/support/UnitTestRegistration.h>
#include <nlunit-test.h>
namespace {
using namespace chip;
using namespace chip::Access;
using Entry = AccessControl::Entry;
using EntryIterator = AccessControl::EntryIterator;
using Target = Entry::Target;
AccessControl accessControl(Examples::GetAccessControlDelegate(nullptr));
constexpr ClusterId kOnOffCluster = 0x0000'0006;
constexpr ClusterId kLevelControlCluster = 0x0000'0008;
constexpr ClusterId kAccessControlCluster = 0x0000'001F;
constexpr ClusterId kColorControlCluster = 0x0000'0300;
constexpr NodeId kPaseVerifier0 = NodeIdFromPAKEKeyId(0x0000);
constexpr NodeId kPaseVerifier1 = NodeIdFromPAKEKeyId(0x0001);
constexpr NodeId kPaseVerifier3 = NodeIdFromPAKEKeyId(0x0003);
constexpr NodeId kOperationalNodeId0 = 0x0123456789ABCDEF;
constexpr NodeId kOperationalNodeId1 = 0x1234567812345678;
constexpr NodeId kOperationalNodeId2 = 0x1122334455667788;
constexpr NodeId kOperationalNodeId3 = 0x1111111111111111;
constexpr NodeId kOperationalNodeId4 = 0x2222222222222222;
constexpr NodeId kOperationalNodeId5 = 0x3333333333333333;
constexpr CASEAuthTag kCASEAuthTag0 = 0x0001'0001;
constexpr CASEAuthTag kCASEAuthTag1 = 0x0002'0001;
constexpr CASEAuthTag kCASEAuthTag2 = 0xABCD'0002;
constexpr CASEAuthTag kCASEAuthTag3 = 0xABCD'0008;
constexpr CASEAuthTag kCASEAuthTag4 = 0xABCD'ABCD;
constexpr NodeId kCASEAuthTagAsNodeId0 = NodeIdFromCASEAuthTag(kCASEAuthTag0);
constexpr NodeId kCASEAuthTagAsNodeId1 = NodeIdFromCASEAuthTag(kCASEAuthTag1);
constexpr NodeId kCASEAuthTagAsNodeId2 = NodeIdFromCASEAuthTag(kCASEAuthTag2);
constexpr NodeId kCASEAuthTagAsNodeId3 = NodeIdFromCASEAuthTag(kCASEAuthTag3);
constexpr NodeId kCASEAuthTagAsNodeId4 = NodeIdFromCASEAuthTag(kCASEAuthTag4);
constexpr NodeId kGroup2 = NodeIdFromGroupId(0x0002);
constexpr NodeId kGroup4 = NodeIdFromGroupId(0x0004);
constexpr NodeId kGroup6 = NodeIdFromGroupId(0x0006);
constexpr NodeId kGroup8 = NodeIdFromGroupId(0x0008);
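// Editorial note: the helpers above pack smaller identifiers into reserved
// slices of the 64-bit NodeId space (ranges per the tables below), e.g.
//   NodeIdFromPAKEKeyId(0x0001)         == 0xFFFF'FFFB'0000'0001
//   NodeIdFromCASEAuthTag(0x0001'0001)  == 0xFFFF'FFFD'0001'0001
//   NodeIdFromGroupId(0x0002)           == 0xFFFF'FFFF'FFFF'0002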
constexpr AuthMode authModes[] = { AuthMode::kCase, AuthMode::kGroup };
constexpr FabricIndex fabricIndexes[] = { 1, 2, 3 };
constexpr Privilege privileges[] = { Privilege::kView, Privilege::kProxyView, Privilege::kOperate, Privilege::kManage,
Privilege::kAdminister };
constexpr NodeId subjects[][3] = { {
kOperationalNodeId0,
kCASEAuthTagAsNodeId1,
kCASEAuthTagAsNodeId2,
},
{
kGroup4,
kGroup6,
kGroup8,
} };
constexpr Target targets[] = {
{ .flags = Target::kCluster, .cluster = kOnOffCluster },
{ .flags = Target::kEndpoint, .endpoint = 3 },
{ .flags = Target::kCluster | Target::kEndpoint, .cluster = kLevelControlCluster, .endpoint = 5 },
};
constexpr FabricIndex invalidFabricIndexes[] = { kUndefinedFabricIndex };
// clang-format off
constexpr NodeId validCaseSubjects[] = {
0x0000'0000'0000'0001, // min operational
0x0000'0000'0000'0002,
0x0123'4567'89AB'CDEF,
0xFFFF'FFEF'FFFF'FFFE,
0xFFFF'FFEF'FFFF'FFFF, // max operational
NodeIdFromCASEAuthTag(0x0000'0001),
NodeIdFromCASEAuthTag(0x0000'0002),
NodeIdFromCASEAuthTag(0x0000'FFFE),
NodeIdFromCASEAuthTag(0x0000'FFFF),
NodeIdFromCASEAuthTag(0x0001'0001),
NodeIdFromCASEAuthTag(0x0001'0002),
NodeIdFromCASEAuthTag(0x0001'FFFE),
NodeIdFromCASEAuthTag(0x0001'FFFF),
NodeIdFromCASEAuthTag(0xFFFE'0001),
NodeIdFromCASEAuthTag(0xFFFE'0002),
NodeIdFromCASEAuthTag(0xFFFE'FFFE),
NodeIdFromCASEAuthTag(0xFFFE'FFFF),
NodeIdFromCASEAuthTag(0xFFFF'0001),
NodeIdFromCASEAuthTag(0xFFFF'0002),
NodeIdFromCASEAuthTag(0xFFFF'FFFE),
NodeIdFromCASEAuthTag(0xFFFF'FFFF),
};
// clang-format on
// clang-format off
constexpr NodeId validGroupSubjects[] = {
NodeIdFromGroupId(0x0001), // start of fabric-scoped
NodeIdFromGroupId(0x0002),
NodeIdFromGroupId(0x7FFE),
NodeIdFromGroupId(0x7FFF), // end of fabric-scoped
NodeIdFromGroupId(0x8000), // start of universal
NodeIdFromGroupId(0x8001),
NodeIdFromGroupId(0xFFFB),
NodeIdFromGroupId(0xFFFC), // end of universal
NodeIdFromGroupId(0xFFFD), // all proxies
NodeIdFromGroupId(0xFFFE), // all non sleepy
NodeIdFromGroupId(0xFFFF), // all nodes
};
// clang-format on
// clang-format off
constexpr NodeId validPaseSubjects[] = {
NodeIdFromPAKEKeyId(0x0000), // start
NodeIdFromPAKEKeyId(0x0001),
NodeIdFromPAKEKeyId(0xFFFE),
NodeIdFromPAKEKeyId(0xFFFF), // end
};
// clang-format on
// clang-format off
constexpr NodeId invalidSubjects[] = {
0x0000'0000'0000'0000, // unspecified
0xFFFF'FFF0'0000'0000, // start reserved
0xFFFF'FFF0'0000'0001,
0xFFFF'FFF0'FFFF'FFFE,
0xFFFF'FFF0'FFFF'FFFF, // end reserved
0xFFFF'FFF1'0000'0000, // start reserved
0xFFFF'FFF1'0000'0001,
0xFFFF'FFF1'FFFF'FFFE,
0xFFFF'FFF1'FFFF'FFFF, // end reserved
0xFFFF'FFF2'0000'0000, // start reserved
0xFFFF'FFF2'0000'0001,
0xFFFF'FFF2'FFFF'FFFE,
0xFFFF'FFF2'FFFF'FFFF, // end reserved
0xFFFF'FFF3'0000'0000, // start reserved
0xFFFF'FFF3'0000'0001,
0xFFFF'FFF3'FFFF'FFFE,
0xFFFF'FFF3'FFFF'FFFF, // end reserved
0xFFFF'FFF4'0000'0000, // start reserved
0xFFFF'FFF4'0000'0001,
0xFFFF'FFF4'FFFF'FFFE,
0xFFFF'FFF4'FFFF'FFFF, // end reserved
0xFFFF'FFF5'0000'0000, // start reserved
0xFFFF'FFF5'0000'0001,
0xFFFF'FFF5'FFFF'FFFE,
0xFFFF'FFF5'FFFF'FFFF, // end reserved
0xFFFF'FFF6'0000'0000, // start reserved
0xFFFF'FFF6'0000'0001,
0xFFFF'FFF6'FFFF'FFFE,
0xFFFF'FFF6'FFFF'FFFF, // end reserved
0xFFFF'FFF7'0000'0000, // start reserved
0xFFFF'FFF7'0000'0001,
0xFFFF'FFF7'FFFF'FFFE,
0xFFFF'FFF7'FFFF'FFFF, // end reserved
0xFFFF'FFF8'0000'0000, // start reserved
0xFFFF'FFF8'0000'0001,
0xFFFF'FFF8'FFFF'FFFE,
0xFFFF'FFF8'FFFF'FFFF, // end reserved
0xFFFF'FFF9'0000'0000, // start reserved
0xFFFF'FFF9'0000'0001,
0xFFFF'FFF9'FFFF'FFFE,
0xFFFF'FFF9'FFFF'FFFF, // end reserved
0xFFFF'FFFA'0000'0000, // start reserved
0xFFFF'FFFA'0000'0001,
0xFFFF'FFFA'FFFF'FFFE,
0xFFFF'FFFA'FFFF'FFFF, // end reserved
0xFFFF'FFFB'0001'0000, // PASE with unused bits used
0xFFFF'FFFB'0001'0001, // PASE with unused bits used
0xFFFF'FFFB'0001'FFFE, // PASE with unused bits used
0xFFFF'FFFB'0001'FFFF, // PASE with unused bits used
0xFFFF'FFFB'FFFE'0000, // PASE with unused bits used
0xFFFF'FFFB'FFFE'0001, // PASE with unused bits used
0xFFFF'FFFB'FFFE'FFFE, // PASE with unused bits used
0xFFFF'FFFB'FFFE'FFFF, // PASE with unused bits used
0xFFFF'FFFB'FFFF'0000, // PASE with unused bits used
0xFFFF'FFFB'FFFF'0001, // PASE with unused bits used
0xFFFF'FFFB'FFFF'FFFE, // PASE with unused bits used
0xFFFF'FFFB'FFFF'FFFF, // PASE with unused bits used
0xFFFF'FFFC'0000'0000, // start reserved
0xFFFF'FFFC'0000'0001,
0xFFFF'FFFC'FFFF'FFFE,
0xFFFF'FFFC'FFFF'FFFF, // end reserved
0xFFFF'FFFD'0000'0000, // CAT with version 0
0xFFFF'FFFD'0001'0000, // CAT with version 0
0xFFFF'FFFD'FFFE'0000, // CAT with version 0
0xFFFF'FFFD'FFFF'0000, // CAT with version 0
0xFFFF'FFFE'0000'0000, // start temporary local
0xFFFF'FFFE'0000'0001,
0xFFFF'FFFE'FFFF'FFFE,
0xFFFF'FFFE'FFFF'FFFF, // end temporary local (used for placeholder)
0xFFFF'FFFF'0000'0000, // start reserved
0xFFFF'FFFF'0000'0001,
0xFFFF'FFFF'FFFE'FFFE,
0xFFFF'FFFF'FFFE'FFFF, // end reserved
0xFFFF'FFFF'FFFF'0000, // group 0
};
// clang-format on
// clang-format off
constexpr ClusterId validClusters[] = {
0x0000'0000, // start std
0x0000'0001,
0x0000'7FFE,
0x0000'7FFF, // end std
0x0001'FC00, // start MS
0x0001'FC01,
0x0001'FFFD,
0x0001'FFFE, // end MS
0xFFFD'FC00, // start MS
0xFFFD'FC01,
0xFFFD'FFFD,
0xFFFD'FFFE, // end MS
0xFFFE'FC00, // start MS
0xFFFE'FC01,
0xFFFE'FFFD,
0xFFFE'FFFE, // end MS
};
// clang-format on
// clang-format off
constexpr ClusterId invalidClusters[] = {
0x0000'8000, // start unused
0x0000'8001,
0x0000'FBFE,
0x0000'FBFF, // end unused
0x0000'FC00, // start MS
0x0000'FC01,
0x0000'FFFD,
0x0000'FFFE, // end MS
0x0000'FFFF, // wildcard
0x0001'0000, // start std
0x0001'0001,
0x0001'7FFE,
0x0001'7FFF, // end std
0x0001'8000, // start unused
0x0001'8001,
0x0001'FBFE,
0x0001'FBFF, // end unused
0x0001'FFFF, // wildcard
0xFFFE'0000, // start std
0xFFFE'0001,
0xFFFE'7FFE,
0xFFFE'7FFF, // end std
0xFFFE'8000, // start unused
0xFFFE'8001,
0xFFFE'FBFE,
0xFFFE'FBFF, // end unused
0xFFFE'FFFF, // wildcard
0xFFFF'0000, // start std
0xFFFF'0001,
0xFFFF'7FFE,
0xFFFF'7FFF, // end std
0xFFFF'8000, // start unused
0xFFFF'8001,
0xFFFF'FBFE,
0xFFFF'FBFF, // end unused
0xFFFF'FC00, // start MS
0xFFFF'FC01,
0xFFFF'FFFD,
0xFFFF'FFFE, // end MS
0xFFFF'FFFF, // wildcard
};
// clang-format on
// clang-format off
constexpr EndpointId validEndpoints[] = {
0x0000, // start
0x0001,
0xFFFD,
0xFFFE, // end
};
// clang-format on
// clang-format off
constexpr EndpointId invalidEndpoints[] = {
kInvalidEndpointId
};
// clang-format on
// clang-format off
constexpr DeviceTypeId validDeviceTypes[] = {
0x0000'0000, // start
0x0000'0001,
0x0000'BFFE,
0x0000'BFFF, // end
0x0001'0000, // start
0x0001'0001,
0x0001'BFFE,
0x0001'BFFF, // end
0xFFFD'0000, // start
0xFFFD'0001,
0xFFFD'BFFE,
0xFFFD'BFFF, // end
0xFFFE'0000, // start
0xFFFE'0001,
0xFFFE'BFFE,
0xFFFE'BFFF, // end
};
// clang-format on
// clang-format off
constexpr DeviceTypeId invalidDeviceTypes[] = {
0x0000'C000, // start unused
0x0000'C001,
0x0000'FFFD,
0x0000'FFFE, // end unused
0x0000'FFFF, // wildcard
0x0001'C000, // start unused
0x0001'C001,
0x0001'FFFD,
0x0001'FFFE, // end unused
0x0001'FFFF, // wildcard
0xFFFE'C000, // start unused
0xFFFE'C001,
0xFFFE'FFFD,
0xFFFE'FFFE, // end unused
0xFFFE'FFFF, // wildcard
0xFFFF'0000, // start used
0xFFFF'0001,
0xFFFF'BFFE,
0xFFFF'BFFF, // end used
0xFFFF'C000, // start unused
0xFFFF'C001,
0xFFFF'FFFD,
0xFFFF'FFFE, // end unused
0xFFFF'FFFF, // wildcard
};
// clang-format on
// For testing, supports one subject and target, allows any value (valid or invalid)
class TestEntryDelegate : public Entry::Delegate
{
public:
void Release() override {}
CHIP_ERROR GetAuthMode(AuthMode & authMode) const override
{
authMode = mAuthMode;
return CHIP_NO_ERROR;
}
CHIP_ERROR GetFabricIndex(FabricIndex & fabricIndex) const override
{
fabricIndex = mFabricIndex;
return CHIP_NO_ERROR;
}
CHIP_ERROR GetPrivilege(Privilege & privilege) const override
{
privilege = mPrivilege;
return CHIP_NO_ERROR;
}
CHIP_ERROR SetAuthMode(AuthMode authMode) override
{
mAuthMode = authMode;
return CHIP_NO_ERROR;
}
CHIP_ERROR SetFabricIndex(FabricIndex fabricIndex) override
{
mFabricIndex = fabricIndex;
return CHIP_NO_ERROR;
}
CHIP_ERROR SetPrivilege(Privilege privilege) override
{
mPrivilege = privilege;
return CHIP_NO_ERROR;
}
CHIP_ERROR GetSubjectCount(size_t & count) const override
{
count = mSubjectCount;
return CHIP_NO_ERROR;
}
CHIP_ERROR GetSubject(size_t index, NodeId & subject) const override
{
VerifyOrDie(index < mSubjectCount);
subject = mSubject;
return CHIP_NO_ERROR;
}
CHIP_ERROR SetSubject(size_t index, NodeId subject) override
{
VerifyOrDie(index < mSubjectCount);
mSubject = subject;
return CHIP_NO_ERROR;
}
CHIP_ERROR AddSubject(size_t * index, NodeId subject) override
{
VerifyOrDie(mSubjectCount == 0);
mSubjectCount = 1;
mSubject = subject;
return CHIP_NO_ERROR;
}
CHIP_ERROR RemoveSubject(size_t index) override
{
VerifyOrDie(mSubjectCount == 1);
mSubjectCount = 0;
return CHIP_NO_ERROR;
}
CHIP_ERROR GetTargetCount(size_t & count) const override
{
count = mTargetCount;
return CHIP_NO_ERROR;
}
CHIP_ERROR GetTarget(size_t index, Target & target) const override
{
VerifyOrDie(index < mTargetCount);
target = mTarget;
return CHIP_NO_ERROR;
}
CHIP_ERROR SetTarget(size_t index, const Target & target) override
{
VerifyOrDie(index < mTargetCount);
mTarget = target;
return CHIP_NO_ERROR;
}
CHIP_ERROR AddTarget(size_t * index, const Target & target) override
{
VerifyOrDie(mTargetCount == 0);
mTargetCount = 1;
mTarget = target;
return CHIP_NO_ERROR;
}
CHIP_ERROR RemoveTarget(size_t index) override
{
VerifyOrDie(mTargetCount == 1);
mTargetCount = 0;
return CHIP_NO_ERROR;
}
FabricIndex mFabricIndex = 1;
Privilege mPrivilege = Privilege::kView;
AuthMode mAuthMode = AuthMode::kCase;
NodeId mSubject = kOperationalNodeId0;
Target mTarget = { .flags = Target::kCluster, .cluster = kOnOffCluster };
size_t mSubjectCount = 1;
size_t mTargetCount = 1;
};
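// Field-wise comparison of targets; a field participates only if selected in flags.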
bool operator==(const Target & a, const Target & b)
{
if (a.flags != b.flags)
return false;
if ((a.flags & Target::kCluster) && a.cluster != b.cluster)
return false;
if ((a.flags & Target::kEndpoint) && a.endpoint != b.endpoint)
return false;
if ((a.flags & Target::kDeviceType) && a.deviceType != b.deviceType)
return false;
return true;
}
bool operator!=(const Target & a, const Target & b)
{
return !(a == b);
}
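// Plain-data description of an ACL entry, used both to load entries into the
// access control module and as the expected value when reading them back.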
struct EntryData
{
static constexpr int kMaxSubjects = 3;
static constexpr int kMaxTargets = 3;
FabricIndex fabricIndex = kUndefinedFabricIndex;
Privilege privilege = Privilege::kView;
AuthMode authMode = AuthMode::kNone;
NodeId subjects[kMaxSubjects] = { 0 };
Target targets[kMaxTargets] = { { 0 } };
void Clear() { *this = EntryData(); }
bool IsEmpty() const { return authMode == AuthMode::kNone; }
size_t GetSubjectCount() const
{
size_t count = 0;
for (auto & subject : subjects)
{
if (subject == kUndefinedNodeId)
{
break;
}
count++;
}
return count;
}
void AddSubject(size_t * index, NodeId subject)
{
size_t count = GetSubjectCount();
if (count < kMaxSubjects)
{
subjects[count] = subject;
if (index)
{
*index = count;
}
}
}
void RemoveSubject(size_t index)
{
size_t count = GetSubjectCount();
if (index < count)
{
while (++index < kMaxSubjects)
{
subjects[index - 1] = subjects[index];
}
subjects[kMaxSubjects - 1] = { 0 };
}
}
size_t GetTargetCount() const
{
size_t count = 0;
for (auto & target : targets)
{
if (target.flags == 0)
{
break;
}
count++;
}
return count;
}
void AddTarget(size_t * index, const Target & target)
{
size_t count = GetTargetCount();
if (count < kMaxTargets)
{
targets[count] = target;
if (index)
{
*index = count;
}
}
}
void RemoveTarget(size_t index)
{
size_t count = GetTargetCount();
if (index < count)
{
while (++index < kMaxTargets)
{
targets[index - 1] = targets[index];
}
targets[kMaxTargets - 1] = { 0 };
}
}
};
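// Checks that an entry's auth mode, fabric index, privilege, subjects and targets
// all match the expected data.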
CHIP_ERROR CompareEntry(const Entry & entry, const EntryData & entryData)
{
AuthMode authMode = AuthMode::kNone;
ReturnErrorOnFailure(entry.GetAuthMode(authMode));
ReturnErrorCodeIf(authMode != entryData.authMode, CHIP_ERROR_INCORRECT_STATE);
FabricIndex fabricIndex = kUndefinedFabricIndex;
ReturnErrorOnFailure(entry.GetFabricIndex(fabricIndex));
ReturnErrorCodeIf(fabricIndex != entryData.fabricIndex, CHIP_ERROR_INCORRECT_STATE);
Privilege privilege = Privilege::kView;
ReturnErrorOnFailure(entry.GetPrivilege(privilege));
ReturnErrorCodeIf(privilege != entryData.privilege, CHIP_ERROR_INCORRECT_STATE);
size_t subjectCount = 0;
ReturnErrorOnFailure(entry.GetSubjectCount(subjectCount));
ReturnErrorCodeIf(subjectCount != entryData.GetSubjectCount(), CHIP_ERROR_INCORRECT_STATE);
for (size_t i = 0; i < subjectCount; ++i)
{
NodeId subject = kUndefinedNodeId;
ReturnErrorOnFailure(entry.GetSubject(i, subject));
ReturnErrorCodeIf(subject != entryData.subjects[i], CHIP_ERROR_INCORRECT_STATE);
}
size_t targetCount = 0;
ReturnErrorOnFailure(entry.GetTargetCount(targetCount));
ReturnErrorCodeIf(targetCount != entryData.GetTargetCount(), CHIP_ERROR_INCORRECT_STATE);
for (size_t i = 0; i < targetCount; ++i)
{
Target target;
ReturnErrorOnFailure(entry.GetTarget(i, target));
ReturnErrorCodeIf(target != entryData.targets[i], CHIP_ERROR_INCORRECT_STATE);
}
return CHIP_NO_ERROR;
}
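// Populates an entry from the expected data.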
CHIP_ERROR LoadEntry(Entry & entry, const EntryData & entryData)
{
ReturnErrorOnFailure(entry.SetAuthMode(entryData.authMode));
ReturnErrorOnFailure(entry.SetFabricIndex(entryData.fabricIndex));
ReturnErrorOnFailure(entry.SetPrivilege(entryData.privilege));
for (size_t i = 0; i < entryData.GetSubjectCount(); ++i)
{
ReturnErrorOnFailure(entry.AddSubject(nullptr, entryData.subjects[i]));
}
for (size_t i = 0; i < entryData.GetTargetCount(); ++i)
{
ReturnErrorOnFailure(entry.AddTarget(nullptr, entryData.targets[i]));
}
return CHIP_NO_ERROR;
}
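// Deletes the entry at index 0 until no entries remain.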
CHIP_ERROR ClearAccessControl(AccessControl & ac)
{
CHIP_ERROR err;
do
{
err = ac.DeleteEntry(0);
} while (err == CHIP_NO_ERROR);
return CHIP_NO_ERROR;
}
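// Checks that the access control module contains exactly `count` entries
// matching `entryData`, in order.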
CHIP_ERROR CompareAccessControl(AccessControl & ac, const EntryData * entryData, size_t count)
{
Entry entry;
for (size_t i = 0; i < count; ++i, ++entryData)
{
ReturnErrorOnFailure(ac.ReadEntry(i, entry));
ReturnErrorOnFailure(CompareEntry(entry, *entryData));
}
ReturnErrorCodeIf(ac.ReadEntry(count, entry) == CHIP_NO_ERROR, CHIP_ERROR_INCORRECT_STATE);
return CHIP_NO_ERROR;
}
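// Creates one entry in the access control module per element of `entryData`.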
CHIP_ERROR LoadAccessControl(AccessControl & ac, const EntryData * entryData, size_t count)
{
Entry entry;
for (size_t i = 0; i < count; ++i, ++entryData)
{
ReturnErrorOnFailure(ac.PrepareEntry(entry));
ReturnErrorOnFailure(LoadEntry(entry, *entryData));
ReturnErrorOnFailure(ac.CreateEntry(nullptr, entry));
}
return CHIP_NO_ERROR;
}
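// Fixture: nine entries spanning fabrics 1 and 2, CASE and group auth modes,
// with varying subjects (including CATs encoded as node IDs) and targets.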
constexpr EntryData entryData1[] = {
{
.fabricIndex = 1,
.privilege = Privilege::kAdminister,
.authMode = AuthMode::kCase,
.subjects = { kOperationalNodeId3 },
},
{
.fabricIndex = 1,
.privilege = Privilege::kView,
.authMode = AuthMode::kCase,
},
{
.fabricIndex = 2,
.privilege = Privilege::kAdminister,
.authMode = AuthMode::kCase,
.subjects = { kOperationalNodeId4 },
},
{
.fabricIndex = 1,
.privilege = Privilege::kOperate,
.authMode = AuthMode::kCase,
.targets = { { .flags = Target::kCluster, .cluster = kOnOffCluster } },
},
{
.fabricIndex = 2,
.privilege = Privilege::kManage,
.authMode = AuthMode::kCase,
.subjects = { kOperationalNodeId5 },
.targets = { { .flags = Target::kCluster | Target::kEndpoint, .cluster = kOnOffCluster, .endpoint = 2 } },
},
{
.fabricIndex = 2,
.privilege = Privilege::kProxyView,
.authMode = AuthMode::kGroup,
.subjects = { kGroup2 },
.targets = { { .flags = Target::kCluster | Target::kEndpoint, .cluster = kLevelControlCluster, .endpoint = 1 },
{ .flags = Target::kCluster, .cluster = kOnOffCluster },
{ .flags = Target::kEndpoint, .endpoint = 2 } },
},
{
.fabricIndex = 1,
.privilege = Privilege::kAdminister,
.authMode = AuthMode::kCase,
.subjects = { kCASEAuthTagAsNodeId0 },
},
{
.fabricIndex = 2,
.privilege = Privilege::kManage,
.authMode = AuthMode::kCase,
.subjects = { kCASEAuthTagAsNodeId3, kCASEAuthTagAsNodeId1 },
.targets = { { .flags = Target::kCluster, .cluster = kOnOffCluster } },
},
{
.fabricIndex = 2,
.privilege = Privilege::kOperate,
.authMode = AuthMode::kCase,
.subjects = { kCASEAuthTagAsNodeId4, kCASEAuthTagAsNodeId1 },
.targets = { { .flags = Target::kCluster, .cluster = kLevelControlCluster } },
},
};
constexpr size_t entryData1Count = ArraySize(entryData1);
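// One access-control check: who (subject descriptor), what (request path),
// with which privilege, and whether the check is expected to succeed.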
struct CheckData
{
SubjectDescriptor subjectDescriptor;
RequestPath requestPath;
Privilege privilege;
bool allow;
};
constexpr CheckData checkData1[] = {
// Checks for implicit PASE
{ .subjectDescriptor = { .fabricIndex = 0, .authMode = AuthMode::kPase, .subject = kPaseVerifier0 },
.requestPath = { .cluster = 1, .endpoint = 2 },
.privilege = Privilege::kAdminister,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kPase, .subject = kPaseVerifier0 },
.requestPath = { .cluster = 3, .endpoint = 4 },
.privilege = Privilege::kAdminister,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kPase, .subject = kPaseVerifier0 },
.requestPath = { .cluster = 5, .endpoint = 6 },
.privilege = Privilege::kAdminister,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kPase, .subject = kPaseVerifier1 },
.requestPath = { .cluster = 5, .endpoint = 6 },
.privilege = Privilege::kAdminister,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 3, .authMode = AuthMode::kPase, .subject = kPaseVerifier3 },
.requestPath = { .cluster = 7, .endpoint = 8 },
.privilege = Privilege::kAdminister,
.allow = true },
// Checks for entry 0
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId3 },
.requestPath = { .cluster = kAccessControlCluster, .endpoint = 0 },
.privilege = Privilege::kAdminister,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId3 },
.requestPath = { .cluster = 1, .endpoint = 2 },
.privilege = Privilege::kManage,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId3 },
.requestPath = { .cluster = 3, .endpoint = 4 },
.privilege = Privilege::kOperate,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId3 },
.requestPath = { .cluster = 5, .endpoint = 6 },
.privilege = Privilege::kView,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId3 },
.requestPath = { .cluster = 7, .endpoint = 8 },
.privilege = Privilege::kProxyView,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId3 },
.requestPath = { .cluster = 1, .endpoint = 2 },
.privilege = Privilege::kAdminister,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kGroup, .subject = kOperationalNodeId3 },
.requestPath = { .cluster = 1, .endpoint = 2 },
.privilege = Privilege::kAdminister,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId4 },
.requestPath = { .cluster = 1, .endpoint = 2 },
.privilege = Privilege::kAdminister,
.allow = false },
// Checks for entry 1
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId1 },
.requestPath = { .cluster = 11, .endpoint = 13 },
.privilege = Privilege::kView,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId1 },
.requestPath = { .cluster = 11, .endpoint = 13 },
.privilege = Privilege::kOperate,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId1 },
.requestPath = { .cluster = 11, .endpoint = 13 },
.privilege = Privilege::kView,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kGroup, .subject = kOperationalNodeId1 },
.requestPath = { .cluster = 11, .endpoint = 13 },
.privilege = Privilege::kView,
.allow = false },
// Checks for entry 2
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId4 },
.requestPath = { .cluster = kAccessControlCluster, .endpoint = 0 },
.privilege = Privilege::kAdminister,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId4 },
.requestPath = { .cluster = 1, .endpoint = 2 },
.privilege = Privilege::kManage,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId4 },
.requestPath = { .cluster = 3, .endpoint = 4 },
.privilege = Privilege::kOperate,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId4 },
.requestPath = { .cluster = 5, .endpoint = 6 },
.privilege = Privilege::kView,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId4 },
.requestPath = { .cluster = 7, .endpoint = 8 },
.privilege = Privilege::kProxyView,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId4 },
.requestPath = { .cluster = 1, .endpoint = 2 },
.privilege = Privilege::kAdminister,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kGroup, .subject = kOperationalNodeId4 },
.requestPath = { .cluster = 1, .endpoint = 2 },
.privilege = Privilege::kAdminister,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId3 },
.requestPath = { .cluster = 1, .endpoint = 2 },
.privilege = Privilege::kAdminister,
.allow = false },
// Checks for entry 3
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId1 },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 11 },
.privilege = Privilege::kOperate,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId2 },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 13 },
.privilege = Privilege::kOperate,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId1 },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 11 },
.privilege = Privilege::kOperate,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId1 },
.requestPath = { .cluster = 123, .endpoint = 11 },
.privilege = Privilege::kOperate,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId1 },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 11 },
.privilege = Privilege::kManage,
.allow = false },
// Checks for entry 4
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId5 },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 2 },
.privilege = Privilege::kManage,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kCase, .subject = kOperationalNodeId5 },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 2 },
.privilege = Privilege::kManage,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kGroup, .subject = kOperationalNodeId5 },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 2 },
.privilege = Privilege::kManage,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId3 },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 2 },
.privilege = Privilege::kManage,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId5 },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 2 },
.privilege = Privilege::kManage,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId5 },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 1 },
.privilege = Privilege::kManage,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kOperationalNodeId5 },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 2 },
.privilege = Privilege::kAdminister,
.allow = false },
// Checks for entry 5
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kGroup, .subject = kGroup2 },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 1 },
.privilege = Privilege::kProxyView,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kGroup, .subject = kGroup2 },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 3 },
.privilege = Privilege::kProxyView,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kGroup, .subject = kGroup2 },
.requestPath = { .cluster = kColorControlCluster, .endpoint = 2 },
.privilege = Privilege::kProxyView,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 1, .authMode = AuthMode::kGroup, .subject = kGroup2 },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 1 },
.privilege = Privilege::kProxyView,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kCase, .subject = kGroup2 },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 1 },
.privilege = Privilege::kProxyView,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kGroup, .subject = kGroup4 },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 1 },
.privilege = Privilege::kProxyView,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kGroup, .subject = kGroup2 },
.requestPath = { .cluster = kColorControlCluster, .endpoint = 1 },
.privilege = Privilege::kProxyView,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kGroup, .subject = kGroup2 },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 3 },
.privilege = Privilege::kProxyView,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2, .authMode = AuthMode::kGroup, .subject = kGroup2 },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 1 },
.privilege = Privilege::kOperate,
.allow = false },
// Checks for entry 6
{ .subjectDescriptor = { .fabricIndex = 2,
.authMode = AuthMode::kCase,
.cats = { kCASEAuthTag0, kUndefinedCAT, kUndefinedCAT } },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 1 },
.privilege = Privilege::kOperate,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 1,
.authMode = AuthMode::kCase,
.cats = { kCASEAuthTag0, kUndefinedCAT, kUndefinedCAT } },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 1 },
.privilege = Privilege::kOperate,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 1,
.authMode = AuthMode::kCase,
.cats = { kCASEAuthTag1, kUndefinedCAT, kUndefinedCAT } },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 1 },
.privilege = Privilege::kOperate,
.allow = false },
// Checks for entry 7
{ .subjectDescriptor = { .fabricIndex = 2,
.authMode = AuthMode::kCase,
.cats = { kCASEAuthTag0, kUndefinedCAT, kUndefinedCAT } },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 1 },
.privilege = Privilege::kOperate,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2,
.authMode = AuthMode::kCase,
.cats = { kCASEAuthTag0, kCASEAuthTag2, kUndefinedCAT } },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 1 },
.privilege = Privilege::kOperate,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2,
.authMode = AuthMode::kCase,
.cats = { kCASEAuthTag0, kCASEAuthTag3, kUndefinedCAT } },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 1 },
.privilege = Privilege::kOperate,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2,
.authMode = AuthMode::kCase,
.cats = { kCASEAuthTag0, kCASEAuthTag4, kUndefinedCAT } },
.requestPath = { .cluster = kOnOffCluster, .endpoint = 1 },
.privilege = Privilege::kManage,
.allow = true },
// Checks for entry 8
{ .subjectDescriptor = { .fabricIndex = 2,
.authMode = AuthMode::kCase,
.cats = { kCASEAuthTag0, kCASEAuthTag3, kUndefinedCAT } },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 1 },
.privilege = Privilege::kOperate,
.allow = false },
{ .subjectDescriptor = { .fabricIndex = 2,
.authMode = AuthMode::kCase,
.cats = { kCASEAuthTag0, kCASEAuthTag4, kUndefinedCAT } },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 2 },
.privilege = Privilege::kOperate,
.allow = true },
{ .subjectDescriptor = { .fabricIndex = 2,
.authMode = AuthMode::kCase,
.cats = { kCASEAuthTag1, kUndefinedCAT, kUndefinedCAT } },
.requestPath = { .cluster = kLevelControlCluster, .endpoint = 2 },
.privilege = Privilege::kOperate,
.allow = true },
};
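// Meta test: verify the load/compare helpers can detect a deliberately introduced difference.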
void MetaTest(nlTestSuite * inSuite, void * inContext)
{
NL_TEST_ASSERT(inSuite, LoadAccessControl(accessControl, entryData1, entryData1Count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, CompareAccessControl(accessControl, entryData1, entryData1Count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.DeleteEntry(3) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, CompareAccessControl(accessControl, entryData1, entryData1Count) != CHIP_NO_ERROR);
}
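// Validation: auth mode and subjects must agree; invalid combinations are
// rejected on both create and update.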
void TestAclValidateAuthModeSubject(nlTestSuite * inSuite, void * inContext)
{
TestEntryDelegate delegate; // outlive entry
Entry entry;
// Use prepared entry for valid cases
NL_TEST_ASSERT(inSuite, accessControl.PrepareEntry(entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(1) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetPrivilege(Privilege::kView) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, kOperationalNodeId0) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddTarget(nullptr, { .flags = Target::kCluster, .cluster = kOnOffCluster }) == CHIP_NO_ERROR);
// Each case tries to update the first entry, then add a second entry, then unconditionally delete it
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
// CASE and group may have empty subjects list
{
NL_TEST_ASSERT(inSuite, entry.RemoveSubject(0) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kGroup) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, kOperationalNodeId0) == CHIP_NO_ERROR);
}
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
for (auto subject : validCaseSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kGroup) == CHIP_NO_ERROR);
for (auto subject : validGroupSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
// Use test entry for invalid cases (to ensure it can hold invalid data)
entry.SetDelegate(delegate);
// Operational PASE not supported
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kPase) == CHIP_NO_ERROR);
for (auto subject : validPaseSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
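// CASE with group, PASE, or invalid subjects is rejected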
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
for (auto subject : validGroupSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto subject : validPaseSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto subject : invalidSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
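// Group with CASE, PASE, or invalid subjects is rejected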
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kGroup) == CHIP_NO_ERROR);
for (auto subject : validCaseSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto subject : validPaseSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto subject : invalidSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
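// PASE with CASE, group, or invalid subjects is rejected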
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kPase) == CHIP_NO_ERROR);
for (auto subject : validCaseSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto subject : validGroupSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto subject : invalidSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
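// None with any subject is rejected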
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kNone) == CHIP_NO_ERROR);
for (auto subject : validCaseSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto subject : validGroupSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto subject : validPaseSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto subject : invalidSubjects)
{
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
// Next cases have no subject
NL_TEST_ASSERT(inSuite, entry.RemoveSubject(0) == CHIP_NO_ERROR);
// PASE must have a subject
{
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kPase) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
// kNone is not a real auth mode, but it must also be rejected when there is no subject
{
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kNone) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
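// Validation: entries must carry a valid fabric index.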
void TestAclValidateFabricIndex(nlTestSuite * inSuite, void * inContext)
{
TestEntryDelegate delegate; // outlive entry
Entry entry;
// Use prepared entry for valid cases
NL_TEST_ASSERT(inSuite, accessControl.PrepareEntry(entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(1) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetPrivilege(Privilege::kView) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, kOperationalNodeId0) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddTarget(nullptr, { .flags = Target::kCluster, .cluster = kOnOffCluster }) == CHIP_NO_ERROR);
// Each case tries to update the first entry, then add a second entry, then unconditionally delete it
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
for (auto fabricIndex : fabricIndexes)
{
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(fabricIndex) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
// Use test entry for invalid cases (to ensure it can hold invalid data)
entry.SetDelegate(delegate);
for (auto fabricIndex : invalidFabricIndexes)
{
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(fabricIndex) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
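// Validation: all privileges are accepted, except administer granted via group auth mode.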
void TestAclValidatePrivilege(nlTestSuite * inSuite, void * inContext)
{
TestEntryDelegate delegate; // outlive entry
Entry entry;
// Use prepared entry for valid cases
NL_TEST_ASSERT(inSuite, accessControl.PrepareEntry(entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(1) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetPrivilege(Privilege::kView) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, kOperationalNodeId0) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddTarget(nullptr, { .flags = Target::kCluster, .cluster = kOnOffCluster }) == CHIP_NO_ERROR);
// Each case tries to update the first entry, then add a second entry, then unconditionally delete it
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
for (auto privilege : privileges)
{
NL_TEST_ASSERT(inSuite, entry.SetPrivilege(privilege) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
// Use test entry for invalid cases (to ensure it can hold invalid data)
entry.SetDelegate(delegate);
// Cannot grant administer privilege to group auth mode
{
NL_TEST_ASSERT(inSuite, entry.SetPrivilege(Privilege::kAdminister) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kGroup) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetSubject(0, kGroup4) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
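// Validation: targets must use a legal flag combination with valid IDs;
// device type targets are currently rejected (see TODO below).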
void TestAclValidateTarget(nlTestSuite * inSuite, void * inContext)
{
TestEntryDelegate delegate; // outlive entry
Entry entry;
// Use prepared entry for valid cases
NL_TEST_ASSERT(inSuite, accessControl.PrepareEntry(entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(1) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetPrivilege(Privilege::kView) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, kOperationalNodeId0) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddTarget(nullptr, { .flags = Target::kCluster, .cluster = kOnOffCluster }) == CHIP_NO_ERROR);
// Each case tries to update the first entry, then add a second entry, then unconditionally delete it
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
for (auto cluster : validClusters)
{
NL_TEST_ASSERT(inSuite, entry.SetTarget(0, { .flags = Target::kCluster, .cluster = cluster }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto endpoint : validEndpoints)
{
NL_TEST_ASSERT(inSuite, entry.SetTarget(0, { .flags = Target::kEndpoint, .endpoint = endpoint }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
// TODO(#14431): device type target not yet supported (flip != to == when supported)
for (auto deviceType : validDeviceTypes)
{
NL_TEST_ASSERT(inSuite, entry.SetTarget(0, { .flags = Target::kDeviceType, .deviceType = deviceType }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto cluster : validClusters)
{
for (auto endpoint : validEndpoints)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(0, { .flags = Target::kCluster | Target::kEndpoint, .cluster = cluster, .endpoint = endpoint }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) == CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
// TODO(#14431): device type target not yet supported (flip != to == when supported)
for (auto cluster : validClusters)
{
for (auto deviceType : validDeviceTypes)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(
0, { .flags = Target::kCluster | Target::kDeviceType, .cluster = cluster, .deviceType = deviceType }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
// Use test entry for invalid cases (to ensure it can hold invalid data)
entry.SetDelegate(delegate);
// Cannot target endpoint and device type
for (auto endpoint : validEndpoints)
{
for (auto deviceType : validDeviceTypes)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(
0, { .flags = Target::kEndpoint | Target::kDeviceType, .endpoint = endpoint, .deviceType = deviceType }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
// Cannot target all
for (auto cluster : validClusters)
{
for (auto endpoint : validEndpoints)
{
for (auto deviceType : validDeviceTypes)
{
NL_TEST_ASSERT(inSuite,
entry.SetTarget(0,
{ .flags = Target::kCluster | Target::kEndpoint | Target::kDeviceType,
.cluster = cluster,
.endpoint = endpoint,
.deviceType = deviceType }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
}
// Cannot target none
{
NL_TEST_ASSERT(inSuite, entry.SetTarget(0, { .flags = 0 }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto cluster : invalidClusters)
{
NL_TEST_ASSERT(inSuite, entry.SetTarget(0, { .flags = Target::kCluster, .cluster = cluster }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto endpoint : invalidEndpoints)
{
NL_TEST_ASSERT(inSuite, entry.SetTarget(0, { .flags = Target::kEndpoint, .endpoint = endpoint }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto deviceType : invalidDeviceTypes)
{
NL_TEST_ASSERT(inSuite, entry.SetTarget(0, { .flags = Target::kDeviceType, .deviceType = deviceType }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
for (auto cluster : invalidClusters)
{
for (auto endpoint : invalidEndpoints)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(0, { .flags = Target::kCluster | Target::kEndpoint, .cluster = cluster, .endpoint = endpoint }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
for (auto cluster : invalidClusters)
{
for (auto endpoint : validEndpoints)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(0, { .flags = Target::kCluster | Target::kEndpoint, .cluster = cluster, .endpoint = endpoint }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
for (auto cluster : validClusters)
{
for (auto endpoint : invalidEndpoints)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(0, { .flags = Target::kCluster | Target::kEndpoint, .cluster = cluster, .endpoint = endpoint }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
for (auto cluster : invalidClusters)
{
for (auto deviceType : invalidDeviceTypes)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(
0, { .flags = Target::kCluster | Target::kDeviceType, .cluster = cluster, .deviceType = deviceType }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
for (auto cluster : invalidClusters)
{
for (auto deviceType : validDeviceTypes)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(
0, { .flags = Target::kCluster | Target::kDeviceType, .cluster = cluster, .deviceType = deviceType }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
for (auto cluster : validClusters)
{
for (auto deviceType : invalidDeviceTypes)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(
0, { .flags = Target::kCluster | Target::kDeviceType, .cluster = cluster, .deviceType = deviceType }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
for (auto endpoint : invalidEndpoints)
{
for (auto deviceType : invalidDeviceTypes)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(
0, { .flags = Target::kEndpoint | Target::kDeviceType, .endpoint = endpoint, .deviceType = deviceType }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
for (auto endpoint : invalidEndpoints)
{
for (auto deviceType : validDeviceTypes)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(
0, { .flags = Target::kEndpoint | Target::kDeviceType, .endpoint = endpoint, .deviceType = deviceType }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
for (auto endpoint : validEndpoints)
{
for (auto deviceType : invalidDeviceTypes)
{
NL_TEST_ASSERT(
inSuite,
entry.SetTarget(
0, { .flags = Target::kEndpoint | Target::kDeviceType, .endpoint = endpoint, .deviceType = deviceType }) ==
CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
for (auto cluster : invalidClusters)
{
for (auto endpoint : invalidEndpoints)
{
for (auto deviceType : invalidDeviceTypes)
{
NL_TEST_ASSERT(inSuite,
entry.SetTarget(0,
{ .flags = Target::kCluster | Target::kEndpoint | Target::kDeviceType,
.cluster = cluster,
.endpoint = endpoint,
.deviceType = deviceType }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
}
for (auto cluster : invalidClusters)
{
for (auto endpoint : invalidEndpoints)
{
for (auto deviceType : validDeviceTypes)
{
NL_TEST_ASSERT(inSuite,
entry.SetTarget(0,
{ .flags = Target::kCluster | Target::kEndpoint | Target::kDeviceType,
.cluster = cluster,
.endpoint = endpoint,
.deviceType = deviceType }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
}
for (auto cluster : invalidClusters)
{
for (auto endpoint : validEndpoints)
{
for (auto deviceType : invalidDeviceTypes)
{
NL_TEST_ASSERT(inSuite,
entry.SetTarget(0,
{ .flags = Target::kCluster | Target::kEndpoint | Target::kDeviceType,
.cluster = cluster,
.endpoint = endpoint,
.deviceType = deviceType }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
}
for (auto cluster : validClusters)
{
for (auto endpoint : invalidEndpoints)
{
for (auto deviceType : invalidDeviceTypes)
{
NL_TEST_ASSERT(inSuite,
entry.SetTarget(0,
{ .flags = Target::kCluster | Target::kEndpoint | Target::kDeviceType,
.cluster = cluster,
.endpoint = endpoint,
.deviceType = deviceType }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
}
for (auto cluster : invalidClusters)
{
for (auto endpoint : validEndpoints)
{
for (auto deviceType : validDeviceTypes)
{
NL_TEST_ASSERT(inSuite,
entry.SetTarget(0,
{ .flags = Target::kCluster | Target::kEndpoint | Target::kDeviceType,
.cluster = cluster,
.endpoint = endpoint,
.deviceType = deviceType }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
}
for (auto cluster : validClusters)
{
for (auto endpoint : invalidEndpoints)
{
for (auto deviceType : validDeviceTypes)
{
NL_TEST_ASSERT(inSuite,
entry.SetTarget(0,
{ .flags = Target::kCluster | Target::kEndpoint | Target::kDeviceType,
.cluster = cluster,
.endpoint = endpoint,
.deviceType = deviceType }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
}
for (auto cluster : validClusters)
{
for (auto endpoint : validEndpoints)
{
for (auto deviceType : invalidDeviceTypes)
{
NL_TEST_ASSERT(inSuite,
entry.SetTarget(0,
{ .flags = Target::kCluster | Target::kEndpoint | Target::kDeviceType,
.cluster = cluster,
.endpoint = endpoint,
.deviceType = deviceType }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(0, entry) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(nullptr, entry) != CHIP_NO_ERROR);
accessControl.DeleteEntry(1);
}
}
}
}
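// Verify Check() returns the expected allow/deny result for each item in checkData1.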
void TestCheck(nlTestSuite * inSuite, void * inContext)
{
LoadAccessControl(accessControl, entryData1, entryData1Count);
for (const auto & checkData : checkData1)
{
CHIP_ERROR expectedResult = checkData.allow ? CHIP_NO_ERROR : CHIP_ERROR_ACCESS_DENIED;
NL_TEST_ASSERT(inSuite,
accessControl.Check(checkData.subjectDescriptor, checkData.requestPath, checkData.privilege) ==
expectedResult);
}
}
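// Create entries one at a time, verifying all previously created entries after each create.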
void TestCreateReadEntry(nlTestSuite * inSuite, void * inContext)
{
for (size_t i = 0; i < entryData1Count; ++i)
{
NL_TEST_ASSERT(inSuite, LoadAccessControl(accessControl, entryData1 + i, 1) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, CompareAccessControl(accessControl, entryData1, i + 1) == CHIP_NO_ERROR);
}
}
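// Delete every contiguous run of entries (all positions and lengths) and verify the survivors.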
void TestDeleteEntry(nlTestSuite * inSuite, void * inContext)
{
EntryData data[entryData1Count];
for (size_t pos = 0; pos < ArraySize(data); ++pos)
{
for (size_t count = ArraySize(data) - pos; count > 0; --count)
{
memcpy(data, entryData1, sizeof(data));
NL_TEST_ASSERT(inSuite, ClearAccessControl(accessControl) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, LoadAccessControl(accessControl, data, ArraySize(data)) == CHIP_NO_ERROR);
memmove(&data[pos], &data[pos + count], (ArraySize(data) - count - pos) * sizeof(data[0]));
for (size_t i = 0; i < count; ++i)
{
NL_TEST_ASSERT(inSuite, accessControl.DeleteEntry(pos) == CHIP_NO_ERROR);
}
NL_TEST_ASSERT(inSuite, CompareAccessControl(accessControl, data, ArraySize(data) - count) == CHIP_NO_ERROR);
}
}
}
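// Fabric-filtered create: the returned index is relative to the entry's fabric.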
void TestFabricFilteredCreateEntry(nlTestSuite * inSuite, void * inContext)
{
for (auto & fabricIndex : fabricIndexes)
{
for (size_t count = 0; count < entryData1Count; ++count)
{
NL_TEST_ASSERT(inSuite, ClearAccessControl(accessControl) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, LoadAccessControl(accessControl, entryData1, count) == CHIP_NO_ERROR);
constexpr size_t expectedIndexes[][entryData1Count] = {
{ 0, 1, 2, 2, 3, 3, 3, 4, 4 },
{ 0, 0, 0, 1, 1, 2, 3, 3, 4 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0 },
};
const size_t expectedIndex = expectedIndexes[&fabricIndex - fabricIndexes][count];
Entry entry;
NL_TEST_ASSERT(inSuite, accessControl.PrepareEntry(entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(fabricIndex) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
size_t outIndex = 999;
FabricIndex outFabricIndex = 123;
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(&outIndex, entry, &outFabricIndex) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, outIndex == expectedIndex);
NL_TEST_ASSERT(inSuite, outFabricIndex == fabricIndex);
}
}
}
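// Fabric-filtered read: fabric-relative indexes map to the correct absolute
// entries, and out-of-range indexes fail.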
void TestFabricFilteredReadEntry(nlTestSuite * inSuite, void * inContext)
{
NL_TEST_ASSERT(inSuite, LoadAccessControl(accessControl, entryData1, entryData1Count) == CHIP_NO_ERROR);
for (auto & fabricIndex : fabricIndexes)
{
constexpr size_t indexes[] = { 0, 1, 2, 3, 4, 5 };
for (auto & index : indexes)
{
constexpr size_t illegalIndex = entryData1Count;
constexpr size_t expectedIndexes[][ArraySize(indexes)] = {
{ 0, 1, 3, 6, illegalIndex, illegalIndex },
{ 2, 4, 5, 7, 8, illegalIndex },
{ illegalIndex, illegalIndex, illegalIndex, illegalIndex, illegalIndex, illegalIndex },
};
const size_t expectedIndex = expectedIndexes[&fabricIndex - fabricIndexes][&index - indexes];
Entry entry;
CHIP_ERROR err = accessControl.ReadEntry(index, entry, &fabricIndex);
if (expectedIndex != illegalIndex)
{
NL_TEST_ASSERT(inSuite, err == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, CompareEntry(entry, entryData1[expectedIndex]) == CHIP_NO_ERROR);
}
else
{
NL_TEST_ASSERT(inSuite, err != CHIP_NO_ERROR);
}
}
}
}
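// Iterate all entries, then iterate per fabric, verifying order and contents.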
void TestIterator(nlTestSuite * inSuite, void * inContext)
{
LoadAccessControl(accessControl, entryData1, entryData1Count);
FabricIndex fabricIndex;
EntryIterator iterator;
Entry entry;
size_t count;
NL_TEST_ASSERT(inSuite, accessControl.Entries(iterator) == CHIP_NO_ERROR);
count = 0;
while (iterator.Next(entry) == CHIP_NO_ERROR)
{
NL_TEST_ASSERT(inSuite, CompareEntry(entry, entryData1[count]) == CHIP_NO_ERROR);
count++;
}
NL_TEST_ASSERT(inSuite, count == entryData1Count);
fabricIndex = kUndefinedFabricIndex;
NL_TEST_ASSERT(inSuite, accessControl.Entries(iterator, &fabricIndex) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, iterator.Next(entry) != CHIP_NO_ERROR);
fabricIndex = 1;
NL_TEST_ASSERT(inSuite, accessControl.Entries(iterator, &fabricIndex) == CHIP_NO_ERROR);
size_t fabric1[] = { 0, 1, 3, 6 };
count = 0;
while (iterator.Next(entry) == CHIP_NO_ERROR)
{
NL_TEST_ASSERT(inSuite, CompareEntry(entry, entryData1[fabric1[count]]) == CHIP_NO_ERROR);
count++;
}
NL_TEST_ASSERT(inSuite, count == ArraySize(fabric1));
fabricIndex = 2;
NL_TEST_ASSERT(inSuite, accessControl.Entries(iterator, &fabricIndex) == CHIP_NO_ERROR);
size_t fabric2[] = { 2, 4, 5, 7, 8 };
count = 0;
while (iterator.Next(entry) == CHIP_NO_ERROR)
{
NL_TEST_ASSERT(inSuite, CompareEntry(entry, entryData1[fabric2[count]]) == CHIP_NO_ERROR);
count++;
}
NL_TEST_ASSERT(inSuite, count == ArraySize(fabric2));
}
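// A prepared entry starts empty and faithfully stores every combination of
// auth mode, fabric index and privilege, plus subjects and targets.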
void TestPrepareEntry(nlTestSuite * inSuite, void * inContext)
{
Entry entry;
for (auto authMode : authModes)
{
for (auto fabricIndex : fabricIndexes)
{
for (auto privilege : privileges)
{
NL_TEST_ASSERT(inSuite, accessControl.PrepareEntry(entry) == CHIP_NO_ERROR);
size_t subjectCount;
size_t targetCount;
NL_TEST_ASSERT(inSuite, entry.GetSubjectCount(subjectCount) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.GetTargetCount(targetCount) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, subjectCount == 0);
NL_TEST_ASSERT(inSuite, targetCount == 0);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(authMode) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(fabricIndex) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetPrivilege(privilege) == CHIP_NO_ERROR);
int subjectIndex;
switch (authMode)
{
default:
case AuthMode::kCase:
subjectIndex = 0;
break;
case AuthMode::kGroup:
subjectIndex = 1;
break;
}
for (auto subject : subjects[subjectIndex])
{
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, subject) == CHIP_NO_ERROR);
}
for (auto & target : targets)
{
NL_TEST_ASSERT(inSuite, entry.AddTarget(nullptr, target) == CHIP_NO_ERROR);
}
AuthMode a;
FabricIndex f;
Privilege p;
NL_TEST_ASSERT(inSuite, entry.GetAuthMode(a) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.GetFabricIndex(f) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.GetPrivilege(p) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, a == authMode);
NL_TEST_ASSERT(inSuite, f == fabricIndex);
NL_TEST_ASSERT(inSuite, p == privilege);
NL_TEST_ASSERT(inSuite, entry.GetSubjectCount(subjectCount) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.GetTargetCount(targetCount) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, subjectCount == ArraySize(subjects[subjectIndex]));
NL_TEST_ASSERT(inSuite, targetCount == ArraySize(targets));
for (size_t i = 0; i < ArraySize(subjects[subjectIndex]); ++i)
{
NodeId n;
NL_TEST_ASSERT(inSuite, entry.GetSubject(i, n) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, n == subjects[subjectIndex][i]);
}
for (size_t i = 0; i < ArraySize(targets); ++i)
{
Target t;
NL_TEST_ASSERT(inSuite, entry.GetTarget(i, t) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, t == targets[i]);
}
}
}
}
}
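// Round-trip entries with various subject/target combinations through create/read,
// verifying each field individually.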
void TestSubjectsTargets(nlTestSuite * inSuite, void * inContext)
{
Entry entry;
size_t index;
NL_TEST_ASSERT(inSuite, accessControl.PrepareEntry(entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(1) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetPrivilege(Privilege::kAdminister) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddTarget(nullptr, { Target::kCluster, 1, 0, 0 }) == CHIP_NO_ERROR);
index = 999;
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(&index, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, int(index) == 0);
NL_TEST_ASSERT(inSuite, accessControl.PrepareEntry(entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(2) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetPrivilege(Privilege::kManage) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, kOperationalNodeId1) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddTarget(nullptr, { Target::kEndpoint, 0, 2, 0 }) == CHIP_NO_ERROR);
index = 999;
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(&index, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, int(index) == 1);
NL_TEST_ASSERT(inSuite, accessControl.PrepareEntry(entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(3) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetPrivilege(Privilege::kOperate) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kGroup) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, kGroup2) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddTarget(nullptr, { Target::kCluster, 2, 0, 0 }) == CHIP_NO_ERROR);
index = 999;
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(&index, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, int(index) == 2);
FabricIndex fabricIndex;
Privilege privilege;
AuthMode authMode;
size_t count;
NodeId subject;
Target target;
NL_TEST_ASSERT(inSuite, accessControl.ReadEntry(0, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.GetFabricIndex(fabricIndex) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, fabricIndex == 1);
NL_TEST_ASSERT(inSuite, entry.GetPrivilege(privilege) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, privilege == Privilege::kAdminister);
NL_TEST_ASSERT(inSuite, entry.GetAuthMode(authMode) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, authMode == AuthMode::kCase);
NL_TEST_ASSERT(inSuite, entry.GetSubjectCount(count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, count == 0);
NL_TEST_ASSERT(inSuite, entry.GetTargetCount(count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, count == 1);
NL_TEST_ASSERT(inSuite, entry.GetTarget(0, target) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, target.flags == Target::kCluster && target.cluster == 1);
NL_TEST_ASSERT(inSuite, accessControl.ReadEntry(1, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.GetFabricIndex(fabricIndex) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, fabricIndex == 2);
NL_TEST_ASSERT(inSuite, entry.GetPrivilege(privilege) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, privilege == Privilege::kManage);
NL_TEST_ASSERT(inSuite, entry.GetAuthMode(authMode) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, authMode == AuthMode::kCase);
NL_TEST_ASSERT(inSuite, entry.GetSubjectCount(count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, count == 1);
NL_TEST_ASSERT(inSuite, entry.GetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, subject == kOperationalNodeId1);
NL_TEST_ASSERT(inSuite, entry.GetTargetCount(count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, count == 1);
NL_TEST_ASSERT(inSuite, entry.GetTarget(0, target) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, target.flags == Target::kEndpoint && target.endpoint == 2);
NL_TEST_ASSERT(inSuite, accessControl.ReadEntry(2, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.GetFabricIndex(fabricIndex) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, fabricIndex == 3);
NL_TEST_ASSERT(inSuite, entry.GetPrivilege(privilege) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, privilege == Privilege::kOperate);
NL_TEST_ASSERT(inSuite, entry.GetAuthMode(authMode) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, authMode == AuthMode::kGroup);
NL_TEST_ASSERT(inSuite, entry.GetSubjectCount(count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, count == 1);
NL_TEST_ASSERT(inSuite, entry.GetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, subject == kGroup2);
NL_TEST_ASSERT(inSuite, entry.GetTargetCount(count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, count == 1);
NL_TEST_ASSERT(inSuite, entry.GetTarget(0, target) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, target.flags == Target::kCluster && target.cluster == 2);
NL_TEST_ASSERT(inSuite, accessControl.PrepareEntry(entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetFabricIndex(11) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetPrivilege(Privilege::kProxyView) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, 0x11111111AAAAAAAA) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, 0x22222222BBBBBBBB) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, 0x33333333CCCCCCCC) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddTarget(nullptr, { Target::kCluster | Target::kEndpoint, 11, 22, 0 }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddTarget(nullptr, { Target::kCluster | Target::kEndpoint, 33, 44, 0 }) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite,
entry.AddTarget(nullptr, { Target::kCluster | Target::kEndpoint, 0xAAAAFC01, 0x6666, 0 }) == CHIP_NO_ERROR);
index = 999;
NL_TEST_ASSERT(inSuite, accessControl.CreateEntry(&index, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, int(index) == 3);
NL_TEST_ASSERT(inSuite, accessControl.ReadEntry(3, entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.GetFabricIndex(fabricIndex) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, fabricIndex == 11);
NL_TEST_ASSERT(inSuite, entry.GetPrivilege(privilege) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, privilege == Privilege::kProxyView);
NL_TEST_ASSERT(inSuite, entry.GetAuthMode(authMode) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, authMode == AuthMode::kCase);
NL_TEST_ASSERT(inSuite, entry.GetSubjectCount(count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, count == 3);
NL_TEST_ASSERT(inSuite, entry.GetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, subject == 0x11111111AAAAAAAA);
NL_TEST_ASSERT(inSuite, entry.GetSubject(1, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, subject == 0x22222222BBBBBBBB);
NL_TEST_ASSERT(inSuite, entry.GetSubject(2, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, subject == 0x33333333CCCCCCCC);
NL_TEST_ASSERT(inSuite, entry.GetTargetCount(count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, count == 3);
NL_TEST_ASSERT(inSuite, entry.GetTarget(0, target) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite,
target.flags == (Target::kCluster | Target::kEndpoint) && target.cluster == 11 && target.endpoint == 22);
NL_TEST_ASSERT(inSuite, entry.GetTarget(1, target) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite,
target.flags == (Target::kCluster | Target::kEndpoint) && target.cluster == 33 && target.endpoint == 44);
NL_TEST_ASSERT(inSuite, entry.GetTarget(2, target) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite,
target.flags == (Target::kCluster | Target::kEndpoint) && target.cluster == 0xAAAAFC01 &&
target.endpoint == 0x6666);
NL_TEST_ASSERT(inSuite, entry.RemoveSubject(1) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.GetSubjectCount(count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, count == 2);
NL_TEST_ASSERT(inSuite, entry.GetSubject(0, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, subject == 0x11111111AAAAAAAA);
NL_TEST_ASSERT(inSuite, entry.GetSubject(1, subject) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, subject == 0x33333333CCCCCCCC);
NL_TEST_ASSERT(inSuite, entry.GetSubject(2, subject) != CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.RemoveTarget(1) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.GetTargetCount(count) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, count == 2);
NL_TEST_ASSERT(inSuite, entry.GetTarget(0, target) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite,
target.flags == (Target::kCluster | Target::kEndpoint) && target.cluster == 11 && target.endpoint == 22);
NL_TEST_ASSERT(inSuite, entry.GetTarget(1, target) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite,
target.flags == (Target::kCluster | Target::kEndpoint) && target.cluster == 0xAAAAFC01 &&
target.endpoint == 0x6666);
NL_TEST_ASSERT(inSuite, entry.GetTarget(2, target) != CHIP_NO_ERROR);
}
void TestUpdateEntry(nlTestSuite * inSuite, void * inContext)
{
EntryData data[entryData1Count];
memcpy(data, entryData1, sizeof(data));
NL_TEST_ASSERT(inSuite, LoadAccessControl(accessControl, data, ArraySize(data)) == CHIP_NO_ERROR);
for (size_t i = 0; i < ArraySize(data); ++i)
{
EntryData updateData;
updateData.authMode = authModes[i % ArraySize(authModes)];
updateData.fabricIndex = fabricIndexes[i % ArraySize(fabricIndexes)];
updateData.privilege = privileges[i % (ArraySize(privileges) - 1)];
updateData.AddSubject(nullptr, subjects[i % ArraySize(authModes)][i % ArraySize(subjects[0])]);
updateData.AddTarget(nullptr, targets[i % ArraySize(targets)]);
data[i] = updateData;
{
Entry entry;
NL_TEST_ASSERT(inSuite, accessControl.PrepareEntry(entry) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, LoadEntry(entry, updateData) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, accessControl.UpdateEntry(i, entry) == CHIP_NO_ERROR);
}
NL_TEST_ASSERT(inSuite, CompareAccessControl(accessControl, data, ArraySize(data)) == CHIP_NO_ERROR);
}
}
int Setup(void * inContext)
{
SetAccessControl(accessControl);
GetAccessControl().Init();
return SUCCESS;
}
int Teardown(void * inContext)
{
GetAccessControl().Finish();
return SUCCESS;
}
int Initialize(void * inContext)
{
return ClearAccessControl(accessControl) == CHIP_NO_ERROR ? SUCCESS : FAILURE;
}
int Terminate(void * inContext)
{
return SUCCESS;
}
} // namespace
int TestAccessControl()
{
// clang-format off
constexpr nlTest tests[] = {
NL_TEST_DEF("MetaTest", MetaTest),
NL_TEST_DEF("TestPrepareEntry", TestPrepareEntry),
NL_TEST_DEF("TestCreateReadEntry", TestCreateReadEntry),
NL_TEST_DEF("TestUpdateEntry", TestUpdateEntry),
NL_TEST_DEF("TestDeleteEntry", TestDeleteEntry),
NL_TEST_DEF("TestAclValidateFabricIndex", TestAclValidateFabricIndex),
NL_TEST_DEF("TestAclValidatePrivilege", TestAclValidatePrivilege),
NL_TEST_DEF("TestAclValidateAuthModeSubject", TestAclValidateAuthModeSubject),
NL_TEST_DEF("TestAclValidateTarget", TestAclValidateTarget),
NL_TEST_DEF("TestSubjectsTargets", TestSubjectsTargets),
NL_TEST_DEF("TestIterator", TestIterator),
NL_TEST_DEF("TestFabricFilteredReadEntry", TestFabricFilteredReadEntry),
NL_TEST_DEF("TestFabricFilteredCreateEntry", TestFabricFilteredCreateEntry),
NL_TEST_DEF("TestCheck", TestCheck),
NL_TEST_SENTINEL()
};
// clang-format on
nlTestSuite suite = {
.name = "AccessControl",
.tests = tests,
.setup = Setup,
.tear_down = Teardown,
.initialize = Initialize,
.terminate = Terminate,
};
nlTestRunner(&suite, nullptr);
return nlTestRunnerStats(&suite);
}
CHIP_REGISTER_TEST_SUITE(TestAccessControl);<|fim▁end|> | NL_TEST_ASSERT(inSuite, entry.SetAuthMode(AuthMode::kCase) == CHIP_NO_ERROR);
NL_TEST_ASSERT(inSuite, entry.AddSubject(nullptr, kOperationalNodeId0) == CHIP_NO_ERROR); |
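// Hedged sketch (not part of the original test file): the entry lifecycle the
// tests above exercise, collapsed into one helper. It only uses Access Control
// calls already present in this file; ReturnErrorOnFailure is assumed to be
// available from CHIP's CodeUtils.h.
namespace {
CHIP_ERROR CreateCaseAdminEntry(NodeId node, size_t & outIndex)
{
    Entry entry;
    ReturnErrorOnFailure(accessControl.PrepareEntry(entry));
    ReturnErrorOnFailure(entry.SetFabricIndex(1));
    ReturnErrorOnFailure(entry.SetPrivilege(Privilege::kAdminister));
    ReturnErrorOnFailure(entry.SetAuthMode(AuthMode::kCase));
    ReturnErrorOnFailure(entry.AddSubject(nullptr, node));
    return accessControl.CreateEntry(&outIndex, entry);
}
} // namespace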
<|file_name|>jtag.go<|end_file_name|><|fim▁begin|>// Copyright 2018 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
// Package jtag will eventually define the API to communicate with devices over
// the JTAG protocol.
//<|fim▁hole|>package jtag<|fim▁end|> | // See https://en.wikipedia.org/wiki/JTAG for background information. |
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>import formatDistance from '../en-US/_lib/formatDistance/index'
import formatRelative from '../en-US/_lib/formatRelative/index'
import localize from '../en-US/_lib/localize/index'<|fim▁hole|>import type { Locale } from '../types'
import formatLong from './_lib/formatLong/index'
/**
* @type {Locale}
* @category Locales
* @summary English locale (New Zealand).
* @language English
* @iso-639-2 eng
* @author Murray Lucas [@muntact]{@link https://github.com/muntact}
*/
const locale: Locale = {
code: 'en-NZ',
formatDistance: formatDistance,
formatLong: formatLong,
formatRelative: formatRelative,
localize: localize,
match: match,
options: {
weekStartsOn: 1 /* Monday */,
firstWeekContainsDate: 4,
},
}
export default locale<|fim▁end|> | import match from '../en-US/_lib/match/index' |
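// Hedged usage sketch (not part of the original locale file): consuming the
// locale above through date-fns' `format`; the import specifiers below are
// assumptions about the surrounding package layout.
import { format } from 'date-fns'
import enNZ from 'date-fns/locale/en-NZ'

// Monday week start (weekStartsOn: 1) affects week-based tokens such as 'ww'.
const example = format(new Date(2014, 6, 2), 'do MMMM yyyy', { locale: enNZ })
// example === '2nd July 2014'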
<|file_name|>admin.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | //= require fluent/admin/admin.js |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
from snisi_reprohealth.models.PFActivities import (PFActivitiesR, AggPFActivitiesR)
# from snisi_reprohealth.models.ChildrenMortality import (ChildrenDeathR, AggChildrenDeathR)
# from snisi_reprohealth.models.MaternalMortality import (MaternalDeathR, AggMaternalDeathR)<|fim▁hole|><|fim▁end|> | # from snisi_reprohealth.models.Commodities import (RHProductsR, AggRHProductsR) |
<|file_name|>parse.py<|end_file_name|><|fim▁begin|>import re
import string
import sys
sys.path.append('/Users/exu/PlayGround/readinglists/')
from key.keys import *
from amazon.api import AmazonAPI
from html2text import html2text
pattern = re.compile("https?://.*amazon.com/gp/product/([0-9]+)/.*")
amazon = AmazonAPI(AMAZON_ACCESS_KEY_ID, AMAZON_SECRET_ACCESS_KEY, AMAZON_ASSOC_TAG, MaxQPS=0.9)
def uprint(s):
print s.encode('utf-8')
def get_asin(url):
global pattern
m = pattern.match(url)
if m and len(m.groups()) > 0:
return m.groups()[0]
def read_file():
    if len(sys.argv) < 2:  # argv[0] is the script name; the links file must be argv[1]
print "Please provide a file that includes a list of Amazon links."
sys.exit(-1)
fname = sys.argv[1]
f = open(fname, 'r')
products = []
for l in f.readlines():
product = amazon.lookup(ItemId=get_asin(l))
products.append([product.title, product.editorial_review, product.large_image_url, product.offer_url])
print "Got product", product.title
return products
rtitle = re.compile('(.*)(\(.*\))')
def normalize_title(title):<|fim▁hole|> new_title = splits[0][0]
else:
new_title = title
return new_title
def sanitize_text(t):
s = html2text(t)
s = string.replace(s, "'", "’")
s = string.replace(s, "**", "*")
return s
if __name__ == '__main__':
import os.path
import cPickle
pickle_file = 'products.pickle'
products = None
if os.path.isfile(pickle_file):
products = cPickle.load(open(pickle_file, 'r'))
else:
products = read_file()
f = open(pickle_file, "wb")
cPickle.dump(products, f)
for product in products:
title = normalize_title(product[0])
uprint(title)
print '=' * len(title)
review = sanitize_text(product[1])
uprint(review)
print<|fim▁end|> | """ Book titles are long. We crop out the last part that is in (part)"""
splits = re.findall(rtitle, title)
if splits: |
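# Hedged self-check (not part of the original script): exercises the pure
# helpers above without touching the Amazon API; the URL is illustrative only.
def _self_check():
    assert get_asin("http://www.amazon.com/gp/product/0143127799/ref=x") == "0143127799"
    # normalize_title keeps the text before a trailing parenthesised suffix
    assert normalize_title("Foo Bar (Part 1)") == "Foo Bar "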
<|file_name|>FacebookSession.java<|end_file_name|><|fim▁begin|>package de.dvdb.domain.model.social;
import java.io.Serializable;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
<|fim▁hole|> private static final long serialVersionUID = -8753714944734959457L;
private Long id;
private String sessionKey;
private Long user;
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
@Column(name = "user_id")
public Long getUser() {
return user;
}
public void setUser(Long user) {
this.user = user;
}
@Column(name = "sessionkey")
public String getSessionKey() {
return sessionKey;
}
public void setSessionKey(String sessionKey) {
this.sessionKey = sessionKey;
}
}<|fim▁end|> | @Entity
@Table(name = "dvdb2_fbsession")
public class FacebookSession implements Serializable {
|
<|file_name|>utils.rs<|end_file_name|><|fim▁begin|>// Copyright 2020, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <https://opensource.org/licenses/MIT>
use anyhow::{bail, Result};
use itertools::Itertools;
use proc_macro2::{Ident, Span};
use proc_macro_crate::crate_name;
use syn::{Attribute, DeriveInput, Lit, Meta, MetaList, NestedMeta};
// find the #[@attr_name] attribute in @attrs
pub fn find_attribute_meta(attrs: &[Attribute], attr_name: &str) -> Result<Option<MetaList>> {
let meta = match attrs.iter().find(|a| a.path.is_ident(attr_name)) {
Some(a) => a.parse_meta(),
_ => return Ok(None),
};
match meta? {
Meta::List(n) => Ok(Some(n)),
_ => bail!("wrong meta type"),
}
}
// parse a single meta like: ident = "value"
fn parse_attribute(meta: &NestedMeta) -> Result<(String, String)> {
let meta = match &meta {
NestedMeta::Meta(m) => m,
_ => bail!("wrong meta type"),
};
let meta = match meta {
Meta::NameValue(n) => n,
_ => bail!("wrong meta type"),
};
let value = match &meta.lit {
Lit::Str(s) => s.value(),
_ => bail!("wrong meta type"),
};
let ident = match meta.path.get_ident() {
None => bail!("missing ident"),
Some(ident) => ident,
};
Ok((ident.to_string(), value))
}
#[derive(Debug)]
pub enum EnumAttribute {
TypeName(String),
}
pub fn parse_enum_attribute(meta: &NestedMeta) -> Result<EnumAttribute> {
let (ident, v) = parse_attribute(meta)?;
match ident.as_ref() {
"type_name" => Ok(EnumAttribute::TypeName(v)),
s => bail!("Unknown enum meta {}", s),
}
}
pub fn find_nested_meta<'a>(meta: &'a MetaList, name: &str) -> Option<&'a NestedMeta> {
meta.nested.iter().find(|n| match n {
NestedMeta::Meta(m) => m.path().is_ident(name),
_ => false,
})
}
// Parse attribute such as:
// #[genum(type_name = "TestAnimalType")]
pub fn parse_type_name(input: &DeriveInput, attr_name: &str) -> Result<String> {
let meta = match find_attribute_meta(&input.attrs, attr_name)? {
Some(meta) => meta,
_ => bail!("Missing '{}' attribute", attr_name),
};
let meta = match find_nested_meta(&meta, "type_name") {
Some(meta) => meta,
_ => bail!("Missing meta 'type_name'"),<|fim▁hole|>
match parse_enum_attribute(&meta)? {
EnumAttribute::TypeName(n) => Ok(n),
}
}
#[derive(Debug)]
pub enum ItemAttribute {
Name(String),
Nick(String),
}
fn parse_item_attribute(meta: &NestedMeta) -> Result<ItemAttribute> {
let (ident, v) = parse_attribute(meta)?;
match ident.as_ref() {
"name" => Ok(ItemAttribute::Name(v)),
"nick" => Ok(ItemAttribute::Nick(v)),
s => bail!("Unknown item meta {}", s),
}
}
// Parse optional enum item attributes such as:
// #[genum(name = "My Name", nick = "my-nick")]
pub fn parse_item_attributes(attr_name: &str, attrs: &[Attribute]) -> Result<Vec<ItemAttribute>> {
let meta = find_attribute_meta(attrs, attr_name)?;
let v = match meta {
Some(meta) => meta
.nested
.iter()
.map(|m| parse_item_attribute(&m))
.fold_results(Vec::new(), |mut v, a| {
v.push(a);
v
})?,
None => Vec::new(),
};
Ok(v)
}
pub fn crate_ident_new() -> Ident {
let crate_name = match crate_name("glib") {
Ok(x) => x,
Err(_) => {
// In case we use it directly from glib itself (it cannot find glib as a dependency
// in this case)
"glib".to_owned()
}
};
Ident::new(&crate_name, Span::call_site())
}<|fim▁end|> | }; |
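// Hedged sketch (not part of the original file): feeding the documented
// attribute shape through `parse_type_name`; assumes `syn`'s string parsing
// as already used by this crate.
#[cfg(test)]
mod attr_tests {
    use super::*;

    #[test]
    fn parses_genum_type_name() {
        let input: syn::DeriveInput = syn::parse_str(
            r#"#[genum(type_name = "TestAnimalType")] enum Animal { Goat }"#,
        )
        .unwrap();
        assert_eq!(parse_type_name(&input, "genum").unwrap(), "TestAnimalType");
    }
}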
<|file_name|>kbemain.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import os
import KBEngine
from KBEDebug import *
def onBaseAppReady(isBootstrap):
"""
KBEngine method.
	The baseapp is ready.
	@param isBootstrap: whether this is the first baseapp to start
@type isBootstrap: BOOL
"""<|fim▁hole|> """
KBEngine method.
	If the return value is greater than or equal to 1.0, initialization is fully complete; otherwise return the preparation progress as a value in 0.0~1.0.
	This ensures logins are opened only after the script layer has fully initialized.
	@param isBootstrap: whether this is the first baseapp to start
@type isBootstrap: BOOL
"""
return 1.0
def onReadyForShutDown():
"""
KBEngine method.
	The process asks the script layer: "I am about to shut down; is the script ready?"
	If True is returned, the process enters the shutdown flow; any other value makes the process ask again after a while.
	This gives the script layer a chance to clean up its data when the message arrives, so its work is not lost on shutdown.
"""
INFO_MSG('onReadyForShutDown()')
return True
def onBaseAppShutDown(state):
"""
KBEngine method.
	Callback invoked before this baseapp is shut down.
	@param state: 0 : before all clients are disconnected
	1 : before all entities are written to the database
	2 : after all entities have been written to the database
@type state: int
"""
INFO_MSG('onBaseAppShutDown: state=%i' % state)
def onInit(isReload):
"""
KBEngine method.
	Called once the engine has started and all scripts have been initialized.
	@param isReload: whether this was triggered by reloading the scripts
@type isReload: bool
"""
INFO_MSG('onInit::isReload:%s' % isReload)
def onFini():
"""
KBEngine method.
	The engine is officially shutting down.
"""
INFO_MSG('onFini()')
def onCellAppDeath(addr):
"""
KBEngine method.
	A cellapp has died.
"""
WARNING_MSG('onCellAppDeath: %s' % (str(addr)))
def onGlobalData(key, value):
"""
KBEngine method.
	globalData has changed.
"""
DEBUG_MSG('onGlobalData: %s' % key)
def onGlobalDataDel(key):
"""
KBEngine method.
	An entry was deleted from globalData.
"""
DEBUG_MSG('onDelGlobalData: %s' % key)
def onGlobalBases(key, value):
"""
KBEngine method.
	globalBases has changed.
"""
DEBUG_MSG('onGlobalBases: %s' % key)
def onGlobalBasesDel(key):
"""
KBEngine method.
	An entry was deleted from globalBases.
"""
DEBUG_MSG('onGlobalBasesDel: %s' % key)
def onLoseChargeCB(ordersID, dbid, success, datas):
"""
KBEngine method.
	An unidentified order was processed; the record may have been cleared by billing
	after a timeout while a third-party top-up callback was still received.
"""
DEBUG_MSG('onLoseChargeCB: ordersID=%s, dbid=%i, success=%i, datas=%s' % \
(ordersID, dbid, success, datas))<|fim▁end|> | INFO_MSG('onBaseAppReady: isBootstrap=%s, appID=%s, bootstrapGroupIndex=%s, bootstrapGlobalIndex=%s' % \
(isBootstrap, os.getenv("KBE_COMPONENTID"), os.getenv("KBE_BOOTIDX_GROUP"), os.getenv("KBE_BOOTIDX_GLOBAL")))
def onReadyForLogin(isBootstrap): |
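# Hedged sketch (not part of the original file): a staged variant of the
# readiness callback documented above; `scripts_ready` is illustrative only.
def onReadyForLoginStaged(isBootstrap):
    scripts_ready = True  # app-defined readiness flag (assumption)
    return 1.0 if scripts_ready else 0.5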
<|file_name|>SecretKeySpec.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author Alexander Y. Kleymenov
* @version $Revision$
*/
package javax.crypto.spec;
import java.io.Serializable;
import java.security.spec.KeySpec;
import java.util.Arrays;
import javax.crypto.SecretKey;
/**
* A key specification for a <code>SecretKey</code> and also a secret key
* implementation that is provider-independent. It can be used for raw secret
* keys that can be specified as <code>byte[]</code>.
*/
public class SecretKeySpec implements SecretKey, KeySpec, Serializable {
// The 5.0 spec. doesn't declare this serialVersionUID field
// In order to be compatible it is explicitly declared here
// for details see HARMONY-233
private static final long serialVersionUID = 6577238317307289933L;
private final byte[] key;
private final String algorithm;
private final String format = "RAW";
/**
* Creates a new <code>SecretKeySpec</code> for the specified key data and
* algorithm name.
*
* @param key
* the key data.
* @param algorithm
* the algorithm name.
* @throws IllegalArgumentException
* if the key data or the algorithm name is null or if the key
* data is empty.
*/
public SecretKeySpec(byte[] key, String algorithm) {
if (key == null) {
throw new IllegalArgumentException("key == null");
}
if (key.length == 0) {
throw new IllegalArgumentException("key.length == 0");
}
if (algorithm == null) {
throw new IllegalArgumentException("algorithm == null");
}
this.algorithm = algorithm;
this.key = new byte[key.length];
System.arraycopy(key, 0, this.key, 0, key.length);
}
/**
* Creates a new <code>SecretKeySpec</code> for the key data from the
* specified buffer <code>key</code> starting at <code>offset</code> with
* length <code>len</code> and the specified <code>algorithm</code> name.
*
* @param key
* the key data.
* @param offset
* the offset.
* @param len
* the size of the key data.
* @param algorithm
* the algorithm name.<|fim▁hole|> * @throws IllegalArgumentException
* if the key data or the algorithm name is null, the key data
* is empty or <code>offset</code> and <code>len</code> do not
* specify a valid chunk in the buffer <code>key</code>.
* @throws ArrayIndexOutOfBoundsException
* if <code>offset</code> or <code>len</code> is negative.
*/
public SecretKeySpec(byte[] key, int offset, int len, String algorithm) {
if (key == null) {
throw new IllegalArgumentException("key == null");
}
if (key.length == 0) {
throw new IllegalArgumentException("key.length == 0");
}
// BEGIN android-changed
if (len < 0 || offset < 0) {
throw new ArrayIndexOutOfBoundsException("len < 0 || offset < 0");
}
// END android-changed
if (key.length - offset < len) {
throw new IllegalArgumentException("key too short");
}
if (algorithm == null) {
throw new IllegalArgumentException("algorithm == null");
}
this.algorithm = algorithm;
this.key = new byte[len];
System.arraycopy(key, offset, this.key, 0, len);
}
/**
* Returns the algorithm name.
*
* @return the algorithm name.
*/
public String getAlgorithm() {
return algorithm;
}
/**
* Returns the name of the format used to encode the key.
*
* @return the format name "RAW".
*/
public String getFormat() {
return format;
}
/**
* Returns the encoded form of this secret key.
*
* @return the encoded form of this secret key.
*/
public byte[] getEncoded() {
byte[] result = new byte[key.length];
System.arraycopy(key, 0, result, 0, key.length);
return result;
}
/**
* Returns the hash code of this <code>SecretKeySpec</code> object.
*
* @return the hash code.
*/
@Override
public int hashCode() {
int result = algorithm.length();
for (byte element : key) {
result += element;
}
return result;
}
/**
* Compares the specified object with this <code>SecretKeySpec</code>
* instance.
*
* @param obj
* the object to compare.
* @return true if the algorithm name and key of both object are equal,
* otherwise false.
*/
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof SecretKeySpec)) {
return false;
}
SecretKeySpec ks = (SecretKeySpec) obj;
return (algorithm.equalsIgnoreCase(ks.algorithm))
&& (Arrays.equals(key, ks.key));
}
}<|fim▁end|> | |
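// Hedged usage sketch (not part of the original file): wrapping raw bytes as
// an AES key; the class below is illustrative only and uses APIs defined above.
class SecretKeySpecExample {
    public static void main(String[] args) {
        byte[] raw = new byte[16]; // 128-bit key material (all zeroes here)
        javax.crypto.spec.SecretKeySpec key =
                new javax.crypto.spec.SecretKeySpec(raw, "AES");
        System.out.println(key.getAlgorithm());      // AES
        System.out.println(key.getFormat());         // RAW
        System.out.println(key.getEncoded().length); // 16
    }
}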
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! A bounded SPSC channel.
use arc::{Arc, ArcTrait};
use select::{Selectable, _Selectable};
use {Error, Sendable};
mod imp;
#[cfg(test)] mod test;
#[cfg(test)] mod bench;
/// Creates a new bounded SPSC channel.
///
/// ### Panic
///
/// Panics if `next_power_of_two(cap) * sizeof(T) >= isize::MAX`.
pub fn new<'a, T: Sendable+'a>(cap: usize) -> (Producer<'a, T>, Consumer<'a, T>) {
let packet = Arc::new(imp::Packet::new(cap));
packet.set_id(packet.unique_id());
(Producer { data: packet.clone() }, Consumer { data: packet })
}
/// The producing half of a bounded SPSC channel.
pub struct Producer<'a, T: Sendable+'a> {
data: Arc<imp::Packet<'a, T>>,
}
impl<'a, T: Sendable+'a> Producer<'a, T> {
/// Sends a message over the channel. Blocks if the buffer is full.
///
/// ### Errors
///
/// - `Disconnected` - The receiver has disconnected.
pub fn send_sync(&self, val: T) -> Result<(), (T, Error)> {
self.data.send_sync(val)
}
/// Sends a message over the channel. Does not block if the buffer is full.
///
/// ### Errors
///
/// - `Full` - There is no space in the buffer.
/// - `Disconnected` - The receiver has disconnected.
pub fn send_async(&self, val: T) -> Result<(), (T, Error)> {
self.data.send_async(val, false)
}
}
impl<'a, T: Sendable+'a> Drop for Producer<'a, T> {
fn drop(&mut self) {
self.data.disconnect_sender()
}
}
unsafe impl<'a, T: Sendable+'a> Send for Producer<'a, T> { }
/// The consuming half of a bounded SPSC channel.
pub struct Consumer<'a, T: Sendable+'a> {
data: Arc<imp::Packet<'a, T>>,
}
impl<'a, T: Sendable+'a> Consumer<'a, T> {
/// Receives a message over this channel. Blocks until a message is available.
///
/// ### Errors
///
/// - `Disconnected` - No message is available and the sender has disconnected.
pub fn recv_sync(&self) -> Result<T, Error> {
self.data.recv_sync()
}
/// Receives a message over this channel. Does not block if no message is available.
///
/// ### Errors
///
/// - `Disconnected` - No message is available and the sender has disconnected.
/// - `Empty` - No message is available.
pub fn recv_async(&self) -> Result<T, Error> {
self.data.recv_async(false)
}
}
impl<'a, T: Sendable+'a> Drop for Consumer<'a, T> {
fn drop(&mut self) {
self.data.disconnect_receiver()
}
}
unsafe impl<'a, T: Sendable+'a> Send for Consumer<'a, T> { }
impl<'a, T: Sendable+'a> Selectable<'a> for Consumer<'a, T> {<|fim▁hole|> fn as_selectable(&self) -> ArcTrait<_Selectable<'a>+'a> {
unsafe { self.data.as_trait(&*self.data as &(_Selectable+'a)) }
}
}<|fim▁end|> | fn id(&self) -> usize {
self.data.unique_id()
}
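// Hedged usage sketch (not part of the original file): one producer, one
// consumer, bounded capacity; assumes `u32` satisfies the crate's `Sendable`.
#[cfg(test)]
mod usage_example {
    #[test]
    fn send_then_recv() {
        let (tx, rx) = super::new(4);
        tx.send_sync(1u32).ok().unwrap(); // blocks only when the buffer is full
        assert_eq!(rx.recv_sync().ok().unwrap(), 1);
    }
}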
|
<|file_name|>unicodeExtendedEscapesInTemplates14_ES5.ts<|end_file_name|><|fim▁begin|>// @target: es5
<|fim▁hole|>// Shouldn't work, negatives are not allowed.
var x = `\u{-DDDD}`;<|fim▁end|> | |
<|file_name|>reflection.rs<|end_file_name|><|fim▁begin|>use gl;
use libc;
use std::ffi;
use std::mem;
use std::ptr;
use std::collections::HashMap;
use context::CommandContext;
use version::Version;
use version::Api;
use uniforms::UniformType;
use vertex::AttributeType;
use Handle;
/// Information about a uniform (except its name).
#[derive(Debug, Copy, Clone)]
pub struct Uniform {
/// The location of the uniform.
///
/// This is internal information, you probably don't need to use it.
pub location: i32,
/// Type of the uniform.
pub ty: UniformType,
/// If it is an array, the number of elements.
pub size: Option<usize>,
}
/// Information about a uniform block (except its name).
#[derive(Debug, Clone)]
pub struct UniformBlock {
/// The binding point of the block.
///
/// This is internal information, you probably don't need to use it.
pub binding: i32,
/// Size in bytes of the data in the block.
pub size: usize,
/// Layout of the block.
pub layout: BlockLayout,
}
/// Layout of a shader storage buffer or a uniform buffer.
#[derive(Debug, Clone)]
pub enum BlockLayout {
/// Multiple elements, each having a name.
Struct {
/// The list of elements, with `name`/`layout` pairs.
members: Vec<(String, BlockLayout)>,
},
/// A basic element.
BasicType {
/// Type of data.
ty: UniformType,
/// Offset of this element in bytes from the start of the buffer.
offset_in_buffer: usize,
},
/// A fixed-size array.
///
/// For example:
///
/// ```notrust
/// uint data[12];
/// ```
Array {
/// Type of data of each element.
content: Box<BlockLayout>,
/// Number of elements in the array.
length: usize,
},
/// An array whose size isn't known at compile-time. Can only be used as the last element of
/// a buffer.
///
/// Its actual size depends on the size of the buffer.
///
/// For example:
///
/// ```notrust
/// buffer MyBuffer {
/// uint data[];
/// }
/// ```
DynamicSizedArray {
/// Type of data of each element.
content: Box<BlockLayout>,
},
}
/// Information about an attribute of a program (except its name).
///
/// Internal struct. Not public.
#[derive(Debug, Copy, Clone)]
pub struct Attribute {
/// The index of the uniform.
///
/// This is internal information, you probably don't need to use it.
pub location: i32,
/// Type of the attribute.
pub ty: AttributeType,
/// Number of elements of the attribute.
pub size: usize,
}
/// Describes the layout of a buffer that can receive transform feedback output.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TransformFeedbackBuffer {
/// Slot of this buffer.
///
/// This is internal information, you probably don't need to use it.
pub id: i32,
/// List of elements inside the buffer.
pub elements: Vec<TransformFeedbackVarying>,
/// Size in bytes between two consecutive elements.
pub stride: usize,
}
/// Describes a varying that is being output with transform feedback.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TransformFeedbackVarying {
/// Name of the variable.
pub name: String,
/// Number of bytes between the start of the first element and the start of this one.
pub offset: usize,
/// Size in bytes of this value.
pub size: usize,
/// Type of the value.
pub ty: AttributeType,
}
/// Type of transform feedback. Only used with the legacy interface.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum TransformFeedbackMode {
/// Each value is interleaved in the same buffer.
Interleaved,
/// Each value will go in a separate buffer.
Separate,
}
/// Type of primitives that is being output by transform feedback.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum OutputPrimitives {
/// Points.
Points,
/// Lines.
Lines,
/// Triangles.
Triangles,
/// Quads.
Quads,
}
pub unsafe fn reflect_uniforms(ctxt: &mut CommandContext, program: Handle)
-> HashMap<String, Uniform>
{
// number of active uniforms
let active_uniforms = {
let mut active_uniforms: gl::types::GLint = mem::uninitialized();
match program {
Handle::Id(program) => {
assert!(ctxt.version >= &Version(Api::Gl, 2, 0) ||
ctxt.version >= &Version(Api::GlEs, 2, 0));
ctxt.gl.GetProgramiv(program, gl::ACTIVE_UNIFORMS, &mut active_uniforms);
},
Handle::Handle(program) => {
assert!(ctxt.extensions.gl_arb_shader_objects);
ctxt.gl.GetObjectParameterivARB(program, gl::OBJECT_ACTIVE_UNIFORMS_ARB,
&mut active_uniforms);
}
};
active_uniforms
};
// the result of this function
let mut uniforms = HashMap::with_capacity(active_uniforms as usize);
for uniform_id in (0 .. active_uniforms) {
let mut uniform_name_tmp: Vec<u8> = Vec::with_capacity(64);
let mut uniform_name_tmp_len = 63;
let mut data_type: gl::types::GLenum = mem::uninitialized();
let mut data_size: gl::types::GLint = mem::uninitialized();
match program {
Handle::Id(program) => {
assert!(ctxt.version >= &Version(Api::Gl, 2, 0) ||
ctxt.version >= &Version(Api::GlEs, 2, 0));
ctxt.gl.GetActiveUniform(program, uniform_id as gl::types::GLuint,
uniform_name_tmp_len, &mut uniform_name_tmp_len,
&mut data_size, &mut data_type,
uniform_name_tmp.as_mut_ptr() as *mut gl::types::GLchar);
},
Handle::Handle(program) => {
assert!(ctxt.extensions.gl_arb_shader_objects);
ctxt.gl.GetActiveUniformARB(program, uniform_id as gl::types::GLuint,
uniform_name_tmp_len, &mut uniform_name_tmp_len,
&mut data_size, &mut data_type,
uniform_name_tmp.as_mut_ptr()
as *mut gl::types::GLchar);
}
};
uniform_name_tmp.set_len(uniform_name_tmp_len as usize);
let uniform_name = String::from_utf8(uniform_name_tmp).unwrap();
let location = match program {
Handle::Id(program) => {
assert!(ctxt.version >= &Version(Api::Gl, 2, 0) ||
ctxt.version >= &Version(Api::GlEs, 2, 0));
ctxt.gl.GetUniformLocation(program,
ffi::CString::new(uniform_name.as_bytes()).unwrap()
.as_bytes_with_nul().as_ptr() as *const libc::c_char)
},
Handle::Handle(program) => {
assert!(ctxt.extensions.gl_arb_shader_objects);
ctxt.gl.GetUniformLocationARB(program,
ffi::CString::new(uniform_name.as_bytes()).unwrap()
.as_bytes_with_nul().as_ptr() as *const libc::c_char)
}
};
uniforms.insert(uniform_name, Uniform {
location: location as i32,
ty: glenum_to_uniform_type(data_type),
size: if data_size == 1 { None } else { Some(data_size as usize) },
});
}
uniforms
}
pub unsafe fn reflect_attributes(ctxt: &mut CommandContext, program: Handle)
-> HashMap<String, Attribute>
{
// number of active attributes
let active_attributes = {
let mut active_attributes: gl::types::GLint = mem::uninitialized();
match program {
Handle::Id(program) => {
assert!(ctxt.version >= &Version(Api::Gl, 2, 0) ||
ctxt.version >= &Version(Api::GlEs, 2, 0));
ctxt.gl.GetProgramiv(program, gl::ACTIVE_ATTRIBUTES, &mut active_attributes);
},
Handle::Handle(program) => {
assert!(ctxt.extensions.gl_arb_vertex_shader);
ctxt.gl.GetObjectParameterivARB(program, gl::OBJECT_ACTIVE_ATTRIBUTES_ARB,
&mut active_attributes);
}
};
active_attributes
};
// the result of this function
let mut attributes = HashMap::with_capacity(active_attributes as usize);
for attribute_id in (0 .. active_attributes) {
let mut attr_name_tmp: Vec<u8> = Vec::with_capacity(64);
let mut attr_name_tmp_len = 63;
let mut data_type: gl::types::GLenum = mem::uninitialized();
let mut data_size: gl::types::GLint = mem::uninitialized();
match program {
Handle::Id(program) => {
assert!(ctxt.version >= &Version(Api::Gl, 2, 0) ||
ctxt.version >= &Version(Api::GlEs, 2, 0));
ctxt.gl.GetActiveAttrib(program, attribute_id as gl::types::GLuint,
attr_name_tmp_len, &mut attr_name_tmp_len, &mut data_size,
&mut data_type, attr_name_tmp.as_mut_ptr()
as *mut gl::types::GLchar);
},
Handle::Handle(program) => {
assert!(ctxt.extensions.gl_arb_vertex_shader);
ctxt.gl.GetActiveAttribARB(program, attribute_id as gl::types::GLuint,
attr_name_tmp_len, &mut attr_name_tmp_len, &mut data_size,
&mut data_type, attr_name_tmp.as_mut_ptr()
as *mut gl::types::GLchar);
}
};
attr_name_tmp.set_len(attr_name_tmp_len as usize);
let attr_name = String::from_utf8(attr_name_tmp).unwrap();
if attr_name.starts_with("gl_") { // ignoring everything built-in
continue;
}
let location = match program {
Handle::Id(program) => {
assert!(ctxt.version >= &Version(Api::Gl, 2, 0) ||
ctxt.version >= &Version(Api::GlEs, 2, 0));
ctxt.gl.GetAttribLocation(program,
ffi::CString::new(attr_name.as_bytes()).unwrap()
.as_bytes_with_nul().as_ptr() as *const libc::c_char)
},
Handle::Handle(program) => {
assert!(ctxt.extensions.gl_arb_vertex_shader);
ctxt.gl.GetAttribLocationARB(program,
ffi::CString::new(attr_name.as_bytes()).unwrap()
.as_bytes_with_nul().as_ptr() as *const libc::c_char)
}
};
attributes.insert(attr_name, Attribute {
location: location,
ty: glenum_to_attribute_type(data_type),
size: data_size as usize,
});
}
attributes
}
pub unsafe fn reflect_uniform_blocks(ctxt: &mut CommandContext, program: Handle)
-> HashMap<String, UniformBlock>
{
// uniform blocks are not supported, so there's none
if !(ctxt.version >= &Version(Api::Gl, 3, 1) || ctxt.version >= &Version(Api::GlEs, 3, 0)) {
return HashMap::new();
}
let program = match program {
Handle::Id(id) => id,
_ => unreachable!()
};
let mut active_blocks: gl::types::GLint = mem::uninitialized();
ctxt.gl.GetProgramiv(program, gl::ACTIVE_UNIFORM_BLOCKS, &mut active_blocks);
let mut active_blocks_max_name_len: gl::types::GLint = mem::uninitialized();
ctxt.gl.GetProgramiv(program, gl::ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH,
&mut active_blocks_max_name_len);
let mut blocks = HashMap::with_capacity(active_blocks as usize);
for block_id in (0 .. active_blocks) {
// getting the name of the block
let name = {
let mut name_tmp: Vec<u8> = Vec::with_capacity(1 + active_blocks_max_name_len
as usize);
let mut name_tmp_len = active_blocks_max_name_len;
ctxt.gl.GetActiveUniformBlockName(program, block_id as gl::types::GLuint,
name_tmp_len, &mut name_tmp_len,
name_tmp.as_mut_ptr() as *mut gl::types::GLchar);
name_tmp.set_len(name_tmp_len as usize);
String::from_utf8(name_tmp).unwrap()
};
// binding point for this block
let mut binding: gl::types::GLint = mem::uninitialized();
ctxt.gl.GetActiveUniformBlockiv(program, block_id as gl::types::GLuint,
gl::UNIFORM_BLOCK_BINDING, &mut binding);
// number of bytes
let mut block_size: gl::types::GLint = mem::uninitialized();
ctxt.gl.GetActiveUniformBlockiv(program, block_id as gl::types::GLuint,
gl::UNIFORM_BLOCK_DATA_SIZE, &mut block_size);
// number of members
let mut num_members: gl::types::GLint = mem::uninitialized();
ctxt.gl.GetActiveUniformBlockiv(program, block_id as gl::types::GLuint,
gl::UNIFORM_BLOCK_ACTIVE_UNIFORMS, &mut num_members);
// indices of the members
let mut members_indices = ::std::iter::repeat(0).take(num_members as usize)
.collect::<Vec<gl::types::GLuint>>();
ctxt.gl.GetActiveUniformBlockiv(program, block_id as gl::types::GLuint,
gl::UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES,
members_indices.as_mut_ptr() as *mut gl::types::GLint);
// getting the offsets of the members
let mut member_offsets = ::std::iter::repeat(0).take(num_members as usize)
.collect::<Vec<gl::types::GLint>>();
ctxt.gl.GetActiveUniformsiv(program, num_members, members_indices.as_ptr(),
gl::UNIFORM_OFFSET, member_offsets.as_mut_ptr());
// getting the types of the members
let mut member_types = ::std::iter::repeat(0).take(num_members as usize)
.collect::<Vec<gl::types::GLint>>();
ctxt.gl.GetActiveUniformsiv(program, num_members, members_indices.as_ptr(),
gl::UNIFORM_TYPE, member_types.as_mut_ptr());
// getting the array sizes of the members
let mut member_size = ::std::iter::repeat(0).take(num_members as usize)
.collect::<Vec<gl::types::GLint>>();
ctxt.gl.GetActiveUniformsiv(program, num_members, members_indices.as_ptr(),
gl::UNIFORM_SIZE, member_size.as_mut_ptr());
// getting the length of the names of the members
let mut member_name_len = ::std::iter::repeat(0).take(num_members as usize)
.collect::<Vec<gl::types::GLint>>();
ctxt.gl.GetActiveUniformsiv(program, num_members, members_indices.as_ptr(),
gl::UNIFORM_NAME_LENGTH, member_name_len.as_mut_ptr());
// getting the names of the members
let member_names = member_name_len.iter().zip(members_indices.iter())
.map(|(&name_len, &index)|
{
let mut name_tmp: Vec<u8> = Vec::with_capacity(1 + name_len as usize);
let mut name_len_tmp = name_len;
ctxt.gl.GetActiveUniformName(program, index, name_len, &mut name_len_tmp,
name_tmp.as_mut_ptr() as *mut gl::types::GLchar);
name_tmp.set_len(name_len_tmp as usize);
String::from_utf8(name_tmp).unwrap()
}).collect::<Vec<_>>();
// now computing the list of members
let members = member_names.into_iter().enumerate().map(|(index, name)| {
(name, member_offsets[index] as usize,
glenum_to_uniform_type(member_types[index] as gl::types::GLenum),
match member_size[index] {
1 => None,
a => Some(a as usize),
}
)
});
// finally inserting into the blocks list
blocks.insert(name, UniformBlock {
binding: binding as i32,
size: block_size as usize,
layout: introspection_output_to_layout(members),
});
}
blocks
}
pub unsafe fn reflect_transform_feedback(ctxt: &mut CommandContext, program: Handle)
-> Vec<TransformFeedbackBuffer>
{
let program = match program {
// transform feedback not supported
Handle::Handle(_) => return Vec::with_capacity(0),
Handle::Id(id) => id
};
// transform feedback not supported
if !(ctxt.version >= &Version(Api::Gl, 3, 0)) && !ctxt.extensions.gl_ext_transform_feedback {
return Vec::with_capacity(0);
}
// querying the number of varying
let num_varyings = {
let mut num_varyings: gl::types::GLint = mem::uninitialized();
if ctxt.version >= &Version(Api::Gl, 3, 0) {
ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_VARYINGS, &mut num_varyings);
} else if ctxt.extensions.gl_ext_transform_feedback {
ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_VARYINGS_EXT, &mut num_varyings);
} else {
unreachable!();
}
num_varyings<|fim▁hole|> return Vec::with_capacity(0);
}
// querying "interleaved" or "separate"
let buffer_mode = {
let mut buffer_mode: gl::types::GLint = mem::uninitialized();
if ctxt.version >= &Version(Api::Gl, 3, 0) {
ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_BUFFER_MODE, &mut buffer_mode);
} else if ctxt.extensions.gl_ext_transform_feedback {
ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_BUFFER_MODE_EXT, &mut buffer_mode);
} else {
unreachable!();
}
glenum_to_transform_feedback_mode(buffer_mode as gl::types::GLenum)
};
// the max length includes the null terminator
let mut max_buffer_len: gl::types::GLint = mem::uninitialized();
if ctxt.version >= &Version(Api::Gl, 3, 0) {
ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH,
&mut max_buffer_len);
} else if ctxt.extensions.gl_ext_transform_feedback {
ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH_EXT,
&mut max_buffer_len);
} else {
unreachable!();
}
let mut result = Vec::with_capacity(num_varyings as usize);
for index in (0 .. num_varyings as gl::types::GLuint) {
let mut name_tmp: Vec<u8> = Vec::with_capacity(max_buffer_len as usize);
let mut name_tmp_len = max_buffer_len;
let mut size = mem::uninitialized();
let mut ty = mem::uninitialized();
if ctxt.version >= &Version(Api::Gl, 3, 0) {
ctxt.gl.GetTransformFeedbackVarying(program, index, name_tmp_len, &mut name_tmp_len,
&mut size, &mut ty, name_tmp.as_mut_ptr()
as *mut gl::types::GLchar);
} else if ctxt.extensions.gl_ext_transform_feedback {
ctxt.gl.GetTransformFeedbackVaryingEXT(program, index, name_tmp_len,
&mut name_tmp_len, &mut size, &mut ty,
name_tmp.as_mut_ptr()
as *mut gl::types::GLchar);
} else {
unreachable!();
}
name_tmp.set_len(name_tmp_len as usize);
let name = String::from_utf8(name_tmp).unwrap();
if buffer_mode == TransformFeedbackMode::Interleaved {
if result.len() == 0 {
result.push(TransformFeedbackBuffer {
id: 0,
elements: vec![],
stride: 0,
});
}
let ty = glenum_to_attribute_type(ty as gl::types::GLenum);
let prev_size = result[0].stride;
result[0].stride += size as usize * ty.get_size_bytes();
result[0].elements.push(TransformFeedbackVarying { // TODO: handle arrays
name: name,
size: size as usize * ty.get_size_bytes(),
offset: prev_size,
ty: ty,
});
} else if buffer_mode == TransformFeedbackMode::Separate {
let id = result.len();
let ty = glenum_to_attribute_type(ty as gl::types::GLenum);
result.push(TransformFeedbackBuffer {
id: id as i32,
elements: vec![
TransformFeedbackVarying {
name: name,
size: size as usize * ty.get_size_bytes(),
offset: 0,
ty: ty,
}
],
stride: size as usize * ty.get_size_bytes(),
});
} else {
unreachable!();
}
}
result
}
/// Obtains the type of data that the geometry shader stage outputs.
///
/// # Unsafety
///
/// - `program` must be a valid handle to a program.
/// - The program **must** contain a geometry shader.
pub unsafe fn reflect_geometry_output_type(ctxt: &mut CommandContext, program: Handle)
-> OutputPrimitives
{
let mut value = mem::uninitialized();
match program {
Handle::Id(program) => {
assert!(ctxt.version >= &Version(Api::Gl, 2, 0) ||
ctxt.version >= &Version(Api::GlEs, 2, 0));
ctxt.gl.GetProgramiv(program, gl::GEOMETRY_OUTPUT_TYPE, &mut value);
},
Handle::Handle(program) => {
assert!(ctxt.extensions.gl_arb_vertex_shader);
ctxt.gl.GetObjectParameterivARB(program, gl::GEOMETRY_OUTPUT_TYPE, &mut value);
}
};
match value as gl::types::GLenum {
gl::POINTS => OutputPrimitives::Points,
gl::LINE_STRIP => OutputPrimitives::Lines,
gl::TRIANGLE_STRIP => OutputPrimitives::Triangles,
_ => unreachable!()
}
}
/// Obtains the type of data that the tessellation evaluation shader stage outputs.
///
/// # Unsafety
///
/// - `program` must be a valid handle to a program.
/// - The program **must** contain a tessellation evaluation shader.
pub unsafe fn reflect_tess_eval_output_type(ctxt: &mut CommandContext, program: Handle)
-> OutputPrimitives
{
let mut value = mem::uninitialized();
match program {
Handle::Id(program) => {
assert!(ctxt.version >= &Version(Api::Gl, 2, 0) ||
ctxt.version >= &Version(Api::GlEs, 2, 0));
ctxt.gl.GetProgramiv(program, gl::TESS_GEN_MODE, &mut value);
},
Handle::Handle(program) => {
assert!(ctxt.extensions.gl_arb_vertex_shader);
ctxt.gl.GetObjectParameterivARB(program, gl::TESS_GEN_MODE, &mut value);
}
};
match value as gl::types::GLenum {
gl::TRIANGLES => OutputPrimitives::Triangles,
gl::ISOLINES => OutputPrimitives::Lines,
gl::QUADS => OutputPrimitives::Quads,
_ => unreachable!()
}
}
/// Returns the list of shader storage blocks of a program.
pub unsafe fn reflect_shader_storage_blocks(ctxt: &mut CommandContext, program: Handle)
-> HashMap<String, UniformBlock>
{
if !(ctxt.version >= &Version(Api::Gl, 4, 3) || ctxt.version >= &Version(Api::GlEs, 3, 1) ||
(ctxt.extensions.gl_arb_program_interface_query && ctxt.extensions.gl_arb_shader_storage_buffer_object))
{
// not supported
return HashMap::with_capacity(0);
}
let program = match program {
Handle::Id(program) => program,
        Handle::Handle(_) => return HashMap::with_capacity(0)
};
// number of active SSBOs
let active_blocks = {
let mut active_blocks: gl::types::GLint = mem::uninitialized();
ctxt.gl.GetProgramInterfaceiv(program, gl::SHADER_STORAGE_BLOCK,
gl::ACTIVE_RESOURCES, &mut active_blocks);
active_blocks as gl::types::GLuint
};
// the result of this function
let mut blocks = HashMap::with_capacity(active_blocks as usize);
for block_id in (0 .. active_blocks) {
// getting basic infos
let (name_len, num_variables, binding, total_size) = {
let mut output: [gl::types::GLint; 4] = mem::uninitialized();
ctxt.gl.GetProgramResourceiv(program, gl::SHADER_STORAGE_BLOCK, block_id, 4,
[gl::NAME_LENGTH, gl::NUM_ACTIVE_VARIABLES,
gl::BUFFER_BINDING, gl::BUFFER_DATA_SIZE].as_ptr(), 4,
ptr::null_mut(), output.as_mut_ptr() as *mut _);
(output[0] as usize, output[1] as usize, output[2], output[3] as usize)
};
// getting the name of the block
let name = {
let mut name_tmp: Vec<u8> = Vec::with_capacity(1 + name_len);
let mut name_tmp_len = name_len as gl::types::GLsizei;
ctxt.gl.GetProgramResourceName(program, gl::SHADER_STORAGE_BLOCK, block_id,
name_tmp_len, &mut name_tmp_len,
name_tmp.as_mut_ptr() as *mut _);
name_tmp.set_len(name_tmp_len as usize);
String::from_utf8(name_tmp).unwrap()
};
// indices of the active variables
let active_variables: Vec<gl::types::GLint> = {
let mut variables = Vec::with_capacity(num_variables);
ctxt.gl.GetProgramResourceiv(program, gl::SHADER_STORAGE_BLOCK, block_id, 1,
[gl::ACTIVE_VARIABLES].as_ptr(),
num_variables as gl::types::GLsizei,
ptr::null_mut(), variables.as_mut_ptr() as *mut _);
variables.set_len(num_variables);
variables
};
// iterator over variables
let members = active_variables.into_iter().map(|variable| {
let (ty, array_size, offset, _array_stride, name_len) = {
let mut output: [gl::types::GLint; 5] = mem::uninitialized();
ctxt.gl.GetProgramResourceiv(program, gl::BUFFER_VARIABLE,
variable as gl::types::GLuint, 5,
[gl::TYPE, gl::ARRAY_SIZE, gl::OFFSET,
gl::ARRAY_STRIDE, gl::NAME_LENGTH].as_ptr(), 5,
ptr::null_mut(), output.as_mut_ptr() as *mut _);
(glenum_to_uniform_type(output[0] as gl::types::GLenum), output[1] as usize,
output[2] as usize, output[3] as usize, output[4] as usize)
};
let name = {
let mut name_tmp: Vec<u8> = Vec::with_capacity(1 + name_len);
let mut name_tmp_len = name_len as gl::types::GLsizei;
ctxt.gl.GetProgramResourceName(program, gl::BUFFER_VARIABLE,
variable as gl::types::GLuint,
name_tmp_len, &mut name_tmp_len,
name_tmp.as_mut_ptr() as *mut _);
name_tmp.set_len(name_tmp_len as usize);
String::from_utf8(name_tmp).unwrap()
};
(
name, offset, ty,
match array_size {
1 => None,
a => Some(a as usize),
},
)
});
// finally inserting into the blocks list
blocks.insert(name, UniformBlock {
binding: binding as i32,
size: total_size,
layout: introspection_output_to_layout(members),
});
}
blocks
}
/// Takes a list of elements produced by OpenGL's introspection API and turns them into
/// a `BlockLayout` object.
///
/// The iterator must produce a list of `(name, offset, ty, array_size)`.
///
/// # Panic
///
/// Panic if the input doesn't conform to the OpenGL specs.
///
fn introspection_output_to_layout<I>(elements: I) -> BlockLayout
where I: Iterator<Item = (String, usize, UniformType,
Option<usize>)>
{
// `output` must be a BlockLayout::Struct, otherwise this function will panic
fn process(output: &mut BlockLayout, name: &str, offset: usize, ty: UniformType,
array_size: Option<usize>)
{
let mut components = name.splitn(2, '.');
let current_component = components.next().unwrap();
let name_rest = components.next();
// finding the appropriate place in `output` to write the element
let member = if let &mut BlockLayout::Struct { ref mut members } = output {
// splitting the name and array size
let (current_component, array) = if current_component.ends_with(']') {
let open_bracket_pos = current_component.rfind('[').unwrap();
let array = current_component[open_bracket_pos + 1 .. current_component.len() - 1]
.parse().unwrap();
(¤t_component[.. open_bracket_pos], Some(array))
} else {
(¤t_component[..], None)
};
// because of a bug in Rust's borrow checker, we have to loop twice instead of just
// call `if let Some() { } else { }`
let existing = members.iter_mut().find(|m| m.0 == current_component).is_some();
if existing {
let member = &mut members.iter_mut().find(|m| m.0 == current_component)
.unwrap().1;
if let Some(array) = array {
match member {
&mut BlockLayout::Array { ref mut content, ref mut length } => {
if *length <= array { *length = array + 1; }
&mut **content
},
_ => unreachable!()
}
} else {
member
}
} else {
// member doesn't exist yet in the output, adding it
if let Some(array) = array {
members.push((current_component.to_string(), BlockLayout::Array {
content: Box::new(BlockLayout::Struct { members: Vec::new() }),
length: if name_rest.is_some() { array } else { array_size.unwrap() },
}));
match &mut members.last_mut().unwrap().1 {
&mut BlockLayout::Array { ref mut content, .. } => &mut **content,
_ => unreachable!()
}
} else {
members.push((current_component.to_string(), BlockLayout::Struct {
members: Vec::new()
}));
&mut members.last_mut().unwrap().1
}
}
} else {
unreachable!();
};
// now adding either the other elements or the final element itself
if let Some(name_rest) = name_rest {
process(member, name_rest, offset, ty, array_size);
} else {
*member = BlockLayout::BasicType {
offset_in_buffer: offset,
ty: ty,
};
}
}
// ↓ actual body of `introspection_output_to_layout` starts here ↓
let mut layout = BlockLayout::Struct { members: Vec::new() };
for (name, offset, ty, array_size) in elements {
process(&mut layout, &name, offset, ty, array_size);
}
layout
}
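// Hedged sketch (not part of the original file): how the flattened
// introspection names handled above nest into a `BlockLayout`; the input
// follows the `(name, offset, ty, array_size)` contract documented on the
// function.
#[cfg(test)]
mod layout_example {
    use super::*;

    #[test]
    fn flattened_names_become_nested_layout() {
        let layout = introspection_output_to_layout(vec![
            ("lights[0].color".to_string(), 0, UniformType::FloatVec3, None),
            ("lights[1].color".to_string(), 16, UniformType::FloatVec3, None),
        ].into_iter());
        match layout {
            BlockLayout::Struct { ref members } => assert_eq!(members[0].0, "lights"),
            _ => panic!("expected a struct layout"),
        }
    }
}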
fn glenum_to_uniform_type(ty: gl::types::GLenum) -> UniformType {
match ty {
gl::FLOAT => UniformType::Float,
gl::FLOAT_VEC2 => UniformType::FloatVec2,
gl::FLOAT_VEC3 => UniformType::FloatVec3,
gl::FLOAT_VEC4 => UniformType::FloatVec4,
gl::DOUBLE => UniformType::Double,
gl::DOUBLE_VEC2 => UniformType::DoubleVec2,
gl::DOUBLE_VEC3 => UniformType::DoubleVec3,
gl::DOUBLE_VEC4 => UniformType::DoubleVec4,
gl::INT => UniformType::Int,
gl::INT_VEC2 => UniformType::IntVec2,
gl::INT_VEC3 => UniformType::IntVec3,
gl::INT_VEC4 => UniformType::IntVec4,
gl::UNSIGNED_INT => UniformType::UnsignedInt,
gl::UNSIGNED_INT_VEC2 => UniformType::UnsignedIntVec2,
gl::UNSIGNED_INT_VEC3 => UniformType::UnsignedIntVec3,
gl::UNSIGNED_INT_VEC4 => UniformType::UnsignedIntVec4,
gl::BOOL => UniformType::Bool,
gl::BOOL_VEC2 => UniformType::BoolVec2,
gl::BOOL_VEC3 => UniformType::BoolVec3,
gl::BOOL_VEC4 => UniformType::BoolVec4,
gl::FLOAT_MAT2 => UniformType::FloatMat2,
gl::FLOAT_MAT3 => UniformType::FloatMat3,
gl::FLOAT_MAT4 => UniformType::FloatMat4,
gl::FLOAT_MAT2x3 => UniformType::FloatMat2x3,
gl::FLOAT_MAT2x4 => UniformType::FloatMat2x4,
gl::FLOAT_MAT3x2 => UniformType::FloatMat3x2,
gl::FLOAT_MAT3x4 => UniformType::FloatMat3x4,
gl::FLOAT_MAT4x2 => UniformType::FloatMat4x2,
gl::FLOAT_MAT4x3 => UniformType::FloatMat4x3,
gl::DOUBLE_MAT2 => UniformType::DoubleMat2,
gl::DOUBLE_MAT3 => UniformType::DoubleMat3,
gl::DOUBLE_MAT4 => UniformType::DoubleMat4,
gl::DOUBLE_MAT2x3 => UniformType::DoubleMat2x3,
gl::DOUBLE_MAT2x4 => UniformType::DoubleMat2x4,
gl::DOUBLE_MAT3x2 => UniformType::DoubleMat3x2,
gl::DOUBLE_MAT3x4 => UniformType::DoubleMat3x4,
gl::DOUBLE_MAT4x2 => UniformType::DoubleMat4x2,
gl::DOUBLE_MAT4x3 => UniformType::DoubleMat4x3,
gl::SAMPLER_1D => UniformType::Sampler1d,
gl::SAMPLER_2D => UniformType::Sampler2d,
gl::SAMPLER_3D => UniformType::Sampler3d,
gl::SAMPLER_CUBE => UniformType::SamplerCube,
gl::SAMPLER_1D_SHADOW => UniformType::Sampler1dShadow,
gl::SAMPLER_2D_SHADOW => UniformType::Sampler2dShadow,
gl::SAMPLER_1D_ARRAY => UniformType::Sampler1dArray,
gl::SAMPLER_2D_ARRAY => UniformType::Sampler2dArray,
gl::SAMPLER_1D_ARRAY_SHADOW => UniformType::Sampler1dArrayShadow,
gl::SAMPLER_2D_ARRAY_SHADOW => UniformType::Sampler2dArrayShadow,
gl::SAMPLER_2D_MULTISAMPLE => UniformType::Sampler2dMultisample,
gl::SAMPLER_2D_MULTISAMPLE_ARRAY => UniformType::Sampler2dMultisampleArray,
gl::SAMPLER_CUBE_SHADOW => UniformType::SamplerCubeShadow,
gl::SAMPLER_BUFFER => UniformType::SamplerBuffer,
gl::SAMPLER_2D_RECT => UniformType::Sampler2dRect,
gl::SAMPLER_2D_RECT_SHADOW => UniformType::Sampler2dRectShadow,
gl::INT_SAMPLER_1D => UniformType::ISampler1d,
gl::INT_SAMPLER_2D => UniformType::ISampler2d,
gl::INT_SAMPLER_3D => UniformType::ISampler3d,
gl::INT_SAMPLER_CUBE => UniformType::ISamplerCube,
gl::INT_SAMPLER_1D_ARRAY => UniformType::ISampler1dArray,
gl::INT_SAMPLER_2D_ARRAY => UniformType::ISampler2dArray,
gl::INT_SAMPLER_2D_MULTISAMPLE => UniformType::ISampler2dMultisample,
gl::INT_SAMPLER_2D_MULTISAMPLE_ARRAY => UniformType::ISampler2dMultisampleArray,
gl::INT_SAMPLER_BUFFER => UniformType::ISamplerBuffer,
gl::INT_SAMPLER_2D_RECT => UniformType::ISampler2dRect,
gl::UNSIGNED_INT_SAMPLER_1D => UniformType::USampler1d,
gl::UNSIGNED_INT_SAMPLER_2D => UniformType::USampler2d,
gl::UNSIGNED_INT_SAMPLER_3D => UniformType::USampler3d,
gl::UNSIGNED_INT_SAMPLER_CUBE => UniformType::USamplerCube,
gl::UNSIGNED_INT_SAMPLER_1D_ARRAY => UniformType::USampler2dArray,
gl::UNSIGNED_INT_SAMPLER_2D_ARRAY => UniformType::USampler2dArray,
gl::UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE => UniformType::USampler2dMultisample,
gl::UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY => UniformType::USampler2dMultisampleArray,
gl::UNSIGNED_INT_SAMPLER_BUFFER => UniformType::USamplerBuffer,
gl::UNSIGNED_INT_SAMPLER_2D_RECT => UniformType::USampler2dRect,
gl::IMAGE_1D => UniformType::Image1d,
gl::IMAGE_2D => UniformType::Image2d,
gl::IMAGE_3D => UniformType::Image3d,
gl::IMAGE_2D_RECT => UniformType::Image2dRect,
gl::IMAGE_CUBE => UniformType::ImageCube,
gl::IMAGE_BUFFER => UniformType::ImageBuffer,
gl::IMAGE_1D_ARRAY => UniformType::Image1dArray,
gl::IMAGE_2D_ARRAY => UniformType::Image2dArray,
gl::IMAGE_2D_MULTISAMPLE => UniformType::Image2dMultisample,
gl::IMAGE_2D_MULTISAMPLE_ARRAY => UniformType::Image2dMultisampleArray,
gl::INT_IMAGE_1D => UniformType::IImage1d,
gl::INT_IMAGE_2D => UniformType::IImage2d,
gl::INT_IMAGE_3D => UniformType::IImage3d,
gl::INT_IMAGE_2D_RECT => UniformType::IImage2dRect,
gl::INT_IMAGE_CUBE => UniformType::IImageCube,
gl::INT_IMAGE_BUFFER => UniformType::IImageBuffer,
gl::INT_IMAGE_1D_ARRAY => UniformType::IImage1dArray,
gl::INT_IMAGE_2D_ARRAY => UniformType::IImage2dArray,
gl::INT_IMAGE_2D_MULTISAMPLE => UniformType::IImage2dMultisample,
gl::INT_IMAGE_2D_MULTISAMPLE_ARRAY => UniformType::IImage2dMultisampleArray,
gl::UNSIGNED_INT_IMAGE_1D => UniformType::UImage1d,
gl::UNSIGNED_INT_IMAGE_2D => UniformType::UImage2d,
gl::UNSIGNED_INT_IMAGE_3D => UniformType::UImage3d,
gl::UNSIGNED_INT_IMAGE_2D_RECT => UniformType::UImage2dRect,
gl::UNSIGNED_INT_IMAGE_CUBE => UniformType::UImageCube,
gl::UNSIGNED_INT_IMAGE_BUFFER => UniformType::UImageBuffer,
gl::UNSIGNED_INT_IMAGE_1D_ARRAY => UniformType::UImage1dArray,
gl::UNSIGNED_INT_IMAGE_2D_ARRAY => UniformType::UImage2dArray,
gl::UNSIGNED_INT_IMAGE_2D_MULTISAMPLE => UniformType::UImage2dMultisample,
gl::UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY => UniformType::UImage2dMultisampleArray,
gl::UNSIGNED_INT_ATOMIC_COUNTER => UniformType::AtomicCounterUint,
v => panic!("Unknown value returned by OpenGL uniform type: {}", v)
}
}
fn glenum_to_attribute_type(value: gl::types::GLenum) -> AttributeType {
match value {
gl::FLOAT => AttributeType::F32,
gl::FLOAT_VEC2 => AttributeType::F32F32,
gl::FLOAT_VEC3 => AttributeType::F32F32F32,
gl::FLOAT_VEC4 => AttributeType::F32F32F32F32,
gl::INT => AttributeType::I32,
gl::INT_VEC2 => AttributeType::I32I32,
gl::INT_VEC3 => AttributeType::I32I32I32,
gl::INT_VEC4 => AttributeType::I32I32I32I32,
gl::UNSIGNED_INT => AttributeType::U32,
gl::UNSIGNED_INT_VEC2 => AttributeType::U32U32,
//gl::UNSIGNED_INT_VEC2_EXT => AttributeType::U32U32,
gl::UNSIGNED_INT_VEC3 => AttributeType::U32U32U32,
//gl::UNSIGNED_INT_VEC3_EXT => AttributeType::U32U32U32,
gl::UNSIGNED_INT_VEC4 => AttributeType::U32U32U32U32,
//gl::UNSIGNED_INT_VEC4_EXT => AttributeType::U32U32U32U32,
gl::FLOAT_MAT2 => AttributeType::F32x2x2,
gl::FLOAT_MAT3 => AttributeType::F32x3x3,
gl::FLOAT_MAT4 => AttributeType::F32x4x4,
gl::FLOAT_MAT2x3 => AttributeType::F32x2x3,
gl::FLOAT_MAT2x4 => AttributeType::F32x2x4,
gl::FLOAT_MAT3x2 => AttributeType::F32x3x2,
gl::FLOAT_MAT3x4 => AttributeType::F32x3x4,
gl::FLOAT_MAT4x2 => AttributeType::F32x4x2,
gl::FLOAT_MAT4x3 => AttributeType::F32x4x3,
v => panic!("Unknown value returned by OpenGL attribute type: {}", v)
}
}
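// Example (sketch): `glenum_to_attribute_type` maps the GLenum reported by
// glGetActiveAttrib onto this crate's AttributeType, e.g.
//     glenum_to_attribute_type(gl::FLOAT_VEC4) == AttributeType::F32F32F32F32
// Unknown enums panic, so callers should only pass values obtained from
// program reflection.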
fn glenum_to_transform_feedback_mode(value: gl::types::GLenum) -> TransformFeedbackMode {
match value {
gl::INTERLEAVED_ATTRIBS/* | gl::INTERLEAVED_ATTRIBS_EXT*/ => {
TransformFeedbackMode::Interleaved
},
gl::SEPARATE_ATTRIBS/* | gl::SEPARATE_ATTRIBS_EXT*/ => {
TransformFeedbackMode::Separate
},
v => panic!("Unknown value returned by OpenGL varying mode: {}", v)
}
}<|fim▁end|> | };
// no need to request other things if there are no varyings
if num_varyings == 0 { |
<|file_name|>Write.py<|end_file_name|><|fim▁begin|>#! /usr/bin/python
import sys
import os
import json
import grpc
import time
import subprocess
from google.oauth2 import service_account
import google.oauth2.credentials
import google.auth.transport.requests
import google.auth.transport.grpc
from google.firestore.v1beta1 import firestore_pb2
from google.firestore.v1beta1 import firestore_pb2_grpc
from google.firestore.v1beta1 import document_pb2
from google.firestore.v1beta1 import document_pb2_grpc
from google.firestore.v1beta1 import common_pb2
from google.firestore.v1beta1 import common_pb2_grpc
from google.firestore.v1beta1 import write_pb2
from google.firestore.v1beta1 import write_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
def first_message(database, write):
messages = [
firestore_pb2.WriteRequest(database = database, writes = [])
]
for msg in messages:
yield msg
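# Note (from how main() below uses the stream): the first, empty WriteRequest
# only opens the write stream; the server's response to it supplies the
# stream_id and stream_token that every later request must echo back.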
def generate_messages(database, writes, stream_id, stream_token):
    # `writes` can be a list, and further WriteRequests can be appended to
    # `messages`, so multiple Write operations may be streamed; here we send
    # just one as an example.
messages = [
firestore_pb2.WriteRequest(database=database, writes = []),
firestore_pb2.WriteRequest(database=database, writes = [writes], stream_id = stream_id, stream_token = stream_token)
]
for msg in messages:
yield msg
def main():
fl = os.path.dirname(os.path.abspath(__file__))
fn = os.path.join(fl, 'grpc.json')
with open(fn) as grpc_file:
item = json.load(grpc_file)
creds = item["grpc"]["Write"]["credentials"]
credentials = service_account.Credentials.from_service_account_file("{}".format(creds))
scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/datastore'])
http_request = google.auth.transport.requests.Request()
channel = google.auth.transport.grpc.secure_authorized_channel(scoped_credentials, http_request, 'firestore.googleapis.com:443')
stub = firestore_pb2_grpc.FirestoreStub(channel)
database = item["grpc"]["Write"]["database"]<|fim▁hole|> name = item["grpc"]["Write"]["name"]
first_write = write_pb2.Write()
responses = stub.Write(first_message(database, first_write))
for response in responses:
print("Received message %s" % (response.stream_id))
print(response.stream_token)
value_ = document_pb2.Value(string_value = "foo_boo")
update = document_pb2.Document(name=name, fields={"foo":value_})
writes = write_pb2.Write(update_mask=common_pb2.DocumentMask(field_paths = ["foo"]), update=update)
r2 = stub.Write(generate_messages(database, writes, response.stream_id, response.stream_token))
for r in r2:
print(r.write_results)
if __name__ == "__main__":
main()<|fim▁end|> | |
<|file_name|>test_integration.py<|end_file_name|><|fim▁begin|>import unittest
import route53
from route53.exceptions import AlreadyDeletedError
from route53.transport import BaseTransport
from tests.utils import get_route53_connection
import datetime
import os
from test_basic import BaseTestCase
try:
from .credentials import AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
except ImportError:
AWS_ACCESS_KEY_ID = 'XXXXXXXXXXXXXXXXXXXX'
AWS_SECRET_ACCESS_KEY = 'YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'
class IntegrationBaseTestCase(BaseTestCase):
"""
A base unit test class that has some generally useful stuff for the
various test cases.
"""
test_zone_name = 'route53-unittest-zone.com.'
def __init__(self, *args, **kwargs):
super(IntegrationBaseTestCase, self).__init__(*args, **kwargs)
if ((AWS_ACCESS_KEY_ID == 'XXXXXXXXXXXXXXXXXXXX') or (AWS_SECRET_ACCESS_KEY == 'YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY')):
self.skip = True
else:
            self.skip = False
def setUp(self):
self.conn = get_route53_connection(**self.CONNECTION_OPTIONS)
self.submittedAt = datetime.datetime.now()
def tearDown(self):
for zone in self.conn.list_hosted_zones():
            if zone.name == self.test_zone_name:
zone.delete(force=True)
class IntegrationHostedZoneTestCase(IntegrationBaseTestCase):
"""
Tests for manipulating hosted zones.
"""
def test_sequence(self):
"""
Runs through a sequence of calls to test hosted zones.
"""
if self.skip:
            self.skipTest("There are no api credentials")
# Create a new hosted zone.
new_zone, change_info = self.conn.create_hosted_zone(
self.test_zone_name, comment='A comment here.'
)
# Make sure the change info came through.
self.assertIsInstance(change_info, dict)
# Now get a list of all zones. Look for the one we just created.
found_match = False
for zone in self.conn.list_hosted_zones():
if zone.name == new_zone.name:
found_match = True
# ListHostedZones doesn't return nameservers.
# We lazy load them in this case. Initially, the nameservers
# are empty.
self.assertEqual(zone._nameservers, [])
# This should return the nameservers
self.assertNotEqual(zone.nameservers, [])
# This should now be populated.
self.assertNotEqual(zone._nameservers, [])
break
# If a match wasn't found, we're not happy.
self.assertTrue(found_match)
# Now attempt to retrieve the newly created HostedZone.
zone = self.conn.get_hosted_zone_by_id(new_zone.id)
# Its nameservers should be populated.
self.assertNotEqual([], zone.nameservers)
zone.delete()
# Trying to delete a second time raises an exception.
self.assertRaises(AlreadyDeletedError, zone.delete)<|fim▁hole|> 'test.' + self.test_zone_name,
['8.8.8.8']
)
class IntegrationResourceRecordSetTestCase(IntegrationBaseTestCase):
"""
Tests related to RRSets. Deletions are tested in the cleanUp() method,
on the base class, more or less.
"""
def test_create_rrset(self):
"""
Tests creation of various record sets.
"""
if self.skip:
            self.skipTest("There are no api credentials")
new_zone, change_info = self.conn.create_hosted_zone(
self.test_zone_name
)
self.assertIsInstance(change_info, dict)
self.assertEqual(change_info['request_status'], 'INSYNC')
self.assertEqual(change_info['request_submitted_at'].year, self.submittedAt.year)
self.assertIsInstance(new_zone, route53.hosted_zone.HostedZone)
new_record, change_info = new_zone.create_a_record(
name='test.route53-unittest-zone.com.',
values=['8.8.8.8'],
ttl=40,
# weight=10
)
self.assertIsInstance(change_info, dict)
self.assertEqual(change_info['request_status'], 'PENDING')
self.assertEqual(change_info['request_submitted_at'].year, self.submittedAt.year)
self.assertIsInstance(new_record, route53.hosted_zone.AResourceRecordSet)
# Initial values should equal current values.
for key, val in new_record._initial_vals.items():
self.assertEqual(getattr(new_record, key), val)
def test_change_existing_rrset(self):
"""
Tests changing an existing record set.
"""
if self.skip:
            self.skipTest("There are no api credentials")
new_zone, change_info = self.conn.create_hosted_zone(
self.test_zone_name
)
new_record, change_info = new_zone.create_a_record(
name='test.route53-unittest-zone.com.',
values=['8.8.8.8'],
)
self.assertIsInstance(change_info, dict)
self.assertEqual(change_info['request_status'], 'PENDING')
self.assertEqual(change_info['request_submitted_at'].year, self.submittedAt.year)
self.assertIsInstance(new_record, route53.hosted_zone.AResourceRecordSet)
new_record.values = ['8.8.8.7']
new_record.save()
# Initial values should equal current values after the save.
for key, val in new_record._initial_vals.items():
self.assertEqual(getattr(new_record, key), val)<|fim▁end|> | # Attempting to add a record set to an already deleted zone does the same.
self.assertRaises(AlreadyDeletedError,
zone.create_a_record, |
<|file_name|>uart.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
"""Python binding of UART wrapper of LetMeCreate library."""
import ctypes
_LIB = ctypes.CDLL('libletmecreate_core.so')
# Baudrates
UART_BD_1200 = 1200
UART_BD_2400 = 2400
UART_BD_4800 = 4800
UART_BD_9600 = 9600
UART_BD_19200 = 19200
UART_BD_38400 = 38400
UART_BD_57600 = 57600
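# Example usage (sketch; the bus index and payload bytes are illustrative):
#   init()
#   select_bus(0)               # MIKROBUS_1
#   set_baudrate(UART_BD_9600)
#   send([0x01, 0x02])
#   reply = receive(2)
#   release()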
def init():
"""Initialise UART on all mikrobus.
UART buses are configured:
- baudrate: 9600
- one stop bit
- no parity bit
The current bus is set to MIKROBUS_1.
Note: An exception is thrown if an error occurs during initialisation.
"""
ret = _LIB.uart_init()
if ret < 0:
raise Exception("uart init failed")
def select_bus(mikrobus_index):
"""Selects the current UART bus.
mikrobus_index: must be 0 (MIKROBUS_1) or 1 (MIKROBUS_2)
    Note: If the mikrobus index is invalid, then this function does nothing.
"""
_LIB.uart_select_bus(mikrobus_index)
def get_current_bus():
"""Returns the current UART bus: 0 (MIKROBUS_1) or 1 (MIKROBUS_2)."""
return _LIB.uart_get_current_bus()
def set_baudrate(baudrate):
""""Set the baudrate of the current UART bus.
baudrate: Must be one of the predefined baudrates.
Note: An exception is thrown, if it fails to set the baudrate.
"""
ret = _LIB.uart_set_baudrate(baudrate)
if ret < 0:
raise Exception("uart set baudrate failed")
def get_baudrate():
"""Returns the baudrate of the current UART bus.
Note: An exception is thrown if it fails to retrieve the baudrate.
"""
baudrate = ctypes.c_uint32(0)
ret = _LIB.uart_get_baudrate(ctypes.byref(baudrate))
if ret < 0:
raise Exception("uart get baudrate failed")
return baudrate.value
def send(data):
"""Sends data using the current UART bus.
data: A list of bytes.
Note: An exception is thrown if an error occurs during the transmission.
"""
arr = (ctypes.c_uint8 * len(data))(*data)
ret = _LIB.uart_send(arr, len(data))
if ret < 0:
raise Exception("uart send failed")
def receive(length):
"""Returns a list of bytes.
    This function is blocking and will not return until length bytes have
been received.
length: Number of bytes to receive.
Note: An exception is thrown if it fails to receive data.
"""
arr = (ctypes.c_uint8 * length)()
ret = _LIB.uart_receive(arr, length)
if ret < 0:
raise Exception("uart receive failed")
return [arr[i] for i in range(length)]
def release():
"""Releases all UART bus.
Note: An exception is thrown if it fails to release all UART buses.
"""
ret = _LIB.uart_release()
if ret < 0:<|fim▁hole|><|fim▁end|> | raise Exception("uart release failed") |
<|file_name|>menu_manager_factory.cc<|end_file_name|><|fim▁begin|>// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/extensions/menu_manager_factory.h"
#include "chrome/browser/extensions/menu_manager.h"
#include "chrome/browser/profiles/profile.h"
#include "components/keyed_service/content/browser_context_dependency_manager.h"
#include "extensions/browser/extension_system.h"
#include "extensions/browser/extension_system_provider.h"
#include "extensions/browser/extensions_browser_client.h"
namespace extensions {
// static
MenuManager* MenuManagerFactory::GetForBrowserContext(
content::BrowserContext* context) {
return static_cast<MenuManager*>(
GetInstance()->GetServiceForBrowserContext(context, true));
}
// static
MenuManagerFactory* MenuManagerFactory::GetInstance() {
return Singleton<MenuManagerFactory>::get();
}
// static
KeyedService* MenuManagerFactory::BuildServiceInstanceForTesting(
content::BrowserContext* context) {
return GetInstance()->BuildServiceInstanceFor(context);
}
MenuManagerFactory::MenuManagerFactory()
: BrowserContextKeyedServiceFactory(
"MenuManager",
BrowserContextDependencyManager::GetInstance()) {
DependsOn(ExtensionsBrowserClient::Get()->GetExtensionSystemFactory());
}
MenuManagerFactory::~MenuManagerFactory() {}
KeyedService* MenuManagerFactory::BuildServiceInstanceFor(
content::BrowserContext* context) const {
Profile* profile = Profile::FromBrowserContext(context);
return new MenuManager(profile, ExtensionSystem::Get(profile)->state_store());
}
<|fim▁hole|> return ExtensionsBrowserClient::Get()->GetOriginalContext(context);
}
bool MenuManagerFactory::ServiceIsCreatedWithBrowserContext() const {
return true;
}
bool MenuManagerFactory::ServiceIsNULLWhileTesting() const {
return true;
}
} // namespace extensions<|fim▁end|> | content::BrowserContext* MenuManagerFactory::GetBrowserContextToUse(
content::BrowserContext* context) const { |
<|file_name|>locus.js<|end_file_name|><|fim▁begin|>import {StringUtils} from "../node_modules/igv-utils/src/index.js"
class Locus {
constructor({chr, start, end}) {
this.chr = chr
this.start = start
this.end = end
}
contains(locus) {
return locus.chr === this.chr && locus.start >= this.start && locus.end <= this.end
}
overlaps(locus) {
return locus.chr === this.chr && !(locus.end < this.start || locus.start > this.end)
}
extend(l) {
if (l.chr !== this.chr) return
this.start = Math.min(l.start, this.start)
this.end = Math.max(l.end, this.end)
}
getLocusString() {
if ('all' === this.chr) {
return 'all'
} else {
const ss = StringUtils.numberFormatter(Math.floor(this.start) + 1)
const ee = StringUtils.numberFormatter(Math.round(this.end))
return `${this.chr}:${ss}-${ee}`
}
}
static fromLocusString(str) {
if ('all' === str) {
return new Locus({chr: 'all'})
}
const parts = str.split(':')
const chr = parts[0]
const se = parts[1].split("-")
const start = Number.parseInt(se[0].replace(/,/g, "")) - 1
const end = Number.parseInt(se[1].replace(/,/g, ""))
return new Locus({chr, start, end})
}
}
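// Example (sketch): parsing and re-rendering a locus string. Internally the
// start is stored zero-based (note the -1/+1 in the conversions above).
//   const locus = Locus.fromLocusString("chr1:1,000-2,000")  // start === 999, end === 2000
//   locus.getLocusString()                                   // "chr1:1,000-2,000"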
<|fim▁hole|>export default Locus<|fim▁end|> | |
<|file_name|>babel.config.js<|end_file_name|><|fim▁begin|><|fim▁hole|>
return {
presets: [
[
'@babel/preset-env',
{
targets: {
node: '12'
}
}
]
]
};
};<|fim▁end|> | module.exports = api => {
api.cache(true); |
<|file_name|>test_ray.py<|end_file_name|><|fim▁begin|>import os
import numpy as np
from matplotlib import pyplot as plt
from numpy import genfromtxt
from matplotlib import cm
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
def axisEqual3D(ax):
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def drawRay(ax, filePath):
# Retrieve ray points
sphericalPoints = genfromtxt(filePath, delimiter=',')
# Retrieve the actual data
r = sphericalPoints[:, 3]
theta = sphericalPoints[:, 4]
phi = sphericalPoints[:, 5]
cosT = np.cos(theta)
sinT = np.sin(theta)
cosP = np.cos(phi)
sinP = np.sin(phi)
x = r * sinT * cosP
y = r * sinT * sinP
z = r * cosT
ax.plot(x, y, z, label='Ray0')
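# Note (observation): drawRays below treats column 4 as the azimuthal angle and
# column 5 as the polar angle, the opposite convention to drawRay above. Both
# are valid spherical conventions; check which one matches the CSV layout.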
def drawRays(ax, filePath):
# Retrieve ray points
data = genfromtxt(filePath, delimiter=',')
for i in range(0, 100, 10):
ray = data[data[:, 0] == i, :]
ray = ray[ray[:, 2].argsort()[::-1]]
print(ray)
r = ray[:, 3]
theta = ray[:, 4]
phi = ray[:, 5]
cosT = np.cos(theta)
sinT = np.sin(theta)
cosP = np.cos(phi)
sinP = np.sin(phi)
x = r * cosT * sinP
y = r * sinT * sinP
z = r * cosP
ax.plot(x, y, z, label='Ray0', c='blue')
def drawCamera(ax):
camR = 100
camTheta = np.pi/2
camPhi = 0
camX = camR * np.sin(camTheta) * np.cos(camPhi)
camY = camR * np.sin(camTheta) * np.sin(camPhi)
camZ = camR * np.cos(camTheta)
ax.scatter(camX, camY, camZ, s=100, c='red')
x = [1, 1, -1, -1]
y = [1, -1, -1, 1]
z = [-1, -1, -1, -1]
verts = [(x[i], y[i], z[i]) for i in range(4)]
# ax.add_collection3d(Poly3DCollection(verts))
def drawAxes(ax, d=150):
ax.plot((-d, d), (0, 0), (0, 0), 'grey')
ax.plot((0, 0), (-d, d), (0, 0), 'grey')
ax.plot((0, 0), (0, 0), (-d, d), 'gray')
def drawBlackHole(ax, r=5):
# Draw black hole
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = r * np.outer(np.cos(u), np.sin(v))
y = r * np.outer(np.sin(u), np.sin(v))
z = r * np.outer(np.ones(np.size(u)), np.cos(v))<|fim▁hole|>
def absoluteFilePaths(directory):
for dirpath, _, filenames in os.walk(directory):
for f in filenames:
yield os.path.abspath(os.path.join(dirpath, f))
if __name__ == '__main__':
fig = plt.figure()
ax = fig.gca(projection='3d')
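    # Note: matplotlib >= 3.4 deprecates gca(projection=...); if this errors,
    # fig.add_subplot(projection='3d') is the modern equivalent.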
ax.set_axis_off()
ax.set_xlim3d(-25, 25)
ax.set_ylim3d(-25, 25)
ax.set_zlim3d(-25, 25)
# axisEqual3D(ax)
drawAxes(ax)
drawBlackHole(ax)
drawCamera(ax)
# drawRay(ax, "Data/rayPositions.csv")
# drawRay(ax, "Data/middleRay.csv")
# drawRays(ax, "Data/rays.csv")
# for fileName in absoluteFilePaths("Data/Spin00001"):
# if fileName.endswith(".csv"):
# drawRay(ax, fileName)
#
drawRay(ax, "Data/Spin00001/ray00.csv")
drawRay(ax, "Data/Spin00001/ray10.csv")
drawRay(ax, "Data/Spin00001/ray20.csv")
# drawRay(ax, "Data/Spin00001/ray30.csv")
drawRay(ax, "Data/Spin00001/ray40.csv")
drawRay(ax, "Data/Spin00001/ray50.csv")
drawRay(ax, "Data/Spin00001/ray60.csv")
# drawRay(ax, "Data/Spin00001/ray70.csv")
drawRay(ax, "Data/Spin00001/ray80.csv")
drawRay(ax, "Data/Spin00001/ray90.csv")
drawRay(ax, "Data/Spin00001/ray99.csv")
# ax.legend()
plt.show()<|fim▁end|> |
ax.plot_surface(x, y, z, rstride=4, cstride=4, color='black') |
<|file_name|>order.go<|end_file_name|><|fim▁begin|>package annotate
import (
"context"
"sync"
"github.com/paulmach/osm"
)
// RelationHistoryDatasourcer is an more strict interface for when we only need the relation history.
type RelationHistoryDatasourcer interface {
RelationHistory(context.Context, osm.RelationID) (osm.Relations, error)
NotFound(error) bool
}
var _ RelationHistoryDatasourcer = &osm.HistoryDatasource{}
// A ChildFirstOrdering is a struct that allows for a set of relations to be
// processed in a dept first order. Since relations can reference other
// relations we need to make sure children are added before parents.
type ChildFirstOrdering struct {
// CompletedIndex is the number of relation ids in the provided
// array that have been finished. This can be used as a good restart position.
CompletedIndex int
ctx context.Context
done context.CancelFunc
ds RelationHistoryDatasourcer
visited map[osm.RelationID]struct{}
out chan osm.RelationID
wg sync.WaitGroup
id osm.RelationID
err error
}
// NewChildFirstOrdering creates a new ordering object. It is used to provide
// a child-before-parent ordering for relations. This order must be used when
// inserting+annotating relations into the datastore.
func NewChildFirstOrdering(
ctx context.Context,
ids []osm.RelationID,
ds RelationHistoryDatasourcer,
) *ChildFirstOrdering {
ctx, done := context.WithCancel(ctx)
o := &ChildFirstOrdering{
ctx: ctx,
done: done,
ds: ds,
visited: make(map[osm.RelationID]struct{}, len(ids)),
out: make(chan osm.RelationID),
}
o.wg.Add(1)
go func() {
defer o.wg.Done()
defer close(o.out)
path := make([]osm.RelationID, 0, 100)
for i, id := range ids {
err := o.walk(id, path)
if err != nil {
o.err = err
return
}
o.CompletedIndex = i
}
}()
return o
}
// Err returns a non-nil error if something went wrong with the search,
// like a cycle, or a datasource error.
func (o *ChildFirstOrdering) Err() error {
if o.err != nil {
return o.err
}
return o.ctx.Err()
}
// Next locates the next relation id that can be used.
// Returns false if the context is closed, something went wrong
// or the full tree has been walked.
func (o *ChildFirstOrdering) Next() bool {
if o.err != nil || o.ctx.Err() != nil {
return false
}
select {
case id := <-o.out:
if id == 0 {
return false
}
o.id = id
return true
case <-o.ctx.Done():
return false
}
}
// RelationID is the id found by the previous scan.
func (o *ChildFirstOrdering) RelationID() osm.RelationID {
return o.id
}
// Close can be used to terminate the scanning process before
// all ids have been walked.
func (o *ChildFirstOrdering) Close() {
o.done()
o.wg.Wait()
}
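// Example (sketch): draining the ordering; ids, ds and the insert function
// are illustrative.
//
//	ordering := NewChildFirstOrdering(ctx, ids, ds)
//	defer ordering.Close()
//	for ordering.Next() {
//		insert(ordering.RelationID()) // children are always yielded before parents
//	}
//	if err := ordering.Err(); err != nil {
//		// handle cycle/datasource/context errors
//	}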
func (o *ChildFirstOrdering) walk(id osm.RelationID, path []osm.RelationID) error {
if _, ok := o.visited[id]; ok {
return nil
}
relations, err := o.ds.RelationHistory(o.ctx, id)
if o.ds.NotFound(err) {
return nil
}
if err != nil {
return err
}
for _, r := range relations {
for _, m := range r.Members {
if m.Type != osm.TypeRelation {
continue
}
mid := osm.RelationID(m.Ref)
for _, pid := range path {
if pid == mid {
// circular relations are allowed,
// source: https://github.com/openstreetmap/openstreetmap-website/issues/1465#issuecomment-282323187
// since this relation is already being worked through higher
// up the stack, we can just return here.
return nil
}
}
err := o.walk(mid, append(path, mid))
if err != nil {
return err
}
}
}
if o.ctx.Err() != nil {
return o.ctx.Err()
}
o.visited[id] = struct{}{}
select {
case o.out <- id:
case <-o.ctx.Done():
return o.ctx.Err()
}<|fim▁hole|>
return nil
}<|fim▁end|> | |
<|file_name|>gateway.go<|end_file_name|><|fim▁begin|>package gateway
import (
"errors"
"net"
"strings"
"syscall"
)
const (
DefaultProtocol = "tcp"
DefaultSchedulingMethod = "wrr"
)
// Possible validation errors.
var (
ErrMissingEndpoint = errors.New("endpoint information is missing")
ErrUnknownProtocol = errors.New("specified protocol is unknown")
)
// ServiceOptions describe a virtual service.
type ServiceOptions struct {
Host string
Port uint16
Protocol string
Method string
Persistent bool
// Host string resolved to an IP, including DNS lookup.
host net.IP
// Protocol string converted to a protocol number.
protocol uint16
}
// NewServiceOptions constructs new virtual service options.
func NewServiceOptions(host string, port uint16, protocol string) (*ServiceOptions, error) {
options := &ServiceOptions{<|fim▁hole|> Host: host,
Port: port,
Protocol: protocol,
Method: DefaultSchedulingMethod,
Persistent: true,
}
if len(host) != 0 {
if addr, err := net.ResolveIPAddr("ip", host); err == nil {
options.host = addr.IP
} else {
return nil, err
}
} else {
return nil, ErrMissingEndpoint
}
if port == 0 {
return nil, ErrMissingEndpoint
}
if len(protocol) == 0 {
options.Protocol = DefaultProtocol
}
options.Protocol = strings.ToLower(options.Protocol)
switch options.Protocol {
case "tcp":
options.protocol = syscall.IPPROTO_TCP
case "udp":
options.protocol = syscall.IPPROTO_UDP
default:
return nil, ErrUnknownProtocol
}
return options, nil
}
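// Example (sketch): options for a TCP virtual service on port 80; the
// address is illustrative.
//
//	svc, err := NewServiceOptions("192.0.2.10", 80, "tcp")
//	if err != nil {
//		// handle resolution/validation errors
//	}
//	_ = svc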
// RealOptions describe a virtual service real.
type RealOptions struct {
Host string
Port uint16
Weight int32
VsID string
// Host string resolved to an IP, including DNS lookup.
host net.IP
// Forwarding method string converted to a forwarding method number.
methodID uint32
}
// NewRealOptions constructs new real service options.
func NewRealOptions(host string, port uint16, weight int32, vsID string) (*RealOptions, error) {
if len(host) == 0 || port == 0 {
return nil, ErrMissingEndpoint
}
options := &RealOptions{
Host: host,
Port: port,
Weight: weight,
VsID: vsID,
methodID: 0,
}
if addr, err := net.ResolveIPAddr("ip", options.Host); err == nil {
options.host = addr.IP
} else {
return nil, err
}
if options.Weight <= 0 {
options.Weight = 100
}
return options, nil
}<|fim▁end|> | |
<|file_name|>models.py<|end_file_name|><|fim▁begin|># pylint: disable=arguments-differ
""" Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
from decimal import Decimal
import json
import analytics
from io import BytesIO
from django.db.models import Q, F
import pytz
import logging
import smtplib
import StringIO
import csv
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy
from django.db import transaction
from django.db.models import Sum, Count
from django.db.models.signals import post_save, post_delete
from django.core.urlresolvers import reverse
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel
from django.core.mail.message import EmailMessage
from xmodule.modulestore.django import modulestore
from eventtracking import tracker
from courseware.courses import get_course_by_id
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_string
from student.models import CourseEnrollment, UNENROLL_DONE, EnrollStatusChange
from util.query import use_read_replica_if_available
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
from .exceptions import (
InvalidCartItem,
PurchasedCallbackException,
ItemAlreadyInCartException,
AlreadyEnrolledInCourseException,
CourseDoesNotExistException,
MultipleCouponsNotAllowedException,
InvalidStatusToRetire,
UnexpectedOrderItemStatus,
ItemNotFoundInCartException
)
from shoppingcart.pdf import PDFInvoice
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger("shoppingcart")
ORDER_STATUSES = (
# The user is selecting what he/she wants to purchase.
('cart', 'cart'),
# The user has been sent to the external payment processor.
# At this point, the order should NOT be modified.
# If the user returns to the payment flow, he/she will start a new order.
('paying', 'paying'),
# The user has successfully purchased the items in the order.
('purchased', 'purchased'),
# The user's order has been refunded.
('refunded', 'refunded'),
# The user's order went through, but the order was erroneously left
# in 'cart'.
('defunct-cart', 'defunct-cart'),
# The user's order went through, but the order was erroneously left
# in 'paying'.
('defunct-paying', 'defunct-paying'),
)
# maps order statuses to their defunct states
ORDER_STATUS_MAP = {
'cart': 'defunct-cart',
'paying': 'defunct-paying',
}
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk'])
class OrderTypes(object):
"""
    This class specifies the purchase OrderTypes.
"""
PERSONAL = 'personal'
BUSINESS = 'business'
ORDER_TYPES = (
(PERSONAL, 'personal'),
(BUSINESS, 'business'),
)
class Order(models.Model):
"""
This is the model for an order. Before purchase, an Order and its related OrderItems are used
as the shopping cart.
FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
"""
class Meta(object):
app_label = "shoppingcart"
user = models.ForeignKey(User, db_index=True)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
purchase_time = models.DateTimeField(null=True, blank=True)
refunded_time = models.DateTimeField(null=True, blank=True)
# Now we store data needed to generate a reasonable receipt
# These fields only make sense after the purchase
bill_to_first = models.CharField(max_length=64, blank=True)
bill_to_last = models.CharField(max_length=64, blank=True)
bill_to_street1 = models.CharField(max_length=128, blank=True)
bill_to_street2 = models.CharField(max_length=128, blank=True)
bill_to_city = models.CharField(max_length=64, blank=True)
bill_to_state = models.CharField(max_length=8, blank=True)
bill_to_postalcode = models.CharField(max_length=16, blank=True)
bill_to_country = models.CharField(max_length=64, blank=True)
bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
bill_to_cardtype = models.CharField(max_length=32, blank=True)
# a JSON dump of the CC processor response, for completeness
processor_reply_dump = models.TextField(blank=True)
# bulk purchase registration code workflow billing details
company_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_email = models.CharField(max_length=255, null=True, blank=True)
recipient_name = models.CharField(max_length=255, null=True, blank=True)
recipient_email = models.CharField(max_length=255, null=True, blank=True)
customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES)
@classmethod
def get_cart_for_user(cls, user):
"""
Always use this to preserve the property that at most 1 order per user has status = 'cart'
"""
# find the newest element in the db
try:
cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
except ObjectDoesNotExist:
# if nothing exists in the database, create a new cart
cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
return cart_order
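    # Example (sketch; `request.user` and `course_key` are illustrative):
    #   cart = Order.get_cart_for_user(request.user)
    #   PaidCourseRegistration.add_to_order(cart, course_key)
    #   cart.start_purchase()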
@classmethod
def does_user_have_cart(cls, user):
"""
Returns a boolean whether a shopping cart (Order) exists for the specified user
"""
return cls.objects.filter(user=user, status='cart').exists()
@classmethod
def user_cart_has_items(cls, user, item_types=None):
"""
Returns true if the user (anonymous user ok) has
        a cart with items in it (which means it should be displayed).
        If an item_type is passed in, then we check to see if the cart has
        at least one of those types of OrderItems.
"""
if not user.is_authenticated():
return False
cart = cls.get_cart_for_user(user)
if not item_types:
# check to see if the cart has at least some item in it
return cart.has_items()
else:
# if the caller is explicitly asking to check for particular types
for item_type in item_types:
if cart.has_items(item_type):
return True
return False
@classmethod
def remove_cart_item_from_order(cls, item, user):
"""
Removes the item from the cart if the item.order.status == 'cart'.
Also removes any code redemption associated with the order_item
"""
if item.order.status == 'cart':
log.info("order item %s removed for user %s", str(item.id), user)
item.delete()
# remove any redemption entry associated with the item
CouponRedemption.remove_code_redemption_from_item(item, user)
@property
def total_cost(self):
"""
Return the total cost of the cart. If the order has been purchased, returns total of
all purchased and not refunded items.
"""
return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status))
def has_items(self, item_type=None):
"""
Does the cart have any items in it?
If an item_type is passed in then we check to see if there are any items of that class type
"""
if not item_type:
return self.orderitem_set.exists()
else:
items = self.orderitem_set.all().select_subclasses()
for item in items:
if isinstance(item, item_type):
return True
return False
def reset_cart_items_prices(self):
"""
        Reset the items' price state in the user's cart
"""
for item in self.orderitem_set.all():
if item.is_discounted:
item.unit_cost = item.list_price
item.save()
def clear(self):
"""
Clear out all the items in the cart
"""
self.orderitem_set.all().delete()
@transaction.atomic
def start_purchase(self):
"""
Start the purchase process. This will set the order status to "paying",
at which point it should no longer be modified.
Future calls to `Order.get_cart_for_user()` will filter out orders with
status "paying", effectively creating a new (empty) cart.
"""
if self.status == 'cart':
self.status = 'paying'
self.save()
for item in OrderItem.objects.filter(order=self).select_subclasses():
item.start_purchase()
def update_order_type(self):
"""
        Updates the order type. This method will inspect the quantity associated with each OrderItem.
In the application, it is implied that when qty > 1, then the user is to purchase
'RegistrationCodes' which are randomly generated strings that users can distribute to
others in order for them to enroll in paywalled courses.
The UI/UX may change in the future to make the switching between PaidCourseRegistration
and CourseRegCodeItems a more explicit UI gesture from the purchaser
"""
cart_items = self.orderitem_set.all()
is_order_type_business = False
for cart_item in cart_items:
if cart_item.qty > 1:
is_order_type_business = True
items_to_delete = []
old_to_new_id_map = []
if is_order_type_business:
for cart_item in cart_items:
if hasattr(cart_item, 'paidcourseregistration'):
course_reg_code_item = CourseRegCodeItem.add_to_order(
self, cart_item.paidcourseregistration.course_id, cart_item.qty,
)
# update the discounted prices if coupon redemption applied
course_reg_code_item.list_price = cart_item.list_price
course_reg_code_item.unit_cost = cart_item.unit_cost
course_reg_code_item.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id})
else:
for cart_item in cart_items:
if hasattr(cart_item, 'courseregcodeitem'):
paid_course_registration = PaidCourseRegistration.add_to_order(
self, cart_item.courseregcodeitem.course_id,
)
# update the discounted prices if coupon redemption applied
paid_course_registration.list_price = cart_item.list_price
paid_course_registration.unit_cost = cart_item.unit_cost
paid_course_registration.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id})
for item in items_to_delete:
item.delete()
self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
self.save()
return old_to_new_id_map
def generate_pdf_receipt(self, order_items):
"""
Generates the pdf receipt for the given order_items
and returns the pdf_buffer.
"""
items_data = []
for item in order_items:
item_total = item.qty * item.unit_cost
items_data.append({
'item_description': item.pdf_receipt_display_name,
'quantity': item.qty,
'list_price': item.get_list_price(),
'discount': item.get_list_price() - item.unit_cost,
'item_total': item_total
})
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id),
date=self.purchase_time,
is_invoice=False,
total_cost=self.total_cost,
payment_received=self.total_cost,
balance=0
).generate_pdf(pdf_buffer)
return pdf_buffer
def generate_registration_codes_csv(self, orderitems, site_name):
"""
        This function generates the CSV file of registration codes and redemption URLs.
"""
course_names = []
csv_file = StringIO.StringIO()
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
for item in orderitems:
course_id = item.course_id
course = get_course_by_id(item.course_id, depth=0)
registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
course_names.append(course.display_name)
for registration_code in registration_codes:
redemption_url = reverse('register_code_redemption', args=[registration_code.code])
url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url])
return csv_file, course_names
<|fim▁hole|> send confirmation e-mail
"""
recipient_list = [(self.user.username, self.user.email, 'user')] # pylint: disable=no-member
if self.company_contact_email:
recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
joined_course_names = ""
if self.recipient_email:
recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
joined_course_names = " " + ", ".join(course_names)
if not is_order_type_business:
subject = _("Order Payment Confirmation")
else:
subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format(
course_name_list=joined_course_names
)
dashboard_url = '{base_url}{dashboard}'.format(
base_url=site_name,
dashboard=reverse('dashboard')
)
try:
from_address = configuration_helpers.get_value(
'email_from_address',
settings.PAYMENT_CONFIRM_EMAIL
)
# Send a unique email for each recipient. Don't put all email addresses in a single email.
for recipient in recipient_list:
# Some of the names in the db end in white space.
recipient_name = self.user.profile.name.strip()
message = render_to_string(
'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
{
'order': self,
'recipient_name': recipient_name,
'recipient_type': recipient[2],
'site_name': site_name,
'order_items': orderitems,
'course_names': ", ".join(course_names),
'dashboard_url': dashboard_url,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'order_placed_by': '{username} ({email})'.format(
username=self.user.username, email=self.user.email
),
'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
'payment_support_email': configuration_helpers.get_value(
'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL,
),
'payment_email_signature': configuration_helpers.get_value('payment_email_signature'),
'payment_support_phone': configuration_helpers.get_value('payment_support_phone', settings.PAYMENT_SUPPORT_PHONE),
'payment_platform_name': configuration_helpers.get_value('payment_platform_name', settings.PAYMENT_PLATFORM_NAME),
}
)
email = EmailMessage(
subject=subject,
body=message,
from_email=from_address,
to=[recipient[1]]
)
# Only the business order is HTML formatted. A single seat order confirmation is plain text.
if is_order_type_business:
email.content_subtype = "html"
if csv_file:
email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
if pdf_file is not None:
email.attach(u'ReceiptOrder{}.pdf'.format(str(self.id)), pdf_file.getvalue(), 'application/pdf')
else:
file_buffer = StringIO.StringIO(_('pdf download unavailable right now, please contact support.'))
email.attach(u'pdf_not_available.txt', file_buffer.getvalue(), 'text/plain')
email.send()
except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
log.error('Failed sending confirmation e-mail for order %d', self.id)
def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
country='', ccnum='', cardtype='', processor_reply_dump=''):
"""
Call to mark this order as purchased. Iterates through its OrderItems and calls
their purchased_callback
`first` - first name of person billed (e.g. John)
`last` - last name of person billed (e.g. Smith)
`street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
`street2` - second line of a street address of the billing address (e.g. Suite 101)
`city` - city of the billing address (e.g. Cambridge)
`state` - code of the state, province, or territory of the billing address (e.g. MA)
`postalcode` - postal code of the billing address (e.g. 02142)
`country` - country code of the billing address (e.g. US)
`ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
`cardtype` - 3-digit code representing the card type used (e.g. 001)
`processor_reply_dump` - all the parameters returned by the processor
"""
if self.status == 'purchased':
log.error(
u"`purchase` method called on order {}, but order is already purchased.".format(self.id) # pylint: disable=no-member
)
return
self.status = 'purchased'
self.purchase_time = datetime.now(pytz.utc)
self.bill_to_first = first
self.bill_to_last = last
self.bill_to_city = city
self.bill_to_state = state
self.bill_to_country = country
self.bill_to_postalcode = postalcode
if settings.FEATURES['STORE_BILLING_INFO']:
self.bill_to_street1 = street1
self.bill_to_street2 = street2
self.bill_to_ccnum = ccnum
self.bill_to_cardtype = cardtype
self.processor_reply_dump = processor_reply_dump
# save these changes on the order, then we can tell when we are in an
# inconsistent state
self.save()
# this should return all of the objects with the correct types of the
# subclasses
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
site_name = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
if self.order_type == OrderTypes.BUSINESS:
self.update_order_type()
for item in orderitems:
item.purchase_item()
csv_file = None
course_names = []
if self.order_type == OrderTypes.BUSINESS:
#
# Generate the CSV file that contains all of the RegistrationCodes that have already been
# generated when the purchase has transacted
#
csv_file, course_names = self.generate_registration_codes_csv(orderitems, site_name)
try:
pdf_file = self.generate_pdf_receipt(orderitems)
except Exception: # pylint: disable=broad-except
log.exception('Exception at creating pdf file.')
pdf_file = None
try:
self.send_confirmation_emails(
orderitems, self.order_type == OrderTypes.BUSINESS,
csv_file, pdf_file, site_name, course_names
)
except Exception: # pylint: disable=broad-except
# Catch all exceptions here, since the Django view implicitly
# wraps this in a transaction. If the order completes successfully,
# we don't want to roll back just because we couldn't send
# the confirmation email.
log.exception('Error occurred while sending payment confirmation email')
self._emit_order_event('Completed Order', orderitems)
def refund(self):
"""
Refund the given order. As of right now, this just marks the order as refunded.
"""
self.status = 'refunded'
self.save()
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
self._emit_order_event('Refunded Order', orderitems)
def _emit_order_event(self, event_name, orderitems):
"""
Emit an analytics event with the given name for this Order. Will iterate over all associated
OrderItems and add them as products in the event as well.
"""
try:
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(self.user.id, event_name, {
'orderId': self.id,
'total': str(self.total_cost),
'currency': self.currency,
'products': [item.analytics_data() for item in orderitems]
}, context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
})
except Exception: # pylint: disable=broad-except
# Capturing all exceptions thrown while tracking analytics events. We do not want
# an operation to fail because of an analytics event, so we will capture these
# errors in the logs.
log.exception(
u'Unable to emit {event} event for user {user} and order {order}'.format(
event=event_name, user=self.user.id, order=self.id)
)
def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
recipient_email='', customer_reference_number=''):
"""
This function is called after the user selects a purchase type of "Business" and
is asked to enter the optional billing details. The billing details are updated
for that order.
company_name - Name of purchasing organization
company_contact_name - Name of the key contact at the company the sale was made to
company_contact_email - Email of the key contact at the company the sale was made to
recipient_name - Name of the company should the invoice be sent to
recipient_email - Email of the company should the invoice be sent to
customer_reference_number - purchase order number of the organization associated with this Order
"""
self.company_name = company_name
self.company_contact_name = company_contact_name
self.company_contact_email = company_contact_email
self.recipient_name = recipient_name
self.recipient_email = recipient_email
self.customer_reference_number = customer_reference_number
self.save()
def generate_receipt_instructions(self):
"""
Call to generate specific instructions for each item in the order. This gets displayed on the receipt
page, typically. Instructions are something like "visit your dashboard to see your new courses".
This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
html instructions
"""
instruction_set = set([]) # heh. not ia32 or alpha or sparc
instruction_dict = {}
order_items = OrderItem.objects.filter(order=self).select_subclasses()
for item in order_items:
item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
instruction_dict[item_pk_with_subclass] = set_of_html
instruction_set.update(set_of_html)
return instruction_dict, instruction_set
def retire(self):
"""
Method to "retire" orders that have gone through to the payment service
but have (erroneously) not had their statuses updated.
This method only works on orders that satisfy the following conditions:
1) the order status is either "cart" or "paying" (otherwise we raise
an InvalidStatusToRetire error)
2) the order's order item's statuses match the order's status (otherwise
we throw an UnexpectedOrderItemStatus error)
"""
# if an order is already retired, no-op:
if self.status in ORDER_STATUS_MAP.values():
return
if self.status not in ORDER_STATUS_MAP.keys():
raise InvalidStatusToRetire(
"order status {order_status} is not 'paying' or 'cart'".format(
order_status=self.status
)
)
for item in self.orderitem_set.all():
if item.status != self.status:
raise UnexpectedOrderItemStatus(
"order_item status is different from order status"
)
self.status = ORDER_STATUS_MAP[self.status]
self.save()
for item in self.orderitem_set.all():
item.retire()
def find_item_by_course_id(self, course_id):
"""
course_id: Course id of the item to find
Returns OrderItem from the Order given a course_id
Raises exception ItemNotFoundException when the item
having the given course_id is not present in the cart
"""
cart_items = OrderItem.objects.filter(order=self).select_subclasses()
found_items = []
for item in cart_items:
if getattr(item, 'course_id', None):
if item.course_id == course_id:
found_items.append(item)
if not found_items:
raise ItemNotFoundInCartException
return found_items
class OrderItem(TimeStampedModel):
"""
This is the basic interface for order items.
Order items are line items that fill up the shopping carts and orders.
Each implementation of OrderItem should provide its own purchased_callback as
a method.
"""
class Meta(object):
app_label = "shoppingcart"
objects = InheritanceManager()
order = models.ForeignKey(Order, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
user = models.ForeignKey(User, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
qty = models.IntegerField(default=1)
unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
line_desc = models.CharField(default="Misc. Item", max_length=1024)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
fulfilled_time = models.DateTimeField(null=True, db_index=True)
refund_requested_time = models.DateTimeField(null=True, db_index=True)
service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
# general purpose field, not user-visible. Used for reporting
report_comments = models.TextField(default="")
@property
def line_cost(self):
""" Return the total cost of this OrderItem """
return self.qty * self.unit_cost
@classmethod
def add_to_order(cls, order, *args, **kwargs):
"""
A suggested convenience function for subclasses.
NOTE: This does not add anything to the cart. That is left up to the
subclasses to implement for themselves
"""
# this is a validation step to verify that the currency of the item we
# are adding is the same as the currency of the order we are adding it
# to
currency = kwargs.get('currency', 'usd')
if order.currency != currency and order.orderitem_set.exists():
raise InvalidCartItem(_("Trying to add a different currency into the cart"))
@transaction.atomic
def purchase_item(self):
"""
This is basically a wrapper around purchased_callback that handles
modifying the OrderItem itself
"""
self.purchased_callback()
self.status = 'purchased'
self.fulfilled_time = datetime.now(pytz.utc)
self.save()
def start_purchase(self):
"""
Start the purchase process. This will set the order item status to "paying",
at which point it should no longer be modified.
"""
self.status = 'paying'
self.save()
def purchased_callback(self):
"""
This is called on each inventory item in the shopping cart when the
purchase goes through.
"""
raise NotImplementedError
def generate_receipt_instructions(self):
"""
This is called on each item in a purchased order to generate receipt instructions.
This should return a list of `ReceiptInstruction`s in HTML string
Default implementation is to return an empty set
"""
return self.pk_with_subclass, set([])
@property
def pk_with_subclass(self):
"""
Returns a named tuple that annotates the pk of this instance with its class, to fully represent
a pk of a subclass (inclusive) of OrderItem
"""
return OrderItemSubclassPK(type(self), self.pk)
@property
def is_discounted(self):
"""
Returns True if the item a discount coupon has been applied to the OrderItem and False otherwise.
Earlier, the OrderItems were stored with an empty list_price if a discount had not been applied.
        Now we consider the item to be non-discounted if list_price is None or
        list_price == unit_cost. Conversely, an item is discounted when
        list_price is non-None and differs from unit_cost.
This should work with both new and old records.
"""
return self.list_price and self.list_price != self.unit_cost
def get_list_price(self):
"""
Returns the unit_cost if no discount has been applied, or the list_price if it is defined.
"""
return self.list_price if self.list_price else self.unit_cost
@property
def single_item_receipt_template(self):
"""
The template that should be used when there's only one item in the order
"""
return 'shoppingcart/receipt.html'
@property
def single_item_receipt_context(self):
"""
Extra variables needed to render the template specified in
`single_item_receipt_template`
"""
return {}
def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
"""
Individual instructions for this order item.
Currently, only used for emails.
"""
return ''
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
This can be overridden by the subclasses of OrderItem
"""
course_key = getattr(self, 'course_id', None)
if course_key:
course = get_course_by_id(course_key, depth=0)
return course.display_name
else:
raise Exception(
"Not Implemented. OrderItems that are not Course specific should have"
" a overridden pdf_receipt_display_name property"
)
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
The default implementation returns defaults for most attributes. When no name or
category is specified by the implementation, the string 'N/A' is placed for the
name and category. This should be handled appropriately by all implementations.
Returns
A dictionary containing analytics data for this OrderItem.
"""
return {
'id': self.id,
'sku': type(self).__name__,
'name': 'N/A',
'price': str(self.unit_cost),
'quantity': self.qty,
'category': 'N/A',
}
def retire(self):
"""
Called by the `retire` method defined in the `Order` class. Retires
an order item if its (and its order's) status was erroneously not
updated to "purchased" after the order was processed.
"""
self.status = ORDER_STATUS_MAP[self.status]
self.save()
class Invoice(TimeStampedModel):
"""
This table capture all the information needed to support "invoicing"
which is when a user wants to purchase Registration Codes,
but will not do so via a Credit Card transaction.
"""
class Meta(object):
app_label = "shoppingcart"
company_name = models.CharField(max_length=255, db_index=True)
company_contact_name = models.CharField(max_length=255)
company_contact_email = models.CharField(max_length=255)
recipient_name = models.CharField(max_length=255)
recipient_email = models.CharField(max_length=255)
address_line_1 = models.CharField(max_length=255)
address_line_2 = models.CharField(max_length=255, null=True, blank=True)
address_line_3 = models.CharField(max_length=255, null=True, blank=True)
city = models.CharField(max_length=255, null=True)
state = models.CharField(max_length=255, null=True)
zip = models.CharField(max_length=15, null=True)
country = models.CharField(max_length=64, null=True)
# This field has been deprecated.
# The total amount can now be calculated as the sum
# of each invoice item associated with the invoice.
# For backwards compatibility, this field is maintained
# and written to during invoice creation.
total_amount = models.FloatField()
# This field has been deprecated in order to support
# invoices for items that are not course-related.
# Although this field is still maintained for backwards
# compatibility, you should use CourseRegistrationCodeInvoiceItem
# to look up the course ID for purchased redeem codes.
course_id = CourseKeyField(max_length=255, db_index=True)
internal_reference = models.CharField(
max_length=255,
null=True,
blank=True,
help_text=ugettext_lazy("Internal reference code for this invoice.")
)
customer_reference_number = models.CharField(
max_length=63,
null=True,
blank=True,
help_text=ugettext_lazy("Customer's reference code for this invoice.")
)
is_valid = models.BooleanField(default=True)
@classmethod
def get_invoice_total_amount_for_course(cls, course_key):
"""
returns the invoice total amount generated by course.
"""
result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount'))
total = result.get('total', 0)
return total if total else 0
def generate_pdf_invoice(self, course, course_price, quantity, sale_price):
"""
Generates the pdf invoice for the given course
and returns the pdf_buffer.
"""
discount_per_item = float(course_price) - sale_price / quantity
list_price = course_price - discount_per_item
items_data = [{
'item_description': course.display_name,
'quantity': quantity,
'list_price': list_price,
'discount': discount_per_item,
'item_total': quantity * list_price
}]
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id),
date=datetime.now(pytz.utc),
is_invoice=True,
total_cost=float(self.total_amount),
payment_received=0,
balance=float(self.total_amount)
).generate_pdf(pdf_buffer)
return pdf_buffer
def snapshot(self):
"""Create a snapshot of the invoice.
A snapshot is a JSON-serializable representation
of the invoice's state, including its line items
and associated transactions (payments/refunds).
This is useful for saving the history of changes
to the invoice.
Returns:
dict
"""
return {
'internal_reference': self.internal_reference,
'customer_reference': self.customer_reference_number,
'is_valid': self.is_valid,
'contact_info': {
'company_name': self.company_name,
'company_contact_name': self.company_contact_name,
'company_contact_email': self.company_contact_email,
'recipient_name': self.recipient_name,
'recipient_email': self.recipient_email,
'address_line_1': self.address_line_1,
'address_line_2': self.address_line_2,
'address_line_3': self.address_line_3,
'city': self.city,
'state': self.state,
'zip': self.zip,
'country': self.country,
},
'items': [
item.snapshot()
for item in InvoiceItem.objects.filter(invoice=self).select_subclasses()
],
'transactions': [
trans.snapshot()
for trans in InvoiceTransaction.objects.filter(invoice=self)
],
}
def __unicode__(self):
label = (
unicode(self.internal_reference)
if self.internal_reference
else u"No label"
)
created = (
self.created.strftime("%Y-%m-%d")
if self.created
else u"No date"
)
return u"{label} ({date_created})".format(
label=label, date_created=created
)
INVOICE_TRANSACTION_STATUSES = (
# A payment/refund is in process, but money has not yet been transferred
('started', 'started'),
# A payment/refund has completed successfully
# This should be set ONLY once money has been successfully exchanged.
('completed', 'completed'),
# A payment/refund was promised, but was cancelled before
# money had been transferred. An example would be
# cancelling a refund check before the recipient has
# a chance to deposit it.
('cancelled', 'cancelled')
)
class InvoiceTransaction(TimeStampedModel):
"""Record payment and refund information for invoices.
There are two expected use cases:
1) We send an invoice to someone, and they send us a check.
We then manually create an invoice transaction to represent
the payment.
2) We send an invoice to someone, and they pay us. Later, we
need to issue a refund for the payment. We manually
create a transaction with a negative amount to represent
the refund.
"""
class Meta(object):
app_label = "shoppingcart"
invoice = models.ForeignKey(Invoice)
amount = models.DecimalField(
default=0.0, decimal_places=2, max_digits=30,
help_text=ugettext_lazy(
"The amount of the transaction. Use positive amounts for payments"
" and negative amounts for refunds."
)
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
comments = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy("Optional: provide additional information for this transaction")
)
status = models.CharField(
max_length=32,
default='started',
choices=INVOICE_TRANSACTION_STATUSES,
help_text=ugettext_lazy(
"The status of the payment or refund. "
"'started' means that payment is expected, but money has not yet been transferred. "
"'completed' means that the payment or refund was received. "
"'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. "
)
)
created_by = models.ForeignKey(User)
last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user')
@classmethod
def get_invoice_transaction(cls, invoice_id):
"""
        Return the completed (or refunded) InvoiceTransaction for the given
        invoice_id, or None if no such transaction exists.
"""
try:
            # Note: 'refunded' is not one of INVOICE_TRANSACTION_STATUSES;
            # it is matched here, presumably to cover legacy rows.
            return cls.objects.get(Q(invoice_id=invoice_id), Q(status='completed') | Q(status='refunded'))
except InvoiceTransaction.DoesNotExist:
return None
@classmethod
def get_total_amount_of_paid_course_invoices(cls, course_key):
"""
        Return the total amount of the paid invoices for the course.
"""
result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate(
total=Sum(
'amount',
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
total = result.get('total', 0)
return total if total else 0
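    # Illustrative usage sketch (course_key is an assumed CourseKey): the two
    # aggregates can be combined to estimate the amount still owed.
    #
    #     paid = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_key)
    #     owed = Invoice.get_invoice_total_amount_for_course(course_key) - paid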
def snapshot(self):
"""Create a snapshot of the invoice transaction.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'amount': unicode(self.amount),
'currency': self.currency,
'comments': self.comments,
'status': self.status,
'created_by': self.created_by.username,
'last_modified_by': self.last_modified_by.username
}
class InvoiceItem(TimeStampedModel):
"""
This is the basic interface for invoice items.
Each invoice item represents a "line" in the invoice.
For example, in an invoice for course registration codes,
there might be an invoice item representing 10 registration
codes for the DemoX course.
"""
class Meta(object):
app_label = "shoppingcart"
objects = InheritanceManager()
invoice = models.ForeignKey(Invoice, db_index=True)
qty = models.IntegerField(
default=1,
help_text=ugettext_lazy("The number of items sold.")
)
unit_price = models.DecimalField(
default=0.0,
decimal_places=2,
max_digits=30,
help_text=ugettext_lazy("The price per item sold, including discounts.")
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
def snapshot(self):
"""Create a snapshot of the invoice item.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'qty': self.qty,
'unit_price': unicode(self.unit_price),
'currency': self.currency
}
class CourseRegistrationCodeInvoiceItem(InvoiceItem):
"""
This is an invoice item that represents a payment for
a course registration.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
def snapshot(self):
"""Create a snapshot of the invoice item.
This is the same as a snapshot for other invoice items,
with the addition of a `course_id` field.
Returns:
dict
"""
snapshot = super(CourseRegistrationCodeInvoiceItem, self).snapshot()
snapshot['course_id'] = unicode(self.course_id)
return snapshot
class InvoiceHistory(models.Model):
"""History of changes to invoices.
This table stores snapshots of invoice state,
including the associated line items and transactions
(payments/refunds).
Entries in the table are created, but never deleted
or modified.
We use Django signals to save history entries on change
events. These signals are fired within a database
transaction, so the history record is created only
if the invoice change is successfully persisted.
"""
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
invoice = models.ForeignKey(Invoice)
# JSON-serialized representation of the current state
# of the invoice, including its line items and
# transactions (payments/refunds).
snapshot = models.TextField(blank=True)
@classmethod
def save_invoice_snapshot(cls, invoice):
"""Save a snapshot of the invoice's current state.
Arguments:
invoice (Invoice): The invoice to save.
"""
cls.objects.create(
invoice=invoice,
snapshot=json.dumps(invoice.snapshot())
)
@staticmethod
def snapshot_receiver(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Signal receiver that saves a snapshot of an invoice.
Arguments:
sender: Not used, but required by Django signals.
instance (Invoice, InvoiceItem, or InvoiceTransaction)
"""
if isinstance(instance, Invoice):
InvoiceHistory.save_invoice_snapshot(instance)
elif hasattr(instance, 'invoice'):
InvoiceHistory.save_invoice_snapshot(instance.invoice)
class Meta(object):
get_latest_by = "timestamp"
app_label = "shoppingcart"
# Hook up Django signals to record changes in the history table.
# We record any change to an invoice, invoice item, or transaction.
# We also record any deletion of a transaction, since users can delete
# transactions via Django admin.
# Note that we need to include *each* InvoiceItem subclass
# here, since Django signals do not fire automatically for subclasses
# of the "sender" class.
post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
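# Illustrative sketch of the history mechanism (invoice is an assumed object):
# saving any invoice-related model fires post_save, which appends a JSON
# snapshot row that can be deserialized later.
#
#     invoice.is_valid = False
#     invoice.save()  # snapshot_receiver creates an InvoiceHistory row
#     latest = InvoiceHistory.objects.filter(invoice=invoice).latest()
#     state = json.loads(latest.snapshot)  # e.g. {'is_valid': False, ...}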
class CourseRegistrationCode(models.Model):
"""
This table contains registration codes
    A user can redeem a registration code to register for a course for free.
"""
class Meta(object):
app_label = "shoppingcart"
code = models.CharField(max_length=32, db_index=True, unique=True)
course_id = CourseKeyField(max_length=255, db_index=True)
created_by = models.ForeignKey(User, related_name='created_by_user')
created_at = models.DateTimeField(auto_now_add=True)
order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order")
mode_slug = models.CharField(max_length=100, null=True)
is_valid = models.BooleanField(default=True)
# For backwards compatibility, we maintain the FK to "invoice"
# In the future, we will remove this in favor of the FK
# to "invoice_item" (which can be used to look up the invoice).
invoice = models.ForeignKey(Invoice, null=True)
invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True)
@classmethod
def order_generated_registration_codes(cls, course_id):
"""
        Return the registration codes that were generated
        via the bulk-purchase scenario.
"""
return cls.objects.filter(order__isnull=False, course_id=course_id)
@classmethod
def invoice_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via invoice.
"""
return cls.objects.filter(invoice__isnull=False, course_id=course_id)
class RegistrationCodeRedemption(models.Model):
"""
This model contains the registration-code redemption info
"""
class Meta(object):
app_label = "shoppingcart"
order = models.ForeignKey(Order, db_index=True, null=True)
registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True)
redeemed_by = models.ForeignKey(User, db_index=True)
redeemed_at = models.DateTimeField(auto_now_add=True, null=True)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
@classmethod
def registration_code_used_for_enrollment(cls, course_enrollment):
"""
        Return the RegistrationCodeRedemption object if a registration code
        was used during the course enrollment; otherwise return None.
"""
# theoretically there could be more than one (e.g. someone self-unenrolls
# then re-enrolls with a different regcode)
reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at')
if reg_codes:
# return the first one. In all normal use cases of registration codes
# the user will only have one
return reg_codes[0]
return None
@classmethod
def is_registration_code_redeemed(cls, course_reg_code):
"""
Checks the existence of the registration code
in the RegistrationCodeRedemption
"""
return cls.objects.filter(registration_code__code=course_reg_code).exists()
@classmethod
def get_registration_code_redemption(cls, code, course_id):
"""
Returns the registration code redemption object if found else returns None.
"""
try:
code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id)
except cls.DoesNotExist:
code_redemption = None
return code_redemption
@classmethod
def create_invoice_generated_registration_redemption(cls, course_reg_code, user): # pylint: disable=invalid-name
"""
This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
and thus the order_id is missing.
"""
code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
code_redemption.save()
return code_redemption
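    # Illustrative usage sketch (reg_code and student are assumed objects):
    #
    #     if not RegistrationCodeRedemption.is_registration_code_redeemed(reg_code.code):
    #         RegistrationCodeRedemption.create_invoice_generated_registration_redemption(
    #             reg_code, student)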
class SoftDeleteCouponManager(models.Manager):
""" Use this manager to get objects that have a is_active=True """
def get_active_coupons_queryset(self):
"""
        Return only the coupons with is_active=True.
"""
return super(SoftDeleteCouponManager, self).get_queryset().filter(is_active=True)
def get_queryset(self):
"""
get all the coupon objects
"""
return super(SoftDeleteCouponManager, self).get_queryset()
class Coupon(models.Model):
"""
This table contains coupon codes
    A user can get a discount on a course by providing a valid coupon code.
"""
class Meta(object):
app_label = "shoppingcart"
code = models.CharField(max_length=32, db_index=True)
description = models.CharField(max_length=255, null=True, blank=True)
course_id = CourseKeyField(max_length=255)
percentage_discount = models.IntegerField(default=0)
created_by = models.ForeignKey(User)
created_at = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
expiration_date = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return "[Coupon] code: {} course: {}".format(self.code, self.course_id)
objects = SoftDeleteCouponManager()
@property
def display_expiry_date(self):
"""
        Return the coupon expiration date in a readable format. The stored
        date appears to be exclusive, hence the one-day offset below.
"""
return (self.expiration_date - timedelta(days=1)).strftime("%B %d, %Y") if self.expiration_date else None
class CouponRedemption(models.Model):
"""
    This table contains coupon redemption info
"""
class Meta(object):
app_label = "shoppingcart"
order = models.ForeignKey(Order, db_index=True)
user = models.ForeignKey(User, db_index=True)
coupon = models.ForeignKey(Coupon, db_index=True)
@classmethod
def remove_code_redemption_from_item(cls, item, user):
"""
        If an item is removed from the shopping cart, remove the
        corresponding coupon-code redemption info as well.
"""
order_item_course_id = item.course_id
try:
# Try to remove redemption information of coupon code, If exist.
coupon_redemption = cls.objects.get(
user=user,
coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty,
order=item.order_id
)
coupon_redemption.delete()
log.info(
u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
coupon_redemption.coupon.code,
user,
str(item.id),
)
except CouponRedemption.DoesNotExist:
log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id))
@classmethod
def remove_coupon_redemption_from_cart(cls, user, cart):
"""
        Delete all coupon redemptions for the given user and cart.
"""
coupon_redemption = cls.objects.filter(user=user, order=cart)
if coupon_redemption:
coupon_redemption.delete()
log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id)
@classmethod
def get_discount_price(cls, percentage_discount, value):
"""
        Return the price after applying the given percentage discount.
"""
discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value))
return value - discount
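    # Worked example: applying a 20 percent discount to Decimal('100.00')
    # computes discount = Decimal('20.00') and returns Decimal('80.00').
    #
    #     CouponRedemption.get_discount_price(20, Decimal('100.00'))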
@classmethod
def add_coupon_redemption(cls, coupon, order, cart_items):
"""
        Apply the coupon to matching cart items and record the redemption.
"""
is_redemption_applied = False
coupon_redemptions = cls.objects.filter(order=order, user=order.user)
for coupon_redemption in coupon_redemptions:
            # Raise if a different coupon code was already redeemed for this
            # order, or if this exact coupon has already been applied.
            if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
log.exception(
u"Coupon redemption already exist for user '%s' against order id '%s'",
order.user.username,
order.id,
)
raise MultipleCouponsNotAllowedException
for item in cart_items:
if item.course_id:
if item.course_id == coupon.course_id:
coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
coupon_redemption.save()
discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
item.list_price = item.unit_cost
item.unit_cost = discount_price
item.save()
log.info(
u"Discount generated for user %s against order id '%s'",
order.user.username,
order.id,
)
is_redemption_applied = True
return is_redemption_applied
return is_redemption_applied
@classmethod
def get_top_discount_codes_used(cls, course_id):
"""
Returns the top discount codes used.
QuerySet = [
{
'coupon__percentage_discount': 22,
'coupon__code': '12',
'coupon__used_count': '2',
},
{
...
}
]
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values(
'coupon__code', 'coupon__percentage_discount'
).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count')
@classmethod
def get_total_coupon_code_purchases(cls, course_id):
"""
        Return the total number of seat purchases made using coupon codes.
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon'))
class PaidCourseRegistration(OrderItem):
"""
This is an inventory item for paying for a course registration
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
@classmethod
def get_self_purchased_seat_count(cls, course_key, status='purchased'):
"""
        Return the count of paid-course items filtered by course_id and status.
"""
return cls.objects.filter(course_id=course_key, status=status).count()
@classmethod
def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment):
"""
        Return the PaidCourseRegistration object if the user has paid for
        the course enrollment; otherwise return None.
"""
try:
return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment,
status='purchased').latest('id')
except PaidCourseRegistration.DoesNotExist:
return None
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum(
F('qty') * F('unit_cost'),
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
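    # The aggregation above multiplies qty by unit_cost per row in SQL rather
    # than in Python; a hand-rolled equivalent (slower, shown for clarity)
    # would be:
    #
    #     sum(item.qty * item.unit_cost
    #         for item in cls.objects.filter(course_id=course_key, status=status))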
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
cost=None, currency=None): # pylint: disable=arguments-differ
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning(
u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s",
order.user.email,
course_id,
order.id,
)
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
            # user could have specified a mode that's not set, in that case fall back to DEFAULT_SHOPPINGCART_MODE
course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
item, __ = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
item.status = order.status
item.mode = course_mode.slug
item.qty = 1
item.unit_cost = cost
item.list_price = cost
item.line_desc = _(u'Registration for Course: {course_name}').format(
course_name=course.display_name_with_default_escaped)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
CourseEnrollment.send_signal_full(EnrollStatusChange.paid_start,
user=order.user, mode=item.mode, course_id=course_id,
cost=cost, currency=currency)
return item
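    # Illustrative cart flow sketch (user and course_key are assumed objects;
    # Order.get_cart_for_user mirrors the usage shown for CertificateItem below):
    #
    #     cart = Order.get_cart_for_user(user)
    #     item = PaidCourseRegistration.add_to_order(cart, course_key)
    #     # ... after payment, purchased_callback() enrolls the user (see below)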
def purchased_callback(self):
"""
When purchased, this should enroll the user in the course. We are assuming that
course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
would in fact be quite silly since there's a clear back door.
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
# enroll in course and link to the enrollment_id
self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
self.save()
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost))
self.course_enrollment.send_signal(EnrollStatusChange.paid_complete,
cost=self.line_cost, currency=self.currency)
def generate_receipt_instructions(self):
"""
Generates instructions when the user has purchased a PaidCourseRegistration.
Basically tells the user to visit the dashboard to see their new classes
"""
notification = _(
u"Please visit your {link_start}dashboard{link_end} "
u"to see your new course."
).format(
link_start=u'<a href="{url}">'.format(url=reverse('dashboard')),
link_end=u'</a>',
)
return self.pk_with_subclass, set([notification])
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
except PaidCourseRegistrationAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the Order Item is associated with a course, additional fields will be populated with
course information. If there is a mode associated, the mode data is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(PaidCourseRegistration, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class CourseRegCodeItem(OrderItem):
"""
This is an inventory item for paying for
generating course registration codes
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
@classmethod
def get_bulk_purchased_seat_count(cls, course_key, status='purchased'):
"""
        Return the total number of seats purchased in bulk.
"""
total = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty'))
if result['total'] is not None:
total = result['total']
return total
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum(
F('qty') * F('unit_cost'),
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
cost=None, currency=None): # pylint: disable=arguments-differ
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
.format(order.user.email, course_id, order.id))
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_SHOPPINGCART_MODE
course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id) # pylint: disable=unused-variable
item.status = order.status
item.mode = course_mode.slug
item.unit_cost = cost
item.list_price = cost
item.qty = qty
item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
course_name=course.display_name_with_default_escaped)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
def purchased_callback(self):
"""
        When the purchase is completed, this OrderItem type generates
        registration codes that users can redeem.
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
total_registration_codes = int(self.qty)
# we need to import here because of a circular dependency
# we should ultimately refactor code to have save_registration_code in this models.py
# file, but there's also a shared dependency on a random string generator which
# is in another PR (for another feature)
from lms.djangoapps.instructor.views.api import save_registration_code
for i in range(total_registration_codes): # pylint: disable=unused-variable
save_registration_code(self.user, self.course_id, self.mode, order=self.order)
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost))
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
except CourseRegCodeItemAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the OrderItem is associated with a course, additional fields will be populated with
course information. If a mode is available, it will be included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CourseRegCodeItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class CourseRegCodeItemAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
generates report for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
# pylint: disable=no-member
return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class PaidCourseRegistrationAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
    generates reports for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
# pylint: disable=no-member
return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class CertificateItem(OrderItem):
"""
This is an inventory item for purchasing certificates
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
course_enrollment = models.ForeignKey(CourseEnrollment)
mode = models.SlugField()
@receiver(UNENROLL_DONE)
def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs): # pylint: disable=no-self-argument,unused-argument
"""
When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
occurred in a verified certificate that was within the refund deadline. If so, it actually performs the
refund.
Returns the refunded certificate on a successful refund; else, it returns nothing.
"""
# Only refund verified cert unenrollments that are within bounds of the expiration date
if (not course_enrollment.refundable()) or skip_refund:
return
target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
try:
target_cert = target_certs[0]
except IndexError:
log.warning(
u"Matching CertificateItem not found while trying to refund. User %s, Course %s",
course_enrollment.user,
course_enrollment.course_id,
)
return
target_cert.status = 'refunded'
target_cert.refund_requested_time = datetime.now(pytz.utc)
target_cert.save()
target_cert.order.refund()
order_number = target_cert.order_id
# send billing an email so they can handle refunding
subject = _("[Refund] User-Requested Refund")
message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
user_email=course_enrollment.user.email,
order_number=order_number)
to_email = [settings.PAYMENT_SUPPORT_EMAIL]
from_email = configuration_helpers.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
try:
send_mail(subject, message, from_email, to_email, fail_silently=False)
except Exception as exception: # pylint: disable=broad-except
err_str = ('Failed sending email to billing to request a refund for verified certificate'
' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
log.error(err_str.format(
user=course_enrollment.user,
course=course_enrollment.course_id,
ce_id=course_enrollment.id,
order=order_number,
exception=exception,
))
return target_cert
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
"""
Add a CertificateItem to an order
Returns the CertificateItem object after saving
`order` - an order that this item should be added to, generally the cart order
`course_id` - the course that we would like to purchase as a CertificateItem
`cost` - the amount the user will be paying for this CertificateItem
`mode` - the course mode that this certificate is going to be issued for
This item also creates a new enrollment if none exists for this user and this course.
Example Usage:
cart = Order.get_cart_for_user(user)
CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
"""
super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
# do some validation on the enrollment mode
valid_modes = CourseMode.modes_for_course_dict(course_id)
if mode in valid_modes:
mode_info = valid_modes[mode]
else:
msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id)
log.error(msg)
raise InvalidCartItem(
_(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id)
)
item, _created = cls.objects.get_or_create(
order=order,
user=order.user,
course_id=course_id,
course_enrollment=course_enrollment,
mode=mode,
)
item.status = order.status
item.qty = 1
item.unit_cost = cost
item.list_price = cost
course_name = modulestore().get_course(course_id).display_name
# Translators: In this particular case, mode_name refers to a
# particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
# by which a user could enroll in the given course.
item.line_desc = _("{mode_name} for course {course}").format(
mode_name=mode_info.name,
course=course_name
)
item.currency = currency
order.currency = currency
order.save()
item.save()
# signal course added to cart
course_enrollment.send_signal(EnrollStatusChange.paid_start, cost=cost, currency=currency)
return item
def purchased_callback(self):
"""
When purchase goes through, activate and update the course enrollment for the correct mode
"""
self.course_enrollment.change_mode(self.mode)
self.course_enrollment.activate()
self.course_enrollment.send_signal(EnrollStatusChange.upgrade_complete,
cost=self.unit_cost, currency=self.currency)
def additional_instruction_text(self):
verification_reminder = ""
refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 14 days after the course "
"start date. ")
is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment()
is_professional_mode_verified = self.course_enrollment.is_professional_enrollment()
if is_enrollment_mode_verified:
domain = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
path = reverse('verify_student_verify_now', kwargs={'course_id': unicode(self.course_id)})
verification_url = "http://{domain}{path}".format(domain=domain, path=path)
verification_reminder = _(
"If you haven't verified your identity yet, please start the verification process ({verification_url})."
).format(verification_url=verification_url)
if is_professional_mode_verified:
refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 2 days after the "
"course start date. ")
refund_reminder = _(
"{refund_reminder_msg}"
"To receive your refund, contact {billing_email}. "
"Please include your order number in your email. "
"Please do NOT include your credit card information."
).format(
refund_reminder_msg=refund_reminder_msg,
billing_email=settings.PAYMENT_SUPPORT_EMAIL
)
# Need this to be unicode in case the reminder strings
# have been translated and contain non-ASCII unicode
return u"{verification_reminder} {refund_reminder}".format(
verification_reminder=verification_reminder,
refund_reminder=refund_reminder
)
@classmethod
def verified_certificates_count(cls, course_id, status):
"""Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
return use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())
# TODO combine these three methods into one
@classmethod
def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
"""
Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.
Sample usages:
- status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
- status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
etc
"""
query = use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
if query is None:
return Decimal(0.00)
else:
return query
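    # Illustrative usage sketches from the docstring above (course_key is an
    # assumed CourseKey):
    #
    #     refunds = CertificateItem.verified_certificates_monetary_field_sum(
    #         course_key, 'refunded', 'unit_cost')
    #     fees = CertificateItem.verified_certificates_monetary_field_sum(
    #         course_key, 'purchased', 'service_fees')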
@classmethod
    def verified_certificates_contributing_more_than_minimum(cls, course_id):
        """
        Return the number of purchased verified certificates for which the
        buyer paid more than the minimum verified-mode price.
        """
return use_read_replica_if_available(
CertificateItem.objects.filter(
course_id=course_id,
mode='verified',
status='purchased',
unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the CertificateItem is associated with a course, additional fields will be populated with
course information. If there is a mode associated with the certificate, it is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CertificateItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class DonationConfiguration(ConfigurationModel):
"""Configure whether donations are enabled on the site."""
class Meta(ConfigurationModel.Meta):
app_label = "shoppingcart"
class Donation(OrderItem):
"""A donation made by a user.
Donations can be made for a specific course or to the organization as a whole.
Users can choose the donation amount.
"""
class Meta(object):
app_label = "shoppingcart"
# Types of donations
DONATION_TYPES = (
("general", "A general donation"),
("course", "A donation to a particular course")
)
# The type of donation
donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)
# If a donation is made for a specific course, then store the course ID here.
# If the donation is made to the organization as a whole,
# set this field to CourseKeyField.Empty
course_id = CourseKeyField(max_length=255, db_index=True)
@classmethod
@transaction.atomic
def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
"""Add a donation to an order.
Args:
order (Order): The order to add this donation to.
donation_amount (Decimal): The amount the user is donating.
Keyword Args:
course_id (CourseKey): If provided, associate this donation with a particular course.
            currency (str): The currency used for the donation.
Raises:
InvalidCartItem: The provided course ID is not valid.
Returns:
Donation
"""
# This will validate the currency but won't actually add the item to the order.
super(Donation, cls).add_to_order(order, currency=currency)
# Create a line item description, including the name of the course
# if this is a per-course donation.
# This will raise an exception if the course can't be found.
description = cls._line_item_description(course_id=course_id)
params = {
"order": order,
"user": order.user,
"status": order.status,
"qty": 1,
"unit_cost": donation_amount,
"currency": currency,
"line_desc": description
}
if course_id is not None:
params["course_id"] = course_id
params["donation_type"] = "course"
else:
params["donation_type"] = "general"
return cls.objects.create(**params)
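    # Illustrative usage sketch (user and course_key are assumed objects):
    #
    #     cart = Order.get_cart_for_user(user)
    #     Donation.add_to_order(cart, Decimal('25.00'))  # general donation
    #     Donation.add_to_order(cart, Decimal('10.00'), course_id=course_key)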
def purchased_callback(self):
"""Donations do not need to be fulfilled, so this method does nothing."""
pass
def generate_receipt_instructions(self):
"""Provide information about tax-deductible donations in the receipt.
Returns:
tuple of (Donation, unicode)
"""
return self.pk_with_subclass, set([self._tax_deduction_msg()])
def additional_instruction_text(self, **kwargs):
"""Provide information about tax-deductible donations in the confirmation email.
Returns:
unicode
"""
return self._tax_deduction_msg()
def _tax_deduction_msg(self):
"""Return the translated version of the tax deduction message.
Returns:
unicode
"""
return _(
u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
u"This receipt was prepared to support charitable contributions for tax purposes. "
u"We confirm that neither goods nor services were provided in exchange for this gift."
).format(platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME))
@classmethod
def _line_item_description(cls, course_id=None):
"""Create a line-item description for the donation.
Includes the course display name if provided.
Keyword Arguments:
course_id (CourseKey)
Raises:
CourseDoesNotExistException: The course ID is not valid.
Returns:
unicode
"""
# If a course ID is provided, include the display name of the course
# in the line item description.
if course_id is not None:
course = modulestore().get_course(course_id)
if course is None:
msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id)
log.error(msg)
raise CourseDoesNotExistException(
_(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id)
)
return _(u"Donation for {course}").format(course=course.display_name)
# The donation is for the organization as a whole, not a specific course
else:
return _(u"Donation for {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
)
@property
def single_item_receipt_context(self):
return {
'receipt_has_donation_item': True,
}
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the donation is associated with a course, additional fields will be populated with
course information. When no name or category is specified by the implementation, the
platform name is used as a default value for required event fields, to declare that
the Order is specific to the platform, rather than a specific product name or category.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(Donation, self).analytics_data()
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
else:
data['name'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
data['category'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
return data
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
"""
        return self._line_item_description(course_id=self.course_id)