max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
1,601 | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Clang gcc-toolchain compiler option related tests.
"""
import shlex
import unittest
from codechecker_analyzer import gcc_toolchain
class GCCToolchainTest(unittest.TestCase):
    """
    Tests for detecting the --gcc-toolchain option in clang command lines.
    """

    def test_tc_detect(self):
        """
        Parse gcc-toolchain argument from clang compile command.
        """
        cmd = ("clang -x c -O3 "
               "--gcc-toolchain=/home/user/my_gcc/toolchain -c main.cpp ")
        tc_path = gcc_toolchain.toolchain_in_args(shlex.split(cmd))
        print(tc_path)
        self.assertEqual(tc_path, "/home/user/my_gcc/toolchain")

    def test_get_tc_gcc_compiler(self):
        """
        Get gcc compiler from the toolchain path.
        """
        cmd = "clang --gcc-toolchain=/home/user/my_gcc/toolchain"
        tc_path = gcc_toolchain.toolchain_in_args(shlex.split(cmd))
        print(tc_path)
        self.assertEqual(tc_path, "/home/user/my_gcc/toolchain")

        c_compiler = gcc_toolchain.get_toolchain_compiler(tc_path, "c")
        print(c_compiler)
        self.assertEqual(c_compiler, "/home/user/my_gcc/toolchain/bin/gcc")

    def test_get_tc_cpp_compiler(self):
        """
        Get g++ compiler from the toolchain path.
        """
        cmd = "clang --gcc-toolchain=/home/user/my_gcc/toolchain"
        tc_path = gcc_toolchain.toolchain_in_args(shlex.split(cmd))
        print(tc_path)
        self.assertEqual(tc_path, "/home/user/my_gcc/toolchain")

        cpp_compiler = gcc_toolchain.get_toolchain_compiler(tc_path, "c++")
        print(cpp_compiler)
        self.assertEqual(cpp_compiler, "/home/user/my_gcc/toolchain/bin/g++")
| 1,020 |
818 | /*
* Copyright 2020 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.kogito.svg;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpression;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
import org.apache.batik.anim.dom.SAXSVGDocumentFactory;
import org.apache.batik.util.XMLResourceDescriptor;
import org.junit.jupiter.api.Test;
import org.kie.kogito.svg.processor.SVGProcessor;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Tests for {@code SVGImageProcessor#transform}: verifies that completed and
 * active process nodes are recolored in the SVG and that sizing attributes
 * are rewritten into a {@code viewBox}.
 */
public class SvgTransformationTest {

    private XPath xpath = XPathFactory.newInstance().newXPath();

    /**
     * @return stream of the travels.svg test resource bundled on the classpath
     */
    public static InputStream readTestFileContent() {
        return SvgTransformationTest.class.getResourceAsStream("/META-INF/processSVG/travels.svg");
    }

    @Test
    public void transformTest() throws Exception {
        List<String> completed = new ArrayList<>();
        completed.add("_1A708F87-11C0-42A0-A464-0B7E259C426F");
        List<String> active = new ArrayList<>();
        active.add("_24FBB8D6-EF2D-4DCC-846D-D8C5E21849D2");
        String svg = SVGImageProcessor.transform(readTestFileContent(), completed, active);
        // verify transformation
        Document svgDocument = readSVG(svg);
        validateNodesMarkedAsActive(svgDocument, active, SVGProcessor.ACTIVE_BORDER_COLOR);
        validateNodesMarkedAsCompleted(svgDocument, completed, SVGProcessor.COMPLETED_COLOR);
    }

    @Test
    public void testCompletedAndActive() throws Exception {
        List<String> completed = new ArrayList<>();
        completed.add("_1A708F87-11C0-42A0-A464-0B7E259C426F");
        completed.add("_24FBB8D6-EF2D-4DCC-846D-D8C5E21849D2");
        List<String> active = new ArrayList<>();
        active.add("_24FBB8D6-EF2D-4DCC-846D-D8C5E21849D2");
        String svg = SVGImageProcessor.transform(readTestFileContent(), completed, active);
        // verify transformation
        Document svgDocument = readSVG(svg);
        validateNodesMarkedAsActive(svgDocument, active, SVGProcessor.ACTIVE_BORDER_COLOR);
        // remove it as it should be not considered completed and was already asserted as active
        completed.remove("_24FBB8D6-EF2D-4DCC-846D-D8C5E21849D2");
        validateNodesMarkedAsCompleted(svgDocument, completed, SVGProcessor.COMPLETED_COLOR);
    }

    @Test
    public void testCustomColor() throws Exception {
        String completedNodeColor = "#888888";
        String completedNodeBorderColor = "#888887";
        String activeNodeBorderColor = "#888886";
        List<String> completed = new ArrayList<>();
        completed.add("_1A708F87-11C0-42A0-A464-0B7E259C426F");
        List<String> active = new ArrayList<>();
        active.add("_24FBB8D6-EF2D-4DCC-846D-D8C5E21849D2");
        String svg = SVGImageProcessor.transform(readTestFileContent(),
                completed, active, null, completedNodeColor,
                completedNodeBorderColor, activeNodeBorderColor);
        // verify transformation
        Document svgDocument = readSVG(svg);
        validateNodesMarkedAsActive(svgDocument, active, activeNodeBorderColor);
        validateNodesMarkedAsCompleted(svgDocument, completed, completedNodeColor);
    }

    @Test
    public void testViewBoxAttributeAddition() throws Exception {
        List<String> completed = new ArrayList<>();
        completed.add("_1A708F87-11C0-42A0-A464-0B7E259C426F");
        List<String> active = new ArrayList<>();
        active.add("_24FBB8D6-EF2D-4DCC-846D-D8C5E21849D2");
        Map<String, String> links = new HashMap<>();
        links.put("_1A708F87-11C0-42A0-A464-0B7E259C426F", "http://localhost/svg/processes/1");
        String svg = SVGImageProcessor.transform(readTestFileContent(),
                completed, active, links, "#888888",
                "#888887", "#888886");
        Document svgDocument = readSVG(svg);
        // width/height must be cleared and replaced by a viewBox
        assertEquals("", ((Element) svgDocument.getFirstChild()).getAttribute("width"));
        assertEquals("", ((Element) svgDocument.getFirstChild()).getAttribute("height"));
        assertEquals("0 0 1748 632", svgDocument.getFirstChild().getAttributes().getNamedItem("viewBox").getNodeValue());
    }

    // helper methods for verifying svg transformation

    /**
     * Asserts every active node's BORDER sub-shape is stroked with the given
     * color and a stroke-width of 2.
     */
    private void validateNodesMarkedAsActive(Document svgDocument, List<String> activeNodes, String activeNodeBorderColor) throws XPathExpressionException {
        for (String activeNode : activeNodes) {
            XPathExpression expr = xpath.compile("//*[@bpmn2nodeid='" + activeNode + "']");
            Element element = (Element) expr.evaluate(svgDocument, XPathConstants.NODE);
            // assertNotNull both fails with a clear message and tells static
            // analysis the element is non-null below (if/fail did not).
            assertNotNull(element, "Active element " + activeNode + " not found in the document");
            String svgId = element.getAttribute("id");
            Element border = svgDocument.getElementById(svgId + "?shapeType=BORDER&renderType=STROKE");
            String marker = border.getAttribute("stroke");
            assertNotNull(marker);
            assertEquals(activeNodeBorderColor, marker);
            String markerWidth = border.getAttribute("stroke-width");
            assertNotNull(markerWidth);
            assertEquals("2", markerWidth);
        }
    }

    /**
     * Asserts every completed node's BACKGROUND sub-shape is filled with the
     * given color.
     */
    private void validateNodesMarkedAsCompleted(Document svgDocument, List<String> completedNodes, String completedNodeColor) throws XPathExpressionException {
        for (String completedNode : completedNodes) {
            XPathExpression expr = xpath.compile("//*[@bpmn2nodeid='" + completedNode + "']");
            Element element = (Element) expr.evaluate(svgDocument, XPathConstants.NODE);
            assertNotNull(element, "Completed element " + completedNode + " not found in the document");
            String svgId = element.getAttribute("id");
            Element background = svgDocument.getElementById(svgId + "?shapeType=BACKGROUND");
            String marker = background.getAttribute("fill");
            assertNotNull(marker);
            assertEquals(completedNodeColor, marker);
        }
    }

    /**
     * Parses SVG text into a DOM document with Batik (validation disabled).
     */
    private Document readSVG(String svgContent) throws IOException {
        String parser = XMLResourceDescriptor.getXMLParserClassName();
        SAXSVGDocumentFactory factory = new SAXSVGDocumentFactory(parser);
        factory.setValidating(false);
        return factory.createDocument("http://jbpm.org", new StringReader(svgContent));
    }
}
| 2,903 |
5,079 | <gh_stars>1000+
__author__ = 'roland'
| 17 |
5,169 | {
"name": "NetworkSpeedTest",
"version": "0.1.1",
"summary": "Passively collecting network statistics for iOS",
"homepage": "https://github.com/ml-works/NetworkSpeedTest",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/ml-works/NetworkSpeedTest.git",
"tag": "0.1.1"
},
"social_media_url": "https://twitter.com/vdugnist",
"platforms": {
"ios": "8.0"
},
"source_files": "SpeedTestExample/SpeedTest/*"
}
| 219 |
3,281 | package carbon.view;
import android.view.View;
import android.view.ViewGroup;
import androidx.core.view.ViewCompat;
/**
 * Mixin for Views whose layout params support margins; each setter reads the
 * current MarginLayoutParams, updates the relevant edge(s) and re-applies them.
 * Throws IllegalStateException when the layout params carry no margins.
 */
public interface MarginView {

    /** Applies the same margin to all four edges. */
    default void setMargins(int margin) {
        final View self = (View) this;
        final ViewGroup.LayoutParams raw = self.getLayoutParams();
        if (!(raw instanceof ViewGroup.MarginLayoutParams)) {
            throw new IllegalStateException("Invalid layoutParams. Unable to set margin.");
        }
        final ViewGroup.MarginLayoutParams lp = (ViewGroup.MarginLayoutParams) raw;
        lp.setMargins(margin, margin, margin, margin);
        self.setLayoutParams(lp);
    }

    /** Sets all four margins individually. */
    default void setMargins(int left, int top, int right, int bottom) {
        final View self = (View) this;
        final ViewGroup.LayoutParams raw = self.getLayoutParams();
        if (!(raw instanceof ViewGroup.MarginLayoutParams)) {
            throw new IllegalStateException("Invalid layoutParams. Unable to set margin.");
        }
        final ViewGroup.MarginLayoutParams lp = (ViewGroup.MarginLayoutParams) raw;
        lp.setMargins(left, top, right, bottom);
        self.setLayoutParams(lp);
    }

    /** Sets the start-edge margin, resolving start against the layout direction. */
    default void setMarginStart(int margin) {
        final View self = (View) this;
        final ViewGroup.LayoutParams raw = self.getLayoutParams();
        if (!(raw instanceof ViewGroup.MarginLayoutParams)) {
            throw new IllegalStateException("Invalid layoutParams. Unable to set margin.");
        }
        final ViewGroup.MarginLayoutParams lp = (ViewGroup.MarginLayoutParams) raw;
        final boolean rtl =
                ViewCompat.getLayoutDirection(self) == ViewCompat.LAYOUT_DIRECTION_RTL;
        if (rtl) {
            // In right-to-left layouts the start edge is the right edge.
            lp.setMargins(lp.leftMargin, lp.topMargin, margin, lp.bottomMargin);
        } else {
            lp.setMargins(margin, lp.topMargin, lp.rightMargin, lp.bottomMargin);
        }
        self.setLayoutParams(lp);
    }

    /** Sets the end-edge margin, resolving end against the layout direction. */
    default void setMarginEnd(int margin) {
        final View self = (View) this;
        final ViewGroup.LayoutParams raw = self.getLayoutParams();
        if (!(raw instanceof ViewGroup.MarginLayoutParams)) {
            throw new IllegalStateException("Invalid layoutParams. Unable to set margin.");
        }
        final ViewGroup.MarginLayoutParams lp = (ViewGroup.MarginLayoutParams) raw;
        final boolean ltr =
                ViewCompat.getLayoutDirection(self) == ViewCompat.LAYOUT_DIRECTION_LTR;
        if (ltr) {
            // In left-to-right layouts the end edge is the right edge.
            lp.setMargins(lp.leftMargin, lp.topMargin, margin, lp.bottomMargin);
        } else {
            lp.setMargins(margin, lp.topMargin, lp.rightMargin, lp.bottomMargin);
        }
        self.setLayoutParams(lp);
    }

    /** Sets only the left margin. */
    default void setMarginLeft(int margin) {
        final View self = (View) this;
        final ViewGroup.LayoutParams raw = self.getLayoutParams();
        if (!(raw instanceof ViewGroup.MarginLayoutParams)) {
            throw new IllegalStateException("Invalid layoutParams. Unable to set margin.");
        }
        final ViewGroup.MarginLayoutParams lp = (ViewGroup.MarginLayoutParams) raw;
        lp.setMargins(margin, lp.topMargin, lp.rightMargin, lp.bottomMargin);
        self.setLayoutParams(lp);
    }

    /** Sets only the right margin. */
    default void setMarginRight(int margin) {
        final View self = (View) this;
        final ViewGroup.LayoutParams raw = self.getLayoutParams();
        if (!(raw instanceof ViewGroup.MarginLayoutParams)) {
            throw new IllegalStateException("Invalid layoutParams. Unable to set margin.");
        }
        final ViewGroup.MarginLayoutParams lp = (ViewGroup.MarginLayoutParams) raw;
        lp.setMargins(lp.leftMargin, lp.topMargin, margin, lp.bottomMargin);
        self.setLayoutParams(lp);
    }

    /** Sets only the top margin. */
    default void setMarginTop(int margin) {
        final View self = (View) this;
        final ViewGroup.LayoutParams raw = self.getLayoutParams();
        if (!(raw instanceof ViewGroup.MarginLayoutParams)) {
            throw new IllegalStateException("Invalid layoutParams. Unable to set margin.");
        }
        final ViewGroup.MarginLayoutParams lp = (ViewGroup.MarginLayoutParams) raw;
        lp.setMargins(lp.leftMargin, margin, lp.rightMargin, lp.bottomMargin);
        self.setLayoutParams(lp);
    }

    /** Sets only the bottom margin. */
    default void setMarginBottom(int margin) {
        final View self = (View) this;
        final ViewGroup.LayoutParams raw = self.getLayoutParams();
        if (!(raw instanceof ViewGroup.MarginLayoutParams)) {
            throw new IllegalStateException("Invalid layoutParams. Unable to set margin.");
        }
        final ViewGroup.MarginLayoutParams lp = (ViewGroup.MarginLayoutParams) raw;
        lp.setMargins(lp.leftMargin, lp.topMargin, lp.rightMargin, margin);
        self.setLayoutParams(lp);
    }
}
| 1,894 |
523 | {"type":"include","line":1,"val":"./auxiliary/mixin-at-end-of-file.jade"}
{"type":"newline","line":3}
{"type":"call","line":3,"val":"slide","args":""}
{"type":"indent","line":4,"val":2}
{"type":"tag","line":4,"val":"p","selfClosing":false}
{"type":"text","line":4,"val":"some awesome content"}
{"type":"outdent","line":5}
{"type":"eos","line":5} | 133 |
5,169 | {
"name": "LR_Category",
"version": "1.0.1",
"summary": "A模块 A_Category.",
"description": "A模块 A_Category",
"homepage": "http://EXAMPLE/A_Category",
"license": "MIT",
"authors": {
"EGLS_BMAC": ""
},
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://github.com/RainyofSun/A_Category.git",
"tag": "v1.0.1"
},
"source_files": "A_Category/A_Category/Category/**/*.{h,m}",
"public_header_files": "A_Category/A_Category/Category/**/*.h",
"frameworks": [
"UIKit",
"Foundation"
],
"dependencies": {
"CTMediator": [
]
}
}
| 278 |
587 | <gh_stars>100-1000
/*
* Copyright 2020 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.linecorp.bot.model.manageaudience;
import com.fasterxml.jackson.annotation.JsonEnumDefaultValue;
/**
* The audience's status.
*/
public enum AudienceGroupStatus {
    /**
     * Pending. It may take several hours for the status to change to READY.
     */
    IN_PROGRESS,
    /**
     * Ready to accept messages.
     */
    READY,
    /**
     * An error occurred while creating the audience.
     */
    FAILED,
    /**
     * Expired. Audiences are automatically deleted a month after they expire.
     */
    EXPIRED,
    /**
     * Fallback for status values unknown to this enum version; unmapped JSON
     * values deserialize to this constant via {@code @JsonEnumDefaultValue}.
     */
    @JsonEnumDefaultValue
    UNKNOWN
}
| 389 |
515 | /*=========================================================================
Library: CTK
Copyright (c) Kitware Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0.txt
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=========================================================================*/
// Qt includes
#include <QDebug>
#include <QBrush>
#include <QGridLayout>
#include <QLine>
#include <QMouseEvent>
#include <QPainter>
// CTK includes
#include "ctkAxesWidget.h"
// STD includes
#include <cmath>
#include <math.h>
// Ratio between the widget's smaller dimension and the axes circle diameter.
static const double goldenRatio = 1.6180339887;
// Pi, used for the axis angle computations below.
static const double PI = 3.14159265358979323846;
//ctkAxesWidgetPrivate
//-----------------------------------------------------------------------------
// Private (pimpl) data and geometry helpers for ctkAxesWidget.
class ctkAxesWidgetPrivate
{
  Q_DECLARE_PUBLIC(ctkAxesWidget);
protected:
  ctkAxesWidget* const q_ptr; // back-pointer to the public widget
public:
  ctkAxesWidgetPrivate(ctkAxesWidget& object);
  // Endpoints of the 6 axes on a circle of the given radius around center.
  QList<QPoint> extremities(QPoint center, int radius)const;
  // One letter-sized rectangle per axis, offset outward from the axis tip.
  QList<QRect> labelRects(const QList<QPoint>& extremities, QSize offset)const;
  // Axis under the given widget position, or ctkAxesWidget::None.
  ctkAxesWidget::Axis axisAtPos(QPoint pos)const;

  ctkAxesWidget::Axis CurrentAxis;   // last axis committed via setCurrentAxis()
  ctkAxesWidget::Axis HighlightAxis; // axis under the mouse, drawn emphasized
  bool AutoReset;                    // reset selection to None after each change
  QStringList AxesLabels;            // 6 labels; order matches AxesAngles
  QVector<double> AxesAngles;        // axis angles in radians
};
//-----------------------------------------------------------------------------
// Initializes the pimpl: no axis current or highlighted, auto-reset off,
// default anatomical labels and their display angles.
ctkAxesWidgetPrivate::ctkAxesWidgetPrivate(ctkAxesWidget& object)
  :q_ptr(&object)
{
  qRegisterMetaType<ctkAxesWidget::Axis>("ctkAxesWidget::Axis");
  this->CurrentAxis = ctkAxesWidget::None;
  this->HighlightAxis = ctkAxesWidget::None;
  this->AutoReset = false;
  // Anatomical orientation labels: Right, Left, Superior, Inferior,
  // Anterior, Posterior.
  this->AxesLabels << "R" << "L" << "S" << "I" << "A" << "P";
  // Angles (radians, counter-clockwise from +x) matching the labels above.
  // Expressed with the file's PI constant instead of truncated literals
  // (previously 3.14159265, 1.57079633, 4.71238898, 5.49778714, 2.35619449).
  this->AxesAngles << PI << 0. << PI / 2. << 3. * PI / 2.
                   << 7. * PI / 4. << 3. * PI / 4.;
}
//-----------------------------------------------------------------------------
// Returns the tip of each of the 6 axes: a point on the circle of the given
// radius around center, at the axis angle (y inverted for screen coordinates).
QList<QPoint> ctkAxesWidgetPrivate::extremities(QPoint center, int radius)const
{
  QList<QPoint> tips;
  for (int axis = 0; axis < 6; ++axis)
    {
    const double angle = this->AxesAngles[axis];
    tips << center + QPoint(radius * cos(angle), -radius * sin(angle));
    }
  return tips;
}
//-----------------------------------------------------------------------------
// Computes, for each of the 6 axes, the rectangle in which its label is
// drawn: a single-letter box pushed outward from the axis tip along the
// axis direction by the given offset, then centered on that point.
QList<QRect> ctkAxesWidgetPrivate::labelRects(const QList<QPoint>& extremities, QSize offset)const
{
  Q_Q(const ctkAxesWidget);
  QFontMetrics fm = q->fontMetrics();
  // Size of one letter plus a 1px margin on each dimension.
  QSize letterSize = fm.size(Qt::TextShowMnemonic, "X") + QSize(1,1);
  QSize halfLetterSize = letterSize / 2;
  QList<QRect> rects;
  for (int i = 0; i < 6; ++i)
    {
    // Shift outward along the axis direction, then back by half a letter so
    // the rectangle is centered on the shifted point.
    rects << QRect(extremities[i]
       + QPoint(cos(this->AxesAngles[i]) * (offset.width()+halfLetterSize.width()),
                -sin(this->AxesAngles[i]) * (offset.height()+halfLetterSize.height()))
       - QPoint(halfLetterSize.width(), halfLetterSize.height()),
       letterSize);
    }
  return rects;
}
//-----------------------------------------------------------------------------
// Hit-tests a widget position: returns the axis whose 45-degree sector
// contains the position, or None when the position is inside the center
// sphere or between sectors.
ctkAxesWidget::Axis ctkAxesWidgetPrivate::axisAtPos(QPoint pos)const
{
  Q_Q(const ctkAxesWidget);
  // Reproduce the geometry used by paintEvent() so hit-testing matches the
  // rendered widget (previously the golden ratio was inlined as a literal).
  QPoint center = QPoint(q->width(), q->height()) / 2;
  int length = qMin(q->width(), q->height());
  int diameter = length / goldenRatio;
  int blankSize = (length - diameter) / 2;
  QSize sphereRadius((blankSize / 2) / goldenRatio,
                     (blankSize / 2) / goldenRatio);
  // Clicks inside the center sphere select no axis.
  QPointF mousePos = pos - center;
  double distance2 =
    mousePos.x() * mousePos.x() + mousePos.y() * mousePos.y();
  if (distance2 < sphereRadius.width() * sphereRadius.width())
    {
    return ctkAxesWidget::None;
    }
  double mouseAngle = atan2(-mousePos.y(), mousePos.x());
  // mouseAngle is in the interval [-pi,+pi] radians;
  // change it to be in [-pi/8, 15/8 * pi] so each sector test below can use
  // a simple closed interval around its axis angle.
  const double PI_8 = PI / 8.; // was the magic literal 0.392699082
  if (mouseAngle < -PI_8)
    {
    mouseAngle += 2. * PI;
    }
  for (int i = 0; i < 6; ++i)
    {
    if (mouseAngle >= (this->AxesAngles[i] - PI_8) &&
        mouseAngle <= (this->AxesAngles[i] + PI_8))
      {
      // the mouse is over this axis; enum values start at 1 (0 is None)
      return static_cast<ctkAxesWidget::Axis>(i + 1);
      }
    }
  return ctkAxesWidget::None;
}
//ctkAxesWidget
//-----------------------------------------------------------------------------
// Constructs the widget and its private implementation.
ctkAxesWidget::ctkAxesWidget(QWidget *newParent)
  : QWidget(newParent)
  , d_ptr(new ctkAxesWidgetPrivate(*this))
{
}
//-----------------------------------------------------------------------------
// Out-of-line destructor: defined here where ctkAxesWidgetPrivate is a
// complete type so d_ptr can be destroyed.
ctkAxesWidget::~ctkAxesWidget()
{
}
// ----------------------------------------------------------------------------
// Returns the last axis committed via setCurrentAxis() (None by default).
ctkAxesWidget::Axis ctkAxesWidget::currentAxis() const
{
  Q_D(const ctkAxesWidget);
  return d->CurrentAxis;
}
//-----------------------------------------------------------------------------
// Commits newAxis as the current axis, repaints and emits
// currentAxisChanged(). No-op (besides highlight sync) if unchanged.
void ctkAxesWidget::setCurrentAxis(ctkAxesWidget::Axis newAxis)
{
  Q_D(ctkAxesWidget);
  // Keep the hover highlight in sync with the committed axis, even when the
  // current axis itself does not change.
  d->HighlightAxis = newAxis;
  if (newAxis == d->CurrentAxis)
    {
    return;
    }
  d->CurrentAxis = newAxis;
  this->repaint();
  emit currentAxisChanged(d->CurrentAxis);
}
//-----------------------------------------------------------------------------
// Convenience slot clearing the selection; also used by the autoReset wiring.
void ctkAxesWidget::setCurrentAxisToNone()
{
  this->setCurrentAxis(ctkAxesWidget::None);
}
// ----------------------------------------------------------------------------
// Whether the current axis automatically resets to None after each selection.
bool ctkAxesWidget::autoReset() const
{
  Q_D(const ctkAxesWidget);
  return d->AutoReset;
}
// ----------------------------------------------------------------------------
// Enables/disables auto-reset by (dis)connecting currentAxisChanged() to
// setCurrentAxisToNone(); listeners still receive the selected axis before
// the reset fires.
void ctkAxesWidget::setAutoReset(bool newAutoReset)
{
  Q_D(ctkAxesWidget);
  if (d->AutoReset == newAutoReset)
    {
    return;
    }
  d->AutoReset = newAutoReset;
  if (d->AutoReset)
    {
    connect(this, SIGNAL(currentAxisChanged(ctkAxesWidget::Axis)),
            this, SLOT(setCurrentAxisToNone()));
    // Start from a clean (None) state when auto-reset is turned on.
    setCurrentAxisToNone();
    }
  else
    {
    disconnect(this, SIGNAL(currentAxisChanged(ctkAxesWidget::Axis)),
               this, SLOT(setCurrentAxisToNone()));
    }
}
//-----------------------------------------------------------------------------
// Paints the axes widget: six labelled axis lines around a central sphere.
// The highlighted axis gets a bold label and a thick, highlight-colored line.
void ctkAxesWidget::paintEvent(QPaintEvent *)
{
  Q_D(ctkAxesWidget);
  QPainter painter(this);

  // Geometry: the axes circle diameter is the widget's smaller dimension
  // divided by the golden ratio; the remaining border hosts the labels.
  QPoint center = QPoint(this->width(), this->height()) / 2;
  int length = qMin(this->width(), this->height());
  int diameter = length / goldenRatio;
  int radius = diameter / 2;
  QList<QPoint> positions = d->extremities(center, radius);

  QFontMetrics fm = this->fontMetrics();
  QSize letterSize = fm.size(Qt::TextShowMnemonic, "X") + QSize(1,1);
  int blankSize = (length - diameter) / 2;
  QSize betweenLetterSpace = QSize(blankSize - letterSize.width(), blankSize - letterSize.height()) / 2;
  QList<QRect> labelRects = d->labelRects(positions, betweenLetterSpace);

  // Draw the axis labels; the highlighted axis label is painted bold.
  for (int i = 0; i < 6; ++i)
    {
    QRect rect = labelRects[i];
    QFont font = painter.font();
    font.setBold(d->HighlightAxis == (i + 1));
    painter.setFont(font);
    painter.drawText(rect, Qt::AlignCenter, d->AxesLabels[i]);
    }

  // Draw the axis lines; the highlighted axis (axes start at 1) uses a
  // thicker pen in the palette highlight color.
  for (int i = 0; i < 6; ++i)
    {
    QPen pen;
    if (d->HighlightAxis == (i + 1))
      {
      pen.setWidth(3);
      pen.setColor(this->palette().color(QPalette::Active, QPalette::Highlight));
      }
    painter.setPen(pen);
    painter.drawLine(center, positions[i]);
    }

  // Draw the center sphere with a radial gradient; it is the highlighted
  // element when no axis is highlighted. (Previously the golden ratio was
  // duplicated here as a raw literal.)
  QSize sphereRadius((blankSize / 2) / goldenRatio,
                     (blankSize / 2) / goldenRatio);
  QRadialGradient rg(QPointF(0.3333, 0.3333), 0.7);
  rg.setCoordinateMode(QGradient::ObjectBoundingMode);
  if (d->HighlightAxis == ctkAxesWidget::None)
    {
    rg.setColorAt(0., this->palette().color(QPalette::Active, QPalette::Highlight));
    }
  else
    {
    rg.setColorAt(0., this->palette().color(QPalette::Active, QPalette::Light));
    }
  rg.setColorAt(1., QColor(64, 64, 72)); // Payne's grey rim
  painter.setBrush(QBrush(rg));
  painter.setPen(QPen(Qt::NoPen));
  painter.drawEllipse(QPointF(center), sphereRadius.width(), sphereRadius.height());
}
// ----------------------------------------------------------------------------------
bool ctkAxesWidget::setAxesLabels(const QStringList& labels)
{
Q_D(ctkAxesWidget);
if (labels.size() < 6)
{
qWarning("ctkAxesWidget::setAxesLabels() failed: At least 6 labels are expected.");
return false;
}
if (labels == d->AxesLabels)
{
return true;
}
d->AxesLabels = labels.mid(0, 6);
this->repaint();
return true;
}
// ----------------------------------------------------------------------------------
// Returns the 6 labels currently used for the axes.
QStringList ctkAxesWidget::axesLabels() const
{
  Q_D(const ctkAxesWidget);
  return d->AxesLabels;
}
// ----------------------------------------------------------------------------------
// Pressing only highlights the axis under the cursor; the selection is
// committed on release (see mouseReleaseEvent).
void ctkAxesWidget::mousePressEvent(QMouseEvent *mouseEvent)
{
  Q_D(ctkAxesWidget);
  d->HighlightAxis = d->axisAtPos(mouseEvent->pos());
  this->update();
}
// ----------------------------------------------------------------------------------
// Keeps the highlight in sync with the axis under the cursor while dragging.
void ctkAxesWidget::mouseMoveEvent(QMouseEvent *mouseEvent)
{
  Q_D(ctkAxesWidget);
  d->HighlightAxis = d->axisAtPos(mouseEvent->pos());
  this->update();
}
// ----------------------------------------------------------------------------------
// Commits the axis under the cursor as the current axis (may be None).
void ctkAxesWidget::mouseReleaseEvent(QMouseEvent *mouseEvent)
{
  Q_D(ctkAxesWidget);
  this->setCurrentAxis(d->axisAtPos(mouseEvent->pos()));
}
// --------------------------------------------------------------------------
// Minimum size needed to render the axes legibly.
QSize ctkAxesWidget::minimumSizeHint()const
{
  // Pretty arbitrary size.
  return QSize(100, 100);
}
// --------------------------------------------------------------------------
// Preferred size; matches the minimum size hint.
QSize ctkAxesWidget::sizeHint()const
{
  // Pretty arbitrary size
  return QSize(100, 100);
}
//----------------------------------------------------------------------------
// The widget prefers a square aspect ratio (see heightForWidth).
bool ctkAxesWidget::hasHeightForWidth()const
{
  return true;
}
//----------------------------------------------------------------------------
int ctkAxesWidget::heightForWidth(int width)const
{
  // Tends to be square
  return width;
}
| 3,933 |
7,353 | <gh_stars>1000+
/**
* @file PasswordListener.h
* @author <NAME> <<EMAIL>>
*
* @section LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the author nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
*
* Object used to listen on a socket, accept clients and identify them
* based on a number they send.
*/
#ifndef BADVPN_CLIENT_PASSWORDLISTENER_H
#define BADVPN_CLIENT_PASSWORDLISTENER_H
#include <stdint.h>
#include <prio.h>
#include <cert.h>
#include <keyhi.h>
#include <misc/debug.h>
#include <misc/sslsocket.h>
#include <structure/LinkedList1.h>
#include <structure/BAVL.h>
#include <base/DebugObject.h>
#include <flow/SingleStreamReceiver.h>
#include <system/BConnection.h>
#include <nspr_support/BSSLConnection.h>
/**
* Handler function called when a client identifies itself with a password
* belonging to one of the password entries.
* The password entry is unregistered before the handler is called
* and must not be unregistered again.
*
* @param user as in {@link PasswordListener_AddEntry}
* @param sock structure containing a {@link BConnection} and, if TLS is enabled,
* the SSL socket with the bottom layer connected to the async interfaces
* of the {@link BConnection} object. The structure was allocated with
* malloc() and the user is responsible for freeing it.
*/
typedef void (*PasswordListener_handler_client) (void *user, sslsocket *sock);
struct PasswordListenerClient;
/**
* Object used to listen on a socket, accept clients and identify them
* based on a number they send.
*/
typedef struct {
    BReactor *bsys;                       // reactor we live in
    BThreadWorkDispatcher *twd;           // may be NULL if ssl_flags does not request threaded SSL
    int ssl;                              // whether TLS is used (1 or 0)
    int ssl_flags;                        // flags passed to BSSLConnection_MakeBackend
    PRFileDesc model_dprfd;               // NSPR descriptor backing model_prfd — assumed; verify against .c file
    PRFileDesc *model_prfd;               // model SSL file descriptor (TLS mode)
    struct PasswordListenerClient *clients_data;  // client slot storage (up to max_clients)
    LinkedList1 clients_free;             // unused client slots
    LinkedList1 clients_used;             // connected clients not yet identified
    BAVL passwords;                       // registered PasswordListener_pwentry nodes
    BListener listener;                   // socket listener on listen_addr
    DebugObject d_obj;
} PasswordListener;
typedef struct {
    uint64_t password;       // value a client must send (returned by PasswordListener_AddEntry)
    BAVLNode tree_node;      // node in PasswordListener.passwords
    PasswordListener_handler_client handler_client;  // called when a client sends this password
    void *user;              // opaque argument passed to handler_client
} PasswordListener_pwentry;
// Per-connection state while a client is being identified.
struct PasswordListenerClient {
    PasswordListener *l;           // owning listener
    LinkedList1Node list_node;     // node in clients_free or clients_used
    sslsocket *sock;               // malloc'd connection, handed to the handler on success
    BSSLConnection sslcon;         // SSL connection state (TLS mode only)
    SingleStreamReceiver receiver; // receives the identification bytes into recv_buffer
    uint64_t recv_buffer;          // the 64-bit password value sent by the client
};
/**
* Initializes the object.
*
* @param l the object
* @param bsys reactor we live in
* @param twd thread work dispatcher. May be NULL if ssl_flags does not request performing SSL
* operations in threads.
* @param listen_addr address to listen on. Must be supported according to {@link BConnection_AddressSupported}.
* @param max_clients maximum number of client to hold until they are identified.
* Must be >0.
* @param ssl whether to use TLS. Must be 1 or 0.
* @param ssl_flags flags passed down to {@link BSSLConnection_MakeBackend}. May be used to
* request performing SSL operations in threads.
* @param cert if using TLS, the server certificate
* @param key if using TLS, the private key
* @return 1 on success, 0 on failure
*/
int PasswordListener_Init (PasswordListener *l, BReactor *bsys, BThreadWorkDispatcher *twd, BAddr listen_addr, int max_clients, int ssl, int ssl_flags, CERTCertificate *cert, SECKEYPrivateKey *key) WARN_UNUSED;
/**
* Frees the object.
*
* @param l the object
*/
void PasswordListener_Free (PasswordListener *l);
/**
* Registers a password entry.
*
* @param l the object
* @param entry uninitialized entry structure
* @param handler_client handler function to call when a client identifies
* with the password which this function returns
* @param user value to pass to handler function
* @return password which a client should send to be recognized and
* dispatched to the handler function. Should be treated as a numeric
* value, which a client should as a little-endian 64-bit unsigned integer
* when it connects.
*/
uint64_t PasswordListener_AddEntry (PasswordListener *l, PasswordListener_pwentry *entry, PasswordListener_handler_client handler_client, void *user);
/**
* Unregisters a password entry.
* Note that when a client is dispatched, its entry is unregistered
* automatically and must not be unregistered again here.
*
* @param l the object
* @param entry entry to unregister
*/
void PasswordListener_RemoveEntry (PasswordListener *l, PasswordListener_pwentry *entry);
#endif
| 1,813 |
521 | #pragma once
#include <elle/reactor/Waitable.hh>
namespace elle
{
namespace reactor
{
/// Lockable is an abstract type derived from Waitable to provide a
/// synchronization mechanism.
///
/// Waiting on a unlocked Lockable will result on a noop. Threads waiting on
/// a locked Lockable will be awaken when the Lockable is released.
///
/// Class inheriting from Lockable must implement acquire and release.
/// - acquire must try to 'lock' the Lockable and return whether it was
/// successful of not.
/// - release must 'unlock' the Lockable and return whether the Lockable is
/// unlocked.
class Lockable
  : public Waitable
{
public:
  /// Try to acquire the lock, without blocking.
  ///
  /// \returns Whether the acquisition was successful (Lockable wasn't
  ///          already locked).
  virtual
  bool
  acquire() = 0;
  /// Release the lock.
  ///
  /// \returns Whether the Lockable is unlocked after the call.
  virtual
  bool
  release() = 0;
};
/// Lock is designed to manage Lockable automatically via RAII.
///
/// The calling Thread will wait until the Lock manages to acquire the
/// Lockable. Lock destructor will release the Lockable.
///
/// \code{.cc}
///
/// void
/// do_something(reactor::Lockable& lockable)
/// {
/// {
/// auto lock = Lock{lockable}; // Wait until Lockable is acquired.
/// // We are sure the Lockable is acquired.
/// // ...
/// } // Destructor of the Lock will release the Lockable.
/// }
///
/// \endcode
    class Lock
    {
    public:
      /// Take a Lockable and block the calling thread until it is acquired.
      ///
      /// \param lockable The Lockable to acquire.
      Lock(Lockable& lockable);
      /// Release the Lockable.
      ~Lock();
    private:
      /// The managed Lockable; it must outlive this Lock.
      Lockable& _lockable;
    };
}
}
| 683 |
712 | <reponame>vicasong/swagger-maven-plugin-with-proto-support<gh_stars>100-1000
package com.wordnik.springmvc;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
/**
* @author carlosjgp
*/
@RequestMapping
@Api
public class EmptyRootPathResource {

    /**
     * Endpoint used to verify swagger scanning of a resource whose class-level
     * {@code @RequestMapping} declares no path.
     */
    @ApiOperation(value = "testingEmptyRootPathResource")
    @RequestMapping(value = "/testingEmptyRootPathResource", method = RequestMethod.GET)
    public ResponseEntity<String> testingEmptyRootPathResource() {
        final String body = "testingEmptyRootPathResource";
        return new ResponseEntity<>(body, HttpStatus.OK);
    }
}
| 254 |
357 | <gh_stars>100-1000
/*
* Copyright © 2018 VMware, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an “AS IS” BASIS, without
* warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "includes.h"
static
VOID
PrintCurrentState(
VOID
);
/*
 * Parse the command-line options (the option set is defined by
 * LWCA_OPTIONS_VALID) into the logging configuration.
 *
 * Each output parameter is optional and is written only when non-NULL.
 * Returns LWCA_SUCCESS, or LWCA_ERROR_INVALID_PARAMETER on an unknown option
 * (via the BAIL_ON_LWCA_ERROR goto to the error label).
 */
static DWORD
LwCAParseArgs(
    int argc,
    char* argv[],
    PBOOL pbEnableSysLog,
    PBOOL pbConsoleLogging,
    LWCA_LOG_LEVEL *pSyslogLevel
    )
{
    DWORD dwError = LWCA_SUCCESS;
    int opt = 0;
    LWCA_LOG_LEVEL syslogLevel = LWCA_LOG_LEVEL_INFO;
    BOOL bEnableSysLog = FALSE;
    BOOL bEnableConsoleLogging = FALSE;
    while ( (opt = getopt( argc, argv, LWCA_OPTIONS_VALID)) != EOF )
    {
        switch ( opt )
        {
            case LWCA_OPTION_ENABLE_SYSLOG:
                bEnableSysLog = TRUE;
                break;
            case LWCA_OPTION_CONSOLE_LOGGING:
                bEnableConsoleLogging = TRUE;
                break;
            case LWCA_OPTION_LOGGING_LEVEL:
                /* no range validation: atoi yields 0 on garbage input */
                syslogLevel = atoi( optarg );
                break;
            default:
                dwError = LWCA_ERROR_INVALID_PARAMETER;
                BAIL_ON_LWCA_ERROR(dwError);
        }
    }
    /* copy results out only for the parameters the caller asked for */
    if (pbEnableSysLog != NULL)
    {
        *pbEnableSysLog = bEnableSysLog;
    }
    if (pbConsoleLogging)
    {
        *pbConsoleLogging = bEnableConsoleLogging;
    }
    if (pSyslogLevel)
    {
        *pSyslogLevel = syslogLevel;
    }
error:
    return dwError;
}
/*
 * Service entry point: parses logging options, initializes the MutentCA
 * service, notifies the Likewise service manager when launched under it,
 * then blocks in LwCAHandleSignals() until shutdown is requested.
 * Returns 0 on clean shutdown, or an LWCA error code.
 */
int
main(
    int argc,
    char* argv[]
    )
{
    DWORD dwError = 0;
    const char* pszSmNotify = NULL;
    int notifyFd = -1;
    int notifyCode = 0;
    int ret = -1;
    LWCA_LOG_LEVEL syslogLevel = LWCA_LOG_LEVEL_INFO;
    BOOL bEnableSysLog = FALSE;
    BOOL bConsoleLogging = FALSE;
    setlocale(LC_ALL, "");
    LwCABlockSelectedSignals();
    dwError = LwCAParseArgs(argc, argv, &bEnableSysLog, &bConsoleLogging, &syslogLevel);
    BAIL_ON_LWCA_ERROR(dwError);
    LwCALogSetLevel(syslogLevel);
    /* log sink priority: syslog, then console, then file (the default) */
    if (bEnableSysLog)
    {
        gLwCALogType = LWCA_LOG_TYPE_SYSLOG;
    }
    else if (bConsoleLogging)
    {
        gLwCALogType = LWCA_LOG_TYPE_CONSOLE;
    }
    else
    {
        gLwCALogType = LWCA_LOG_TYPE_FILE;
    }
    dwError = LwCAInitialize(0, 0);
    BAIL_ON_LWCA_ERROR(dwError);
    // interact with likewise service manager (start/stop control)
    if ((pszSmNotify = getenv("LIKEWISE_SM_NOTIFY")) != NULL)
    {
        notifyFd = atoi(pszSmNotify);
        /* retry the 4-byte status write if interrupted by a signal */
        do
        {
            ret = write(notifyFd, &notifyCode, sizeof(notifyCode));
        } while (ret != sizeof(notifyCode) && errno == EINTR);
        if (ret < 0)
        {
            LWCA_LOG_ERROR("Could not notify service manager: %s (%i)",
                           strerror(errno),
                           errno);
            dwError = LWCA_ERRNO_TO_LWCAERROR(errno);
            BAIL_ON_LWCA_ERROR(dwError);
        }
        close(notifyFd);
    }
    LWCA_LOG_INFO("MutentCA Service started.");
    PrintCurrentState();
    // main thread waits on signals
    dwError = LwCAHandleSignals();
    BAIL_ON_LWCA_ERROR(dwError);
    LWCA_LOG_INFO("MutentCA Service exiting...");
cleanup:
    /* shared exit path: runs for both success and error (error falls through) */
    LwCAShutdown();
    return (dwError);
error:
    LWCA_LOG_ERROR("MutentCA exiting due to error [code:%d]", dwError);
    goto cleanup;
}
/*
 * Prints the server's functional level to stdout at startup.
 */
static
VOID
PrintCurrentState(
    VOID
    )
{
    /* "MutentCA" matches the service name used in every log message above
     * (was misspelled "MutantCA"). */
    printf("MutentCA Server Functional level is LWCA_FUNC_LEVEL_INITIAL\n");
}
| 1,774 |
960 | /*
* ESPRESSIF MIT License
*
* Copyright (c) 2020 <ESPRESSIF SYSTEMS (SHANGHAI) CO., LTD>
*
* Permission is hereby granted for use on all ESPRESSIF SYSTEMS products, in which case,
* it is free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef AUDIO_PLAYER_HELPER_H
#define AUDIO_PLAYER_HELPER_H
#include "audio_player_type.h"
#include "audio_player.h"
#include "esp_audio_helper.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* @brief To provide a function that is setup esp_audio handle by default
*
* @param cfg A pointer to esp_audio_cfg_t
*
* @return
 *      esp_audio_handle_t : on success
* NULL : other errors
*
*/
esp_audio_handle_t setup_esp_audio_instance(esp_audio_cfg_t *cfg);
/*
* @brief A default http player initialization function
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t default_http_player_init(void);
/*
* @brief A default sdcard player initialization function
*
* @param scan_path A pointer to path
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t default_sdcard_player_init(const char *scan_path);
/*
* @brief A default flash music player initialization function
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t default_flash_music_player_init(void);
/*
* @brief A default flash tone initialization function
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t default_flash_tone_player_init(void);
/*
* @brief A default a2dp player initialization function
*
* @param periph A pointer to a2dp peripheral handle
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t default_a2dp_player_init(void *periph);
/*
* @brief A default raw player initialization function
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t default_raw_player_init(void);
/*
* @brief Set raw element handle to audio player
*
* @param handle A pointer to raw stream instance
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t ap_helper_raw_handle_set(void *handle);
/*
* @brief Set a2dp stream handle to audio player
*
* @param el A pointer to a2dp stream instance
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t ap_helper_a2dp_handle_set(void *el);
/*
* @brief Feed data to RAW player
*
* @param buff A pointer to data buffer
* @param buffer_length size of data buffer
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t audio_player_helper_raw_feed_data(void *buff, int buffer_length);
/*
* @brief Set done to RAW player after feed finished
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t audio_player_helper_raw_feed_done();
/*
* @brief Waiting for raw play finished
*
* @param at A pointer to ap_ops_attr_t
* @param para A pointer to ap_ops_para_t
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t audio_player_helper_raw_waiting_finished(ap_ops_attr_t *at, ap_ops_para_t *para);
/*
* @brief A default play interface for audio player
*
* @param at A pointer to ap_ops_attr_t
* @param para A pointer to ap_ops_para_t
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t audio_player_helper_default_play(ap_ops_attr_t *at, ap_ops_para_t *para);
/*
* @brief A default stop interface for audio player
*
* @param at A pointer to ap_ops_attr_t
* @param para A pointer to ap_ops_para_t
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t audio_player_helper_default_stop(ap_ops_attr_t *at, ap_ops_para_t *para);
/*
* @brief A default pause interface for audio player
*
* @param at A pointer to ap_ops_attr_t
* @param para A pointer to ap_ops_para_t
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t audio_player_helper_default_pause(ap_ops_attr_t *at, ap_ops_para_t *para);
/*
* @brief A default resume interface for audio player
*
* @param at A pointer to ap_ops_attr_t
* @param para A pointer to ap_ops_para_t
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t audio_player_helper_default_resume(ap_ops_attr_t *at, ap_ops_para_t *para);
/*
* @brief A default next interface for audio player
*
* @param at A pointer to ap_ops_attr_t
* @param para A pointer to ap_ops_para_t
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t audio_player_helper_default_next(ap_ops_attr_t *at, ap_ops_para_t *para);
/*
* @brief A default prev interface for audio player
*
* @param at A pointer to ap_ops_attr_t
* @param para A pointer to ap_ops_para_t
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t audio_player_helper_default_prev(ap_ops_attr_t *at, ap_ops_para_t *para);
/*
* @brief A default seek interface for audio player
*
* @param at A pointer to ap_ops_attr_t
* @param para A pointer to ap_ops_para_t
*
* @return
 *      ESP_ERR_AUDIO_NO_ERROR : on success
* ESP_FAIL : other errors
*
*/
audio_err_t audio_player_helper_default_seek(ap_ops_attr_t *at, ap_ops_para_t *para);
#ifdef __cplusplus
}
#endif
#endif //
| 2,578 |
716 | // Copyright (c) 2020 The Orbit Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "AccessibleTrack.h"
#include <GteVector.h>
#include <algorithm>
#include "AccessibleTimeGraph.h"
#include "CoreMath.h"
#include "OrbitBase/Logging.h"
#include "Track.h"
#include "Viewport.h"
using orbit_accessibility::AccessibilityState;
using orbit_accessibility::AccessibleInterface;
namespace orbit_gl {
namespace {
// TODO (b/185854980): Remove the fake elements.
// Synthetic accessibility child that groups all of a track's timers into a
// single "Timers" pane occupying the area below the track's last visible
// child. It exists only for the accessibility tree and renders nothing.
class FakeTimerPane : public CaptureViewElement {
 public:
  explicit FakeTimerPane(Track* track, const TimeGraphLayout* layout)
      : CaptureViewElement(track, track->GetViewport(), layout), track_(track) {
    SetWidth(track->GetWidth());
  }
  std::unique_ptr<AccessibleInterface> CreateAccessibleInterface() override {
    return std::make_unique<AccessibleCaptureViewElement>(this, "Timers");
  }
  [[nodiscard]] Vec2 GetPos() const override {
    // The element is positioned after the last visible child. We can safely assume there's always
    // at least one child due to the track header.
    CaptureViewElement* last_child = *track_->GetNonHiddenChildren().rbegin();
    float pos_y = last_child->GetPos()[1] + last_child->GetHeight();
    if (track_->GetNonHiddenChildren().size() == 1) {
      // If there's only one child, the track only has timers and a header. In this case add the
      // content margin
      pos_y += layout_->GetTrackContentTopMargin();
    } else {
      // Otherwise, it's a thread track and we need to include the space between panes.
      // This is really hacky and will go away once this class vanishes, see the TODO on top.
      pos_y += layout_->GetSpaceBetweenThreadPanes();
    }
    Vec2 pos{track_->GetPos()[0], pos_y};
    return pos;
  }
  [[nodiscard]] float GetHeight() const override {
    // Remaining track height below this pane's top edge, minus the bottom margin.
    float height = track_->GetHeight();
    float track_header_height = GetPos()[1] - track_->GetPos()[1];
    height -= track_header_height;
    height -= layout_->GetTrackContentBottomMargin();
    return height;
  }
 private:
  Track* track_;  // Not owned; the pane's parent track.
};
} // namespace
// The accessible name is the track's name; the fake timers pane is created
// eagerly so AccessibleChild() can hand out its interface on demand.
AccessibleTrack::AccessibleTrack(Track* track, const TimeGraphLayout* layout)
    : AccessibleCaptureViewElement(track, track->GetName(),
                                   orbit_accessibility::AccessibilityRole::Grouping),
      track_(track),
      fake_timers_pane_(std::make_unique<FakeTimerPane>(track, layout)) {}
int AccessibleTrack::AccessibleChildCount() const {
  ORBIT_CHECK(track_ != nullptr);
  // Visible children, plus one synthetic element when any timers were
  // rendered: the accessibility interface simulates a "FakeTimerPane" that
  // groups all the timers together.
  // TODO (b/185854980): Remove the fake elements.
  const int child_count = static_cast<int>(track_->GetNonHiddenChildren().size());
  const bool has_timers = track_->GetVisiblePrimitiveCount() > 0;
  return has_timers ? child_count + 1 : child_count;
}
// TODO (b/185854980): Remove the fake elements.
// Returns the accessible interface of the index-th child, where the synthetic
// timers pane (if any timers are visible) occupies the extra slot at
// `child_count`. Returns nullptr for out-of-range indices.
// TODO (b/185854980): Remove the fake elements.
const AccessibleInterface* AccessibleTrack::AccessibleChild(int index) const {
  ORBIT_CHECK(track_ != nullptr);
  const auto& children = track_->GetNonHiddenChildren();
  auto child_count = static_cast<int>(children.size());
  // The last child is the timer pane if it has timers.
  if (index == child_count && track_->GetVisiblePrimitiveCount() > 0) {
    return fake_timers_pane_->GetOrCreateAccessibleInterface();
  }
  // Out of bounds? `index == child_count` is only valid in the timer-pane
  // case handled above; here it would read past the end of `children`, so it
  // must be rejected too (the previous `>` comparison let it through).
  if (index < 0 || index >= child_count) {
    return nullptr;
  }
  // Indexes 0 .. child_count - 1 map to the actual children.
  return children[index]->GetOrCreateAccessibleInterface();
}
// Reports the track's accessibility state flags: always focusable/movable,
// plus focus, expand/collapse and off-screen information where applicable.
AccessibilityState AccessibleTrack::AccessibleState() const {
  ORBIT_CHECK(track_ != nullptr);
  using State = AccessibilityState;
  State result = State::Normal | State::Focusable | State::Movable;
  if (track_->IsTrackSelected()) {
    result |= State::Focused;
  }
  if (track_->IsCollapsible()) {
    result |= State::Expandable;
    result |= track_->IsCollapsed() ? State::Collapsed : State::Expanded;
  }
  if (AccessibleRect().height == 0) {
    result |= State::Offscreen;
  }
  return result;
}
} // namespace orbit_gl | 1,406 |
1,401 | import bpy
class SCN_PT_maps_models_importer(bpy.types.Panel):
    """Panel showing information about the Maps Models Importer context."""
    bl_label = "Maps Models Context"
    bl_idname = "SCN_PT_maps_models_importer"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "scene"

    def draw(self, context):
        """Draw one row per tracked importer property of the active scene."""
        scene = context.scene
        for prop_name in ("maps_models_importer_is_ref_matrix_valid",
                          "maps_models_importer_ref_matrix"):
            self.layout.row().prop(scene, prop_name)
def register():
    """Register the panel class with Blender (add-on enable hook)."""
    bpy.utils.register_class(SCN_PT_maps_models_importer)
def unregister():
    """Unregister the panel class from Blender (add-on disable hook)."""
    bpy.utils.unregister_class(SCN_PT_maps_models_importer)
| 294 |
680 | <reponame>sherrycloudy/hdrnet
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "utils.h"

#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>
// Loads the image at `input_path` and returns it with RGB channel order
// (cv::imread produces BGR, so the channels are swapped before returning).
// NOTE(review): CV_LOAD_IMAGE_COLOR / CV_BGR2RGB are the legacy OpenCV 2.x
// constant names — presumably the project pins an old OpenCV; confirm before
// upgrading.
cv::Mat load_image(std::string input_path) {
  cv::Mat image = cv::imread(input_path, CV_LOAD_IMAGE_COLOR);
  cv::Mat image_rgb;
  cv::cvtColor(image, image_rgb, CV_BGR2RGB, 3);
  return image_rgb;
}
// Reads `length` raw IEEE-754 floats from `filename` into `output`.
// The caller owns `output` and must provide room for `length` floats.
// Throws std::runtime_error when the file cannot be opened.
void load_binary_data(std::string filename, int length, float* output) {
  // Open in binary mode: the payload is raw float data, and text mode would
  // corrupt the stream on platforms that translate line endings.
  std::ifstream file(filename, std::ios::in | std::ios::binary);
  if(!file) {
    std::cout << "Failed to load file " << filename << std::endl;
    // A bare `throw;` with no exception in flight calls std::terminate();
    // throw a real exception object so callers can catch it.
    throw std::runtime_error("Failed to load file " + filename);
  }
  file.read(reinterpret_cast<char*>(output), sizeof(float) * static_cast<size_t>(length));
  file.close();
}
// Loads GLSL source from `filename` into the existing GL shader object
// `shader` and compiles it. On a missing file or a compile failure the error
// (including the GL info log, if any) is printed and std::runtime_error is
// thrown. Note: the previous code used bare `throw;` statements here; with no
// exception in flight those call std::terminate(), so real exception objects
// are thrown instead.
void shader_from_file(const std::string filename, GLuint& shader) {
  std::ifstream file;
  file.open(filename, std::ios::in);
  if(!file) {
    std::cout << "Failed to load file " << filename << std::endl;
    throw std::runtime_error("Failed to load file " + filename);
  }
  // Read the whole file into `content`, reserving the exact size up front.
  std::string content;
  file.seekg(0, std::ios::end);
  content.reserve(file.tellg());
  file.seekg(0, std::ios::beg);
  content.assign(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>());
  const GLchar* source = (const GLchar*) content.c_str();
  glShaderSource(shader, 1, &source, NULL);
  glCompileShader(shader);
  GLint shader_success;
  glGetShaderiv(shader, GL_COMPILE_STATUS, &shader_success);
  if (shader_success == GL_FALSE) {
    std::cout << "Failed to compile shader" << std::endl;
    GLint logSize = 0;
    glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &logSize);
    std::vector<GLchar> errorLog(logSize);
    glGetShaderInfoLog(shader, logSize, &logSize, &errorLog[0]);
    std::cout << errorLog.data() << std::endl;
    glDeleteShader(shader);
    throw std::runtime_error("Failed to compile shader " + filename);
  }
}
| 777 |
348 | {"nom":"Mauzé-Thouarsais","circ":"3ème circonscription","dpt":"Deux-Sèvres","inscrits":1769,"abs":1007,"votants":762,"blancs":12,"nuls":7,"exp":743,"res":[{"nuance":"REM","nom":"<NAME>","voix":268},{"nuance":"FN","nom":"Mme <NAME>","voix":122},{"nuance":"FI","nom":"M. <NAME>","voix":118},{"nuance":"LR","nom":"Mme <NAME>","voix":105},{"nuance":"SOC","nom":"M. <NAME>","voix":58},{"nuance":"ECO","nom":"Mme <NAME>","voix":27},{"nuance":"DLF","nom":"Mme <NAME>","voix":26},{"nuance":"EXG","nom":"<NAME>","voix":10},{"nuance":"DIV","nom":"Mme <NAME>","voix":5},{"nuance":"EXD","nom":"M. <NAME>","voix":4}]} | 250 |
335 | <gh_stars>100-1000
{
"word": "Jol",
"definitions": [
"Set off; go.",
"Have a good time; celebrate in a lively way.",
"Engage in a flirtation or a casual love affair."
],
"parts-of-speech": "Verb"
} | 107 |
648 | <filename>spec/hl7.fhir.core/3.0.1/package/DataElement-ConceptMap.group.unmapped.code.json
{"resourceType":"DataElement","id":"ConceptMap.group.unmapped.code","meta":{"lastUpdated":"2017-04-19T07:44:43.294+10:00"},"url":"http://hl7.org/fhir/DataElement/ConceptMap.group.unmapped.code","status":"draft","experimental":true,"stringency":"fully-specified","element":[{"id":"ConceptMap.group.unmapped.code","path":"ConceptMap.group.unmapped.code","short":"Fixed code when mode = fixed","definition":"The fixed code to use when the mode = 'fixed' - all unmapped codes are mapped to a single fixed code.","min":0,"max":"1","type":[{"code":"code"}]}]} | 210 |
362 | from chromatica import logger
from chromatica.util import load_external_module
import chromatica.util as util
load_external_module(__file__, "")
from clang import cindex
import os
import re
log = logger.logging.getLogger("chromatica.compile_args")
DEFAULT_STD={"c" : ["-std=c11"], \
"cpp" : ["-std=c++14"]}
SOURCE_EXTS=[".c", ".cc", ".cpp", ".cxx"]
compile_args_files = ['.color_coded', '.clang', 'compile_flags.txt', '.cquery', '.ccls', '.chromatica', 'compile_commands.json']
def set_default_std(stds):
    """Replace the module-level DEFAULT_STD mapping with `stds`.

    The previous implementation assigned to a *local* named DEFAULT_STD,
    so the module-level default was never actually updated.
    Returns True for compatibility with existing callers.
    """
    global DEFAULT_STD
    DEFAULT_STD = stds
    return True
class CompileArgsDatabase(object):
    """Discovers and parses per-project compile-argument files and serves
    clang compile flags for a given filename/filetype.

    The nearest ancestor directory of the CWD containing any file listed in
    ``compile_args_files`` is treated as the project root; a
    ``compile_commands.json`` is loaded through clang's CompilationDatabase.
    """

    def __init__(self, global_path, global_args=None):
        self.__global_path = global_path
        self.__paths = []            # candidate args files, nearest dir first
        self.__args_file = None      # args file actually selected
        self.__cdb_file = None
        self.__cdb_path = None       # directory holding compile_commands.json
        self.cdb = None              # clang CompilationDatabase handle
        self.global_args = global_args
        self.compile_args = []       # flags applied to every filetype
        self.ft_compile_args = { "c" : [], "cpp" : [] , "objc" : [], "objcpp" : [] }
        self.find_per_project_file()
        if len(self.__paths) > 0:
            self.__args_file = self.__paths[0]
        self.parse_args_file()

    def find_per_project_file(self):
        """Walk up from the CWD collecting known args files; stop at the
        first directory containing at least one (the project root)."""
        search_path = os.getcwd()
        found_project_root = False
        while not found_project_root and os.path.dirname(search_path) != search_path:
            for args_file in compile_args_files:
                args_file_path = os.path.join(search_path, args_file)
                if os.path.exists(args_file_path):
                    self.__paths.append(args_file_path)
                    found_project_root = True
            search_path = os.path.dirname(search_path)

    def parse_args_file(self):
        """Dispatch to the parser matching the selected args file format."""
        if not self.__args_file:
            return
        filename = os.path.basename(self.__args_file)
        if filename == ".chromatica":
            self.parse_chromatica_file()
        elif filename in (".color_coded", ".clang", "compile_flags.txt"):
            self.parse_simple_file()
        elif filename == ".cquery":
            self.parse_ccls_file()
        elif filename == ".ccls":
            self.parse_ccls_file()
        elif filename == ".ycm_extra_flags":
            # NOTE(review): parse_ycm_file is not defined; unreachable today
            # because ".ycm_extra_flags" is not in compile_args_files.
            self.parse_ycm_file()
        else:
            # compile_commands.json: load the database from its directory.
            self.init_cdb(os.path.dirname(self.__args_file))

    def parse_simple_file(self):
        """Parse a plain whitespace-separated flags file (.color_coded,
        .clang, compile_flags.txt) into ``compile_args``."""
        if self.__args_file is None:
            return
        with open(self.__args_file, "r") as fp:
            lines = fp.readlines()
        for line in lines:
            if len(line) == 0:
                continue
            self.compile_args.extend(line.strip().split())

    def parse_chromatica_file(self):
        """Parse a .chromatica file: ``key=value`` entries (flags, c, cpp,
        compilation_database) or bare flag lines; ``#`` starts a comment."""
        assert len(self.compile_args) == 0
        if self.__args_file is None:
            return
        with open(self.__args_file, "r") as fp:
            lines = fp.readlines()
        funcs = {"flags" : lambda s, value: s.compile_args.extend(value.split()),
                 "c" : lambda s, value: s.ft_compile_args["c"].extend(value.split()),
                 "cpp" : lambda s, value: s.ft_compile_args["cpp"].extend(value.split()),
                 "compilation_database" : lambda s, value: s.init_cdb(value), }
        for line in lines:
            if len(line) == 0 :
                continue
            line = line.strip()
            if line.startswith("#"):
                continue
            pos = line.find("=")
            if pos != -1:
                key = line[:pos]
                try:
                    funcs[key](self, line[pos+1:])
                except Exception:
                    # %s placeholder fix: `key` was previously passed as a
                    # stray positional arg the logging module could not format.
                    log.error("Invalid configuration key: %s", key)
            else:
                self.compile_args.extend(line.split())

    def parse_ccls_file(self):
        """Parse .cquery/.ccls: one flag per line; %-prefixed directives
        select per-filetype buckets or name a compilation database."""
        if self.__args_file is None:
            return
        with open(self.__args_file, "r") as fp:
            lines = fp.readlines()
        for line in lines:
            if len(line) == 0 :
                continue
            line = line.strip()
            if line.startswith("clang"):
                continue
            if line.startswith("#"):
                continue
            elif line.startswith("%"):
                keys = [key for key in line.split(" ") if len(key) > 0]
                for key in keys:
                    if key == "%compile_commands.json":
                        self.init_cdb("compile_commands.json")
                    elif key == "%c" or key == "%h":
                        self.ft_compile_args["c"].append(keys[-1])
                    elif key == "%cpp" or key == "%hpp":
                        # BUGFIX: C++ flags were appended to the "c" bucket.
                        self.ft_compile_args["cpp"].append(keys[-1])
                    elif key == "%objective-c":
                        self.ft_compile_args["objc"].append(keys[-1])
                    elif key == "%objective-cpp":
                        self.ft_compile_args["objcpp"].append(keys[-1])
            else:
                self.compile_args.append(line)

    def init_cdb(self, value):
        """Open the clang compilation database located at ``value``
        (absolute, or relative to the selected args file's directory).
        On failure ``self.cdb`` stays None."""
        log.info("cdb: %s" % value)
        cdb_rel_path = value.strip("\"")
        if os.path.isabs(cdb_rel_path):
            cdb_path = cdb_rel_path
        else:
            cdb_path = os.path.join(os.path.dirname(self.__args_file), cdb_rel_path)
        if cdb_path and os.path.isdir(cdb_path):
            self.__cdb_path = cdb_path
        # __cdb_path defaults to None in __init__, so a bad path now fails
        # inside fromDirectory and is logged instead of raising AttributeError.
        try:
            self.cdb = cindex.CompilationDatabase.fromDirectory(self.__cdb_path)
        except Exception:
            log.error("Invalid compilation database file %s" % self.__cdb_path)
            self.__cdb_path = None

    def get_cdb_args(self, filename):
        """Return normalized flags recorded in the compilation database for
        ``filename``: include paths made absolute relative to the build cwd,
        -o/-c and the source file itself stripped."""
        res = []
        ret = self.cdb.getCompileCommands(filename)
        _basename = os.path.basename(filename)
        log.info("Read cdb for: %s" % filename)
        if ret:
            for cmds in ret:
                cwd = cmds.directory
                skip = 0
                last = ''
                for arg in cmds.arguments:
                    if skip and (len(arg) == 0 or arg[0] != "-"):
                        skip = 0
                        continue
                    if arg == "-o" or arg == "-c":
                        skip = 1
                        continue
                    # combined -I<path>: emit the absolute form only
                    # (BUGFIX: previously the raw arg was appended a second
                    # time by the fall-through below)
                    if arg != '-I' and arg.startswith('-I'):
                        include_path = arg[2:]
                        if not os.path.isabs(include_path):
                            include_path = os.path.normpath(
                                os.path.join(cwd, include_path))
                        res.append('-I' + include_path)
                        continue
                    if arg != '-isystem' and arg.startswith('-isystem'):
                        include_path = arg[8:]
                        if not os.path.isabs(include_path):
                            include_path = os.path.normpath(
                                os.path.join(cwd, include_path))
                        res.append('-isystem' + include_path)
                        continue
                    if _basename in arg:
                        continue
                    else:
                        # if last added switch was standalone include then we need to append path to it
                        if last == '-I' or last == '-isystem':
                            include_path = arg
                            if not os.path.isabs(include_path):
                                include_path = os.path.normpath(os.path.join(cwd, include_path))
                            res[len(res) - 1] += include_path
                            last = ''
                        else:
                            res.append(arg)
                            last = arg
        else:
            print("Cannot find compile flags for %s in compilation database" % filename)
        return res

    def get_args_filename(self, filename, search_source_args=False):
        """Return all flags for ``filename``: global args, file args and
        compilation-database entries. With ``search_source_args``, a header
        without its own cdb entry falls back to a sibling source file."""
        ret = []
        if self.global_args is not None:
            # copy: the += below must not mutate the shared global_args list
            # (BUGFIX: the old aliasing grew global_args on every call)
            ret = list(self.global_args)
        ret += self.compile_args
        if self.cdb is not None:
            cdb_ret = self.get_cdb_args(filename)
            if not cdb_ret and search_source_args:
                filename_base = os.path.splitext(filename)[0]
                for source_ext in SOURCE_EXTS:
                    cdb_ret = self.get_cdb_args(filename_base + source_ext)
                    if cdb_ret:
                        break
            ret += cdb_ret
        return ret

    def get_args_filename_ft(self, filename, filetype, search_source_args=False):
        """Like get_args_filename, plus the per-filetype flags."""
        ret = self.get_args_filename(filename, search_source_args)
        return ret + self.ft_compile_args[filetype]

    def get_available_args_files(self):
        """Return every candidate args file found during the search."""
        return self.__paths

    @property
    def args_file(self):
        """Path of the args file in use, or None."""
        return self.__args_file
| 4,759 |
656 | <reponame>librg/librg
/* this file is usually used to manually test some stuff */
#include <assert.h>
// #include <stdlib.h>
// #include <stdio.h>
// void *myalloc(size_t size) {
// void *ptr = malloc(size);
// printf("allocating mem[%zd]: 0x%llx\n", size, (uint64_t)ptr);
// return ptr;
// }
// void myfree(void *ptr) {
// printf("freeing mem: 0x%llx\n", (uint64_t)ptr);
// free(ptr);
// return;
// }
// #define LIBRG_MEM_ALLOC(x) myalloc(x)
// #define LIBRG_MEM_FREE(x) myfree(x)
#define LIBRG_IMPL
#define LIBRG_DEBUG
// #define LIBRG_WORLDWRITE_MAXQUERY 360
#include "librg.h"
// TODO: add librg_enet code
// TODO: add behavior support
/* impl part*/
/* LIBRG_WRITE_CREATE handler for the serializing world. Currently a no-op
 * that returns 0 (presumably "no extra payload / success" — confirm against
 * the librg event docs). The commented printf is kept for quick debugging. */
int32_t _parent_create(librg_world *world, librg_event *event) {
    zpl_unused(world);
    zpl_unused(event);
    // printf("_parent_create %p %d\n", world, event->type);
    return 0;
}
/* LIBRG_READ_CREATE handler for the deserializing world. Currently a no-op
 * that returns 0 (presumably "success" — confirm against the librg event
 * docs). The commented printf is kept for quick debugging. */
int32_t _child_create(librg_world *world, librg_event *event) {
    zpl_unused(world);
    zpl_unused(event);
    // printf("_child_create %p %d\n", world, event->type);
    return 0;
}
/* Manual smoke test: build a world, track 40k entities across a small
 * neighborhood of chunks, run a visibility query for owner 1, serialize the
 * owner's view, and read it back into a second world. */
int main() {
    printf("version %d\n", librg_version());
    librg_world *world = librg_world_create();
    assert(librg_world_valid(world));
    librg_config_chunksize_set(world, 16, 16, 16);
    librg_config_chunkamount_set(world, 16, 16, 16);
    librg_config_chunkoffset_set(world, LIBRG_OFFSET_MID, LIBRG_OFFSET_MID, LIBRG_OFFSET_MID);
    librg_event_set(world, LIBRG_WRITE_CREATE, _parent_create);
    librg_event_set(world, LIBRG_READ_CREATE, _child_create);
    /* one observed entity owned by owner 1, placed at the origin chunk */
    const int myId = 24;
    const int observerRadius = 1;
    const librg_chunk chunkId = librg_chunk_from_chunkpos(world, 0, 0, 0);
    librg_entity_track(world, myId);
    zpl_printf("setting chunk to: %lld\n", chunkId);
    librg_entity_chunk_set(world, myId, chunkId);
    librg_entity_owner_set(world, myId, 1);
    librg_entity_radius_set(world, myId, observerRadius);
    /* scatter entities over a small neighborhood of chunks */
    const int totalEnts = 40000;
    for (int i=0;i<totalEnts;i++) {
        if (librg_entity_track(world, i) == LIBRG_OK) {
            librg_entity_chunk_set(world, i, librg_chunk_from_chunkpos(world, -3+(i%6), -2+(i%4), -1+(i%2)));
        }
    }
    zpl_printf("> querying...\n");
    #define RESSIZE 4096
    int64_t results[RESSIZE] = {0};
    #define BUFSIZE 10000
    char buffer[BUFSIZE] = {0};
    zpl_f64 tstart = zpl_time_rel();
    size_t amount = RESSIZE;
    librg_world_query(world, 1, results, &amount);
    /* %zu: `amount` is a size_t; printing it with %d was undefined behavior */
    zpl_printf("query found %zu results of %d in (%.3f ms)\n", amount, totalEnts, zpl_time_rel() - tstart);
    // for (int i=0; i<amount; i++) zpl_printf("result #%d: %lld\n", i, results[i]);
    zpl_printf("> encoding...\n");
    tstart = zpl_time_rel();
    size_t buffer_size = BUFSIZE; /* was a duplicated literal 10000 */
    int32_t result = librg_world_write(world, 1, buffer, &buffer_size, NULL);
    if (result > 0) {
        printf("AAA, you didnt have enough space to write stuff in your buffer mister\n");
    }
    zpl_printf("written %zu bytes in (%.3f ms)\n", buffer_size, zpl_time_rel() - tstart);
    /* decode into a fresh world and compare entity counts by eye */
    librg_world *w2 = librg_world_create();
    librg_event_set(w2, LIBRG_WRITE_CREATE, _parent_create);
    librg_event_set(w2, LIBRG_READ_CREATE, _child_create);
    tstart = zpl_time_rel();
    int r = librg_world_read(w2, 1, buffer, buffer_size, NULL);
    zpl_printf("read %zu bytes, result: %d, entities: %d in (%.3f ms)\n", buffer_size, r, librg_entity_count(w2), zpl_time_rel() - tstart);
    librg_entity_untrack(world, myId);
    librg_world_destroy(world);
    librg_world_destroy(w2);
    return 0;
}
| 1,577 |
322 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.eagle.query.aggregate.test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.eagle.query.aggregate.timeseries.TimeSeriesAggregator;
import org.apache.eagle.query.aggregate.AggregateFunctionType;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.eagle.log.entity.test.TestEntity;
public class TestTimeSeriesAggregator {
private static final Logger LOG = LoggerFactory.getLogger(TestFlatAggregator.class);
@SuppressWarnings("serial")
private TestEntity createEntity(final String cluster, final String datacenter, final String rack, int numHosts, long numClusters, long timestamp) {
TestEntity entity = new TestEntity();
Map<String, String> tags = new HashMap<String, String>() {{
put("cluster", cluster);
put("datacenter", datacenter);
put("rack", rack);
}};
entity.setTags(tags);
entity.setNumHosts(numHosts);
entity.setNumClusters(numClusters);
entity.setTimestamp(timestamp);
return entity;
}
@Test
public void testTimeSeriesAggregator() {
TestEntity[] entities = new TestEntity[8];
entities[0] = createEntity("cluster1", "dc1", "rack123", 12, 2, 1386120000*1000); // bucket 0
entities[1] = createEntity("cluster1", "dc1", "rack123", 20, 1, 1386121060*1000); // bucket 17
entities[2] = createEntity("cluster1", "dc1", "rack128", 10, 0, 1386121070*1000); // bucket 17
entities[3] = createEntity("cluster2", "dc1", "rack125", 9, 2, 1386122122*1000); // bucket 35
entities[4] = createEntity("cluster2", "dc1", "rack126", 15, 5, 1386123210*1000); // bucket 53
entities[5] = createEntity("cluster2", "dc1", "rack234", 25, 1, 1386123480*1000); // bucket 58
entities[6] = createEntity("cluster2", "dc1", "rack234", 12, 0, 1386123481*1000); // bucket 58
entities[7] = createEntity("cluster1", "dc1", "rack123", 3, 2, 1386123482*1000); // bucket 58
TimeSeriesAggregator tsAgg = new TimeSeriesAggregator(Arrays.asList("cluster"), Arrays.asList(AggregateFunctionType.sum), Arrays.asList("numHosts"),
1386120000*1000, 1386123600*1000, 60*1000);
try {
for (TestEntity e : entities) {
tsAgg.accumulate(e);
}
Map<List<String>, List<Double>> result = tsAgg.result();
Assert.assertEquals(result.size(), 6);
Assert.assertEquals(result.get(Arrays.asList("cluster1", "0")).get(0), (double)(entities[0].getNumHosts()), 0.001);
Assert.assertEquals(result.get(Arrays.asList("cluster1", "17")).get(0), (double)(entities[1].getNumHosts()+entities[2].getNumHosts()), 0.001);
Assert.assertEquals(result.get(Arrays.asList("cluster2", "35")).get(0), (double)(entities[3].getNumHosts()), 0.001);
Assert.assertEquals(result.get(Arrays.asList("cluster2", "53")).get(0), (double)(entities[4].getNumHosts()), 0.001);
Assert.assertEquals(result.get(Arrays.asList("cluster2", "58")).get(0), (double)(entities[5].getNumHosts()+entities[6].getNumHosts()), 0.001);
Assert.assertEquals(result.get(Arrays.asList("cluster1", "58")).get(0), (double)(entities[7].getNumHosts()), 0.001);
Map<List<String>, List<double[]>> tsResult = tsAgg.getMetric();
Assert.assertEquals(tsResult.size(), 2);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster1")).get(0).length, 60);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster1")).get(0)[0], (double)(entities[0].getNumHosts()), 0.001);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster1")).get(0)[17], (double)(entities[1].getNumHosts()+entities[2].getNumHosts()), 0.001);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster2")).get(0)[35], (double)(entities[3].getNumHosts()), 0.001);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster2")).get(0)[53], (double)(entities[4].getNumHosts()), 0.001);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster2")).get(0)[58], (double)(entities[5].getNumHosts()+entities[6].getNumHosts()), 0.001);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster1")).get(0)[58], (double)(entities[7].getNumHosts()), 0.001);
} catch (Exception ex) {
LOG.error("Can not aggregate", ex);
Assert.fail("Can not aggregate");
}
tsAgg = new TimeSeriesAggregator(new ArrayList<String>(), Arrays.asList(AggregateFunctionType.sum), Arrays.asList("numHosts"),
1386120000*1000, 1386123600*1000, 60*1000);
try {
for (TestEntity e : entities) {
tsAgg.accumulate(e);
}
Map<List<String>, List<Double>> result = tsAgg.result();
Assert.assertEquals(result.size(), 5);
Assert.assertEquals(result.get(Arrays.asList("0")).get(0), (double)(entities[0].getNumHosts()), 0.001);
Assert.assertEquals(result.get(Arrays.asList("17")).get(0), (double)(entities[1].getNumHosts()+entities[2].getNumHosts()), 0.001);
Assert.assertEquals(result.get(Arrays.asList("35")).get(0), (double)(entities[3].getNumHosts()), 0.001);
Assert.assertEquals(result.get(Arrays.asList("53")).get(0), (double)(entities[4].getNumHosts()), 0.001);
Assert.assertEquals(result.get(Arrays.asList("58")).get(0), (double)(entities[5].getNumHosts()+entities[6].getNumHosts()+entities[7].getNumHosts()), 0.001);
Map<List<String>, List<double[]>> tsResult = tsAgg.getMetric();
Assert.assertEquals(tsResult.size(), 1);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0).length, 60);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0)[0], (double)(entities[0].getNumHosts()), 0.001);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0)[17], (double)(entities[1].getNumHosts()+entities[2].getNumHosts()), 0.001);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0)[35], (double)(entities[3].getNumHosts()), 0.001);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0)[53], (double)(entities[4].getNumHosts()), 0.001);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0)[58], (double)(entities[5].getNumHosts()+entities[6].getNumHosts()+entities[7].getNumHosts()), 0.001);
} catch (Exception ex) {
LOG.error("Can not aggregate", ex);
Assert.fail("Can not aggregate");
}
tsAgg = new TimeSeriesAggregator(Arrays.asList("cluster"), Arrays.asList(AggregateFunctionType.count), Arrays.asList("*"),
1386120000*1000, 1386123600*1000, 60*1000);
try {
for (TestEntity e : entities) {
tsAgg.accumulate(e);
}
Map<List<String>, List<Double>> result = tsAgg.result();
Assert.assertEquals(result.size(), 6);
Assert.assertEquals(result.get(Arrays.asList("cluster1", "0")).get(0), (double)(1), 0.001);
Assert.assertEquals(result.get(Arrays.asList("cluster1", "17")).get(0), (double)(2), 0.001);
Assert.assertEquals(result.get(Arrays.asList("cluster2", "35")).get(0), (double)(1), 0.001);
Assert.assertEquals(result.get(Arrays.asList("cluster2", "53")).get(0), (double)(1), 0.001);
Assert.assertEquals(result.get(Arrays.asList("cluster2", "58")).get(0), (double)(2), 0.001);
Assert.assertEquals(result.get(Arrays.asList("cluster1", "58")).get(0), (double)(1), 0.001);
Map<List<String>, List<double[]>> tsResult = tsAgg.getMetric();
Assert.assertEquals(tsResult.size(), 2);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster1")).get(0).length, 60);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster1")).get(0)[0], (double)(1), 0.001);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster1")).get(0)[17], (double)(2), 0.001);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster2")).get(0)[35], (double)(1), 0.001);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster2")).get(0)[53], (double)(1), 0.001);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster2")).get(0)[58], (double)(2), 0.001);
Assert.assertEquals(tsResult.get(Arrays.asList("cluster1")).get(0)[58], (double)(1), 0.001);
} catch (Exception ex) {
LOG.error("Can not aggregate", ex);
Assert.fail("Can not aggregate");
}
tsAgg = new TimeSeriesAggregator(new ArrayList<String>(), Arrays.asList(AggregateFunctionType.count), Arrays.asList("*"),
1386120000*1000, 1386123600*1000, 60*1000);
try {
for (TestEntity e : entities) {
tsAgg.accumulate(e);
}
Map<List<String>, List<Double>> result = tsAgg.result();
Assert.assertEquals(result.size(), 5);
Assert.assertEquals(result.get(Arrays.asList("0")).get(0), (double)(1), 0.001);
Assert.assertEquals(result.get(Arrays.asList("17")).get(0), (double)(2), 0.001);
Assert.assertEquals(result.get(Arrays.asList("35")).get(0), (double)(1), 0.001);
Assert.assertEquals(result.get(Arrays.asList("53")).get(0), (double)(1), 0.001);
Assert.assertEquals(result.get(Arrays.asList("58")).get(0), (double)(3), 0.001);
Map<List<String>, List<double[]>> tsResult = tsAgg.getMetric();
Assert.assertEquals(tsResult.size(), 1);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0).length, 60);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0)[0], (double)(1), 0.001);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0)[17], (double)(2), 0.001);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0)[35], (double)(1), 0.001);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0)[53], (double)(1), 0.001);
Assert.assertEquals(tsResult.get(new ArrayList<String>()).get(0)[58], (double)(3), 0.001);
} catch (Exception ex) {
LOG.error("Can not aggregate", ex);
Assert.fail("Can not aggregate");
}
}
}
| 5,088 |
412 | <reponame>tobireinhard/cbmc
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
/* Memory-safety invariant over the (data, allocated) pair.
 *
 * NOTE: "==>" is CPROVER's logical-implication operator and __CPROVER_r_ok
 * its pointer-readability predicate; this file is a CBMC regression test
 * and parses only with the CPROVER front end, not a plain C compiler.
 *
 * Holds iff: a NULL data pointer implies zero allocation, and a non-zero
 * allocation implies `data` points to `allocated` readable bytes. */
bool validate(const char *data, unsigned allocated)
{
  // clang-format off
  bool check_1 = (data == NULL ==> allocated == 0);
  bool check_2 = (allocated != 0 ==> __CPROVER_r_ok(data, allocated));
  // clang-format on
  return check_1 && check_2;
}
/* Verification harness: `allocated` is deliberately left uninitialized so
 * CBMC treats it as a nondeterministic unsigned. `data` is NULL exactly when
 * nothing is allocated, otherwise a fresh malloc of that size. Assuming
 * validate() and immediately asserting it checks that the predicate is
 * stable under an assume/assert round-trip. */
void main()
{
  char *data;
  unsigned allocated; /* nondeterministic under CBMC -- intentionally uninitialized */
  data = (allocated == 0) ? NULL : malloc(allocated);
  __CPROVER_assume(validate(data, allocated));
  assert(validate(data, allocated));
}
| 197 |
1,185 | /*
* Copyright (c) 2018, Nordic Semiconductor
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package no.nordicsemi.android.ble.common.profile.cgm;
import android.bluetooth.BluetoothDevice;
import androidx.annotation.NonNull;
import java.util.Calendar;
import no.nordicsemi.android.ble.data.Data;
/**
 * Callback interface for the Continuous Glucose Monitoring (CGM) Session
 * Start Time characteristic. Implementations receive either the parsed
 * session start time (with CRC verified when present) or, via the default
 * method, a notification that the E2E-CRC check failed.
 */
public interface CGMSessionStartTimeCallback {

	/**
	 * Callback called whenever the CGM Session Start Time characteristic was read.
	 * <p>
	 * If the E2E CRC field was present in the CGM packet, the data has been verified against it.
	 * If CRC check has failed, the
	 * {@link #onContinuousGlucoseMonitorSessionStartTimeReceivedWithCrcError(BluetoothDevice, Data)}
	 * will be called instead.
	 *
	 * @param device   the target device.
	 * @param calendar the date and time received, as {@link Calendar} object.
	 *                 Time zone and DST offset are included in the calendar.
	 * @param secured  true if the packet was sent with E2E-CRC value that was verified to
	 *                 match the packet, false if the packet didn't contain CRC field.
	 *                 For a callback in case of invalid CRC value check
	 *                 {@link #onContinuousGlucoseMonitorSessionStartTimeReceivedWithCrcError(BluetoothDevice, Data)}.
	 */
	void onContinuousGlucoseMonitorSessionStartTimeReceived(
			@NonNull final BluetoothDevice device,
			@NonNull final Calendar calendar,
			final boolean secured);

	/**
	 * Callback called when a CGM Session Start Time packet with E2E field was received but the
	 * CRC check has failed.
	 *
	 * @param device the target device.
	 * @param data   the CGM Session Start Time packet data that was received, including the
	 *               CRC field.
	 */
	default void onContinuousGlucoseMonitorSessionStartTimeReceivedWithCrcError(
			@NonNull final BluetoothDevice device,
			@NonNull final Data data) {
		// ignore
	}
}
| 990 |
327 | <gh_stars>100-1000
from sublime_plugin import WindowCommand
from ..libraries.quick_menu import QuickMenu
class DeviotLibraryExamplesCommand(WindowCommand):
    """Sublime Text window command showing the Deviot library-examples menu."""

    def run(self):
        # Delegate to QuickMenu, which builds and displays the quick panel.
        menu = QuickMenu()
        menu.quick_libraries()
| 66 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
package ifc.accessibility;
import lib.MultiMethodTest;
import util.ValueComparer;
import com.sun.star.accessibility.XAccessibleEditableText;
import com.sun.star.beans.PropertyValue;
/**
* Testing <code>com.sun.star.accessibility.XAccessibleEditableText</code>
* interface methods :
* <ul>
* <li><code> cutText()</code></li>
* <li><code> pasteText()</code></li>
* <li><code> deleteText()</code></li>
* <li><code> insertText()</code></li>
* <li><code> replaceText()</code></li>
* <li><code> setAttributes()</code></li>
* <li><code> setText()</code></li>
* </ul> <p>
*
* This test needs the following object relations :
* <ul>
* <li> <code>'XAccessibleEditableText.hasAttr'</code>
* (of type <code>Boolean</code>):
* Indicates whether or not the text has changeable attributes.
* E.g. text within writer document have attributes which can
* be changed, while the text within edit field has fixed
* attributes. <p>
* If the relation is <code>false</code> then the component
* has fixed text attributes. </li>
* </ul> <p>
*
* @see com.sun.star.accessibility.XAccessibleEditableText
*/
public class _XAccessibleEditableText extends MultiMethodTest {
public XAccessibleEditableText oObj = null;
String pasteText = null;
String initialText = "";
/**
* Indicates whether or not the text has changeable attributes.
* E.g. text within writer document have attributes which can
* be changed, while the text within edit field has fixed
* attributes.
*/
private boolean changeableAttr = true;
/**
* Retrieves object relation. Stores initial component text
* for restoding it in <code>after</code>.
*/
protected void before() {
Boolean b = (Boolean)
tEnv.getObjRelation("XAccessibleEditableText.hasAttr");
if (b != null) {
changeableAttr = b.booleanValue();
}
initialText = oObj.getText();
}
/**
* Calls the method with the wrong indexes and with the correct indexes.
* Stores cutted text in the variable <code>pasteText</code>.
* Has OK status if exceptions were thrown for the wrong indexes,
* if exception wasn't thrown for the correct indexes.
*/
public void _cutText() {
boolean res = true;
boolean locRes = true;
String curText = null;
String oldText = oObj.getText();
log.println("Text: '" + oldText + "'");
int length = oObj.getCharacterCount();
log.println("Character count: " + length);
try {
log.print("cutText(-1," + (length-1) + "): ");
locRes = oObj.cutText(-1, length - 1);
log.println(locRes);
log.println("exception was expected => FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(oldText);
}
try {
log.print("cutText(0," + (length+1) + "): ");
locRes = oObj.cutText(0, length + 1);
log.println(locRes);
log.println("exception was expected => FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(oldText);
}
try {
pasteText = oldText;
log.print("cutText(0," + length + "): ");
locRes = oObj.cutText(0, length);
log.println(locRes);
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.length() == 0 && locRes;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("unexpected exception");
e.printStackTrace(log);
res &= false;
}
tRes.tested("cutText()", res);
}
/**
* Calls the method with the wrong indexes and with the correct indexes.
* Has OK status if exceptions were thrown for the wrong indexes,
* if exception wasn't thrown for the correct indexes and if cutted text was
* pasted.
* The following method tests are to be executed before:
* <ul>
* <li> <code>cutText()</code> </li>
* </ul>
*/
public void _pasteText() {
requiredMethod("cutText()");
boolean res = true;
boolean locRes = true;
String curText = null;
String text = oObj.getText();
log.println("Text: '" + text + "'");
int length = oObj.getCharacterCount();
log.println("Character count: " + length);
try {
log.print("pasteText(-1): ");
locRes = oObj.pasteText(-1);
log.println(locRes);
log.println("exception was expected => FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(text);
}
try {
log.print("pasteText(" + (length+1) + "): ");
locRes = oObj.pasteText(length + 1);
log.println(locRes);
log.println("exception was expected => FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(text);
}
try {
log.print("pasteText(" + (length) + "): ");
locRes = oObj.pasteText(length);
log.println(locRes);
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(text + pasteText) && locRes;
log.println("Expected text: '" + text + pasteText + "'");
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("unexpected exception");
e.printStackTrace(log);
res &= false;
}
tRes.tested("pasteText()", res);
}
/**
* Calls the method with the wrong indexes and with the correct indexes,
* checks text after method call.
* Has OK status if exceptions were thrown for the wrong indexes,
* if exception wasn't thrown for the correct indexes and if deleted string
* was really deleted from the text.
* The following method tests are to be executed before:
* <ul>
* <li> <code>insertText()</code> </li>
* </ul>
*/
public void _deleteText() {
executeMethod("insertText()");
boolean res = true;
boolean locRes = true;
String curText = null;
String text = oObj.getText();
log.println("Text: '" + text + "'");
int length = oObj.getCharacterCount();
log.println("Character count: " + length);
try {
log.print("deleteText(-1," + length + "): ");
locRes = oObj.deleteText(-1, length);
log.println(locRes);
log.println("exception was expected => FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(text);
}
try {
log.print("deleteText(0," + (length+1) + "): ");
locRes = oObj.deleteText(0, length + 1);
log.println(locRes);
log.println("exception was expected => FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(text);
}
try {
if (length >= 1) {
log.print("deleteText(" + (length-1) + "," + (length) + "): ");
locRes = oObj.deleteText(length - 1, length);
log.println(locRes);
String expStr = expStr = text.substring(0, length - 1);
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(expStr);
log.println("Expected text: '" + expStr + "'");
}
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("unexpected exception");
e.printStackTrace(log);
res &= false;
}
tRes.tested("deleteText()", res);
}
/**
* Calls the method with the wrong indexes and with the correct indexes,
* checks text after method call.
* Has OK status if exceptions were thrown for the wrong indexes,
* if exception wasn't thrown for the correct indexes and if inserted string
* was really inserted into the text.
* The following method tests are to be executed before:
* <ul>
* <li> <code>pasteText()</code> </li>
* </ul>
*/
public void _insertText() {
executeMethod("pasteText()");
boolean res = true;
boolean locRes = true;
String curText = null;
String text = oObj.getText();
log.println("Text: '" + text + "'");
int length = oObj.getCharacterCount();
log.println("Character count: " + length);
final String insStr = "Inserted string";
try {
log.print("insertText(insStr, -1): ");
locRes = oObj.insertText(insStr, -1);
log.println(locRes);
log.println("exception was expected=> FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(text);
}
try {
log.print("insertText(insStr," + (length+1) + "): ");
locRes = oObj.insertText(insStr, length+1);
log.println(locRes);
log.println("exception was expected => FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(text);
}
try {
log.print("insertText(insStr," + length + "): ");
locRes = oObj.insertText(insStr, length);
log.println(locRes);
curText = oObj.getText();
res &= curText.equals(text + insStr);
log.println("Current text: '" + curText + "'");
log.println("Expected text: '" + text + insStr + "'");
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("unexpected exception => FAILED");
e.printStackTrace(log);
res &= false;
}
tRes.tested("insertText()", res);
}
/**
* Calls the method with the wrong indexes and with the correct indexes,
* checks text after method call.
* Has OK status if exceptions were thrown for the wrong indexes,
* if exception wasn't thrown for the correct indexes and if part of text
* was really replaced by the specified replacement string.
* The following method tests are to be executed before:
* <ul>
* <li> <code>deleteText()</code> </li>
* </ul>
*/
public void _replaceText() {
executeMethod("deleteText()");
boolean res = true;
boolean locRes = true;
String curText = null;
final String sReplacement = "String for replace";
String oldText = oObj.getText();
int startIndx = oldText.length();
oObj.setText(oldText + " part of string for replace");
String text = oObj.getText();
log.println("Text: '" + text + "'");
int length = oObj.getCharacterCount();
log.println("Character count: " + length);
try {
log.print("replaceText(-1," + length + "): ");
locRes = oObj.replaceText(-1, length, sReplacement);
log.println(locRes);
log.println("exception was expected => FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(text);
}
try {
log.print("replaceText(0," + (length+1) + "): ");
locRes = oObj.replaceText(0, length + 1, sReplacement);
log.println(locRes);
log.println("exception was expected => FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
res &= curText.equals(text);
}
try {
log.print("replaceText(" + startIndx + "," + length + "): ");
locRes = oObj.replaceText(startIndx, length, sReplacement);
log.println(locRes);
curText = oObj.getText();
log.println("Current text: '" + curText + "'");
log.println("Expected text: '" + oldText + sReplacement + "'");
res &= curText.equals(oldText + sReplacement);
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("unexpected exception");
e.printStackTrace(log);
res &= false;
}
tRes.tested("replaceText()", res);
}
/**
* Calls the method with the wrong indexes and with the correct indexes,
* checks attributes after method call.
* Has OK status if exceptions were thrown for the wrong indexes,
* if exception wasn't thrown for the correct indexes and if attributes
* of text was changed.
* The following method tests are to be executed before:
* <ul>
* <li> <code>replaceText()</code> </li>
* </ul>
*/
public void _setAttributes() {
executeMethod("replaceText()");
boolean res = true;
boolean locRes = true;
String text = oObj.getText();
log.println("Text: '" + text + "'");
int length = oObj.getCharacterCount();
log.println("Length: " + length);
PropertyValue[] attrs = null;
try {
attrs = oObj.getCharacterAttributes(0, new String[]{""});
log.print("setAttributes(-1," + (length - 1) + "):");
locRes = oObj.setAttributes(-1, length - 1, attrs);
log.println(locRes);
log.println("exception was expected => FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
res &= true;
}
try {
log.print("setAttributes(0," + (length+1) + "):");
locRes = oObj.setAttributes(0, length + 1, attrs);
log.println(locRes);
log.println("exception was expected => FAILED");
res &= false;
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("expected exception => OK");
res &= true;
}
//change old attributes set
for(int i = 0; i < attrs.length; i++) {
if (attrs[i].Name.equals("CharColor")) {
attrs[i].Value = new Integer(-2);
}
}
try {
log.print("setAttributes(0," + length + "):");
locRes = oObj.setAttributes(0, length, attrs);
log.println(locRes);
res &= (changeableAttr && locRes)
|| (!changeableAttr && !locRes);
if (changeableAttr) {
log.print("checking that new attributes was set...");
PropertyValue[] newAttrs = oObj.getCharacterAttributes(0, new String[]{""});
locRes = ValueComparer.equalValue(attrs, newAttrs);
log.println(locRes);
res &= locRes;
} else {
log.println("Text attributes can't be changed.");
}
} catch(com.sun.star.lang.IndexOutOfBoundsException e) {
log.println("unexpected exception => FAILED");
e.printStackTrace(log);
res &= false;
}
tRes.tested("setAttributes()", res);
}
/**
* Calls the method with different parameters and checks text.
*/
public void _setText() {
executeMethod("setAttributes()");
boolean res = true;
boolean locRes = true;
String oldText = oObj.getText();
log.println("Current text: '" + oldText + "'");
String newText = "New text";
log.print("setText('" + newText + "'): ");
locRes = oObj.setText(newText);
log.println(locRes);
String newCurText = oObj.getText();
log.println("getText(): '" + newCurText + "'");
res &= locRes && newCurText.equals(newText);
newText = "";
log.print("setText('" + newText + "'): ");
locRes = oObj.setText(newText);
log.println(locRes);
newCurText = oObj.getText();
log.println("getText(): '" + newCurText + "'");
res &= locRes && newCurText.equals(newText);
log.print("setText('" + oldText + "'): ");
locRes = oObj.setText(oldText);
log.println(locRes);
newCurText = oObj.getText();
log.println("getText(): '" + newCurText + "'");
res &= locRes && newCurText.equals(oldText);
tRes.tested("setText()", res);
}
/**
* Restores initial component text.
*/
protected void after() {
oObj.setText(initialText);
}
} | 8,510 |
1,338 | /*
* Copyright 2013 Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* <NAME>, <EMAIL>
*/
#ifndef KERNEL_UTIL_RANDOM_H
#define KERNEL_UTIL_RANDOM_H
#include <smp.h>
#include <SupportDefs.h>
#define MAX_FAST_RANDOM_VALUE 0x7fff
#define MAX_RANDOM_VALUE 0x7fffffffu
#define MAX_SECURE_RANDOM_VALUE 0xffffffffu
static const int kFastRandomShift = 15;
static const int kRandomShift = 31;
static const int kSecureRandomShift = 32;
#ifdef __cplusplus
extern "C" {
#endif
unsigned int fast_random_value(void);
unsigned int random_value(void);
unsigned int secure_random_value(void);
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
template<typename T>
T
fast_get_random()
{
	// Fill T by concatenating 15-bit chunks from fast_random_value()
	// until every bit position has been covered at least once.
	T value = 0;
	for (size_t filled = 0; filled < sizeof(T) * 8; filled += kFastRandomShift)
		value |= (T)fast_random_value() << filled;

	return value;
}
template<typename T>
T
get_random()
{
	// Fill T by concatenating 31-bit chunks from random_value()
	// until every bit position has been covered at least once.
	T value = 0;
	for (size_t filled = 0; filled < sizeof(T) * 8; filled += kRandomShift)
		value |= (T)random_value() << filled;

	return value;
}
template<typename T>
T
secure_get_random()
{
	// Fill T by concatenating 32-bit chunks from secure_random_value()
	// until every bit position has been covered at least once.
	T value = 0;
	for (size_t filled = 0; filled < sizeof(T) * 8; filled += kSecureRandomShift)
		value |= (T)secure_random_value() << filled;

	return value;
}
#endif // __cplusplus
#endif // KERNEL_UTIL_RANDOM_H
| 564 |
1,108 | <gh_stars>1000+
import pytest
import jiant.utils.python.functional as py_functional
def test_indexer():
    """indexer(key) returns a callable that looks ``key`` up in a mapping."""
    lookup_one = py_functional.indexer(1)
    assert lookup_one({1: 2}) == 2
    # A string key is absent from an int-keyed dict, so the lookup raises.
    with pytest.raises(KeyError):
        py_functional.indexer("1")({1: 2})
| 92 |
1,085 | /*
* Copyright (C) 2017-2019 Dremio Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dremio.service.grpc;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Maps;
/**
* Used to construct service configuration.
* Follows the configuration mentioned@ https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
*/
public class DefaultGrpcServiceConfigProvider {
  /**
   * Gets the default service configuration for given list of service names.
   *
   * @param serviceNames fully qualified gRPC service names
   * @return service-config map with one method config per service
   */
  public static Map<String, Object> getDefaultGrpcServiceConfig(List<String> serviceNames) {
    return setGrpcServiceConfig(serviceNames, getDefaultRetryProperties());
  }

  /**
   * Gets the service configuration updated with given retry properties
   * for given list of service names.
   *
   * @param serviceNames       fully qualified gRPC service names
   * @param retryPropertiesMap retry-policy attributes to apply to every service
   * @return service-config map with one method config per service
   */
  public static Map<String, Object> getGrpcServiceConfig(List<String> serviceNames, Map<String, Object> retryPropertiesMap) {
    return setGrpcServiceConfig(serviceNames, retryPropertiesMap);
  }

  /**
   * Builds the "methodConfig" list: one entry per service, each carrying the
   * shared retry policy.
   */
  private static Map<String, Object> setGrpcServiceConfig(List<String> serviceNames, Map<String, Object> retryPropertiesMap) {
    List<Map<String, Object>> methodConfigs = new ArrayList<>();
    for (String serviceName : serviceNames) {
      Map<String, Object> nameEntry = new HashMap<>();
      nameEntry.put("service", serviceName);

      Map<String, Object> methodConfig = new HashMap<>();
      methodConfig.put("name", Collections.<Object>singletonList(nameEntry));
      methodConfig.put("retryPolicy", retryPropertiesMap);
      methodConfigs.add(methodConfig);
    }

    Map<String, Object> serviceConfig = new HashMap<>();
    serviceConfig.put("methodConfig", methodConfigs);
    return serviceConfig;
  }

  /**
   * Default exponential-backoff retry policy, retrying only on UNAVAILABLE.
   */
  private static Map<String, Object> getDefaultRetryProperties() {
    Map<String, Object> retryPolicy = new HashMap<>();
    retryPolicy.put("maxAttempts", 10D);
    retryPolicy.put("initialBackoff", "1s");
    retryPolicy.put("maxBackoff", "30s");
    retryPolicy.put("backoffMultiplier", 2D);
    retryPolicy.put("retryableStatusCodes", Collections.<Object>singletonList("UNAVAILABLE"));
    return retryPolicy;
  }
}
| 934 |
501 | /*
xmalloc.c - Simple malloc debugging library implementation
Copyright (c) 2001-2006 <NAME> <<EMAIL>>.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
TODO:
- red zones
- group dumps by source location
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
/*
Internal stuff.
*/
/* A single allocation record: the user-visible pointer, the recorded size,
   the allocation site, and a link to the next record in the same hash
   bucket. */
typedef struct hashTableItemRec {
  void *ptr;                     /* address handed to the user */
  int bytes;                     /* size recorded for this allocation */
  const char *file;              /* allocation site: source file */
  int line;                      /* allocation site: line number */
  const char *func;              /* allocation site: function name */
  struct hashTableItemRec *next; /* next record in the same bucket chain */
} hashTableItem;

/* Hash table of live allocations, keyed by pointer value; `table` is an
   array of TABLE_SIZE bucket chains. */
typedef struct {
  hashTableItem **table;
} hashTable;

/* Running statistics.  The non-static `xmalloc_current` and
   `xmalloc_current_blocks` are part of the public API. */
static int xmalloc_peak;
int xmalloc_current;
static int xmalloc_peak_blocks;
int xmalloc_current_blocks;
/* Countdown for forced allocation failure (see xmalloc_configure);
   -1 disables, -2 marks "already failed". */
static int xmalloc_fail_after;

/* Bucket count is 2^TABLE_BITS. */
#define TABLE_BITS 8
#define TABLE_MASK ((1 << TABLE_BITS) - 1)
#define TABLE_SIZE (1 << TABLE_BITS)
/* Allocate an empty allocation-tracking table.  Returns NULL if either the
   table struct or its bucket array cannot be allocated; the bucket array is
   zero-initialized so every chain starts out empty. */
static hashTable *
hash_table_new(void)
{
  hashTable *tbl = malloc(sizeof(*tbl));

  if (tbl == NULL)
    return NULL;
  tbl->table = calloc(TABLE_SIZE, sizeof(*tbl->table));
  if (tbl->table == NULL)
    {
      free(tbl);
      return NULL;
    }
  return tbl;
}
/* Map a pointer value to a bucket index in [0, TABLE_SIZE).  Successive
   8-bit slices of the pointer are XOR-mixed into a TABLE_BITS-wide
   accumulator; by the original author's admission this is an ad-hoc hash,
   but it only needs to spread debugging records across buckets. */
static int
hash_void_ptr(void *ptr)
{
  int h = 0;
  int i;

  for (i = 0; i < sizeof(ptr)*8 / TABLE_BITS; i++)
    {
      h ^= (long)ptr >> i*8;
      h += i * 17;
      h &= TABLE_MASK;
    }
  return h;
}
/* Record a live allocation of `bytes` bytes at `ptr`, remembering the
   allocation site, and update current/peak statistics.  Aborts via assert
   if the bookkeeping record itself cannot be allocated. */
static void
hash_table_add(hashTable *tbl, void *ptr, int bytes,
	       const char *file, int line, const char *func)
{
  hashTableItem *rec;
  hashTableItem **link;

  rec = malloc(sizeof(*rec));
  assert(rec != NULL);
  rec->ptr = ptr;
  rec->bytes = bytes;
  rec->file = file;
  rec->line = line;
  rec->func = func;
  rec->next = NULL;

  /* Append at the tail of the bucket's chain. */
  link = &tbl->table[hash_void_ptr(ptr)];
  while (*link != NULL)
    link = &(*link)->next;
  *link = rec;

  /* Maintain byte and block counters and their high-water marks. */
  xmalloc_current += bytes;
  if (xmalloc_current > xmalloc_peak)
    xmalloc_peak = xmalloc_current;
  xmalloc_current_blocks++;
  if (xmalloc_current_blocks > xmalloc_peak_blocks)
    xmalloc_peak_blocks = xmalloc_current_blocks;
}
/* Forget the allocation record for `ptr` and update the statistics.
   Prints a diagnostic and aborts if `ptr` was never recorded (or was
   already freed).

   Bug fix: the original walked the chain with `while (item->ptr != ptr)`
   and only checked validity afterwards, so a pointer that hashed into a
   non-empty bucket but was not in the chain dereferenced NULL instead of
   reaching the "invalid ptr" diagnostic.  The walk now stops at the end
   of the chain. */
static void
hash_table_del(hashTable *tbl, void *ptr)
{
  int i;
  hashTableItem *item, *prev;

  i = hash_void_ptr(ptr);
  item = tbl->table[i];
  prev = NULL;
  while (item != NULL && item->ptr != ptr)
    {
      prev = item;
      item = item->next;
    }
  if (item == NULL)
    {
      printf("xfree: invalid ptr %p\n", ptr);
      abort();
    }

  xmalloc_current -= item->bytes;
  xmalloc_current_blocks--;

  /* Unlink the record from its chain and release it. */
  if (prev != NULL)
    prev->next = item->next;
  else
    tbl->table[i] = item->next;
  free(item);
}
/* The single global allocation table, created lazily on first use. */
static hashTable *xmalloc_table = NULL;

/* Lazily create the global table and zero all statistics.  Idempotent:
   only the first call does any work; later calls merely re-assert the
   table's integrity. */
static void
xmalloc_init(void)
{
  if (xmalloc_table == NULL)
    {
      xmalloc_table = hash_table_new();
      xmalloc_peak = 0;
      xmalloc_peak_blocks = 0;
      xmalloc_current = 0;
      xmalloc_current_blocks = 0;
      /* Forced-failure injection starts out disabled. */
      xmalloc_fail_after = -1;
    }
  assert(xmalloc_table != NULL);
  assert(xmalloc_table->table != NULL);
}
/*
Public API.
*/
/* Arm forced-failure injection: the `fail_after`-th allocation from now
   will return NULL (0 fails the very next one; -1 disables injection). */
void
xmalloc_configure(int fail_after)
{
  xmalloc_init();
  xmalloc_fail_after = fail_after;
}
/* Print every block that is still allocated (with its allocation site)
   plus overall peak statistics, and return the number of leaked blocks
   (0 when there are no leaks).

   Fix: corrected the typo "chuncks" -> "chunks" in the summary output. */
int
xmalloc_dump_leaks(void)
{
  int i;
  int num_leaks = 0;
  int leaked_bytes = 0;
  hashTableItem *item;

  xmalloc_init();
  for (i = 0; i < TABLE_SIZE; i++)
    {
      item = xmalloc_table->table[i];
      while (item != NULL)
	{
	  printf("%s:%d: %s: %d bytes at %p not freed\n",
		 item->file, item->line, item->func, item->bytes, item->ptr);
	  num_leaks++;
	  leaked_bytes += item->bytes;
	  item = item->next;
	}
    }
  if (num_leaks == 0)
    printf("No memory leaks.\n");
  else
    printf("%d unfreed memory chunks, total %d unfreed bytes.\n",
	   num_leaks, leaked_bytes);

  printf("Peak memory consumption %d bytes (%.1f kB, %.1f MB) in %d blocks ",
	 xmalloc_peak, (double)xmalloc_peak / 1024,
	 (double)xmalloc_peak / (1024*1024), xmalloc_peak_blocks);
  printf("(average ");
  if (xmalloc_peak_blocks)
    /* Rounded-to-nearest integer division. */
    printf("%d", ((xmalloc_peak + xmalloc_peak_blocks / 2)
		  / xmalloc_peak_blocks));
  else
    printf("N/A");
  printf(" bytes per block).\n");

  return num_leaks;
}
/* malloc() replacement: allocate `size` bytes and record the allocation
   site.  Returns NULL when malloc fails or when forced-failure injection
   (see xmalloc_configure) triggers.  Allocating again after a forced
   failure trips an assertion: the code under test should have propagated
   the earlier failure. */
void *
xmalloc_impl(size_t size, const char *file, int line, const char *func)
{
  void *ptr;

  xmalloc_init();
  assert(size > 0);

  if (xmalloc_fail_after == 0)
    {
      /* Countdown hit zero: fail this allocation and enter the
	 "already failed" state (-2). */
      xmalloc_fail_after = -2;
#if 0
      printf("xmalloc: forced failure %s:%d: %s\n", file, line, func);
#endif
      return NULL;
    }
  else if (xmalloc_fail_after == -2)
    {
      printf("xmalloc: called after failure from %s:%d: %s\n",
	     file, line, func);
      assert(0);
    }
  else if (xmalloc_fail_after > 0)
    xmalloc_fail_after--;

  ptr = malloc(size);
  /* Only successful allocations are tracked. */
  if (ptr != NULL)
    hash_table_add(xmalloc_table, ptr, size, file, line, func);
  return ptr;
}
/* calloc() replacement: zero-allocate nmemb*size bytes and record the
   allocation site.  Same forced-failure behavior as xmalloc_impl.
   NOTE(review): the tracked size `nmemb * size` is narrowed to int inside
   hash_table_add; presumably fine for the test sizes this library is used
   with, but very large allocations would overflow the statistics. */
void *
xcalloc_impl(size_t nmemb, size_t size, const char *file, int line,
	     const char *func)
{
  void *ptr;

  xmalloc_init();
  assert(size > 0);

  if (xmalloc_fail_after == 0)
    {
      /* Forced failure; enter the "already failed" state. */
      xmalloc_fail_after = -2;
#if 0
      printf("xcalloc: forced failure %s:%d: %s\n", file, line, func);
#endif
      return NULL;
    }
  else if (xmalloc_fail_after == -2)
    {
      printf("xcalloc: called after failure from %s:%d: %s\n",
	     file, line, func);
      assert(0);
    }
  else if (xmalloc_fail_after > 0)
    xmalloc_fail_after--;

  ptr = calloc(nmemb, size);
  if (ptr != NULL)
    hash_table_add(xmalloc_table, ptr, nmemb * size, file, line, func);
  return ptr;
}
/* free() replacement: drop the allocation record and release the memory.
   Freeing NULL is a no-op, mirroring free().  The source-location
   parameters exist for API symmetry; hash_table_del prints its own
   diagnostics on an invalid pointer. */
void
xfree_impl(void *ptr, const char *file, int line, const char *func)
{
  xmalloc_init();
  if (ptr == NULL)
    {
      free(ptr);
      return;
    }
  hash_table_del(xmalloc_table, ptr);
  free(ptr);
}
/* realloc() replacement.  Unlike realloc(), a NULL `ptr` and a zero
   `new_size` are disallowed (asserted): this wrapper is strictly for
   resizing an existing tracked block.  On success the old record is
   replaced with one for the new pointer/size; on failure (real or forced)
   NULL is returned and, matching realloc semantics, the old block stays
   valid and tracked. */
void *
xrealloc_impl(void *ptr, size_t new_size, const char *file, int line,
	      const char *func)
{
  void *new_ptr;

  xmalloc_init();
  assert(ptr != NULL);
  assert(new_size > 0);

  if (xmalloc_fail_after == 0)
    {
      /* Forced failure; enter the "already failed" state. */
      xmalloc_fail_after = -2;
      return NULL;
    }
  else if (xmalloc_fail_after == -2)
    {
      printf("xrealloc: called after failure from %s:%d: %s\n",
	     file, line, func);
      assert(0);
    }
  else if (xmalloc_fail_after > 0)
    xmalloc_fail_after--;

  new_ptr = realloc(ptr, new_size);
  if (new_ptr != NULL)
    {
      /* Re-key the record: the block may have moved. */
      hash_table_del(xmalloc_table, ptr);
      hash_table_add(xmalloc_table, new_ptr, new_size, file, line, func);
    }
  return new_ptr;
}
/* EOF */
| 3,047 |
1,271 | <filename>examples/perf/micro/t2.py<gh_stars>1000+
import numpy as np
def foo(x, y):
    """Return the matrix product of ``x`` and ``y`` (via ``np.dot``)."""
    product = np.dot(x, y)
    return product
class Test:
    """Callable wrapper that forwards all arguments to a wrapped function.

    The wrapped function's return value is intentionally discarded
    (``__call__`` returns ``None``), matching the benchmark harness this
    class was written for.
    """

    def __init__(self, f):
        self.f = f

    def __call__(self, *args, **kwargs):
        # Forward everything verbatim; drop the result on purpose.
        self.f(*args, **kwargs)
# Benchmark setup: wrap foo in the forwarding callable and prepare operands.
a = Test(foo)

x = np.zeros((256, 512))
y = np.zeros((512, 512))

def example2():
    # Run 100 wrapped (256x512)x(512x512) matmuls; profiled below.
    for i in range(0,100):
        a(x, y)

import profile
profile.run("example2()")
| 248 |
348 | {"nom":"Ecouen","circ":"7ème circonscription","dpt":"Val-d'Oise","inscrits":5019,"abs":2783,"votants":2236,"blancs":20,"nuls":9,"exp":2207,"res":[{"nuance":"REM","nom":"M. <NAME>","voix":733},{"nuance":"FI","nom":"<NAME>","voix":461},{"nuance":"LR","nom":"M. <NAME>","voix":339},{"nuance":"FN","nom":"M. <NAME>","voix":276},{"nuance":"SOC","nom":"Mme <NAME>","voix":182},{"nuance":"ECO","nom":"Mme <NAME>","voix":87},{"nuance":"COM","nom":"Mme <NAME>","voix":58},{"nuance":"ECO","nom":"Mme <NAME>","voix":26},{"nuance":"DIV","nom":"M. <NAME>","voix":18},{"nuance":"DVG","nom":"Mme <NAME>","voix":15},{"nuance":"EXG","nom":"M. <NAME>","voix":12}]} | 265 |
679 | <reponame>Grosskopf/openoffice
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#include "precompiled_svtools.hxx"
#include "mousefunction.hxx"
#include "svtools/table/tablecontrolinterface.hxx"
#include <tools/diagnose_ex.h>
#include <vcl/window.hxx>
//......................................................................................................................
namespace svt { namespace table
{
//......................................................................................................................
//==================================================================================================================
//= MouseFunction
//==================================================================================================================
//------------------------------------------------------------------------------------------------------------------
/// Atomically adds one reference and returns the new reference count.
oslInterlockedCount MouseFunction::acquire()
{
    oslInterlockedCount const newCount = osl_incrementInterlockedCount( &m_refCount );
    return newCount;
}
//------------------------------------------------------------------------------------------------------------------
/// Atomically drops one reference; deletes the object when the last
/// reference is released.  Returns the new reference count (0 on deletion).
oslInterlockedCount MouseFunction::release()
{
    oslInterlockedCount const newCount = osl_decrementInterlockedCount( &m_refCount );
    if ( newCount != 0 )
        return newCount;
    delete this;
    return 0;
}
//==================================================================================================================
//= ColumnResize
//==================================================================================================================
//------------------------------------------------------------------------------------------------------------------
/** Mouse-move handling for interactive column resizing.

    While no resize is in progress this only adjusts the mouse pointer
    (split cursor over a column divider in the header row).  During a
    resize it sets the pointer to reflect whether the current position is
    a legal drop point and draws a vertical tracking line at the would-be
    new column border.
*/
FunctionResult ColumnResize::handleMouseMove( ITableControl& i_tableControl, MouseEvent const & i_event )
{
    Point const aPoint = i_event.GetPosPixel();

    if ( m_nResizingColumn == COL_INVALID )
    {
        // if we hit a column divider, change the mouse pointer accordingly
        Pointer aNewPointer( POINTER_ARROW );
        TableCell const tableCell = i_tableControl.hitTest( aPoint );
        if ( ( tableCell.nRow == ROW_COL_HEADERS ) && ( tableCell.eArea == ColumnDivider ) )
        {
            aNewPointer = Pointer( POINTER_HSPLIT );
        }
        i_tableControl.setPointer( aNewPointer );

        return SkipFunction; // TODO: is this correct?
    }

    ::Size const tableSize = i_tableControl.getTableSizePixel();

    // set proper pointer: "not allowed" when dragging left of the column's
    // own start or right of the table, split cursor otherwise
    Pointer aNewPointer( POINTER_ARROW );
    ColumnMetrics const & columnMetrics( i_tableControl.getColumnMetrics( m_nResizingColumn ) );
    if  (   ( aPoint.X() > tableSize.Width() )
        ||  ( aPoint.X() < columnMetrics.nStartPixel )
        )
    {
        aNewPointer = Pointer( POINTER_NOTALLOWED );
    }
    else
    {
        aNewPointer = Pointer( POINTER_HSPLIT );
    }
    i_tableControl.setPointer( aNewPointer );

    // show a 1px-wide tracking line spanning the table height
    i_tableControl.hideTracking();
    i_tableControl.showTracking(
        Rectangle(
            Point( aPoint.X(), 0 ),
            Size( 1, tableSize.Height() )
        ),
        SHOWTRACK_SPLIT | SHOWTRACK_WINDOW
    );

    (void)i_event;
    return ContinueFunction;
}
//------------------------------------------------------------------------------------------------------------------
/** Starts a column-resize operation when the mouse is pressed on a column
    divider within the header row; any other press is left to other
    handlers (SkipFunction).
*/
FunctionResult ColumnResize::handleMouseDown( ITableControl& i_tableControl, MouseEvent const & i_event )
{
    if ( m_nResizingColumn != COL_INVALID )
    {
        OSL_ENSURE( false, "ColumnResize::handleMouseDown: suspicious: MouseButtonDown while still tracking?" );
        return ContinueFunction;
    }

    TableCell const tableCell( i_tableControl.hitTest( i_event.GetPosPixel() ) );
    bool const bHitColumnDivider =  ( tableCell.nRow == ROW_COL_HEADERS )
                                &&  ( tableCell.nColumn != COL_INVALID )
                                &&  ( tableCell.eArea == ColumnDivider );
    if ( !bHitColumnDivider )
        return SkipFunction;

    // remember the column being resized, and grab the mouse for the drag
    m_nResizingColumn = tableCell.nColumn;
    i_tableControl.captureMouse();
    return ActivateFunction;
}
//------------------------------------------------------------------------------------------------------------------
/** Finishes a column-resize operation: hides the tracking line, applies
    the new column width (converted to app-font units and clamped to the
    column's min/max width, where 0 means "no limit"), and releases the
    mouse.
*/
FunctionResult ColumnResize::handleMouseUp( ITableControl& i_tableControl, MouseEvent const & i_event )
{
    if ( m_nResizingColumn == COL_INVALID )
        return SkipFunction;

    Point const aPoint = i_event.GetPosPixel();

    i_tableControl.hideTracking();
    PColumnModel const pColumn = i_tableControl.getModel()->getColumnModel( m_nResizingColumn );
    long const maxWidthLogical = pColumn->getMaxWidth();
    long const minWidthLogical = pColumn->getMinWidth();

    // new position of mouse
    long const requestedEnd = aPoint.X();

    // old position of right border
    long const oldEnd = i_tableControl.getColumnMetrics( m_nResizingColumn ).nEndPixel;

    // position of left border if cursor in the to-be-resized column
    long const columnStart = i_tableControl.getColumnMetrics( m_nResizingColumn ).nStartPixel;
    long const requestedWidth = requestedEnd - columnStart;
        // TODO: this is not correct, strictly: It assumes that the mouse was pressed exactly on the "end" pos,
        // but for a while now, we have relaxed this, and allow clicking a few pixels aside, too

    // a drop left of the column's start pixel is ignored entirely
    if ( requestedEnd >= columnStart )
    {
        long requestedWidthLogical = i_tableControl.pixelWidthToAppFont( requestedWidth );
        // respect column width limits
        if ( oldEnd > requestedEnd )
        {
            // column has become smaller, check against minimum width
            if ( ( minWidthLogical != 0 ) && ( requestedWidthLogical < minWidthLogical ) )
                requestedWidthLogical = minWidthLogical;
        }
        else if ( oldEnd < requestedEnd )
        {
            // column has become larger, check against max width
            if ( ( maxWidthLogical != 0 ) && ( requestedWidthLogical >= maxWidthLogical ) )
                requestedWidthLogical = maxWidthLogical;
        }
        pColumn->setWidth( requestedWidthLogical );
        i_tableControl.invalidate( TableAreaAll );
    }

    i_tableControl.setPointer( Pointer() );
    i_tableControl.releaseMouse();

    m_nResizingColumn = COL_INVALID;
    return DeactivateFunction;
}
//==================================================================================================================
//= RowSelection
//==================================================================================================================
//------------------------------------------------------------------------------------------------------------------
/// Row selection has no mouse-move behavior; the event is never consumed.
FunctionResult RowSelection::handleMouseMove( ITableControl& i_tableControl, MouseEvent const & i_event )
{
    (void)i_tableControl;
    (void)i_event;
    return SkipFunction;
}
//------------------------------------------------------------------------------------------------------------------
/** Handles a button press on a data row: with selection disabled the
    clicked cell is merely activated, otherwise the selection engine
    decides.  Marks the function active (and returns ActivateFunction)
    exactly when the click was consumed.
*/
FunctionResult RowSelection::handleMouseDown( ITableControl& i_tableControl, MouseEvent const & i_event )
{
    TableCell const tableCell( i_tableControl.hitTest( i_event.GetPosPixel() ) );
    if ( tableCell.nRow < 0 )
        return SkipFunction;

    bool handled = false;
    if ( i_tableControl.getSelEngine()->GetSelectionMode() == NO_SELECTION )
    {
        // selection is disabled: just move the cursor to the clicked cell
        i_tableControl.activateCell( tableCell.nColumn, tableCell.nRow );
        handled = true;
    }
    else
    {
        handled = i_tableControl.getSelEngine()->SelMouseButtonDown( i_event );
    }

    if ( !handled )
        return SkipFunction;
    m_bActive = true;
    return ActivateFunction;
}
//------------------------------------------------------------------------------------------------------------------
/** Forwards the button-up on a data row to the selection engine (when
    selection is enabled), and deactivates this function if a previous
    mouse-down activated it.
*/
FunctionResult RowSelection::handleMouseUp( ITableControl& i_tableControl, MouseEvent const & i_event )
{
    TableCell const tableCell = i_tableControl.hitTest( i_event.GetPosPixel() );
    bool const bOnDataRow = ( tableCell.nRow >= 0 );
    if ( bOnDataRow && ( i_tableControl.getSelEngine()->GetSelectionMode() != NO_SELECTION ) )
        i_tableControl.getSelEngine()->SelMouseButtonUp( i_event );

    if ( !m_bActive )
        return SkipFunction;
    m_bActive = false;
    return DeactivateFunction;
}
//==================================================================================================================
//= ColumnSortHandler
//==================================================================================================================
//------------------------------------------------------------------------------------------------------------------
/// Column sorting needs no mouse-move handling; the event is never consumed.
FunctionResult ColumnSortHandler::handleMouseMove( ITableControl& i_tableControl, MouseEvent const & i_event )
{
    (void)i_tableControl;
    (void)i_event;
    return SkipFunction;
}
//------------------------------------------------------------------------------------------------------------------
/** Arms a sort operation when a column header is pressed and the model
    supports sorting; the actual sort is triggered on mouse-up over the
    same header.
*/
FunctionResult ColumnSortHandler::handleMouseDown( ITableControl& i_tableControl, MouseEvent const & i_event )
{
    if ( m_nActiveColumn != COL_INVALID )
    {
        OSL_ENSURE( false, "ColumnSortHandler::handleMouseDown: called while already active - suspicious!" );
        return ContinueFunction;
    }

    // without sort support at the model, there is nothing for us to do
    if ( i_tableControl.getModel()->getSortAdapter() == NULL )
        return SkipFunction;

    TableCell const tableCell( i_tableControl.hitTest( i_event.GetPosPixel() ) );
    bool const bHitColumnHeader =   ( tableCell.nRow == ROW_COL_HEADERS )
                                &&  ( tableCell.nColumn >= 0 );
    if ( !bHitColumnHeader )
        return SkipFunction;

    // TODO: ensure the column header is rendered in some special way, indicating its current state
    m_nActiveColumn = tableCell.nColumn;
    return ActivateFunction;
}
}
//------------------------------------------------------------------------------------------------------------------
/** Completes a sort request: when the button is released over the same
    column header that armed this handler, the model's sort adapter is
    asked to sort by that column - toggling the direction if the column is
    already the current sort column.  Always deactivates the handler.
*/
FunctionResult ColumnSortHandler::handleMouseUp( ITableControl& i_tableControl, MouseEvent const & i_event )
{
    if ( m_nActiveColumn == COL_INVALID )
        return SkipFunction;

    TableCell const tableCell( i_tableControl.hitTest( i_event.GetPosPixel() ) );
    if ( ( tableCell.nRow == ROW_COL_HEADERS ) && ( tableCell.nColumn == m_nActiveColumn ) )
    {
        ITableDataSort* pSort = i_tableControl.getModel()->getSortAdapter();
        ENSURE_OR_RETURN( pSort != NULL, "ColumnSortHandler::handleMouseUp: somebody is mocking with us!", DeactivateFunction );
            // in handleMouseButtonDown, the model claimed to have sort support ...

        ColumnSortDirection eSortDirection = ColumnSortAscending;
        ColumnSort const aCurrentSort = pSort->getCurrentSortOrder();
        if ( aCurrentSort.nColumnPos == m_nActiveColumn )
            // invert existing sort order
            eSortDirection = ( aCurrentSort.eSortDirection == ColumnSortAscending ) ? ColumnSortDescending : ColumnSortAscending;

        pSort->sortByColumn( m_nActiveColumn, eSortDirection );
    }

    m_nActiveColumn = COL_INVALID;
    return DeactivateFunction;
}
}
//......................................................................................................................
} } // namespace svt::table
//......................................................................................................................
| 4,330 |
533 | #include "saber/core/context.h"
#include "saber/funcs/conv.h"
#include "saber/core/tensor_op.h"
#include "saber/saber_types.h"
#include "test_saber_func.h"
#include "saber/funcs/debug.h"
#include "test/saber/conv_func_helper.h"
#include <vector>
#if defined(USE_X86_PLACE)
#include "saber/funcs/impl/x86/kernel/jit_generator.h"
#endif
using namespace anakin::saber;
#define BASIC_TEST true
// Count how many of the `size` element pairs in src1/src2 differ by more
// than `max_ratio` in relative terms.  A non-positive max_ratio falls back
// to 0.1.  With `signed_input`, absolute differences of at most 1 are
// tolerated (quantization slack).  Winograd results are never compared:
// it is a known issue that winograd convolution is not bitwise identical
// to direct convolution, so `wino` short-circuits to 0.
template <typename dtype>
int count_diff(const dtype* src1, const dtype* src2,
               int size, double max_ratio,
               bool signed_input = false, bool wino = false) {
    if (wino) {
        return 0;
    }
    double const threshold = (max_ratio <= 0) ? 0.1 : max_ratio;
    int mismatches = 0;
    for (int idx = 0; idx < size; ++idx) {
        if (signed_input && (fabs(src1[idx] - src2[idx]) <= 1))
            continue;
        double const rel = fabs(src1[idx] - src2[idx]) / fabs(src1[idx] + src2[idx] + 1e-12);
        if (rel > threshold)
            ++mismatches;
    }
    return mismatches;
}
// When true, the tests below replace their random tensors with tensors
// recorded to disk (see the load_tensor_in_io_format calls).
const bool g_test_from_file = false;
#ifdef USE_X86_PLACE
/** INT8 NHWC convolution correctness test (x86).

    Builds a convolution problem with random (or, when g_test_from_file is
    set, disk-recorded) data, quantizes the NCHW fp32 input to int8/uint8
    and reorders it to NHWC, runs saber's INT8 Conv, reorders the result
    back to NCHW, and cross-checks it against the fp32 reference
    conv_basic_check.  Returns 0 when the mismatch ratio stays below 15%,
    otherwise dumps tensors and aborts via LOG(FATAL).
*/
template<typename TargetType, typename TargetType_H ,DataType OutPutDtype>
int test_conv_results_nhwc(int group,
                           int input_num, int in_channels, int height, int width,
                           int out_channels, int kernel_h, int kernel_w,
                           int stride_h, int stride_w, int dilation_h, int dilation_w,
                           int pad_h, int pad_w, bool bias_term, bool with_relu,
                           SaberImplStrategy strategy, ImplEnum imp,bool is_unsigned=true) {

    LOG(INFO)<< " conv param: "
             << " input_num = " << input_num
             << " in_channels = " << in_channels
             << " height = " << height
             << " width = " << width
             << " group = " << group
             << " pad_h = " << pad_h
             << " pad_w = " << pad_w
             << " stride_h = " << stride_h
             << " stride_w = " << stride_w
             << " dilation_h = " << dilation_h
             << " dilation_w = " << dilation_w
             << " kernel_h = " << kernel_h
             << " kernel_w = " << kernel_w
             << " out_channels = " << out_channels
             << " bias_term = " << (bias_term ? "true" : "false");

    float input_max=3.f;
    Shape input_nhwc({input_num, height, width, in_channels}, Layout_NHWC);
    Shape input_nchw({input_num, in_channels, height, width}, Layout_NCHW);
    Shape weights_s({out_channels, in_channels/group, kernel_h, kernel_w}, Layout_NCHW);
    Shape bias_s({1, out_channels, 1, 1}, Layout_NCHW);

    // standard convolution output geometry
    int out_height = (pad_h * 2 + height - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
    int out_width = (pad_w * 2 + width - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
    Shape output_nhwc({input_num, out_height, out_width, out_channels}, Layout_NHWC);
    Shape output_nchw({input_num, out_channels, out_height, out_width}, Layout_NCHW);

    // init input Tensor: quantized data lives in NHWC (input_dev), the fp32
    // reference copy stays NCHW (input_host)
    Tensor<TargetType> input_dev;
    Tensor<TargetType_H> input_host;
    Tensor<TargetType> input_dev_temp;
    if (is_unsigned) {
        input_dev.re_alloc(input_nhwc, AK_UINT8);
        input_dev_temp.re_alloc(input_nchw, AK_UINT8);
    }else{
        input_dev.re_alloc(input_nhwc, AK_INT8);
        input_dev_temp.re_alloc(input_nchw, AK_INT8);
    }
    input_host.re_alloc(input_nchw, AK_FLOAT);
    bool nothing_flag = false;
    std::string nothing_str = "";

    // unsigned inputs must be non-negative before quantization
    if (is_unsigned) {
        fill_tensor_rand(input_host, 0.f, input_max);
//        fill_tensor_const(input_host,input_max);
    }else{
        fill_tensor_rand(input_host, -input_max, input_max);
//        fill_tensor_const(input_host,input_max);
    }
    input_host.set_scale({input_max/127.f});
    if (g_test_from_file) {
        load_tensor_in_io_format(input_host, nothing_flag, nothing_str,
                                 "record+ConvBatchnormScale+conv2d#10(conv2d_2)+in+0+1_32_112_112_+nchw+ak_float+0.txt");
        float max_input = utils::ScaleUtils::get_fp32_max((float *) input_host.data(), input_host.valid_size());
        input_host.set_scale({max_input / 127.f});
    }
    // quantize in NCHW, then reorder to the NHWC device tensor
    if (is_unsigned) {
        utils::ScaleUtils::scale_fp32_uint8(input_dev_temp, input_host);
    }else{
        utils::ScaleUtils::scale_fp32_int8(input_dev_temp, input_host);
    }
    reorder_nhwc_nchw(input_dev_temp,input_dev);
    input_dev.set_scale(input_host.get_scale());
//    LOG(INFO) << input_dev.get_scale()[0];

    // init weights Tensor (kept in fp32; the op quantizes internally)
    Tensor<TargetType> weights_dev;
    Tensor<TargetType_H> weights_host;
    weights_dev.re_alloc(weights_s, AK_FLOAT);
    weights_host.re_alloc(weights_s, AK_FLOAT);
    fill_tensor_rand(weights_dev,-input_max,input_max);
//    fill_tensor_const(weights_dev, input_max);//
    if (g_test_from_file) {
        load_tensor_in_io_format(weights_dev, nothing_flag, nothing_str,
                                 "record+weigths+conv_weights+out+2+16_32_1_1_+nchw+ak_float+0.txt");
    }
    weights_host.copy_from(weights_dev);

    Tensor<TargetType> bias_dev;
    Tensor<TargetType_H> bias_host;
    if (bias_term) {
        bias_dev.re_alloc(bias_s, AK_FLOAT);
        bias_host.re_alloc(bias_s, AK_FLOAT);
        fill_tensor_rand(bias_dev,-input_max,input_max);
//        fill_tensor_const(bias_dev,input_max);
        if (g_test_from_file) {
            load_tensor_in_io_format(bias_dev, nothing_flag, nothing_str,
                                     "record+bias+conv_bias+out+2+1_16_1_1_+nchw+ak_float+0.txt");
        }
        bias_host.copy_from(bias_dev);
    }

    Tensor<TargetType> output_dev(output_nhwc, OutPutDtype);
    if (OutPutDtype == AK_UINT8 || OutPutDtype == AK_INT8) {
        output_dev.set_scale({in_channels * kernel_h * kernel_w * input_max / 127.f});
    }
    if (g_test_from_file) {
        // NOTE(review): the three set_scale calls below each overwrite the
        // previous one; only the final scale ({1}) takes effect.
        Tensor<X86> temp_tensor;
        load_tensor_in_io_format(temp_tensor, nothing_flag, nothing_str,
                                 "../mobilnet_v2_fp32/record+ConvBatchnormScale+conv2d#10(conv2d_2)+in+0+1_32_112_112_+nchw+ak_float+0.txt");
        float max_output = utils::ScaleUtils::get_fp32_max((float *) temp_tensor.data(), temp_tensor.valid_size());
        output_dev.set_scale({max_output / 127.f});
        output_dev.set_scale({0.027173});
        output_dev.set_scale({1});
    }
    Tensor<TargetType_H> output_host(output_nchw);
    Tensor<TargetType_H> check_host(output_nchw);

    Context<TargetType> ctx1(0, 1, 1);

    ConvParam<TargetType> param(group, pad_h, pad_w,
                                stride_h, stride_w,
                                dilation_h, dilation_w,
                                &weights_dev, &bias_dev);
    if (with_relu) {
        ActivationParam<TargetType> act_param(Active_relu);
        param.activation_param = act_param;
    }

    Conv<TargetType, AK_INT8> conv;
    std::vector<Tensor<TargetType>* > input_v;
    std::vector<Tensor<TargetType>* > output_v;
    input_v.push_back(&input_dev);
    output_v.push_back(&output_dev);
    conv.compute_output_shape(input_v, output_v, param);

    SABER_CHECK(conv.init(input_v, output_v, param, strategy, imp, ctx1));
    SABER_CHECK(conv(input_v, output_v, param, ctx1));

    typename Tensor<TargetType>::API::stream_t stream = ctx1.get_compute_stream();
    output_v[0]->record_event(stream);
    output_v[0]->sync();

    // bring the NHWC device result back to NCHW for comparison
    reorder_nhwc_nchw(output_dev,output_host);

    // fp32 reference result
    conv_basic_check<TargetType_H>(input_host, check_host,
                                   (const float*)weights_host.data(), (const float*)bias_host.data(),
                                   group, kernel_w, kernel_h, stride_w, stride_h,
                                   dilation_w, dilation_h, pad_w, pad_h, bias_term,
                                   param.activation_param.has_active);
    double max_ratio = 0.0;
    double max_diff = 0.0;
    tensor_cmp_host_mlu((const float*)output_host.data(), (const float*)check_host.data(),
                        check_host.valid_size(), max_ratio, max_diff);
    if (max_ratio< 0.15) {
        //LOG(INFO) << " PASS!!! max_ratio = " << max_ratio << " max_diff = " << max_diff;
        write_tensorfile(output_host,"output_host");
        write_tensorfile(check_host,"check_host");
        LOG(INFO) << "PASS!!! ratio = " << max_ratio <<" in "<<output_host.valid_size();
        return 0;
    } else {
        write_tensorfile(output_dev,"output_dev",false);
        write_tensorfile(output_host,"output_host");
        write_tensorfile(check_host,"check_host");
        //LOG(FATAL) << "FAIL!!! max_ratio = " << max_ratio << " max_diff = " << max_diff
        LOG(FATAL) << "FAIL!!! ratio = " << max_ratio<<" in "<<output_host.valid_size()<<","
                   << " conv param: "
                   << " input_num = " << input_num
                   << " in_channels = " << in_channels
                   << " height = " << height
                   << " width = " << width
                   << " group = " << group
                   << " pad_h = " << pad_h
                   << " pad_w = " << pad_w
                   << " stride_h = " << stride_h
                   << " stride_w = " << stride_w
                   << " dilation_h = " << dilation_h
                   << " dilation_w = " << dilation_w
                   << " kernel_h = " << kernel_h
                   << " kernel_w = " << kernel_w
                   << " out_channels = " << out_channels;
        return -1;
    }
}
#endif
/** INT8 NCHW convolution correctness test (NV, sm_61 only).

    Runs saber's INT8 Conv on random fp32 data and compares the result
    against the fp32 reference conv_basic_check, counting elements whose
    relative difference exceeds 20%; the test passes when fewer than 2% of
    elements mismatch.

    Bug fix: the original compared the output against `check_host_int8`, a
    tensor that was declared but never allocated or filled - its
    valid_size() was 0, so count_diff scanned nothing and the check always
    passed.  The comparison now uses `check_host`, the tensor actually
    produced by conv_basic_check; the unused `check_host_int8` is removed.
*/
template<typename TargetType, typename TargetType_H>
int test_conv_results(int group,
                      int input_num, int in_channels, int height, int width,
                      int out_channels, int kernel_h, int kernel_w,
                      int stride_h, int stride_w, int dilation_h, int dilation_w,
                      int pad_h, int pad_w, bool bias_term, bool with_relu,
                      SaberImplStrategy strategy, ImplEnum imp) {

    LOG(INFO)<< " conv param: "
             << " input_num = " << input_num
             << " in_channels = " << in_channels
             << " height = " << height
             << " width = " << width
             << " group = " << group
             << " pad_h = " << pad_h
             << " pad_w = " << pad_w
             << " stride_h = " << stride_h
             << " stride_w = " << stride_w
             << " dilation_h = " << dilation_h
             << " dilation_w = " << dilation_w
             << " kernel_h = " << kernel_h
             << " kernel_w = " << kernel_w
             << " out_channels = " << out_channels
             << " bias_term = " << (bias_term ? "true" : "false")
             << " with_relu = " << (with_relu ? "true" : "false");

    Shape input_s({input_num, in_channels, height, width}, Layout_NCHW);
    Shape weights_s({out_channels, in_channels, kernel_h, kernel_w}, Layout_NCHW);
    Shape bias_s({1, out_channels, 1, 1}, Layout_NCHW);

    // init input Tensor (fp32 data plus the quantization scale the op uses)
    Tensor<TargetType> input_dev;
    Tensor<TargetType_H> input_host;
    input_dev.re_alloc(input_s, AK_FLOAT);
    input_host.re_alloc(input_s, AK_FLOAT);
    fill_tensor_rand(input_dev, -10.0f, 10.0f);
    input_host.copy_from(input_dev);
    input_dev.set_scale({10.1f / 128});

    // init weights Tensor
    Tensor<TargetType> weights_dev;
    Tensor<TargetType_H> weights_host;
    weights_dev.re_alloc(weights_s, AK_FLOAT);
    weights_host.re_alloc(weights_s, AK_FLOAT);
    fill_tensor_rand(weights_dev, -10.0f, 10.0f);
    weights_host.copy_from(weights_dev);

    Tensor<TargetType> bias_dev;
    Tensor<TargetType_H> bias_host;
    if (bias_term) {
        bias_dev.re_alloc(bias_s, AK_FLOAT);
        bias_host.re_alloc(bias_s, AK_FLOAT);
        fill_tensor_rand(bias_dev, -10.0f, 10.0f);
        bias_host.copy_from(bias_dev);
    }

    Tensor<TargetType> output_dev;
    Tensor<TargetType_H> output_host;
    Tensor<TargetType_H> check_host;

    Context<TargetType> ctx1(0, 1, 1);
    int generate_arch = Env<NV>::cur_env()[ctx1.get_device_id()]._info._generate_arch;
    // only support 61 arch for now.
    bool arch_check = (generate_arch == 61);
    if (!arch_check) {
        LOG(INFO) << "device not support int8 op!!";
        return 0;
    }

    ActivationParam<TargetType> act_param(Active_relu);
    ConvParam<TargetType> param(group, pad_h, pad_w,
                                stride_h, stride_w,
                                dilation_h, dilation_w,
                                &weights_dev, &bias_dev);
    if (with_relu) {
        param.activation_param = act_param;
    }

    Conv<TargetType, AK_INT8> conv;
    std::vector<Tensor<TargetType>* > input_v;
    std::vector<Tensor<TargetType>* > output_v;
    input_v.push_back(&input_dev);
    output_v.push_back(&output_dev);
    conv.compute_output_shape(input_v, output_v, param);
    output_dev.re_alloc(output_dev.valid_shape(), AK_FLOAT);

    conv.init(input_v, output_v, param, strategy, imp, ctx1);
    // weights must be transformed to the int8 layout the implementation expects
    conv.trans_weights(*param.mutable_weight(), *param.mutable_bias(),
                       param.pad_h, param.pad_w, param.dilation_h, param.dilation_w,
                       param.stride_h, param.stride_w, param.group, imp);

    conv(input_v, output_v, param, ctx1);

    typename Tensor<TargetType>::API::stream_t stream = ctx1.get_compute_stream();
    output_v[0]->record_event(stream);
    output_v[0]->sync();
    output_host.re_alloc(output_dev.valid_shape(), AK_FLOAT);
    output_host.copy_from(output_dev);

    // fp32 reference result
    check_host.re_alloc(output_host.valid_shape(), AK_FLOAT);
    conv_basic_check<TargetType_H>(input_host, check_host,
                                   (const float*)weights_host.data(), (const float*)bias_host.data(),
                                   group, kernel_w, kernel_h, stride_w, stride_h,
                                   dilation_w, dilation_h, pad_w, pad_h, bias_term,
                                   param.activation_param.has_active);

    // compare against the reference actually computed above (was the empty
    // check_host_int8 tensor, which made this check vacuous)
    int count = count_diff((const float*)output_host.data(),
                           (const float*)check_host.data(), check_host.valid_size(), 2e-1);

    if ((double)count / output_host.valid_size() < 0.02) {
        LOG(INFO) << "PASS!!! count = " << count;
        return 0;
    } else {
        write_tensorfile(output_dev, "int8_output.txt");
        write_tensorfile(check_host, "fp32_output.txt");
        LOG(FATAL) << "FAIL!!! count = " << count
                   << " conv param: "
                   << " input_num = " << input_num
                   << " in_channels = " << in_channels
                   << " height = " << height
                   << " width = " << width
                   << " group = " << group
                   << " pad_h = " << pad_h
                   << " pad_w = " << pad_w
                   << " stride_h = " << stride_h
                   << " stride_w = " << stride_w
                   << " dilation_h = " << dilation_h
                   << " dilation_w = " << dilation_w
                   << " kernel_h = " << kernel_h
                   << " kernel_w = " << kernel_w
                   << " out_channels = " << out_channels;
        return -1;
    }
}
/**
 * Runs a single int8-in / int8-out (s8s8) convolution on the device and
 * computes a float32 reference result on the host.
 *
 * The device path quantizes with fixed scales (input 10.1/128, output
 * 200.1/128) chosen to match the [-10, 10] random fill below.  The host
 * reference is produced by conv_basic_check in fp32.
 *
 * NOTE(review): the int8-vs-fp32 comparison (count_diff with a 2e-1
 * tolerance and a 2% mismatch budget) is currently disabled; this function
 * only verifies that the device path runs to completion.  Re-enable the
 * check once the tolerance is settled.
 *
 * @return 0 on success or when the device arch does not support int8
 *         (only sm_61 is supported); -1 is reserved for a future mismatch
 *         path.
 */
template<typename TargetType, typename TargetType_H>
int test_conv_results_s8s8(int group,
                           int input_num, int in_channels, int height, int width,
                           int out_channels, int kernel_h, int kernel_w,
                           int stride_h, int stride_w, int dilation_h, int dilation_w,
                           int pad_h, int pad_w, bool bias_term, bool with_relu,
                           SaberImplStrategy strategy, ImplEnum imp) {
    LOG(INFO) << " conv param: "
              << " input_num = " << input_num
              << " in_channels = " << in_channels
              << " height = " << height
              << " width = " << width
              << " group = " << group
              << " pad_h = " << pad_h
              << " pad_w = " << pad_w
              << " stride_h = " << stride_h
              << " stride_w = " << stride_w
              << " dilation_h = " << dilation_h
              << " dilation_w = " << dilation_w
              << " kernel_h = " << kernel_h
              << " kernel_w = " << kernel_w
              << " out_channels = " << out_channels
              << " bias_term = " << (bias_term ? "true" : "false")
              << " with_relu = " << (with_relu ? "true" : "false");

    Shape input_s({input_num, in_channels, height, width}, Layout_NCHW);
    Shape weights_s({out_channels, in_channels, kernel_h, kernel_w}, Layout_NCHW);
    Shape bias_s({1, out_channels, 1, 1}, Layout_NCHW);

    // Input tensor: random fp32 data in [-10, 10]; the device copy carries the
    // quantization scale used by the int8 kernel.
    Tensor<TargetType> input_dev;
    Tensor<TargetType_H> input_host;
    input_dev.re_alloc(input_s, AK_FLOAT);
    input_host.re_alloc(input_s, AK_FLOAT);
    fill_tensor_rand(input_dev, -10.0f, 10.0f);
    input_host.copy_from(input_dev);
    input_dev.set_scale({10.1f / 128});

    // Weights tensor, random in [-10, 10].
    Tensor<TargetType> weights_dev;
    Tensor<TargetType_H> weights_host;
    weights_dev.re_alloc(weights_s, AK_FLOAT);
    weights_host.re_alloc(weights_s, AK_FLOAT);
    fill_tensor_rand(weights_dev, -10.0f, 10.0f);
    weights_host.copy_from(weights_dev);

    // Optional bias.
    Tensor<TargetType> bias_dev;
    Tensor<TargetType_H> bias_host;
    if (bias_term) {
        bias_dev.re_alloc(bias_s, AK_FLOAT);
        bias_host.re_alloc(bias_s, AK_FLOAT);
        fill_tensor_rand(bias_dev, -10.0f, 10.0f);
        bias_host.copy_from(bias_dev);
    }

    Tensor<TargetType> output_dev;
    output_dev.set_scale({200.1f / 128});
    Tensor<TargetType_H> output_host;
    Tensor<TargetType_H> check_host;
    Context<TargetType> ctx1(0, 1, 1);

    int generate_arch = Env<NV>::cur_env()[ctx1.get_device_id()]._info._generate_arch;
    // only support 61 arch for now.
    bool arch_check = (generate_arch == 61);
    if (!arch_check) {
        LOG(INFO) << "device not support int8 op!!";
        return 0;
    }

    ActivationParam<TargetType> act_param(Active_relu);
    ConvParam<TargetType> param(group, pad_h, pad_w,
                                stride_h, stride_w,
                                dilation_h, dilation_w,
                                &weights_dev, &bias_dev);
    if (with_relu) {
        param.activation_param = act_param;
    }

    Conv<TargetType, AK_INT8> conv;
    std::vector<Tensor<TargetType>* > input_v;
    std::vector<Tensor<TargetType>* > output_v;
    input_v.push_back(&input_dev);
    output_v.push_back(&output_dev);
    conv.compute_output_shape(input_v, output_v, param);
    // Device output is produced as int8 (s8s8 path).
    output_dev.re_alloc(output_dev.valid_shape(), AK_INT8);

    conv.init(input_v, output_v, param, strategy, imp, ctx1);
    // The int8 kernels need the weights laid out in their own format.
    conv.trans_weights(*param.mutable_weight(), *param.mutable_bias(),
                       param.pad_h, param.pad_w, param.dilation_h, param.dilation_w,
                       param.stride_h, param.stride_w, param.group, imp);

    conv(input_v, output_v, param, ctx1);

    typename Tensor<TargetType>::API::stream_t stream = ctx1.get_compute_stream();
    output_v[0]->record_event(stream);
    output_v[0]->sync();

    output_host.re_alloc(output_dev.valid_shape(), AK_INT8);
    output_host.copy_from(output_dev);

    // fp32 reference on the host.
    check_host.re_alloc(output_host.valid_shape(), AK_FLOAT);
    conv_basic_check<TargetType_H>(input_host, check_host,
                                   (const float*)weights_host.data(), (const float*)bias_host.data(),
                                   group, kernel_w, kernel_h, stride_w, stride_h,
                                   dilation_w, dilation_h, pad_w, pad_h, bias_term,
                                   param.activation_param.has_active);

    // TODO(review): compare output_host against check_host (previously done via
    // count_diff with tolerance 2e-1 and a 2% mismatch budget) and return -1 on
    // failure.  The check was disabled in the original code.
    // BUGFIX: the original version had all return statements commented out, so
    // control flowed off the end of a non-void function (undefined behavior).
    return 0;
}
// Sweeps the int8 s8s8 convolution over a grid of shapes and conv params
// (CUDA only, gated by BASIC_TEST).  Groups > 1 are not swept here.
TEST(TestSaberFunc, test_saber_conv_int8_results) {
#ifdef USE_CUDA
    Env<NV>::env_init();
    Env<NVHX86>::env_init();
#endif
    // Parameter grid — identical coverage to the original sweep.
    std::vector<int> kernel_h_v{1, 3};
    std::vector<int> kernel_w_v{1, 3};
    std::vector<int> pad_h_v{0, 1};
    std::vector<int> pad_w_v{0, 1};
    std::vector<int> stride_h_v{1, 2};
    std::vector<int> stride_w_v{1, 2};
    std::vector<int> dilation_h_v{1};
    std::vector<int> dilation_w_v{1};
    std::vector<int> in_channels_v{16, 32};
    std::vector<int> out_channels_v{16, 32, 8};
    // group sweep intentionally disabled; group is fixed to 1 below.
    std::vector<int> in_h_v{28};
    std::vector<int> in_w_v{28};
    std::vector<int> input_num_v{1};
    std::vector<bool> bias_term_v{true};
    std::vector<bool> with_relu_v{true};
#ifdef USE_CUDA
    if (BASIC_TEST) {
        // Chained range-fors: one call per point of the cartesian product,
        // visited in the same order as the original nested loops.
        for (auto input_num : input_num_v)
        for (auto out_channels : out_channels_v)
        for (auto in_channels : in_channels_v)
        for (auto kernel_h : kernel_h_v)
        for (auto kernel_w : kernel_w_v)
        for (auto height : in_h_v)
        for (auto width : in_w_v)
        for (auto stride_h : stride_h_v)
        for (auto stride_w : stride_w_v)
        for (auto dilation_h : dilation_h_v)
        for (auto dilation_w : dilation_w_v)
        for (auto pad_h : pad_h_v)
        for (auto pad_w : pad_w_v)
        for (auto bias_term : bias_term_v)
        for (auto with_relu : with_relu_v) {
            test_conv_results_s8s8<NV, NVHX86>(1,
                                               input_num, in_channels, height, width,
                                               out_channels, kernel_h, kernel_w,
                                               stride_h, stride_w,
                                               dilation_h, dilation_w,
                                               pad_h, pad_w,
                                               bias_term, with_relu,
                                               SPECIFY, SABER_IMPL);
        }
    }
#endif
}
// Single hand-picked x86 int8 case: 1x1 stride-1 convolution on a
// 1x32x112x112 input, no bias, no relu.  Only runs on AVX512-core CPUs
// with the VNNI instruction set.
TEST(TestSaberFunc, test_saber_conv_int8_x86_results) {
#ifdef USE_X86_PLACE
    Env<X86>::env_init();
    const int group = 1;
    const int input_num = 1;
    const int in_channels = 32;
    const int height = 112;
    const int width = 112;
    const int out_channels = 16;
    const int kernel_h = 1;
    const int kernel_w = 1;
    const int stride_h = 1;
    const int stride_w = 1;
    const int dilation_h = 1;
    const int dilation_w = 1;
    const int pad_h = 0;
    const int pad_w = 0;
    const bool bias_term = false;
    const bool with_relu = false;
    if (jit::mayiuse(jit::avx512_core) && jit::mayiuse(jit::avx512_core_vnni)) {
        test_conv_results_nhwc<X86, X86, AK_FLOAT>(
            group,
            input_num, in_channels,
            height, width,
            out_channels, kernel_h, kernel_w,
            stride_h, stride_w,
            dilation_h, dilation_w,
            pad_h, pad_w, bias_term, with_relu,
            SPECIFY, SABER_IMPL, false);
    }
#endif
}
// Entry point: initialize the test harness, then run every registered test.
int main(int argc, const char** argv) {
    InitTest();
    RUN_ALL_TESTS(argv[0]);
    return 0;
}
| 14,093 |
1,970 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hudi.integ.testsuite.dag;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.integ.testsuite.dag.nodes.DagNode;
import org.apache.hudi.integ.testsuite.dag.nodes.InsertNode;
import org.apache.hudi.integ.testsuite.dag.nodes.UpsertNode;
import org.apache.hudi.integ.testsuite.dag.nodes.ValidateNode;
import org.apache.hudi.integ.testsuite.configuration.DeltaConfig.Config;
import org.apache.spark.api.java.JavaRDD;
/**
 * An implementation of {@link WorkflowDagGenerator}, that generates a complex workflowDag:
 * a bulk insert, followed by an upsert, followed by a validation node that cross-checks
 * the record counts written by both ancestors.
 */
public class ComplexDagGenerator implements WorkflowDagGenerator {

  @Override
  public WorkflowDag build() {
    // root node: 1000 records of 1000 bytes spread over 3 partitions
    DagNode root = new InsertNode(Config.newBuilder()
        .withNumRecordsToInsert(1000)
        .withNumInsertPartitions(3)
        .withRecordSize(1000).build());

    // child node1: mixed upsert (999 updates + 1000 inserts) into a single partition
    DagNode child1 = new UpsertNode(Config.newBuilder()
        .withNumRecordsToUpdate(999)
        .withNumRecordsToInsert(1000)
        .withNumUpsertFiles(1)
        .withNumUpsertPartitions(1)
        .withNumInsertPartitions(1)
        .withRecordSize(10000).build());

    // Validation function: checks the write statistics of the upsert node
    // (first parent) and the insert node (its parent in turn).
    Function<List<DagNode<JavaRDD<WriteStatus>>>, Boolean> function = (dagNodes) -> {
      DagNode<JavaRDD<WriteStatus>> parent1 = dagNodes.get(0);
      List<WriteStatus> statuses = parent1.getResult().collect();
      long totalRecordsTouched = countTouchedRecords(statuses);
      // upsert must have touched exactly its configured inserts + updates
      boolean b1 = totalRecordsTouched == parent1.getConfig().getNumRecordsInsert()
          + parent1.getConfig().getNumRecordsUpsert();
      // and must have written more files than the configured upsert file count
      boolean b2 = statuses.size() > parent1.getConfig().getNumUpsertFiles();

      DagNode<JavaRDD<WriteStatus>> parent2 = parent1.getParentNodes().get(0);
      List<WriteStatus> insertStatuses = parent2.getResult().collect();
      long insertRecordsTouched = countTouchedRecords(insertStatuses);
      // insert node writes numRecordsInsert records per insert partition
      boolean b3 = insertRecordsTouched == parent2.getConfig().getNumRecordsInsert()
          * parent2.getConfig().getNumInsertPartitions() + parent2.getConfig().getNumRecordsUpsert();
      // short-circuit && instead of the original bitwise &
      return b1 && b2 && b3;
    };

    // child node2: validation of the two ancestor write nodes
    DagNode child2 = new ValidateNode(Config.newBuilder().build(), function);

    // wire up root -> child1 -> child2 (parent links are set by addChildNode)
    root.addChildNode(child1);
    child1.addChildNode(child2);

    List<DagNode> rootNodes = new ArrayList<>();
    rootNodes.add(root);
    return new WorkflowDag(rootNodes);
  }

  /**
   * Sums inserted + updated record counts across all write statuses.
   * Returns 0 for an empty status list (the original reduce().get() threw).
   */
  private static long countTouchedRecords(List<WriteStatus> statuses) {
    return statuses.stream()
        .mapToLong(st -> st.getStat().getNumUpdateWrites() + st.getStat().getNumInserts())
        .sum();
  }
}
| 1,262 |
766 | /*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.tsunami.plugins.fingerprinters.web.crawl;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.net.HttpHeaders.LOCATION;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.tsunami.common.net.http.HttpStatus;
import okhttp3.mockwebserver.Dispatcher;
import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.RecordedRequest;
/**
 * A testing dispatcher that fakes a web server with the following endpoints:
 *
 * <ul>
 *   <li>"/redirect" always redirects to "/".
 *   <li>"/" always serves the given HTML response.
 *   <li>"/anchor-link" always serves response "anchor-link-response".
 *   <li>"/img-src" always serves response "img-src-response".
 *   <li>"/timeout" always serves response after 20 seconds.
 * </ul>
 *
 * Any other path yields a 404.
 */
final class FakeServerDispatcher extends Dispatcher {
  private final String rootPageBody;

  FakeServerDispatcher(String rootPageBody) {
    this.rootPageBody = checkNotNull(rootPageBody);
  }

  @Override
  public MockResponse dispatch(RecordedRequest request) {
    String path = request.getPath();
    if ("/redirect".equals(path)) {
      return new MockResponse().setResponseCode(HttpStatus.FOUND.code()).setHeader(LOCATION, "/");
    }
    if ("/".equals(path)) {
      return new MockResponse().setResponseCode(HttpStatus.OK.code()).setBody(rootPageBody);
    }
    if ("/anchor-link".equals(path)) {
      return new MockResponse()
          .setResponseCode(HttpStatus.OK.code())
          .setBody("anchor-link-response");
    }
    if ("/img-src".equals(path)) {
      return new MockResponse().setResponseCode(HttpStatus.OK.code()).setBody("img-src-response");
    }
    if ("/timeout".equals(path)) {
      // Delayed body to let crawler timeout handling be exercised.
      return new MockResponse()
          .setResponseCode(HttpStatus.OK.code())
          .setBody("timeout-response")
          .setBodyDelay(20, SECONDS);
    }
    // Unknown path.
    return new MockResponse().setResponseCode(404);
  }
}
| 839 |
593 | /**
* TLS-Attacker - A Modular Penetration Testing Framework for TLS
*
* Copyright 2014-2022 Ruhr University Bochum, Paderborn University, Hackmanit GmbH
*
* Licensed under Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0.txt
*/
package de.rub.nds.tlsattacker.core.protocol.preparator.extension;
import de.rub.nds.modifiablevariable.util.ArrayConverter;
import de.rub.nds.tlsattacker.core.protocol.message.extension.ExtendedRandomExtensionMessage;
import de.rub.nds.tlsattacker.core.protocol.serializer.extension.ExtendedRandomExtensionSerializer;
import de.rub.nds.tlsattacker.core.workflow.chooser.Chooser;
import de.rub.nds.tlsattacker.transport.ConnectionEndType;
import java.util.Random;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * Class which prepares an Extended Random Extension Message for handshake messages, as defined as in
 * https://tools.ietf.org/html/draft-rescorla-tls-extended-random-02
 */
public class ExtendedRandomExtensionPreparator extends ExtensionPreparator<ExtendedRandomExtensionMessage> {

    private static final Logger LOGGER = LogManager.getLogger();

    private final ExtendedRandomExtensionMessage message;

    public ExtendedRandomExtensionPreparator(Chooser chooser, ExtendedRandomExtensionMessage message,
        ExtendedRandomExtensionSerializer serializer) {
        super(chooser, message, serializer);
        this.message = message;
    }

    @Override
    public void prepareExtensionContent() {
        // Which extended random is sent depends on the role we currently play
        // in the handshake.  CLIENT and SERVER are mutually exclusive, so an
        // else-if chain is equivalent to the original pair of ifs.
        ConnectionEndType endType = chooser.getConnectionEndType();
        if (endType.equals(ConnectionEndType.CLIENT)) {
            LOGGER.debug("Preparing Client Extended Random of Extended Random Extension Message.");
            message.setExtendedRandom(chooser.getClientExtendedRandom());
            LOGGER.debug("Prepared the Client Extended Random with value "
                + ArrayConverter.bytesToHexString(message.getExtendedRandom().getValue()));
        } else if (endType.equals(ConnectionEndType.SERVER)) {
            LOGGER.debug("Preparing Server Extended Random of Extended Random Extension Message.");
            if (chooser.getServerExtendedRandom().length != chooser.getClientExtendedRandom().length) {
                // The server's extended random must mirror the client's length.
                LOGGER.debug("Extended Random of Client is not same length as Default Server Extended Random."
                    + " Generating fresh Server Extended Random of appropriate length.");
                message.setExtendedRandom(prepareExtendedRandom(chooser.getClientExtendedRandom().length));
            } else {
                message.setExtendedRandom(chooser.getServerExtendedRandom());
            }
            LOGGER.debug("Prepared the Server Extended Random with value "
                + ArrayConverter.bytesToHexString(message.getExtendedRandom().getValue()));
        }
        prepareExtendedRandomLength(message);
    }

    /** Writes the byte length of the prepared extended random into the message. */
    private void prepareExtendedRandomLength(ExtendedRandomExtensionMessage extensionMessage) {
        extensionMessage.setExtendedRandomLength(extensionMessage.getExtendedRandom().getValue().length);
        LOGGER.debug("ExtendedRandomLength: " + extensionMessage.getExtendedRandomLength().getValue());
    }

    /** Generates {@code length} fresh random bytes (non-cryptographic PRNG, as before). */
    private byte[] prepareExtendedRandom(int length) {
        byte[] freshBytes = new byte[length];
        new Random().nextBytes(freshBytes);
        return freshBytes;
    }
}
| 1,207 |
711 | <reponame>TheVinhLuong102/AutoML-SMAC3<gh_stars>100-1000
import logging
import typing
import numpy as np
from smac.intensification.abstract_racer import AbstractRacer, RunInfoIntent
from smac.intensification.successive_halving import _SuccessiveHalving
from smac.optimizer.epm_configuration_chooser import EPMChooser
from smac.intensification.parallel_scheduling import ParallelScheduler
from smac.stats.stats import Stats
from smac.configspace import Configuration
from smac.runhistory.runhistory import RunHistory
from smac.runhistory.runhistory import RunValue, RunInfo, StatusType # noqa: F401
from smac.utils.io.traj_logging import TrajLogger
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, ML4AAD"
__license__ = "3-clause BSD"
class _Hyperband(_SuccessiveHalving):
    """ Races multiple challengers against an incumbent using Hyperband method

    This class contains the logic to implement:
    "BOHB: Robust and Efficient Hyperparameter Optimization at Scale" (Falkner et al. 2018)

    Objects from this class are meant to run on a single worker. `Hyperband` method,
    creates multiple _Hyperband instances to allow parallelism, and for this reason
    `Hyperband` should be considered the user interface whereas `_Hyperband` a private
    class with the actual implementation of the method.

    Parameters
    ----------
    stats: smac.stats.stats.Stats
        stats object
    traj_logger: smac.utils.io.traj_logging.TrajLogger
        TrajLogger object to log all new incumbents
    rng : np.random.RandomState
    instances : typing.List[str]
        list of all instance ids
    instance_specifics : typing.Mapping[str,np.ndarray]
        mapping from instance name to instance specific string
    cutoff : typing.Optional[int]
        runtime cutoff of TA runs
    deterministic : bool
        whether the TA is deterministic or not
    initial_budget : typing.Optional[float]
        minimum budget allowed for 1 run of successive halving
    max_budget : typing.Optional[float]
        maximum budget allowed for 1 run of successive halving
    eta : float
        'halving' factor after each iteration in a successive halving run. Defaults to 3
    run_obj_time : bool
        whether the run objective is runtime or not (if true, apply adaptive capping)
    n_seeds : typing.Optional[int]
        Number of seeds to use, if TA is not deterministic. Defaults to None, i.e., seed is set as 0
    instance_order : typing.Optional[str]
        how to order instances. Can be set to: [None, shuffle_once, shuffle]
        * None - use as is given by the user
        * shuffle_once - shuffle once and use across all SH run (default)
        * shuffle - shuffle before every SH run
    adaptive_capping_slackfactor : float
        slack factor of adaptive capping (factor * adaptive cutoff)
    min_chall: int
        minimal number of challengers to be considered (even if time_bound is exhausted earlier). This class will
        raise an exception if a value larger than 1 is passed.
    incumbent_selection: str
        How to select incumbent in successive halving. Only active for real-valued budgets.
        Can be set to: [highest_executed_budget, highest_budget, any_budget]
        * highest_executed_budget - incumbent is the best in the highest budget run so far (default)
        * highest_budget - incumbent is selected only based on the highest budget
        * any_budget - incumbent is the best on any budget i.e., best performance regardless of budget
    identifier: int
        Allows to identify the _Hyperband instance in case of multiple ones
    """
    def __init__(self,
                 stats: Stats,
                 traj_logger: TrajLogger,
                 rng: np.random.RandomState,
                 instances: typing.List[str],
                 instance_specifics: typing.Mapping[str, np.ndarray] = None,
                 cutoff: typing.Optional[float] = None,
                 deterministic: bool = False,
                 initial_budget: typing.Optional[float] = None,
                 max_budget: typing.Optional[float] = None,
                 eta: float = 3,
                 run_obj_time: bool = True,
                 n_seeds: typing.Optional[int] = None,
                 instance_order: str = 'shuffle_once',
                 adaptive_capping_slackfactor: float = 1.2,
                 min_chall: int = 1,
                 incumbent_selection: str = 'highest_executed_budget',
                 identifier: int = 0,
                 ) -> None:
        super().__init__(stats=stats,
                         traj_logger=traj_logger,
                         rng=rng,
                         instances=instances,
                         instance_specifics=instance_specifics,
                         cutoff=cutoff,
                         deterministic=deterministic,
                         initial_budget=initial_budget,
                         max_budget=max_budget,
                         eta=eta,
                         num_initial_challengers=None,  # initial challengers passed as None
                         run_obj_time=run_obj_time,
                         n_seeds=n_seeds,
                         instance_order=instance_order,
                         adaptive_capping_slackfactor=adaptive_capping_slackfactor,
                         min_chall=min_chall,
                         incumbent_selection=incumbent_selection,)
        self.identifier = identifier
        # logger name embeds the identifier so parallel instances are distinguishable
        self.logger = logging.getLogger(
            self.__module__ + "." + str(self.identifier) + "." + self.__class__.__name__)
        # to track completed hyperband iterations
        self.hb_iters = 0
        # the currently running successive-halving bracket; created lazily in _update_stage
        self.sh_intensifier = None  # type: _SuccessiveHalving # type: ignore[assignment]
    def process_results(self,
                        run_info: RunInfo,
                        incumbent: typing.Optional[Configuration],
                        run_history: RunHistory,
                        time_bound: float,
                        result: RunValue,
                        log_traj: bool = True,
                        ) -> \
            typing.Tuple[Configuration, float]:
        """
        The intensifier stage will be updated based on the results/status
        of a configuration execution.
        Also, an incumbent will be determined.

        Parameters
        ----------
        run_info : RunInfo
            A RunInfo containing the configuration that was evaluated
        incumbent : typing.Optional[Configuration]
            Best configuration seen so far
        run_history : RunHistory
            stores all runs we ran so far
        time_bound : float
            time in [sec] available to perform intensify
        result: RunValue
            Contain the result (status and other metadata) of exercising
            a challenger/incumbent.
        log_traj: bool
            Whether to log changes of incumbents in trajectory

        Returns
        -------
        incumbent: Configuration
            current (maybe new) incumbent configuration
        inc_perf: float
            empirical performance of incumbent configuration
        """
        # run 1 iteration of successive halving
        incumbent, inc_perf = self.sh_intensifier.process_results(run_info=run_info,
                                                                  incumbent=incumbent,
                                                                  run_history=run_history,
                                                                  time_bound=time_bound,
                                                                  result=result,
                                                                  log_traj=log_traj)
        self.num_run += 1
        # reset if SH iteration is over, else update for next iteration
        if self.sh_intensifier.iteration_done:
            self._update_stage()
        return incumbent, inc_perf
    def get_next_run(self,
                     challengers: typing.Optional[typing.List[Configuration]],
                     incumbent: Configuration,
                     chooser: typing.Optional[EPMChooser],
                     run_history: RunHistory,
                     repeat_configs: bool = True,
                     num_workers: int = 1,
                     ) -> typing.Tuple[RunInfoIntent, RunInfo]:
        """
        Selects which challenger to use based on the iteration stage and set the iteration parameters.
        First iteration will choose configurations from the ``chooser`` or input challengers,
        while the later iterations pick top configurations from the previously selected challengers in that iteration

        If no new run is available, the method returns a configuration of None.

        Parameters
        ----------
        challengers : typing.List[Configuration]
            promising configurations
        incumbent: Configuration
            incumbent configuration
        chooser : smac.optimizer.epm_configuration_chooser.EPMChooser
            optimizer that generates next configurations to use for racing
        run_history : smac.runhistory.runhistory.RunHistory
            stores all runs we ran so far
        repeat_configs : bool
            if False, an evaluated configuration will not be generated again
        num_workers: int
            the maximum number of workers available
            at a given time.

        Returns
        -------
        intent: RunInfoIntent
            Indicator of how to consume the RunInfo object
        run_info: RunInfo
            An object that encapsulates necessary information for a config run
        """
        if num_workers > 1:
            raise ValueError("HyperBand does not support more than 1 worker, yet "
                             "the argument num_workers to get_next_run is {}".format(
                                 num_workers
                             ))
        if not hasattr(self, 's'):
            # initialize tracking variables (first call ever on this instance)
            self._update_stage()
        # sampling from next challenger marks the beginning of a new iteration
        self.iteration_done = False
        # delegate the actual config selection to the current SH bracket
        intent, run_info = self.sh_intensifier.get_next_run(
            challengers=challengers,
            incumbent=incumbent,
            chooser=chooser,
            run_history=run_history,
            repeat_configs=self.sh_intensifier.repeat_configs
        )
        # For testing purposes, this attribute highlights whether a
        # new challenger is proposed or not. Not required from a functional
        # perspective
        self.new_challenger = self.sh_intensifier.new_challenger
        return intent, run_info
    def _update_stage(self, run_history: RunHistory = None) -> None:
        """
        Update tracking information for a new stage/iteration and update statistics.
        This method is called to initialize stage variables and after all configurations
        of a successive halving stage are completed.

        Parameters
        ----------
        run_history : smac.runhistory.runhistory.RunHistory
            stores all runs we ran so far
        """
        if not hasattr(self, 's'):
            # setting initial running budget for future iterations (s & s_max from Algorithm 1)
            self.s_max = int(np.floor(np.log(self.max_budget / self.initial_budget) / np.log(self.eta)))
            self.s = self.s_max
        elif self.s == 0:
            # reset if HB iteration is over
            self.s = self.s_max
            self.hb_iters += 1
            self.iteration_done = True
            self.num_run = 0
        else:
            # update for next iteration
            self.s -= 1
        # compute min budget for new SH run
        sh_initial_budget = self.eta ** -self.s * self.max_budget
        # sample challengers for next iteration (based on HpBandster package)
        n_challengers = int(np.floor((self.s_max + 1) / (self.s + 1)) * self.eta ** self.s)
        # Compute this for the next round
        n_configs_in_stage = n_challengers * np.power(self.eta, -np.linspace(0, self.s, self.s + 1))
        n_configs_in_stage = np.array(np.round(n_configs_in_stage), dtype=int).tolist()
        self.logger.info('Hyperband iteration-step: %d-%d with initial budget: %d' % (
            self.hb_iters + 1, self.s_max - self.s + 1, sh_initial_budget))
        # creating a new Successive Halving intensifier with the current running budget
        self.sh_intensifier = _SuccessiveHalving(
            stats=self.stats,
            traj_logger=self.traj_logger,
            rng=self.rs,
            instances=self.instances,
            instance_specifics=self.instance_specifics,
            cutoff=self.cutoff,
            deterministic=self.deterministic,
            initial_budget=sh_initial_budget,
            max_budget=self.max_budget,
            eta=self.eta,
            _all_budgets=self.all_budgets[(-self.s - 1):],
            _n_configs_in_stage=n_configs_in_stage,
            num_initial_challengers=n_challengers,
            run_obj_time=self.run_obj_time,
            n_seeds=self.n_seeds,
            instance_order=self.instance_order,
            adaptive_capping_slackfactor=self.adaptive_capping_slackfactor,
            inst_seed_pairs=self.inst_seed_pairs,  # additional argument to avoid
            identifier=self.identifier,
        )  # processing instances & seeds again
class Hyperband(ParallelScheduler):
    """ Races multiple challengers against an incumbent using Hyperband method

    Implementation from "BOHB: Robust and Efficient Hyperparameter Optimization at Scale" (Falkner et al. 2018)

    Hyperband is an extension of the Successive Halving intensifier. Please refer to `SuccessiveHalving` documentation
    for more detailed information about the different types of budgets possible and the way instances are handled.

    Internally, this class uses the _Hyperband private class which actually implements the hyperband logic.
    To allow for parallelism, Hyperband can create multiple _Hyperband instances, based on the number
    of idle workers available.

    Parameters
    ----------
    stats: smac.stats.stats.Stats
        stats object
    traj_logger: smac.utils.io.traj_logging.TrajLogger
        TrajLogger object to log all new incumbents
    rng : np.random.RandomState
    instances : typing.List[str]
        list of all instance ids
    instance_specifics : typing.Mapping[str,np.ndarray]
        mapping from instance name to instance specific string
    cutoff : typing.Optional[int]
        runtime cutoff of TA runs
    deterministic : bool
        whether the TA is deterministic or not
    initial_budget : typing.Optional[float]
        minimum budget allowed for 1 run of successive halving
    max_budget : typing.Optional[float]
        maximum budget allowed for 1 run of successive halving
    eta : float
        'halving' factor after each iteration in a successive halving run. Defaults to 3
    run_obj_time : bool
        whether the run objective is runtime or not (if true, apply adaptive capping)
    n_seeds : typing.Optional[int]
        Number of seeds to use, if TA is not deterministic. Defaults to None, i.e., seed is set as 0
    instance_order : typing.Optional[str]
        how to order instances. Can be set to: [None, shuffle_once, shuffle]
        * None - use as is given by the user
        * shuffle_once - shuffle once and use across all SH run (default)
        * shuffle - shuffle before every SH run
    adaptive_capping_slackfactor : float
        slack factor of adaptive capping (factor * adaptive cutoff)
    min_chall: int
        minimal number of challengers to be considered (even if time_bound is exhausted earlier). This class will
        raise an exception if a value larger than 1 is passed.
    incumbent_selection: str
        How to select incumbent in successive halving. Only active for real-valued budgets.
        Can be set to: [highest_executed_budget, highest_budget, any_budget]
        * highest_executed_budget - incumbent is the best in the highest budget run so far (default)
        * highest_budget - incumbent is selected only based on the highest budget
        * any_budget - incumbent is the best on any budget i.e., best performance regardless of budget
    """
    def __init__(self,
                 stats: Stats,
                 traj_logger: TrajLogger,
                 rng: np.random.RandomState,
                 instances: typing.List[str],
                 instance_specifics: typing.Mapping[str, np.ndarray] = None,
                 cutoff: typing.Optional[float] = None,
                 deterministic: bool = False,
                 initial_budget: typing.Optional[float] = None,
                 max_budget: typing.Optional[float] = None,
                 eta: float = 3,
                 run_obj_time: bool = True,
                 n_seeds: typing.Optional[int] = None,
                 instance_order: str = 'shuffle_once',
                 adaptive_capping_slackfactor: float = 1.2,
                 min_chall: int = 1,
                 incumbent_selection: str = 'highest_executed_budget',
                 ) -> None:
        super().__init__(stats=stats,
                         traj_logger=traj_logger,
                         rng=rng,
                         instances=instances,
                         instance_specifics=instance_specifics,
                         cutoff=cutoff,
                         deterministic=deterministic,
                         run_obj_time=run_obj_time,
                         adaptive_capping_slackfactor=adaptive_capping_slackfactor,
                         min_chall=min_chall)
        self.logger = logging.getLogger(
            self.__module__ + "." + self.__class__.__name__)
        # Parameters for a new hyperband; stored so _add_new_instance can
        # construct fresh _Hyperband workers on demand
        self.n_seeds = n_seeds
        self.instance_order = instance_order
        self.incumbent_selection = incumbent_selection
        self._instances = instances
        self._instance_specifics = instance_specifics
        self.initial_budget = initial_budget
        self.max_budget = max_budget
        self.eta = eta
    def _get_intensifier_ranking(self, intensifier: AbstractRacer
                                 ) -> typing.Tuple[int, int]:
        """
        Given an intensifier, returns how advanced it is.
        This metric will be used to determine what priority to
        assign to the intensifier

        Parameters
        ----------
        intensifier: AbstractRacer
            Intensifier to rank based on run progress

        Returns
        -------
        ranking: int
            the higher this number, the faster the intensifier will get
            the running resources. For hyperband we can use the
            sh_intensifier stage, for example
        tie_breaker: int
            The configurations that have been launched to break ties. For
            example, in the case of Successive Halving it can be the number
            of configurations launched
        """
        # For mypy -- we expect to work with _Hyperband instances
        assert isinstance(intensifier, _Hyperband)
        # For hyperband, we use the internal successive halving as a criteria
        # to see how advanced this intensifier is
        stage = 0
        if hasattr(intensifier.sh_intensifier, 'stage'):
            # Newly created SuccessiveHalving objects have no stage
            stage = intensifier.sh_intensifier.stage
        return stage, len(intensifier.sh_intensifier.run_tracker)
    def _add_new_instance(self, num_workers: int) -> bool:
        """
        Decides if it is possible to add a new intensifier instance,
        and adds it.
        If a new intensifier instance is added, True is returned, else False.

        Parameters
        -----------
        num_workers: int
            the maximum number of workers available
            at a given time.

        Returns
        -------
        Whether or not a new instance was added
        """
        if len(self.intensifier_instances) >= num_workers:
            return False
        # key is the next free index; identifier ties logger output to this worker
        self.intensifier_instances[len(self.intensifier_instances)] = _Hyperband(
            stats=self.stats,
            traj_logger=self.traj_logger,
            rng=self.rs,
            instances=self._instances,
            instance_specifics=self._instance_specifics,
            cutoff=self.cutoff,
            deterministic=self.deterministic,
            initial_budget=self.initial_budget,
            max_budget=self.max_budget,
            eta=self.eta,
            run_obj_time=self.run_obj_time,
            n_seeds=self.n_seeds,
            instance_order=self.instance_order,
            adaptive_capping_slackfactor=self.adaptive_capping_slackfactor,
            min_chall=self.min_chall,
            incumbent_selection=self.incumbent_selection,
            identifier=len(self.intensifier_instances),
        )
        return True
| 9,099 |
487 | <reponame>LS11111/video
import matplotlib.pyplot as plt
import numpy as np
def plot_trajectory(transforms, trajectory, smoothed_trajectory):
    """Plot video trajectory

    Create a plot of the video's trajectory & smoothed trajectory.
    The x and y trajectories are drawn on separate, x-sharing subplots.

    :param transforms: VidStab transforms attribute
    :param trajectory: VidStab trajectory attribute
    :param smoothed_trajectory: VidStab smoothed_trajectory attribute
    :return: tuple of matplotlib objects ``(Figure, (AxesSubplot, AxesSubplot))``

    >>> from vidstab import VidStab
    >>> import matplotlib.pyplot as plt
    >>> stabilizer = VidStab()
    >>> stabilizer.gen_transforms(input_path='input_video.mov')
    >>> stabilizer.plot_trajectory()
    >>> plt.show()
    """
    if transforms is None:
        raise AttributeError('No trajectory to plot. '
                             'Use methods: gen_transforms or stabilize to generate the trajectory attributes')
    with plt.style.context('ggplot'):
        fig, (ax1, ax2) = plt.subplots(2, sharex='all')
        # Draw raw + smoothed series on each axis: column 0 -> dx, column 1 -> dy.
        for axis, column, axis_label in ((ax1, 0, 'dx'), (ax2, 1, 'dy')):
            axis.plot(trajectory[:, column], label='Trajectory')
            axis.plot(smoothed_trajectory[:, column], label='Smoothed Trajectory')
            axis.set_ylabel(axis_label)
        # A single figure-level legend taken from the second axis (labels are identical).
        handles, labels = ax2.get_legend_handles_labels()
        fig.legend(handles, labels, loc='upper right')
        plt.xlabel('Frame Number')
        fig.suptitle('Video Trajectory', x=0.15, y=0.96, ha='left')
        fig.canvas.manager.set_window_title('Trajectory')
        return fig, (ax1, ax2)
def plot_transforms(transforms, radians=False):
    """Plot stabilizing transforms

    Draws the frame-to-frame stabilizing transforms: dx & dy in the top
    subplot, the rotation da in the bottom one.

    :param transforms: VidStab transforms attribute
    :param radians: Should angle transforms be plotted in radians?  If ``false``, transforms are plotted in degrees.
    :return: tuple of matplotlib objects ``(Figure, (AxesSubplot, AxesSubplot))``

    >>> from vidstab import VidStab
    >>> import matplotlib.pyplot as plt
    >>> stabilizer = VidStab()
    >>> stabilizer.gen_transforms(input_path='input_video.mov')
    >>> stabilizer.plot_transforms()
    >>> plt.show()
    """
    if transforms is None:
        raise AttributeError('No transforms to plot. '
                             'Use methods: gen_transforms or stabilize to generate the transforms attribute')

    with plt.style.context('ggplot'):
        fig, (ax1, ax2) = plt.subplots(2, sharex='all')

        ax1.plot(transforms[:, 0], label='delta x', color='C0')
        ax1.plot(transforms[:, 1], label='delta y', color='C1')
        ax1.set_ylabel('Delta Pixels', fontsize=10)

        # Angle series is stored in radians; convert for display unless asked not to.
        if radians:
            angle_series = transforms[:, 2]
            angle_ylabel = 'Delta Radians'
        else:
            angle_series = np.rad2deg(transforms[:, 2])
            angle_ylabel = 'Delta Degrees'
        ax2.plot(angle_series, label='delta angle', color='C2')
        ax2.set_ylabel(angle_ylabel, fontsize=10)

        handles1, labels1 = ax1.get_legend_handles_labels()
        handles2, labels2 = ax2.get_legend_handles_labels()
        fig.legend(handles1 + handles2,
                   labels1 + labels2,
                   loc='upper right',
                   ncol=1)

        plt.xlabel('Frame Number')

        fig.suptitle('Transformations for Stabilizing', x=0.15, y=0.96, ha='left')
        fig.canvas.manager.set_window_title('Transforms')

        return fig, (ax1, ax2)
| 1,540 |
5,168 | /**
* \file dnn/src/fallback/convolution/run_conv.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/fallback/convolution/run_conv.h"
#include "midout.h"
#include "src/common/utils.h"
MIDOUT_DECL(megdnn_fallback_conv)
namespace {
// Only square filters with side length in [1, 7] have unrolled template
// specializations; everything else must use the non-templated kernel.
bool can_run_xcorr_single_channel_templated(
        size_t /* IH */, size_t /* IW */, size_t FH, size_t FW, size_t /* OH */,
        size_t /* OW */, size_t /* PH */, size_t /* PW */, size_t /* SH */,
        size_t /* SW */) {
    const bool is_square = (FH == FW);
    const bool size_supported = (FH >= 1 && FH <= 7);
    return is_square && size_supported;
}
// Cross-correlates one input channel with a compile-time square filter of
// side `ker_size`, writing into `dst` (or accumulating when `add_mode` is
// true, which is how input channels are summed). PH/PW are paddings and
// SH/SW strides. The output is partitioned into border regions, where each
// filter tap is bounds-checked against the (virtually padded) source, and an
// interior "good" region that needs no checks.
template <int ker_size>
void run_xcorr_single_channel_templated_impl(
        const float* __restrict src, const float* __restrict filter,
        float* __restrict dst, size_t IH, size_t IW, size_t OH, size_t OW, size_t PH,
        size_t PW, size_t SH, size_t SW, bool add_mode) {
#define divup(x, y) (((x) + (y)-1) / (y))
#define clear(oh, ow) \
    if (!add_mode) { \
        dst[(oh)*OW + (ow)] = 0; \
    }
#define update(oh, ow, fh, fw) \
    dst[(oh)*OW + (ow)] += filter[(fh)*ker_size + (fw)] * \
                           src[((oh)*SH + (fh)-PH) * IW + ((ow)*SW + (fw)-PW)]
    // OH = (IH-ker_size)/stride+1
    // OW = (IW-ker_size)/stride+1
    // good region:
    // oh*stride-anchor >= 0
    // oh*stride-anchor+ker_size <= IH
    // oh >= anchor/stride
    // oh <= (IH+anchor-ker_size)/stride
    // Interior-region bounds along each axis, clamped to empty when the
    // image is smaller than the filter.
    size_t oh_start = divup(PH, SH);
    size_t oh_end = IH + PH >= ker_size ? (IH + PH - ker_size) / SH + 1 : 0;
    size_t ow_start = divup(PW, SW);
    size_t ow_end = IW + PW >= ker_size ? (IW + PW - ker_size) / SW + 1 : 0;
    if (oh_start > oh_end)
        oh_start = oh_end = 0;
    if (ow_start > ow_end)
        ow_start = ow_end = 0;
    // Rows above the interior region: check both row and column of each tap.
    for (size_t oh = 0; oh < oh_start; ++oh)
        for (size_t ow = 0; ow < OW; ++ow) {
            clear(oh, ow);
            int ih = oh * SH - PH;
            int iw = ow * SW - PW;
            for (int fh = 0; fh < ker_size; ++fh)
                if (ih + fh >= 0 && ih + fh < (int)IH)
                    for (int fw = 0; fw < ker_size; ++fw)
                        if (iw + fw >= 0 && iw + fw < (int)IW) {
                            update(oh, ow, fh, fw);
                        }
        }
    // Interior rows: left border (column checks), unchecked interior,
    // right border (column checks).
    for (size_t oh = oh_start; oh < oh_end; ++oh) {
        for (size_t ow = 0; ow < ow_start; ++ow) {
            clear(oh, ow);
            int iw = ow * SW - PW;
            for (int fh = 0; fh < ker_size; ++fh)
                for (int fw = 0; fw < ker_size; ++fw) {
                    if (iw + fw >= 0 && iw + fw < (int)IW)
                        update(oh, ow, fh, fw);
                }
        }
        for (size_t ow = ow_start; ow < ow_end; ++ow) {
            clear(oh, ow);
            for (int fh = 0; fh < ker_size; ++fh)
                for (int fw = 0; fw < ker_size; ++fw) {
                    update(oh, ow, fh, fw);
                }
        }
        for (size_t ow = ow_end; ow < OW; ++ow) {
            clear(oh, ow);
            int iw = ow * SW - PW;
            for (int fh = 0; fh < ker_size; ++fh)
                for (int fw = 0; fw < ker_size; ++fw) {
                    if (iw + fw >= 0 && iw + fw < (int)IW)
                        update(oh, ow, fh, fw);
                }
        }
    }
    // Rows below the interior region: fully bounds-checked again.
    for (size_t oh = oh_end; oh < OH; ++oh) {
        for (size_t ow = 0; ow < OW; ++ow) {
            clear(oh, ow);
            int ih = oh * SH - PH;
            int iw = ow * SW - PW;
            for (int fh = 0; fh < ker_size; ++fh)
                if (ih + fh >= 0 && ih + fh < (int)IH)
                    for (int fw = 0; fw < ker_size; ++fw)
                        if (iw + fw >= 0 && iw + fw < (int)IW) {
                            update(oh, ow, fh, fw);
                        }
        }
    }
#undef divup
#undef clear
#undef update
}
// Dispatches to the compile-time specialization matching the (square) filter
// side FH in [1, 7]. Callers must have validated the size with
// can_run_xcorr_single_channel_templated() first; otherwise this throws.
void run_xcorr_single_channel_templated(
        const float* src, const float* filter, float* dst, size_t IH, size_t IW,
        size_t FH, size_t FW, size_t OH, size_t OW, size_t PH, size_t PW, size_t SH,
        size_t SW, bool add_mode) {
    (void)FW;
#define DISPATCH(ker_size) \
    if (FH == ker_size) { \
        MIDOUT_BEGIN(megdnn_fallback_conv, ker_size) { \
            run_xcorr_single_channel_templated_impl<ker_size>( \
                    src, filter, dst, IH, IW, OH, OW, PH, PW, SH, SW, add_mode); \
        } \
        MIDOUT_END(); \
        return; \
    }
    DISPATCH(1)
    DISPATCH(2)
    DISPATCH(3)
    DISPATCH(4)
    DISPATCH(5)
    DISPATCH(6)
    DISPATCH(7)
#undef DISPATCH
    megdnn_throw("internal error in conv template dispatching: impossible");
}
// Runtime-sized variant of run_xcorr_single_channel_templated_impl: same
// border/interior decomposition, but the filter extent (FH x FW) is a
// runtime value, so the inner loops cannot be unrolled by the compiler.
void run_xcorr_single_channel_nontemplated(
        const float* src, const float* filter, float* dst, size_t IH, size_t IW,
        size_t FH_, size_t FW_, size_t OH, size_t OW, size_t PH, size_t PW, size_t SH,
        size_t SW, bool add_mode) {
#define divup(x, y) (((x) + (y)-1) / (y))
#define clear(oh, ow) \
    if (!add_mode) { \
        dst[(oh)*OW + (ow)] = 0; \
    }
#define update(oh, ow, fh, fw) \
    dst[(oh)*OW + (ow)] += filter[(fh)*FW + (fw)] * \
                           src[((oh)*SH + (fh)-PH) * IW + ((ow)*SW + (fw)-PW)]
    // OH = (IH-ker_size)/stride+1
    // OW = (IW-ker_size)/stride+1
    // good region:
    // oh*stride-anchor >= 0
    // oh*stride-anchor+ker_size <= IH
    // oh >= anchor/stride
    // oh <= (IH+anchor-ker_size)/stride
    // Signed copies so the tap-position arithmetic below can go negative.
    int FH = FH_, FW = FW_;
    size_t oh_start = divup(PH, SH);
    size_t oh_end = IH + PH >= FH_ ? (IH + PH - FH) / SH + 1 : 0;
    size_t ow_start = divup(PW, SW);
    size_t ow_end = IW + PW >= FW_ ? (IW + PW - FW) / SW + 1 : 0;
    if (oh_start > oh_end)
        oh_start = oh_end = 0;
    if (ow_start > ow_end)
        ow_start = ow_end = 0;
    // Rows above the interior region: every tap fully bounds-checked.
    for (size_t oh = 0; oh < oh_start; ++oh)
        for (size_t ow = 0; ow < OW; ++ow) {
            clear(oh, ow);
            int ih = oh * SH - PH;
            int iw = ow * SW - PW;
            for (int fh = 0; fh < FH; ++fh)
                if (ih + fh >= 0 && ih + fh < (int)IH)
                    for (int fw = 0; fw < FW; ++fw)
                        if (iw + fw >= 0 && iw + fw < (int)IW) {
                            update(oh, ow, fh, fw);
                        }
        }
    // Interior rows: left border, unchecked interior, right border.
    for (size_t oh = oh_start; oh < oh_end; ++oh) {
        for (size_t ow = 0; ow < ow_start; ++ow) {
            clear(oh, ow);
            int iw = ow * SW - PW;
            for (int fh = 0; fh < FH; ++fh)
                for (int fw = 0; fw < FW; ++fw) {
                    if (iw + fw >= 0 && iw + fw < (int)IW)
                        update(oh, ow, fh, fw);
                }
        }
        for (size_t ow = ow_start; ow < ow_end; ++ow) {
            clear(oh, ow);
            for (int fh = 0; fh < FH; ++fh)
                for (int fw = 0; fw < FW; ++fw) {
                    update(oh, ow, fh, fw);
                }
        }
        for (size_t ow = ow_end; ow < OW; ++ow) {
            clear(oh, ow);
            int iw = ow * SW - PW;
            for (int fh = 0; fh < FH; ++fh)
                for (int fw = 0; fw < FW; ++fw) {
                    if (iw + fw >= 0 && iw + fw < (int)IW)
                        update(oh, ow, fh, fw);
                }
        }
    }
    // Rows below the interior region.
    for (size_t oh = oh_end; oh < OH; ++oh) {
        for (size_t ow = 0; ow < OW; ++ow) {
            clear(oh, ow);
            int ih = oh * SH - PH;
            int iw = ow * SW - PW;
            for (int fh = 0; fh < FH; ++fh)
                if (ih + fh >= 0 && ih + fh < (int)IH)
                    for (int fw = 0; fw < FW; ++fw)
                        if (iw + fw >= 0 && iw + fw < (int)IW) {
                            update(oh, ow, fh, fw);
                        }
        }
    }
#undef divup
#undef clear
#undef update
}
// Single-channel entry point: uses the unrolled templated kernel for small
// square filters (1..7) and falls back to the generic kernel otherwise.
void run_xcorr_single_channel(
        const float* src, const float* filter, float* dst, size_t IH, size_t IW,
        size_t FH, size_t FW, size_t OH, size_t OW, size_t PH, size_t PW, size_t SH,
        size_t SW, bool add_mode) {
    if (can_run_xcorr_single_channel_templated(
                IH, IW, FH, FW, OH, OW, PH, PW, SH, SW)) {
        run_xcorr_single_channel_templated(
                src, filter, dst, IH, IW, FH, FW, OH, OW, PH, PW, SH, SW, add_mode);
    } else {
        MIDOUT_BEGIN(megdnn_fallback_conv, void) {
            run_xcorr_single_channel_nontemplated(
                    src, filter, dst, IH, IW, FH, FW, OH, OW, PH, PW, SH, SW, add_mode);
        }
        MIDOUT_END();
    }
}
/*================ ConvolutionBackwardData =============*/
// Scatter pass of ConvolutionBackwardData for one channel with a compile-time
// square filter: each `diff` element (IH x IW) is multiplied by every filter
// tap and accumulated into the `grad` window (OH x OW) it influenced.
// `grad` must already be zero-initialized by the caller; border regions are
// bounds-checked, the interior is not.
template <int ker_size>
void conv_backdata_single_channel_templated_impl(
        const float* __restrict diff, const float* __restrict filter,
        float* __restrict grad, size_t IH, size_t IW, size_t OH, size_t OW, size_t PH,
        size_t PW, size_t SH, size_t SW) {
#define divup(x, y) (((x) + (y)-1) / (y))
#define update(oh, ow, fh, fw, val) \
    grad[(oh + fh) * OW + (ow + fw)] += filter[(fh)*ker_size + (fw)] * val
    // Interior-region bounds: diff positions whose scatter window lies fully
    // inside `grad`.
    size_t ih_start = divup(PH, SH);
    size_t ih_end = OH + PH >= ker_size ? (OH + PH - ker_size) / SH + 1 : 0;
    size_t iw_start = divup(PW, SW);
    size_t iw_end = OW + PW >= ker_size ? (OW + PW - ker_size) / SW + 1 : 0;
    if (ih_start > ih_end)
        ih_start = ih_end = 0;
    if (iw_start > iw_end)
        iw_start = iw_end = 0;
    // Rows above the interior region: check both coordinates of each tap.
    for (size_t ih = 0; ih < ih_start; ++ih)
        for (size_t iw = 0; iw < IW; ++iw) {
            int oh = ih * SH - PH;
            int ow = iw * SW - PW;
            float val = diff[ih * IW + iw];
            for (int fh = 0; fh < ker_size; ++fh)
                if (oh + fh >= 0 && oh + fh < (int)OH)
                    for (int fw = 0; fw < ker_size; ++fw)
                        if (ow + fw >= 0 && ow + fw < (int)OW) {
                            update(oh, ow, fh, fw, val);
                        }
        }
    // Interior rows: left border, unchecked interior, right border.
    for (size_t ih = ih_start; ih < ih_end; ++ih) {
        int oh = ih * SH - PH;
        for (size_t iw = 0; iw < iw_start; ++iw) {
            int ow = iw * SW - PW;
            float val = diff[ih * IW + iw];
            for (int fh = 0; fh < ker_size; ++fh)
                for (int fw = 0; fw < ker_size; ++fw) {
                    if (ow + fw >= 0 && ow + fw < (int)OW)
                        update(oh, ow, fh, fw, val);
                }
        }
        for (size_t iw = iw_start; iw < iw_end; ++iw) {
            int ow = iw * SW - PW;
            float val = diff[ih * IW + iw];
            for (int fh = 0; fh < ker_size; ++fh)
                for (int fw = 0; fw < ker_size; ++fw) {
                    update(oh, ow, fh, fw, val);
                }
        }
        for (size_t iw = iw_end; iw < IW; ++iw) {
            int ow = iw * SW - PW;
            float val = diff[ih * IW + iw];
            for (int fh = 0; fh < ker_size; ++fh)
                for (int fw = 0; fw < ker_size; ++fw) {
                    if (ow + fw >= 0 && ow + fw < (int)OW)
                        update(oh, ow, fh, fw, val);
                }
        }
    }
    // Rows below the interior region.
    for (size_t ih = ih_end; ih < IH; ++ih) {
        for (size_t iw = 0; iw < IW; ++iw) {
            int oh = ih * SH - PH;
            int ow = iw * SW - PW;
            float val = diff[ih * IW + iw];
            for (int fh = 0; fh < ker_size; ++fh)
                if (oh + fh >= 0 && oh + fh < (int)OH)
                    for (int fw = 0; fw < ker_size; ++fw)
                        if (ow + fw >= 0 && ow + fw < (int)OW) {
                            update(oh, ow, fh, fw, val);
                        }
        }
    }
#undef divup
#undef update
}
// Dispatches backward-data to the compile-time specialization for the
// (square) filter side FH in [1, 7]; callers validate the size beforehand.
void conv_backdata_single_channel_templated(
        const float* src, const float* filter, float* dst, size_t IH, size_t IW,
        size_t FH, size_t FW, size_t OH, size_t OW, size_t PH, size_t PW, size_t SH,
        size_t SW) {
    megdnn_ignore(FW);
#define DISPATCH(ker_size) \
    if (FH == ker_size) { \
        MIDOUT_BEGIN(megdnn_fallback_conv, ker_size) { \
            conv_backdata_single_channel_templated_impl<ker_size>( \
                    src, filter, dst, IH, IW, OH, OW, PH, PW, SH, SW); \
        } \
        MIDOUT_END(); \
        return; \
    }
    DISPATCH(1)
    DISPATCH(2)
    DISPATCH(3)
    DISPATCH(4)
    DISPATCH(5)
    DISPATCH(6)
    DISPATCH(7)
#undef DISPATCH
    megdnn_throw("internal error in conv_backdata template dispatching: impossible");
}
// Runtime-sized variant of conv_backdata_single_channel_templated_impl:
// identical scatter logic with the filter extent (FH x FW) as runtime values.
// `grad` must already be zero-initialized by the caller.
void conv_backdata_single_channel_nontemplated(
        const float* diff, const float* filter, float* grad, size_t IH, size_t IW,
        size_t FH_, size_t FW_, size_t OH, size_t OW, size_t PH, size_t PW, size_t SH,
        size_t SW) {
#define divup(x, y) (((x) + (y)-1) / (y))
#define update(oh, ow, fh, fw, val) \
    grad[(oh + fh) * OW + (ow + fw)] += filter[(fh)*FW + (fw)] * val
    // Signed copies so the tap-position arithmetic below can go negative.
    int FH = FH_, FW = FW_;
    size_t ih_start = divup(PH, SH);
    size_t ih_end = OH + PH >= FH_ ? (OH + PH - FH) / SH + 1 : 0;
    size_t iw_start = divup(PW, SW);
    size_t iw_end = OW + PW >= FW_ ? (OW + PW - FW) / SW + 1 : 0;
    if (ih_start > ih_end)
        ih_start = ih_end = 0;
    if (iw_start > iw_end)
        iw_start = iw_end = 0;
    // Rows above the interior region: check both coordinates of each tap.
    for (size_t ih = 0; ih < ih_start; ++ih)
        for (size_t iw = 0; iw < IW; ++iw) {
            int oh = ih * SH - PH;
            int ow = iw * SW - PW;
            float val = diff[ih * IW + iw];
            for (int fh = 0; fh < FH; ++fh)
                if (oh + fh >= 0 && oh + fh < (int)OH)
                    for (int fw = 0; fw < FW; ++fw)
                        if (ow + fw >= 0 && ow + fw < (int)OW) {
                            update(oh, ow, fh, fw, val);
                        }
        }
    // Interior rows: left border, unchecked interior, right border.
    for (size_t ih = ih_start; ih < ih_end; ++ih) {
        int oh = ih * SH - PH;
        for (size_t iw = 0; iw < iw_start; ++iw) {
            int ow = iw * SW - PW;
            float val = diff[ih * IW + iw];
            for (int fh = 0; fh < FH; ++fh)
                for (int fw = 0; fw < FW; ++fw) {
                    if (ow + fw >= 0 && ow + fw < (int)OW)
                        update(oh, ow, fh, fw, val);
                }
        }
        for (size_t iw = iw_start; iw < iw_end; ++iw) {
            int ow = iw * SW - PW;
            float val = diff[ih * IW + iw];
            for (int fh = 0; fh < FH; ++fh)
                for (int fw = 0; fw < FW; ++fw) {
                    update(oh, ow, fh, fw, val);
                }
        }
        for (size_t iw = iw_end; iw < IW; ++iw) {
            int ow = iw * SW - PW;
            float val = diff[ih * IW + iw];
            for (int fh = 0; fh < FH; ++fh)
                for (int fw = 0; fw < FW; ++fw) {
                    if (ow + fw >= 0 && ow + fw < (int)OW)
                        update(oh, ow, fh, fw, val);
                }
        }
    }
    // Rows below the interior region.
    for (size_t ih = ih_end; ih < IH; ++ih) {
        for (size_t iw = 0; iw < IW; ++iw) {
            int oh = ih * SH - PH;
            int ow = iw * SW - PW;
            float val = diff[ih * IW + iw];
            for (int fh = 0; fh < FH; ++fh)
                if (oh + fh >= 0 && oh + fh < (int)OH)
                    for (int fw = 0; fw < FW; ++fw)
                        if (ow + fw >= 0 && ow + fw < (int)OW) {
                            update(oh, ow, fh, fw, val);
                        }
        }
    }
#undef divup
#undef update
}
// Backward-data entry point for one channel: unrolled templated kernel for
// small square filters (1..7), generic kernel otherwise.
void conv_backdata_single_channel(
        const float* diff, const float* filter, float* grad, size_t IH, size_t IW,
        size_t FH, size_t FW, size_t OH, size_t OW, size_t PH, size_t PW, size_t SH,
        size_t SW) {
    if (can_run_xcorr_single_channel_templated(
                IH, IW, FH, FW, OH, OW, PH, PW, SH, SW)) {
        conv_backdata_single_channel_templated(
                diff, filter, grad, IH, IW, FH, FW, OH, OW, PH, PW, SH, SW);
    } else {
        MIDOUT_BEGIN(megdnn_fallback_conv, void) {
            conv_backdata_single_channel_nontemplated(
                    diff, filter, grad, IH, IW, FH, FW, OH, OW, PH, PW, SH, SW);
        }
        MIDOUT_END();
    }
}
} // anonymous namespace
namespace megdnn {
namespace fallback {
namespace convolution {
// Forward convolution over all channel pairs. `workspace` must hold at least
// FH*FW floats; it is used to hold the spatially flipped filter when `xcorr`
// is false (true convolution), so the single cross-correlation kernel can be
// reused for both modes. Input channels are accumulated into `dst` by
// passing add_mode = (ic > 0).
void run_conv(
        const float* src, const float* filter, float* dst, void* workspace, size_t IH,
        size_t IW, size_t IC, size_t FH, size_t FW, size_t OH, size_t OW, size_t OC,
        size_t PH, size_t PW, size_t SH, size_t SW, bool xcorr) {
    float* flipped = static_cast<float*>(workspace);
    for (size_t oc = 0; oc < OC; ++oc) {
        for (size_t ic = 0; ic < IC; ++ic) {
            // Filter slice for this (oc, ic) pair, as stored by the caller.
            const float* stored = filter + (oc * IC + ic) * FH * FW;
            const float* kernel = stored;
            if (!xcorr) {
                // True convolution: flip the filter in both spatial dims.
                for (size_t fh = 0; fh < FH; ++fh) {
                    for (size_t fw = 0; fw < FW; ++fw) {
                        flipped[fh * FW + fw] =
                                stored[(FH - fh - 1) * FW + (FW - fw - 1)];
                    }
                }
                kernel = flipped;
            }
            run_xcorr_single_channel(
                    src + ic * IH * IW, kernel, dst + oc * OH * OW, IH, IW, FH, FW,
                    OH, OW, PH, PW, SH, SW, ic > 0);
        }
    }
}
// Backward-data pass over all channel pairs. `grad` is zeroed first because
// contributions from every output channel are accumulated into it. As in
// run_conv, `workspace` (>= FH*FW floats) holds the flipped filter when
// `xcorr` is false.
void run_conv_backward_data(
        const float* diff, const float* filter, float* grad, void* workspace, size_t IH,
        size_t IW, size_t IC, size_t FH, size_t FW, size_t OH, size_t OW, size_t OC,
        size_t PH, size_t PW, size_t SH, size_t SW, bool xcorr) {
    std::memset(grad, 0, sizeof(float) * IC * OH * OW);
    float* flipped = static_cast<float*>(workspace);
    for (size_t oc = 0; oc < OC; ++oc) {
        for (size_t ic = 0; ic < IC; ++ic) {
            // Filter slice for this (oc, ic) pair, as stored by the caller.
            const float* stored = filter + (oc * IC + ic) * FH * FW;
            const float* kernel = stored;
            if (!xcorr) {
                // True convolution: flip the filter in both spatial dims.
                for (size_t fh = 0; fh < FH; ++fh) {
                    for (size_t fw = 0; fw < FW; ++fw) {
                        flipped[fh * FW + fw] =
                                stored[(FH - fh - 1) * FW + (FW - fw - 1)];
                    }
                }
                kernel = flipped;
            }
            conv_backdata_single_channel(
                    diff + oc * IH * IW, kernel, grad + ic * OH * OW, IH, IW, FH, FW,
                    OH, OW, PH, PW, SH, SW);
        }
    }
}
} // namespace convolution
} // namespace fallback
} // namespace megdnn
// vim: syntax=cpp.doxygen
| 11,597 |
316 | package com.salesforce.dva.argus.service.auth;
import com.salesforce.dva.argus.TestUtils;
import com.salesforce.dva.argus.entity.PrincipalUser;
import com.salesforce.dva.argus.service.MonitorService;
import com.salesforce.dva.argus.service.UserService;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class NoAuthTest {

    /** NoAuthService.getUser must resolve a user without throwing. */
    @Test
    public void testNoAuthDoesNotThrow() {
        final String expectedName = "user";
        final PrincipalUser storedUser = new PrincipalUser(null, expectedName, expectedName);

        // The user service always resolves/updates to the same principal.
        final UserService userService = mock(UserService.class);
        when(userService.findUserByUsername(any())).thenReturn(storedUser);
        when(userService.updateUser(any())).thenReturn(storedUser);

        final NoAuthService service = new NoAuthService(
                TestUtils.getConfiguration(), userService, mock(MonitorService.class));

        final PrincipalUser resolved = service.getUser(expectedName, expectedName);
        assertEquals(resolved.getUserName(), expectedName);
    }
}
| 382 |
4,756 | // Copyright 2020 The MACE Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "mace/runtimes/opencl/qc_ion/opencl_image_qc_ion_allocator.h"
#include <memory>
#include "mace/core/runtime_failure_mock.h"
#include "mace/runtimes/opencl/core/opencl_executor.h"
#include "mace/runtimes/opencl/core/opencl_util.h"
#include "mace/runtimes/opencl/opencl_image_allocator.h"
#include "mace/utils/logging.h"
namespace mace {
// Thin constructor: all shared Qualcomm ION bookkeeping lives in the base
// class; this subclass only specializes image allocation.
OpenclImageQcIonAllocator::OpenclImageQcIonAllocator(
    OpenclExecutor *opencl_executor, std::shared_ptr<Rpcmem> rpcmem)
    : OpenclBaseQcIonAllocator(opencl_executor, rpcmem) {}
// Memory produced by this allocator is always an OpenCL image.
MemoryType OpenclImageQcIonAllocator::GetMemType() {
  return MemoryType::GPU_IMAGE;
}
// Allocates a 2-D OpenCL image backed by a Qualcomm ION host allocation, so
// the same storage is visible from both host and GPU. On success, *result is
// a heap-allocated cl::Image2D*; on failure, *result is nullptr and
// MACE_OUT_OF_RESOURCES is returned.
MaceStatus OpenclImageQcIonAllocator::New(const MemInfo &info, void **result) {
  MACE_CHECK(info.mem_type == MemoryType::GPU_IMAGE);
  MACE_LATENCY_LOGGER(1, "Allocate OpenCL ION image: ",
                      info.dims[0], ", ", info.dims[1]);
  if (ShouldMockRuntimeFailure()) {
    return MaceStatus::MACE_OUT_OF_RESOURCES;
  }
  cl::ImageFormat img_format(
      CL_RGBA, OpenCLUtil::DataTypeToCLChannelType(info.data_type));
  cl_int error = CL_SUCCESS;
  cl_mem_ion_host_ptr ion_host;
  size_t pitch;
  // Query the device-required row pitch and back it with ION host memory.
  CreateQualcommImageIONHostPtr(info.dims, img_format, &pitch, &ion_host);
  // Wrap the ION allocation in an OpenCL image (CL_MEM_USE_HOST_PTR +
  // Qualcomm's EXT_HOST_PTR extension → zero-copy).
  cl::Image2D *cl_image = new cl::Image2D(
      opencl_executor_->context(),
      CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR | CL_MEM_EXT_HOST_PTR_QCOM,
      img_format,
      info.dims[0],
      info.dims[1],
      pitch,
      &ion_host,
      &error);
  if (error != CL_SUCCESS) {
    LOG(WARNING) << "Allocate OpenCL image with shape: ["
                 << info.dims[0] << ", " << info.dims[1]
                 << "] failed because of "
                 << OpenCLErrorToString(error);
    // Many users have doubts at CL_INVALID_IMAGE_SIZE, add some tips.
    if (error == CL_INVALID_IMAGE_SIZE) {
      auto max_2d_size = opencl_executor_->GetMaxImage2DSize();
      LOG(WARNING) << "The allowable OpenCL image size is: "
                   << max_2d_size[0] << "x" << max_2d_size[1];
    }
    delete cl_image;
    *result = nullptr;
    return MaceStatus::MACE_OUT_OF_RESOURCES;
  } else {
    // Remember the backing host pointer so Delete() can free the ION memory.
    cl_to_host_map_[static_cast<void *>(cl_image)] = ion_host.ion_hostptr;
    *result = cl_image;
    return MaceStatus::MACE_SUCCESS;
  }
}
// Destroys the cl::Image2D and releases the ION host allocation backing it.
void OpenclImageQcIonAllocator::Delete(void *image) {
  MACE_LATENCY_LOGGER(1, "Free OpenCL image");
  if (image != nullptr) {
    cl::Image2D *cl_image = static_cast<cl::Image2D *>(image);
    delete cl_image;
    // `image` is used only as a map key below (its pointer value); the
    // pointee is never touched after the delete above.
    auto it = cl_to_host_map_.find(image);
    MACE_CHECK(it != cl_to_host_map_.end(), "OpenCL image not found!");
    rpcmem_->Delete(it->second);
    cl_to_host_map_.erase(image);
  }
}
// Queries the device-mandated row pitch for an image of `shape` (width,
// height) with `format`, then allocates pitch * height bytes of ION host
// memory, filling `ion_host` for use with CL_MEM_EXT_HOST_PTR_QCOM.
void OpenclImageQcIonAllocator::CreateQualcommImageIONHostPtr(
    const std::vector<index_t> &shape,
    const cl::ImageFormat &format,
    size_t *pitch,
    cl_mem_ion_host_ptr *ion_host) {
  cl_int error = clGetDeviceImageInfoQCOM(
      opencl_executor_->device().get(), shape[0], shape[1], &format,
      CL_IMAGE_ROW_PITCH, sizeof(*pitch), pitch, nullptr);
  MACE_CHECK(error == CL_SUCCESS, "clGetDeviceImageInfoQCOM failed, error: ",
             OpenCLErrorToString(error));
  CreateQualcommBufferIONHostPtr(cpu_ion_allocator_.get(),
                                 *pitch * shape[1], ion_host);
}
} // namespace mace
| 1,633 |
1,374 | <gh_stars>1000+
package source.structural.defaultsOverloaded;
import java.util.List;
import java.util.Map;
// Interface whose overloads share one name: the no-arg overload is
// re-declared abstract while the List/Map overloads get default stubs that
// return null. Presumably a transpiler test fixture — confirm before reuse.
public interface DuplicateMethod extends DuplicateMethodInterface {

    @Override
    DuplicateMethod getNombrePleinSignes() ;

    @Override
    default DuplicateMethod getNombrePleinSignes(List<?> unNombrePleinSignes) {
        // TODO Auto-generated method stub
        return null;
    }

    @Override
    default DuplicateMethod getNombrePleinSignes(Map<String, ?> unNombrePleinSignes) {
        // TODO Auto-generated method stub
        return null;
    }
}
3,579 | <reponame>coder-hugo/querydsl
/*
* Copyright 2015, The Querydsl Team (http://www.querydsl.com/team)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.querydsl.core.util;
import javax.lang.model.SourceVersion;
/**
* JavaSyntaxUtils defines reserved keywords for Java
*
* @author tiwe
* @deprecated use the built-in {@link SourceVersion#isKeyword(CharSequence) keyword list} instead.
*
*/
@Deprecated
public final class JavaSyntaxUtils {

    /** Static utility holder; never instantiated. */
    private JavaSyntaxUtils() { }

    /**
     * @param str the identifier to test
     * @return {@code true} when {@code str} is a reserved Java keyword
     */
    public static boolean isReserved(final String str) {
        return SourceVersion.isKeyword(str);
    }
}
| 327 |
348 | <gh_stars>100-1000
{"nom":"Aubenas","circ":"3ème circonscription","dpt":"Ardèche","inscrits":8224,"abs":4906,"votants":3318,"blancs":236,"nuls":120,"exp":2962,"res":[{"nuance":"LR","nom":"<NAME>","voix":1682},{"nuance":"REM","nom":"M. <NAME>","voix":1280}]} | 107 |
348 | {"nom":"Magny-lès-Aubigny","circ":"5ème circonscription","dpt":"Côte-d'Or","inscrits":167,"abs":91,"votants":76,"blancs":8,"nuls":1,"exp":67,"res":[{"nuance":"LR","nom":"<NAME>","voix":37},{"nuance":"REM","nom":"<NAME>","voix":30}]} | 101 |
415 | <gh_stars>100-1000
package io.joern.fuzzyc2cpg.ast.logical.statements;
import io.joern.fuzzyc2cpg.ast.walking.ASTNodeVisitor;
/** AST node for a jump statement; dispatches to the visitor. */
public class JumpStatement extends Statement {

  @Override
  public void accept(ASTNodeVisitor visitor) {
    // Standard visitor double-dispatch hook.
    visitor.visit(this);
  }
}
| 100 |
#!/usr/bin/env python

import os
import sys
import json
import shutil
import subprocess


def _load_json(path):
    """Read and parse a JSON file, closing the handle deterministically."""
    with open(path) as handle:
        return json.load(handle)


# Get project settings
projectConfig = _load_json("project.json")
buildCultures = projectConfig["cultures"]
buildConfiguration = projectConfig["configuration"]

# Get system settings
systemConfig = _load_json("system.json")
buildPlatforms = systemConfig["platforms"]
inputDir = systemConfig["inputDir"]
outputDir = systemConfig["outputDir"]
engineDir = systemConfig["engineDir"]
engineInstalled = systemConfig["engineInstalled"]

# Platform-dependent names
if sys.platform.startswith('linux'):
    scriptExt = ".sh"
    engineExecutable = "UE4Editor"
    noCompileEditorOption = " -LinuxNoEditor"
else:
    scriptExt = ".bat"
    engineExecutable = "UE4Editor-Cmd.exe"
    noCompileEditorOption = " -nocompileeditor"

# Installed vs built engine
if engineInstalled:
    installedOption = " -installed"
    cleanOption = " -clean"
else:
    installedOption = ""
    cleanOption = ""

# Generate paths
inputProject = os.path.join(inputDir, "HeliumRain.uproject")
buildTool = os.path.join(engineDir, "Engine", "Build", "BatchFiles", "RunUAT" + scriptExt)

# Generate version tag from the latest annotated git tag
gitCommand = ['git', 'describe']
buildVersion = subprocess.check_output(gitCommand).decode("utf-8").strip()

# Build each platform
for platform in buildPlatforms:

    # Generate the UAT BuildCookRun command line
    commandLine = buildTool
    commandLine += " BuildCookRun -project=" + inputProject + " -nocompile" + noCompileEditorOption + installedOption
    commandLine += " -nop4 -clientconfig=" + buildConfiguration
    commandLine += " -cook -allmaps -stage -archive -archivedirectory=" + outputDir
    commandLine += " -package -ue4exe=" + engineExecutable
    commandLine += " -build -targetplatform=" + platform + cleanOption
    commandLine += " -pak -prereqs -distribution -createreleaseversion=" + buildVersion
    commandLine += " -utf8output -CookCultures=" + buildCultures

    # Call the Unreal automation tool
    os.system(commandLine)

    # Copy Boiler files and other tools.
    # shutil.copy (not copyfile) is used so the executable permission bits of
    # the launcher binaries and scripts survive the copy.
    if projectConfig["modding"]:
        if platform == 'Linux':
            buildOutputDir = outputDir + "/LinuxNoEditor"
            shutil.copy("../HeliumRainLauncher", buildOutputDir + "/HeliumRainLauncher")
            shutil.copy("../HeliumRainLauncher.sh", buildOutputDir + "/HeliumRainLauncher.sh")
            shutil.copy("../libsteam_api.so", buildOutputDir + "/libsteam_api.so")
            shutil.copy("../steam_appid.txt", buildOutputDir + "/steam_appid.txt")
            shutil.copytree("../Icons", buildOutputDir + "/Icons")
        else:
            buildOutputDir = outputDir + "/WindowsNoEditor"
            shutil.copy("../HeliumRainLauncher.exe", buildOutputDir + "/HeliumRainLauncher.exe")
            shutil.copy("../steam_api64.dll", buildOutputDir + "/steam_api64.dll")
            shutil.copy("../steam_appid.txt", buildOutputDir + "/steam_appid.txt")
30,023 | <gh_stars>1000+
"""Tests for Broadlink remotes."""
from base64 import b64decode
from unittest.mock import call
from homeassistant.components.broadlink.const import DOMAIN
from homeassistant.components.remote import (
DOMAIN as REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.const import STATE_OFF, STATE_ON, Platform
from homeassistant.helpers.entity_registry import async_entries_for_device
from . import get_device
from tests.common import mock_device_registry, mock_registry
REMOTE_DEVICES = ["Entrance", "Living Room", "Office", "Garage"]
IR_PACKET = (
"JgBGAJKVETkRORA6ERQRFBEUERQRFBE5ETkQOhAVEBUQFREUEBUQ"
"OhEUERQRORE5EBURFBA6EBUQOhE5EBUQFRA6EDoRFBEADQUAAA=="
)
async def test_remote_setup_works(hass):
    """Test a successful setup with all remotes."""
    for device_name in REMOTE_DEVICES:
        device = get_device(device_name)
        device_registry = mock_device_registry(hass)
        entity_registry = mock_registry(hass)
        mock_setup = await device.setup_entry(hass)

        device_entry = device_registry.async_get_device(
            {(DOMAIN, mock_setup.entry.unique_id)}
        )
        entries = async_entries_for_device(entity_registry, device_entry.id)
        remotes = [entry for entry in entries if entry.domain == Platform.REMOTE]
        # Exactly one remote entity per device.
        assert len(remotes) == 1

        remote = remotes[0]
        assert remote.original_name == f"{device.name} Remote"
        assert hass.states.get(remote.entity_id).state == STATE_ON
        # Setup authenticates exactly once.
        assert mock_setup.api.auth.call_count == 1
async def test_remote_send_command(hass):
    """Test sending a command with all remotes."""
    for device_name in REMOTE_DEVICES:
        device = get_device(device_name)
        device_registry = mock_device_registry(hass)
        entity_registry = mock_registry(hass)
        mock_setup = await device.setup_entry(hass)

        device_entry = device_registry.async_get_device(
            {(DOMAIN, mock_setup.entry.unique_id)}
        )
        entries = async_entries_for_device(entity_registry, device_entry.id)
        remotes = [entry for entry in entries if entry.domain == Platform.REMOTE]
        assert len(remotes) == 1

        remote = remotes[0]
        await hass.services.async_call(
            REMOTE_DOMAIN,
            SERVICE_SEND_COMMAND,
            {"entity_id": remote.entity_id, "command": "b64:" + IR_PACKET},
            blocking=True,
        )
        # The decoded IR packet must be forwarded to the device exactly once.
        assert mock_setup.api.send_data.call_count == 1
        assert mock_setup.api.send_data.call_args == call(b64decode(IR_PACKET))
        assert mock_setup.api.auth.call_count == 1
async def test_remote_turn_off_turn_on(hass):
    """Test we do not send commands if the remotes are off."""
    for device in map(get_device, REMOTE_DEVICES):
        device_registry = mock_device_registry(hass)
        entity_registry = mock_registry(hass)
        mock_setup = await device.setup_entry(hass)

        device_entry = device_registry.async_get_device(
            {(DOMAIN, mock_setup.entry.unique_id)}
        )
        entries = async_entries_for_device(entity_registry, device_entry.id)
        remotes = [entry for entry in entries if entry.domain == Platform.REMOTE]
        assert len(remotes) == 1

        remote = remotes[0]
        # Turn the remote off and confirm the state change took effect.
        await hass.services.async_call(
            REMOTE_DOMAIN,
            SERVICE_TURN_OFF,
            {"entity_id": remote.entity_id},
            blocking=True,
        )
        assert hass.states.get(remote.entity_id).state == STATE_OFF

        # Sending while off must be a no-op: nothing reaches the device.
        await hass.services.async_call(
            REMOTE_DOMAIN,
            SERVICE_SEND_COMMAND,
            {"entity_id": remote.entity_id, "command": "b64:" + IR_PACKET},
            blocking=True,
        )
        assert mock_setup.api.send_data.call_count == 0

        # Turn the remote back on.
        await hass.services.async_call(
            REMOTE_DOMAIN,
            SERVICE_TURN_ON,
            {"entity_id": remote.entity_id},
            blocking=True,
        )
        assert hass.states.get(remote.entity_id).state == STATE_ON

        # Now the same command must be forwarded to the device.
        await hass.services.async_call(
            REMOTE_DOMAIN,
            SERVICE_SEND_COMMAND,
            {"entity_id": remote.entity_id, "command": "b64:" + IR_PACKET},
            blocking=True,
        )
        assert mock_setup.api.send_data.call_count == 1
        assert mock_setup.api.send_data.call_args == call(b64decode(IR_PACKET))
        assert mock_setup.api.auth.call_count == 1
| 1,978 |
#ifndef HDRPLUS_MERGE_H_
#define HDRPLUS_MERGE_H_

#include "Halide.h"

/*
 * merge -- fully merges aligned frames in the temporal and spatial
 * dimension to produce one denoised bayer frame.
 */
// Overload taking the frame dimensions and count as explicit expressions.
Halide::Func merge(Halide::Func imgs, Halide::Expr width, Halide::Expr height, Halide::Expr frames, Halide::Func alignment);
// Convenience overload: dimensions are taken from the concrete input buffer.
Halide::Func merge(Halide::Buffer<uint16_t> imgs, Halide::Func alignment);

#endif
| 155 |
335 | {
"word": "Onychophoran",
"definitions": [
"A terrestrial invertebrate of the small phylum Onychophora, which comprises the velvet worms."
],
"parts-of-speech": "Noun"
} | 77 |
// Time: O(n)
// Space: O(1)
// Greedy: give change using the largest denominations first.
class Solution {
public:
    bool lemonadeChange(std::vector<int>& bills) {
        // Denominations in descending order; the last one (5) is the price.
        static const std::vector<int> kDenoms = {20, 10, 5};
        std::unordered_map<int, int> till;  // denomination -> count on hand
        for (const auto& paid : bills) {
            ++till[paid];
            auto owed = paid - kDenoms.back();
            for (const auto& denom : kDenoms) {
                if (owed == 0) {
                    break;
                }
                if (owed >= denom) {
                    const auto used = std::min(till[denom], owed / denom);
                    till[denom] -= used;
                    owed -= denom * used;
                }
            }
            if (owed != 0) {
                return false;  // could not make exact change
            }
        }
        return true;
    }
};
// Same greedy idea with only the two useful counters (5s and 10s); a 20 can
// never be handed back as change.
class Solution2 {
public:
    bool lemonadeChange(std::vector<int>& bills) {
        int fives = 0;
        int tens = 0;
        for (const auto& paid : bills) {
            switch (paid) {
                case 5:
                    ++fives;
                    break;
                case 10:
                    if (fives == 0) {
                        return false;
                    }
                    --fives;
                    ++tens;
                    break;
                default:  // 20: prefer 10+5, otherwise 5+5+5
                    if (fives > 0 && tens > 0) {
                        --fives;
                        --tens;
                    } else if (fives >= 3) {
                        fives -= 3;
                    } else {
                        return false;
                    }
                    break;
            }
        }
        return true;
    }
};
| 899 |
1,063 | # -*- coding: utf-8 -*-
import os
import torch.nn as nn
from torch.nn import Parameter
from torchvision.models.utils import load_state_dict_from_url
from typing import Dict
def ensure_dir(path: str) -> None:
    """
    Create the directory (and any missing parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the racy
    check-then-create pattern, which could raise ``FileExistsError`` when
    another process created the directory between the check and the call.

    Args:
        path (str): the path of the directory.
    """
    os.makedirs(path, exist_ok=True)
def load_state_dict(model: nn.Module, state_dict: Dict) -> None:
    """
    Copy parameters by name into ``model`` without requiring every shape to
    match — a tolerant variant of PyTorch's ``load_state_dict`` that logs,
    rather than raises on, unexpected keys and shape mismatches.

    Args:
        model (nn.Module): the model for extracting features.
        state_dict (Dict): a dict of model parameters.
    """
    own_state = model.state_dict()
    success_keys = []
    for name, param in state_dict.items():
        if name not in own_state:
            print("[LoadStateDict]: " + 'unexpected key "{}" in state_dict'.format(name))
            continue
        if isinstance(param, Parameter):
            # backwards compatibility for serialized parameters
            param = param.data
        try:
            own_state[name].copy_(param)
        except Exception:
            # Shapes disagree: report and keep going.
            print("[LoadStateDict]: shape mismatch in parameter {}, {} vs {}".format(
                name, own_state[name].size(), param.size()
            ))
        else:
            success_keys.append(name)
    missing = set(own_state.keys()) - set(success_keys)
    if missing:
        print("[LoadStateDict]: " + "missing keys or mismatch param in state_dict: {}".format(missing))
| 682 |
1,192 | <reponame>clayne/DirectXShaderCompiler
// RUN: %clang_cc1 -mms-bitfields -fsyntax-only -verify -triple x86_64-apple-darwin9 %s
// expected-no-diagnostics
// The -mms-bitfields commandline parameter should behave the same
// as the ms_struct attribute.
/* Anonymous struct whose two bitfields use different declared types
   (int, short). Under MS layout rules this affects how storage units are
   allocated, which the size check below observes. */
struct
{
int a : 1;
short b : 1;
} t;
// MS pads out bitfields between different types.
// Compile-time assertion: the array size is -1 (a compile error) unless
// sizeof(t) == 8, i.e. unless -mms-bitfields layout was applied.
static int arr[(sizeof(t) == 8) ? 1 : -1];
| 145 |
348 | <filename>docs/data/leg-t2/045/04505030.json
{"nom":"Beaune-la-Rolande","circ":"5ème circonscription","dpt":"Loiret","inscrits":1324,"abs":744,"votants":580,"blancs":43,"nuls":13,"exp":524,"res":[{"nuance":"LR","nom":"<NAME>","voix":322},{"nuance":"REM","nom":"<NAME>","voix":202}]} | 118 |
3,486 | <reponame>Ryan2511/orbital<filename>tools/slice-pup.py
#!/usr/bin/env python3
import argparse
import ctypes
import os
import pathlib
import struct
import sys
# Configuration
DEBUG = False
# Globals
ENC_PATH = None
DEC_PATH = None
# Structures
class struct_t(object):
    """Base class for binary structures parsed with the ``struct`` module.

    Subclasses set ``self.fmt`` (a ``struct`` format string) and implement
    ``from_data(data)`` to unpack raw bytes into named fields.
    """
    def from_file(self, file):
        # Read exactly as many bytes as the format string describes,
        # then delegate parsing to the subclass.
        raw = file.read(struct.calcsize(self.fmt))
        self.from_data(raw)
class bls_header_t(struct_t):
def __init__(self):
self.fmt = 'IIIIIIII'
def from_data(self, data):
fields = struct.unpack(self.fmt, data)
self.magic = fields[0]
self.version = fields[1]
self.flags = fields[2]
self.entry_count = fields[3]
self.block_count = fields[4]
def __repr__(self):
output= 'bls_header_t({\n'
output += ' magic: 0x%08X\n' % self.magic
output += ' version: 0x%08X\n' % self.version
output += ' flags: 0x%08X\n' % self.flags
output += ' entry_count: 0x%08X\n' % self.entry_count
output += ' block_count: 0x%08X\n' % self.block_count
output += '})'
return output
class bls_entry_t(struct_t):
def __init__(self):
self.fmt = 'IIII32s'
def from_data(self, data):
fields = struct.unpack(self.fmt, data)
self.block_offset = fields[0]
self.file_size = fields[1]
self.file_name = ctypes.create_string_buffer(fields[4]).value
self.file_name = self.file_name.decode('utf-8')
def __repr__(self):
output= 'bls_entry_t({\n'
output += ' block_offset: 0x%08X\n' % self.block_offset
output += ' file_size: 0x%08X\n' % self.file_size
output += ' file_name: %s\n' % self.file_name
output += '})'
return output
class pup_header_t(struct_t):
def __init__(self):
self.fmt = 'IIIHH'
def from_data(self, data):
fields = struct.unpack(self.fmt, data)
self.magic = fields[0]
self.unk_04 = fields[1]
self.unk_08 = fields[2]
self.unk_0C_size = fields[3]
self.unk_0E_size = fields[4]
def __repr__(self):
output= 'pup_header_t({\n'
output += ' magic: 0x%08X\n' % self.magic
output += ' unk_04: 0x%08X\n' % self.unk_04
output += ' unk_08: 0x%08X\n' % self.unk_08
output += ' unk_0C_size: 0x%04X\n' % self.unk_0C_size
output += ' unk_0E_size: 0x%04X\n' % self.unk_0E_size
output += '})'
return output
class pup_header_ex_t(struct_t):
def __init__(self):
self.fmt = 'QHHI'
def from_data(self, data):
fields = struct.unpack(self.fmt, data)
self.file_size = fields[0]
self.segment_count = fields[1]
self.unk_1A = fields[2]
self.unk_1C = fields[3]
def __repr__(self):
output= 'pup_header_ex_t({\n'
output += ' file_size: %d bytes\n' % self.file_size
output += ' seg_count: 0x%04X\n' % self.segment_count
output += ' unk_1A: 0x%04X\n' % self.unk_1A
output += ' unk_1C: 0x%08X\n' % self.unk_1C
output += '})'
return output
class pup_segment_t(struct_t):
    # Segment table entry: four uint64 words holding a flags bitfield, the
    # segment's file offset, and its compressed/uncompressed sizes.
    def __init__(self):
        self.fmt = 'QQQQ'
    def from_data(self, data):
        fields = struct.unpack(self.fmt, data)
        self.flags = fields[0]
        self.offset = fields[1]
        self.compressed_size = fields[2]
        self.uncompressed_size = fields[3]
    def __repr__(self):
        # Flag letters: E=encrypted, C=compressed, B=blocked, D=digests, X=extents.
        output= 'pup_segment_t({\n'
        output += ' flags: 0x%08X (%s)\n' % (self.flags,
            ('E' if self.has_encryption else '') +
            ('C' if self.has_compression else '') +
            ('B' if self.has_blocks else '') +
            ('D' if self.has_digests else '') +
            ('X' if self.has_extents else ''))
        output += ' offset: 0x%08X\n' % self.offset
        output += ' compr_size: 0x%08X\n' % self.compressed_size
        output += ' uncompr_size: 0x%08X\n' % self.uncompressed_size
        output += '})'
        return output
    @property
    def has_encryption(self):
        # Flags bit 1.
        return bool(self.flags & (1 << 1))
    @property
    def has_compression(self):
        # Flags bit 3.
        return bool(self.flags & (1 << 3))
    @property
    def has_blocks(self):
        # Flags bit 11: segment data is split into fixed-size blocks.
        return bool(self.flags & (1 << 11))
    @property
    def has_digests(self):
        # Flags bit 16.
        return bool(self.flags & (1 << 16))
    @property
    def has_extents(self):
        # Flags bit 17.
        return bool(self.flags & (1 << 17))
    @property
    def block_size(self):
        # Bits 12-15 encode the block size as 2 ** (nibble + 12) bytes.
        return 1 << (((self.flags & 0xF000) >> 12) + 12)
    @property
    def block_count(self):
        # Ceiling division: number of block_size blocks covering the data.
        return (self.block_size + self.uncompressed_size - 1) // self.block_size
class pup_block_t(struct_t):
def __init__(self):
self.fmt = 'II'
def from_data(self, data):
fields = struct.unpack(self.fmt, data)
self.offset = fields[0]
self.size = fields[1]
def __repr__(self):
output= 'pup_block_t({\n'
output += ' offset: 0x%08X\n' % self.offset
output += ' size: 0x%08X\n' % self.size
output += '})'
return output
# Helpers
def dprint(*args):
    """print() that emits output only while the module DEBUG flag is set."""
    if not DEBUG:
        return
    print(*args)
def decrypt(blob_name, blob_data):
    """Write the encrypted blob to ENC_PATH and return the matching
    decrypted bytes read from DEC_PATH.

    The actual decryption happens out-of-band; this only pairs the two
    directories and checks that both blobs have identical length.
    """
    enc_file = os.path.join(ENC_PATH, blob_name)
    with open(enc_file, 'wb') as out:
        out.write(blob_data)
    original_size = len(blob_data)
    dec_file = os.path.join(DEC_PATH, blob_name)
    with open(dec_file, 'rb') as src:
        decrypted = src.read()
    assert(len(decrypted) == original_size)
    return decrypted
# Slicer
def slice_bls_entry(pup, bls_entry):
    """Extract every blob belonging to one BLS entry.

    Parses the PUP header at the entry's sector offset, walks its segment
    table, and routes each blob through decrypt() (write encrypted copy,
    read back the decrypted one).
    NOTE(review): relies on bls_entry.index being attached by the caller
    (slice_pup does this).
    """
    # Parse PUP header
    offset = bls_entry.block_offset * 0x200  # BLS offsets are in 0x200-byte sectors
    pup.seek(offset)
    pup_header = pup_header_t()
    pup_header.from_file(pup)
    dprint(pup_header)
    # Get PUP header blob (the remainder of the header area after the
    # fixed-size pup_header_t fields)
    blob_name = 'd%d_hdr.bin' % (bls_entry.index)
    blob_size = pup_header.unk_0C_size - struct.calcsize(pup_header.fmt)
    blob_data = pup.read(blob_size)
    blob_data = decrypt(blob_name, blob_data)
    # Parse PUP extended header (first 0x10 bytes of the decrypted header)
    pup_header_ex = pup_header_ex_t()
    pup_header_ex.from_data(blob_data[:0x10])
    dprint(pup_header_ex)
    # Parse PUP segments (0x20-byte entries following the extended header)
    pup_segments = []
    for i in range(pup_header_ex.segment_count):
        pup_segment_size = 0x20
        pup_segment_offs = 0x10 + pup_segment_size * i
        pup_segment_data = blob_data[pup_segment_offs: \
            pup_segment_offs + pup_segment_size]
        pup_segment = pup_segment_t()
        pup_segment.from_data(pup_segment_data)
        pup_segments.append(pup_segment)
        dprint(pup_segment)
    # Get PUP segment blobs
    table_segments = {}
    for i in range(len(pup_segments)):
        pup_segment = pup_segments[i]
        # Skip special PUP segments
        special_flags = pup_segment.flags & 0xF0000000
        if special_flags == 0xF0000000 or \
           special_flags == 0xE0000000:
            continue
        if pup_segment.has_blocks:
            # Get PUP segment blob (blocked)
            # NOTE(review): assumes the digest/table segment keyed by this
            # index was already seen earlier in the loop (stored below in the
            # has_digests branch) -- KeyError otherwise; confirm ordering.
            count = pup_segment.block_count
            table = table_segments[i]
            table = table[0x20 * count:]  # skip the per-block digest area
            for j in range(count):
                if pup_segment.has_compression:
                    # Compressed: block offsets/sizes come from the table.
                    pup_block = pup_block_t()
                    pup_block.from_data(table[(j+0)*0x8:(j+1)*0x8])
                    blob_offs = pup_block.offset + pup_segment.offset
                    blob_size = pup_block.size & ~0xF  # strip padding bits
                    dprint(pup_block)
                else:
                    # Uncompressed: blocks are laid out contiguously.
                    # NOTE(review): this offset is not rebased by
                    # pup_segment.offset like the compressed path -- confirm.
                    blob_offs = pup_segment.block_size * j
                    blob_size = pup_segment.block_size
                    blob_size = min(blob_size,
                        pup_segment.uncompressed_size - blob_offs)
                pup.seek(blob_offs)
                blob_name = 'd%d_blkseg%04d_b%04d.bin' % (bls_entry.index, i, j)
                blob_data = pup.read(blob_size)
                blob_data = decrypt(blob_name, blob_data)
        elif pup_segment.has_digests:
            # Get PUP segment blob (non-blocked table); remembered so the
            # blocked segment it describes can read its block table later.
            pup.seek(pup_segment.offset)
            segment_id = (pup_segment.flags >> 20) & 0xFF
            blob_name = 'd%d_blkseg%04d_i%04d.bin' % \
                (bls_entry.index, segment_id, i)
            blob_size = pup_segment.compressed_size
            blob_data = pup.read(blob_size)
            blob_data = decrypt(blob_name, blob_data)
            table_segments[segment_id] = blob_data
        else:
            # Get PUP segment blob (non-blocked)
            pup.seek(pup_segment.offset)
            blob_name = 'd%d_nonblkseg%04d.bin' % (bls_entry.index, i)
            blob_size = pup_segment.compressed_size & ~0xF
            blob_data = pup.read(blob_size)
            blob_data = decrypt(blob_name, blob_data)
def slice_pup(pup_path):
    """Split a PUP (BLS container) into named per-blob files.

    DEC_PATH must already hold decrypted counterparts for every blob;
    ENC_PATH is created if needed and must start out empty.
    """
    # Sanity checks
    if not os.listdir(DEC_PATH):
        print("Directory of decrypted blobs is empty")
        return
    pathlib.Path(ENC_PATH).mkdir(parents=True, exist_ok=True)
    if os.listdir(ENC_PATH):
        print("Directory of encrypted blobs is not empty")
        return
    # Parse BLS header
    pup = open(pup_path, 'rb')
    bls_header = bls_header_t()
    bls_header.from_file(pup)
    dprint(bls_header)
    # Parse BLS entries (they follow the header sequentially)
    dprint('')
    bls_entries = []
    for i in range(bls_header.entry_count):
        bls_entry = bls_entry_t()
        bls_entry.from_file(pup)
        # Attach the entry's position; slice_bls_entry uses it for blob names.
        bls_entry.index = i
        bls_entries.append(bls_entry)
        dprint('bls_entries[%d] =' % i, bls_entry)
    # Slice BLS entries
    for i in range(bls_header.entry_count):
        dprint('')
        slice_bls_entry(pup, bls_entries[i])
    pup.close()
def main():
    """Parse command-line arguments and run the PUP slicer."""
    # Define arguments
    parser = argparse.ArgumentParser(
        description='Slice a PUP file into named encrypted blobs.')
    parser.add_argument('pup',
        metavar='path/to/pup', help='path to input pup',
    )
    parser.add_argument('dec',
        metavar='path/to/dec', help='path to decrypted blobs',
    )
    parser.add_argument('enc', nargs='?',
        metavar='path/to/enc', help='path to encrypted blobs',
    )
    # Parse arguments
    args = parser.parse_args()
    if args.enc is None:
        # Default the encrypted-blob directory to "<pup>.enc".
        args.enc = args.pup + '.enc'
    # Set globals and perform slicing
    global ENC_PATH
    global DEC_PATH
    DEC_PATH = args.dec
    ENC_PATH = args.enc
    slice_pup(args.pup)
# Entry point when executed as a script.
if __name__ == '__main__':
    main()
| 5,337 |
2,415 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
@resources.register('cdnprofile')
class CdnProfile(ArmResourceManager):
    """CDN Resource

    :example:

    Returns all CDNs with Standard_Verizon sku

    .. code-block:: yaml

        policies:
          - name: standard-verizon
            resource: azure.cdnprofile
            filters:
              - type: value
                key: sku
                op: in
                value_type: normalize
                value: Standard_Verizon

    """

    class resource_type(ArmResourceManager.resource_type):
        doc_groups = ['Media']

        # Azure SDK module/client used to enumerate CDN profiles.
        service = 'azure.mgmt.cdn'
        client = 'CdnManagementClient'
        # (operation group, operation, extra args) for listing resources.
        enum_spec = ('profiles', 'list', None)
        # Columns shown by default in policy reports.
        default_report_fields = (
            'name',
            'location',
            'resourceGroup',
            'sku.name'
        )
        resource_type = 'Microsoft.Cdn/profiles'
| 479 |
1,383 | <gh_stars>1000+
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: <NAME>, <NAME>
// =============================================================================
#ifndef CHAPI_PARDISOMKL_H
#define CHAPI_PARDISOMKL_H
#include "chrono/ChVersion.h"
#include "chrono/core/ChPlatform.h"
// When compiling this library, remember to define CH_API_COMPILE_PARDISOMKL
// (so that the symbols with 'ChApiPardisoMKL' in front of them will be
// marked as exported). Otherwise, just do not define it if you
// link the library to your code, and the symbols will be imported.
// Export/import switch for the shared-library symbol visibility macro.
#if defined(CH_API_COMPILE_PARDISOMKL)
#define ChApiPardisoMKL ChApiEXPORT
#else
#define ChApiPardisoMKL ChApiIMPORT
#endif
/**
    @defgroup pardisomkl_module Pardiso MKL module
    @brief Module for the Intel MKL library Pardiso direct solver

    This module provides an interface to the Pardiso parallel direct solver in the Intel MKL library.

    For additional information, see:
    - the [installation guide](@ref module_mkl_installation)
*/
#endif  // CHAPI_PARDISOMKL_H
| 419 |
537 | <gh_stars>100-1000
#include "cpp_odbc/level2/diagnostic_record.h"
#include <gtest/gtest.h>
// Verifies that diagnostic_record is a simple aggregate whose three members
// retain the values supplied via brace initialization.
TEST(DiagnosticRecordTest, Members)
{
	std::string const odbc_state = "ABCDE";
	int const native_state = -1;
	std::string const message = "Everything is bad.";
	cpp_odbc::level2::diagnostic_record record = {odbc_state, native_state, message};
	EXPECT_EQ( odbc_state, record.odbc_status_code );
	EXPECT_EQ( native_state, record.native_error_code );
	EXPECT_EQ( message, record.message );
}
| 184 |
467 | package com.codepoetics.protonpack;
import java.util.Comparator;
import java.util.Spliterator;
import java.util.function.Consumer;
import java.util.function.Predicate;
/**
 * A spliterator that drops elements from a source spliterator until a
 * condition first holds. In exclusive mode ({@link #over}) the first matching
 * element is emitted; in inclusive mode ({@link #overInclusive}) it is
 * skipped as well.
 */
class SkipUntilSpliterator<T> implements Spliterator<T> {
    /** Skip until {@code condition} holds; the matching element is emitted. */
    static <T> SkipUntilSpliterator<T> over(Spliterator<T> source, Predicate<T> condition) {
        return new SkipUntilSpliterator<>(source, condition, false);
    }
    /** Skip until {@code condition} holds; the matching element is skipped too. */
    static <T> SkipUntilSpliterator<T> overInclusive(Spliterator<T> source, Predicate<T> condition) {
        return new SkipUntilSpliterator<>(source, condition, true);
    }
    private final Spliterator<T> source;
    private final Predicate<T> condition;
    private final boolean inclusive;
    // Becomes true once the condition has matched; from then on we delegate.
    private boolean conditionMet = false;
    private SkipUntilSpliterator(Spliterator<T> source, Predicate<T> condition, boolean inclusive) {
        this.source = source;
        this.condition = condition;
        this.inclusive = inclusive;
    }
    @Override
    public boolean tryAdvance(Consumer<? super T> action) {
        if (conditionMet) {
            return source.tryAdvance(action);
        }
        // Consume from the source until the condition first matches; the loop
        // body lives in the lambda, hence the empty while statement.
        // NOTE(review): in inclusive mode this returns true without invoking
        // the action during this call, which bends the tryAdvance contract --
        // confirm callers only reach it via forEachRemaining/stream pipelines.
        while (!conditionMet && source.tryAdvance(e -> {
            if (condition.test(e)) {
                if (!inclusive) {
                    action.accept(e);
                }
                conditionMet = true;
            }
        }));
        return conditionMet;
    }
    @Override
    public void forEachRemaining(Consumer<? super T> action) {
        if (!conditionMet) {
            // Advance past the skipped prefix first.
            tryAdvance(action);
        }
        if (conditionMet) {
            source.forEachRemaining(action);
        }
    }
    @Override
    public Spliterator<T> trySplit() {
        // Splitting is not supported; the skip prefix is inherently sequential.
        return null;
    }
    @Override
    public long estimateSize() {
        // Unknown until the condition matches: we cannot tell how many
        // elements will be skipped.
        return conditionMet ? source.estimateSize() : Long.MAX_VALUE;
    }
    @Override
    public int characteristics() {
        // The exact size is no longer known, so SIZED must be stripped.
        return source.characteristics() &~ Spliterator.SIZED;
    }
    @Override
    public Comparator<? super T> getComparator() {
        return source.getComparator();
    }
}
| 834 |
4,879 | #include "testing/testing.hpp"
#include "geometry/convex_hull.hpp"
#include "geometry/point2d.hpp"
#include <vector>
using namespace m2;
using namespace std;
namespace
{
// Tolerance handed to ConvexHull for near-collinear point handling.
double const kEps = 1e-12;

// Convenience wrapper: builds the hull and returns its vertex polygon.
vector<PointD> BuildConvexHull(vector<PointD> const & points)
{
  return ConvexHull(points, kEps).Points();
}

UNIT_TEST(ConvexHull_Smoke)
{
  // Degenerate inputs: empty set, single point, duplicated points, and
  // collinear points all collapse to the minimal vertex set.
  TEST_EQUAL(BuildConvexHull({}), vector<PointD>{}, ());
  TEST_EQUAL(BuildConvexHull({PointD(0, 0)}), vector<PointD>{PointD(0, 0)}, ());
  TEST_EQUAL(BuildConvexHull({PointD(0, 0), PointD(0, 0)}), vector<PointD>{PointD(0, 0)}, ());
  TEST_EQUAL(BuildConvexHull({PointD(0, 0), PointD(1, 1), PointD(0, 0)}),
             vector<PointD>({PointD(0, 0), PointD(1, 1)}), ());
  TEST_EQUAL(BuildConvexHull({PointD(0, 0), PointD(1, 1), PointD(2, 2)}),
             vector<PointD>({PointD(0, 0), PointD(2, 2)}), ());
  {
    // A dense axis-aligned grid: the hull must be exactly its four corners.
    int const kXMax = 100;
    int const kYMax = 200;
    vector<PointD> points;
    for (int x = 0; x <= kXMax; ++x)
    {
      for (int y = 0; y <= kYMax; ++y)
        points.emplace_back(x, y);
    }
    TEST_EQUAL(BuildConvexHull(points), vector<PointD>({PointD(0, 0), PointD(kXMax, 0),
                                                        PointD(kXMax, kYMax), PointD(0, kYMax)}),
               ());
  }
  // An interior point (3, 3) must be excluded from the hull.
  TEST_EQUAL(
      BuildConvexHull({PointD(0, 0), PointD(0, 5), PointD(10, 5), PointD(3, 3), PointD(10, 0)}),
      vector<PointD>({PointD(0, 0), PointD(10, 0), PointD(10, 5), PointD(0, 5)}), ());
}
}  // namespace
365 | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
class SPP(nn.Module):
    """
    Spatial Pyramid Pooling in Deep Convolutional Networks.

    Applies three parallel stride-1 max-pool layers with different kernel
    sizes and concatenates their outputs with the input along the channel
    axis, so output channels = 4 * input channels.
    """
    def __init__(self, kernel_size1, kernel_size2, kernel_size3):
        super().__init__()
        # stride=1 with kernel//2 padding keeps the spatial size unchanged.
        self.pool1 = nn.MaxPool2d(kernel_size1, stride=1, padding=kernel_size1 // 2)
        self.pool2 = nn.MaxPool2d(kernel_size2, stride=1, padding=kernel_size2 // 2)
        self.pool3 = nn.MaxPool2d(kernel_size3, stride=1, padding=kernel_size3 // 2)

    def forward(self, x):
        pooled = [pool(x) for pool in (self.pool1, self.pool2, self.pool3)]
        return torch.cat([x] + pooled, dim=1)
| 450 |
345 | <reponame>cdoebler1/AIML2
import unittest
from unittest.mock import patch
import programytest.storage.engines as Engines
from programy.storage.stores.nosql.mongo.config import MongoStorageConfiguration
from programy.storage.stores.nosql.mongo.engine import MongoStorageEngine
from programy.storage.stores.nosql.mongo.store.usergroups import MongoUserGroupsStore
from programytest.storage.asserts.store.assert_usergroups import UserGroupsStoreAsserts
class MongoUserGroupsStoreTests(UserGroupsStoreAsserts):
    # Integration tests for MongoUserGroupsStore; each test is skipped when
    # no Mongo engine is available in the test environment.

    @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
    def test_initialise(self):
        # The store must remember the engine it was constructed with.
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoUserGroupsStore(engine)
        self.assertEqual(store.storage_engine, engine)

    @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
    def test_upload_from_file(self):
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoUserGroupsStore(engine)
        self.assert_upload_from_file(store)

    # Not a test: replacement body patched in below to force a read failure.
    def patch_read_yaml_from_file(self, filename):
        raise Exception("Mock Exception")

    @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
    @patch("programy.storage.stores.nosql.mongo.store.usergroups.MongoUserGroupsStore._read_yaml_from_file",patch_read_yaml_from_file)
    def test_upload_from_file_exception(self):
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoUserGroupsStore(engine)
        self.assert_upload_from_file_exception(store)

    @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
    def test_upload_from_file_no_collection(self):
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoUserGroupsStore(engine)
        self.assert_upload_from_file_no_collection(store)

    # Not a test: replacement body that simulates a failed document insert.
    def patch_add_document(self, document):
        return False

    @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
    @patch("programy.storage.stores.nosql.mongo.store.mongostore.MongoStore.add_document", patch_add_document)
    def test_upload_from_file_add_document_false(self):
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoUserGroupsStore(engine)
        self.assert_upload_from_file_exception(store)
| 959 |
371 | #include "../stereokit.h"
#include "../sk_memory.h"
#include "mesh.h"
#include "assets.h"
#include <stdio.h>
#include <string.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <float.h>
using namespace DirectX;
namespace sk {
///////////////////////////////////////////
// Controls whether the CPU-side copy of vertex/index data is retained.
// Skinned meshes need their source data for CPU-side deformation, so the
// request is ignored (with a warning) when the mesh has skin data.
void mesh_set_keep_data(mesh_t mesh, bool32_t keep_data) {
	if (mesh_has_skin(mesh) && !keep_data) {
		log_warn("Skinned meshes must keep their data, ignoring mesh_set_keep_data call.");
		return;
	}
	mesh->discard_data = !keep_data;
	if (mesh->discard_data) {
		// Release the CPU copies right away; GPU buffers are unaffected.
		free(mesh->verts); mesh->verts = nullptr;
		free(mesh->inds ); mesh->inds = nullptr;
	}
}
///////////////////////////////////////////
// Reports whether the mesh keeps a CPU-side copy of its data; this is
// simply the inverse of the internal discard flag.
bool32_t mesh_get_keep_data(mesh_t mesh) {
	return mesh->discard_data == false;
}
///////////////////////////////////////////
// Uploads vertex data to the GPU and (unless data is discarded) mirrors it
// on the CPU. The GPU buffer starts static and is promoted to a dynamic
// buffer on the first re-upload or whenever it needs to grow.
// update_original=false lets skinning upload deformed verts without
// overwriting the bind-pose CPU copy.
void _mesh_set_verts(mesh_t mesh, const vert_t *vertices, int32_t vertex_count, bool32_t calculate_bounds, bool update_original) {
	// Keep track of vertex data for use on CPU side
	if (!mesh->discard_data && update_original) {
		if (mesh->vert_capacity < vertex_count)
			mesh->verts = sk_realloc_t(vert_t, mesh->verts, vertex_count);
		memcpy(mesh->verts, vertices, sizeof(vert_t) * vertex_count);
	}
	if (!skg_buffer_is_valid( &mesh->vert_buffer )) {
		// Create a static vertex buffer the first time we call this function!
		mesh->vert_dynamic = false;
		mesh->vert_capacity = vertex_count;
		mesh->vert_buffer = skg_buffer_create(vertices, vertex_count, sizeof(vert_t), skg_buffer_type_vertex, skg_use_static);
		if (!skg_buffer_is_valid(&mesh->vert_buffer))
			log_err("mesh_set_verts: Failed to create vertex buffer");
		skg_mesh_set_verts(&mesh->gpu_mesh, &mesh->vert_buffer);
	} else if (mesh->vert_dynamic == false || vertex_count > mesh->vert_capacity) {
		// If they call this a second time, or they need more verts than will
		// fit in this buffer, lets make a new dynamic buffer!
		skg_buffer_destroy(&mesh->vert_buffer);
		mesh->vert_dynamic = true;
		mesh->vert_capacity = vertex_count;
		mesh->vert_buffer = skg_buffer_create(vertices, vertex_count, sizeof(vert_t), skg_buffer_type_vertex, skg_use_dynamic);
		if (!skg_buffer_is_valid(&mesh->vert_buffer))
			log_err("mesh_set_verts: Failed to create dynamic vertex buffer");
		skg_mesh_set_verts(&mesh->gpu_mesh, &mesh->vert_buffer);
	} else {
		// And if they call this a third time, or their verts fit in the same
		// buffer, just copy things over!
		skg_buffer_set_contents(&mesh->vert_buffer, vertices, sizeof(vert_t)*vertex_count);
	}
	mesh->vert_count = vertex_count;
	// Calculate the bounds for this mesh by searching it for min and max values!
	if (calculate_bounds && vertex_count > 0) {
		vec3 min = vertices[0].pos;
		vec3 max = vertices[0].pos;
		for (int32_t i = 1; i < vertex_count; i++) {
			min.x = fminf(vertices[i].pos.x, min.x);
			min.y = fminf(vertices[i].pos.y, min.y);
			min.z = fminf(vertices[i].pos.z, min.z);
			max.x = fmaxf(vertices[i].pos.x, max.x);
			max.y = fmaxf(vertices[i].pos.y, max.y);
			max.z = fmaxf(vertices[i].pos.z, max.z);
		}
		// Center is min/2 + max/2 (written this way to avoid overflow of min+max).
		mesh->bounds = bounds_t{ min / 2 + max / 2, max - min };
	}
}
///////////////////////////////////////////
// Public wrapper over _mesh_set_verts that always updates the CPU-side copy.
void mesh_set_verts(mesh_t mesh, const vert_t *vertices, int32_t vertex_count, bool32_t calculate_bounds) {
	_mesh_set_verts(mesh, vertices, vertex_count, calculate_bounds, true);
}
///////////////////////////////////////////
// Hands back the CPU-side vertex copy; the count is zero when no copy is
// kept (data was discarded or never set).
void mesh_get_verts(mesh_t mesh, vert_t *&out_vertices, int32_t &out_vertex_count) {
	out_vertices = mesh->verts;
	out_vertex_count = (out_vertices == nullptr) ? 0 : mesh->vert_count;
}
///////////////////////////////////////////
// Uploads index data to the GPU and (unless data is discarded) mirrors it on
// the CPU. Indices must form whole triangles. Same static-then-dynamic
// buffer lifecycle as _mesh_set_verts. Also resets ind_draw to draw all.
void mesh_set_inds (mesh_t mesh, const vind_t *indices, int32_t index_count) {
	if (index_count % 3 != 0) {
		log_err("mesh_set_inds index_count must be a multiple of 3!");
		return;
	}
	// Keep track of index data for use on CPU side
	if (!mesh->discard_data) {
		if (mesh->ind_capacity < index_count)
			mesh->inds = sk_realloc_t(vind_t, mesh->inds, index_count);
		memcpy(mesh->inds, indices, sizeof(vind_t) * index_count);
	}
	if (!skg_buffer_is_valid( &mesh->ind_buffer )) {
		// Create a static vertex buffer the first time we call this function!
		mesh->ind_dynamic = false;
		mesh->ind_capacity = index_count;
		mesh->ind_buffer = skg_buffer_create(indices, index_count, sizeof(vind_t), skg_buffer_type_index, skg_use_static);
		if (!skg_buffer_is_valid( &mesh->ind_buffer ))
			log_err("mesh_set_inds: Failed to create index buffer");
		skg_mesh_set_inds(&mesh->gpu_mesh, &mesh->ind_buffer);
	} else if (mesh->ind_dynamic == false || index_count > mesh->ind_capacity) {
		// If they call this a second time, or they need more inds than will
		// fit in this buffer, lets make a new dynamic buffer!
		skg_buffer_destroy(&mesh->ind_buffer);
		mesh->ind_dynamic = true;
		mesh->ind_capacity = index_count;
		mesh->ind_buffer = skg_buffer_create(indices, index_count, sizeof(vind_t), skg_buffer_type_index, skg_use_dynamic);
		if (!skg_buffer_is_valid( &mesh->ind_buffer ))
			log_err("mesh_set_inds: Failed to create dynamic index buffer");
		skg_mesh_set_inds(&mesh->gpu_mesh, &mesh->ind_buffer);
	} else {
		// And if they call this a third time, or their inds fit in the same
		// buffer, just copy things over!
		skg_buffer_set_contents(&mesh->ind_buffer, indices, sizeof(vind_t) * index_count);
	}
	mesh->ind_count = index_count;
	mesh->ind_draw = index_count;
}
///////////////////////////////////////////
// Hands back the CPU-side index copy; the count is zero when no copy is kept.
void mesh_get_inds(mesh_t mesh, vind_t *&out_indices, int32_t &out_index_count) {
	out_indices = mesh->inds;
	out_index_count = (out_indices == nullptr) ? 0 : mesh->ind_count;
}
///////////////////////////////////////////
// Computes smooth per-vertex normals as the area-weighted average of the
// normals of all faces sharing each vertex. ind_count must be a multiple
// of 3 (whole triangles); verts/inds are modified in place.
void mesh_calculate_normals(vert_t *verts, int32_t vert_count, const vind_t *inds, int32_t ind_count) {
	// Use int32_t loop indices to match the count parameters; the previous
	// size_t indices caused signed/unsigned comparisons in each loop condition.
	for (int32_t i = 0; i < vert_count; i++) verts[i].norm = vec3_zero;
	for (int32_t i = 0; i < ind_count; i+=3) {
		vert_t *v1 = &verts[inds[i ]];
		vert_t *v2 = &verts[inds[i+1]];
		vert_t *v3 = &verts[inds[i+2]];
		// Length of cross product is twice the area of the triangle it's
		// from, so if we don't 'normalize' it, then we get trangle area
		// weighting on our normals for free!
		vec3 normal = vec3_cross(v3->pos - v2->pos, v1->pos - v2->pos);
		v1->norm += normal;
		v2->norm += normal;
		v3->norm += normal;
	}
	for (int32_t i = 0; i < vert_count; i++) verts[i].norm = vec3_normalize(verts[i].norm);
}
///////////////////////////////////////////
// Limits how many indices are submitted at draw time; the value can never
// exceed the number of indices the mesh actually owns.
void mesh_set_draw_inds(mesh_t mesh, int32_t index_count) {
	if (index_count > mesh->ind_count) {
		log_warn("mesh_set_draw_inds: Can't render more indices than the mesh has! Capping...");
		index_count = mesh->ind_count;
	}
	mesh->ind_draw = index_count;
}
///////////////////////////////////////////
// Overrides the mesh's bounding volume (normally computed from vertices).
void mesh_set_bounds(mesh_t mesh, const bounds_t &bounds) {
	mesh->bounds = bounds;
}

///////////////////////////////////////////

// Returns the mesh's current bounding volume.
bounds_t mesh_get_bounds(mesh_t mesh) {
	return mesh->bounds;
}

// A mesh is considered skinned once bone id data has been attached.
bool32_t mesh_has_skin(mesh_t mesh) {
	return mesh->skin_data.bone_ids != nullptr;
}
///////////////////////////////////////////
// Shared setup for mesh_set_skin/mesh_set_skin_inv: validates counts,
// copies bone ids/weights, and allocates scratch buffers for CPU skinning.
// Returns false (and allocates nothing) on validation failure.
// NOTE(review): calling this twice on the same mesh overwrites the
// previously allocated skin arrays without freeing them -- confirm callers
// only skin a mesh once.
bool _mesh_set_skin(mesh_t mesh, const uint16_t *bone_ids_4, int32_t bone_id_4_count, const vec4 *bone_weights, int32_t bone_weight_count, int32_t bone_count) {
	if (mesh->discard_data) {
		log_err("mesh_set_skin: can't work with a mesh that doesn't keep data, ensure mesh_get_keep_data() is true");
		return false;
	}
	if (bone_weight_count != bone_id_4_count || bone_weight_count != mesh->vert_count) {
		log_err("mesh_set_skin: bone_weights, bone_ids_4 and vertex counts must match exactly");
		return false;
	}
	// Four bone ids and one weight vector per vertex, plus a scratch copy of
	// the verts that receives the deformed positions each update.
	mesh->skin_data.bone_ids = sk_malloc_t(uint16_t, bone_id_4_count * 4);
	mesh->skin_data.weights = sk_malloc_t(vec4, bone_weight_count);
	mesh->skin_data.deformed_verts = sk_malloc_t(vert_t, mesh->vert_count);
	memcpy(mesh->skin_data.bone_ids, bone_ids_4, sizeof(uint16_t) * bone_id_4_count * 4);
	memcpy(mesh->skin_data.weights, bone_weights, sizeof(vec4) * bone_weight_count);
	memcpy(mesh->skin_data.deformed_verts, mesh->verts, sizeof(vert_t) * mesh->vert_count);
	mesh->skin_data.bone_inverse_transforms = sk_malloc_t(matrix, bone_count);
	mesh->skin_data.bone_transforms = sk_malloc_t(XMMATRIX, bone_count);
	memset(mesh->skin_data.bone_inverse_transforms, 0, sizeof(matrix ) * bone_count);
	memset(mesh->skin_data.bone_transforms, 0, sizeof(XMMATRIX) * bone_count);
	mesh->skin_data.bone_count = bone_count;
	return true;
}
///////////////////////////////////////////
// Attaches skinning data, taking resting-pose bone transforms and inverting
// them here (see mesh_set_skin_inv to pass pre-inverted matrices).
void mesh_set_skin(mesh_t mesh, const uint16_t *bone_ids_4, int32_t bone_id_4_count, const vec4 *bone_weights, int32_t bone_weight_count, const matrix *bone_resting_transforms, int32_t bone_count) {
	if (_mesh_set_skin(mesh, bone_ids_4, bone_id_4_count, bone_weights, bone_weight_count, bone_count)) {
		for (int32_t i = 0; i < bone_count; i++) {
			mesh->skin_data.bone_inverse_transforms[i] = matrix_invert(bone_resting_transforms[i]);
		}
	}
}
///////////////////////////////////////////
// Same as mesh_set_skin, but the caller supplies already-inverted
// resting-pose transforms, skipping the per-bone matrix_invert.
void mesh_set_skin_inv(mesh_t mesh, const uint16_t *bone_ids_4, int32_t bone_id_4_count, const vec4 *bone_weights, int32_t bone_weight_count, const matrix *bone_resting_transforms_inverted, int32_t bone_count) {
	if (_mesh_set_skin(mesh, bone_ids_4, bone_id_4_count, bone_weights, bone_weight_count, bone_count)) {
		memcpy(mesh->skin_data.bone_inverse_transforms, bone_resting_transforms_inverted, sizeof(matrix) * bone_count);
	}
}
///////////////////////////////////////////
// CPU skinning: blends each vertex's position/normal by up to four bone
// transforms (weighted by skin_data.weights) and re-uploads the deformed
// verts to the GPU without touching the bind-pose CPU copy.
void mesh_update_skin(mesh_t mesh, const matrix *bone_transforms, int32_t bone_count) {
	// Combine inverse resting pose with the current pose for each bone.
	for (int32_t i = 0; i < bone_count; i++) {
		math_matrix_to_fast(mesh->skin_data.bone_inverse_transforms[i] * bone_transforms[i], &mesh->skin_data.bone_transforms[i]);
	}
	for (int32_t i = 0; i < mesh->vert_count; i++) {
		XMVECTOR pos = XMLoadFloat3((XMFLOAT3 *)&mesh->verts[i].pos);
		XMVECTOR norm = XMLoadFloat3((XMFLOAT3 *)&mesh->verts[i].norm);
		const uint16_t *bones = &mesh->skin_data.bone_ids[i*4];
		const vec4 weights = mesh->skin_data.weights [i];
		// The first influence is always applied; the remaining three are
		// skipped when their weight is zero.
		XMVECTOR new_pos = XMVectorScale(XMVector3Transform (pos, mesh->skin_data.bone_transforms[bones[0]]), weights.x);
		XMVECTOR new_norm = XMVectorScale(XMVector3TransformNormal(norm, mesh->skin_data.bone_transforms[bones[0]]), weights.x);
		if (weights.y != 0) {
			new_pos = XMVectorAdd(XMVectorScale(XMVector3Transform (pos, mesh->skin_data.bone_transforms[bones[1]]), weights.y), new_pos);
			new_norm = XMVectorAdd(XMVectorScale(XMVector3TransformNormal(norm, mesh->skin_data.bone_transforms[bones[1]]), weights.y), new_norm);
		}
		if (weights.z != 0) {
			new_pos = XMVectorAdd(XMVectorScale(XMVector3Transform (pos, mesh->skin_data.bone_transforms[bones[2]]), weights.z), new_pos);
			new_norm = XMVectorAdd(XMVectorScale(XMVector3TransformNormal(norm, mesh->skin_data.bone_transforms[bones[2]]), weights.z), new_norm);
		}
		if (weights.w != 0) {
			new_pos = XMVectorAdd(XMVectorScale(XMVector3Transform (pos, mesh->skin_data.bone_transforms[bones[3]]), weights.w), new_pos);
			new_norm = XMVectorAdd(XMVectorScale(XMVector3TransformNormal(norm, mesh->skin_data.bone_transforms[bones[3]]), weights.w), new_norm);
		}
		XMStoreFloat3((DirectX::XMFLOAT3 *)&mesh->skin_data.deformed_verts[i].pos, new_pos );
		XMStoreFloat3((DirectX::XMFLOAT3 *)&mesh->skin_data.deformed_verts[i].norm, new_norm);
	}
	// update_original=false: upload deformed verts, keep bind pose on CPU.
	_mesh_set_verts(mesh, mesh->skin_data.deformed_verts, mesh->vert_count, false, false);
}
///////////////////////////////////////////
// Looks up a mesh asset by id; a successful find hands the caller a new
// reference, so the result must eventually be released.
mesh_t mesh_find(const char *id) {
	mesh_t found = (mesh_t)assets_find(id, asset_type_mesh);
	if (found == nullptr)
		return nullptr;
	mesh_addref(found);
	return found;
}
///////////////////////////////////////////
// Assigns a findable id to this mesh asset (see mesh_find).
void mesh_set_id(mesh_t mesh, const char *id) {
	assets_set_id(mesh->header, id);
}

///////////////////////////////////////////

// Increments the asset's reference count.
void mesh_addref(mesh_t mesh) {
	assets_addref(mesh->header);
}
///////////////////////////////////////////
// Allocates an empty mesh asset with a fresh (empty) GPU mesh attached.
mesh_t mesh_create() {
	mesh_t result = (_mesh_t*)assets_allocate(asset_type_mesh);
	result->gpu_mesh = skg_mesh_create(nullptr, nullptr);
	return result;
}
///////////////////////////////////////////
// Deep-copies a mesh: bounds, indices, vertices, and (when present) skin
// data. Bounds are not recomputed since they're copied directly.
mesh_t mesh_copy(mesh_t mesh) {
	if (mesh == nullptr) {
		log_err("mesh_copy was provided a null mesh!");
		return nullptr;
	}
	mesh_t result = (mesh_t)assets_allocate(asset_type_mesh);
	result->bounds = mesh->bounds;
	result->discard_data = mesh->discard_data;
	result->ind_draw = mesh->ind_draw;
	if (mesh->discard_data) {
		// Without a CPU copy there's nothing to duplicate from here.
		log_err("mesh_copy not yet implemented for meshes with discard data set!");
	} else {
		mesh_set_inds (result, mesh->inds, mesh->ind_count);
		mesh_set_verts(result, mesh->verts, mesh->vert_count, false);
		if (mesh_has_skin(mesh))
			// Bone ids/weights are per-vertex, hence vert_count for both counts.
			mesh_set_skin_inv(result, mesh->skin_data.bone_ids, mesh->vert_count, mesh->skin_data.weights, mesh->vert_count, mesh->skin_data.bone_inverse_transforms, mesh->skin_data.bone_count);
	}
	return result;
}
///////////////////////////////////////////
// Lazily builds (and caches) per-triangle collision data: unrolled triangle
// points plus one plane per triangle. Returns nullptr when the mesh has
// discarded its CPU data and the cache was never built.
const mesh_collision_t *mesh_get_collision_data(mesh_t mesh) {
	if (mesh->collision_data.pts != nullptr)
		return &mesh->collision_data;
	if (mesh->discard_data)
		return nullptr;
	mesh_collision_t &coll = mesh->collision_data;
	coll.pts = sk_malloc_t(vec3 , mesh->ind_count);
	coll.planes = sk_malloc_t(plane_t, mesh->ind_count/3);
	for (int32_t i = 0; i < mesh->ind_count; i++) coll.pts[i] = mesh->verts[mesh->inds[i]].pos;
	for (int32_t i = 0; i < mesh->ind_count; i += 3) {
		vec3 dir1 = coll.pts[i+1] - coll.pts[i];
		vec3 dir2 = coll.pts[i+1] - coll.pts[i+2];
		vec3 normal = vec3_normalize( vec3_cross(dir2, dir1) );
		// Plane in n.x + d = 0 form, so d = -dot(point_on_plane, n).
		plane_t plane = { normal, -vec3_dot(coll.pts[i + 1], normal) };
		coll.planes[i/3] = plane;
	}
	return &mesh->collision_data;
}
///////////////////////////////////////////
// Drops one reference to the mesh; a null mesh is a harmless no-op.
void mesh_release(mesh_t mesh) {
	if (mesh != nullptr)
		assets_releaseref(mesh->header);
}
///////////////////////////////////////////
// Frees all GPU buffers, CPU copies, and cached collision data, then zeroes
// the struct so stale pointers can't be reused.
void mesh_destroy(mesh_t mesh) {
	skg_mesh_destroy (&mesh->gpu_mesh);
	skg_buffer_destroy(&mesh->vert_buffer);
	skg_buffer_destroy(&mesh->ind_buffer);
	free(mesh->verts);
	free(mesh->inds);
	free(mesh->collision_data.pts );
	free(mesh->collision_data.planes);
	*mesh = {};
}
///////////////////////////////////////////
// Queues the mesh for rendering this frame with the given material,
// model transform, linear-space color tint, and render layer. This is a
// thin forward to the render system's submission queue.
void mesh_draw(mesh_t mesh, material_t material, matrix transform, color128 color_linear, render_layer_ layer) {
render_add_mesh(mesh, material, transform, color_linear, layer);
}
///////////////////////////////////////////
// Intersects a ray (in the mesh's model space) against every triangle of the
// mesh, returning the nearest hit. On success, out_pt receives the hit
// position and the triangle's plane normal. Returns false when collision
// data is unavailable, the ray misses the bounds, or no triangle is hit.
bool32_t mesh_ray_intersect(mesh_t mesh, ray_t model_space_ray, ray_t *out_pt) {
vec3 result = {};
const mesh_collision_t *data = mesh_get_collision_data(mesh);
if (data == nullptr)
return false;
// Cheap early-out: skip the per-triangle loop if the bounds are missed.
if (!bounds_ray_intersect(mesh->bounds, model_space_ray, &result))
return false;
vec3 pt = {};
float nearest_dist = FLT_MAX;
for (int32_t i = 0; i < mesh->ind_count; i+=3) {
// Intersect with the triangle's plane first; skip if the ray is parallel
// or the plane is behind the ray.
if (!plane_ray_intersect(data->planes[i / 3], model_space_ray, &pt))
continue;
// point in triangle, implementation based on:
// https://blackpawn.com/texts/pointinpoly/default.html
// Compute vectors
vec3 v0 = data->pts[i+1] - data->pts[i];
vec3 v1 = data->pts[i+2] - data->pts[i];
vec3 v2 = pt - data->pts[i];
// Compute dot products
float dot00 = vec3_dot(v0, v0);
float dot01 = vec3_dot(v0, v1);
float dot02 = vec3_dot(v0, v2);
float dot11 = vec3_dot(v1, v1);
float dot12 = vec3_dot(v1, v2);
// Compute barycentric coordinates
float inv_denom = 1.0f / (dot00 * dot11 - dot01 * dot01);
float u = (dot11 * dot02 - dot01 * dot12) * inv_denom;
float v = (dot00 * dot12 - dot01 * dot02) * inv_denom;
// Check if point is in triangle
if ((u >= 0) && (v >= 0) && (u + v < 1)) {
// Squared distance is enough for nearest-hit comparison (avoids sqrt).
float dist = vec3_magnitude_sq(pt - model_space_ray.pos);
if (dist < nearest_dist) {
nearest_dist = dist;
*out_pt = {pt, data->planes[i / 3].normal};
}
}
}
// nearest_dist only changed if some triangle was actually hit.
return nearest_dist != FLT_MAX;
}
///////////////////////////////////////////
// Decodes corner index i (0..23, four corners per face across six faces) of
// an axis-aligned box into a position, face normal, and UV. `size` holds the
// half-extents of the box. The arithmetic below packs face selection and
// corner winding into integer/modulo tricks; the exact corner ordering is
// whatever the generator loops below rely on — treat as an internal contract.
void mesh_gen_cube_vert(int i, const vec3 &size, vec3 &pos, vec3 &norm, vec2 &uv) {
// Sign of the face normal flips every 4 corners (i.e. per face pair).
float neg = (float)((i / 4) % 2 ? -1 : 1);
// Which axis this face's normal points along: exactly one of nx/ny/nz is 1.
int nx = ((i+24) / 16) % 2;
int ny = (i / 8) % 2;
int nz = (i / 16) % 2;
int u = ((i+1) / 2) % 2; // U: 0,1,1,0
int v = (i / 2) % 2; // V: 0,0,1,1
uv = { (float)u, 1.0f-(float)v };
norm = { nx*neg, ny*neg, nz*neg };
// Corner position: for each axis, pick +/- half-extent based on which face
// we're on and where the corner sits within that face.
pos = {
size.x * (nx ? neg : ny ? (u?-1:1)*neg : (u?1:-1)*neg),
size.y * (nx || nz ? (v?1:-1) : neg),
size.z * (nx ? (u?-1:1)*neg : ny ? (v?1:-1) : neg)
};
}
///////////////////////////////////////////
// Generates a subdivided rectangular plane centered at the origin.
// `plane_normal` is the surface normal, `plane_top_direction` orients the V
// axis, and `subdivisions` adds interior vertices (grid is (subd+2)^2
// vertices after the +2 below). Returns a new mesh asset owned by the caller.
mesh_t mesh_gen_plane(vec2 dimensions, vec3 plane_normal, vec3 plane_top_direction, int32_t subdivisions) {
vind_t subd = (vind_t)subdivisions;
mesh_t result = mesh_create();
// Clamp negatives to zero, then +2 so there are always edge vertices.
subd = maxi(0,(int32_t)subd) + 2;
int vert_count = subd*subd;
int ind_count = 6*(subd-1)*(subd-1);
vert_t *verts = sk_malloc_t(vert_t, vert_count);
vind_t *inds = sk_malloc_t(vind_t, ind_count );
// Build a local basis on the plane from the normal and the "top" hint.
vec3 right = vec3_cross(plane_top_direction, plane_normal);
vec3 up = vec3_cross(right, plane_normal);
// Make vertices
for (vind_t y = 0; y < subd; y++) {
float yp = y / (float)(subd-1);
for (vind_t x = 0; x < subd; x++) {
float xp = x / (float)(subd-1);
// Position spans [-0.5, 0.5] * dimensions in the plane's basis.
verts[x + y*subd] = vert_t{
right * ((xp - 0.5f) * dimensions.x) +
up * ((yp - 0.5f) * dimensions.y),
plane_normal, {xp,yp}, {255,255,255,255} };
} }
// make indices: two triangles per grid cell
int ind = 0;
for (vind_t y = 0; y < subd-1; y++) {
for (vind_t x = 0; x < subd-1; x++) {
inds[ind++] = (x+1) + (y+1) * subd;
inds[ind++] = (x+1) + y * subd;
inds[ind++] = x + y * subd;
inds[ind++] = x + (y+1) * subd;
inds[ind++] = (x+1) + (y+1) * subd;
inds[ind++] = x + y * subd;
} }
mesh_set_verts(result, verts, vert_count);
mesh_set_inds (result, inds, ind_count);
// mesh_set_* copied the data to the mesh, the local buffers can go.
free(verts);
free(inds);
return result;
}
///////////////////////////////////////////
// Generates an axis-aligned box centered at the origin with the given full
// dimensions. Each of the 6 faces is an independent (subd x subd) vertex grid
// so normals/UVs stay per-face. Returns a new mesh asset owned by the caller.
mesh_t mesh_gen_cube(vec3 dimensions, int32_t subdivisions) {
vind_t subd = (vind_t)subdivisions;
mesh_t result = mesh_create();
// Clamp negatives to zero, then +2 so each face has at least its corners.
subd = maxi((int32_t)0,(int32_t)subd) + 2;
int vert_count = 6*subd*subd;
int ind_count = 6*(subd-1)*(subd-1)*6;
vert_t *verts = sk_malloc_t(vert_t, vert_count);
vind_t *inds = sk_malloc_t(vind_t, ind_count);
// Corner generator works with half-extents.
vec3 size = dimensions / 2;
int ind = 0;
vind_t offset = 0;
// 6 faces, 4 corners each; interpolate each face's grid from its corners.
for (vind_t i = 0; i < 6*4; i+=4) {
vec3 p1, p2, p3, p4;
vec3 n1, n2, n3, n4;
vec2 u1, u2, u3, u4;
mesh_gen_cube_vert(i, size, p1, n1, u1);
mesh_gen_cube_vert(i+1, size, p2, n2, u2);
mesh_gen_cube_vert(i+2, size, p3, n3, u3);
mesh_gen_cube_vert(i+3, size, p4, n4, u4);
// Each face writes into its own subd*subd slice of the vertex array.
offset = (i/4) * (subd)*(subd);
for (vind_t y = 0; y < subd; y++) {
float py = y / (float)(subd-1);
vind_t yOff = offset + y * subd;
vind_t yOffN = offset + (y+1) * subd;
// Bilinear interpolation: first along the face's vertical edges...
vec3 pl = vec3_lerp(p1, p4, py);
vec3 pr = vec3_lerp(p2, p3, py);
vec3 nl = vec3_lerp(n1, n4, py);
vec3 nr = vec3_lerp(n2, n3, py);
vec2 ul = vec2_lerp(u1, u4, py);
vec2 ur = vec2_lerp(u2, u3, py);
for (vind_t x = 0; x < subd; x++) {
float px = x / (float)(subd-1);
vind_t ptIndex = x + yOff;
vert_t *pt = &verts[ptIndex];
// ...then across, between the left/right interpolants.
pt->pos = vec3_lerp(pl, pr, px);
pt->norm= vec3_lerp(nl, nr, px);
pt->uv = vec2_lerp(ul, ur, px);
pt->col = {255,255,255,255};
// Two triangles per grid cell, skipping the last row/column.
if (y != subd-1 && x != subd-1) {
inds[ind++] = (x ) + yOff;
inds[ind++] = (x+1) + yOff;
inds[ind++] = (x+1) + yOffN;
inds[ind++] = (x ) + yOff;
inds[ind++] = (x+1) + yOffN;
inds[ind++] = (x ) + yOffN;
}
}
}
}
mesh_set_verts(result, verts, vert_count);
mesh_set_inds (result, inds, ind_count);
// Data has been copied into the mesh; release the local buffers.
free(verts);
free(inds);
return result;
}
///////////////////////////////////////////
// Generates a UV-less "cube sphere": builds a subdivided unit cube, then
// normalizes every vertex onto the sphere of the given diameter. This gives
// more uniform triangles than a lat/long sphere. Returns a new mesh asset.
mesh_t mesh_gen_sphere(float diameter, int32_t subdivisions) {
vind_t subd = (vind_t)subdivisions;
mesh_t result = mesh_create();
// Clamp negatives to zero, then +2 so each face has at least its corners.
subd = maxi(0,(int32_t)subd) + 2;
int vert_count = 6*subd*subd;
int ind_count = 6*(subd-1)*(subd-1)*6;
vert_t *verts = sk_malloc_t(vert_t, vert_count);
vind_t *inds = sk_malloc_t(vind_t, ind_count);
// Start from a unit cube; positions get projected onto the sphere below.
vec3 size = vec3_one;
float radius = diameter / 2;
int ind = 0;
vind_t offset = 0;
for (vind_t i = 0; i < 6*4; i+=4) {
vec3 p1, p2, p3, p4;
vec3 n1, n2, n3, n4;
vec2 u1, u2, u3, u4;
mesh_gen_cube_vert(i, size, p1, n1, u1);
mesh_gen_cube_vert(i+1, size, p2, n2, u2);
mesh_gen_cube_vert(i+2, size, p3, n3, u3);
mesh_gen_cube_vert(i+3, size, p4, n4, u4);
offset = (i/4) * (subd)*(subd);
for (vind_t y = 0; y < subd; y++) {
float py = y / (float)(subd-1);
vind_t yOff = offset + y * subd;
vind_t yOffN = offset + (y+1) * subd;
vec3 pl = vec3_lerp(p1, p4, py);
vec3 pr = vec3_lerp(p2, p3, py);
vec2 ul = vec2_lerp(u1, u4, py);
vec2 ur = vec2_lerp(u2, u3, py);
for (vind_t x = 0; x < subd; x++) {
float px = x / (float)(subd-1);
vind_t ptIndex = x + yOff;
vert_t *pt = &verts[ptIndex];
// Normalizing the cube-face point projects it onto the unit sphere;
// that direction doubles as the surface normal.
pt->norm= vec3_normalize(vec3_lerp(pl, pr, px));
pt->pos = pt->norm*radius;
pt->uv = vec2_lerp(ul, ur, px);
pt->col = {255,255,255,255};
// Two triangles per grid cell, skipping the last row/column.
if (y != subd-1 && x != subd-1) {
inds[ind++] = (x ) + yOff;
inds[ind++] = (x+1) + yOff;
inds[ind++] = (x+1) + yOffN;
inds[ind++] = (x ) + yOff;
inds[ind++] = (x+1) + yOffN;
inds[ind++] = (x ) + yOffN;
}
}
}
}
mesh_set_verts(result, verts, vert_count);
mesh_set_inds (result, inds, ind_count);
// Data has been copied into the mesh; release the local buffers.
free(verts);
free(inds);
return result;
}
///////////////////////////////////////////
// Generates a capped cylinder centered at the origin, extending depth/2
// along +/- `dir`, with `subdivisions` segments around the circumference.
// Side vertices are duplicated from cap vertices so the side can have smooth
// radial normals while the caps use flat +/-dir normals.
mesh_t mesh_gen_cylinder(float diameter, float depth, vec3 dir, int32_t subdivisions) {
mesh_t result = mesh_create();
dir = vec3_normalize(dir);
float radius = diameter / 2;
vind_t subd = (vind_t)subdivisions;
// 4 vertices per ring step (side top/bottom + cap top/bottom) plus the two
// cap center points appended at the end.
int vert_count = (subdivisions+1) * 4 + 2;
int ind_count = subdivisions * 12;
vert_t *verts = sk_malloc_t(vert_t, vert_count);
vind_t *inds = sk_malloc_t(vind_t, ind_count);
// Calculate any perpendicular vector
vec3 perp = vec3{dir.z, dir.z, -dir.x-dir.y};
// Degenerate when dir.z == 0 and dir.x == -dir.y; fall back to another form.
if (vec3_magnitude_sq(perp) == 0)
perp = vec3{-dir.y-dir.z, dir.x, dir.x};
vec3 axis_x = vec3_normalize(vec3_cross(dir, perp));
vec3 axis_y = vec3_normalize(vec3_cross(dir, axis_x));
vec3 z_off = dir * (depth / 2.f);
vind_t ind = 0;
// One extra iteration (i == subd) duplicates the seam vertices so UVs can
// wrap from u=...to u=1 without interpolation artifacts.
for (vind_t i = 0; i <= subd; i++) {
float u = ((float)i / subd);
float ang = u * (float)M_PI * 2;
float x = cosf(ang);
float y = sinf(ang);
vec3 normal = axis_x * x + axis_y * y;
vec3 top_pos = normal*radius + z_off;
vec3 bot_pos = normal*radius - z_off;
// strip first
verts[i * 4 ] = { top_pos, normal, {u,0}, {255,255,255,255} };
verts[i * 4+1] = { bot_pos, normal, {u,1}, {255,255,255,255} };
// now circular faces
verts[i * 4+2] = { top_pos, dir, {u,0}, {255,255,255,255} };
verts[i * 4+3] = { bot_pos, -dir, {u,1}, {255,255,255,255} };
// The seam duplicate emits no triangles of its own.
if (i == subd) continue;
vind_t in = (i + 1) % (subd+1);
// Top slice
inds[ind++] = i * 4 + 2;
inds[ind++] = in * 4 + 2;
inds[ind++] = (subd+1) * 4;
// Bottom slice
inds[ind++] = (subd+1) * 4+1;
inds[ind++] = in * 4 + 3;
inds[ind++] = i * 4 + 3;
// Now edge strip quad
inds[ind++] = in * 4+1;
inds[ind++] = in * 4;
inds[ind++] = i * 4;
inds[ind++] = i * 4+1;
inds[ind++] = in * 4+1;
inds[ind++] = i * 4;
}
// center points for the circle
verts[(subdivisions+1)*4] = { z_off, dir, {0.5f,0.01f}, {255,255,255,255} };
verts[(subdivisions+1)*4+1] = { -z_off, -dir, {0.5f,0.99f}, {255,255,255,255} };
mesh_set_verts(result, verts, vert_count);
mesh_set_inds (result, inds, ind_count);
// Data has been copied into the mesh; release the local buffers.
free(verts);
free(inds);
return result;
}
///////////////////////////////////////////
// Generates a box with rounded edges/corners of radius `edge_radius`.
// Works like mesh_gen_cube, but each face grid is split in half: the outer
// half-rows/columns form the rounded border (sphere-projected, radius-sized)
// while being "stretched" back onto the flat face interior via `off`.
mesh_t mesh_gen_rounded_cube(vec3 dimensions, float edge_radius, int32_t subdivisions) {
vind_t subd = (vind_t)subdivisions;
mesh_t result = mesh_create();
subd = maxi(0,(int32_t)subd) + 2;
if (subd % 2 == 1) // need an even number of subdivisions
subd += 1;
vind_t vert_count = 6*subd*subd;
vind_t ind_count = 6*(subd-1)*(subd-1)*6;
vert_t *verts = sk_malloc_t(vert_t, vert_count);
vind_t *inds = sk_malloc_t(vind_t, ind_count );
// `off` is the half-size of the inner, flat box; the rounded shell of
// radius `edge_radius` is added around it.
vec3 off = (dimensions / 2) - vec3_one*edge_radius;
vec3 size = vec3_one;
float radius = edge_radius;
vind_t ind = 0;
vind_t offset = 0;
for (vind_t i = 0; i < 6*4; i+=4) {
vec3 p1, p2, p3, p4;
vec3 n1, n2, n3, n4;
vec2 u1, u2, u3, u4;
mesh_gen_cube_vert(i, size, p1, n1, u1);
mesh_gen_cube_vert(i+1, size, p2, n2, u2);
mesh_gen_cube_vert(i+2, size, p3, n3, u3);
mesh_gen_cube_vert(i+3, size, p4, n4, u4);
// Physical face dimensions, used to keep the UV border proportional to
// the edge radius.
float sizeU = vec3_magnitude((p4 - p1) * (dimensions/2));
float sizeV = vec3_magnitude((p2 - p1) * (dimensions/2));
offset = (i/4) * (subd)*(subd);
vind_t x, y;
for (vind_t sy = 0; sy < subd; sy++) {
bool first_half_y = sy < subd / 2;
// The two middle rows share the same interpolant (y repeats once),
// which is what keeps the face interior flat.
y = first_half_y ? sy : sy-1;
vec3 stretchA = first_half_y ? p1 : p4;
vec3 stretchB = first_half_y ? p2 : p3;
float stretchV = (radius*2)/sizeV;
float offV = first_half_y ? 0 : sizeV-(radius*2);
float py = y / (float)(subd-2);
// UVs are remapped so the rounded border occupies a radius-sized band.
float pv = py * stretchV + offV;
vind_t yOff = offset + sy * subd;
vind_t yOffN = offset + (sy+1) * subd;
vec3 pl = vec3_lerp(p1, p4, py);
vec3 pr = vec3_lerp(p2, p3, py);
vec2 ul = vec2_lerp(u1, u4, pv);
vec2 ur = vec2_lerp(u2, u3, pv);
for (vind_t sx = 0; sx < subd; sx++) {
bool first_half_x = sx < subd / 2;
x = first_half_x ? sx : sx-1;
vec3 stretch = first_half_x ? stretchA : stretchB;
float stretchU = (radius*2)/sizeU;
float offU = first_half_x ? 0 : sizeU-(radius*2);
float px = x / (float)(subd-2);
float pu = px * stretchU + offU;
int ptIndex = sx + yOff;
vert_t *pt = &verts[ptIndex];
// Sphere-project for the rounded shell, then push outward to the
// matching corner of the inner flat box.
pt->norm= vec3_normalize(vec3_lerp(pl, pr, px));
pt->pos = pt->norm*radius + stretch*off;
pt->uv = vec2_lerp(ul, ur, pu);
pt->col = {255,255,255,255};
if (sy != subd-1 && sx != subd-1) {
inds[ind++] = (sx ) + yOff;
inds[ind++] = (sx+1) + yOff;
inds[ind++] = (sx+1) + yOffN;
inds[ind++] = (sx ) + yOff;
inds[ind++] = (sx+1) + yOffN;
inds[ind++] = (sx ) + yOffN;
}
}
}
}
mesh_set_verts(result, verts, vert_count);
mesh_set_inds (result, inds, ind_count);
// Data has been copied into the mesh; release the local buffers.
free(verts);
free(inds);
return result;
}
} // namespace sk | 11,858 |
372 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*-
* ex: set softtabstop=4 tabstop=8 expandtab shiftwidth=4: *
* Editor Settings: expandtabs and use 4 spaces for indentation */
/*
* Copyright © BeyondTrust Software 2004 - 2019
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* BEYONDTRUST MAKES THIS SOFTWARE AVAILABLE UNDER OTHER LICENSING TERMS AS
* WELL. IF YOU HAVE ENTERED INTO A SEPARATE LICENSE AGREEMENT WITH
* BEYONDTRUST, THEN YOU MAY ELECT TO USE THE SOFTWARE UNDER THE TERMS OF THAT
* SOFTWARE LICENSE AGREEMENT INSTEAD OF THE TERMS OF THE APACHE LICENSE,
* NOTWITHSTANDING THE ABOVE NOTICE. IF YOU HAVE QUESTIONS, OR WISH TO REQUEST
* A COPY OF THE ALTERNATE LICENSING TERMS OFFERED BY BEYONDTRUST, PLEASE CONTACT
* BEYONDTRUST AT beyondtrust.com/contact
*/
/*
* Copyright (C) BeyondTrust Software. All rights reserved.
*
* Module Name:
*
* builtin.c
*
* Abstract:
*
* BeyondTrust Security and Authentication Subsystem (LSASS)
*
* Builtin Privileges table and database init function
*
* Authors: <NAME> (<EMAIL>)
*/
#include "includes.h"
// Describes one privilege that is always present in the privileges database.
typedef struct _LSA_BUILTIN_PRIVILEGE
{
PSTR pszName; // Privilege name, e.g. "SeBackupPrivilege"
PSTR pszDescription; // Human-readable display description
LONG Value; // Numeric privilege value (low part of the LUID)
BOOLEAN EnabledByDefault; // Whether the privilege starts out enabled
} LSA_BUILTIN_PRIVILEGE, *PLSA_BUILTIN_PRIVILEGE;
// Table of well-known privileges loaded into the database at startup by
// LsaSrvPrivsAddBuiltinPrivileges(). Names and values mirror the standard
// Windows security privileges. Only SeChangeNotifyPrivilege is enabled by
// default, matching typical Windows defaults.
static LSA_BUILTIN_PRIVILEGE BuiltinPrivileges[] = {
{
.pszName = "SeAuditPrivilege",
.pszDescription = "Generate security audits.",
.Value = SE_AUDIT_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeBackupPrivilege",
.pszDescription = "Backup files and directories.",
.Value = SE_BACKUP_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeChangeNotifyPrivilege",
.pszDescription = "Bypass traverse checking.",
.Value = SE_CHANGE_NOTIFY_PRIVILEGE,
.EnabledByDefault = TRUE
},
{
.pszName = "SeCreateSymbolicLinkPrivilege",
.pszDescription = "Create symbolic links.",
.Value = SE_CREATE_SYMBOLIC_LINK_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeLoadDriverPrivilege",
.pszDescription = "Load and unload device drivers.",
.Value = SE_LOAD_DRIVER_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeMachineAccountPrivilege",
.pszDescription = "Add workstations to domain.",
.Value = SE_MACHINE_ACCOUNT_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeManageVolumePrivilege",
.pszDescription = "Manage the files on a volume.",
.Value = SE_MANAGE_VOLUME_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeRemoteShutdownPrivilege",
.pszDescription = "Force shutdown from a remote system.",
.Value = SE_REMOTE_SHUTDOWN_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeRestorePrivilege",
.pszDescription = "Restore files and directories.",
.Value = SE_RESTORE_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeSecurityPrivilege",
.pszDescription = "Manage auditing and security log.",
.Value = SE_SECURITY_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeShutdownPrivilege",
.pszDescription = "Shut down the system.",
.Value = SE_SHUTDOWN_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeSystemTimePrivilege",
.pszDescription = "Change system time.",
.Value = SE_SYSTEM_TIME_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeTakeOwnershipPrivilege",
.pszDescription = "Take ownership of files or other objects.",
.Value = SE_TAKE_OWNERSHIP_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeTcbPrivilege",
.pszDescription = "Act as part of the operating system.",
.Value = SE_TCB_PRIVILEGE,
.EnabledByDefault = FALSE
},
{
.pszName = "SeTimeZonePrivilege",
.pszDescription = "Change time zone.",
.Value = SE_TIME_ZONE_PRIVILEGE,
.EnabledByDefault = FALSE
}
};
/**
 * Populates the privileges hash table with the built-in privileges from
 * the BuiltinPrivileges table.
 *
 * For each entry, an LSA_PRIVILEGE is allocated and inserted into
 * pPrivilegesTable keyed by its name. A pre-existing entry with the same
 * name is treated as an error (ERROR_ALREADY_EXISTS).
 *
 * @param[in,out] pPrivilegesTable  Hash table to receive the privileges.
 *                                  On success it owns the inserted keys
 *                                  and values.
 * @return ERROR_SUCCESS, or a Win32 error code on failure. On failure,
 *         the entry being processed is freed; entries already inserted
 *         remain owned by the table.
 */
DWORD
LsaSrvPrivsAddBuiltinPrivileges(
    PLW_HASH_TABLE pPrivilegesTable
    )
{
    DWORD err = ERROR_SUCCESS;
    DWORD i = 0;
    DWORD numPrivileges = sizeof(BuiltinPrivileges)/sizeof(BuiltinPrivileges[0]);
    PLSA_PRIVILEGE pPrivilege = NULL;
    PSTR pszKey = NULL;

    LSA_LOG_VERBOSE("Loading builtin privileges (%u privileges)",
                    numPrivileges);

    for (i = 0; i < numPrivileges; i++)
    {
        PLSA_BUILTIN_PRIVILEGE pBuiltin = &BuiltinPrivileges[i];
        PLSA_PRIVILEGE pExistingEntry = NULL;

        err = LwAllocateMemory(
            sizeof(*pPrivilege),
            OUT_PPVOID(&pPrivilege));
        BAIL_ON_LSA_ERROR(err);

        LSA_LOG_VERBOSE("Loading privilege %s",
                        LSA_SAFE_LOG_STRING(pBuiltin->pszName));

        err = LwAllocateString(
            pBuiltin->pszName,
            &pPrivilege->pszName);
        BAIL_ON_LSA_ERROR(err);

        err = LwMbsToWc16s(
            pBuiltin->pszDescription,
            &pPrivilege->pwszDescription);
        BAIL_ON_LSA_ERROR(err);

        pPrivilege->Luid = RtlConvertUlongToLuid(pBuiltin->Value);
        pPrivilege->EnabledByDefault = pBuiltin->EnabledByDefault;

        // The table takes ownership of the key string, so it needs its
        // own copy of the name.
        err = LwAllocateString(
            pBuiltin->pszName,
            &pszKey);
        BAIL_ON_LSA_ERROR(err);

        err = LwHashGetValue(
            pPrivilegesTable,
            pszKey,
            (PVOID)&pExistingEntry);
        if (err == ERROR_SUCCESS)
        {
            LSA_LOG_ERROR("Duplicate %s privilege entry found", pszKey);
            err = ERROR_ALREADY_EXISTS;
            BAIL_ON_LSA_ERROR(err);
        }
        else if (err == ERROR_NOT_FOUND)
        {
            // Not in the table yet - this is the expected case.
            err = ERROR_SUCCESS;
        }
        else
        {
            BAIL_ON_LSA_ERROR(err);
        }

        err = LwHashSetValue(
            pPrivilegesTable,
            pszKey,
            pPrivilege);
        BAIL_ON_LSA_ERROR(err);

        // Ownership transferred to the hash table; don't free these below.
        pszKey = NULL;
        pPrivilege = NULL;
    }

error:
    if (err)
    {
        // pszKey is non-NULL only when we bailed between allocating it and
        // successfully inserting it into the table; it must be freed here
        // or it leaks (the table never took ownership).
        LW_SAFE_FREE_MEMORY(pszKey);

        if (pPrivilege)
        {
            LW_SAFE_FREE_MEMORY(pPrivilege->pszName);
            LW_SAFE_FREE_MEMORY(pPrivilege->pwszDescription);
            LW_SAFE_FREE_MEMORY(pPrivilege);
        }
    }

    return err;
}
| 3,375 |
590 | /*!
@file
@author <NAME>
@date 07/2012
*/
#ifndef _eeb02a46_5598_4667_b6e5_24165ec13db3_
#define _eeb02a46_5598_4667_b6e5_24165ec13db3_
#include "Data.h"
namespace tools
{
// Static helper collection for building and editing MyGUI skin data:
// slicing a skin texture into the nine-patch regions defined by separator
// offsets, and reading/writing those regions on a skin Data object.
class SkinDataUtility
{
public:
// Nine-patch region order; RegionMax is the region count sentinel.
enum RegionIndex
{
RegionLeftTop, RegionTop, RegionRightTop,
RegionLeft, RegionCenter, RegionRight,
RegionLeftBottom, RegionBottom, RegionRightBottom,
RegionMax
};
// Initializes a freshly created skin Data object (states, separators,
// regions - see the private Create* helpers).
static void CreateSkinData(DataPtr _skinData);
typedef std::vector<MyGUI::IntCoord> VectorCoord;
// Computes the nine region rectangles for a skin of `_size` split by the
// given separator offsets.
static VectorCoord getRegions(const MyGUI::IntSize& _size, const MyGUI::IntRect& _separators);
static MyGUI::IntSize getSkinSize(DataPtr _skinData);
static MyGUI::IntRect getSeparatorsOffset(DataPtr _skinData);
typedef MyGUI::types::TRect<bool> RectVisible;
// Which of the four separators are visible/enabled in the skin data.
static RectVisible getSeparatorsVisible(DataPtr _skinData);
typedef std::vector<std::string> VectorString;
// Canonical region names, indexed consistently with RegionIndex.
static const VectorString& getRegionNames();
// Write computed region coordinates / enabled flags back into the data.
static void fillRegionCoords(DataPtr _skinData, const VectorCoord& _value);
static void fillRegionEnable(DataPtr _skinData, const RectVisible& _value);
static void ShowRegions(DataPtr _skinData);
private:
static void CreateStates(DataPtr _skinData);
static void CreateSeparators(DataPtr _skinData);
static void CreateRegions(DataPtr _skinData);
};
}
#endif
| 477 |
10,245 | <filename>netty/src/test/java/io/grpc/netty/AbstractHttp2HeadersTest.java<gh_stars>1000+
/*
* Copyright 2016 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc.netty;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import com.google.common.base.Defaults;
import io.netty.handler.codec.http2.Http2Headers;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** Unit tests for {@link AbstractHttp2Headers}. */
@RunWith(JUnit4.class)
public class AbstractHttp2HeadersTest {
  @Test
  public void allMethodsAreUnsupported() {
    Http2Headers subject = new AbstractHttp2Headers() {};
    for (Method method : Http2Headers.class.getMethods()) {
      // Only abstract methods must throw; this skips Java 8 default methods
      // without requiring Java 8's Method.isDefault().
      if (!Modifier.isAbstract(method.getModifiers())) {
        continue;
      }
      // Build an argument list of per-type default values (null/0/false).
      Class<?>[] paramTypes = method.getParameterTypes();
      Object[] defaultArgs = new Object[paramTypes.length];
      for (int i = 0; i < paramTypes.length; i++) {
        defaultArgs[i] = Defaults.defaultValue(paramTypes[i]);
      }
      String description = "method: " + method;
      try {
        method.invoke(subject, defaultArgs);
        fail("Expected exception for " + description);
      } catch (InvocationTargetException ex) {
        // Reflection wraps the method's exception; unwrap and verify.
        assertEquals("For " + description,
            UnsupportedOperationException.class, ex.getCause().getClass());
      } catch (Exception ex) {
        throw new AssertionError("Failure with " + description, ex);
      }
    }
  }
}
| 734 |
2,338 | <filename>clang-tools-extra/test/clang-tidy/checkers/google-default-arguments.cpp
// RUN: %check_clang_tidy %s google-default-arguments %t
// Positive cases: the check must flag default arguments on virtual methods
// and on methods that override (even implicitly) a virtual method.
struct A {
virtual void f(int I, int J = 3);
// CHECK-MESSAGES: :[[@LINE-1]]:16: warning: default arguments on virtual or override methods are prohibited [google-default-arguments]
};
// Implicit override (no `override` keyword) must still be flagged.
struct B : public A {
void f(int I, int J = 5);
// CHECK-MESSAGES: :[[@LINE-1]]:8: warning: default arguments on virtual or override methods are prohibited
};
// Explicit `override` with a default argument must be flagged too.
struct C : public B {
void f(int I, int J = 5) override;
// CHECK-MESSAGES: :[[@LINE-1]]:8: warning: default arguments on virtual or override methods are prohibited
};
// Negatives: no diagnostics expected for any of the following.
// An override without default arguments is fine.
struct D : public B {
void f(int I, int J) override;
};
// Default arguments on a non-virtual method are fine.
struct X {
void f(int I, int J = 3);
};
// Hiding (not overriding) a non-virtual base method is fine as well.
struct Y : public X {
void f(int I, int J = 5);
};
| 296 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.web.beans;
import java.io.IOException;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.lang.model.element.AnnotationMirror;
import javax.lang.model.element.TypeElement;
import org.netbeans.api.java.source.CompilationController;
import org.netbeans.api.java.source.ElementHandle;
import org.netbeans.api.project.FileOwnerQuery;
import org.netbeans.api.project.Project;
import org.netbeans.modules.javaee.injection.spi.InjectionTargetQueryImplementation;
import org.netbeans.modules.j2ee.metadata.model.api.MetadataModel;
import org.netbeans.modules.j2ee.metadata.model.api.MetadataModelAction;
import org.netbeans.modules.j2ee.metadata.model.api.MetadataModelException;
import org.netbeans.modules.web.beans.api.model.WebBeansModel;
import org.openide.util.Parameters;
/**
* @author ads
*
*/
@org.openide.util.lookup.ServiceProvider(service=org.netbeans.modules.javaee.injection.spi.InjectionTargetQueryImplementation.class)
public class WebBeanInjectionTargetQueryImplementation implements
InjectionTargetQueryImplementation
{
/**
 * Decides whether the given type is a CDI injection target by consulting
 * the project's web-beans metadata model: a type qualifies when it has at
 * least one CDI qualifier, or when it is (possibly implicitly) {@code @Named}.
 * Returns {@code false} when no owning project can be determined or when
 * reading the metadata model fails (failures are logged, not propagated).
 */
@Override
public boolean isInjectionTarget( CompilationController controller,
TypeElement typeElement )
{
try {
Parameters.notNull("controller", controller);
Parameters.notNull("typeElement", typeElement);
Project project = FileOwnerQuery.getOwner( controller.getFileObject() );
if ( project == null ){
return false;
}
MetaModelSupport support = new MetaModelSupport(project);
MetadataModel<WebBeansModel> metaModel = support.getMetaModel();
// Handle survives across javac sessions; resolved inside the model action.
final ElementHandle<TypeElement> handle = ElementHandle.create(typeElement);
return metaModel.runReadAction(new MetadataModelAction<WebBeansModel, Boolean>() {
@Override
public Boolean run( WebBeansModel model ) throws Exception {
TypeElement element = handle.resolve(model.getCompilationController());
if ( element == null ){
return false;
}
List<AnnotationMirror> qualifiers = model.getQualifiers(
element, true);
if ( qualifiers.size() == 0 ){
/*
 * @Named is a special case.
 * It could be present implicitly: there may be a
 * stereotype declared for the element which
 * is annotated by @Named.
 */
if ( model.getName( element ) != null ){
return true;
}
return false;
}
else {
/*
 * There are some qualifiers, so this bean is eligible
 * for injection. This doesn't necessarily mean it is
 * really managed by the J2EE container.
 */
return true;
}
}
});
} catch (MetadataModelException ex) {
Logger.getLogger( WebBeanInjectionTargetQueryImplementation.class.getName()).
log( Level.WARNING, ex.getMessage(), ex);
} catch (IOException ex) {
Logger.getLogger( WebBeanInjectionTargetQueryImplementation.class.getName()).
log( Level.WARNING, ex.getMessage(), ex);
}
return false;
}
/**
 * CDI injection never requires a static reference, so this always
 * returns {@code false}.
 */
@Override
public boolean isStaticReferenceRequired( CompilationController controller,
TypeElement typeElement )
{
return false;
}
}
| 2,268 |
848 | <reponame>earlephilhower/SdFat-namespace
/**
* Copyright (c) 2011-2021 <NAME>
* This file is part of the SdFat library for SD memory cards.
*
* MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef FsUtf_h
#define FsUtf_h
/**
* \file
* \brief Unicode Transformation Format functions.
*/
#include <stdint.h>
#include <stddef.h>
namespace FsUtf {
/** Compute the UTF-16 high (leading) surrogate for a supplementary code point.
 * \param[in] cp code point (must be in [0x10000, 0x10FFFF]).
 * \return high surrogate unit in [0xD800, 0xDBFF].
 */
inline uint16_t highSurrogate(uint32_t cp) {
  // 0xD7C0 == 0xD800 - (0x10000 >> 10); folding the bias into one constant.
  return static_cast<uint16_t>(0XD7C0 + (cp >> 10));
}
/** Compute the UTF-16 low (trailing) surrogate for a supplementary code point.
 * \param[in] cp code point (must be in [0x10000, 0x10FFFF]).
 * \return low surrogate unit in [0xDC00, 0xDFFF].
 */
inline uint16_t lowSurrogate(uint32_t cp) {
  return static_cast<uint16_t>(0XDC00 + (cp & 0X3FF));
}
/** Check whether a value is a valid Unicode scalar value.
 * Valid code points lie in [0, 0x10FFFF] excluding the surrogate range.
 * \param[in] cp code point.
 * \return true if valid else false.
 */
inline bool isValidCp(uint32_t cp) {
  if (cp > 0x10FFFF) {
    return false;
  }
  return cp < 0XD800 || cp > 0XDFFF;
}
/** Check whether a UTF-16 unit is any surrogate (0xD800..0xDFFF).
 * \param[in] c UTF-16 unit.
 * \return true if c is a surrogate else false.
 */
inline bool isSurrogate(uint16_t c) {
  return (c & 0XF800) == 0XD800;
}
/** Check whether a UTF-16 unit is a high surrogate (0xD800..0xDBFF).
 * \param[in] c UTF-16 unit.
 * \return true if c is a high surrogate else false.
 */
inline bool isHighSurrogate(uint16_t c) {
  return (c & 0XFC00) == 0XD800;
}
/** Check whether a UTF-16 unit is a low surrogate (0xDC00..0xDFFF).
 * \param[in] c UTF-16 unit.
 * \return true if c is a low surrogate else false.
 */
inline bool isLowSurrogate(uint16_t c) {
  return (c & 0XFC00) == 0XDC00;
}
/** Combine a UTF-16 surrogate pair back into its code point.
 * \param[in] hs high surrogate.
 * \param[in] ls low surrogate.
 * \return code point in [0x10000, 0x10FFFF].
 */
inline uint32_t u16ToCp(uint16_t hs, uint16_t ls) {
  const uint32_t offset = ((hs & 0X3FF) << 10) | (ls & 0X3FF);
  return 0X10000 + offset;
}
/** Encodes a 32 bit code point as a UTF-8 sequence.
 * \param[in] cp code point to encode.
 * \param[out] str location for UTF-8 sequence.
 * \param[in] end location following last character of str.
 * \return location one beyond last encoded character.
 */
char* cpToMb(uint32_t cp, char* str, char* end);
/** Get next code point from a UTF-8 sequence.
 * \param[in] str location for UTF-8 sequence.
 * \param[in] end location following last character of str.
 *            May be nullptr if str is zero terminated.
 * \param[out] rtn location for the code point.
 * \return location of next UTF-8 character in str or nullptr for error.
 */
const char* mbToCp(const char* str, const char* end, uint32_t* rtn);
/** Get next code point from a UTF-8 sequence as UTF-16.
 * \param[in] str location for UTF-8 sequence.
 * \param[in] end location following last character of str.
 * \param[out] hs location for the code point or high surrogate.
 * \param[out] ls location for zero or low surrogate.
 * \return location of next UTF-8 character in str or nullptr for error.
 */
const char* mbToU16(const char* str,
                    const char* end, uint16_t* hs, uint16_t* ls);
}  // namespace FsUtf
#endif // FsUtf_h
| 1,451 |
1,144 | package de.metas.report.jasper;
/*
* #%L
* de.metas.report.jasper.server.base
* %%
* Copyright (C) 2015 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import javax.annotation.Nullable;
import org.adempiere.exceptions.AdempiereException;
import org.slf4j.Logger;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableSet;
import de.metas.logging.LogManager;
import de.metas.util.Check;
import de.metas.util.FileUtil;
import lombok.Builder;
import lombok.NonNull;
import lombok.Singular;
import lombok.Value;
import net.sf.jasperreports.engine.JasperCompileManager;
/**
* Alternative class loader to be used when doing dev-tests on a local machine.<br>
* This class loader will be used by {@link JasperEngine} if we run in developer mode.
*
* @author tsa
*
*/
public class JasperCompileClassLoader extends ClassLoader
{
private static final Logger logger = LogManager.getLogger(JasperCompileClassLoader.class);
// Resource extensions this loader treats specially.
private static final String jasperExtension = ".jasper";
private static final String jrxmlExtension = ".jrxml";
private static final String propertiesExtension = ".properties";
private static final String xlsExtension = ".xls";
// Extra directories searched for resources before/besides the parent loader.
private final ImmutableSet<File> additionalResourceDirNames;
// Cache of compiled jasper reports, keyed by the *.jrxml resource path;
// Optional.empty() caches "no such resource" lookups too.
private final Map<String, Optional<JasperEntry>> jasperEntriesByJrxmlPath = new ConcurrentHashMap<>();
/**
 * @param parentClassLoader parent for delegation; may be null (bootstrap)
 * @param additionalResourceDirNames directories to search for resources in
 *            addition to the parent class loader's class path
 */
@Builder
private JasperCompileClassLoader(
@Nullable final ClassLoader parentClassLoader,
@NonNull @Singular final List<File> additionalResourceDirNames)
{
super(parentClassLoader);
this.additionalResourceDirNames = ImmutableSet.copyOf(additionalResourceDirNames);
}
@Override
public String toString()
{
	// Guava's helper keeps the format consistent with the rest of the codebase.
	final MoreObjects.ToStringHelper stringHelper = MoreObjects.toStringHelper(this);
	stringHelper.add("additionalResourceDirNames", additionalResourceDirNames);
	stringHelper.add("parent", getParent());
	return stringHelper.toString();
}
/**
 * Resolves a resource name, dispatching on its extension:
 * <li>*.jasper: compiled on demand from the corresponding *.jrxml source
 * <li>*.xls and *.properties (resource bundles): plain file lookup
 * <li>anything else: searched only in the additional resource directories
 */
@Override
protected URL findResource(final String name)
{
	if (Check.isEmpty(name, true))
	{
		return null;
	}

	final String normalizedName = name.trim();
	if (normalizedName.endsWith(jasperExtension))
	{
		return getJaserResource(normalizedName);
	}
	// spreadsheets and property files (i.e. resource bundles) share the same lookup
	else if (normalizedName.endsWith(xlsExtension) || normalizedName.endsWith(propertiesExtension))
	{
		return findMiscResource(normalizedName);
	}
	else
	{
		return findResourceInAdditionalPathsOrNull(normalizedName);
	}
}
private URL getJaserResource(final String name)
{
final String jrxmlPath = toLocalPath(name, jrxmlExtension);
if (jrxmlPath == null)
{
return null;
}
return jasperEntriesByJrxmlPath.computeIfAbsent(jrxmlPath, this::computeJasperEntry)
.map(JasperEntry::getJasperUrl)
.orElse(null);
}
private Optional<JasperEntry> computeJasperEntry(@NonNull final String jrxmlPath)
{
logger.trace("Computing jasper report for {}", jrxmlPath);
//
// Get resource's input stream
String jrxmlPathNorm = jrxmlPath;
URL jrxmlUrl = getResource(jrxmlPathNorm);
// TODO: fix this fucked up
if (jrxmlUrl == null && jrxmlPath.startsWith("/"))
{
jrxmlPathNorm = jrxmlPath.substring(1);
jrxmlUrl = getResource(jrxmlPathNorm);
}
if (jrxmlUrl == null)
{
logger.trace("No JRXML resource found for {}", jrxmlPath);
return Optional.empty();
}
final File jrxmlFile = toLocalFile(jrxmlUrl);
final File jasperFile = compileJrxml(jrxmlFile);
logger.trace("Compiled jasper report: {} <- {}", jasperFile, jrxmlFile);
return Optional.of(JasperEntry.builder()
.jrxmlFile(jrxmlFile)
.jasperFile(jasperFile)
.build());
}
private URL findMiscResource(final String name)
{
final String resourcePath = toLocalPath(name, FileUtil.getFileExtension(name));
URL url = findResourceInAdditionalPathsOrNull(resourcePath);
if (url != null)
{
return url;
}
final ClassLoader parentClassLoader = getParent();
url = parentClassLoader.getResource(resourcePath);
if (url != null)
{
return url;
}
if (resourcePath.startsWith("/"))
{
url = parentClassLoader.getResource(resourcePath.substring(1));
}
return url;
}
private URL findResourceInAdditionalPathsOrNull(final String resourceName)
{
for (final File resourceDir : additionalResourceDirNames)
{
final File resourceFile = new File(resourceDir, resourceName);
if (resourceFile.exists() && resourceFile.isFile())
{
try
{
return resourceFile.toURI().toURL();
}
catch (final MalformedURLException e)
{
logger.trace("Not considering resourceFile={} for resourceName={} because it cannot be converted to URL", resourceFile, resourceName, e);
}
}
}
return null;
}
private static File toLocalFile(@NonNull final URL url)
{
try
{
return new File(url.toURI());
}
catch (URISyntaxException ex)
{
throw new AdempiereException("Cannot convert URL to local File: " + url, ex);
}
}
private static String toLocalPath(final String resourceName, final String fileExtension)
{
String resourcePath = resourceName.trim()
.replace(JasperClassLoader.PLACEHOLDER, "")
.replace("//", "/");
if (!resourcePath.startsWith("/"))
{
resourcePath = "/" + resourcePath;
}
final String jasperReportJrxmlPath = FileUtil.changeFileExtension(resourcePath, fileExtension);
return jasperReportJrxmlPath;
}
private static File compileJrxml(final File jrxmlFile)
{
try (InputStream jrxmlStream = new FileInputStream(jrxmlFile))
{
final File jasperFile = File.createTempFile("JasperReport", jasperExtension);
try (FileOutputStream jasperStream = new FileOutputStream(jasperFile))
{
JasperCompileManager.compileReportToStream(jrxmlStream, jasperStream);
}
return jasperFile;
}
catch (final Exception ex)
{
throw new AdempiereException("Failed compiling jasper report: " + jrxmlFile, ex);
}
}
@Override
protected Enumeration<URL> findResources(final String name) throws IOException
{
final URL url = findResource(name);
if (url == null)
{
return super.findResources(name);
}
return Collections.enumeration(Arrays.asList(url));
}
@Value
@Builder
private static class JasperEntry
{
@NonNull
File jrxmlFile;
@NonNull
File jasperFile;
public URL getJasperUrl()
{
try
{
return jasperFile.toURI().toURL();
}
catch (final MalformedURLException e)
{
throw new AdempiereException("Cannot convert " + jasperFile + " to URL", e);
}
}
}
}
| 2,649 |
766 | <reponame>YuriyPobezhymov/tsunami-security-scanner-plugins
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.tsunami.plugins.detectors.rce.cve20175638;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.net.HttpHeaders.CONTENT_TYPE;
import static com.google.tsunami.common.net.http.HttpRequest.get;
import com.google.common.collect.ImmutableList;
import com.google.common.flogger.GoogleLogger;
import com.google.protobuf.util.Timestamps;
import com.google.tsunami.common.data.NetworkServiceUtils;
import com.google.tsunami.common.net.http.HttpClient;
import com.google.tsunami.common.net.http.HttpHeaders;
import com.google.tsunami.common.net.http.HttpResponse;
import com.google.tsunami.common.time.UtcClock;
import com.google.tsunami.plugin.PluginType;
import com.google.tsunami.plugin.VulnDetector;
import com.google.tsunami.plugin.annotations.PluginInfo;
import com.google.tsunami.proto.DetectionReport;
import com.google.tsunami.proto.DetectionReportList;
import com.google.tsunami.proto.DetectionStatus;
import com.google.tsunami.proto.NetworkService;
import com.google.tsunami.proto.Severity;
import com.google.tsunami.proto.TargetInfo;
import com.google.tsunami.proto.Vulnerability;
import com.google.tsunami.proto.VulnerabilityId;
import java.io.IOException;
import java.time.Clock;
import java.time.Instant;
import java.util.Optional;
import javax.inject.Inject;
/**
 * A {@link VulnDetector} that detects Apache Struts Command Injection via Content-Type header
 * (CVE-2017-5638).
 *
 * <p>The detector sends a crafted Content-Type header containing an OGNL expression. On a
 * vulnerable server the expression is evaluated with the privileges of the web server and adds a
 * marker header with a random value to the HTTP response; seeing that marker reflected back
 * proves that our code was executed.
 */
@PluginInfo(
    type = PluginType.VULN_DETECTION,
    name = "ApacheStrutsContentTypeRceDetector",
    version = "0.1",
    description = "Tsunami detector plugin for Apache Struts Command Injection via Content-Type "
        + "header (CVE-2017-5638).",
    author = "<NAME> (<EMAIL>)",
    bootstrapModule = ApacheStrutsContentTypeRceDetectorBootstrapModule.class)
public final class ApacheStrutsContentTypeRceDetector implements VulnDetector {
  static final String DETECTOR_HEADER_NAME = "ApacheStrutsDetectorHeader";
  static final String RANDOM_VALUE = "IhEmKCn1Lqa79o2mXmAsIzBfcMojgseiOd7srLNFlPZmzqWkRaiQNZ89mZyw";

  private static final GoogleLogger logger = GoogleLogger.forEnclosingClass();
  // OGNL payload: pulls the HttpServletResponse out of the OGNL context and adds our marker header.
  private static final String PAYLOAD_STRING_FORMAT =
      "%%{#context['com.opensymphony.xwork2.dispatcher.HttpServletResponse'].addHeader('%s','%s')}"
          + ".multipart/form-data";

  private final Clock utcClock;
  private final HttpClient httpClient;

  @Inject
  ApacheStrutsContentTypeRceDetector(@UtcClock Clock utcClock, HttpClient httpClient) {
    this.utcClock = checkNotNull(utcClock);
    this.httpClient = checkNotNull(httpClient);
  }

  @Override
  public DetectionReportList detect(
      TargetInfo targetInfo, ImmutableList<NetworkService> matchedServices) {
    logger.atInfo().log(
        "Starting Command Injection via Content-Type header (CVE-2017-5638) detection for Apache"
            + " Struts.");

    // Probe every matched web service and build one report per vulnerable service.
    ImmutableList<DetectionReport> reports =
        matchedServices.stream()
            // TODO(b/147455416): checking web service is not needed once we enable
            // service name filtering on this plugin.
            .filter(NetworkServiceUtils::isWebService)
            .filter(this::isServiceVulnerable)
            .map(service -> buildDetectionReport(targetInfo, service))
            .collect(toImmutableList());
    DetectionReportList detectionReports =
        DetectionReportList.newBuilder().addAllDetectionReports(reports).build();

    logger.atInfo().log(
        "ApacheStrutsContentTypeRceDetector finished, detected '%d' vulns.",
        detectionReports.getDetectionReportsCount());
    return detectionReports;
  }

  private boolean isServiceVulnerable(NetworkService networkService) {
    String targetUri = NetworkServiceUtils.buildWebApplicationRootUrl(networkService);
    String payload = String.format(PAYLOAD_STRING_FORMAT, DETECTOR_HEADER_NAME, RANDOM_VALUE);
    HttpHeaders headers = HttpHeaders.builder()
        .addHeader(CONTENT_TYPE, payload)
        .build();
    try {
      // This is a blocking call. A vulnerable server echoes our marker header in its response.
      HttpResponse response =
          httpClient.send(get(targetUri).setHeaders(headers).build(), networkService);
      return response
          .headers()
          .get(DETECTOR_HEADER_NAME)
          .map(RANDOM_VALUE::equals)
          .orElse(false);
    } catch (IOException e) {
      logger.atWarning().withCause(e).log("Unable to query '%s'.", targetUri);
      return false;
    }
  }

  private DetectionReport buildDetectionReport(
      TargetInfo targetInfo, NetworkService vulnerableNetworkService) {
    Vulnerability vulnerability =
        Vulnerability.newBuilder()
            .setMainId(
                VulnerabilityId.newBuilder()
                    .setPublisher("GOOGLE")
                    .setValue("CVE_2017_5638"))
            .setSeverity(Severity.CRITICAL)
            .setTitle("Apache Struts Command Injection via Content-Type header (CVE-2017-5638)")
            .setDescription("Apache Struts server is vulnerable to CVE-2017-5638.")
            .build();
    return DetectionReport.newBuilder()
        .setTargetInfo(targetInfo)
        .setNetworkService(vulnerableNetworkService)
        .setDetectionTimestamp(Timestamps.fromMillis(Instant.now(utcClock).toEpochMilli()))
        .setDetectionStatus(DetectionStatus.VULNERABILITY_VERIFIED)
        .setVulnerability(vulnerability)
        .build();
  }
}
| 2,377 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.deadlock.detector;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.lang.management.LockInfo;
import java.lang.management.ManagementFactory;
import java.lang.management.MonitorInfo;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.openide.util.Exceptions;
import org.openide.util.NbBundle;
/**
 * Detects deadlocks using ThreadMXBean.
 * <p>
 * A background thread periodically calls {@link ThreadMXBean#findDeadlockedThreads()}. When a
 * deadlock is found, a full thread dump is written to a temporary file and the stack traces of
 * the deadlocked threads are reported as a chain of exceptions. Only the first detected deadlock
 * is reported; detection stops afterwards.
 *
 * @see java.lang.management.ThreadMXBean
 * @author <NAME>, <NAME>
 */
class Detector implements Runnable {

    private static final Logger LOG = Logger.getLogger(Detector.class.getName());

    /**
     * This variable is used from different threads and is protected by
     * synchronized(this).
     */
    private boolean running = true;

    /**
     * How long to wait (in milliseconds) between the deadlock checks.
     */
    private static long PAUSE = 2000;

    /**
     * How long to wait (in milliseconds) before the deadlock detection starts.
     */
    private static long INITIAL_PAUSE = 10000;

    /**
     * Indents for printing the thread dumps.
     */
    private static final String INDENT = " "; // NOI18N

    /**
     * The thread bean used for the deadlock detection.
     */
    private final ThreadMXBean threadMXBean;

    Detector() {
        threadMXBean = ManagementFactory.getThreadMXBean();
        // Both intervals can be overridden via system properties (useful for tests).
        Integer pauseFromSysProp = Integer.getInteger("org.netbeans.modules.deadlock.detector.Detector.PAUSE"); // NOI18N
        if (pauseFromSysProp != null) {
            PAUSE = pauseFromSysProp.longValue();
        }
        Integer initialPauseFromSysProp = Integer.getInteger("org.netbeans.modules.deadlock.detector.Detector.INITIAL_PAUSE"); // NOI18N
        if (initialPauseFromSysProp != null) {
            INITIAL_PAUSE = initialPauseFromSysProp.longValue();
        }
    }

    /**
     * Starts a new thread that periodically checks for deadlocks.
     */
    void start() {
        if (threadMXBean == null) {
            // no bean available on this JVM -> deadlock detection is not possible
            return;
        }
        Thread t = new Thread(this, "Deadlock Detector"); // NOI18N
        t.start();
    }

    /**
     * Stops the detector thread.
     */
    synchronized void stop() {
        running = false;
    }

    /**
     * Accessing the variable running under the synchronized (this).
     * @return whether we are still running the detector thread
     */
    private synchronized boolean isRunning() {
        return running;
    }

    @Override
    public void run() {
        try {
            Thread.sleep(INITIAL_PAUSE);
            while (isRunning()) {
                long time = System.currentTimeMillis();
                detectDeadlock();
                if (LOG.isLoggable(Level.FINE)) {
                    LOG.log(Level.FINE, "Deadlock detection took: {0} ms.", System.currentTimeMillis() - time); // NOI18N
                }
                if (isRunning()) {
                    Thread.sleep(PAUSE);
                }
            }
        } catch (InterruptedException ex) {
            Exceptions.printStackTrace(ex);
        }
    }

    /**
     * The main method called periodically by the deadlock detector thread.
     * Writes a thread dump (to a temp file if possible, to stdout otherwise)
     * and reports the deadlocked threads' stack traces.
     */
    private void detectDeadlock() {
        if (threadMXBean == null) {
            return;
        }
        long[] tids = threadMXBean.findDeadlockedThreads();
        if (tids == null) {
            return;
        }
        // Report deadlock just once
        stop();
        if (LOG.isLoggable(Level.FINE)) {
            LOG.log(Level.FINE, "Deadlock detected"); // NOI18N
        }
        PrintStream out;
        File file = null;
        try {
            file = File.createTempFile("deadlock", ".txt"); // NOI18N
            out = new PrintStream(new FileOutputStream(file));
            if (LOG.isLoggable(Level.FINE)) {
                LOG.log(Level.FINE, "Temporary file created: {0}", file); // NOI18N
            }
        } catch (IOException iOException) {
            // fall back to stdout when the temp file cannot be created; file stays null
            out = System.out;
        }

        out.println("Deadlocked threads :"); // NOI18N
        ThreadInfo[] deadlocked = threadMXBean.getThreadInfo(tids, true, true);
        for (ThreadInfo ti : deadlocked) {
            if (ti == null) {
                continue; // the thread died in the meantime; getThreadInfo returns null for it
            }
            printThreadInfo(ti, out);
            printMonitorInfo(ti, out);
            printLockInfo(ti.getLockedSynchronizers(), out);
            out.println();
        }

        out.println("All threads :"); // NOI18N
        ThreadInfo[] infos = threadMXBean.dumpAllThreads(true, true);
        for (ThreadInfo ti : infos) {
            if (ti == null) {
                continue; // null can be returned in the array
            }
            printThreadInfo(ti, out);
            printMonitorInfo(ti, out);
            printLockInfo(ti.getLockedSynchronizers(), out);
            out.println();
        }
        if (out != System.out) {
            out.close();
        }
        reportStackTrace(deadlocked, file);
    }

    private void printThreadInfo(ThreadInfo ti, PrintStream out) {
        printThread(ti, out);
        // print stack trace with locks
        StackTraceElement[] stacktrace = ti.getStackTrace();
        MonitorInfo[] monitors = ti.getLockedMonitors();
        for (int i = 0; i < stacktrace.length; i++) {
            StackTraceElement ste = stacktrace[i];
            out.println(INDENT + "at " + ste.toString()); // NOI18N
            for (MonitorInfo mi : monitors) {
                if (mi.getLockedStackDepth() == i) {
                    out.println(INDENT + "  - locked " + mi); // NOI18N
                }
            }
        }
        out.println();
    }

    private void printThread(ThreadInfo ti, PrintStream out) {
        StringBuilder sb = new StringBuilder("\"" + ti.getThreadName() + "\"" + // NOI18N
                                             " Id=" + ti.getThreadId() +       // NOI18N
                                             " in " + ti.getThreadState());    // NOI18N
        if (ti.getLockName() != null) {
            sb.append(" on lock=").append(ti.getLockName()); // NOI18N
        }
        if (ti.isSuspended()) {
            sb.append(" (suspended)"); // NOI18N
        }
        if (ti.isInNative()) {
            sb.append(" (running in native)"); // NOI18N
        }
        out.println(sb.toString());
        if (ti.getLockOwnerName() != null) {
            out.println(INDENT + " owned by " + ti.getLockOwnerName() + // NOI18N
                        " Id=" + ti.getLockOwnerId());                  // NOI18N
        }
    }

    private void printMonitorInfo(ThreadInfo ti, PrintStream out) {
        MonitorInfo[] monitors = ti.getLockedMonitors();
        out.println(INDENT + "Locked monitors: count = " + monitors.length); // NOI18N
        for (MonitorInfo mi : monitors) {
            out.println(INDENT + "  - " + mi + " locked at "); // NOI18N
            out.println(INDENT + "      " + mi.getLockedStackDepth() + // NOI18N
                        " " + mi.getLockedStackFrame());               // NOI18N
        }
    }

    private void printLockInfo(LockInfo[] locks, PrintStream out) {
        out.println(INDENT + "Locked synchronizers: count = " + locks.length); // NOI18N
        for (LockInfo li : locks) {
            out.println(INDENT + "  - " + li); // NOI18N
        }
        out.println();
    }

    /**
     * Use exception reporter to report the stack trace of the deadlocked threads.
     * Null entries in {@code deadlocked} (threads that died in the meantime) are skipped,
     * and {@code report} may be null when the dump file could not be created.
     *
     * @param deadlocked thread infos of the deadlocked threads (entries may be null)
     * @param report the file holding the full thread dump, or null
     */
    private void reportStackTrace(ThreadInfo[] deadlocked, File report) {
        DeadlockDetectedException deadlockException = new DeadlockDetectedException(null);
        DeadlockDetectedException lastDde = deadlockException;
        boolean headStackTraceSet = false;
        for (ThreadInfo toBeReported : deadlocked) {
            if (toBeReported == null) {
                continue; // thread died in the meantime
            }
            if (!headStackTraceSet) {
                // the top-level exception carries the stack trace of the first deadlocked thread
                deadlockException.setStackTrace(toBeReported.getStackTrace());
                headStackTraceSet = true;
            }
            DeadlockDetectedException dde = new DeadlockDetectedException(toBeReported.getThreadName());
            dde.setStackTrace(toBeReported.getStackTrace());
            lastDde.initCause(dde);
            lastDde = dde;
        }
        String reportPath = report != null ? report.getAbsolutePath() : "<no deadlock report file>"; // NOI18N
        LOG.log(Level.SEVERE, reportPath, deadlockException);
    }

    private static class DeadlockDetectedException extends RuntimeException {

        public DeadlockDetectedException(String threadName) {
            super(threadName);
        }

        @NbBundle.Messages("MSG_DeadlockDetected=A deadlock was detected.\nWe suggest to restart the IDE to recover.")
        @Override
        public String getLocalizedMessage() {
            if (getMessage() == null) {
                return Bundle.MSG_DeadlockDetected();
            } else {
                return super.getLocalizedMessage();
            }
        }
    }
}
| 4,180 |
785 | <reponame>eddie4941/servicetalk
/*
* Copyright © 2019, 2021 Apple Inc. and the ServiceTalk project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicetalk.http.api;
import io.servicetalk.buffer.netty.BufferAllocators;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.util.concurrent.ExecutionException;
import java.util.stream.Stream;
import static io.servicetalk.http.api.HttpProtocolVersion.HTTP_1_1;
import static io.servicetalk.http.api.HttpRequestMethod.GET;
import static io.servicetalk.http.api.StreamingHttpRequests.newRequest;
import static io.servicetalk.http.api.StreamingHttpResponses.newResponse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
/**
 * Verifies that every HTTP meta-data flavour (streaming, aggregated and blocking-streaming
 * requests/responses) rejects {@code null} or empty header names/values, query parameters and
 * cookies with an {@link IllegalArgumentException}.
 */
@SuppressWarnings("ConstantConditions")
class InvalidMetadataValuesTest {

    // Builds one instance of each request/response flavour; each test runs against all of them.
    @SuppressWarnings("unused")
    private static Stream<Arguments> data() throws ExecutionException, InterruptedException {
        return Stream.of(
                Arguments.of(newRequest(GET, "/", HTTP_1_1,
                        DefaultHttpHeadersFactory.INSTANCE.newHeaders(), BufferAllocators.DEFAULT_ALLOCATOR,
                        DefaultHttpHeadersFactory.INSTANCE), "streaming request"),
                Arguments.of(newResponse(HttpResponseStatus.OK, HTTP_1_1,
                        DefaultHttpHeadersFactory.INSTANCE.newHeaders(), BufferAllocators.DEFAULT_ALLOCATOR,
                        DefaultHttpHeadersFactory.INSTANCE), "streaming response"),
                Arguments.of(newRequest(GET, "/", HTTP_1_1,
                        DefaultHttpHeadersFactory.INSTANCE.newHeaders(), BufferAllocators.DEFAULT_ALLOCATOR,
                        DefaultHttpHeadersFactory.INSTANCE).toRequest().toFuture().get(), "request"),
                Arguments.of(newResponse(HttpResponseStatus.OK, HTTP_1_1,
                        DefaultHttpHeadersFactory.INSTANCE.newHeaders(), BufferAllocators.DEFAULT_ALLOCATOR,
                        DefaultHttpHeadersFactory.INSTANCE).toResponse().toFuture().get(), "response"),
                Arguments.of(newRequest(GET, "/", HTTP_1_1,
                        DefaultHttpHeadersFactory.INSTANCE.newHeaders(), BufferAllocators.DEFAULT_ALLOCATOR,
                        DefaultHttpHeadersFactory.INSTANCE).toBlockingStreamingRequest(), "blocking streaming request"),
                Arguments.of(newResponse(HttpResponseStatus.OK, HTTP_1_1,
                        DefaultHttpHeadersFactory.INSTANCE.newHeaders(), BufferAllocators.DEFAULT_ALLOCATOR,
                        DefaultHttpHeadersFactory.INSTANCE).toBlockingStreamingResponse(), "blocking streaming response"));
    }

    // ---------------------------------------------------------------- headers

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullHeaderNameToAdd(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.addHeader(null, "foo"));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void emptyHeaderNameToAdd(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.addHeader("", "foo"));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullHeaderValueToAdd(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.addHeader("foo", null));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullHeaderNameToSet(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.setHeader(null, "foo"));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void emptyHeaderNameToSet(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.setHeader("", "foo"));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullHeaderValueToSet(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.setHeader("foo", null));
    }

    // ---------------------------------------------------- query parameters (requests only)

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullQPNameToAdd(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        HttpRequestMetaData requestMeta = assumeRequestMeta(metaData);
        assertThrows(IllegalArgumentException.class, () -> requestMeta.addQueryParameter(null, "foo"));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void emptyQPNameToAdd(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        HttpRequestMetaData requestMeta = assumeRequestMeta(metaData);
        assertThrows(IllegalArgumentException.class, () -> requestMeta.addQueryParameter("", "foo"));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullQPValueToAdd(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        HttpRequestMetaData requestMeta = assumeRequestMeta(metaData);
        assertThrows(IllegalArgumentException.class, () -> requestMeta.addQueryParameter("foo", null));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullQPNameToSet(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        HttpRequestMetaData requestMeta = assumeRequestMeta(metaData);
        assertThrows(IllegalArgumentException.class, () -> requestMeta.setQueryParameter(null, ""));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void emptyQPNameToSet(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        HttpRequestMetaData requestMeta = assumeRequestMeta(metaData);
        assertThrows(IllegalArgumentException.class, () -> requestMeta.setQueryParameter("", "foo"));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullQPValueToSet(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        HttpRequestMetaData requestMeta = assumeRequestMeta(metaData);
        assertThrows(IllegalArgumentException.class, () -> requestMeta.setQueryParameter("foo", null));
    }

    // ---------------------------------------------------------------- cookies

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullCookieName(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.addCookie(null, "foo"));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void emptyCookieName(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.addCookie("", ""));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullCookieValue(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.addCookie("foo", null));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullSetCookieName(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.addSetCookie(null, "foo"));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void emptySetCookieName(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.addSetCookie("", ""));
    }

    @ParameterizedTest(name = "{displayName} [{index}]: source = {1}")
    @MethodSource("data")
    void nullSetCookieValue(final HttpMetaData metaData, @SuppressWarnings("unused") String testName) {
        assertThrows(IllegalArgumentException.class, () -> metaData.addSetCookie("foo", null));
    }

    // Query-parameter tests only apply to requests; abort (not fail) for responses.
    private static HttpRequestMetaData assumeRequestMeta(final HttpMetaData metaData) {
        assumeTrue(metaData instanceof HttpRequestMetaData, "Test not applicable for response.");
        return (HttpRequestMetaData) metaData;
    }
}
| 3,285 |
2,151 | <gh_stars>1000+
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/base/file_stream_context.h"
#include <errno.h>
#include <utility>
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/files/file_path.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/task_runner.h"
#include "base/task_runner_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
namespace net {
// Creates a context that is not yet associated with an open file. Blocking
// file operations will later be posted to |task_runner|.
FileStream::Context::Context(const scoped_refptr<base::TaskRunner>& task_runner)
    : async_in_progress_(false),
      orphaned_(false),
      task_runner_(task_runner) {
}

// Creates a context that adopts ownership of an already-open |file|.
FileStream::Context::Context(base::File file,
                             const scoped_refptr<base::TaskRunner>& task_runner)
    : file_(std::move(file)),
      async_in_progress_(false),
      orphaned_(false),
      task_runner_(task_runner) {}

FileStream::Context::~Context() = default;
// Posts an asynchronous read of up to |buf_len| bytes into |in_buf| to
// |task_runner_|; |callback| runs on the calling sequence when the read
// completes. Always returns ERR_IO_PENDING. Only one async operation may be
// in flight at a time (DCHECKed).
int FileStream::Context::Read(IOBuffer* in_buf,
                              int buf_len,
                              CompletionOnceCallback callback) {
  DCHECK(!async_in_progress_);

  // Take a reference so the buffer stays alive until the posted task is done.
  scoped_refptr<IOBuffer> buf = in_buf;
  const bool posted = base::PostTaskAndReplyWithResult(
      task_runner_.get(), FROM_HERE,
      base::BindOnce(&Context::ReadFileImpl, base::Unretained(this), buf,
                     buf_len),
      base::BindOnce(&Context::OnAsyncCompleted, base::Unretained(this),
                     IntToInt64(std::move(callback))));
  DCHECK(posted);

  async_in_progress_ = true;
  return ERR_IO_PENDING;
}
// Posts an asynchronous write of up to |buf_len| bytes from |in_buf| to
// |task_runner_|; |callback| runs on the calling sequence when the write
// completes. Always returns ERR_IO_PENDING. Only one async operation may be
// in flight at a time (DCHECKed).
int FileStream::Context::Write(IOBuffer* in_buf,
                               int buf_len,
                               CompletionOnceCallback callback) {
  DCHECK(!async_in_progress_);

  // Take a reference so the buffer stays alive until the posted task is done.
  scoped_refptr<IOBuffer> buf = in_buf;
  const bool posted = base::PostTaskAndReplyWithResult(
      task_runner_.get(), FROM_HERE,
      base::BindOnce(&Context::WriteFileImpl, base::Unretained(this), buf,
                     buf_len),
      base::BindOnce(&Context::OnAsyncCompleted, base::Unretained(this),
                     IntToInt64(std::move(callback))));
  DCHECK(posted);

  async_in_progress_ = true;
  return ERR_IO_PENDING;
}
// Runs on |task_runner_|: seeks to |offset| from the beginning of the file.
// On success returns the new absolute position; on failure returns the OS
// error captured from errno.
FileStream::Context::IOResult FileStream::Context::SeekFileImpl(
    int64_t offset) {
  int64_t res = file_.Seek(base::File::FROM_BEGIN, offset);
  if (res == -1)
    return IOResult::FromOSError(errno);

  return IOResult(res, 0);
}

// Nothing to do on POSIX once the file has been opened.
void FileStream::Context::OnFileOpened() {
}
// Runs on |task_runner_|: reads up to |buf_len| bytes at the current file
// position. Returns the byte count on success, or the OS error from errno.
FileStream::Context::IOResult FileStream::Context::ReadFileImpl(
    scoped_refptr<IOBuffer> buf,
    int buf_len) {
  const int bytes_read =
      file_.ReadAtCurrentPosNoBestEffort(buf->data(), buf_len);
  return bytes_read == -1 ? IOResult::FromOSError(errno)
                          : IOResult(bytes_read, 0);
}

// Runs on |task_runner_|: writes up to |buf_len| bytes at the current file
// position. Returns the byte count on success, or the OS error from errno.
FileStream::Context::IOResult FileStream::Context::WriteFileImpl(
    scoped_refptr<IOBuffer> buf,
    int buf_len) {
  const int bytes_written =
      file_.WriteAtCurrentPosNoBestEffort(buf->data(), buf_len);
  return bytes_written == -1 ? IOResult::FromOSError(errno)
                             : IOResult(bytes_written, 0);
}
} // namespace net
| 1,352 |
657 | <reponame>daaawx/bearblog<gh_stars>100-1000
# Generated by Django 3.0.7 on 2021-09-18 12:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the Image model (title, unique url,
    # creation timestamp) with a cascading foreign key to blogs.Blog.

    dependencies = [
        ('blogs', '0041_auto_20210808_2118'),
    ]

    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=200)),
                ('url', models.CharField(max_length=200, unique=True)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                # deleting a Blog removes all of its images
                ('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blogs.Blog')),
            ],
        ),
    ]
| 393 |
721 | <reponame>J-Z-Z/akshare<filename>akshare/other/other_game.py<gh_stars>100-1000
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/11/16 21:12
Desc: 中国电竞价值排行榜
http://rank.uuu9.com/player/ranking
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup
def club_rank_game(symbol: str = "英雄联盟") -> pd.DataFrame:
    """
    中国电竞价值排行榜-俱乐部排名
    http://rank.uuu9.com/
    :param symbol: choice of {'英雄联盟', '绝地求生', '王者荣耀', 'DOTA2', '穿越火线', '和平精英'}
    :type symbol: str
    :return: 俱乐部排名
    :rtype: pandas.DataFrame
    """
    # site-internal game ids used by the ranking endpoint
    symbol_map = {
        "DOTA2": "1",
        "英雄联盟": "2",
        "绝地求生": "3",
        "王者荣耀": "4",
        "穿越火线": "5",
        "和平精英": "6",
    }
    url = "http://rank.uuu9.com/club/ranking"
    params = {"gameId": symbol_map[symbol], "type": "0"}
    r = requests.get(url, params=params)
    soup = BeautifulSoup(r.text, "lxml")
    # the "ec_data" div holds the refresh timestamp after a full-width colon
    data_text = soup.find("div", attrs={"class": "ec_data"}).text
    report_date = data_text.split(":")[-1]
    temp_df = pd.read_html(r.text)[0]
    # games with head-to-head team matches expose an extra 战绩指数 column
    if symbol in {"英雄联盟", "王者荣耀", "DOTA2"}:
        columns = [
            "排名",
            "俱乐部名称",
            "人气指数",
            "舆论指数",
            "战绩指数",
            "综合指数",
            "排名变动",
        ]
    else:
        columns = [
            "排名",
            "俱乐部名称",
            "人气指数",
            "舆论指数",
            "综合指数",
            "排名变动",
        ]
    # the scraped table carries one trailing junk column; label it "-" and drop it
    temp_df.columns = columns + ["-"]
    temp_df = temp_df[columns]
    temp_df["更新时间"] = report_date
    return temp_df
def player_rank_game(symbol: str = "英雄联盟") -> pd.DataFrame:
    """
    中国电竞价值排行榜-选手排行榜
    http://rank.uuu9.com/player/ranking
    :param symbol: choice of {'英雄联盟', '绝地求生', '王者荣耀', 'DOTA2', '穿越火线', '和平精英'}
    :type symbol: str
    :return: 选手排行榜
    :rtype: pandas.DataFrame
    """
    # site-internal game ids used by the ranking endpoint
    symbol_map = {
        "DOTA2": "1",
        "英雄联盟": "2",
        "绝地求生": "3",
        "王者荣耀": "4",
        "穿越火线": "5",
        "和平精英": "6",
    }
    url = "http://rank.uuu9.com/player/ranking"
    params = {"gameId": symbol_map[symbol], "type": "0"}
    r = requests.get(url, params=params)
    soup = BeautifulSoup(r.text, "lxml")
    # the "ec_data" div holds the refresh timestamp after a full-width colon
    data_text = soup.find("div", attrs={"class": "ec_data"}).text
    report_date = data_text.split(":")[-1]
    temp_df = pd.read_html(r.text)[0]
    # column layout differs per game: 王者荣耀 has 战绩指数 but no 身价,
    # 英雄联盟/DOTA2 have both, the remaining games have 身价 only
    if symbol == "王者荣耀":
        columns = [
            "排名",
            "选手ID",
            "所属战队",
            "人气指数",
            "舆论指数",
            "战绩指数",
            "综合指数",
            "排名变动",
        ]
    elif symbol in {"英雄联盟", "DOTA2"}:
        columns = [
            "排名",
            "选手ID",
            "所属战队",
            "人气指数",
            "舆论指数",
            "战绩指数",
            "综合指数",
            "身价",
            "排名变动",
        ]
    else:
        columns = [
            "排名",
            "选手ID",
            "所属战队",
            "人气指数",
            "舆论指数",
            "综合指数",
            "身价",
            "排名变动",
        ]
    # the scraped table carries one trailing junk column; label it "-" and drop it
    temp_df.columns = columns + ["-"]
    temp_df = temp_df[columns]
    temp_df["更新时间"] = report_date
    return temp_df
if __name__ == "__main__":
    # demo: fetch and print the club and player rankings for one game,
    # then iterate over every supported game
    club_rank_game_df = club_rank_game(symbol="英雄联盟")
    print(club_rank_game_df)

    player_rank_game_df = player_rank_game(symbol="英雄联盟")
    print(player_rank_game_df)

    for item in ["英雄联盟", "绝地求生", "王者荣耀", "DOTA2", "穿越火线", "和平精英"]:
        print(item)
        club_rank_game_df = club_rank_game(symbol=item)
        print(club_rank_game_df)
        player_rank_game_df = player_rank_game(symbol=item)
        print(player_rank_game_df)
| 3,894 |
335 | {
"word": "Daily",
"definitions": [
"A newspaper published every day except Sunday.",
"A woman who is employed to clean someone else's house each day.",
"The first prints from cinematographic takes; the rushes."
],
"parts-of-speech": "Noun"
} | 103 |
2,111 | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016
// <NAME> Codeplay Software Ltd.
// <NAME> Codeplay Software Ltd.
// <NAME> Codeplay Software Ltd.
// Contact: <<EMAIL>>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
typedef Tensor<float, 1>::DimensionPair DimPair;
// Runs a 3-D cumulative sum (scan) along `consume_dim` on the SYCL device and
// on the host, then compares the two results element-wise.
// `exclusive` selects exclusive vs inclusive scan semantics.
// Aborts via assert(false) on the first mismatch.
template <typename DataType, int DataLayout, typename IndexType>
void test_sycl_cumsum(const Eigen::SyclDevice& sycl_device, IndexType m_size,
                      IndexType k_size, IndexType n_size, int consume_dim,
                      bool exclusive) {
  static const DataType error_threshold = 1e-4f;
  std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size
            << " consume_dim : " << consume_dim << ")" << std::endl;
  Tensor<DataType, 3, DataLayout, IndexType> t_input(m_size, k_size, n_size);
  Tensor<DataType, 3, DataLayout, IndexType> t_result(m_size, k_size, n_size);
  Tensor<DataType, 3, DataLayout, IndexType> t_result_gpu(m_size, k_size,
                                                          n_size);
  t_input.setRandom();
  std::size_t t_input_bytes = t_input.size() * sizeof(DataType);
  std::size_t t_result_bytes = t_result.size() * sizeof(DataType);
  // Device-side buffers for the input and for the scan result.
  DataType* gpu_data_in =
      static_cast<DataType*>(sycl_device.allocate(t_input_bytes));
  DataType* gpu_data_out =
      static_cast<DataType*>(sycl_device.allocate(t_result_bytes));
  array<IndexType, 3> tensorRange = {{m_size, k_size, n_size}};
  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu_t_input(
      gpu_data_in, tensorRange);
  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu_t_result(
      gpu_data_out, tensorRange);
  sycl_device.memcpyHostToDevice(gpu_data_in, t_input.data(), t_input_bytes);
  // NOTE(review): the output buffer is also seeded with the input data —
  // presumably so the comparison never reads uninitialized memory; confirm.
  sycl_device.memcpyHostToDevice(gpu_data_out, t_input.data(), t_input_bytes);
  // Device scan, then the host reference scan.
  gpu_t_result.device(sycl_device) = gpu_t_input.cumsum(consume_dim, exclusive);
  t_result = t_input.cumsum(consume_dim, exclusive);
  sycl_device.memcpyDeviceToHost(t_result_gpu.data(), gpu_data_out,
                                 t_result_bytes);
  sycl_device.synchronize();
  // Accept a match under either an absolute-difference or a relative
  // (isApprox) criterion before flagging a mismatch.
  for (IndexType i = 0; i < t_result.size(); i++) {
    if (static_cast<DataType>(std::fabs(static_cast<DataType>(
            t_result(i) - t_result_gpu(i)))) < error_threshold) {
      continue;
    }
    if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i),
                                  error_threshold)) {
      continue;
    }
    std::cout << "mismatch detected at index " << i << " CPU : " << t_result(i)
              << " vs SYCL : " << t_result_gpu(i) << std::endl;
    assert(false);
  }
  sycl_device.deallocate(gpu_data_in);
  sycl_device.deallocate(gpu_data_out);
}
// Per-device drivers for the scan test. Each helper fixes the tensor extents
// and the scanned dimension, and runs the kernel for both storage orders so
// the ColMajor and RowMajor code paths are both covered.
template <typename DataType, typename Dev>
void sycl_scan_test_exclusive_dim0_per_device(const Dev& sycl_device) {
  const int64_t dim0 = 2049, dim1 = 1023, dim2 = 127;
  test_sycl_cumsum<DataType, ColMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                0, true);
  test_sycl_cumsum<DataType, RowMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                0, true);
}
template <typename DataType, typename Dev>
void sycl_scan_test_exclusive_dim1_per_device(const Dev& sycl_device) {
  const int64_t dim0 = 1023, dim1 = 2049, dim2 = 127;
  test_sycl_cumsum<DataType, ColMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                1, true);
  test_sycl_cumsum<DataType, RowMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                1, true);
}
template <typename DataType, typename Dev>
void sycl_scan_test_exclusive_dim2_per_device(const Dev& sycl_device) {
  const int64_t dim0 = 1023, dim1 = 127, dim2 = 2049;
  test_sycl_cumsum<DataType, ColMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                2, true);
  test_sycl_cumsum<DataType, RowMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                2, true);
}
template <typename DataType, typename Dev>
void sycl_scan_test_inclusive_dim0_per_device(const Dev& sycl_device) {
  const int64_t dim0 = 2049, dim1 = 1023, dim2 = 127;
  test_sycl_cumsum<DataType, ColMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                0, false);
  test_sycl_cumsum<DataType, RowMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                0, false);
}
template <typename DataType, typename Dev>
void sycl_scan_test_inclusive_dim1_per_device(const Dev& sycl_device) {
  const int64_t dim0 = 1023, dim1 = 2049, dim2 = 127;
  test_sycl_cumsum<DataType, ColMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                1, false);
  test_sycl_cumsum<DataType, RowMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                1, false);
}
template <typename DataType, typename Dev>
void sycl_scan_test_inclusive_dim2_per_device(const Dev& sycl_device) {
  const int64_t dim0 = 1023, dim1 = 127, dim2 = 2049;
  test_sycl_cumsum<DataType, ColMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                2, false);
  test_sycl_cumsum<DataType, RowMajor, int64_t>(sycl_device, dim0, dim1, dim2,
                                                2, false);
}
// Test entry point: enumerates every SYCL-capable device in the system and
// runs all six scan variants (exclusive/inclusive along each dimension).
EIGEN_DECLARE_TEST(cxx11_tensor_scan_sycl) {
  for (const auto& device : Eigen::get_sycl_supported_devices()) {
    std::cout << "Running on "
              << device.template get_info<cl::sycl::info::device::name>()
              << std::endl;
    QueueInterface queueInterface(device);
    auto sycl_device = Eigen::SyclDevice(&queueInterface);
    CALL_SUBTEST_1(
        sycl_scan_test_exclusive_dim0_per_device<float>(sycl_device));
    CALL_SUBTEST_2(
        sycl_scan_test_exclusive_dim1_per_device<float>(sycl_device));
    CALL_SUBTEST_3(
        sycl_scan_test_exclusive_dim2_per_device<float>(sycl_device));
    CALL_SUBTEST_4(
        sycl_scan_test_inclusive_dim0_per_device<float>(sycl_device));
    CALL_SUBTEST_5(
        sycl_scan_test_inclusive_dim1_per_device<float>(sycl_device));
    CALL_SUBTEST_6(
        sycl_scan_test_inclusive_dim2_per_device<float>(sycl_device));
  }
}
| 3,062 |
2,151 | /****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
*** To edit the content of this header, modify the corresponding
*** source file (e.g. under external/kernel-headers/original/) then
*** run bionic/libc/kernel/tools/update_all.py
***
*** Any manual change here will be lost the next time this script will
*** be run. You've been warned!
***
****************************************************************************
****************************************************************************/
#ifndef __NDCTL_H__
#define __NDCTL_H__
#include <linux/types.h>
/* Envelope for the ND_CMD_SMART ioctl; `data` carries struct nd_smart_payload. */
struct nd_cmd_smart {
	__u32 status;
	__u8 data[128];
} __packed;
/* Validity bits for nd_smart_payload.flags. */
#define ND_SMART_HEALTH_VALID (1 << 0)
#define ND_SMART_SPARES_VALID (1 << 1)
#define ND_SMART_USED_VALID (1 << 2)
#define ND_SMART_TEMP_VALID (1 << 3)
#define ND_SMART_CTEMP_VALID (1 << 4)
#define ND_SMART_ALARM_VALID (1 << 9)
#define ND_SMART_SHUTDOWN_VALID (1 << 10)
#define ND_SMART_VENDOR_VALID (1 << 11)
/* Alarm trip bits (nd_smart_payload.alarm_flags). */
#define ND_SMART_SPARE_TRIP (1 << 0)
#define ND_SMART_TEMP_TRIP (1 << 1)
#define ND_SMART_CTEMP_TRIP (1 << 2)
/* Health summary bits (nd_smart_payload.health). */
#define ND_SMART_NON_CRITICAL_HEALTH (1 << 0)
#define ND_SMART_CRITICAL_HEALTH (1 << 1)
#define ND_SMART_FATAL_HEALTH (1 << 2)
/* SMART/health data as reported by the DIMM. */
struct nd_smart_payload {
	__u32 flags;
	__u8 reserved0[4];
	__u8 health;
	__u8 spares;
	__u8 life_used;
	__u8 alarm_flags;
	__u16 temperature;
	__u16 ctrl_temperature;
	__u8 reserved1[15];
	__u8 shutdown_state;
	__u32 vendor_size;
	__u8 vendor_data[92];
} __packed;
/* Envelope for ND_CMD_SMART_THRESHOLD; `data` carries
 * struct nd_smart_threshold_payload. */
struct nd_cmd_smart_threshold {
	__u32 status;
	__u8 data[8];
} __packed;
struct nd_smart_threshold_payload {
	__u8 alarm_control;
	__u8 reserved0;
	__u16 temperature;
	__u8 spares;
	__u8 reserved[3];
} __packed;
/* ND_CMD_DIMM_FLAGS result. */
struct nd_cmd_dimm_flags {
	__u32 status;
	__u32 flags;
} __packed;
/* ND_CMD_GET_CONFIG_SIZE: size and transfer limits of the DIMM label area. */
struct nd_cmd_get_config_size {
	__u32 status;
	__u32 config_size;
	__u32 max_xfer;
} __packed;
/* ND_CMD_GET_CONFIG_DATA: read a window of the label area. */
struct nd_cmd_get_config_data_hdr {
	__u32 in_offset;
	__u32 in_length;
	__u32 status;
	__u8 out_buf[0];
} __packed;
/* ND_CMD_SET_CONFIG_DATA: write a window of the label area. */
struct nd_cmd_set_config_hdr {
	__u32 in_offset;
	__u32 in_length;
	__u8 in_buf[0];
} __packed;
/* ND_CMD_VENDOR passthrough: header precedes the input buffer,
 * tail follows the output buffer. */
struct nd_cmd_vendor_hdr {
	__u32 opcode;
	__u32 in_length;
	__u8 in_buf[0];
} __packed;
struct nd_cmd_vendor_tail {
	__u32 status;
	__u32 out_length;
	__u8 out_buf[0];
} __packed;
/* Address Range Scrub (ARS) capability query. */
struct nd_cmd_ars_cap {
	__u64 address;
	__u64 length;
	__u32 status;
	__u32 max_ars_out;
	__u32 clear_err_unit;
	__u32 reserved;
} __packed;
/* Start an ARS over [address, address + length). */
struct nd_cmd_ars_start {
	__u64 address;
	__u64 length;
	__u16 type;
	__u8 flags;
	__u8 reserved[5];
	__u32 status;
	__u32 scrub_time;
} __packed;
/* ARS status/result; trailed by `num_records` error records. */
struct nd_cmd_ars_status {
	__u32 status;
	__u32 out_length;
	__u64 address;
	__u64 length;
	__u64 restart_address;
	__u64 restart_length;
	__u16 type;
	__u16 flags;
	__u32 num_records;
	struct nd_ars_record {
		__u32 handle;
		__u32 reserved;
		__u64 err_address;
		__u64 length;
	} __packed records[0];
} __packed;
/* Clear a previously reported poison (error) range. */
struct nd_cmd_clear_error {
	__u64 address;
	__u64 length;
	__u32 status;
	__u8 reserved[4];
	__u64 cleared;
} __packed;
/* Command numbers. Note the bus-level ARS commands and the DIMM-level
 * commands deliberately share numbering (e.g. ND_CMD_ARS_CAP == ND_CMD_SMART
 * == 1); which set applies depends on the target device. */
enum {
	ND_CMD_IMPLEMENTED = 0,
	ND_CMD_ARS_CAP = 1,
	ND_CMD_ARS_START = 2,
	ND_CMD_ARS_STATUS = 3,
	ND_CMD_CLEAR_ERROR = 4,
	ND_CMD_SMART = 1,
	ND_CMD_SMART_THRESHOLD = 2,
	ND_CMD_DIMM_FLAGS = 3,
	ND_CMD_GET_CONFIG_SIZE = 4,
	ND_CMD_GET_CONFIG_DATA = 5,
	ND_CMD_SET_CONFIG_DATA = 6,
	ND_CMD_VENDOR_EFFECT_LOG_SIZE = 7,
	ND_CMD_VENDOR_EFFECT_LOG = 8,
	ND_CMD_VENDOR = 9,
	ND_CMD_CALL = 10,
};
enum {
	ND_ARS_VOLATILE = 1,
	ND_ARS_PERSISTENT = 2,
	ND_CONFIG_LOCKED = 1,
};
/* ioctl definitions built from the command numbers above. */
#define ND_IOCTL 'N'
#define ND_IOCTL_SMART _IOWR(ND_IOCTL, ND_CMD_SMART, struct nd_cmd_smart)
#define ND_IOCTL_SMART_THRESHOLD _IOWR(ND_IOCTL, ND_CMD_SMART_THRESHOLD, struct nd_cmd_smart_threshold)
#define ND_IOCTL_DIMM_FLAGS _IOWR(ND_IOCTL, ND_CMD_DIMM_FLAGS, struct nd_cmd_dimm_flags)
#define ND_IOCTL_GET_CONFIG_SIZE _IOWR(ND_IOCTL, ND_CMD_GET_CONFIG_SIZE, struct nd_cmd_get_config_size)
#define ND_IOCTL_GET_CONFIG_DATA _IOWR(ND_IOCTL, ND_CMD_GET_CONFIG_DATA, struct nd_cmd_get_config_data_hdr)
#define ND_IOCTL_SET_CONFIG_DATA _IOWR(ND_IOCTL, ND_CMD_SET_CONFIG_DATA, struct nd_cmd_set_config_hdr)
#define ND_IOCTL_VENDOR _IOWR(ND_IOCTL, ND_CMD_VENDOR, struct nd_cmd_vendor_hdr)
#define ND_IOCTL_ARS_CAP _IOWR(ND_IOCTL, ND_CMD_ARS_CAP, struct nd_cmd_ars_cap)
#define ND_IOCTL_ARS_START _IOWR(ND_IOCTL, ND_CMD_ARS_START, struct nd_cmd_ars_start)
#define ND_IOCTL_ARS_STATUS _IOWR(ND_IOCTL, ND_CMD_ARS_STATUS, struct nd_cmd_ars_status)
#define ND_IOCTL_CLEAR_ERROR _IOWR(ND_IOCTL, ND_CMD_CLEAR_ERROR, struct nd_cmd_clear_error)
/* Device type identifiers. */
#define ND_DEVICE_DIMM 1
#define ND_DEVICE_REGION_PMEM 2
#define ND_DEVICE_REGION_BLK 3
#define ND_DEVICE_NAMESPACE_IO 4
#define ND_DEVICE_NAMESPACE_PMEM 5
#define ND_DEVICE_NAMESPACE_BLK 6
#define ND_DEVICE_DAX_PMEM 7
/* Driver-match bitmasks derived from the device types above. */
enum nd_driver_flags {
	ND_DRIVER_DIMM = 1 << ND_DEVICE_DIMM,
	ND_DRIVER_REGION_PMEM = 1 << ND_DEVICE_REGION_PMEM,
	ND_DRIVER_REGION_BLK = 1 << ND_DEVICE_REGION_BLK,
	ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO,
	ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM,
	ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK,
	ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM,
};
enum {
	ND_MIN_NAMESPACE_SIZE = 0x00400000,
};
/* How to split the 32-bit status word of ARS commands. */
enum ars_masks {
	ARS_STATUS_MASK = 0x0000FFFF,
	ARS_EXT_STATUS_SHIFT = 16,
};
/* ND_CMD_CALL envelope for passing raw, family-specific commands through. */
struct nd_cmd_pkg {
	__u64 nd_family;
	__u64 nd_command;
	__u32 nd_size_in;
	__u32 nd_size_out;
	__u32 nd_reserved2[9];
	__u32 nd_fw_size;
	unsigned char nd_payload[];
};
/* Known values for nd_cmd_pkg.nd_family. */
#define NVDIMM_FAMILY_INTEL 0
#define NVDIMM_FAMILY_HPE1 1
#define NVDIMM_FAMILY_HPE2 2
#define NVDIMM_FAMILY_MSFT 3
#define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL, struct nd_cmd_pkg)
#endif
| 2,681 |
417 | //-----------------------------------------------------------------------------
// Copyright (c) 2013 GarageGames, LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------
#ifndef CUBEMAP_ASSET_H
#include "CubemapAsset.h"
#endif
#ifndef _ASSET_MANAGER_H_
#include "assets/assetManager.h"
#endif
#ifndef _CONSOLETYPES_H_
#include "console/consoleTypes.h"
#endif
#ifndef _TAML_
#include "persistence/taml/taml.h"
#endif
#ifndef _ASSET_PTR_H_
#include "assets/assetPtr.h"
#endif
// Debug Profiling.
#include "platform/profiler.h"
//-----------------------------------------------------------------------------
IMPLEMENT_CONOBJECT(CubemapAsset);
ConsoleType(CubemapAssetPtr, TypeCubemapAssetPtr, CubemapAsset, ASSET_ID_FIELD_PREFIX)
//-----------------------------------------------------------------------------
ConsoleGetType(TypeCubemapAssetPtr)
{
   // Resolve the field storage to an asset pointer and hand back its asset Id.
   AssetPtr<CubemapAsset>* pAssetPtr = (AssetPtr<CubemapAsset>*)dptr;
   return pAssetPtr->getAssetId();
}
//-----------------------------------------------------------------------------
ConsoleSetType(TypeCubemapAssetPtr)
{
   // Anything other than exactly one argument cannot be mapped to one asset.
   if (argc != 1)
   {
      Con::warnf("(TypeCubemapAssetPtr) - Cannot set multiple args to a single asset.");
      return;
   }

   // The sole argument is the asset Id to assign.
   const char* pFieldValue = argv[0];

   // The field storage must actually be an AssetPtr of the expected type.
   AssetPtr<CubemapAsset>* pAssetPtr = dynamic_cast<AssetPtr<CubemapAsset>*>((AssetPtrBase*)(dptr));
   if (pAssetPtr == NULL)
   {
      // Wrong pointer type; silently ignore, matching previous behavior.
      //Con::warnf("(TypeCubemapAssetPtr) - Failed to set asset Id '%d'.", pFieldValue);
      return;
   }

   pAssetPtr->setAssetId(pFieldValue);
}
//-----------------------------------------------------------------------------
// Default-construct with every string member pointing at the interned
// empty string, so uninitialized reads are impossible.
CubemapAsset::CubemapAsset() :
   mComponentName(StringTable->EmptyString()),
   mComponentClass(StringTable->EmptyString()),
   mFriendlyName(StringTable->EmptyString()),
   mComponentType(StringTable->EmptyString()),
   mDescription(StringTable->EmptyString()),
   mScriptFile(StringTable->EmptyString())
{
}
//-----------------------------------------------------------------------------
// Nothing to release explicitly; the string members are interned in the
// global StringTable and are not owned by this object.
CubemapAsset::~CubemapAsset()
{
}
//-----------------------------------------------------------------------------
// Registers the console-visible fields of this asset type so they can be
// inspected, serialized via Taml, and edited in the tools.
void CubemapAsset::initPersistFields()
{
   // Call parent.
   Parent::initPersistFields();
   addField("componentName", TypeString, Offset(mComponentName, CubemapAsset), "Unique Name of the component. Defines the namespace of the scripts for the component.");
   addField("componentClass", TypeString, Offset(mComponentClass, CubemapAsset), "Class of object this component uses.");
   addField("friendlyName", TypeString, Offset(mFriendlyName, CubemapAsset), "The human-readble name for the component.");
   addField("componentType", TypeString, Offset(mComponentType, CubemapAsset), "The category of the component for organizing in the editor.");
   addField("description", TypeString, Offset(mDescription, CubemapAsset), "Simple description of the component.");
   // Protected field: routed through setScriptFile/getScriptFile so the
   // loose file path is expanded/interned consistently on assignment.
   addProtectedField("scriptFile", TypeAssetLooseFilePath, Offset(mScriptFile, CubemapAsset),
      &setScriptFile, &getScriptFile, "A script file with additional scripted functionality for this component.");
}
//------------------------------------------------------------------------------
// Copies this object's state onto `object`. Only the parent's copy logic is
// invoked here; NOTE(review): none of this class's own members are copied
// explicitly — presumably the registered persist fields cover them; confirm.
void CubemapAsset::copyTo(SimObject* object)
{
   // Call to parent.
   Parent::copyTo(object);
}
// Called once when the asset is brought into the asset system: resolves the
// script path and executes the companion script if one exists.
void CubemapAsset::initializeAsset()
{
   Parent::initializeAsset();

   // Owned assets store a loose file path that must be expanded to a full one.
   if (getOwned())
      mScriptPath = expandAssetFilePath(mScriptFile);

   if (Torque::FS::IsScriptFile(mScriptPath))
      Con::executeFile(mScriptPath, false, false);
}
// Re-resolves the script path and re-executes the companion script whenever
// the asset is refreshed (e.g. after the script file was edited on disk).
void CubemapAsset::onAssetRefresh()
{
   if (getOwned())
      mScriptPath = expandAssetFilePath(mScriptFile);

   if (Torque::FS::IsScriptFile(mScriptPath))
      Con::executeFile(mScriptPath, false, false);
}
// Setter for the protected "scriptFile" persist field.
void CubemapAsset::setScriptFile(const char* pScriptFile)
{
   // Sanity!
   AssertFatal(pScriptFile != NULL, "Cannot use a NULL script file.");
   // Intern the path in the string table so the pointer comparison below
   // is meaningful.
   pScriptFile = StringTable->insert(pScriptFile, true);
   // Bail out early when the value did not change.
   // NOTE(review): for owned assets mScriptFile stores the *expanded* path
   // while pScriptFile here is the raw interned input, so this comparison
   // may never match after the first assignment — confirm intended behavior.
   if (pScriptFile == mScriptFile)
      return;
   // Owned assets expand the loose file path to a full path before storing.
   mScriptFile = getOwned() ? expandAssetFilePath(pScriptFile) : pScriptFile;
   // Refresh the asset so dependents pick up the new script.
   refreshAsset();
}
//-----------------------------------------------------------------------------
// GuiInspectorTypeAssetId
//-----------------------------------------------------------------------------
IMPLEMENT_CONOBJECT(GuiInspectorTypeCubemapAssetPtr);
ConsoleDocClass(GuiInspectorTypeCubemapAssetPtr,
"@brief Inspector field type for Shapes\n\n"
"Editor use only.\n\n"
"@internal"
);
// Binds the TypeCubemapAssetPtr console type to this inspector field class so
// the editor instantiates the right control for cubemap asset fields.
void GuiInspectorTypeCubemapAssetPtr::consoleInit()
{
   Parent::consoleInit();
   ConsoleBaseType::getType(TypeCubemapAssetPtr)->setInspectorFieldType("GuiInspectorTypeCubemapAssetPtr");
}
// Builds the inspector row for a cubemap asset field: the base edit control,
// a browse button that opens the asset browser, and a button that opens the
// selected asset in the Cubemap Editor.
// Returns the edit control, or NULL if the parent failed to construct one.
GuiControl* GuiInspectorTypeCubemapAssetPtr::constructEditControl()
{
   // Create base filename edit controls
   GuiControl* retCtrl = Parent::constructEditControl();
   if (retCtrl == NULL)
      return retCtrl;

   // Wire the browse button to open the asset browser filtered to cubemaps.
   // NOTE(review): "%d" is paired with mInspector->getIdString(); if
   // getIdString() returns a string this should be "%s" — confirm against
   // the SimObject API before changing the format.
   char szBuffer[512];
   dSprintf(szBuffer, sizeof(szBuffer), "AssetBrowser.showDialog(\"CubemapAsset\", \"AssetBrowser.changeAsset\", %d, %s);",
      mInspector->getIdString(), mCaption);
   mBrowseButton->setField("Command", szBuffer);

   setDataField(StringTable->insert("object"), NULL, String::ToString(mInspector->getInspectObject()).c_str());

   // Create the "open in editor" button. Despite the member's name (copied
   // from the shape-asset boilerplate) it opens the Cubemap Editor.
   mShapeEdButton = new GuiBitmapButtonCtrl();

   dSprintf(szBuffer, sizeof(szBuffer), "CubemapEditor.openCubemapAsset(%d.getText());", retCtrl->getId());
   mShapeEdButton->setField("Command", szBuffer);

   char bitmapName[512] = "ToolsModule:shape_editor_n_image";
   mShapeEdButton->setBitmap(StringTable->insert(bitmapName));

   mShapeEdButton->setDataField(StringTable->insert("Profile"), NULL, "GuiButtonProfile");
   mShapeEdButton->setDataField(StringTable->insert("tooltipprofile"), NULL, "GuiToolTipProfile");
   mShapeEdButton->setDataField(StringTable->insert("hovertime"), NULL, "1000");
   // Fixed copy-paste tooltip: the command above opens the Cubemap Editor,
   // not the Shape Editor.
   mShapeEdButton->setDataField(StringTable->insert("tooltip"), NULL, "Open this asset in the Cubemap Editor");

   mShapeEdButton->registerObject();
   addObject(mShapeEdButton);

   return retCtrl;
}
// Lays out the caption, edit box, browse button (width 14 at x-32) and
// editor button (width 14 at x-16) inside the field's extent.
// Returns true if any child control actually changed size/position.
bool GuiInspectorTypeCubemapAssetPtr::updateRects()
{
   S32 dividerPos, dividerMargin;
   mInspector->getDivider(dividerPos, dividerMargin);
   Point2I fieldExtent = getExtent();
   Point2I fieldPos = getPosition();  // NOTE(review): currently unused
   mCaptionRect.set(0, 0, fieldExtent.x - dividerPos - dividerMargin, fieldExtent.y);
   // Edit control leaves 34px on the right for the two trailing buttons.
   mEditCtrlRect.set(fieldExtent.x - dividerPos + dividerMargin, 1, dividerPos - dividerMargin - 34, fieldExtent.y);
   bool resized = mEdit->resize(mEditCtrlRect.point, mEditCtrlRect.extent);
   if (mBrowseButton != NULL)
   {
      mBrowseRect.set(fieldExtent.x - 32, 2, 14, fieldExtent.y - 4);
      resized |= mBrowseButton->resize(mBrowseRect.point, mBrowseRect.extent);
   }
   if (mShapeEdButton != NULL)
   {
      RectI shapeEdRect(fieldExtent.x - 16, 2, 14, fieldExtent.y - 4);
      resized |= mShapeEdButton->resize(shapeEdRect.point, shapeEdRect.extent);
   }
   return resized;
}
| 2,663 |
2,568 | <reponame>rkelly/twofactorauth
{
"Malwarebytes (Personal)": {
"domain": "malwarebytes.com",
"tfa": [
"email"
],
"documentation": "https://support.malwarebytes.com/hc/en-us/articles/4402158331411",
"keywords": [
"security"
]
}
} | 127 |
2,659 | <reponame>yangcol/OpenMLDB<filename>src/client/taskmanager_client.h
/*
* Copyright 2021 4Paradigm
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SRC_CLIENT_TASKMANAGER_CLIENT_H_
#define SRC_CLIENT_TASKMANAGER_CLIENT_H_
#include <string>
#include "base/status.h"
#include "client/client.h"
#include "proto/taskmanager.pb.h"
#include "rpc/rpc_client.h"
namespace openmldb {
namespace client {
/// RPC client for the TaskManager service: job inspection/control plus
/// offline/online data import-export, each returning a Status and, where
/// applicable, filling in the resulting JobInfo.
class TaskManagerClient : public Client {
 public:
    TaskManagerClient(const std::string& endpoint, const std::string& real_endpoint, bool use_sleep_policy)
        : Client(endpoint, real_endpoint),
          client_(real_endpoint.empty() ? endpoint : real_endpoint, use_sleep_policy) {}

    // Convenience overload: defaults to the sleep retry policy.
    TaskManagerClient(const std::string& endpoint, const std::string& real_endpoint)
        : Client(endpoint, real_endpoint), client_(real_endpoint.empty() ? endpoint : real_endpoint, true) {}

    ~TaskManagerClient() override {}

    // Initializes the underlying RPC channel; returns the RpcClient code.
    int Init() override { return client_.Init(); }

    // Lists jobs; when only_unfinished is true, finished jobs are filtered out.
    ::openmldb::base::Status ShowJobs(const bool only_unfinished,
                                      std::vector<::openmldb::taskmanager::JobInfo>& job_infos);

    ::openmldb::base::Status ShowJob(const int id, ::openmldb::taskmanager::JobInfo& job_info);

    ::openmldb::base::Status StopJob(const int id, ::openmldb::taskmanager::JobInfo& job_info);

    // Submits a batch SQL job and reports its JobInfo.
    ::openmldb::base::Status RunBatchAndShow(const std::string& sql, const std::map<std::string, std::string>& config,
                                             const std::string& default_db, ::openmldb::taskmanager::JobInfo& job_info);

    ::openmldb::base::Status ImportOnlineData(const std::string& sql, const std::map<std::string, std::string>& config,
                                              const std::string& default_db,
                                              ::openmldb::taskmanager::JobInfo& job_info);

    ::openmldb::base::Status ImportOfflineData(const std::string& sql, const std::map<std::string, std::string>& config,
                                               const std::string& default_db,
                                               ::openmldb::taskmanager::JobInfo& job_info);

    ::openmldb::base::Status ExportOfflineData(const std::string& sql, const std::map<std::string, std::string>& config,
                                               const std::string& default_db,
                                               ::openmldb::taskmanager::JobInfo& job_info);

    ::openmldb::base::Status DropOfflineTable(const std::string& db, const std::string& table);

 private:
    // Stub-backed RPC channel to the TaskManager server.
    ::openmldb::RpcClient<::openmldb::taskmanager::TaskManagerServer_Stub> client_;
};
} // namespace client
} // namespace openmldb
#endif // SRC_CLIENT_TASKMANAGER_CLIENT_H_
| 1,332 |
2,059 | <gh_stars>1000+
/*
* Copyright (c) 2016 Spotify AB.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#import "HUBJSONCompatibleBuilder.h"
@protocol HUBViewModelBuilder;
@class HUBIdentifier;
NS_ASSUME_NONNULL_BEGIN
/**
* Protocol defining the public API for a builder that builds component target objects
*
* This builder acts like a mutable model counterpart for `HUBComponentTarget`, with the
* key difference that they are not related by inheritance.
*
* All properties are briefly documented as part of this protocol, but for more extensive
* documentation and use case examples, see the full documentation in the `HUBComponentTarget`
* protocol definition.
*/
@protocol HUBComponentTargetBuilder <HUBJSONCompatibleBuilder>

/// Any URI that should be opened when the user interacts with the target's component
@property (nonatomic, copy, nullable) NSURL *URI;

/// Any initial view model that should be used for any Hub Framework-powered target view
@property (nonatomic, strong, readonly) id<HUBViewModelBuilder> initialViewModelBuilder;

/// The identifiers of any custom actions that should be performed when the target is executed.
/// The set is ordered; mutate it directly or use `addActionWithNamespace:name:` below.
@property (nonatomic, strong, readonly) NSMutableOrderedSet<HUBIdentifier *> *actionIdentifiers;

/// Any custom data to associate with the target
@property (nonatomic, copy, nullable) NSDictionary<NSString *, id> *customData;

/**
 *  Add a custom action identifier to this target
 *
 *  @param actionNamespace The namespace part of the action's identifier
 *  @param actionName The name part of the action's identifier
 *
 *  This method is an alias/convenience API for `[self.actionIdentifiers addObject:]`.
 */
- (void)addActionWithNamespace:(NSString *)actionNamespace name:(NSString *)actionName;

@end
@end
NS_ASSUME_NONNULL_END
| 687 |
451 | <gh_stars>100-1000
from lightning_transformers.task.nlp.question_answering.datasets.squad.data import SquadDataModule # noqa: F401
| 46 |
618 | <gh_stars>100-1000
package cn.hadcn.keyboard.emoticon.db;
import android.provider.BaseColumns;
/**
* @author chris
*/
/**
 * Column-name constants for the emoticon SQLite database.
 * Package-private: only the db layer should reference these.
 */
final class TableColumns {

    /**
     * the columns definition for emoticon item
     */
    class EmoticonItem implements BaseColumns {
        private EmoticonItem() {
        }

        //1, for send in input area; 2, for sent in chat area directly
        static final String EVENT_TYPE = "event_type";
        static final String TAG = "tag"; //for matching, should be unique
        static final String NAME = "name"; //for displaying
        static final String ICON_URI = "icon_uri"; //uri for displaying in grid
        static final String MSG_URI = "msg_uri"; //for being sent in chat
        static final String EMOTICON_SET_NAME = "emoticon_set_name"; //emoticon set name
    }

    /**
     * the columns definition for emoticons set
     */
    class EmoticonSet implements BaseColumns {
        private EmoticonSet() {
        }

        static final String NAME = "name"; //display name of the set
        static final String LINE = "line"; //grid rows per page
        static final String ROW = "row"; //grid columns per page
        static final String ICON_URI = "icon_uri"; //tab icon for the set
        static final String IS_SHOW_DEL_BTN = "is_show_del_btn"; //whether delete button is shown
        static final String ITEM_PADDING = "item_padding";
        static final String HORIZONTAL_SPACING = "horizontal_spacing";
        static final String VERTICAL_SPACING = "vertical_spacing";
        static final String IS_SHOW_NAME = "is_show_name"; //whether item names are shown
    }
}
| 575 |
809 | <filename>src/drivers/audio/portaudio/stm32_pa_cube.c
/**
* @file
* @brief
*
* @date 25.09.14
* @author <NAME>
* @author <NAME>
* Adaptation for STM32F4/F7 Cube
*/
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <linux/byteorder.h>
#include <util/err.h>
#include <framework/mod/options.h>
#include <kernel/irq.h>
#include <kernel/thread.h>
#include <kernel/thread/thread_sched_wait.h>
#include <kernel/panic.h>
#include <kernel/printk.h>
#include <mem/misc/pool.h>
#include <util/dlist.h>
#include <util/bit.h>
#include <portaudio.h>
#include <drivers/audio/stm32_audio.h>
#define MODOPS_VOLUME OPTION_GET(NUMBER, volume)
#define MODOPS_SAMPLE_RATE OPTION_GET(NUMBER, sample_rate)
#define MODOPS_BUF_CNT OPTION_GET(NUMBER, buf_cnt)
#define D(fmt, ...) \
do { \
printk("%s" fmt "\n", __VA_ARGS__); \
} while (0)
#define MAX_BUF_LEN (160 * 6)
#define CHAN_N OPTION_GET(NUMBER, chan_n)
#define SLOT_N OPTION_GET(NUMBER, slot_n)
/* TODO chan_n - (1 for mono, 2 for stereo). Each of channels can include several
* slots (slot_n). Currently we interprete each OUTPUT slot as OUTPUT channel.
* E.g. for STM32F7-Discovery we have 2 channels with 2 slots on each channel
* (64 bit audio frame).
* So if we play mono wav, we just copy each 16 bit sample 4 times. */
#define OUTPUT_CHAN_N (CHAN_N * SLOT_N)
#define BUF_N 2
/* State of the single playback stream supported by this backend. */
struct pa_strm {
	int started;     /* BSP playback has been kicked off */
	int paused;      /* playback paused via BSP_AUDIO_OUT_Pause */
	int completed;   /* user callback returned paComplete */
	int sample_format;
	PaStreamCallback *callback;      /* user callback producing samples */
	void *callback_data;
	size_t chan_buf_len;             /* frames per half-buffer refill */
	uint16_t in_buf[MAX_BUF_LEN];    /* mono frames from the callback */
	/* Double-buffered DMA output, each frame fanned out to all slots. */
	uint16_t out_buf[MAX_BUF_LEN * OUTPUT_CHAN_N * BUF_N];
	/* Bit i set => half-buffer i consumed by DMA and awaiting a refill;
	 * written from IRQ context, hence volatile. */
	volatile unsigned char out_buf_empty_mask;
};
static_assert(BUF_N <= 8, "");
static struct thread *pa_thread;
static struct pa_strm pa_stream;
static irq_return_t stm32_audio_dma_interrupt(unsigned int irq_num, void *dev_id);
/* Pulls one buffer of mono samples from the user callback and expands it
 * into half-buffer `buf_index` of the output, duplicating each 16-bit frame
 * across all OUTPUT_CHAN_N slots. Marks the stream completed when the
 * callback returns paComplete. */
static void strm_get_data(struct pa_strm *strm, int buf_index) {
	uint16_t *buf;
	int i_in, rc;
	rc = strm->callback(NULL, strm->in_buf, strm->chan_buf_len, NULL, 0, strm->callback_data);
	if (rc == paComplete) {
		strm->completed = 1;
	}
	else assert(rc == paContinue);
	/* Destination: start of the selected half of the double buffer. */
	buf = strm->out_buf + buf_index * strm->chan_buf_len * OUTPUT_CHAN_N;
	for (i_in = 0; i_in < strm->chan_buf_len; ++i_in) {
		/* Samples arrive little-endian from the callback. */
		const uint16_t hw_frame = le16_to_cpu(strm->in_buf[i_in]);
		int i_out;
		for (i_out = 0; i_out < OUTPUT_CHAN_N; ++i_out) {
			buf[i_in * OUTPUT_CHAN_N + i_out] = hw_frame;
		}
	}
}
/* Worker thread: sleeps until the DMA IRQ flags a half-buffer as consumed,
 * then refills it from the user callback. Stops playback once the stream
 * reports completion. */
static void *pa_thread_hnd(void *arg) {
	struct pa_strm *strm = arg;
	while (1) {
		/* Woken by stm32_audio_irq_fill_buffer() setting a mask bit. */
		SCHED_WAIT(strm->out_buf_empty_mask);
		if (!strm->completed) {
			unsigned char empty_mask;
			int buf_index;
			empty_mask = strm->out_buf_empty_mask;
			static_assert(BUF_N == 2, "");
			if (empty_mask < 3) {
				/* 0 - impossible; 1 -> 0; 2 -> 1 */
				buf_index = empty_mask >> 1;
			} else {
				/* there are some empty buffers, but should be 1 */
				printk("stm32_pa: underrun!\n");
				/* NOTE(review): confirm bit_ffs() returns a 0-based
				 * index here; a 1-based result would pick the wrong
				 * half-buffer on underrun. */
				buf_index = bit_ffs(empty_mask);
			}
			/* NOTE(review): restarting Play on the second half looks like
			 * an underrun-recovery measure — confirm against BSP docs. */
			if (buf_index == 1) {
				BSP_AUDIO_OUT_Play(strm->out_buf,
					strm->chan_buf_len * OUTPUT_CHAN_N * BUF_N * sizeof(uint16_t));
			}
			strm_get_data(strm, buf_index);
			/* Clear the refilled buffer's bit atomically w.r.t. the IRQ. */
			irq_lock();
			strm->out_buf_empty_mask &= ~(1 << buf_index);
			irq_unlock();
		} else {
			BSP_AUDIO_OUT_Stop(CODEC_PDWN_HW);
		}
	}
	return NULL;
}
/* No global state to set up/tear down; these only trace the call. */
PaError Pa_Initialize(void) {
	D("", __func__);
	return paNoError;
}
PaError Pa_Terminate(void) {
	D("", __func__);
	return paNoError;
}
PaHostApiIndex Pa_GetHostApiCount(void) { return 1; }
PaDeviceIndex Pa_GetDeviceCount(void) { return 1; }
PaDeviceIndex Pa_GetDefaultOutputDevice(void) { return 0; }
/* NOTE(review): returns index 1 while Pa_GetDeviceCount() reports only one
 * device (index 0) — enumerating clients may never see the input device;
 * confirm intended behavior. */
PaDeviceIndex Pa_GetDefaultInputDevice(void) { return 1; }
/* Error-code-to-text mapping is not implemented on this platform. */
const char * Pa_GetErrorText(PaError errorCode) {
	D(": %d", __func__, errorCode);
	return "Pa_GetErrorText not implemented";
}
/* Describes the single supported device (index 0); NULL for any other index. */
const PaDeviceInfo * Pa_GetDeviceInfo(PaDeviceIndex device) {
	static const PaDeviceInfo info = {
		.structVersion = 1,
		.name = "stm32_audio",
		.hostApi = 0,
		.maxInputChannels = 1,
		.maxOutputChannels = 1,
		.defaultLowInputLatency = 0,
		.defaultLowOutputLatency = 0,
		.defaultHighInputLatency = 0,
		.defaultHighOutputLatency = 0,
		.defaultSampleRate = MODOPS_SAMPLE_RATE
	};
	D(": %d = %p", __func__, device, device == 0 ? &info : NULL);
	return device == 0 ? &info : NULL;
}
/* Describes the single host API (index 0); NULL for any other index. */
const PaHostApiInfo * Pa_GetHostApiInfo(PaHostApiIndex hostApi) {
	static const PaHostApiInfo info = {
		.structVersion = 1,
		.name = "stm32f4_audio_host_api",
	};
	D(": %d = %p", __func__, hostApi, hostApi == 0 ? &info : NULL);
	return hostApi == 0 ? &info : NULL;
}
/* Static stream description; only validates that `stream` is non-NULL. */
const PaStreamInfo * Pa_GetStreamInfo(PaStream *stream) {
	static PaStreamInfo info = {
		.structVersion = 1,
		.inputLatency = 0,
		.outputLatency = 0,
		.sampleRate = MODOPS_SAMPLE_RATE
	};
	D(": %p = %p", __func__, stream, stream != NULL ? &info : NULL);
	return stream != NULL ? &info : NULL;
}
/* Opens the single playback stream: validates the (narrow) set of supported
 * parameters via asserts, initializes the global pa_stream state, spawns the
 * refill thread, attaches the DMA IRQ and initializes the BSP codec.
 * On any setup failure the partially acquired resources are unwound via the
 * goto ladder and paUnanticipatedHostError is returned. */
PaError Pa_OpenStream(PaStream** stream,
		const PaStreamParameters *inputParameters,
		const PaStreamParameters *outputParameters,
		double sampleRate, unsigned long framesPerBuffer,
		PaStreamFlags streamFlags, PaStreamCallback *streamCallback,
		void *userData) {
	struct pa_strm *strm;
	/* Only mono paInt16 output on device 0 with a callback is supported. */
	assert(stream != NULL);
	/*assert(inputParameters == NULL);*/
	assert(outputParameters != NULL);
	assert(outputParameters->device == 0);
	assert(outputParameters->channelCount == 1);
	assert(outputParameters->sampleFormat == paInt16);
	assert(outputParameters->hostApiSpecificStreamInfo == 0);
	assert(streamFlags == paNoFlag || streamFlags == paClipOff);
	assert(streamCallback != NULL);
	assert(framesPerBuffer <= MAX_BUF_LEN);
	D(": %p %p %p %f %lu %lu %p %p", __func__, stream, inputParameters,
			outputParameters, sampleRate, framesPerBuffer, streamFlags, streamCallback, userData);
	strm = &pa_stream;
	strm->started = 0;
	strm->paused = 0;
	strm->completed = 0;
	strm->sample_format = outputParameters->sampleFormat;
	/* Largest multiple of framesPerBuffer that fits in the buffer. */
	strm->chan_buf_len = (MAX_BUF_LEN / framesPerBuffer) * framesPerBuffer;
	strm->callback = streamCallback;
	strm->callback_data = userData;
	strm->out_buf_empty_mask = 0;
	strm->out_buf_empty_mask = 0;
	pa_thread = thread_create(0, pa_thread_hnd, strm);
	if (err(pa_thread)) {
		goto err_out;
	}
	if (0 != irq_attach(STM32_AUDIO_DMA_IRQ, stm32_audio_dma_interrupt,
				0, strm, "stm32_audio")) {
		goto err_thread_free;
	}
	if (0 != BSP_AUDIO_OUT_Init(OUTPUT_DEVICE_HEADPHONE, MODOPS_VOLUME,
				sampleRate)) {
		goto err_irq_detach;
	}
	*stream = &pa_stream;
	return paNoError;
err_irq_detach:
	/* NOTE(review): attached with dev_id == strm but detached with NULL —
	 * confirm irq_detach matches handlers regardless of dev_id. */
	irq_detach(STM32_AUDIO_DMA_IRQ, NULL);
err_thread_free:
	thread_delete(pa_thread);
	pa_thread = NULL;
err_out:
	return paUnanticipatedHostError;
}
/* Tears down the stream: stops playback, detaches the DMA IRQ and deletes
 * the refill thread. */
PaError Pa_CloseStream(PaStream *stream) {
	D(": %p", __func__, stream);
	BSP_AUDIO_OUT_Stop(CODEC_PDWN_HW);
	/* NOTE(review): dev_id NULL here vs. strm at attach time — confirm
	 * the handler is actually removed (see Pa_OpenStream). */
	irq_detach(STM32_AUDIO_DMA_IRQ, NULL);
	thread_delete(pa_thread);
	pa_thread = NULL;
	return paNoError;
}
/* Starts playback on first call, resumes after Pa_StopStream otherwise.
 * NOTE(review): on the first start, DMA begins before any half-buffer has
 * been filled by the callback — presumably the initial buffer contents are
 * acceptable (silence/garbage for one period); confirm. */
PaError Pa_StartStream(PaStream *stream) {
	struct pa_strm *strm;
	D(": %p", __func__, stream);
	assert(stream != NULL);
	strm = (struct pa_strm *)stream;
	/* Valid transitions: not started yet, or started-and-paused. */
	assert(!strm->started || strm->paused);
	if (!strm->started) {
		strm->out_buf_empty_mask = 0;
		if (0 != BSP_AUDIO_OUT_Play(strm->out_buf,
					strm->chan_buf_len * OUTPUT_CHAN_N * BUF_N * sizeof(uint16_t))) {
			return paInternalError;
		}
		printk("playing\n");
		strm->started = 1;
	} else {
		assert(strm->paused);
		if (0 != BSP_AUDIO_OUT_Resume()) {
			return paInternalError;
		}
		strm->paused = 0;
	}
	return paNoError;
}
/*
 * Stop playback.  Note this maps to BSP_AUDIO_OUT_Pause(), not a full
 * stop: Pa_StartStream() resumes from where it left off, and the real
 * teardown happens in Pa_CloseStream().
 */
PaError Pa_StopStream(PaStream *stream) {
	struct pa_strm *strm;

	D(": %p", __func__, stream);

	assert(stream != NULL);
	strm = (struct pa_strm *)stream;
	assert(strm->started && !strm->paused);

	if (0 != BSP_AUDIO_OUT_Pause()) {
		return paInternalError;
	}
	strm->paused = 1;

	return paNoError;
}
/* PortAudio sleep shim: block the calling thread for roughly msec
 * milliseconds. */
void Pa_Sleep(long msec) {
	const long usec = msec * USEC_PER_MSEC;

	D(" %ld", __func__, msec);
	usleep(usec);
}
/*
 * Runs in DMA IRQ context: mark one half of the double buffer
 * (0 = first half, 1 = second half) as consumed and wake the feeder
 * thread so it refills that half.
 */
static void stm32_audio_irq_fill_buffer(int buf_index) {
	pa_stream.out_buf_empty_mask |= 1 << buf_index;
	sched_wakeup(&pa_thread->schedee);
}
/* DMA reached the buffer midpoint: first half is free for refill. */
void BSP_AUDIO_OUT_HalfTransfer_CallBack(void) {
	stm32_audio_irq_fill_buffer(0);
}
/* DMA finished the buffer: second half is free for refill. */
void BSP_AUDIO_OUT_TransferComplete_CallBack(void) {
	stm32_audio_irq_fill_buffer(1);
}
STATIC_IRQ_ATTACH(STM32_AUDIO_DMA_IRQ, stm32_audio_dma_interrupt, &pa_stream);
| 3,437 |
4,432 | {
"name": "hexo-theme-fluid",
"version": "1.8.12",
"description": "An elegant Material-Design theme for Hexo.",
"repository": {
"type": "git",
"url": "git+https://github.com/fluid-dev/hexo-theme-fluid.git"
},
"keywords": [
"hexo",
"theme",
"fluid",
"material"
],
"author": "fluid-dev (https://github.com/fluid-dev)",
"license": "MIT",
"bugs": {
"url": "https://github.com/fluid-dev/hexo-theme-fluid/issues"
},
"homepage": "https://hexo.fluid-dev.com/docs",
"engines": {
"node": ">=8.10.0"
},
"peerDependencies": {
"nunjucks": "^3.0.0"
}
}
| 283 |
989 | <filename>NotSoFatso/Wave_MMC5.h
/*
* Copyright (C) 2004 Disch
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
//////////////////////////////////////////////////////////////////////////
//
// Wave_MMC5.h
//
// These are similar to the native NES square waves, only they lack a sweep unit and aren't
// (as far as I know), interdependant on each other's output.
//
// The voice is similar to the $4011 register, but without the DMC's sample playback capabilities.
// so it's rather useless. I haven't been able to find any game to test it with (since I'm not aware of
// any who use it... nor do I see how it could be used in an NSF because of lack of IRQ support). But it's
// included anyway. Theoretically it should work... but like I said, can't test it.
class CMMC5SquareWave
{
public:
	///////////////////////////////////
	//	Programmable Timer
	TWIN		nFreqTimer;        // period of the square wave timer
	int			nFreqCount;        // cycles remaining until the next duty step

	///////////////////////////////////
	//	Length Counter
	BYTE		nLengthCount;      // remaining length; channel is silent at 0
	BYTE		bLengthEnabled;    // when set, ClockMinor() decrements the counter
	BYTE		bChannelEnabled;

	///////////////////////////////////
	//	Volume / Decay
	BYTE		nVolume;           // current output volume (0..15)
	BYTE		nDecayVolume;      // envelope's internal volume level
	BYTE		bDecayEnable;      // envelope drives nVolume when set
	BYTE		bDecayLoop;        // envelope wraps back to 15 instead of stopping
	BYTE		nDecayTimer;       // envelope divider reload value
	BYTE		nDecayCount;       // envelope divider countdown

	///////////////////////////////////
	//	Duty Cycle
	BYTE		nDutyCount;        // position within the 16-step duty sequence
	BYTE		nDutyCycle;        // output is high while nDutyCount < nDutyCycle

	///////////////////////////////////
	//	Output and Downsampling
	BYTE		bChannelMix;
	short		nOutputTable_L[0x10];  // volume -> left sample
	short		nOutputTable_R[0x10];  // volume -> right sample
	int			nMixL;                 // accumulated left output
	int			nMixR;                 // accumulated right output

	///////////////////////////////////
	//	Inverting
	BYTE		bDoInvert;             // currently inverting the right channel
	BYTE		bInvert;               // requested inversion state
	WORD		nInvertFreqCutoff;     // inversion disabled above this period

	///////////////////////////////////////////////////////////////////
	///////////////////////////////////////////////////////////////////
	//  Functions
	///////////////////////////////////////////////////////////////////
	///////////////////////////////////////////////////////////////////

	// Envelope (decay) clock.  When the divider expires the internal
	// volume steps down, wrapping back to 15 only in loop mode; if the
	// envelope is enabled it drives the output volume.
	FORCEINLINE void ClockMajor()		//decay
	{
		if(nDecayCount)
			nDecayCount--;
		else
		{
			nDecayCount = nDecayTimer;
			if(nDecayVolume)
				nDecayVolume--;
			else
			{
				if(bDecayLoop)
					nDecayVolume = 0x0F;
			}

			if(bDecayEnable)
				nVolume = nDecayVolume;
		}
	}

	// Length-counter clock: counts the channel down toward silence
	// unless the counter is halted.
	FORCEINLINE void ClockMinor()		//length
	{
		if(bLengthEnabled && nLengthCount)
			nLengthCount--;
	}

	// Advance the channel by 'ticks' cycles, accumulating output into
	// nMixL/nMixR.  Periods below 8 produce no output (early return).
	FORCEINLINE void DoTicks(int ticks,BYTE mix)
	{
		register int mn;

		if(nFreqTimer.W < 8) return;

		while(ticks)
		{
			// Process up to the next duty-step boundary in one go.
			mn = min(nFreqCount,ticks);
			ticks -= mn;

			nFreqCount -= mn;

			// Output is audible while within the duty pulse and the
			// length counter is nonzero; the right channel may be
			// inverted for the stereo effect.
			if(mix && (nDutyCount < nDutyCycle) && nLengthCount)
			{
				nMixL += nOutputTable_L[nVolume] * mn;
				nMixR += nOutputTable_R[nVolume] * (bDoInvert ? -mn : mn);
			}

			if(!nFreqCount)
			{
				nFreqCount = nFreqTimer.W + 1;
				nDutyCount = (nDutyCount + 1) & 0x0F;
				// Only re-latch the inversion state at the start of a
				// duty period, and never when the period exceeds the
				// configured cutoff.
				if(!nDutyCount)
				{
					bDoInvert = bInvert;
					if(nInvertFreqCutoff < nFreqTimer.W)
						bDoInvert = 0;
				}
			}
		}
	}

	// Drain the left accumulator into a mono mix (inversion is a
	// stereo-only effect and is ignored here).
	FORCEINLINE void Mix_Mono(int& mix,int downsample)
	{
		mix += (nMixL / downsample);
		nMixL = 0;
	}

	// Drain both accumulators into a stereo mix.
	FORCEINLINE void Mix_Stereo(int& mixL,int& mixR,int downsample)
	{
		mixL += (nMixL / downsample);
		mixR += (nMixR / downsample);

		nMixL = nMixR = 0;
	}
};
// Raw PCM voice (similar to $4011): no timers, counters or envelopes —
// just a 7-bit level that gets scaled into the left/right accumulators.
class CMMC5VoiceWave
{
public:
	///////////////////////////////////
	//	Everything
	BYTE		nOutput;              // current 7-bit output level
	short		nOutputTable_L[0x80]; // level -> left sample
	short		nOutputTable_R[0x80]; // level -> right sample
	int			nMixL;                // accumulated left output
	int			nMixR;                // accumulated right output
	BYTE		bInvert;              // flip the right channel when set

	///////////////////////////////////////////////////////////////////
	//  Functions
	///////////////////////////////////////////////////////////////////

	// Accumulate 'ticks' cycles of output at the current level.
	FORCEINLINE void DoTicks(int ticks)
	{
		const int left = nOutputTable_L[nOutput] * ticks;
		int right = nOutputTable_R[nOutput] * ticks;
		if(bInvert)
			right = -right;
		nMixL += left;
		nMixR += right;
	}

	// Drain the left accumulator into a mono mix.
	FORCEINLINE void Mix_Mono(int& mix,int downsample)
	{
		mix += nMixL / downsample;
		nMixL = 0;
	}

	// Drain both accumulators into a stereo mix.
	FORCEINLINE void Mix_Stereo(int& mixL,int& mixR,int downsample)
	{
		mixL += nMixL / downsample;
		mixR += nMixR / downsample;
		nMixL = 0;
		nMixR = 0;
	}
};
336 | <reponame>nattangwiwat/Mayan-EDMS-recitation<gh_stars>100-1000
from django.utils.translation import ugettext_lazy as _

from mayan.apps.events.classes import EventTypeNamespace

# Event namespace for the mailing app; events registered here are grouped
# under the "Mailing" label in the events subsystem.
namespace = EventTypeNamespace(label=_('Mailing'), name='mailing')

# Fired when an email is sent.
# NOTE(review): the internal name is 'email_send' while the label reads
# 'Email sent'; the name is the stored identifier, so renaming it would
# orphan existing event records — confirm before "fixing".
event_email_sent = namespace.add_event_type(
    label=_('Email sent'), name='email_send'
)
| 119 |
1,383 | <reponame>lucasw/chrono
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: <NAME>, <NAME>
// =============================================================================
#ifndef CHMESHSURFACE_H
#define CHMESHSURFACE_H
#include "chrono/physics/ChLoadable.h"
#include "chrono/fea/ChElementBase.h"
namespace chrono {
namespace fea {
/// @addtogroup chrono_fea
/// @{
// Forward references (for parent hierarchy pointer)
class ChMesh;
/// Class which defines a surface of a mesh of FEA elements.
/// The surface is a collection of pointers to ChLoadableUV objects, which can be shells in the mesh or proxies
/// to faces of solid elements (such as ChTetrahedronFace or ChHexahedronFace).
class ChApi ChMeshSurface {
  public:
    ChMeshSurface(ChMesh* parentmesh = nullptr) : mmesh(parentmesh) {}

    virtual ~ChMeshSurface() {}

    /// Get owner mesh.
    ChMesh* GetMesh() { return mmesh; }

    /// Set owner mesh.
    void SetMesh(ChMesh* mm) { mmesh = mm; }

    /// Direct access to the list of faces.
    std::vector<std::shared_ptr<ChLoadableUV> >& GetFacesList() { return faces; }

    /// Add a single mesh face.
    /// Note that this function does not check for double insertion of the same face.
    virtual void AddFace(std::shared_ptr<ChLoadableUV> mface) { faces.push_back(mface); }

    /// Add multiple faces of FEM elements given a set of nodes at vertexes.
    /// Scan all the finite elements already added in the parent ChMesh, and check if any has a face whose vertexes are
    /// all in the given node set; if so, add it to this mesh surface, with these rules:
    /// - surface elements inherited from ChLoadableUV: the element is added
    /// - face of ChElementTetrahedron : a ChTetrahedronFace proxy is created and added
    /// - face of ChElementHexahedron : a ChHexahedronFace proxy is created and added
    virtual void AddFacesFromNodeSet(std::vector<std::shared_ptr<ChNodeFEAbase> >& node_set);

    /// Find faces on the outer boundary of a solid mesh.
    /// Scan all the finite elements already added in the parent ChMesh and add the faces that are not shared.
    virtual void AddFacesFromBoundary();

  private:
    std::vector<std::shared_ptr<ChLoadableUV> > faces;  ///< mesh faces
    std::shared_ptr<ChMaterialSurface> matsurface;      ///< contact material. NOTE(review): not referenced by any code visible here — confirm it is used in the .cpp, else remove.
    ChMesh* mmesh;                                      ///< parent mesh
};
/// @} chrono_fea
} // end namespace fea
} // end namespace chrono
#endif
| 902 |
421 | #using <System.dll>
using namespace System;
using namespace System::Text;
using namespace System::IO;
using namespace System::Net;
using namespace System::Net::Sockets;
using namespace System::Threading;
using namespace System::Security::Permissions;
using namespace System::Collections;
// Demonstrates creating, combining (Union/Intersect), copying, serializing
// and enforcing SocketPermission objects, then exercises the resulting
// policy with a simple TCP connect to www.contoso.com.
// The <SnippetN> markers delimit regions extracted into documentation —
// do not remove or reorder them.
void MySocketPermission()
{
//<Snippet1>
//<Snippet2>
   // Creates a SocketPermission restricting access to and from all URIs.
   SocketPermission^ mySocketPermission1 = gcnew SocketPermission( PermissionState::None );

   // The socket to which this permission will apply will allow connections from www.contoso.com.
   mySocketPermission1->AddPermission( NetworkAccess::Accept, TransportType::Tcp, "www.contoso.com", 11000 );

   // Creates a SocketPermission which will allow the target Socket to connect with www.southridgevideo.com.
   SocketPermission^ mySocketPermission2 = gcnew SocketPermission( NetworkAccess::Connect,TransportType::Tcp, "www.southridgevideo.com",11002 );

   // Creates a SocketPermission from the union of two SocketPermissions.
   SocketPermission^ mySocketPermissionUnion =
      (SocketPermission^)( mySocketPermission1->Union( mySocketPermission2 ) );

   // Checks to see if the union was successfully created by using the IsSubsetOf method.
   if ( mySocketPermission1->IsSubsetOf( mySocketPermissionUnion ) &&
      mySocketPermission2->IsSubsetOf( mySocketPermissionUnion ) )
   {
      Console::WriteLine( "This union contains permissions from both mySocketPermission1 and mySocketPermission2" );

      // Prints the allowable accept URIs to the console.
      Console::WriteLine( "This union accepts connections on :" );

      IEnumerator^ myEnumerator = mySocketPermissionUnion->AcceptList;
      while ( myEnumerator->MoveNext() )
      {
         Console::WriteLine( safe_cast<EndpointPermission^>( myEnumerator->Current )->ToString() );
      }

      // Prints the allowable connect URIs to the console.
      Console::WriteLine( "This union permits connections to :" );

      myEnumerator = mySocketPermissionUnion->ConnectList;
      while ( myEnumerator->MoveNext() )
      {
         Console::WriteLine( safe_cast<EndpointPermission^>( myEnumerator->Current )->ToString() );
      }
   }
//</Snippet2>
//<Snippet3>
   // Creates a SocketPermission from the intersect of two SocketPermissions.
   SocketPermission^ mySocketPermissionIntersect =
      (SocketPermission^)( mySocketPermission1->Intersect( mySocketPermissionUnion ) );

   // mySocketPermissionIntersect should now contain the permissions of mySocketPermission1.
   if ( mySocketPermission1->IsSubsetOf( mySocketPermissionIntersect ) )
   {
      Console::WriteLine( "This is expected" );
   }

   // mySocketPermissionIntersect should not contain the permissions of mySocketPermission2.
   if ( mySocketPermission2->IsSubsetOf( mySocketPermissionIntersect ) )
   {
      Console::WriteLine( "This should not print" );
   }
//</Snippet3>
//<Snippet4>
   // Creates a copy of the intersect SocketPermission.
   SocketPermission^ mySocketPermissionIntersectCopy =
      (SocketPermission^)( mySocketPermissionIntersect->Copy() );

   if ( mySocketPermissionIntersectCopy->Equals( mySocketPermissionIntersect ) )
   {
      Console::WriteLine( "Copy successfull" );
   }
//</Snippet4>

   // Converts a SocketPermission to XML format and then immediately converts it back to a SocketPermission.
   mySocketPermission1->FromXml( mySocketPermission1->ToXml() );

   // Checks to see if permission for this socket resource is unrestricted. If it is, then there is no need to
   // demand that permissions be enforced.
   if ( mySocketPermissionUnion->IsUnrestricted() )
   {
      //Do nothing.  There are no restrictions.
   }
   else
   {
      // Enforces the permissions found in mySocketPermissionUnion on any Socket Resources used below this statement.
      mySocketPermissionUnion->Demand();
   }

   IPHostEntry^ myIpHostEntry = Dns::Resolve( "www.contoso.com" );
   IPEndPoint^ myLocalEndPoint = gcnew IPEndPoint( myIpHostEntry->AddressList[ 0 ], 11000 );

   Socket^ s = gcnew Socket( myLocalEndPoint->Address->AddressFamily,
      SocketType::Stream,
      ProtocolType::Tcp );
   try
   {
      s->Connect( myLocalEndPoint );
   }
   catch ( Exception^ e )
   {
      Console::Write( "Exception Thrown: " );
      Console::WriteLine( e->ToString() );
   }

   // Perform all socket operations in here.

   s->Close();
//</Snippet1>
}
// Entry point: runs the SocketPermission demonstration.
int main()
{
   MySocketPermission();
   return 0;  // explicit, though C++ supplies this implicitly for main
}
| 1,634 |
897 | /*Problem: Given array of strings, find the longest common prefix present in all the strings.
Print the longest common prefix. If no common prefix is present, then print empty string ("").
Approach: Comparing characters of the same column in all the strings. If the character does not much
or the string ends at that column, algorithm will retun lcp till that point.
Time complexity:O(S) where S is the number of all the characters in all strings.
Space complexity: O(1)
*/
#include <bits/stdc++.h>
using namespace std;
/*
 * Returns the longest common prefix of the n strings in strs, or "" when
 * n <= 0 or no common prefix exists.
 *
 * Rewritten from the column-scanning version: the original relied on
 * std::string::operator[] at position size() returning '\0' to detect the
 * end of each string, re-checked every string for emptiness on every
 * column, and ended with an unreachable return.  Here the scan length is
 * bounded up-front by the shortest string, which also makes an empty
 * input string fall out naturally (limit == 0).
 *
 * Time:  O(S) where S is the total number of characters.
 * Space: O(1) beyond the returned prefix.
 */
std::string solve(int n, std::string *strs)
{
    if (n <= 0)
        return "";

    // The prefix can never be longer than the shortest input string.
    size_t limit = strs[0].size();
    for (int i = 1; i < n; i++)
        limit = std::min(limit, strs[i].size());

    size_t len = 0;
    for (; len < limit; len++)
    {
        const char expected = strs[0][len];
        bool all_match = true;
        for (int i = 1; i < n; i++)
        {
            if (strs[i][len] != expected)
            {
                all_match = false;
                break;
            }
        }
        if (!all_match)
            break;
    }
    return strs[0].substr(0, len);
}
int main()
{
//input
//number of strings in array
cout << "Enter number of strings:" << endl;
int n;
cin >> n;
string *strs = new string[n];
//enter strings
cout << "Enter the strings:" << endl;
for (int i = 0; i < n; i++)
{
cin >> strs[i];
}
string lcp = solve(n, strs);
if (lcp == "")
{
cout << "No common prefix";
}
else
{
cout << "Longest Common Prefix is: " << lcp;
}
}
/*
Sample input:
"apple", "appology", "apps"
Sample output:
"app"
*/ | 860 |
348 | {"nom":"Equihen-Plage","circ":"5ème circonscription","dpt":"Pas-de-Calais","inscrits":2186,"abs":1242,"votants":944,"blancs":28,"nuls":7,"exp":909,"res":[{"nuance":"REM","nom":"<NAME>","voix":274},{"nuance":"FN","nom":"<NAME>","voix":270},{"nuance":"FI","nom":"M. <NAME>","voix":136},{"nuance":"LR","nom":"Mme <NAME>","voix":82},{"nuance":"SOC","nom":"Mme <NAME>","voix":43},{"nuance":"COM","nom":"<NAME>","voix":36},{"nuance":"ECO","nom":"Mme <NAME>","voix":25},{"nuance":"EXG","nom":"M. <NAME>","voix":18},{"nuance":"DVD","nom":"M. <NAME>","voix":10},{"nuance":"DIV","nom":"M. <NAME>","voix":5},{"nuance":"DIV","nom":"Mme <NAME>","voix":4},{"nuance":"DVG","nom":"<NAME>","voix":4},{"nuance":"ECO","nom":"Mme <NAME>","voix":2}]} | 299 |
327 | /*
Copyright (c) DataStax, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "dse.h"
#include "dse_date_range.hpp"
#include "dse_line_string.hpp"
#include "dse_point.hpp"
#include "dse_polygon.hpp"
using namespace datastax::internal::enterprise;
extern "C" {
CassError cass_tuple_set_dse_point(CassTuple* tuple, size_t index, cass_double_t x,
                                   cass_double_t y) {
  // Serialize the point first, then store it as a custom-typed value.
  const Bytes encoded = encode_point(x, y);
  return cass_tuple_set_custom(tuple, index, DSE_POINT_TYPE, encoded.data(), encoded.size());
}
CassError cass_tuple_set_dse_line_string(CassTuple* tuple, size_t index,
                                         const DseLineString* line_string) {
  // The line string already carries its serialized representation.
  const Bytes& encoded = line_string->bytes();
  return cass_tuple_set_custom(tuple, index, DSE_LINE_STRING_TYPE, encoded.data(),
                               encoded.size());
}
CassError cass_tuple_set_dse_polygon(CassTuple* tuple, size_t index, const DsePolygon* polygon) {
  // The polygon already carries its serialized representation.
  const Bytes& encoded = polygon->bytes();
  return cass_tuple_set_custom(tuple, index, DSE_POLYGON_TYPE, encoded.data(), encoded.size());
}
CassError cass_tuple_set_dse_date_range(CassTuple* tuple, size_t index, const DseDateRange* range) {
  // Serialize the date range first, then store it as a custom-typed value.
  const Bytes encoded = encode_date_range(range);
  return cass_tuple_set_custom(tuple, index, DSE_DATE_RANGE_TYPE, encoded.data(), encoded.size());
}
} // extern "C"
| 727 |
724 | <filename>src/lib/language-service/schema/ros/ALIYUN-CEN-CenInstanceAttachment.json
{
"$id": "ALIYUN::CEN::CenInstanceAttachment",
"type": "object",
"properties": {
"Type": {
"type": "string",
"enum": [
"ALIYUN::CEN::CenInstanceAttachment"
]
},
"Properties": {
"type": "object",
"properties": {
"ChildInstanceType": {
"type": "string"
},
"CenId": {
"type": "string"
},
"ChildInstanceOwnerId": {
"type": "integer"
},
"ChildInstanceId": {
"type": "string"
},
"ChildInstanceRegionId": {
"type": "string"
}
},
"required": [
"ChildInstanceType",
"CenId",
"ChildInstanceId",
"ChildInstanceRegionId"
],
"additionalProperties": false,
"document": {
"default": "https://rosnext.console.aliyun.com/resourceType/ALIYUN::CEN::CenInstanceAttachment"
}
}
},
"required": [
"Type",
"Properties"
],
"document": {
"default": "https://rosnext.console.aliyun.com/resourceType/ALIYUN::CEN::CenInstanceAttachment"
},
"insertText": "${1:CenInstanceAttachment}:\n Type: ALIYUN::CEN::CenInstanceAttachment\n Properties:\n "
} | 635 |
451 | '''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved."
ScriptName: test_api_security_passwd_resets.py
Author: <NAME>
Email: <EMAIL>
Create Date: 07/28/2017
Purpose: This script consists of cases intended to test RackHD login functions.
As of date, there are three user account roles (Administrator, Operator, and ReadOnly).
We will first test the RackHD endpoint's ability to create an account for each of these
roles for both RackHD and Redfish interfaces (redfish=False). We will then test the
Redfish endpoint's ability to also create Redfish and RackHD accounts (Redfish=True).
In both cases, all user roles will have established accounts in both endpoints and
will possess tokens for role base access and permission.
EX: python run_tests.py -stack 2
-config dellemc-test/config-mn/
-test tests/security/test_api_security_passwd_resets.py
python run_tests.py -stack vagrant -test tests/security/test_api_security_passwd_resets.py
'''
import sys
import flogging
import fit_common
import exrex
from nose.plugins.attrib import attr
sys.path.append(fit_common.TEST_PATH + "/classes")
from classes.administrator import Administrator
from classes.readonly import ReadOnly
from classes.operator import Operator
# Globals
logs = flogging.get_loggers()
# Helper functions
def createUsername():
    # Generate a random username: must start with a letter, followed by at
    # least one more character drawn from letters, digits, '.', '_' or '-'.
    return exrex.getone('[a-zA-Z]{1}[a-zA-Z0-9._\-]{1,}')
@attr(regression=True, smoke=True, security=True)
class TestCase01(fit_common.unittest.TestCase):
    """Password-change permission checks through the RackHD API.

    Creates one account per role (Administrator, Operator, ReadOnly) and
    verifies that only the Administrator may change their own password.

    Fixes over the previous revision: removed a stray ``pass`` at the top
    of the test method, replaced the nonexistent ``self.skip()`` (which
    raised AttributeError instead of skipping) with ``self.skipTest()``,
    and corrected an assertion message that claimed 401 while checking 400.
    """

    def setUp(self):
        global admin, readonly, operator
        admin = Administrator(createUsername(), 'passwd', 'Administrator', redfish=False)
        operator = Operator(createUsername(), 'passwd', 'Operator', redfish=False)
        readonly = ReadOnly(createUsername(), 'passwd', 'ReadOnly', redfish=False)
        logs.debug_3("setUP() created the following accounts for testing")
        logs.debug_3("   admin => %s", admin.username)
        logs.debug_3("operator => %s", operator.username)
        logs.debug_3("readonly => %s", readonly.username)

    def tearDown(self):
        logs.debug_3("running tearDown() to delete following test accounts")
        logs.debug_3("   admin => %s", admin.username)
        logs.debug_3("operator => %s", operator.username)
        logs.debug_3("readonly => %s", readonly.username)
        if isinstance(admin, Administrator):
            admin.deleteRedfishUserAccount()
        if isinstance(operator, Operator):
            operator.deleteRedfishUserAccount()
        if isinstance(readonly, ReadOnly):
            readonly.deleteRedfishUserAccount()

    def shortDescription(self):
        logs.info("\n\n\
        This scenario tests each roles ability to change its own password.\n\
        A successful test creates an Administrator, ReadOnly, and Operator user account each\n\
        obtaining session tokens for both RackHD and Redfish APIs.\n\
        Only the administrator should be able to change their own passwords\n\n")

    def test_rackhd_system_roles_patch(self):
        # Administrator: changing one's own password must succeed (200).
        self.assertIsNotNone(admin)
        http_resp_code = admin.changeRackHDPasswd('<PASSWORD>', admin.rackhd_token)
        if http_resp_code is not None:
            self.assertEqual(http_resp_code, 200, 'Incorrect HTTP return code, expected 200, got: ' + str(http_resp_code))
        else:
            self.skipTest('Skipping test. API is unavailable')

        # Operator: must be rejected.  RackHD currently returns 400; per
        # RAC-4795 the correct response should be 401.
        self.assertIsNotNone(operator)
        http_resp_code = operator.changeRackHDPasswd('<PASSWORD>', operator.rackhd_token)
        if http_resp_code is not None:
            self.assertEqual(http_resp_code, 400, 'Incorrect HTTP return code, expected 400, got: ' + str(http_resp_code))
            logs.info("RAC-4795, expected response should be 401")
        else:
            self.skipTest('Skipping test. API is unavailable')

        # ReadOnly: must be rejected as well (400 today, 401 per RAC-4795).
        self.assertIsNotNone(readonly)
        http_resp_code = readonly.changeRackHDPasswd('<PASSWORD>', readonly.rackhd_token)
        if http_resp_code is not None:
            self.assertEqual(http_resp_code, 400, 'Incorrect HTTP return code, expected 400, got: ' + str(http_resp_code))
            logs.info("RAC-4795, marking passed, expected response should be 401")
        else:
            self.skipTest('Skipping test. API is unavailable')
@attr(regression=False, smoke=True, security=True)
class TestCase02(fit_common.unittest.TestCase):
def setUp(self):
global admin, readonly, operator
admin = Administrator(createUsername(), 'passwd', 'Administrator', redfish=True)
readonly = ReadOnly(createUsername(), 'passwd', 'ReadOnly', redfish=True)
operator = Operator(createUsername(), 'passwd', 'Operator', redfish=True)
logs.info("setUP() created the following accounts for testing")
logs.info(" admin => %s", admin.username)
logs.info("readonly => %s", readonly.username)
logs.info("operator => %s", operator.username)
def tearDown(self):
logs.info("running tearDown() to delete following test accounts")
logs.info(" admin => %s", admin.username)
logs.info("readonly => %s", readonly.username)
logs.info("operator => %s", operator.username)
if isinstance(admin, Administrator):
admin.deleteRedfishUserAccount()
if isinstance(operator, Operator):
operator.deleteRedfishUserAccount()
if isinstance(readonly, ReadOnly):
readonly.deleteRedfishUserAccount()
def shortDescription(self):
logs.info("\n\n\
This scenario tests RackHD's setup administrative login credentials as well as the\n\
system's ability to create both RackHD and Redfish accounts via the Redfish API\n\
A successful test creates an Administrator, ReadOnly, and Operator user account each\n\
obtaining session tokens for both RackHD and Redfish APIs\n\n")
def test_redfish_system_roles_login_success(self):
self.assertIsNotNone(admin)
self.assertIsNotNone(readonly)
self.assertIsNotNone(operator)
| 2,423 |
2,053 | /*
* Copyright 2015 the original author or authors.
* @https://github.com/scouter-project/scouter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouterx.webapp.layer.service;
import scouter.lang.constants.ParamConstant;
import scouterx.webapp.framework.client.server.Server;
import scouterx.webapp.layer.consumer.CustomKvStoreConsumer;
import scouterx.webapp.model.KeyValueData;
import java.util.List;
import java.util.Map;
/**
 * Service layer over {@link CustomKvStoreConsumer}: get/set operations on a
 * server's custom key-value store, with optional TTL support.
 *
 * @author <NAME> (<EMAIL>) on 2017. 8. 27.
 */
public class CustomKvStoreService {
    private final CustomKvStoreConsumer kvStoreConsumer;

    public CustomKvStoreService() {
        this.kvStoreConsumer = new CustomKvStoreConsumer();
    }

    /** Returns the value stored under {@code key} in {@code keySpace} on the given server. */
    public String get(String keySpace, String key, Server server) {
        return kvStoreConsumer.get(keySpace, key, server);
    }

    /** Stores {@code value} permanently (no TTL). */
    public boolean set(String keySpace, String key, String value, Server server) {
        return set(keySpace, key, value, ParamConstant.TTL_PERMANENT, server);
    }

    /**
     * Stores {@code value} with the given time-to-live.
     *
     * @return always {@code true} on success
     * @throws RuntimeException if the store rejects the write
     */
    public boolean set(String keySpace, String key, String value, long ttl, Server server) {
        boolean result = kvStoreConsumer.set(keySpace, key, value, ttl, server);
        if (!result) {
            throw new RuntimeException("Error on setting value to kvstore!");
        }
        return true;
    }

    /**
     * Updates the TTL of an existing entry.
     *
     * @return always {@code true} on success
     * @throws RuntimeException if the store rejects the update
     */
    public boolean setTTL(String keySpace, String key, long ttl, Server server) {
        boolean result = kvStoreConsumer.setTTL(keySpace, key, ttl, server);
        if (!result) {
            // Previous message said "setting value" — copy-pasted from set();
            // this failure is about the TTL update.
            throw new RuntimeException("Error on setting TTL to kvstore!");
        }
        return true;
    }

    /** Bulk read of the given keys. */
    public List<KeyValueData> getBulk(String keySpace, List<String> paramList, final Server server) {
        return kvStoreConsumer.getBulk(keySpace, paramList, server);
    }

    /** Bulk write of the given key/value pairs with a shared TTL. */
    public List<KeyValueData> setBulk(String keySpace, Map<String, String> paramMap, long ttl, final Server server) {
        return kvStoreConsumer.setBulk(keySpace, paramMap, ttl, server);
    }
}
| 846 |
8,273 | <filename>ortools/base/random.cc
// Copyright 2010-2021 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if defined(__GNUC__)
#include <unistd.h>
#if defined(__linux__)
#include <linux/limits.h>
#endif
#endif
#if defined(_MSC_VER)
#include <Winsock2.h>
#include <windows.h>
#define PATH_MAX 4096
#else
#include <sys/time.h>
#endif
#include <cstring>
#include <ctime>
#include "ortools/base/hash.h"
#include "ortools/base/random.h"
namespace operations_research {
// Draws one uniformly distributed value over the full 32-bit range.
uint32_t ACMRandom::Next() {
  absl::uniform_int_distribution<uint32_t> full_range(0, kuint32max);
  return full_range(generator_);
}
uint32_t ACMRandom::Uniform(uint32_t n) { return n == 0 ? 0 : Next() % n; }
// Draws one uniformly distributed value over the full 64-bit range.
uint64_t ACMRandom::Next64() {
  absl::uniform_int_distribution<uint64_t> full_range(0, kuint64max);
  return full_range(generator_);
}
// Returns a value in [0, val_max); mirrors Uniform()'s zero-bound rule.
uint64_t ACMRandom::operator()(uint64_t val_max) {
  if (val_max == 0) return 0;
  return Next64() % val_max;
}
namespace {
// Assembles the 4 bytes at `ptr` into a little-endian 32-bit word.
// Each byte is routed through `unsigned char` first: `char` may be signed,
// and casting a negative char directly to uint32_t sign-extends it,
// corrupting the high bits contributed by every byte.
static inline uint32_t Word32At(const char* ptr) {
  return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0]))) +
          (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8) +
          (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16) +
          (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
}
}  // namespace
// Builds a best-effort random seed by hashing the hostname, the process id
// and the current time together via mix().  Not cryptographically strong —
// just distinct enough across hosts and processes.
int32_t ACMRandom::HostnamePidTimeSeed() {
  char name[PATH_MAX + 20];  // need 12 bytes for 3 'empty' uint32_t's
  assert(sizeof(name) - PATH_MAX > sizeof(uint32_t) * 3);
  if (gethostname(name, PATH_MAX) != 0) {
    strcpy(name, "default-hostname");  // NOLINT
  }
  const int namelen = strlen(name);
  // Zero-pad so the word-at-a-time loop below can safely read one full
  // 3-word group past the end of the string.
  for (size_t i = 0; i < sizeof(uint32_t) * 3; ++i) {
    name[namelen + i] = '\0';  // so we mix 0's once we get to end-of-string
  }
#if defined(__GNUC__)
  uint32_t a = getpid();
  struct timeval tv;
  gettimeofday(&tv, NULL);
  uint32_t b = static_cast<uint32_t>((tv.tv_sec + tv.tv_usec) & 0xffffffff);
#elif defined(_MSC_VER)
  uint32_t a = GetCurrentProcessId();
  uint32_t b = GetTickCount();
#else  // Do not know what to do, returning 0.
  return 0;
#endif
  uint32_t c = 0;
  // Fold the hostname into the (pid, time, 0) triple, 3 words per round.
  for (int i = 0; i < namelen; i += sizeof(uint32_t) * 3) {
    a += Word32At(name + i);
    b += Word32At(name + i + sizeof(uint32_t));
    c += Word32At(name + i + sizeof(uint32_t) + sizeof(uint32_t));
    mix(a, b, c);
  }
  c += namelen;  // one final mix
  mix(a, b, c);
  return static_cast<int32_t>(c);  // I guess the seed can be negative
}
int32_t ACMRandom::DeterministicSeed() { return 0; }
} // namespace operations_research
| 1,125 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.