repo_name
stringlengths 4
116
| path
stringlengths 4
379
| size
stringlengths 1
7
| content
stringlengths 3
1.05M
| license
stringclasses 15
values |
---|---|---|---|---|
himynameismax/codeigniter | glpi/inc/interfacetype.class.php | 3097 | <?php
/*
-------------------------------------------------------------------------
GLPI - Gestionnaire Libre de Parc Informatique
Copyright (C) 2015-2016 Teclib'.
http://glpi-project.org
based on GLPI - Gestionnaire Libre de Parc Informatique
Copyright (C) 2003-2014 by the INDEPNET Development Team.
-------------------------------------------------------------------------
LICENSE
This file is part of GLPI.
GLPI is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
GLPI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GLPI. If not, see <http://www.gnu.org/licenses/>.
--------------------------------------------------------------------------
*/
/** @file
* @brief
*/
if (!defined('GLPI_ROOT')) {
die("Sorry. You can't access this file directly");
}
/// Class InterfaceType (Interface is a reserved keyword)
class InterfaceType extends CommonDropdown {

   // Names of interface types are generic hardware terms; GLPI does not
   // offer per-entity translation for this dropdown.
   public $can_be_translated = false;


   /**
    * Get the localized type name for this dropdown.
    *
    * @param integer $nb  item count used to choose singular or plural form
    *
    * @return string translated type name
   **/
   static function getTypeName($nb=0) {
      return _n('Interface type (Hard drive...)', 'Interface types (Hard drive...)', $nb);
   }


   /**
    * Add the "Interface" column header to an HTML table, unless the caller
    * asked for this column to be hidden.
    *
    * @since version 0.84
    *
    * @param $itemtype            itemtype the table is built for
    * @param $base                HTMLTableBase object receiving the header
    * @param $super               HTMLTableSuperHeader object (default NULL)
    * @param $father              HTMLTableHeader object (default NULL)
    * @param $options   array     'dont_display' may list columns to skip
    *
    * @return void
   **/
   static function getHTMLTableHeader($itemtype, HTMLTableBase $base,
                                      HTMLTableSuperHeader $super=NULL,
                                      HTMLTableHeader $father=NULL, array $options=array()) {

      $column_name = __CLASS__;

      // Caller explicitly asked to hide this column.
      if (isset($options['dont_display'][$column_name])) {
         return;
      }

      $base->addHeader($column_name, __('Interface'), $super, $father);
   }


   /**
    * Add the interface-type cell for an item to an HTML table row.
    *
    * @since version 0.84
    *
    * @param $row                 HTMLTableRow object (default NULL)
    * @param $item                CommonDBTM object (default NULL)
    * @param $father              HTMLTableCell object (default NULL)
    * @param $options   array     'dont_display' may list columns to skip
    *
    * @return void
   **/
   static function getHTMLTableCellsForItem(HTMLTableRow $row=NULL, CommonDBTM $item=NULL,
                                            HTMLTableCell $father=NULL, array $options=array()) {

      $column_name = __CLASS__;

      if (isset($options['dont_display'][$column_name])) {
         return;
      }

      // $item defaults to NULL: dereferencing it unguarded would be a fatal
      // error, so bail out when no item was supplied.
      if ($item === NULL) {
         return;
      }

      if ($item->fields["interfacetypes_id"]) {
         $row->addCell($row->getHeaderByName($column_name),
                       Dropdown::getDropdownName("glpi_interfacetypes",
                                                 $item->fields["interfacetypes_id"]));
      }
   }

}
| mit |
DanielParra159/EngineAndGame | Engine/extern/BOOST/include/boost/fusion/container/vector/vector_fwd.hpp | 854 | /*=============================================================================
Copyright (c) 1999-2003 Jaakko Jarvi
Copyright (c) 2001-2006 Joel de Guzman
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
// Include guard for the boost::fusion::vector forward declaration.
#if !defined(FUSION_VECTOR_FORWARD_07072005_0125)
#define FUSION_VECTOR_FORWARD_07072005_0125
// Defines FUSION_MAX_VECTOR_SIZE, the maximum arity of fusion::vector.
#include <boost/fusion/container/vector/limits.hpp>
// Provides BOOST_PP_ENUM_PARAMS_WITH_A_DEFAULT, which expands to a
// comma-separated parameter list "typename T0 = void_, typename T1 = void_, ...".
#include <boost/preprocessor/repetition/enum_params_with_a_default.hpp>
namespace boost { namespace fusion
{
// Placeholder type used as the default for unused template-parameter slots.
struct void_;
// Forward declaration of fusion::vector with FUSION_MAX_VECTOR_SIZE
// template parameters, each defaulting to void_ so callers may supply fewer.
template <
BOOST_PP_ENUM_PARAMS_WITH_A_DEFAULT(
FUSION_MAX_VECTOR_SIZE, typename T, void_)
>
struct vector;
}}
#endif
| cc0-1.0 |
cronchinsky/dh-drupal-8 | vendor/consolidation/filter-via-dot-access-data/tests/FactoryTest.php | 4164 | <?php
namespace Consolidation\Filter;
use PHPUnit\Framework\TestCase;
use Dflydev\DotAccessData\Data;
class FactoryTest extends TestCase
{
    /** @var LogicalOpFactory factory under test, rebuilt before each test */
    protected $factory;

    public function setUp()
    {
        $this->factory = LogicalOpFactory::get();
    }

    /**
     * Data provider for testFactoryParsing.
     *
     * Each dataset contains the expression to parse and, optionally, the
     * canonical string form expected after parsing (when omitted, the input
     * expression itself is expected back).
     *
     * NOTE: provider methods must NOT be prefixed with "test"; otherwise
     * PHPUnit also runs them as assertion-less test methods.
     */
    public function factoryParsingValues()
    {
        return [
            ['a=b',],
            ['a*=b',],
            ['a~=#b#',],
            ['!a=b',],
            ['!a*=b',],
            ['!a~=#b#',],
            ['a!=b', '!a=b',],
            ['a!*=b', '!a*=b',],
            ['a!~=#b#', '!a~=#b#',],
            ['a=b&&c=d',],
            ['a*=b||c=d',],
            ['a~=#b#&&c~=d',],
            ['!a=b||!c=d',],
            ['!a*=b&&c*=d',],
            ['!a~=#b#&&c=d',],
            ['a!=b&&c!=d', '!a=b&&!c=d',],
            ['a!*=b||c!*=d', '!a*=b||!c*=d',],
            ['a!~=#b#&&c!~=#d#', '!a~=#b#&&!c~=#d#',],
            ['a=b&&c=d&&e=f',],
            ['a=b||c=d&&e=f',],
            ['a=b||c=d||e=f',],
        ];
    }

    /**
     * Verify that parsing an expression and casting the resulting operator
     * back to a string yields the canonical form. Each time this function is
     * called, it will be passed data from the data provider function
     * identified by the dataProvider annotation.
     *
     * @dataProvider factoryParsingValues
     */
    public function testFactoryParsing($expr, $expected = false)
    {
        // The expected value is often the source value
        if ($expected === false) {
            $expected = $expr;
        }
        $op = $this->factory->evaluate($expr);
        $this->assertEquals($expected, (string)$op);
    }

    /**
     * Data provider for testFactoryEvaluation.
     *
     * Each dataset contains the expression to evaluate, the source data the
     * operator is tested against, and the expected boolean outcome.
     */
    public function factoryEvaluationValues()
    {
        return [
            ['a=b', ['a' => 'b'], true,],
            ['a=b', ['a' => 'abc'], false,],
            ['a=b', ['b' => 'b'], false,],
            ['a*=b', ['a' => 'b'], true,],
            ['a*=b', ['a' => 'abc'], true,],
            ['a*=b', ['b' => 'b'], false,],
            ['a~=#b#', ['a' => 'b'], true,],
            ['a~=#b#', ['a' => 'abc'], true,],
            ['a~=#b#', ['b' => 'b'], false,],
            ['a.b=c', ['a' => ['b' => 'c']], true,],
            ['a.b=c', ['a' => ['b' => 'abcd']], false,],
            ['a.b=c', ['b' => ['b' => 'c']], false,],
            ['a.b*=c', ['a' => ['b' => 'c']], true,],
            ['a.b*=c', ['a' => ['b' => 'abcd']], true,],
            ['a.b*=c', ['b' => ['b' => 'c']], false,],
            ['a.b~=#c#', ['a' => ['b' => 'c']], true,],
            ['a.b~=#c#', ['a' => ['b' => 'abcd']], true,],
            ['a.b~=#c#', ['b' => ['b' => 'c']], false,],
            ['a=b&&c=d', ['a' => 'b', 'c' => 'd'], true,],
            ['a=b&&c=d', ['a' => 'b', 'c' => 'xd'], false,],
            ['a=b||c=d', ['a' => 'b', 'c' => 'xd'], true,],
            ['a*=b&&c*=d', ['a' => 'b', 'c' => 'd'], true,],
            ['a*=b&&c*=d', ['a' => 'b', 'c' => 'xd'], true,],
            ['a*=b&&c*=d', ['a' => 'b', 'c' => 'xy'], false,],
            ['a*=b||c*=d', ['a' => 'b', 'c' => 'xd'], true,],
            ['a*=b||c*=d', ['a' => 'xb', 'c' => 'xd'], true,],
            ['a*=b||c*=d', ['a' => 'xy', 'c' => 'xy'], false,],
            ['a!=b', ['a' => 'b'], false,],
            ['a!=b', ['a' => 'abc'], true,],
            ['!a=b', ['b' => 'b'], true,],
        ];
    }

    /**
     * Verify that an operator parsed from an expression evaluates the given
     * source data to the expected boolean. Each time this function is called,
     * it will be passed data from the data provider function identified by
     * the dataProvider annotation.
     *
     * @dataProvider factoryEvaluationValues
     */
    public function testFactoryEvaluation($expr, $source, $expected)
    {
        $op = $this->factory->evaluate($expr);
        $data = new Data($source);
        $this->assertEquals($expected, $op->test($data));
    }
}
| gpl-2.0 |
dmlloyd/openjdk-modules | jdk/src/java.sql/share/classes/java/sql/Connection.java | 84366 | /*
* Copyright (c) 1996, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.sql;
import java.util.Properties;
import java.util.concurrent.Executor;
/**
* <P>A connection (session) with a specific
* database. SQL statements are executed and results are returned
* within the context of a connection.
* <P>
* A <code>Connection</code> object's database is able to provide information
* describing its tables, its supported SQL grammar, its stored
* procedures, the capabilities of this connection, and so on. This
* information is obtained with the <code>getMetaData</code> method.
*
* <P><B>Note:</B> When configuring a <code>Connection</code>, JDBC applications
* should use the appropriate <code>Connection</code> method such as
* <code>setAutoCommit</code> or <code>setTransactionIsolation</code>.
* Applications should not invoke SQL commands directly to change the connection's
* configuration when there is a JDBC method available. By default a <code>Connection</code> object is in
* auto-commit mode, which means that it automatically commits changes
* after executing each statement. If auto-commit mode has been
* disabled, the method <code>commit</code> must be called explicitly in
* order to commit changes; otherwise, database changes will not be saved.
* <P>
* A new <code>Connection</code> object created using the JDBC 2.1 core API
* has an initially empty type map associated with it. A user may enter a
* custom mapping for a UDT in this type map.
* When a UDT is retrieved from a data source with the
* method <code>ResultSet.getObject</code>, the <code>getObject</code> method
* will check the connection's type map to see if there is an entry for that
* UDT. If so, the <code>getObject</code> method will map the UDT to the
* class indicated. If there is no entry, the UDT will be mapped using the
* standard mapping.
* <p>
* A user may create a new type map, which is a <code>java.util.Map</code>
* object, make an entry in it, and pass it to the <code>java.sql</code>
* methods that can perform custom mapping. In this case, the method
* will use the given type map instead of the one associated with
* the connection.
* <p>
* For example, the following code fragment specifies that the SQL
* type <code>ATHLETES</code> will be mapped to the class
* <code>Athletes</code> in the Java programming language.
* The code fragment retrieves the type map for the <code>Connection
* </code> object <code>con</code>, inserts the entry into it, and then sets
* the type map with the new entry as the connection's type map.
* <pre>
* java.util.Map map = con.getTypeMap();
* map.put("mySchemaName.ATHLETES", Class.forName("Athletes"));
* con.setTypeMap(map);
* </pre>
*
* @see DriverManager#getConnection
* @see Statement
* @see ResultSet
* @see DatabaseMetaData
* @since 1.1
*/
public interface Connection extends Wrapper, AutoCloseable {
/**
* Creates a <code>Statement</code> object for sending
* SQL statements to the database.
* SQL statements without parameters are normally
* executed using <code>Statement</code> objects. If the same SQL statement
* is executed many times, it may be more efficient to use a
* <code>PreparedStatement</code> object.
* <P>
* Result sets created using the returned <code>Statement</code>
* object will by default be type <code>TYPE_FORWARD_ONLY</code>
* and have a concurrency level of <code>CONCUR_READ_ONLY</code>.
* The holdability of the created result sets can be determined by
* calling {@link #getHoldability}.
*
* @return a new default <code>Statement</code> object
* @exception SQLException if a database access error occurs
* or this method is called on a closed connection
*/
Statement createStatement() throws SQLException;
/**
* Creates a <code>PreparedStatement</code> object for sending
* parameterized SQL statements to the database.
* <P>
* A SQL statement with or without IN parameters can be
* pre-compiled and stored in a <code>PreparedStatement</code> object. This
* object can then be used to efficiently execute this statement
* multiple times.
*
* <P><B>Note:</B> This method is optimized for handling
* parametric SQL statements that benefit from precompilation. If
* the driver supports precompilation,
* the method <code>prepareStatement</code> will send
* the statement to the database for precompilation. Some drivers
* may not support precompilation. In this case, the statement may
* not be sent to the database until the <code>PreparedStatement</code>
* object is executed. This has no direct effect on users; however, it does
* affect which methods throw certain <code>SQLException</code> objects.
* <P>
* Result sets created using the returned <code>PreparedStatement</code>
* object will by default be type <code>TYPE_FORWARD_ONLY</code>
* and have a concurrency level of <code>CONCUR_READ_ONLY</code>.
* The holdability of the created result sets can be determined by
* calling {@link #getHoldability}.
*
* @param sql an SQL statement that may contain one or more '?' IN
* parameter placeholders
* @return a new default <code>PreparedStatement</code> object containing the
* pre-compiled SQL statement
* @exception SQLException if a database access error occurs
* or this method is called on a closed connection
*/
PreparedStatement prepareStatement(String sql)
throws SQLException;
/**
* Creates a <code>CallableStatement</code> object for calling
* database stored procedures.
* The <code>CallableStatement</code> object provides
* methods for setting up its IN and OUT parameters, and
* methods for executing the call to a stored procedure.
*
* <P><B>Note:</B> This method is optimized for handling stored
* procedure call statements. Some drivers may send the call
* statement to the database when the method <code>prepareCall</code>
* is done; others
* may wait until the <code>CallableStatement</code> object
* is executed. This has no
* direct effect on users; however, it does affect which method
* throws certain SQLExceptions.
* <P>
* Result sets created using the returned <code>CallableStatement</code>
* object will by default be type <code>TYPE_FORWARD_ONLY</code>
* and have a concurrency level of <code>CONCUR_READ_ONLY</code>.
* The holdability of the created result sets can be determined by
* calling {@link #getHoldability}.
*
* @param sql an SQL statement that may contain one or more '?'
* parameter placeholders. Typically this statement is specified using JDBC
* call escape syntax.
* @return a new default <code>CallableStatement</code> object containing the
* pre-compiled SQL statement
* @exception SQLException if a database access error occurs
* or this method is called on a closed connection
*/
CallableStatement prepareCall(String sql) throws SQLException;
/**
* Converts the given SQL statement into the system's native SQL grammar.
* A driver may convert the JDBC SQL grammar into its system's
* native SQL grammar prior to sending it. This method returns the
* native form of the statement that the driver would have sent.
*
* @param sql an SQL statement that may contain one or more '?'
* parameter placeholders
* @return the native form of this statement
* @exception SQLException if a database access error occurs
* or this method is called on a closed connection
*/
String nativeSQL(String sql) throws SQLException;
/**
* Sets this connection's auto-commit mode to the given state.
* If a connection is in auto-commit mode, then all its SQL
* statements will be executed and committed as individual
* transactions. Otherwise, its SQL statements are grouped into
* transactions that are terminated by a call to either
* the method <code>commit</code> or the method <code>rollback</code>.
* By default, new connections are in auto-commit
* mode.
* <P>
* The commit occurs when the statement completes. The time when the statement
* completes depends on the type of SQL Statement:
* <ul>
* <li>For DML statements, such as Insert, Update or Delete, and DDL statements,
* the statement is complete as soon as it has finished executing.
* <li>For Select statements, the statement is complete when the associated result
* set is closed.
* <li>For <code>CallableStatement</code> objects or for statements that return
* multiple results, the statement is complete
* when all of the associated result sets have been closed, and all update
* counts and output parameters have been retrieved.
*</ul>
* <P>
* <B>NOTE:</B> If this method is called during a transaction and the
* auto-commit mode is changed, the transaction is committed. If
* <code>setAutoCommit</code> is called and the auto-commit mode is
* not changed, the call is a no-op.
*
* @param autoCommit <code>true</code> to enable auto-commit mode;
* <code>false</code> to disable it
* @exception SQLException if a database access error occurs,
* setAutoCommit(true) is called while participating in a distributed transaction,
* or this method is called on a closed connection
* @see #getAutoCommit
*/
void setAutoCommit(boolean autoCommit) throws SQLException;
/**
* Retrieves the current auto-commit mode for this <code>Connection</code>
* object.
*
* @return the current state of this <code>Connection</code> object's
* auto-commit mode
* @exception SQLException if a database access error occurs
* or this method is called on a closed connection
* @see #setAutoCommit
*/
boolean getAutoCommit() throws SQLException;
/**
* Makes all changes made since the previous
* commit/rollback permanent and releases any database locks
* currently held by this <code>Connection</code> object.
* This method should be
* used only when auto-commit mode has been disabled.
*
* @exception SQLException if a database access error occurs,
* this method is called while participating in a distributed transaction,
* if this method is called on a closed connection or this
* <code>Connection</code> object is in auto-commit mode
* @see #setAutoCommit
*/
void commit() throws SQLException;
/**
* Undoes all changes made in the current transaction
* and releases any database locks currently held
* by this <code>Connection</code> object. This method should be
* used only when auto-commit mode has been disabled.
*
* @exception SQLException if a database access error occurs,
* this method is called while participating in a distributed transaction,
* this method is called on a closed connection or this
* <code>Connection</code> object is in auto-commit mode
* @see #setAutoCommit
*/
void rollback() throws SQLException;
/**
* Releases this <code>Connection</code> object's database and JDBC resources
* immediately instead of waiting for them to be automatically released.
* <P>
* Calling the method <code>close</code> on a <code>Connection</code>
* object that is already closed is a no-op.
* <P>
* It is <b>strongly recommended</b> that an application explicitly
* commits or rolls back an active transaction prior to calling the
* <code>close</code> method. If the <code>close</code> method is called
* and there is an active transaction, the results are implementation-defined.
*
* @exception SQLException if a database access error occurs
*/
void close() throws SQLException;
/**
* Retrieves whether this <code>Connection</code> object has been
* closed. A connection is closed if the method <code>close</code>
* has been called on it or if certain fatal errors have occurred.
* This method is guaranteed to return <code>true</code> only when
* it is called after the method <code>Connection.close</code> has
* been called.
* <P>
* This method generally cannot be called to determine whether a
* connection to a database is valid or invalid. A typical client
* can determine that a connection is invalid by catching any
* exceptions that might be thrown when an operation is attempted.
*
* @return <code>true</code> if this <code>Connection</code> object
* is closed; <code>false</code> if it is still open
* @exception SQLException if a database access error occurs
*/
boolean isClosed() throws SQLException;
//======================================================================
// Advanced features:
/**
* Retrieves a <code>DatabaseMetaData</code> object that contains
* metadata about the database to which this
* <code>Connection</code> object represents a connection.
* The metadata includes information about the database's
* tables, its supported SQL grammar, its stored
* procedures, the capabilities of this connection, and so on.
*
* @return a <code>DatabaseMetaData</code> object for this
* <code>Connection</code> object
* @exception SQLException if a database access error occurs
* or this method is called on a closed connection
*/
DatabaseMetaData getMetaData() throws SQLException;
/**
* Puts this connection in read-only mode as a hint to the driver to enable
* database optimizations.
*
* <P><B>Note:</B> This method cannot be called during a transaction.
*
* @param readOnly <code>true</code> enables read-only mode;
* <code>false</code> disables it
* @exception SQLException if a database access error occurs, this
* method is called on a closed connection or this
* method is called during a transaction
*/
void setReadOnly(boolean readOnly) throws SQLException;
/**
* Retrieves whether this <code>Connection</code>
* object is in read-only mode.
*
* @return <code>true</code> if this <code>Connection</code> object
* is read-only; <code>false</code> otherwise
* @exception SQLException if a database access error occurs
* or this method is called on a closed connection
*/
boolean isReadOnly() throws SQLException;
/**
* Sets the given catalog name in order to select
* a subspace of this <code>Connection</code> object's database
* in which to work.
* <P>
* If the driver does not support catalogs, it will
* silently ignore this request.
* <p>
* Calling {@code setCatalog} has no effect on previously created or prepared
* {@code Statement} objects. It is implementation defined whether a DBMS
* prepare operation takes place immediately when the {@code Connection}
* method {@code prepareStatement} or {@code prepareCall} is invoked.
* For maximum portability, {@code setCatalog} should be called before a
* {@code Statement} is created or prepared.
*
* @param catalog the name of a catalog (subspace in this
* <code>Connection</code> object's database) in which to work
* @exception SQLException if a database access error occurs
* or this method is called on a closed connection
* @see #getCatalog
*/
void setCatalog(String catalog) throws SQLException;
/**
* Retrieves this <code>Connection</code> object's current catalog name.
*
* @return the current catalog name or <code>null</code> if there is none
* @exception SQLException if a database access error occurs
* or this method is called on a closed connection
* @see #setCatalog
*/
String getCatalog() throws SQLException;
/**
* A constant indicating that transactions are not supported.
*/
int TRANSACTION_NONE = 0;
/**
* A constant indicating that
* dirty reads, non-repeatable reads and phantom reads can occur.
* This level allows a row changed by one transaction to be read
* by another transaction before any changes in that row have been
* committed (a "dirty read"). If any of the changes are rolled back,
* the second transaction will have retrieved an invalid row.
*/
int TRANSACTION_READ_UNCOMMITTED = 1;
/**
* A constant indicating that
* dirty reads are prevented; non-repeatable reads and phantom
* reads can occur. This level only prohibits a transaction
* from reading a row with uncommitted changes in it.
*/
int TRANSACTION_READ_COMMITTED = 2;
/**
* A constant indicating that
* dirty reads and non-repeatable reads are prevented; phantom
* reads can occur. This level prohibits a transaction from
* reading a row with uncommitted changes in it, and it also
* prohibits the situation where one transaction reads a row,
* a second transaction alters the row, and the first transaction
* rereads the row, getting different values the second time
* (a "non-repeatable read").
*/
int TRANSACTION_REPEATABLE_READ = 4;
/**
* A constant indicating that
* dirty reads, non-repeatable reads and phantom reads are prevented.
* This level includes the prohibitions in
* <code>TRANSACTION_REPEATABLE_READ</code> and further prohibits the
* situation where one transaction reads all rows that satisfy
* a <code>WHERE</code> condition, a second transaction inserts a row that
* satisfies that <code>WHERE</code> condition, and the first transaction
* rereads for the same condition, retrieving the additional
* "phantom" row in the second read.
*/
int TRANSACTION_SERIALIZABLE = 8;
/**
* Attempts to change the transaction isolation level for this
* <code>Connection</code> object to the one given.
* The constants defined in the interface <code>Connection</code>
* are the possible transaction isolation levels.
* <P>
* <B>Note:</B> If this method is called during a transaction, the result
* is implementation-defined.
*
* @param level one of the following <code>Connection</code> constants:
* <code>Connection.TRANSACTION_READ_UNCOMMITTED</code>,
* <code>Connection.TRANSACTION_READ_COMMITTED</code>,
* <code>Connection.TRANSACTION_REPEATABLE_READ</code>, or
* <code>Connection.TRANSACTION_SERIALIZABLE</code>.
* (Note that <code>Connection.TRANSACTION_NONE</code> cannot be used
* because it specifies that transactions are not supported.)
* @exception SQLException if a database access error occurs, this
* method is called on a closed connection
* or the given parameter is not one of the <code>Connection</code>
* constants
* @see DatabaseMetaData#supportsTransactionIsolationLevel
* @see #getTransactionIsolation
*/
void setTransactionIsolation(int level) throws SQLException;
/**
* Retrieves this <code>Connection</code> object's current
* transaction isolation level.
*
* @return the current transaction isolation level, which will be one
* of the following constants:
* <code>Connection.TRANSACTION_READ_UNCOMMITTED</code>,
* <code>Connection.TRANSACTION_READ_COMMITTED</code>,
* <code>Connection.TRANSACTION_REPEATABLE_READ</code>,
* <code>Connection.TRANSACTION_SERIALIZABLE</code>, or
* <code>Connection.TRANSACTION_NONE</code>.
* @exception SQLException if a database access error occurs
* or this method is called on a closed connection
* @see #setTransactionIsolation
*/
int getTransactionIsolation() throws SQLException;
/**
* Retrieves the first warning reported by calls on this
* <code>Connection</code> object. If there is more than one
* warning, subsequent warnings will be chained to the first one
* and can be retrieved by calling the method
* <code>SQLWarning.getNextWarning</code> on the warning
* that was retrieved previously.
* <P>
* This method may not be
* called on a closed connection; doing so will cause an
* <code>SQLException</code> to be thrown.
*
* <P><B>Note:</B> Subsequent warnings will be chained to this
* SQLWarning.
*
* @return the first <code>SQLWarning</code> object or <code>null</code>
* if there are none
* @exception SQLException if a database access error occurs or
* this method is called on a closed connection
* @see SQLWarning
*/
SQLWarning getWarnings() throws SQLException;
/**
* Clears all warnings reported for this <code>Connection</code> object.
* After a call to this method, the method <code>getWarnings</code>
* returns <code>null</code> until a new warning is
* reported for this <code>Connection</code> object.
*
* @exception SQLException if a database access error occurs
* or this method is called on a closed connection
*/
void clearWarnings() throws SQLException;
//--------------------------JDBC 2.0-----------------------------
/**
* Creates a <code>Statement</code> object that will generate
* <code>ResultSet</code> objects with the given type and concurrency.
* This method is the same as the <code>createStatement</code> method
* above, but it allows the default result set
* type and concurrency to be overridden.
* The holdability of the created result sets can be determined by
* calling {@link #getHoldability}.
*
* @param resultSetType a result set type; one of
* <code>ResultSet.TYPE_FORWARD_ONLY</code>,
* <code>ResultSet.TYPE_SCROLL_INSENSITIVE</code>, or
* <code>ResultSet.TYPE_SCROLL_SENSITIVE</code>
* @param resultSetConcurrency a concurrency type; one of
* <code>ResultSet.CONCUR_READ_ONLY</code> or
* <code>ResultSet.CONCUR_UPDATABLE</code>
* @return a new <code>Statement</code> object that will generate
* <code>ResultSet</code> objects with the given type and
* concurrency
* @exception SQLException if a database access error occurs, this
* method is called on a closed connection
* or the given parameters are not <code>ResultSet</code>
* constants indicating type and concurrency
* @exception SQLFeatureNotSupportedException if the JDBC driver does not support
* this method or this method is not supported for the specified result
* set type and result set concurrency.
* @since 1.2
*/
Statement createStatement(int resultSetType, int resultSetConcurrency)
throws SQLException;
/**
*
* Creates a <code>PreparedStatement</code> object that will generate
* <code>ResultSet</code> objects with the given type and concurrency.
* This method is the same as the <code>prepareStatement</code> method
* above, but it allows the default result set
* type and concurrency to be overridden.
* The holdability of the created result sets can be determined by
* calling {@link #getHoldability}.
*
* @param sql a <code>String</code> object that is the SQL statement to
* be sent to the database; may contain one or more '?' IN
* parameters
* @param resultSetType a result set type; one of
* <code>ResultSet.TYPE_FORWARD_ONLY</code>,
* <code>ResultSet.TYPE_SCROLL_INSENSITIVE</code>, or
* <code>ResultSet.TYPE_SCROLL_SENSITIVE</code>
* @param resultSetConcurrency a concurrency type; one of
* <code>ResultSet.CONCUR_READ_ONLY</code> or
* <code>ResultSet.CONCUR_UPDATABLE</code>
* @return a new PreparedStatement object containing the
* pre-compiled SQL statement that will produce <code>ResultSet</code>
* objects with the given type and concurrency
* @exception SQLException if a database access error occurs, this
* method is called on a closed connection
* or the given parameters are not <code>ResultSet</code>
* constants indicating type and concurrency
* @exception SQLFeatureNotSupportedException if the JDBC driver does not support
* this method or this method is not supported for the specified result
* set type and result set concurrency.
* @since 1.2
*/
PreparedStatement prepareStatement(String sql, int resultSetType,
int resultSetConcurrency)
throws SQLException;
    /**
     * Creates a <code>CallableStatement</code> object that will generate
     * <code>ResultSet</code> objects with the given type and concurrency.
     * This method is the same as the <code>prepareCall</code> method
     * above, but it allows the default result set
     * type and concurrency to be overridden.
     * The holdability of the created result sets can be determined by
     * calling {@link #getHoldability}.
     *
     * @param sql a <code>String</code> object that is the SQL statement to
     *            be sent to the database; may contain one or more '?' parameters
     * @param resultSetType a result set type; one of
     *         <code>ResultSet.TYPE_FORWARD_ONLY</code>,
     *         <code>ResultSet.TYPE_SCROLL_INSENSITIVE</code>, or
     *         <code>ResultSet.TYPE_SCROLL_SENSITIVE</code>
     * @param resultSetConcurrency a concurrency type; one of
     *         <code>ResultSet.CONCUR_READ_ONLY</code> or
     *         <code>ResultSet.CONCUR_UPDATABLE</code>
     * @return a new <code>CallableStatement</code> object containing the
     * pre-compiled SQL statement that will produce <code>ResultSet</code>
     * objects with the given type and concurrency
     * @exception SQLException if a database access error occurs, this method
     * is called on a closed connection
     *         or the given parameters are not <code>ResultSet</code>
     *         constants indicating type and concurrency
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method or this method is not supported for the specified result
     * set type and result set concurrency.
     * @since 1.2
     */
    CallableStatement prepareCall(String sql, int resultSetType,
                                  int resultSetConcurrency) throws SQLException;
    /**
     * Retrieves the <code>Map</code> object associated with this
     * <code>Connection</code> object.
     * Unless the application has added an entry, the type map returned
     * will be empty.
     * <p>
     * You must invoke <code>setTypeMap</code> after making changes to the
     * <code>Map</code> object returned from
     * <code>getTypeMap</code> as a JDBC driver may create an internal
     * copy of the <code>Map</code> object passed to <code>setTypeMap</code>:
     *
     * <pre>
     *      Map&lt;String,Class&lt;?&gt;&gt; myMap = con.getTypeMap();
     *      myMap.put("mySchemaName.ATHLETES", Athletes.class);
     *      con.setTypeMap(myMap);
     * </pre>
     * @return the <code>java.util.Map</code> object associated
     *         with this <code>Connection</code> object
     * @exception SQLException if a database access error occurs
     * or this method is called on a closed connection
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method
     * @since 1.2
     * @see #setTypeMap
     */
    java.util.Map<String,Class<?>> getTypeMap() throws SQLException;
    /**
     * Installs the given <code>TypeMap</code> object as the type map for
     * this <code>Connection</code> object. The type map will be used for the
     * custom mapping of SQL structured types and distinct types.
     * <p>
     * You must set the values for the <code>TypeMap</code> prior to
     * calling <code>setMap</code> as a JDBC driver may create an internal copy
     * of the <code>TypeMap</code>:
     *
     * <pre>
     *      Map&lt;String,Class&lt;?&gt;&gt; myMap = new HashMap&lt;String,Class&lt;?&gt;&gt;();
     *      myMap.put("mySchemaName.ATHLETES", Athletes.class);
     *      con.setTypeMap(myMap);
     * </pre>
     * @param map the <code>java.util.Map</code> object to install
     *        as the replacement for this <code>Connection</code>
     *        object's default type map
     * @exception SQLException if a database access error occurs, this
     * method is called on a closed connection or
     *        the given parameter is not a <code>java.util.Map</code>
     *        object
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method
     * @since 1.2
     * @see #getTypeMap
     */
    void setTypeMap(java.util.Map<String,Class<?>> map) throws SQLException;
//--------------------------JDBC 3.0-----------------------------
    /**
     * Changes the default holdability of <code>ResultSet</code> objects
     * created using this <code>Connection</code> object to the given
     * holdability. The default holdability of <code>ResultSet</code> objects
     * can be determined by invoking
     * {@link DatabaseMetaData#getResultSetHoldability}.
     *
     * @param holdability a <code>ResultSet</code> holdability constant; one of
     *        <code>ResultSet.HOLD_CURSORS_OVER_COMMIT</code> or
     *        <code>ResultSet.CLOSE_CURSORS_AT_COMMIT</code>
     * @throws SQLException if a database access error occurs, this method is called
     * on a closed connection, or the given parameter
     *         is not a <code>ResultSet</code> constant indicating holdability
     * @exception SQLFeatureNotSupportedException if the given holdability is not supported
     * @see #getHoldability
     * @see DatabaseMetaData#getResultSetHoldability
     * @see ResultSet
     * @since 1.4
     */
    void setHoldability(int holdability) throws SQLException;
    /**
     * Retrieves the current holdability of <code>ResultSet</code> objects
     * created using this <code>Connection</code> object.
     *
     * @return the holdability, one of
     *        <code>ResultSet.HOLD_CURSORS_OVER_COMMIT</code> or
     *        <code>ResultSet.CLOSE_CURSORS_AT_COMMIT</code>
     * @throws SQLException if a database access error occurs
     * or this method is called on a closed connection
     * @see #setHoldability
     * @see DatabaseMetaData#getResultSetHoldability
     * @see ResultSet
     * @since 1.4
     */
    int getHoldability() throws SQLException;
    /**
     * Creates an unnamed savepoint in the current transaction and
     * returns the new <code>Savepoint</code> object that represents it.
     *
     * <p> If setSavepoint is invoked outside of an active transaction, a
     * transaction will be started at this newly created savepoint.
     *
     * @return the new <code>Savepoint</code> object
     * @exception SQLException if a database access error occurs,
     * this method is called while participating in a distributed transaction,
     * this method is called on a closed connection
     * or this <code>Connection</code> object is currently in
     * auto-commit mode
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method
     * @see Savepoint
     * @since 1.4
     */
    Savepoint setSavepoint() throws SQLException;
    /**
     * Creates a savepoint with the given name in the current transaction
     * and returns the new <code>Savepoint</code> object that represents it.
     *
     * <p> If setSavepoint is invoked outside of an active transaction, a
     * transaction will be started at this newly created savepoint.
     *
     * @param name a <code>String</code> containing the name of the savepoint
     * @return the new <code>Savepoint</code> object
     * @exception SQLException if a database access error occurs,
     * this method is called while participating in a distributed transaction,
     * this method is called on a closed connection
     * or this <code>Connection</code> object is currently in
     * auto-commit mode
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method
     * @see Savepoint
     * @since 1.4
     */
    Savepoint setSavepoint(String name) throws SQLException;
    /**
     * Undoes all changes made after the given <code>Savepoint</code> object
     * was set.
     * <P>
     * This method should be used only when auto-commit has been disabled.
     *
     * @param savepoint the <code>Savepoint</code> object to roll back to
     * @exception SQLException if a database access error occurs,
     * this method is called while participating in a distributed transaction,
     * this method is called on a closed connection,
     * the <code>Savepoint</code> object is no longer valid,
     * or this <code>Connection</code> object is currently in
     * auto-commit mode
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method
     * @see Savepoint
     * @see #rollback
     * @since 1.4
     */
    void rollback(Savepoint savepoint) throws SQLException;
    /**
     * Removes the specified <code>Savepoint</code> and subsequent <code>Savepoint</code> objects from the current
     * transaction. Any reference to the savepoint after it has been removed
     * will cause an <code>SQLException</code> to be thrown.
     *
     * @param savepoint the <code>Savepoint</code> object to be removed
     * @exception SQLException if a database access error occurs, this
     *  method is called on a closed connection or
     *            the given <code>Savepoint</code> object is not a valid
     *            savepoint in the current transaction
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method
     * @since 1.4
     */
    void releaseSavepoint(Savepoint savepoint) throws SQLException;
    /**
     * Creates a <code>Statement</code> object that will generate
     * <code>ResultSet</code> objects with the given type, concurrency,
     * and holdability.
     * This method is the same as the <code>createStatement</code> method
     * above, but it allows the default result set
     * type, concurrency, and holdability to be overridden.
     *
     * @param resultSetType one of the following <code>ResultSet</code>
     *        constants:
     *         <code>ResultSet.TYPE_FORWARD_ONLY</code>,
     *         <code>ResultSet.TYPE_SCROLL_INSENSITIVE</code>, or
     *         <code>ResultSet.TYPE_SCROLL_SENSITIVE</code>
     * @param resultSetConcurrency one of the following <code>ResultSet</code>
     *        constants:
     *         <code>ResultSet.CONCUR_READ_ONLY</code> or
     *         <code>ResultSet.CONCUR_UPDATABLE</code>
     * @param resultSetHoldability one of the following <code>ResultSet</code>
     *        constants:
     *         <code>ResultSet.HOLD_CURSORS_OVER_COMMIT</code> or
     *         <code>ResultSet.CLOSE_CURSORS_AT_COMMIT</code>
     * @return a new <code>Statement</code> object that will generate
     *         <code>ResultSet</code> objects with the given type,
     *         concurrency, and holdability
     * @exception SQLException if a database access error occurs, this
     * method is called on a closed connection
     *            or the given parameters are not <code>ResultSet</code>
     *            constants indicating type, concurrency, and holdability
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method or this method is not supported for the specified result
     * set type, result set holdability and result set concurrency.
     * @see ResultSet
     * @since 1.4
     */
    Statement createStatement(int resultSetType, int resultSetConcurrency,
                              int resultSetHoldability) throws SQLException;
    /**
     * Creates a <code>PreparedStatement</code> object that will generate
     * <code>ResultSet</code> objects with the given type, concurrency,
     * and holdability.
     * <P>
     * This method is the same as the <code>prepareStatement</code> method
     * above, but it allows the default result set
     * type, concurrency, and holdability to be overridden.
     *
     * @param sql a <code>String</code> object that is the SQL statement to
     *            be sent to the database; may contain one or more '?' IN
     *            parameters
     * @param resultSetType one of the following <code>ResultSet</code>
     *        constants:
     *         <code>ResultSet.TYPE_FORWARD_ONLY</code>,
     *         <code>ResultSet.TYPE_SCROLL_INSENSITIVE</code>, or
     *         <code>ResultSet.TYPE_SCROLL_SENSITIVE</code>
     * @param resultSetConcurrency one of the following <code>ResultSet</code>
     *        constants:
     *         <code>ResultSet.CONCUR_READ_ONLY</code> or
     *         <code>ResultSet.CONCUR_UPDATABLE</code>
     * @param resultSetHoldability one of the following <code>ResultSet</code>
     *        constants:
     *         <code>ResultSet.HOLD_CURSORS_OVER_COMMIT</code> or
     *         <code>ResultSet.CLOSE_CURSORS_AT_COMMIT</code>
     * @return a new <code>PreparedStatement</code> object, containing the
     *         pre-compiled SQL statement, that will generate
     *         <code>ResultSet</code> objects with the given type,
     *         concurrency, and holdability
     * @exception SQLException if a database access error occurs, this
     * method is called on a closed connection
     *            or the given parameters are not <code>ResultSet</code>
     *            constants indicating type, concurrency, and holdability
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method or this method is not supported for the specified result
     * set type, result set holdability and result set concurrency.
     * @see ResultSet
     * @since 1.4
     */
    PreparedStatement prepareStatement(String sql, int resultSetType,
                                       int resultSetConcurrency, int resultSetHoldability)
        throws SQLException;
    /**
     * Creates a <code>CallableStatement</code> object that will generate
     * <code>ResultSet</code> objects with the given type, concurrency,
     * and holdability.
     * This method is the same as the <code>prepareCall</code> method
     * above, but it allows the default result set
     * type, result set concurrency type and holdability to be overridden.
     *
     * @param sql a <code>String</code> object that is the SQL statement to
     *            be sent to the database; may contain one or more '?' parameters
     * @param resultSetType one of the following <code>ResultSet</code>
     *        constants:
     *         <code>ResultSet.TYPE_FORWARD_ONLY</code>,
     *         <code>ResultSet.TYPE_SCROLL_INSENSITIVE</code>, or
     *         <code>ResultSet.TYPE_SCROLL_SENSITIVE</code>
     * @param resultSetConcurrency one of the following <code>ResultSet</code>
     *        constants:
     *         <code>ResultSet.CONCUR_READ_ONLY</code> or
     *         <code>ResultSet.CONCUR_UPDATABLE</code>
     * @param resultSetHoldability one of the following <code>ResultSet</code>
     *        constants:
     *         <code>ResultSet.HOLD_CURSORS_OVER_COMMIT</code> or
     *         <code>ResultSet.CLOSE_CURSORS_AT_COMMIT</code>
     * @return a new <code>CallableStatement</code> object, containing the
     *         pre-compiled SQL statement, that will generate
     *         <code>ResultSet</code> objects with the given type,
     *         concurrency, and holdability
     * @exception SQLException if a database access error occurs, this
     * method is called on a closed connection
     *            or the given parameters are not <code>ResultSet</code>
     *            constants indicating type, concurrency, and holdability
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method or this method is not supported for the specified result
     * set type, result set holdability and result set concurrency.
     * @see ResultSet
     * @since 1.4
     */
    CallableStatement prepareCall(String sql, int resultSetType,
                                  int resultSetConcurrency,
                                  int resultSetHoldability) throws SQLException;
    /**
     * Creates a default <code>PreparedStatement</code> object that has
     * the capability to retrieve auto-generated keys. The given constant
     * tells the driver whether it should make auto-generated keys
     * available for retrieval.  This parameter is ignored if the SQL statement
     * is not an <code>INSERT</code> statement, or an SQL statement able to return
     * auto-generated keys (the list of such statements is vendor-specific).
     * <P>
     * <B>Note:</B> This method is optimized for handling
     * parametric SQL statements that benefit from precompilation. If
     * the driver supports precompilation,
     * the method <code>prepareStatement</code> will send
     * the statement to the database for precompilation. Some drivers
     * may not support precompilation. In this case, the statement may
     * not be sent to the database until the <code>PreparedStatement</code>
     * object is executed.  This has no direct effect on users; however, it does
     * affect which methods throw certain SQLExceptions.
     * <P>
     * Result sets created using the returned <code>PreparedStatement</code>
     * object will by default be type <code>TYPE_FORWARD_ONLY</code>
     * and have a concurrency level of <code>CONCUR_READ_ONLY</code>.
     * The holdability of the created result sets can be determined by
     * calling {@link #getHoldability}.
     *
     * @param sql an SQL statement that may contain one or more '?' IN
     *        parameter placeholders
     * @param autoGeneratedKeys a flag indicating whether auto-generated keys
     *        should be returned; one of
     *        <code>Statement.RETURN_GENERATED_KEYS</code> or
     *        <code>Statement.NO_GENERATED_KEYS</code>
     * @return a new <code>PreparedStatement</code> object, containing the
     *         pre-compiled SQL statement, that will have the capability of
     *         returning auto-generated keys
     * @exception SQLException if a database access error occurs, this
     *  method is called on a closed connection
     *         or the given parameter is not a <code>Statement</code>
     *         constant indicating whether auto-generated keys should be
     *         returned
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method with a constant of Statement.RETURN_GENERATED_KEYS
     * @since 1.4
     */
    PreparedStatement prepareStatement(String sql, int autoGeneratedKeys)
        throws SQLException;
    /**
     * Creates a default <code>PreparedStatement</code> object capable
     * of returning the auto-generated keys designated by the given array.
     * This array contains the indexes of the columns in the target
     * table that contain the auto-generated keys that should be made
     * available.  The driver will ignore the array if the SQL statement
     * is not an <code>INSERT</code> statement, or an SQL statement able to return
     * auto-generated keys (the list of such statements is vendor-specific).
     *<p>
     * An SQL statement with or without IN parameters can be
     * pre-compiled and stored in a <code>PreparedStatement</code> object. This
     * object can then be used to efficiently execute this statement
     * multiple times.
     * <P>
     * <B>Note:</B> This method is optimized for handling
     * parametric SQL statements that benefit from precompilation. If
     * the driver supports precompilation,
     * the method <code>prepareStatement</code> will send
     * the statement to the database for precompilation. Some drivers
     * may not support precompilation. In this case, the statement may
     * not be sent to the database until the <code>PreparedStatement</code>
     * object is executed.  This has no direct effect on users; however, it does
     * affect which methods throw certain SQLExceptions.
     * <P>
     * Result sets created using the returned <code>PreparedStatement</code>
     * object will by default be type <code>TYPE_FORWARD_ONLY</code>
     * and have a concurrency level of <code>CONCUR_READ_ONLY</code>.
     * The holdability of the created result sets can be determined by
     * calling {@link #getHoldability}.
     *
     * @param sql an SQL statement that may contain one or more '?' IN
     *        parameter placeholders
     * @param columnIndexes an array of column indexes indicating the columns
     *        that should be returned from the inserted row or rows
     * @return a new <code>PreparedStatement</code> object, containing the
     *         pre-compiled statement, that is capable of returning the
     *         auto-generated keys designated by the given array of column
     *         indexes
     * @exception SQLException if a database access error occurs
     * or this method is called on a closed connection
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method
     *
     * @since 1.4
     */
    PreparedStatement prepareStatement(String sql, int columnIndexes[])
        throws SQLException;
    /**
     * Creates a default <code>PreparedStatement</code> object capable
     * of returning the auto-generated keys designated by the given array.
     * This array contains the names of the columns in the target
     * table that contain the auto-generated keys that should be returned.
     * The driver will ignore the array if the SQL statement
     * is not an <code>INSERT</code> statement, or an SQL statement able to return
     * auto-generated keys (the list of such statements is vendor-specific).
     * <P>
     * An SQL statement with or without IN parameters can be
     * pre-compiled and stored in a <code>PreparedStatement</code> object. This
     * object can then be used to efficiently execute this statement
     * multiple times.
     * <P>
     * <B>Note:</B> This method is optimized for handling
     * parametric SQL statements that benefit from precompilation. If
     * the driver supports precompilation,
     * the method <code>prepareStatement</code> will send
     * the statement to the database for precompilation. Some drivers
     * may not support precompilation. In this case, the statement may
     * not be sent to the database until the <code>PreparedStatement</code>
     * object is executed.  This has no direct effect on users; however, it does
     * affect which methods throw certain SQLExceptions.
     * <P>
     * Result sets created using the returned <code>PreparedStatement</code>
     * object will by default be type <code>TYPE_FORWARD_ONLY</code>
     * and have a concurrency level of <code>CONCUR_READ_ONLY</code>.
     * The holdability of the created result sets can be determined by
     * calling {@link #getHoldability}.
     *
     * @param sql an SQL statement that may contain one or more '?' IN
     *        parameter placeholders
     * @param columnNames an array of column names indicating the columns
     *        that should be returned from the inserted row or rows
     * @return a new <code>PreparedStatement</code> object, containing the
     *         pre-compiled statement, that is capable of returning the
     *         auto-generated keys designated by the given array of column
     *         names
     * @exception SQLException if a database access error occurs
     * or this method is called on a closed connection
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method
     *
     * @since 1.4
     */
    PreparedStatement prepareStatement(String sql, String columnNames[])
        throws SQLException;
    /**
     * Constructs an object that implements the <code>Clob</code> interface. The object
     * returned initially contains no data.  The <code>setAsciiStream</code>,
     * <code>setCharacterStream</code> and <code>setString</code> methods of
     * the <code>Clob</code> interface may be used to add data to the <code>Clob</code>.
     * @return An object that implements the <code>Clob</code> interface
     * @throws SQLException if an object that implements the
     * <code>Clob</code> interface can not be constructed, this method is
     * called on a closed connection or a database access error occurs.
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this data type
     *
     * @since 1.6
     */
    Clob createClob() throws SQLException;
    /**
     * Constructs an object that implements the <code>Blob</code> interface. The object
     * returned initially contains no data.  The <code>setBinaryStream</code> and
     * <code>setBytes</code> methods of the <code>Blob</code> interface may be used to add data to
     * the <code>Blob</code>.
     * @return An object that implements the <code>Blob</code> interface
     * @throws SQLException if an object that implements the
     * <code>Blob</code> interface can not be constructed, this method is
     * called on a closed connection or a database access error occurs.
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this data type
     *
     * @since 1.6
     */
    Blob createBlob() throws SQLException;
    /**
     * Constructs an object that implements the <code>NClob</code> interface. The object
     * returned initially contains no data.  The <code>setAsciiStream</code>,
     * <code>setCharacterStream</code> and <code>setString</code> methods of the <code>NClob</code> interface may
     * be used to add data to the <code>NClob</code>.
     * @return An object that implements the <code>NClob</code> interface
     * @throws SQLException if an object that implements the
     * <code>NClob</code> interface can not be constructed, this method is
     * called on a closed connection or a database access error occurs.
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this data type
     *
     * @since 1.6
     */
    NClob createNClob() throws SQLException;
    /**
     * Constructs an object that implements the <code>SQLXML</code> interface. The object
     * returned initially contains no data. The <code>createXmlStreamWriter</code> method and
     * <code>setString</code> method of the <code>SQLXML</code> interface may be used to add data to the <code>SQLXML</code>
     * object.
     * @return An object that implements the <code>SQLXML</code> interface
     * @throws SQLException if an object that implements the <code>SQLXML</code> interface can not
     * be constructed, this method is
     * called on a closed connection or a database access error occurs.
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
     * this data type
     * @since 1.6
     */
    SQLXML createSQLXML() throws SQLException;
    /**
     * Returns true if the connection has not been closed and is still valid.
     * The driver shall submit a query on the connection or use some other
     * mechanism that positively verifies the connection is still valid when
     * this method is called.
     * <p>
     * The query submitted by the driver to validate the connection shall be
     * executed in the context of the current transaction.
     *
     * @param timeout -             The time in seconds to wait for the database operation
     *                                              used to validate the connection to complete.  If
     *                                              the timeout period expires before the operation
     *                                              completes, this method returns false.  A value of
     *                                              0 indicates a timeout is not applied to the
     *                                              database operation.
     *
     * @return true if the connection is valid, false otherwise
     * @exception SQLException if the value supplied for <code>timeout</code>
     * is less than 0
     * @since 1.6
     *
     * @see java.sql.DatabaseMetaData#getClientInfoProperties
     */
    boolean isValid(int timeout) throws SQLException;
    /**
     * Sets the value of the client info property specified by name to the
     * value specified by value.
     * <p>
     * Applications may use the <code>DatabaseMetaData.getClientInfoProperties</code>
     * method to determine the client info properties supported by the driver
     * and the maximum length that may be specified for each property.
     * <p>
     * The driver stores the value specified in a suitable location in the
     * database.  For example in a special register, session parameter, or
     * system table column.  For efficiency the driver may defer setting the
     * value in the database until the next time a statement is executed or
     * prepared.  Other than storing the client information in the appropriate
     * place in the database, these methods shall not alter the behavior of
     * the connection in any way.  The values supplied to these methods are
     * used for accounting, diagnostics and debugging purposes only.
     * <p>
     * The driver shall generate a warning if the client info name specified
     * is not recognized by the driver.
     * <p>
     * If the value specified to this method is greater than the maximum
     * length for the property the driver may either truncate the value and
     * generate a warning or generate a <code>SQLClientInfoException</code>.  If the driver
     * generates a <code>SQLClientInfoException</code>, the value specified was not set on the
     * connection.
     * <p>
     * The following are standard client info properties.  Drivers are not
     * required to support these properties however if the driver supports a
     * client info property that can be described by one of the standard
     * properties, the standard property name should be used.
     *
     * <ul>
     * <li>ApplicationName  -       The name of the application currently utilizing
     *                                                      the connection</li>
     * <li>ClientUser               -       The name of the user that the application using
     *                                                      the connection is performing work for.  This may
     *                                                      not be the same as the user name that was used
     *                                                      in establishing the connection.</li>
     * <li>ClientHostname   -       The hostname of the computer the application
     *                                                      using the connection is running on.</li>
     * </ul>
     *
     * @param name          The name of the client info property to set
     * @param value         The value to set the client info property to.  If the
     *                                      value is null, the current value of the specified
     *                                      property is cleared.
     *
     * @throws      SQLClientInfoException if the database server returns an error while
     *                      setting the client info value on the database server or this method
     *                      is called on a closed connection
     *
     * @since 1.6
     */
    void setClientInfo(String name, String value)
                throws SQLClientInfoException;
    /**
     * Sets the value of the connection's client info properties.  The
     * <code>Properties</code> object contains the names and values of the client info
     * properties to be set.  The set of client info properties contained in
     * the properties list replaces the current set of client info properties
     * on the connection.  If a property that is currently set on the
     * connection is not present in the properties list, that property is
     * cleared.  Specifying an empty properties list will clear all of the
     * properties on the connection.  See <code>setClientInfo (String, String)</code> for
     * more information.
     * <p>
     * If an error occurs in setting any of the client info properties, a
     * <code>SQLClientInfoException</code> is thrown. The <code>SQLClientInfoException</code>
     * contains information indicating which client info properties were not set.
     * The state of the client information is unknown because
     * some databases do not allow multiple client info properties to be set
     * atomically.  For those databases, one or more properties may have been
     * set before the error occurred.
     *
     *
     * @param properties                the list of client info properties to set
     *
     * @see java.sql.Connection#setClientInfo(String, String) setClientInfo(String, String)
     * @since 1.6
     *
     * @throws SQLClientInfoException if the database server returns an error while
     *                  setting the clientInfo values on the database server or this method
     *                  is called on a closed connection
     *
     */
    void setClientInfo(Properties properties)
                throws SQLClientInfoException;
    /**
     * Returns the value of the client info property specified by name.  This
     * method may return null if the specified client info property has not
     * been set and does not have a default value.  This method will also
     * return null if the specified client info property name is not supported
     * by the driver.
     * <p>
     * Applications may use the <code>DatabaseMetaData.getClientInfoProperties</code>
     * method to determine the client info properties supported by the driver.
     *
     * @param name          The name of the client info property to retrieve
     *
     * @return                      The value of the client info property specified
     *
     * @throws SQLException         if the database server returns an error when
     *                                                      fetching the client info value from the database
     *                                                      or this method is called on a closed connection
     *
     * @since 1.6
     *
     * @see java.sql.DatabaseMetaData#getClientInfoProperties
     */
    String getClientInfo(String name)
                throws SQLException;
    /**
     * Returns a list containing the name and current value of each client info
     * property supported by the driver.  The value of a client info property
     * may be null if the property has not been set and does not have a
     * default value.
     *
     * @return      A <code>Properties</code> object that contains the name and current value of
     *                      each of the client info properties supported by the driver.
     *
     * @throws      SQLException if the database server returns an error when
     *                      fetching the client info values from the database
     *                      or this method is called on a closed connection
     *
     * @since 1.6
     */
    Properties getClientInfo()
                throws SQLException;
    /**
     * Factory method for creating Array objects.
     *<p>
     * <b>Note: </b>When <code>createArrayOf</code> is used to create an array object
     * that maps to a primitive data type, then it is implementation-defined
     * whether the <code>Array</code> object is an array of that primitive
     * data type or an array of <code>Object</code>.
     * <p>
     * <b>Note: </b>The JDBC driver is responsible for mapping the elements
     * <code>Object</code> array to the default JDBC SQL type defined in
     * java.sql.Types for the given class of <code>Object</code>. The default
     * mapping is specified in Appendix B of the JDBC specification.  If the
     * resulting JDBC type is not the appropriate type for the given typeName then
     * it is implementation defined whether an <code>SQLException</code> is
     * thrown or the driver supports the resulting conversion.
     *
     * @param typeName the SQL name of the type the elements of the array map to. The typeName is a
     * database-specific name which may be the name of a built-in type, a user-defined type or a standard  SQL type supported by this database. This
     *  is the value returned by <code>Array.getBaseTypeName</code>
     * @param elements the elements that populate the returned object
     * @return an Array object whose elements map to the specified SQL type
     * @throws SQLException if a database error occurs, the JDBC type is not
     *  appropriate for the typeName and the conversion is not supported, the typeName is null or this method is called on a closed connection
     * @throws SQLFeatureNotSupportedException if the JDBC driver does not support this data type
     * @since 1.6
     */
    Array createArrayOf(String typeName, Object[] elements) throws
        SQLException;
    /**
     * Factory method for creating Struct objects.
     *
     * @param typeName the SQL type name of the SQL structured type that this <code>Struct</code>
     * object maps to. The typeName is the name of  a user-defined type that
     * has been defined for this database. It is the value returned by
     * <code>Struct.getSQLTypeName</code>.
     * @param attributes the attributes that populate the returned object
     * @return a Struct object that maps to the given SQL type and is populated with the given attributes
     * @throws SQLException if a database error occurs, the typeName is null or this method is called on a closed connection
     * @throws SQLFeatureNotSupportedException if the JDBC driver does not support this data type
     * @since 1.6
     */
    Struct createStruct(String typeName, Object[] attributes)
        throws SQLException;
//--------------------------JDBC 4.1 -----------------------------
    /**
     * Sets the given schema name to access.
     * <P>
     * If the driver does not support schemas, it will
     * silently ignore this request.
     * <p>
     * Calling {@code setSchema} has no effect on previously created or prepared
     * {@code Statement} objects. It is implementation defined whether a DBMS
     * prepare operation takes place immediately when the {@code Connection}
     * method {@code prepareStatement} or {@code prepareCall} is invoked.
     * For maximum portability, {@code setSchema} should be called before a
     * {@code Statement} is created or prepared.
     *
     * @param schema the name of a schema  in which to work
     * @exception SQLException if a database access error occurs
     * or this method is called on a closed connection
     * @see #getSchema
     * @since 1.7
     */
    void setSchema(String schema) throws SQLException;
    /**
     * Retrieves this <code>Connection</code> object's current schema name.
     *
     * @return the current schema name or <code>null</code> if there is none
     * @throws SQLException if a database access error occurs
     * or this method is called on a closed connection
     * @see #setSchema
     * @since 1.7
     */
    String getSchema() throws SQLException;
    /**
     * Terminates an open connection. Calling <code>abort</code> results in:
     * <ul>
     * <li>The connection marked as closed
     * <li>Closes any physical connection to the database
     * <li>Releases resources used by the connection
     * <li>Ensures that any thread that is currently accessing the connection
     * will either progress to completion or throw an <code>SQLException</code>.
     * </ul>
     * <p>
     * Calling <code>abort</code> marks the connection closed and releases any
     * resources. Calling <code>abort</code> on a closed connection is a
     * no-op.
     * <p>
     * It is possible that the aborting and releasing of the resources that are
     * held by the connection can take an extended period of time. When the
     * <code>abort</code> method returns, the connection will have been marked as
     * closed and the <code>Executor</code> that was passed as a parameter to abort
     * may still be executing tasks to release resources.
     * <p>
     * This method checks to see that there is an <code>SQLPermission</code>
     * object before allowing the method to proceed. If a
     * <code>SecurityManager</code> exists and its
     * <code>checkPermission</code> method denies calling <code>abort</code>,
     * this method throws a
     * <code>java.lang.SecurityException</code>.
     * @param executor The <code>Executor</code> implementation which will
     * be used by <code>abort</code>.
     * @throws java.sql.SQLException if a database access error occurs or
     * the {@code executor} is {@code null}.
     * @throws java.lang.SecurityException if a security manager exists and its
     * <code>checkPermission</code> method denies calling <code>abort</code>
     * @see SecurityManager#checkPermission
     * @see Executor
     * @since 1.7
     */
    void abort(Executor executor) throws SQLException;
    /**
     *
     * Sets the maximum period a <code>Connection</code> or
     * objects created from the <code>Connection</code>
     * will wait for the database to reply to any one request. If any
     * request remains unanswered, the waiting method will
     * return with a <code>SQLException</code>, and the <code>Connection</code>
     * or objects created from the <code>Connection</code> will be marked as
     * closed. Any subsequent use of
     * the objects, with the exception of the <code>close</code>,
     * <code>isClosed</code> or <code>Connection.isValid</code>
     * methods, will result in a <code>SQLException</code>.
     * <p>
     * <b>Note</b>: This method is intended to address a rare but serious
     * condition where network partitions can cause threads issuing JDBC calls
     * to hang uninterruptedly in socket reads, until the OS TCP-TIMEOUT
     * (typically 10 minutes). This method is related to the
     * {@link #abort abort() } method which provides an administrator
     * thread a means to free any such threads in cases where the
     * JDBC connection is accessible to the administrator thread.
     * The <code>setNetworkTimeout</code> method will cover cases where
     * there is no administrator thread, or it has no access to the
     * connection. This method is severe in its effects, and should be
     * given a high enough value so it is never triggered before any more
     * normal timeouts, such as transaction timeouts.
     * <p>
     * JDBC driver implementations may also choose to support the
     * {@code setNetworkTimeout} method to impose a limit on database
     * response time, in environments where no network is present.
     * <p>
     * Drivers may internally implement some or all of their API calls with
     * multiple internal driver-database transmissions, and it is left to the
     * driver implementation to determine whether the limit will be
     * applied always to the response to the API call, or to any
     * single request made during the API call.
     * <p>
     *
     * This method can be invoked more than once, such as to set a limit for an
     * area of JDBC code, and to reset to the default on exit from this area.
     * Invocation of this method has no impact on already outstanding
     * requests.
     * <p>
     * The {@code Statement.setQueryTimeout()} timeout value is independent of the
     * timeout value specified in {@code setNetworkTimeout}. If the query timeout
     * expires before the network timeout then the
     * statement execution will be canceled. If the network is still
     * active the result will be that both the statement and connection
     * are still usable. However if the network timeout expires before
     * the query timeout or if the statement timeout fails due to network
     * problems, the connection will be marked as closed, any resources held by
     * the connection will be released and both the connection and
     * statement will be unusable.
     * <p>
     * When the driver determines that the {@code setNetworkTimeout} timeout
     * value has expired, the JDBC driver marks the connection
     * closed and releases any resources held by the connection.
     * <p>
     *
     * This method checks to see that there is an <code>SQLPermission</code>
     * object before allowing the method to proceed. If a
     * <code>SecurityManager</code> exists and its
     * <code>checkPermission</code> method denies calling
     * <code>setNetworkTimeout</code>, this method throws a
     * <code>java.lang.SecurityException</code>.
     *
     * @param executor The <code>Executor</code> implementation which will
     * be used by <code>setNetworkTimeout</code>.
     * @param milliseconds The time in milliseconds to wait for the database
     * operation
     * to complete. If the JDBC driver does not support milliseconds, the
     * JDBC driver will round the value up to the nearest second. If the
     * timeout period expires before the operation
     * completes, a SQLException will be thrown.
     * A value of 0 indicates that there is no timeout for database operations.
     * @throws java.sql.SQLException if a database access error occurs, this
     * method is called on a closed connection,
     * the {@code executor} is {@code null},
     * or the value specified for <code>milliseconds</code> is less than 0.
     * @throws java.lang.SecurityException if a security manager exists and its
     * <code>checkPermission</code> method denies calling
     * <code>setNetworkTimeout</code>.
     * @throws SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method
     * @see SecurityManager#checkPermission
     * @see Statement#setQueryTimeout
     * @see #getNetworkTimeout
     * @see #abort
     * @see Executor
     * @since 1.7
     */
    void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException;
    /**
     * Retrieves the number of milliseconds the driver will
     * wait for a database request to complete.
     * If the limit is exceeded, a
     * <code>SQLException</code> is thrown.
     *
     * @return the current timeout limit in milliseconds; zero means there is
     * no limit
     * @throws SQLException if a database access error occurs or
     * this method is called on a closed <code>Connection</code>
     * @throws SQLFeatureNotSupportedException if the JDBC driver does not support
     * this method
     * @see #setNetworkTimeout
     * @since 1.7
     */
    int getNetworkTimeout() throws SQLException;
// JDBC 4.3
    /**
     * Hints to the driver that a request, an independent unit of work, is beginning
     * on this connection. Each request is independent of all other requests
     * with regard to state local to the connection either on the client or the
     * server. Work done between {@code beginRequest}, {@code endRequest}
     * pairs does not depend on any other work done on the connection either as
     * part of another request or outside of any request. A request may include multiple
     * transactions. There may be dependencies on committed database state as
     * that is not local to the connection.
     * <p>
     * Local state is defined as any state associated with a Connection that is
     * local to the current Connection either in the client or the database that
     * is not transparently reproducible.
     * <p>
     * Calls to {@code beginRequest} and {@code endRequest} are not nested.
     * Multiple calls to {@code beginRequest} without an intervening call
     * to {@code endRequest} is not an error. The first {@code beginRequest} call
     * marks the start of the request and subsequent calls are treated as
     * a no-op.
     * <p>
     * Use of {@code beginRequest} and {@code endRequest} is optional, vendor
     * specific and should largely be transparent. In particular
     * implementations may detect conditions that indicate dependence on
     * other work such as an open transaction. It is recommended though not
     * required that implementations throw a {@code SQLException} if there is an active
     * transaction and {@code beginRequest} is called.
     * Using these methods may improve performance or provide other benefits.
     * Consult your vendors documentation for additional information.
     * <p>
     * It is recommended to
     * enclose each unit of work in {@code beginRequest}, {@code endRequest}
     * pairs such that there is no open transaction at the beginning or end of
     * the request and no dependency on local state that crosses request
     * boundaries. Committed database state is not local.
     *
     * @implSpec
     * The default implementation is a no-op.
     * <p>
     * @apiNote
     * This method is to be used by Connection pooling managers.
     * <p>
     * The pooling manager should call {@code beginRequest} on the underlying connection
     * prior to returning a connection to the caller.
     * <p>
     * The pooling manager does not need to call {@code beginRequest} if:
     * <ul>
     * <li>The connection pool caches {@code PooledConnection} objects</li>
     * <li>Returns a logical connection handle when {@code getConnection} is
     * called by the application</li>
     * <li>The logical {@code Connection} is closed by calling
     * {@code Connection.close} prior to returning the {@code PooledConnection}
     * to the cache.</li>
     * </ul>
     * @throws SQLException if an error occurs
     * @since 9
     * @see #endRequest
     * @see javax.sql.PooledConnection
     */
    default void beginRequest() throws SQLException {
        // Default method takes no action
    }
    /**
     * Hints to the driver that a request, an independent unit of work,
     * has completed. Calls to {@code beginRequest}
     * and {@code endRequest} are not nested. Multiple
     * calls to {@code endRequest} without an intervening call to {@code beginRequest}
     * is not an error. The first {@code endRequest} call
     * marks the request completed and subsequent calls are treated as
     * a no-op. Calling {@code endRequest} without an initial call to
     * {@code beginRequest} is a no-op.
     *<p>
     * The exact behavior of this method is vendor specific. In particular
     * implementations may detect conditions that indicate dependence on
     * other work such as an open transaction. It is recommended though not
     * required that implementations throw a {@code SQLException} if there is an active
     * transaction and {@code endRequest} is called.
     *
     * @implSpec
     * The default implementation is a no-op.
     * @apiNote
     *
     * This method is to be used by Connection pooling managers.
     * <p>
     * The pooling manager should call {@code endRequest} on the underlying connection
     * when the applications returns the connection back to the connection pool.
     * <p>
     * The pooling manager does not need to call {@code endRequest} if:
     * <ul>
     * <li>The connection pool caches {@code PooledConnection} objects</li>
     * <li>Returns a logical connection handle when {@code getConnection} is
     * called by the application</li>
     * <li>The logical {@code Connection} is closed by calling
     * {@code Connection.close} prior to returning the {@code PooledConnection}
     * to the cache.</li>
     * </ul>
     * @throws SQLException if an error occurs
     * @since 9
     * @see #beginRequest
     * @see javax.sql.PooledConnection
     */
    default void endRequest() throws SQLException {
        // Default method takes no action
    }
    /**
     * Sets and validates the sharding keys for this connection. A {@code null}
     * value may be specified for the sharding Key. The validity
     * of a {@code null} sharding key is vendor-specific. Consult your vendor's
     * documentation for additional information.
     * @implSpec
     * The default implementation will throw a
     * {@code SQLFeatureNotSupportedException}.
     *
     * @apiNote
     * This method validates that the sharding keys are valid for the
     * {@code Connection}. The timeout value indicates how long the driver
     * should wait for the {@code Connection} to verify that the sharding key
     * is valid before {@code setShardingKeyIfValid} returns false.
     * @param shardingKey the sharding key to be validated against this connection.
     * The sharding key may be {@code null}
     * @param superShardingKey the super sharding key to be validated against this
     * connection. The super sharding key may be {@code null}.
     * @param timeout time in seconds before which the validation process is expected to
     * be completed, otherwise the validation process is aborted. A value of 0 indicates
     * the validation process will not time out.
     * @return true if the connection is valid and the sharding keys are valid
     * and set on this connection; false if the sharding keys are not valid or
     * the timeout period expires before the operation completes.
     * @throws SQLException if an error occurs while performing this validation;
     * a {@code superShardingKey} is specified
     * without a {@code shardingKey};
     * this method is called on a closed {@code connection}; or
     * the {@code timeout} value is negative.
     * @throws SQLFeatureNotSupportedException if the driver does not support sharding
     * @since 9
     * @see ShardingKey
     * @see ShardingKeyBuilder
     */
    default boolean setShardingKeyIfValid(ShardingKey shardingKey,
            ShardingKey superShardingKey, int timeout)
            throws SQLException {
        throw new SQLFeatureNotSupportedException("setShardingKeyIfValid not implemented");
    }
    /**
     * Sets and validates the sharding key for this connection. A {@code null}
     * value may be specified for the sharding Key. The validity
     * of a {@code null} sharding key is vendor-specific. Consult your vendor's
     * documentation for additional information.
     * @implSpec
     * The default implementation will throw a
     * {@code SQLFeatureNotSupportedException}.
     * @apiNote
     * This method validates that the sharding key is valid for the
     * {@code Connection}. The timeout value indicates how long the driver
     * should wait for the {@code Connection} to verify that the sharding key
     * is valid before {@code setShardingKeyIfValid} returns false.
     * @param shardingKey the sharding key to be validated against this connection.
     * The sharding key may be {@code null}
     * @param timeout time in seconds before which the validation process is expected to
     * be completed, else the validation process is aborted. A value of 0 indicates
     * the validation process will not time out.
     * @return true if the connection is valid and the sharding key is valid to be
     * set on this connection; false if the sharding key is not valid or
     * the timeout period expires before the operation completes.
     * @throws SQLException if there is an error while performing this validation;
     * this method is called on a closed {@code connection};
     * or the {@code timeout} value is negative.
     * @throws SQLFeatureNotSupportedException if the driver does not support sharding
     * @since 9
     * @see ShardingKey
     * @see ShardingKeyBuilder
     */
    default boolean setShardingKeyIfValid(ShardingKey shardingKey, int timeout)
            throws SQLException {
        throw new SQLFeatureNotSupportedException("setShardingKeyIfValid not implemented");
    }
    /**
     * Specifies a shardingKey and superShardingKey to use with this Connection.
     * @implSpec
     * The default implementation will throw a
     * {@code SQLFeatureNotSupportedException}.
     * @apiNote
     * This method sets the specified sharding keys but does not require a
     * round trip to the database to validate that the sharding keys are valid
     * for the {@code Connection}.
     * @param shardingKey the sharding key to set on this connection. The sharding
     * key may be {@code null}
     * @param superShardingKey the super sharding key to set on this connection.
     * The super sharding key may be {@code null}
     * @throws SQLException if an error occurs setting the sharding keys;
     * this method is called on a closed {@code connection}; or
     * a {@code superShardingKey} is specified without a {@code shardingKey}
     * @throws SQLFeatureNotSupportedException if the driver does not support sharding
     * @since 9
     * @see ShardingKey
     * @see ShardingKeyBuilder
     */
    default void setShardingKey(ShardingKey shardingKey, ShardingKey superShardingKey)
            throws SQLException {
        throw new SQLFeatureNotSupportedException("setShardingKey not implemented");
    }
    /**
     * Specifies a shardingKey to use with this Connection.
     * @implSpec
     * The default implementation will throw a
     * {@code SQLFeatureNotSupportedException}.
     * @apiNote
     * This method sets the specified sharding key but does not require a
     * round trip to the database to validate that the sharding key is valid
     * for the {@code Connection}.
     * @param shardingKey the sharding key to set on this connection. The sharding
     * key may be {@code null}
     * @throws SQLException if an error occurs setting the sharding key; or
     * this method is called on a closed {@code connection}
     * @throws SQLFeatureNotSupportedException if the driver does not support sharding
     * @since 9
     * @see ShardingKey
     * @see ShardingKeyBuilder
     */
    default void setShardingKey(ShardingKey shardingKey)
            throws SQLException {
        throw new SQLFeatureNotSupportedException("setShardingKey not implemented");
    }
}
| gpl-2.0 |
agiliopadua/lammps | lib/mdi/Install.py | 6330 | #!/usr/bin/env python
# install.py tool to do a generic build of a library
# soft linked to by many of the lib/Install.py files
# used to automate the steps described in the corresponding lib/README
from __future__ import print_function
import sys,os,subprocess
import glob
sys.path.append('..')
from install_helpers import checkmd5sum
# help message
help = """
Syntax from src dir: make lib-libname args="-m machine -e suffix"
Syntax from lib dir: python Install.py -m machine -e suffix
libname = name of lib dir (e.g. atc, h5md, meam, poems, etc)
specify -m and optionally -e, order does not matter
-m = peform a clean followed by "make -f Makefile.machine"
machine = suffix of a lib/Makefile.* file
-e = set EXTRAMAKE variable in Makefile.machine to Makefile.lammps.suffix
does not alter existing Makefile.machine
Examples:
make lib-poems args="-m serial" # build POEMS lib with same settings as in the serial Makefile in src
make lib-colvars args="-m mpi" # build COLVARS lib with same settings as in the mpi Makefile in src
make lib-meam args="-m ifort" # build MEAM lib with custom Makefile.ifort (using Intel Fortran)
"""
# settings
version = "1.2.9"
url = "https://github.com/MolSSI-MDI/MDI_Library/archive/v%s.tar.gz" % version
# known checksums for different MDI versions. used to validate the download.
checksums = { \
'1.2.7' : '2f3177b30ccdbd6ae28ea3bdd5fed0db', \
'1.2.9' : 'ddfa46d6ee15b4e59cfd527ec7212184', \
}
# print error message or help
def error(str=None):
  """Print the module-level usage help (when no message is given, or the
  message is falsy) or the message prefixed with ERROR, then exit.

  NOTE: the parameter name 'str' shadows the builtin; it is kept for
  backward compatibility with existing callers.
  """
  if not str:
    print(help)
  else:
    print("ERROR", str)
  sys.exit()
def fullpath(path):
  """Return the absolute form of *path*, expanding a leading '~' or '~user'
  and resolving any relative components against the current directory."""
  expanded = os.path.expanduser(path)
  return os.path.abspath(expanded)
def which(program):
  """Locate *program* like the shell 'which' command.

  If *program* contains a directory component, return it unchanged when it
  is an executable regular file.  Otherwise search every entry of the PATH
  environment variable.  Returns None when nothing suitable is found.
  """
  def runnable(candidate):
    # executable regular file check
    return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

  head, _ = os.path.split(program)
  if head:
    # explicit path given: accept it only if it is directly executable
    if runnable(program):
      return program
    return None
  for folder in os.environ["PATH"].split(os.pathsep):
    candidate = os.path.join(folder.strip('"'), program)
    if runnable(candidate):
      return candidate
  return None
def geturl(url, fname):
  """Download *url* into the local file *fname*.

  Tries 'curl' first and falls back to 'wget'; each failed attempt prints
  the tool's captured output.  Aborts the script via error() when neither
  tool is available or both fail.
  """
  downloaded = False
  if which('curl') is not None:
    try:
      subprocess.check_output('curl -L -o "%s" %s' % (fname, url),
                              stderr=subprocess.STDOUT, shell=True)
      downloaded = True
    except subprocess.CalledProcessError as e:
      print("Calling curl failed with: %s" % e.output.decode('UTF-8'))
  if not downloaded and which('wget') is not None:
    try:
      subprocess.check_output('wget -O "%s" %s' % (fname, url),
                              stderr=subprocess.STDOUT, shell=True)
      downloaded = True
    except subprocess.CalledProcessError as e:
      print("Calling wget failed with: %s" % e.output.decode('UTF-8'))
  if not downloaded:
    error("Failed to download source code with 'curl' or 'wget'")
  return
# parse args
args = sys.argv[1:]
nargs = len(args)
# no arguments at all: print usage help and exit
if nargs == 0: error()
machine = None
extraflag = 0
iarg = 0
# accept "-m machine" and "-e suffix" in either order; any other flag,
# or a flag missing its value, aborts with the usage message
while iarg < nargs:
  if args[iarg] == "-m":
    if iarg+2 > nargs: error()
    machine = args[iarg+1]
    iarg += 2
  elif args[iarg] == "-e":
    if iarg+2 > nargs: error()
    extraflag = 1
    # NOTE(review): 'suffix' only exists when -e was given; later code must
    # (and does) guard its use behind 'extraflag'
    suffix = args[iarg+1]
    iarg += 2
  else: error()
# set lib from working dir
# the library name (e.g. "mdi") is taken from the directory this script runs in
cwd = os.getcwd()
lib = os.path.basename(cwd)
# download and unpack MDI_Library tarball
homepath = "."
homedir = "%s/MDI_Library" % homepath
print("Downloading MDI_Library ...")
mditar = "%s/v%s.tar.gz" % (homepath,version)
geturl(url, mditar)
# verify downloaded archive integrity via md5 checksum, if known.
if version in checksums:
  if not checkmd5sum(checksums[version], mditar):
    sys.exit("Checksum for MDI library does not match")
print("Unpacking MDI_Library tarball ...")
# remove any stale unpack directory before extracting
if os.path.exists("%s/v%s" % (homepath,version)):
  cmd = 'rm -rf "%s/v%s"' % (homepath,version)
  subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
cmd = 'cd "%s"; tar -xzvf v%s.tar.gz' % (homepath,version)
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
os.remove("%s/v%s.tar.gz" % (homepath,version))
# rename the versioned extract dir (MDI_Library-<version>) to the fixed
# name 'MDI_Library', replacing any previous copy
if os.path.basename(homedir) != version:
  if os.path.exists(homedir):
    cmd = 'rm -rf "%s"' % homedir
    subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
  os.rename("%s/MDI_Library-%s" % (homepath,version),homedir)
# create Makefile.auto as copy of Makefile.machine
# reset EXTRAMAKE if requested
if not os.path.exists("Makefile.%s" % machine):
  error("lib/%s/Makefile.%s does not exist" % (lib,machine))
lines = open("Makefile.%s" % machine,'r').readlines()
fp = open("Makefile.auto",'w')
has_extramake = False
# copy Makefile.<machine> line-by-line; when -e was given, rewrite the
# value of any "EXTRAMAKE = <file>" line to Makefile.lammps.<suffix>
for line in lines:
  words = line.split()
  if len(words) == 3 and words[0] == "EXTRAMAKE" and words[1] == '=':
    has_extramake = True
    if extraflag:
      line = line.replace(words[2],"Makefile.lammps.%s" % suffix)
  fp.write(line)
fp.close()
# make the library via Makefile.auto optionally with parallel make
# fall back to a serial build when the CPU count cannot be determined
try:
  import multiprocessing
  n_cpus = multiprocessing.cpu_count()
except:
  n_cpus = 1
print("Building lib%s.so ..." % lib)
cmd = "make -f Makefile.auto clean; make -f Makefile.auto -j%d" % n_cpus
# check_output raises CalledProcessError on a failed build, aborting the script
txt = subprocess.check_output(cmd,shell=True,stderr=subprocess.STDOUT)
print(txt.decode('UTF-8'))
# create 2 links in lib/mdi to MDI Library src dir
print("Creating links to MDI Library include and lib files")
# replace any pre-existing links (or files) from an earlier run
if os.path.isfile("includelink") or os.path.islink("includelink"):
  os.remove("includelink")
if os.path.isfile("liblink") or os.path.islink("liblink"):
  os.remove("liblink")
os.symlink(os.path.join(homedir, 'MDI_Library'), 'includelink')
os.symlink(os.path.join(homepath, 'build', 'MDI_Library'), 'liblink')
# Append the -rpath option to Makefile.lammps
dir_path = os.path.dirname(os.path.realpath(__file__))
rpath_option = "-Wl,-rpath=" + str(dir_path) + "/liblink"
# NOTE: opening in append mode creates Makefile.lammps if it does not exist
makefile_lammps = open(str(dir_path) + "/Makefile.lammps", "a")
makefile_lammps.write(str(rpath_option) + "\n")
makefile_lammps.close()
# a successful build leaves lib<name>.so* reachable through the liblink symlink
shared_files = glob.glob( os.path.join( homepath, "liblink", "lib%s.so*" % lib) )
if len(shared_files) > 0:
  print("Build was successful")
else:
  error("Build of lib/%s/lib%s.so was NOT successful" % (lib,lib))
# NOTE(review): this check looks dead -- the append-mode open above always
# creates Makefile.lammps, so the warning can never fire; verify intent
if has_extramake and not os.path.exists("Makefile.lammps"):
  print("lib/%s/Makefile.lammps was NOT created" % lib)
| gpl-2.0 |
moljac/sharpen | src/main/sharpen/core/framework/BindingUtils.java | 8837 | /* Copyright (C) 2004 - 2008 Versant Inc. http://www.db4o.com
This file is part of the sharpen open source java to c# translator.
sharpen is free software; you can redistribute it and/or modify it under
the terms of version 2 of the GNU General Public License as published
by the Free Software Foundation and as clarified by db4objects' GPL
interpretation policy, available at
http://www.db4o.com/about/company/legalpolicies/gplinterpretation/
Alternatively you can write to db4objects, Inc., 1900 S Norfolk Street,
Suite 350, San Mateo, CA 94403, USA.
sharpen is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
/**
* Portions of this file were taken from Eclipse:
* /org.eclipse.jdt.ui/core extension/org/eclipse/jdt/internal/corext/dom/Bindings.java
* and are subject to the CPL.
*
* Original copyright notice on file follows:
* /*******************************************************************************
* * Copyright (c) 2000, orporation and others.
* * All rights reserved. This program and the accompanying materials
* * are made available under the terms of the Common Public License v1.0
* * which accompanies this distribution, and is available at
* * http://www.eclipse.org/legal/cpl-v10.html
* *
* * Contributors:
* * IBM Corporation - initial API and implementation
* * Dmitry Stalnov ([email protected]) - contributed fix for
* * bug "inline method - doesn't handle implicit cast" (see
* * https://bugs.eclipse.org/bugs/show_bug.cgi?id=24941).
* *******************************************************************************
*/
package sharpen.core.framework;
import org.eclipse.jdt.core.dom.*;
public class BindingUtils {

	/**
	 * Finds the method in the given <code>type</code> that is overridden by the specified <code>method</code>. Returns <code>null</code> if no such method exists.
	 * @param type The type to search the method in
	 * @param method The specified method that would override the result
	 * @return the method binding representing the method overridden by the specified <code>method</code>
	 */
	public static IMethodBinding findOverriddenMethodInType(ITypeBinding type, IMethodBinding method) {
		// Object.clone() is deliberately never reported as overridden
		if (type.getName().equals("Object") && method.getName().equals("clone"))
			return null;
		for (Object o : type.getDeclaredMethods()) {
			IMethodBinding existing = (IMethodBinding)o;
			// a subsignature match in either direction counts as an override
			if (existing.isSubsignature(method)
				|| method.isSubsignature(existing)) {
				return existing;
			}
		}
		return null;
	}

	/**
	 * Finds the method overridden by <code>method</code> in <code>type</code> itself or,
	 * failing that, in its superclass chain (depth-first, nearest class first).
	 * Interfaces are NOT searched.
	 * @return the overridden method binding, or <code>null</code> if none is found
	 */
	public static IMethodBinding findOverriddenMethodInTypeOrSuperclasses(ITypeBinding type, IMethodBinding method) {
		IMethodBinding found = findOverriddenMethodInType(type, method);
		if (null != found) {
			return found;
		}
		ITypeBinding superClass = type.getSuperclass();
		if (null != superClass) {
			return findOverriddenMethodInTypeOrSuperclasses(superClass, method);
		}
		return null;
	}

	/**
	 * Finds a method in the hierarchy of <code>type</code> that is overridden by <code>binding</code>.
	 * Returns <code>null</code> if no such method exists. First the super class is examined and then the implemented interfaces.
	 * @param type The type to search the method in
	 * @param binding The method that overrides
	 * @return the method binding overridden by the method
	 */
	public static IMethodBinding findOverriddenMethodInHierarchy(ITypeBinding type, IMethodBinding binding) {
		return findOverriddenMethodInHierarchy(type, binding, true);
	}

	/**
	 * Finds a method in the hierarchy of <code>type</code> that is overridden by <code>binding</code>.
	 * Search order: the superclass chain first (recursively, always including
	 * interfaces and the classes themselves), then <code>type</code> itself if
	 * <code>considerItself</code> is set, then the directly implemented interfaces
	 * if <code>considerInterfaces</code> is set.
	 * @return the overridden method binding, or <code>null</code> if none is found
	 */
	public static IMethodBinding findOverriddenMethodInHierarchy(ITypeBinding type, IMethodBinding binding, boolean considerInterfaces, boolean considerItself) {
		final ITypeBinding superClass= type.getSuperclass();
		if (superClass != null) {
			// the recursive superclass walk always considers interfaces and the class itself
			final IMethodBinding superClassMethod= findOverriddenMethodInHierarchy(superClass, binding, true, true);
			if (superClassMethod != null) return superClassMethod;
		}
		if (considerItself) {
			final IMethodBinding method = BindingUtils.findOverriddenMethodInType(type, binding);
			if (method != null) return method;
		}
		if (considerInterfaces) {
			final ITypeBinding[] interfaces= type.getInterfaces();
			for (int i= 0; i < interfaces.length; i++) {
				final IMethodBinding interfaceMethod= findOverriddenMethodInHierarchy(interfaces[i], binding, true, true);
				if (interfaceMethod != null) return interfaceMethod;
			}
		}
		return null;
	}

	/**
	 * Convenience overload of {@link #findOverriddenMethodInHierarchy(ITypeBinding, IMethodBinding, boolean, boolean)}
	 * that always considers <code>type</code> itself.
	 */
	public static IMethodBinding findOverriddenMethodInHierarchy(ITypeBinding type, IMethodBinding binding, boolean considerInterfaces) {
		return findOverriddenMethodInHierarchy(type, binding, considerInterfaces, true);
	}

	/**
	 * Finds the method that defines the given method. The returned method might not be visible.
	 * Interfaces of the declaring class are checked first, then the superclass
	 * chain; for interfaces without a superclass, <code>java.lang.Object</code>
	 * (resolved through <code>typeResolver</code>) is consulted. The search then
	 * recurses so the original (topmost) definition is returned.
	 * @param method The method to find
	 * @param typeResolver AST used to resolve <code>java.lang.Object</code> when
	 *        <code>method</code> is declared on an interface; may be <code>null</code>
	 * @return the method binding representing the method
	 */
	public static IMethodBinding findMethodDefininition(IMethodBinding method, AST typeResolver) {
		if (null == method) {
			return null;
		}
		IMethodBinding definition = null;
		ITypeBinding type= method.getDeclaringClass();
		ITypeBinding[] interfaces= type.getInterfaces();
		for (int i= 0; i < interfaces.length; i++) {
			IMethodBinding res= findOverriddenMethodInHierarchy(interfaces[i], method);
			if (res != null) {
				definition = res; // methods from interfaces are always public and therefore visible
				break;
			}
		}
		if (type.getSuperclass() != null) {
			// a superclass definition takes precedence over an interface one,
			// unless it is private (and therefore not actually overridden)
			IMethodBinding res= findOverriddenMethodInHierarchy(type.getSuperclass(), method);
			if (res != null && !Modifier.isPrivate(res.getModifiers())) {
				definition = res;
			}
		} else if (type.isInterface() && null != typeResolver) {
			IMethodBinding res = findOverriddenMethodInHierarchy(typeResolver.resolveWellKnownType("java.lang.Object"), method);
			if (res != null) {
				definition = res;
			}
		}
		// recurse upwards so the topmost definition wins
		IMethodBinding def = findMethodDefininition(definition, typeResolver);
		if (def != null) {
			return def;
		}
		return definition;
	}

	/**
	 * Returns whether <code>member</code> is visible to code in package <code>pack</code>:
	 * public/protected members and interface members always are, private members
	 * never are, and package-private members only within the same package.
	 */
	public static boolean isVisibleInHierarchy(IMethodBinding member, IPackageBinding pack) {
		int otherflags= member.getModifiers();
		ITypeBinding declaringType= member.getDeclaringClass();
		if (Modifier.isPublic(otherflags) || Modifier.isProtected(otherflags) || (declaringType != null && declaringType.isInterface())) {
			return true;
		} else if (Modifier.isPrivate(otherflags)) {
			return false;
		}
		// package-private: visible only from the declaring type's own package
		return pack == declaringType.getPackage();
	}

	/**
	 * Returns the qualified name of a method: <code>&lt;declaring class&gt;.&lt;name&gt;</code>.
	 */
	public static String qualifiedName(IMethodBinding binding) {
		return qualifiedName(binding.getDeclaringClass()) + "."
			+ binding.getName();
	}

	/**
	 * Returns the qualified signature of a method:
	 * <code>&lt;declaring class&gt;.&lt;name&gt;(&lt;qualified param type&gt;,...)</code>.
	 */
	public static String qualifiedSignature(IMethodBinding binding) {
		StringBuffer buf = new StringBuffer();
		buf.append(qualifiedName(binding.getDeclaringClass())).append(".").append(binding.getName()).append("(");
		ITypeBinding[] parameterTypes = binding.getParameterTypes();
		for (int i = 0; i < parameterTypes.length; i++) {
			if (i != 0) buf.append(",");
			buf.append(qualifiedName(parameterTypes[i]));
		}
		buf.append(")");
		return buf.toString();
	}

	/**
	 * Returns a mapping key for <code>type</code>: its qualified name, with a
	 * <code>&lt;,,...&gt;</code> suffix (one comma per additional type argument or
	 * type parameter) for generic types.
	 */
	public static String typeMappingKey(final ITypeBinding type) {
		ITypeBinding[] typeArguments = type.getTypeArguments();
		if (typeArguments.length == 0)
			typeArguments = type.getTypeParameters();
		if (typeArguments.length > 0) {
			return qualifiedName(type) + "<" + repeat(',', typeArguments.length - 1) + ">";
		}
		return qualifiedName(type);
	}

	/**
	 * Returns a string consisting of <code>c</code> repeated <code>count</code> times.
	 */
	private static String repeat(char c, int count) {
		StringBuilder builder = new StringBuilder(count);
		for (int i = 0; i < count; ++i) {
			builder.append(c);
		}
		return builder.toString();
	}

	/**
	 * Returns the qualified name of the type's declaration, falling back to the
	 * binding's own qualified name when the declaration's name is empty.
	 */
	public static String qualifiedName(final ITypeBinding declaringClass) {
		String qn = declaringClass.getTypeDeclaration().getQualifiedName();
		if (qn.length() > 0)
			return qn;
		else
			return declaringClass.getQualifiedName();
	}

	/**
	 * Returns the qualified name of a variable: <code>&lt;declaring class&gt;.&lt;name&gt;</code>
	 * for fields, or just the plain name for variables with no declaring class
	 * (e.g. locals and parameters).
	 */
	public static String qualifiedName(IVariableBinding binding) {
		ITypeBinding declaringClass = binding.getDeclaringClass();
		if (null == declaringClass) {
			return binding.getName();
		}
		return qualifiedName(declaringClass) + "." + binding.getName();
	}

	/** Returns whether the method binding carries the <code>static</code> modifier. */
	public static boolean isStatic(IMethodBinding binding) {
		return Modifier.isStatic(binding.getModifiers());
	}

	/** Returns whether the variable binding carries the <code>static</code> modifier. */
	public static boolean isStatic(IVariableBinding binding) {
		return Modifier.isStatic(binding.getModifiers());
	}

	/** Returns whether the invoked method of <code>invocation</code> is static. */
	public static boolean isStatic(MethodInvocation invocation) {
		return isStatic(invocation.resolveMethodBinding());
	}
}
| gpl-2.0 |
jack51706/FuzzLabs | agents/FuzzlabsAgentLinux/connection.cpp | 603 | #include "connection.h"
// Wrap an accepted client socket descriptor and its peer address.
// NOTE(review): inet_ntoa() returns a pointer to a static internal buffer
// that is overwritten by any subsequent inet_ntoa() call; client_addr is
// not an owned copy -- confirm no other call site reuses inet_ntoa() while
// this Connection is alive.
Connection::Connection(int c_fd, struct sockaddr_in *c_sin) {
    sock = c_fd;
    sin = c_sin;
    client_addr = (char *)inet_ntoa(sin->sin_addr);
}
// Return the underlying socket file descriptor.
int Connection::socket() {
    return sock;
}
// Returns the cached textual peer address (see constructor note about
// inet_ntoa's static buffer).
char *Connection::address() {
    return client_addr;
}
// Sends 'len' bytes from 'data' to the peer. Returns the send() result:
// bytes written, or -1 on error.
int Connection::transmit(char *data, unsigned int len) {
    return send(sock, data, len, 0);
}
// Reads up to RECV_BUFFER_SIZE-1 bytes into 'data' without blocking.
// The buffer is zeroed first, so any received data is NUL-terminated.
// Returns the recv() result: bytes read, 0 on orderly shutdown, or -1
// with errno set (EAGAIN/EWOULDBLOCK when no data is pending).
// Fix: removed the unused local 'length' from the original.
int Connection::receive(char *data) {
    memset(data, 0x00, RECV_BUFFER_SIZE);
    return recv(sock, data, RECV_BUFFER_SIZE - 1, MSG_DONTWAIT);
}
// Closes the client socket. The Connection object itself stays valid but
// must not be used for I/O afterwards.
void Connection::terminate() {
    close(sock);
}
sevir/laaam | pomo/streams.php | 4481 | <?php
/**
 * Classes that help with reading streams of data from files.
* Based on the classes from Danilo Segan <[email protected]>
*
* @version $Id$
* @package pomo
* @subpackage streams
*/
if ( !class_exists( 'POMO_Reader' ) ):
class POMO_Reader {
	/** Byte order used by readint32/readint32array: 'little' or 'big'. */
	var $endian = 'little';
	/**
	 * Current read position in the stream.
	 * Fix: the original declared "var $_post = ''" — a typo for $_pos,
	 * which every method actually uses.
	 */
	var $_pos = 0;
	/** True when mbstring function overloading forces the mb_* fallbacks. */
	var $is_overloaded = false;

	/**
	 * PHP4-style constructor: detects mbstring function overloading and
	 * resets the read position.
	 */
	function POMO_Reader() {
		$this->is_overloaded = ((ini_get("mbstring.func_overload") & 2) != 0) && function_exists('mb_substr');
		$this->_pos = 0;
	}

	/**
	 * Sets the endianness of the file.
	 *
	 * @param $endian string 'big' or 'little'
	 */
	function setEndian($endian) {
		$this->endian = $endian;
	}

	/**
	 * Reads a 32bit Integer from the Stream
	 *
	 * @return mixed The integer, corresponding to the next 32 bits from
	 * 	the stream or false if there are not enough bytes or on error
	 */
	function readint32() {
		$bytes = $this->read(4);
		if (4 != $this->strlen($bytes))
			return false;
		// 'N' = unsigned 32-bit big-endian, 'V' = little-endian (pack codes).
		$endian_letter = ('big' == $this->endian) ? 'N' : 'V';
		$int = unpack($endian_letter, $bytes);
		return array_shift($int);
	}

	/**
	 * Reads an array of 32-bit Integers from the Stream
	 *
	 * @param integer $count How many elements should be read
	 * @return mixed Array of integers or false if there isn't
	 * 	enough data or on error
	 */
	function readint32array($count) {
		$bytes = $this->read(4 * $count);
		if (4 * $count != $this->strlen($bytes))
			return false;
		$endian_letter = ('big' == $this->endian) ? 'N' : 'V';
		return unpack($endian_letter . $count, $bytes);
	}

	/**
	 * Byte-safe substr: uses mb_substr in 'ascii' mode when mbstring
	 * overloading would otherwise make substr() multibyte-aware.
	 */
	function substr($string, $start, $length) {
		if ($this->is_overloaded) {
			return mb_substr($string, $start, $length, 'ascii');
		} else {
			return substr($string, $start, $length);
		}
	}

	/** Byte-safe strlen (see substr for the overloading rationale). */
	function strlen($string) {
		if ($this->is_overloaded) {
			return mb_strlen($string, 'ascii');
		} else {
			return strlen($string);
		}
	}

	/** Splits a string into chunks, with a manual fallback for PHP < 5. */
	function str_split($string, $chunk_size) {
		if (!function_exists('str_split')) {
			$length = $this->strlen($string);
			$out = array();
			for ($i = 0; $i < $length; $i += $chunk_size)
				$out[] = $this->substr($string, $i, $chunk_size);
			return $out;
		} else {
			return str_split( $string, $chunk_size );
		}
	}

	/** Returns the current read position. */
	function pos() {
		return $this->_pos;
	}

	/** Whether the underlying resource is usable; subclasses override. */
	function is_resource() {
		return true;
	}

	/** Releases the underlying resource; subclasses override. */
	function close() {
		return true;
	}
}
endif;
if ( !class_exists( 'POMO_FileReader' ) ):
class POMO_FileReader extends POMO_Reader {
	/**
	 * PHP4-style constructor: opens the given file for binary reading.
	 * NOTE(review): fopen() may return false on failure; is_resource()
	 * is the way callers are expected to check for that.
	 *
	 * @param string $filename Path of the file to read.
	 */
	function POMO_FileReader($filename) {
		parent::POMO_Reader();
		$this->_f = fopen($filename, 'rb');
	}
	/** Reads up to $bytes bytes from the file. */
	function read($bytes) {
		return fread($this->_f, $bytes);
	}
	/**
	 * Seeks to an absolute byte offset.
	 *
	 * @return bool False when fseek() fails, true otherwise.
	 */
	function seekto($pos) {
		if ( -1 == fseek($this->_f, $pos, SEEK_SET)) {
			return false;
		}
		$this->_pos = $pos;
		return true;
	}
	/** True when the file was opened successfully. */
	function is_resource() {
		return is_resource($this->_f);
	}
	/** End-of-file indicator for the underlying handle. */
	function feof() {
		return feof($this->_f);
	}
	/** Closes the underlying file handle. */
	function close() {
		return fclose($this->_f);
	}
	/** Reads the remainder of the file in 4 KiB chunks. */
	function read_all() {
		$all = '';
		while ( !$this->feof() )
			$all .= $this->read(4096);
		return $all;
	}
}
endif;
if ( !class_exists( 'POMO_StringReader' ) ):
/**
* Provides file-like methods for manipulating a string instead
* of a physical file.
*/
/**
 * Provides file-like methods for manipulating a string instead
 * of a physical file.
 */
class POMO_StringReader extends POMO_Reader {
	/** The in-memory "file" contents. */
	var $_str = '';

	/** PHP4-style constructor: wraps the given string. */
	function POMO_StringReader($str = '') {
		parent::POMO_Reader();
		$this->_str = $str;
		$this->_pos = 0;
	}

	/**
	 * Reads up to $bytes characters from the current position and
	 * advances it, clamped to the end of the string.
	 */
	function read($bytes) {
		$chunk = $this->substr($this->_str, $this->_pos, $bytes);
		$this->_pos = min($this->_pos + $bytes, $this->strlen($this->_str));
		return $chunk;
	}

	/** Moves the position to $pos, clamped to the end; returns the new position. */
	function seekto($pos) {
		$this->_pos = min($pos, $this->strlen($this->_str));
		return $this->_pos;
	}

	/** Total length of the wrapped string. */
	function length() {
		return $this->strlen($this->_str);
	}

	/** Returns everything from the current position to the end. */
	function read_all() {
		return $this->substr($this->_str, $this->_pos, $this->length());
	}
}
endif;
if ( !class_exists( 'POMO_CachedFileReader' ) ):
/**
* Reads the contents of the file in the beginning.
*/
class POMO_CachedFileReader extends POMO_StringReader {
	/**
	 * PHP4-style constructor: slurps the whole file into memory so all
	 * further access is string-based.
	 * NOTE(review): the "return false" on read failure has no effect in a
	 * constructor — callers cannot observe it; on failure $_str is false.
	 */
	function POMO_CachedFileReader($filename) {
		parent::POMO_StringReader();
		$this->_str = file_get_contents($filename);
		if (false === $this->_str)
			return false;
		$this->_pos = 0;
	}
}
endif;
if ( !class_exists( 'POMO_CachedIntFileReader' ) ):
/**
* Reads the contents of the file in the beginning.
*/
class POMO_CachedIntFileReader extends POMO_CachedFileReader {
	/**
	 * PHP4-style constructor; a pure pass-through kept for API
	 * compatibility with code that instantiates this subclass.
	 */
	function POMO_CachedIntFileReader($filename) {
		parent::POMO_CachedFileReader($filename);
	}
}
endif; | gpl-2.0 |
misilot/vufind | module/VuFind/src/VuFind/View/Helper/jQueryMobile/MobileMenu.php | 2161 | <?php
/**
* MobileMenu view helper
*
* PHP version 5
*
* Copyright (C) Villanova University 2010.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package View_Helpers
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development Wiki
*/
namespace VuFind\View\Helper\jQueryMobile;
use Zend\View\Helper\AbstractHelper;
/**
* MobileMenu view helper
*
* @category VuFind
* @package View_Helpers
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development Wiki
*/
class MobileMenu extends AbstractHelper
{
    /**
     * Display the top menu.
     *
     * @param array $extras Associative array of extra parameters to send to the
     * view template.
     *
     * @return string
     */
    public function header($extras = [])
    {
        return $this->renderMenuTemplate('header.phtml', $extras);
    }

    /**
     * Display the bottom menu.
     *
     * @param array $extras Associative array of extra parameters to send to the
     * view template.
     *
     * @return string
     */
    public function footer($extras = [])
    {
        return $this->renderMenuTemplate('footer.phtml', $extras);
    }

    /**
     * Render the given template in the current view's context.
     *
     * @param string $template Template filename to render
     * @param array  $extras   Extra parameters for the template
     *
     * @return string
     */
    protected function renderMenuTemplate($template, $extras)
    {
        $view = $this->getView();
        $context = $view->plugin('context');
        return $context($view)->renderInContext($template, $extras);
    }
}
| gpl-2.0 |
SuriyaaKudoIsc/wikia-app-test | extensions/wikia/ImageServing/Test/ImageServingTest.alias.php | 146 | <?php
// Special-page aliases for the ImageServingTest extension, keyed by
// language code (standard MediaWiki alias-file layout).
$specialPageAliases = array();
/** English */
$specialPageAliases['en'] = array(
	'ImageServingTest' => array( 'ImageServingTest' ),
);
cronchinsky/dh-drupal-8 | core/tests/Drupal/FunctionalJavascriptTests/Ajax/AjaxFormCacheTest.php | 3843 | <?php
namespace Drupal\FunctionalJavascriptTests\Ajax;
use Drupal\Core\EventSubscriber\MainContentViewSubscriber;
use Drupal\Core\Form\FormBuilderInterface;
use Drupal\Core\Url;
use Drupal\FunctionalJavascriptTests\WebDriverTestBase;
/**
* Tests the usage of form caching for AJAX forms.
*
* @group Ajax
*/
class AjaxFormCacheTest extends WebDriverTestBase {

  /**
   * {@inheritdoc}
   */
  public static $modules = ['ajax_test', 'ajax_forms_test'];

  /**
   * {@inheritdoc}
   */
  protected $defaultTheme = 'stark';

  /**
   * Tests the usage of form cache for AJAX forms.
   *
   * Verifies that visiting an uncached AJAX form repeatedly does not add
   * entries to the expirable 'form' key-value store.
   */
  public function testFormCacheUsage() {
    /** @var \Drupal\Core\KeyValueStore\KeyValueStoreExpirableInterface $key_value_expirable */
    $key_value_expirable = \Drupal::service('keyvalue.expirable')->get('form');
    $this->drupalLogin($this->rootUser);

    // Ensure that the cache is empty.
    $this->assertEqual(0, count($key_value_expirable->getAll()));

    // Visit an AJAX form that is not cached, 3 times.
    $uncached_form_url = Url::fromRoute('ajax_forms_test.commands_form');
    $this->drupalGet($uncached_form_url);
    $this->drupalGet($uncached_form_url);
    $this->drupalGet($uncached_form_url);

    // The number of cache entries should not have changed.
    $this->assertEqual(0, count($key_value_expirable->getAll()));
  }

  /**
   * Tests AJAX forms in blocks.
   */
  public function testBlockForms() {
    $this->container->get('module_installer')->install(['block', 'search']);
    $this->rebuildContainer();
    $this->container->get('router.builder')->rebuild();
    $this->drupalLogin($this->rootUser);

    $this->drupalPlaceBlock('search_form_block', ['weight' => -5]);
    $this->drupalPlaceBlock('ajax_forms_test_block');

    $this->drupalGet('');
    $session = $this->getSession();

    // Select first option and trigger ajax update.
    $session->getPage()->selectFieldOption('edit-test1', 'option1');

    // DOM update: The InsertCommand in the AJAX response changes the text
    // in the option element to 'Option1!!!'.
    $opt1_selector = $this->assertSession()->waitForElement('css', "select[data-drupal-selector='edit-test1'] option:contains('Option 1!!!')");
    $this->assertNotEmpty($opt1_selector);
    $this->assertTrue($opt1_selector->isSelected());

    // Confirm option 3 exists.
    $page = $session->getPage();
    $opt3_selector = $page->find('xpath', '//select[@data-drupal-selector="edit-test1"]//option[@value="option3"]');
    $this->assertNotEmpty($opt3_selector);

    // Confirm success message appears after a submit.
    $page->findButton('edit-submit')->click();
    $this->assertSession()->waitForButton('edit-submit');
    $updated_page = $session->getPage();
    $updated_page->hasContent('Submission successful.');
  }

  /**
   * Tests AJAX forms on pages with a query string.
   *
   * Ensures the AJAX request URL preserves the page's query parameters
   * alongside the Drupal AJAX marker parameters.
   */
  public function testQueryString() {
    $this->container->get('module_installer')->install(['block']);
    $this->drupalPlaceBlock('ajax_forms_test_block');

    $this->drupalLogin($this->rootUser);
    $url = Url::fromRoute('entity.user.canonical', ['user' => $this->rootUser->id()], ['query' => ['foo' => 'bar']]);
    $this->drupalGet($url);

    $session = $this->getSession();
    // Select first option and trigger ajax update.
    $session->getPage()->selectFieldOption('edit-test1', 'option1');

    // DOM update: The InsertCommand in the AJAX response changes the text
    // in the option element to 'Option1!!!'.
    $opt1_selector = $this->assertSession()->waitForElement('css', "option:contains('Option 1!!!')");
    $this->assertNotEmpty($opt1_selector);

    // The AJAX request must carry both the original query string and the
    // AJAX form-request marker parameters.
    $url->setOption('query', [
      'foo' => 'bar',
      FormBuilderInterface::AJAX_FORM_REQUEST => 1,
      MainContentViewSubscriber::WRAPPER_FORMAT => 'drupal_ajax',
    ]);
    $this->assertUrl($url);
  }

}
| gpl-2.0 |
baxtree/OKBook | sites/all/modules/neologism/includes/rap/sparql/SparqlEngine/ResultRenderer/XML.php | 4523 | <?php
require_once RDFAPI_INCLUDE_DIR . 'sparql/SparqlEngineDb/ResultRenderer.php';
/**
* XML result renderer for SparqlEngine
*
* @author Tobias Gauß <[email protected]>
* @author Christian Weiske <[email protected]>
* @license http://www.gnu.org/licenses/lgpl.html LGPL
*
* @package sparql
*/
class SparqlEngine_ResultRenderer_XML implements SparqlEngine_ResultRenderer
{
    /**
     * Converts the database results into the output format
     * and returns the result.
     *
     * @param array $arVartable Variable table (array of rows for SELECT,
     *                          boolean for ASK, NamedGraphMem for CONSTRUCT)
     * @param Query $query SPARQL query object
     * @param SparqlEngine $engine Sparql Engine to query the database
     * @return string XML result
     */
    public function convertFromResult($arVartable, Query $query, SparqlEngine $engine)
    {
        $this->query  = $query;
        $this->engine = $engine;
        $this->dataset = $engine->getDataset();

        // CONSTRUCT/DESCRIBE queries produce a graph: serialize it as RDF.
        if ($arVartable instanceof NamedGraphMem) {
            return $arVartable->writeRdfToString();
        }

        $result = '<sparql xmlns="http://www.w3.org/2005/sparql-results#">';
        $header = '<head>';

        if (is_array($arVartable)) {
            // SELECT result: emit one <variable> per projected variable.
            $vars = $this->query->getResultVars();
            $header = '<head>';
            foreach ($vars as $value) {
                $header = $header
                    . '<variable name="' . substr($value, 1) . '"/>';
            }
            $header = $header . '</head>';

            // Build the <results> attributes from the query's modifiers.
            $solm = $this->query->getSolutionModifier();
            $sel  = $this->query->getResultForm();
            $distinct = 'false';
            if ($sel == 'select distinct') {
                $distinct = 'true';
            }
            $ordered = 'false';
            if ($solm['order by'] != 0) {
                $ordered = 'true';
            }

            $results = '<results ordered="' . $ordered . '" distinct="' . $distinct . '">';
            foreach ($arVartable as $value) {
                $results = $results . '<result>';
                foreach ($value as $varname => $varvalue) {
                    $results = $results
                        . $this->_getBindingString(
                            substr($varname, 1),
                            $varvalue
                        );
                }
                $results = $results . '</result>';
            }
            $results = $results . '</results>';
        } else {
            // ASK result. Fix: the original referenced an undefined
            // variable $vartable here; render the boolean result as the
            // 'true'/'false' text required by the SPARQL XML results format.
            $results = '</head><boolean>'
                . ($arVartable ? 'true' : 'false')
                . '</boolean>';
        }

        $result = $result . $header . $results . '</sparql>';
        // Round-trip through SimpleXML to normalise the document.
        $result = simplexml_load_string($result);
        return $result->asXML();
    }//public function convertFromResult($arVartable, Query $query, SparqlEngine $engine)

    /**
     * Helper Function for function buildXmlResult($vartable). Generates
     * an xml string for a single variable an their corresponding value.
     *
     * @param String $varname  The variables name
     * @param Node   $varvalue The value of the variable
     * @return String The xml string
     */
    protected function _getBindingString($varname, $varvalue)
    {
        $binding = '<binding name="' . $varname . '">';
        $value = '<unbound/>';
        if ($varvalue instanceof BlankNode) {
            $value = '<bnode>' . $varvalue->getLabel() . '</bnode>';
        } else if ($varvalue instanceof Resource) {
            $value = '<uri>' . $varvalue->getUri() . '</uri>';
        } else if ($varvalue instanceof Literal) {
            $label = htmlspecialchars($varvalue->getLabel());
            $value = '<literal>' . $label . '</literal>';
            // A datatype annotation takes the plain form's place...
            if ($varvalue->getDatatype() != null) {
                $value = '<literal datatype="'
                    . $varvalue->getDatatype() . '">'
                    . $label
                    . '</literal>';
            }
            // ...and a language tag overrides the datatype form, matching
            // the original renderer's precedence.
            if ($varvalue->getLanguage() != null) {
                $value = '<literal xml:lang="'
                    . $varvalue->getLanguage() . '">'
                    . $label
                    . '</literal>';
            }
        }
        $binding = $binding . $value . '</binding>';
        return $binding;
    }//protected function _getBindingString($varname, $varvalue)
}//class SparqlEngine_ResultRenderer_XML implements SparqlEngine_ResultRenderer
MonsieurTweek/PitchMyGame | kernel/shop/discountgroupedit.php | 1371 | <?php
/**
* @copyright Copyright (C) 1999-2012 eZ Systems AS. All rights reserved.
* @license http://www.gnu.org/licenses/gpl-2.0.txt GNU General Public License v2
* @version 2012.6
* @package kernel
*/
$module = $Params['Module'];

// Fetch an existing discount group when an ID was supplied; otherwise
// create a new one.
$discountGroupID = null;
if ( isset( $Params["DiscountGroupID"] ) )
    $discountGroupID = $Params["DiscountGroupID"];

if ( is_numeric( $discountGroupID ) )
{
    $discountGroup = eZDiscountRule::fetch( $discountGroupID );
}
else
{
    $discountGroup = eZDiscountRule::create();
    $discountGroupID = $discountGroup->attribute( "id" );
}

$http = eZHTTPTool::instance();

// Discard: return to the discount group list without saving anything.
if ( $http->hasPostVariable( "DiscardButton" ) )
{
    $module->redirectTo( $module->functionURI( "discountgroup" ) . "/" );
    return;
}

// Apply: store the (possibly renamed) group and return to the list.
if ( $http->hasPostVariable( "ApplyButton" ) )
{
    // Fix: default to an empty name so a missing post variable does not
    // leave $name undefined below.
    $name = '';
    if ( $http->hasPostVariable( "discount_group_name" ) )
    {
        $name = $http->postVariable( "discount_group_name" );
    }
    $discountGroup->setAttribute( "name", $name );
    $discountGroup->store();
    $module->redirectTo( $module->functionURI( "discountgroup" ) . "/" );
    return;
}

$module->setTitle( "Editing discount group" );

$tpl = eZTemplate::factory();
$tpl->setVariable( "module", $module );
$tpl->setVariable( "discount_group", $discountGroup );

$Result = array();
$Result['content'] = $tpl->fetch( "design:shop/discountgroupedit.tpl" );
| gpl-2.0 |
bellodox/joomla25 | components/com_foxcontact/lib/loader.php | 2103 | <?php defined('_JEXEC') or die('Restricted access');
/*
This file is part of "Fox Joomla Extensions".
You can redistribute it and/or modify it under the terms of the GNU General Public License
GNU/GPLv3 http://www.gnu.org/licenses/gpl-3.0.html
You have the freedom:
* to use this software for both commercial and non-commercial purposes
* to share, copy, distribute and install this software and charge for it if you wish.
Under the following conditions:
* You must attribute the work to the original author by leaving untouched the link "powered by",
except if you obtain a "registerd version" http://www.fox.ra.it/forum/14-licensing/151-remove-the-backlink-powered-by-fox-contact.html
Author: Demis Palma
Documentation at http://www.fox.ra.it/forum/2-documentation.html
*/
abstract class Loader
{
	/** File extension / subdirectory served by the concrete loader (e.g. "php"). */
	abstract protected function type();
	/** Emits content-type specific HTTP headers. */
	abstract protected function http_headers();
	/** Emits any markup that must precede the loaded content. */
	abstract protected function content_header();
	/** Emits any markup that must follow the loaded content. */
	abstract protected function content_footer();

	/**
	 * Renders the requested resource and terminates the Joomla application.
	 * Order: anti-cache headers, subclass HTTP headers, header markup,
	 * the file itself, footer markup.
	 */
	public function Show()
	{
		$this->headers();
		$this->http_headers();
		$this->content_header();
		$this->load();
		$this->content_footer();
		//die();
		JFactory::getApplication()->close();
	}

	/** Sends headers that forbid any client- or proxy-side caching. */
	private function headers()
	{
		// Prepare some useful headers
		header("Expires: " . gmdate("D, d M Y H:i:s") . " GMT");
		header("Last-Modified: " . gmdate("D, d M Y H:i:s") . " GMT");
		// must not be cached by the client browser or any proxy
		header("Cache-Control: no-store, no-cache, must-revalidate");
		header("Cache-Control: post-check=0, pre-check=0", false);
		header("Pragma: no-cache");
	}

	/**
	 * Includes the requested file from the loader's type directory.
	 * SECURITY: the strict [a-z_-] whitelist below is what prevents path
	 * traversal / local file inclusion from the user-supplied "filename"
	 * request variable — it must not be loosened.
	 * NOTE(review): if the file does not exist, realpath() returns false
	 * and require_once will raise a fatal error — presumably acceptable
	 * here; confirm.
	 */
	protected function load()
	{
		// Complete the script name with its path
		$filename = JRequest::getVar("filename", "", "GET");
		// Only admit lowercase a-z, underscore and minus. Forbid numbers, symbols, slashes and other stuff.
		// For your security, *don't* touch the following regular expression.
		preg_match('/^[a-z_-]+$/', $filename) or $filename = "invalid";
		$local_name = realpath(dirname(__FILE__) . "/../" . $this->type() . "/" . $filename . "." . $this->type());
		require_once $local_name;
	}
}
| gpl-2.0 |
ahuraa/TrinityCore-Blabla | src/server/scripts/Kalimdor/boss_azuregos.cpp | 5175 | /*
* Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* ScriptData
SDName: Boss_Azuregos
SD%Complete: 90
SDComment: Teleport not included, spell reflect not effecting dots (Core problem)
SDCategory: Azshara
EndScriptData */
#include "ScriptMgr.h"
#include "ScriptedCreature.h"
// Scripted yell id (negative ids reference script_texts in the DB).
enum Say
{
    SAY_TELEPORT    = -1000100
};
// Spell ids cast by Azuregos during the encounter.
enum Spells
{
    SPELL_MARKOFFROST   = 23182,
    SPELL_MANASTORM     = 21097,
    SPELL_CHILL         = 21098,
    SPELL_FROSTBREATH   = 21099,
    SPELL_REFLECT       = 22067,
    SPELL_CLEAVE        = 8255, //Perhaps not right ID
    SPELL_ENRAGE        = 23537
};
// World-boss Azuregos (Azshara). Drives all abilities off countdown
// timers decremented each AI update tick.
class boss_azuregos : public CreatureScript
{
public:
    boss_azuregos() : CreatureScript("boss_azuregos") { }

    CreatureAI* GetAI(Creature* creature) const
    {
        return new boss_azuregosAI (creature);
    }

    struct boss_azuregosAI : public ScriptedAI
    {
        boss_azuregosAI(Creature* creature) : ScriptedAI(creature) {}

        // Remaining milliseconds until each ability fires.
        uint32 MarkOfFrostTimer;   // currently unused (cast is commented out below)
        uint32 ManaStormTimer;
        uint32 ChillTimer;
        uint32 BreathTimer;
        uint32 TeleportTimer;
        uint32 ReflectTimer;
        uint32 CleaveTimer;
        uint32 EnrageTimer;        // unused; enrage is triggered by health check
        bool Enraged;              // ensures the enrage is only cast once

        // Re-arm all timers with their initial (partly randomized) values.
        void Reset()
        {
            MarkOfFrostTimer = 35000;
            ManaStormTimer = urand(5000, 17000);
            ChillTimer = urand(10000, 30000);
            BreathTimer = urand(2000, 8000);
            TeleportTimer = 30000;
            ReflectTimer = urand(15000, 30000);
            CleaveTimer = 7000;
            EnrageTimer = 0;

            Enraged = false;
        }

        void EnterCombat(Unit* /*who*/) {}

        void UpdateAI(const uint32 diff)
        {
            //Return since we have no target
            if (!UpdateVictim())
                return;

            // Teleport: yell, lift every player on the threat list 3 yd up
            // at the boss' position, then wipe threat.
            if (TeleportTimer <= diff)
            {
                DoScriptText(SAY_TELEPORT, me);
                ThreatContainer::StorageType threatlist = me->getThreatManager().getThreatList();
                ThreatContainer::StorageType::const_iterator i = threatlist.begin();
                for (i = threatlist.begin(); i != threatlist.end(); ++i)
                {
                    Unit* unit = Unit::GetUnit(*me, (*i)->getUnitGuid());
                    if (unit && (unit->GetTypeId() == TYPEID_PLAYER))
                    {
                        DoTeleportPlayer(unit, me->GetPositionX(), me->GetPositionY(), me->GetPositionZ()+3, unit->GetOrientation());
                    }
                }

                DoResetThreat();
                TeleportTimer = 30000;
            } else TeleportTimer -= diff;

            // //MarkOfFrostTimer
            // if (MarkOfFrostTimer <= diff)
            // {
            //     DoCast(me->getVictim(), SPELL_MARKOFFROST);
            //     MarkOfFrostTimer = 25000;
            // } else MarkOfFrostTimer -= diff;

            //ChillTimer
            if (ChillTimer <= diff)
            {
                DoCast(me->getVictim(), SPELL_CHILL);
                ChillTimer = urand(13000, 25000);
            } else ChillTimer -= diff;

            //BreathTimer
            if (BreathTimer <= diff)
            {
                DoCast(me->getVictim(), SPELL_FROSTBREATH);
                BreathTimer = urand(10000, 15000);
            } else BreathTimer -= diff;

            // Mana Storm targets a random unit on the threat list.
            //ManaStormTimer
            if (ManaStormTimer <= diff)
            {
                if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0))
                    DoCast(target, SPELL_MANASTORM);
                ManaStormTimer = urand(7500, 12500);
            } else ManaStormTimer -= diff;

            //ReflectTimer
            if (ReflectTimer <= diff)
            {
                DoCast(me, SPELL_REFLECT);
                ReflectTimer = urand(20000, 35000);
            } else ReflectTimer -= diff;

            //CleaveTimer
            if (CleaveTimer <= diff)
            {
                DoCast(me->getVictim(), SPELL_CLEAVE);
                CleaveTimer = 7000;
            } else CleaveTimer -= diff;

            // One-shot enrage below 26% health.
            //EnrageTimer
            if (HealthBelowPct(26) && !Enraged)
            {
                DoCast(me, SPELL_ENRAGE);
                Enraged = true;
            }

            DoMeleeAttackIfReady();
        }
    };
};
// Registers the Azuregos script with the script manager.
void AddSC_boss_azuregos()
{
    new boss_azuregos();
}
| gpl-2.0 |
ndtrung81/lammps | src/OPENMP/pair_lj_cut_tip4p_long_soft_omp.cpp | 15594 | // clang-format off
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
Steve Plimpton, [email protected]
This software is distributed under the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Axel Kohlmeyer (Temple U)
------------------------------------------------------------------------- */
#include "omp_compat.h"
#include <cmath>
#include "pair_lj_cut_tip4p_long_soft_omp.h"
#include "atom.h"
#include "domain.h"
#include "comm.h"
#include "force.h"
#include "neighbor.h"
#include "error.h"
#include "memory.h"
#include "neigh_list.h"
#include "suffix.h"
using namespace LAMMPS_NS;
#define EWALD_F 1.12837917
#define EWALD_P 0.3275911
#define A1 0.254829592
#define A2 -0.284496736
#define A3 1.421413741
#define A4 -1.453152027
#define A5 1.061405429
/* ---------------------------------------------------------------------- */
// Constructor: marks the pair style as OpenMP-suffixed and disables
// rRESPA support; the per-thread TIP4P buffers are allocated lazily in
// compute().
PairLJCutTIP4PLongSoftOMP::PairLJCutTIP4PLongSoftOMP(LAMMPS *lmp) :
  PairLJCutTIP4PLongSoft(lmp), ThrOMP(lmp, THR_PAIR)
{
  suffix_flag |= Suffix::OMP;
  respa_enable = 0;

  newsite_thr = nullptr;
  hneigh_thr = nullptr;

  // TIP4P cannot compute virial as F dot r
  // due to finding bonded H atoms which are not near O atom
  no_virial_fdotr_compute = 1;
}
/* ---------------------------------------------------------------------- */
// Destructor: releases the per-atom TIP4P caches (H-neighbor indices and
// computed M-site coordinates).
PairLJCutTIP4PLongSoftOMP::~PairLJCutTIP4PLongSoftOMP()
{
  memory->destroy(hneigh_thr);
  memory->destroy(newsite_thr);
}
/* ---------------------------------------------------------------------- */
// Top-level force computation: maintains the per-atom TIP4P caches, then
// dispatches the templated eval() over all OpenMP threads with the right
// energy/virial flag combination.
void PairLJCutTIP4PLongSoftOMP::compute(int eflag, int vflag)
{
  ev_init(eflag,vflag);

  const int nlocal = atom->nlocal;
  const int nall = nlocal + atom->nghost;

  // reallocate hneigh_thr & newsite_thr if necessary
  // initialize hneigh_thr[0] to -1 on steps when reneighboring occurred
  // initialize hneigh_thr[2] to 0 every step

  if (atom->nmax > nmax) {
    nmax = atom->nmax;
    memory->destroy(hneigh_thr);
    memory->create(hneigh_thr,nmax,"pair:hneigh_thr");
    memory->destroy(newsite_thr);
    memory->create(newsite_thr,nmax,"pair:newsite_thr");
  }

  int i;
  // tag entire list as completely invalid after a neighbor
  // list update, since that can change the order of atoms.
  if (neighbor->ago == 0)
    for (i = 0; i < nall; i++) hneigh_thr[i].a = -1;

  // indicate that the coordinates for the M point need to
  // be updated. this needs to be done in every step.
  for (i = 0; i < nall; i++) hneigh_thr[i].t = 0;

  const int nthreads = comm->nthreads;
  const int inum = list->inum;
#if defined(_OPENMP)
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(eflag,vflag)
#endif
  {
    int ifrom, ito, tid;

    loop_setup_thr(ifrom, ito, tid, inum, nthreads);
    ThrData *thr = fix->get_thr(tid);
    thr->timer(Timer::START);
    ev_setup_thr(eflag, vflag, nall, eatom, vatom, nullptr, thr);

    // Select the eval() instantiation matching the energy/virial flags so
    // the inner loop carries no dead bookkeeping.
    if (evflag) {
      if (eflag) {
        if (vflag) eval<1,1,1>(ifrom, ito, thr);
        else eval<1,1,0>(ifrom, ito, thr);
      } else {
        if (vflag) eval<1,0,1>(ifrom, ito, thr);
        else eval<1,0,0>(ifrom, ito, thr);
      }
    } else eval<0,0,0>(ifrom, ito, thr);

    thr->timer(Timer::PAIR);
    reduce_thr(this, eflag, vflag, thr);
  } // end of omp parallel region
}
/* ---------------------------------------------------------------------- */
// Per-thread pair loop over atoms [iifrom,iito). Template flags select at
// compile time whether energy (EFLAG), virial (VFLAG) and any tallying
// (EVFLAG) are accumulated. LJ acts between the real atom positions;
// Coulomb acts between the TIP4P charge sites (M site for O atoms), with
// the M-site force redistributed onto O and the two bonded H atoms.
template <int EVFLAG, int EFLAG, int VFLAG>
void PairLJCutTIP4PLongSoftOMP::eval(int iifrom, int iito, ThrData * const thr)
{
  double qtmp,xtmp,ytmp,ztmp,delx,dely,delz,evdwl,ecoul;
  double r,rsq,forcecoul,forcelj,cforce;
  double factor_coul,factor_lj;
  double grij,expm2,prefactor,t,erfc;
  double denc, denlj, r4sig6;
  double v[6];
  double fdx,fdy,fdz,fOx,fOy,fOz,fHx,fHy,fHz;
  dbl3_t x1,x2,xH1,xH2;

  int *ilist,*jlist,*numneigh,**firstneigh;
  int i,j,ii,jj,jnum,itype,jtype,key;
  int n,vlist[6];
  int iH1,iH2,jH1,jH2;

  evdwl = ecoul = 0.0;

  const dbl3_t * _noalias const x = (dbl3_t *) atom->x[0];
  dbl3_t * _noalias const f = (dbl3_t *) thr->get_f()[0];
  const double * _noalias const q = atom->q;
  const int * _noalias const type = atom->type;
  const int nlocal = atom->nlocal;
  const double * _noalias const special_coul = force->special_coul;
  const double * _noalias const special_lj = force->special_lj;
  const double qqrd2e = force->qqrd2e;
  // Coulomb cutoff padded by twice the O->M displacement, so O atoms whose
  // M site might still be in range are not skipped prematurely.
  const double cut_coulsqplus = (cut_coul+2.0*qdist) * (cut_coul+2.0*qdist);

  double fxtmp,fytmp,fztmp;

  ilist = list->ilist;
  numneigh = list->numneigh;
  firstneigh = list->firstneigh;

  // loop over neighbors of my atoms

  for (ii = iifrom; ii < iito; ++ii) {
    i = ilist[ii];
    qtmp = q[i];
    xtmp = x[i].x;
    ytmp = x[i].y;
    ztmp = x[i].z;
    itype = type[i];

    // if atom I = water O, set x1 = offset charge site
    // else x1 = x of atom I
    // NOTE: to make this part thread safe, we need to
    // make sure that the hneigh_thr[][] entries only get
    // updated, when all data is in place. worst case,
    // some calculation is repeated, but since the results
    // will be the same, there is no race condition.
    if (itype == typeO) {
      if (hneigh_thr[i].a < 0) {
        iH1 = atom->map(atom->tag[i] + 1);
        iH2 = atom->map(atom->tag[i] + 2);
        if (iH1 == -1 || iH2 == -1)
          error->one(FLERR,"TIP4P hydrogen is missing");
        if (atom->type[iH1] != typeH || atom->type[iH2] != typeH)
          error->one(FLERR,"TIP4P hydrogen has incorrect atom type");
        // set iH1,iH2 to index of closest image to O
        iH1 = domain->closest_image(i,iH1);
        iH2 = domain->closest_image(i,iH2);
        compute_newsite_thr(x[i],x[iH1],x[iH2],newsite_thr[i]);
        hneigh_thr[i].t = 1;
        hneigh_thr[i].b = iH2;
        hneigh_thr[i].a = iH1;
      } else {
        iH1 = hneigh_thr[i].a;
        iH2 = hneigh_thr[i].b;
        if (hneigh_thr[i].t == 0) {
          compute_newsite_thr(x[i],x[iH1],x[iH2],newsite_thr[i]);
          hneigh_thr[i].t = 1;
        }
      }
      x1 = newsite_thr[i];
    } else x1 = x[i];

    jlist = firstneigh[i];
    jnum = numneigh[i];
    fxtmp=fytmp=fztmp=0.0;

    for (jj = 0; jj < jnum; jj++) {
      j = jlist[jj];
      factor_lj = special_lj[sbmask(j)];
      factor_coul = special_coul[sbmask(j)];
      j &= NEIGHMASK;

      delx = xtmp - x[j].x;
      dely = ytmp - x[j].y;
      delz = ztmp - x[j].z;
      rsq = delx*delx + dely*dely + delz*delz;
      jtype = type[j];

      // LJ interaction based on true rsq

      if (rsq < cut_ljsq[itype][jtype]) {
        // Soft-core LJ: denominator shifted by the softness term lj3.
        r4sig6 = rsq*rsq / lj2[itype][jtype];
        denlj = lj3[itype][jtype] + rsq*r4sig6;
        forcelj = lj1[itype][jtype] * epsilon[itype][jtype] *
          (48.0*r4sig6/(denlj*denlj*denlj) - 24.0*r4sig6/(denlj*denlj));

        forcelj *= factor_lj;
        fxtmp += delx*forcelj;
        fytmp += dely*forcelj;
        fztmp += delz*forcelj;
        f[j].x -= delx*forcelj;
        f[j].y -= dely*forcelj;
        f[j].z -= delz*forcelj;

        if (EFLAG) {
          evdwl = lj1[itype][jtype] * 4.0 * epsilon[itype][jtype] *
            (1.0/(denlj*denlj) - 1.0/denlj) - offset[itype][jtype];

          evdwl *= factor_lj;
        } else evdwl = 0.0;

        if (EVFLAG) ev_tally_thr(this,i,j,nlocal, /* newton_pair = */ 1,
                                 evdwl,0.0,forcelj,delx,dely,delz,thr);
      }

      // adjust rsq and delxyz for off-site O charge(s) if necessary
      // but only if they are within reach
      // NOTE: to make this part thread safe, we need to
      // make sure that the hneigh_thr[][] entries only get
      // updated, when all data is in place. worst case,
      // some calculation is repeated, but since the results
      // will be the same, there is no race condition.
      if (rsq < cut_coulsqplus) {
        if (itype == typeO || jtype == typeO) {

          // if atom J = water O, set x2 = offset charge site
          // else x2 = x of atom J

          if (jtype == typeO) {
            if (hneigh_thr[j].a < 0) {
              jH1 = atom->map(atom->tag[j] + 1);
              jH2 = atom->map(atom->tag[j] + 2);
              if (jH1 == -1 || jH2 == -1)
                error->one(FLERR,"TIP4P hydrogen is missing");
              if (atom->type[jH1] != typeH || atom->type[jH2] != typeH)
                error->one(FLERR,"TIP4P hydrogen has incorrect atom type");
              // set jH1,jH2 to closest image to O
              jH1 = domain->closest_image(j,jH1);
              jH2 = domain->closest_image(j,jH2);
              compute_newsite_thr(x[j],x[jH1],x[jH2],newsite_thr[j]);
              hneigh_thr[j].t = 1;
              hneigh_thr[j].b = jH2;
              hneigh_thr[j].a = jH1;
            } else {
              jH1 = hneigh_thr[j].a;
              jH2 = hneigh_thr[j].b;
              if (hneigh_thr[j].t == 0) {
                compute_newsite_thr(x[j],x[jH1],x[jH2],newsite_thr[j]);
                hneigh_thr[j].t = 1;
              }
            }
            x2 = newsite_thr[j];
          } else x2 = x[j];

          delx = x1.x - x2.x;
          dely = x1.y - x2.y;
          delz = x1.z - x2.z;
          rsq = delx*delx + dely*dely + delz*delz;
        }

        // Coulombic interaction based on modified rsq

        if (rsq < cut_coulsq) {
          r = sqrt(rsq);
          // Ewald real-space damping: erfc approximated via the
          // Abramowitz & Stegun polynomial (A1..A5, EWALD_P).
          grij = g_ewald * r;
          expm2 = exp(-grij*grij);
          t = 1.0 / (1.0 + EWALD_P*grij);
          erfc = t * (A1+t*(A2+t*(A3+t*(A4+t*A5)))) * expm2;

          // Soft-core Coulomb denominator.
          denc = sqrt(lj4[itype][jtype] + rsq);
          prefactor = qqrd2e * lj1[itype][jtype] * qtmp*q[j] / (denc*denc*denc);

          forcecoul = prefactor * (erfc + EWALD_F*grij*expm2);
          if (factor_coul < 1.0) {
            forcecoul -= (1.0-factor_coul)*prefactor;
          }

          cforce = forcecoul;

          // if i,j are not O atoms, force is applied directly
          // if i or j are O atoms, force is on fictitious atom & partitioned
          // force partitioning due to Feenstra, J Comp Chem, 20, 786 (1999)
          // f_f = fictitious force, fO = f_f (1 - 2 alpha), fH = alpha f_f
          // preserves total force and torque on water molecule
          // virial = sum(r x F) where each water's atoms are near xi and xj
          // vlist stores 2,4,6 atoms whose forces contribute to virial

          if (EVFLAG) {
            n = 0;
            key = 0;
          }

          if (itype != typeO) {
            fxtmp += delx * cforce;
            fytmp += dely * cforce;
            fztmp += delz * cforce;

            if (VFLAG) {
              v[0] = x[i].x * delx * cforce;
              v[1] = x[i].y * dely * cforce;
              v[2] = x[i].z * delz * cforce;
              v[3] = x[i].x * dely * cforce;
              v[4] = x[i].x * delz * cforce;
              v[5] = x[i].y * delz * cforce;
            }
            if (EVFLAG) vlist[n++] = i;

          } else {
            if (EVFLAG) key++;

            fdx = delx*cforce;
            fdy = dely*cforce;
            fdz = delz*cforce;

            fOx = fdx*(1 - alpha);
            fOy = fdy*(1 - alpha);
            fOz = fdz*(1 - alpha);

            fHx = 0.5*alpha * fdx;
            fHy = 0.5*alpha * fdy;
            fHz = 0.5*alpha * fdz;

            fxtmp += fOx;
            fytmp += fOy;
            fztmp += fOz;

            f[iH1].x += fHx;
            f[iH1].y += fHy;
            f[iH1].z += fHz;

            f[iH2].x += fHx;
            f[iH2].y += fHy;
            f[iH2].z += fHz;

            if (VFLAG) {
              xH1 = x[iH1];
              xH2 = x[iH2];
              v[0] = x[i].x*fOx + xH1.x*fHx + xH2.x*fHx;
              v[1] = x[i].y*fOy + xH1.y*fHy + xH2.y*fHy;
              v[2] = x[i].z*fOz + xH1.z*fHz + xH2.z*fHz;
              v[3] = x[i].x*fOy + xH1.x*fHy + xH2.x*fHy;
              v[4] = x[i].x*fOz + xH1.x*fHz + xH2.x*fHz;
              v[5] = x[i].y*fOz + xH1.y*fHz + xH2.y*fHz;
            }
            if (EVFLAG) {
              vlist[n++] = i;
              vlist[n++] = iH1;
              vlist[n++] = iH2;
            }
          }

          if (jtype != typeO) {
            f[j].x -= delx * cforce;
            f[j].y -= dely * cforce;
            f[j].z -= delz * cforce;

            if (VFLAG) {
              v[0] -= x[j].x * delx * cforce;
              v[1] -= x[j].y * dely * cforce;
              v[2] -= x[j].z * delz * cforce;
              v[3] -= x[j].x * dely * cforce;
              v[4] -= x[j].x * delz * cforce;
              v[5] -= x[j].y * delz * cforce;
            }
            if (EVFLAG) vlist[n++] = j;

          } else {
            if (EVFLAG) key += 2;

            fdx = -delx*cforce;
            fdy = -dely*cforce;
            fdz = -delz*cforce;

            fOx = fdx*(1 - alpha);
            fOy = fdy*(1 - alpha);
            fOz = fdz*(1 - alpha);

            fHx = 0.5*alpha * fdx;
            fHy = 0.5*alpha * fdy;
            fHz = 0.5*alpha * fdz;

            f[j].x += fOx;
            f[j].y += fOy;
            f[j].z += fOz;

            f[jH1].x += fHx;
            f[jH1].y += fHy;
            f[jH1].z += fHz;

            f[jH2].x += fHx;
            f[jH2].y += fHy;
            f[jH2].z += fHz;

            if (VFLAG) {
              xH1 = x[jH1];
              xH2 = x[jH2];
              v[0] += x[j].x*fOx + xH1.x*fHx + xH2.x*fHx;
              v[1] += x[j].y*fOy + xH1.y*fHy + xH2.y*fHy;
              v[2] += x[j].z*fOz + xH1.z*fHz + xH2.z*fHz;
              v[3] += x[j].x*fOy + xH1.x*fHy + xH2.x*fHy;
              v[4] += x[j].x*fOz + xH1.x*fHz + xH2.x*fHz;
              v[5] += x[j].y*fOz + xH1.y*fHz + xH2.y*fHz;
            }
            if (EVFLAG) {
              vlist[n++] = j;
              vlist[n++] = jH1;
              vlist[n++] = jH2;
            }
          }

          if (EFLAG) {
            prefactor = qqrd2e * lj1[itype][jtype] * qtmp*q[j] / denc;
            ecoul = prefactor*erfc;
            if (factor_coul < 1.0) ecoul -= (1.0-factor_coul)*prefactor;
          } else ecoul = 0.0;

          if (EVFLAG) ev_tally_list_thr(this,key,vlist,v,ecoul,alpha,thr);
        }
      }
    }
    f[i].x += fxtmp;
    f[i].y += fytmp;
    f[i].z += fztmp;
  }
}
/* ----------------------------------------------------------------------
   compute position xM of fictitious charge site for O atom and 2 H atoms
   return it as xM
------------------------------------------------------------------------- */

void PairLJCutTIP4PLongSoftOMP::compute_newsite_thr(const dbl3_t &xO,
                                                    const dbl3_t &xH1,
                                                    const dbl3_t &xH2,
                                                    dbl3_t &xM) const
{
  // O->H displacement vectors for the two hydrogens.
  const double hx1 = xH1.x - xO.x;
  const double hy1 = xH1.y - xO.y;
  const double hz1 = xH1.z - xO.z;

  const double hx2 = xH2.x - xO.x;
  const double hy2 = xH2.y - xO.y;
  const double hz2 = xH2.z - xO.z;

  // The M site lies on the H-O-H bisector: displaced from the oxygen by
  // (alpha/2) times the sum of the two O->H vectors.
  const double scale = alpha * 0.5;
  xM.x = xO.x + scale * (hx1 + hx2);
  xM.y = xO.y + scale * (hy1 + hy2);
  xM.z = xO.z + scale * (hz1 + hz2);
}
/* ---------------------------------------------------------------------- */

double PairLJCutTIP4PLongSoftOMP::memory_usage()
{
  // Thread-private accumulator storage plus everything the serial
  // base class already accounts for.
  return memory_usage_thr() + PairLJCutTIP4PLongSoft::memory_usage();
}
| gpl-2.0 |
alexknowshtml/discourse | app/assets/javascripts/discourse/routes/discourse_location.js | 4375 | /*global historyState:true */
/**
@module Discourse
*/
var get = Ember.get, set = Ember.set;
var popstateReady = false;
/**
  `Ember.DiscourseLocation` implements the location API using the browser's
  `history.pushState` API.

  @class DiscourseLocation
  @namespace Discourse
  @extends Ember.Object
*/
Ember.DiscourseLocation = Ember.Object.extend({

  init: function() {
    set(this, 'location', get(this, 'location') || window.location);

    // jQuery only copies whitelisted properties onto its normalized event
    // object; register 'state' so popstate handlers can read e.state.
    if ( $.inArray('state', $.event.props) < 0 ) {
      jQuery.event.props.push('state');
    }
    this.initState();
  },

  /**
    @private

    Used to set state on first call to setURL

    @method initState
  */
  initState: function() {
    this.replaceState(this.formatURL(this.getURL()));
    set(this, 'history', window.history);
  },

  /**
    @private

    Returns the current `location.pathname` without rootURL

    @method getURL
  */
  getURL: function() {
    var rootURL = (Discourse.BaseUri === undefined ? "/" : Discourse.BaseUri),
        url = get(this, 'location').pathname;

    rootURL = rootURL.replace(/\/$/, '');
    url = url.replace(rootURL, '');

    return url;
  },

  /**
    @private

    Uses `history.pushState` to update the url without a page reload.

    @method setURL
    @param path {String}
  */
  setURL: function(path) {
    path = this.formatURL(path);
    if (this.getState() && this.getState().path !== path) {
      popstateReady = true;
      this.pushState(path);
    }
  },

  /**
    @private

    Uses `history.replaceState` to update the url without a page reload
    or history modification.

    @method replaceURL
    @param path {String}
  */
  replaceURL: function(path) {
    path = this.formatURL(path);
    if (this.getState() && this.getState().path !== path) {
      popstateReady = true;
      this.replaceState(path);
    }
  },

  /**
    @private

    Get the current `history.state`. Falls back to an object built from
    `location.pathname` when the browser has no state entry yet.

    @method getState
  */
  getState: function() {
    historyState = get(this, 'history').state;
    if (historyState) return historyState;

    return {path: window.location.pathname};
  },

  /**
    @private

    Pushes a new state

    @method pushState
    @param path {String}
  */
  pushState: function(path) {
    if (!window.history.pushState) return;
    this.set('currentState', { path: path } );
    window.history.pushState({ path: path }, null, path);
  },

  /**
    @private

    Replaces the current state

    @method replaceState
    @param path {String}
  */
  replaceState: function(path) {
    if (!window.history.replaceState) return;
    this.set('currentState', { path: path } );
    window.history.replaceState({ path: path }, null, path);
  },

  /**
    @private

    Register a callback to be invoked whenever the browser
    history changes, including using forward and back buttons.

    @method onUpdateURL
    @param callback {Function}
  */
  onUpdateURL: function(callback) {
    var guid = Ember.guidFor(this),
        self = this;

    $(window).bind('popstate.ember-location-'+guid, function(e) {
      if (e.state) {
        var currentState = self.get('currentState');
        if (currentState) {
          var url = e.state.path,
              rootURL = (Discourse.BaseUri === undefined ? "/" : Discourse.BaseUri);

          rootURL = rootURL.replace(/\/$/, '');
          url = url.replace(rootURL, '');

          callback(url);
        } else {
          // BUGFIX: inside a jQuery event handler `this` is the DOM target
          // (window), not this location object; the original code called
          // this.set() and silently failed. Use the captured `self`.
          self.set('currentState', e.state);
        }
      }
    });
  },

  /**
    @private

    Used when using `{{action}}` helper.  The url is always appended to the rootURL.

    @method formatURL
    @param url {String}
  */
  formatURL: function(url) {
    var rootURL = (Discourse.BaseUri === undefined ? "/" : Discourse.BaseUri);

    if (url !== '') {
      rootURL = rootURL.replace(/\/$/, '');
    }

    // remove prefix from URL if it is already in url - i.e. /discourse/t/... -> /t/ if rootURL is /discourse
    // this sometimes happens when navigating to already visited location
    if ((rootURL.length > 1) && (url.substring(0, rootURL.length + 1) === (rootURL + "/")))
    {
      url = url.substring(rootURL.length);
    }

    return rootURL + url;
  },

  willDestroy: function() {
    var guid = Ember.guidFor(this);

    Ember.$(window).unbind('popstate.ember-location-'+guid);
  }

});
Ember.Location.registerImplementation('discourse_location', Ember.DiscourseLocation);
| gpl-2.0 |
haovn577/openemr | interface/globals.php | 17783 | <?php
/* $Id$ */
// ------------------------------------------------------------------------ //
// OpenEMR Electronic Medical Records System //
// Copyright (c) 2005-2010 oemr.org //
// <http://www.oemr.org/> //
// ------------------------------------------------------------------------ //
// This program is free software; you can redistribute it and/or modify //
// it under the terms of the GNU General Public License as published by //
// the Free Software Foundation; either version 2 of the License, or //
// (at your option) any later version. //
// //
// You may not change or alter any portion of this comment or credits //
// of supporting developers from this source code or any supporting //
// source code which is considered copyrighted (c) material of the //
// original comment or credit authors. //
// //
// This program is distributed in the hope that it will be useful, //
// but WITHOUT ANY WARRANTY; without even the implied warranty of //
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //
// GNU General Public License for more details. //
// //
// You should have received a copy of the GNU General Public License //
// along with this program; if not, write to the Free Software //
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA //
// ------------------------------------------------------------------------ //
// Is this windows or non-windows? Create a boolean definition.
if (!defined('IS_WINDOWS'))
define('IS_WINDOWS', (stripos(PHP_OS,'WIN') === 0));
// Some important php.ini overrides. Defaults for these values are often
// too small. You might choose to adjust them further.
//
ini_set('memory_limit', '64M');
ini_set('session.gc_maxlifetime', '14400');
/* If the includer didn't specify, assume they want us to "fake" register_globals. */
if (!isset($fake_register_globals)) {
  $fake_register_globals = TRUE;
}

/* Pages with "myadmin" in the URL don't need register_globals. */
$fake_register_globals =
  $fake_register_globals && (strpos($_SERVER['REQUEST_URI'],"myadmin") === FALSE);

// Emulates register_globals = On. Moved to here from the bottom of this file
// to address security issues. Need to change everything requiring this!
//
// SECURITY WARNING: extract() on $_GET/$_POST lets any HTTP request
// overwrite arbitrary global variables (the classic register_globals
// vulnerability). Includers can opt out by setting
// $fake_register_globals = FALSE before including this file; new code
// must never rely on these injected globals.
if ($fake_register_globals) {
  extract($_GET);
  extract($_POST);
}
// This is for sanitization of all escapes.
// (ie. reversing magic quotes if it's set)
if (isset($sanitize_all_escapes) && $sanitize_all_escapes) {
if (get_magic_quotes_gpc()) {
// Recursively reverses the escaping applied by magic_quotes_gpc.
// Values are always unescaped; array keys are unescaped only below the
// top level (PHP does not magic-quote the outermost superglobal keys).
function undoMagicQuotes($array, $topLevel=true) {
  $clean = array();
  foreach ($array as $key => $value) {
    // Top-level keys are left untouched; nested keys were escaped.
    $cleanKey = $topLevel ? $key : stripslashes($key);
    $clean[$cleanKey] = is_array($value)
      ? undoMagicQuotes($value, false)
      : stripslashes($value);
  }
  return $clean;
}
$_GET = undoMagicQuotes($_GET);
$_POST = undoMagicQuotes($_POST);
$_COOKIE = undoMagicQuotes($_COOKIE);
$_REQUEST = undoMagicQuotes($_REQUEST);
}
}
//
// The webserver_root and web_root are now automatically collected.
// If not working, can set manually below.
// Auto collect the full absolute directory path for openemr.
$webserver_root = dirname(dirname(__FILE__));
if (IS_WINDOWS) {
//convert windows path separators
$webserver_root = str_replace("\\","/",$webserver_root);
}
// Auto collect the relative html path, i.e. what you would type into the web
// browser after the server address to get to OpenEMR.
$web_root = substr($webserver_root, strlen($_SERVER['DOCUMENT_ROOT']));
// Ensure web_root starts with a path separator
if (preg_match("/^[^\/]/",$web_root)) {
$web_root = "/".$web_root;
}
// The webserver_root and web_root are now automatically collected in
// real time per above code. If above is not working, can uncomment and
// set manually here:
// $webserver_root = "/var/www/openemr";
// $web_root = "/openemr";
//
// This is the directory that contains site-specific data. Change this
// only if you have some reason to.
$GLOBALS['OE_SITES_BASE'] = "$webserver_root/sites";
// The session name names a cookie stored in the browser.
// If you modify session_name, then need to place the identical name in
// the phpmyadmin file here: openemr/phpmyadmin/libraries/session.inc.php
// at line 71. This was required after embedded new phpmyadmin version on
// 05-12-2009 by Brady. Hopefully will figure out a more appropriate fix.
// Now that restore_session() is implemented in javaScript, session IDs are
// effectively saved in the top level browser window and there is no longer
// any need to change the session name for different OpenEMR instances.
session_name("OpenEMR");
session_start();
// Set the site ID if required. This must be done before any database
// access is attempted. Priority: an explicit ?site= URL parameter, then
// the HTTP Host header (falling back to "default" when no matching site
// directory exists under OE_SITES_BASE).
if (empty($_SESSION['site_id']) || !empty($_GET['site'])) {
  if (!empty($_GET['site'])) {
    $tmp = $_GET['site'];
  }
  else {
    // No site in the URL; only pages that skip authentication may guess
    // the site from the Host header.
    if (!$ignoreAuth) die("Site ID is missing from session data!");
    $tmp = $_SERVER['HTTP_HOST'];
    if (!is_dir($GLOBALS['OE_SITES_BASE'] . "/$tmp")) $tmp = "default";
  }
  // Whitelist check: the site ID becomes part of a filesystem path below,
  // so reject anything but letters, digits, hyphen and dot (blocks "../").
  if (empty($tmp) || preg_match('/[^A-Za-z0-9\\-.]/', $tmp))
    die("Site ID '$tmp' contains invalid characters.");
  if (!isset($_SESSION['site_id']) || $_SESSION['site_id'] != $tmp) {
    $_SESSION['site_id'] = $tmp;
    error_log("Session site ID has been set to '$tmp'"); // debugging
  }
}
// Set the site-specific directory path.
$GLOBALS['OE_SITE_DIR'] = $GLOBALS['OE_SITES_BASE'] . "/" . $_SESSION['site_id'];
require_once($GLOBALS['OE_SITE_DIR'] . "/config.php");
// Collecting the utf8 disable flag from the sqlconf.php file in order
// to set the correct html encoding. utf8 vs iso-8859-1. If flag is set
// then set to iso-8859-1.
require_once(dirname(__FILE__) . "/../library/sqlconf.php");
if (!$disable_utf8_flag) {
ini_set('default_charset', 'utf-8');
$HTML_CHARSET = "UTF-8";
}
else {
ini_set('default_charset', 'iso-8859-1');
$HTML_CHARSET = "ISO-8859-1";
}
// Root directory, relative to the webserver root:
$GLOBALS['rootdir'] = "$web_root/interface";
$rootdir = $GLOBALS['rootdir'];
// Absolute path to the source code include and headers file directory (Full path):
$GLOBALS['srcdir'] = "$webserver_root/library";
// Absolute path to the location of documentroot directory for use with include statements:
$GLOBALS['fileroot'] = "$webserver_root";
// Absolute path to the location of interface directory for use with include statements:
$include_root = "$webserver_root/interface";
// Absolute path to the location of documentroot directory for use with include statements:
$GLOBALS['webroot'] = $web_root;
$GLOBALS['template_dir'] = $GLOBALS['fileroot'] . "/templates/";
$GLOBALS['incdir'] = $include_root;
// Location of the login screen file
$GLOBALS['login_screen'] = $GLOBALS['rootdir'] . "/login_screen.php";
// Variable set for Eligibility Verification [EDI-271] path
$GLOBALS['edi_271_file_path'] = $GLOBALS['OE_SITE_DIR'] . "/edi/";
// Include the translation engine. This will also call sql.inc to
// open the openemr mysql connection.
include_once (dirname(__FILE__) . "/../library/translation.inc.php");
// Include convenience functions with shorter names than "htmlspecialchars"
include_once (dirname(__FILE__) . "/../library/htmlspecialchars.inc.php");
// Include sanitization/checking function (for security)
include_once (dirname(__FILE__) . "/../library/sanitize.inc.php");
// Includes functions for date internationalization
include_once (dirname(__FILE__) . "/../library/date_functions.php");
// Defaults for specific applications.
$GLOBALS['athletic_team'] = false;
$GLOBALS['weight_loss_clinic'] = false;
$GLOBALS['ippf_specific'] = false;
$GLOBALS['cene_specific'] = false;
// Defaults for drugs and products.
$GLOBALS['inhouse_pharmacy'] = false;
$GLOBALS['sell_non_drug_products'] = 0;
$glrow = sqlQuery("SHOW TABLES LIKE 'globals'");
if (!empty($glrow)) {
// Collect user specific settings from user_settings table.
//
$gl_user = array();
if (!empty($_SESSION['authUserID'])) {
$glres_user = sqlStatement("SELECT `setting_label`, `setting_value` " .
"FROM `user_settings` " .
"WHERE `setting_user` = ? " .
"AND `setting_label` LIKE 'global:%'", array($_SESSION['authUserID']) );
for($iter=0; $row=sqlFetchArray($glres_user); $iter++) {
//remove global_ prefix from label
$row['setting_label'] = substr($row['setting_label'],7);
$gl_user[$iter]=$row;
}
}
// Set global parameters from the database globals table.
// Some parameters require custom handling.
//
$GLOBALS['language_menu_show'] = array();
$glres = sqlStatement("SELECT gl_name, gl_index, gl_value FROM globals " .
"ORDER BY gl_name, gl_index");
while ($glrow = sqlFetchArray($glres)) {
$gl_name = $glrow['gl_name'];
$gl_value = $glrow['gl_value'];
// Adjust for user specific settings
if (!empty($gl_user)) {
foreach ($gl_user as $setting) {
if ($gl_name == $setting['setting_label']) {
$gl_value = $setting['setting_value'];
}
}
}
if ($gl_name == 'language_menu_other') {
$GLOBALS['language_menu_show'][] = $gl_value;
}
else if ($gl_name == 'css_header') {
$GLOBALS[$gl_name] = "$rootdir/themes/" . $gl_value;
}
else if ($gl_name == 'specific_application') {
if ($gl_value == '1') $GLOBALS['athletic_team'] = true;
else if ($gl_value == '2') $GLOBALS['ippf_specific'] = true;
else if ($gl_value == '3') $GLOBALS['weight_loss_clinic'] = true;
}
else if ($gl_name == 'inhouse_pharmacy') {
if ($gl_value) $GLOBALS['inhouse_pharmacy'] = true;
if ($gl_value == '2') $GLOBALS['sell_non_drug_products'] = 1;
else if ($gl_value == '3') $GLOBALS['sell_non_drug_products'] = 2;
}
else {
$GLOBALS[$gl_name] = $gl_value;
}
}
// Language cleanup stuff.
$GLOBALS['language_menu_login'] = false;
if ((count($GLOBALS['language_menu_show']) >= 1) || $GLOBALS['language_menu_showall']) {
$GLOBALS['language_menu_login'] = true;
}
//
// End of globals table processing.
}
else {
// Temporary stuff to handle the case where the globals table does not
// exist yet. This will happen in sql_upgrade.php on upgrading to the
// first release containing this table.
$GLOBALS['language_menu_login'] = true;
$GLOBALS['language_menu_showall'] = true;
$GLOBALS['language_menu_show'] = array('English (Standard)','Swedish');
$GLOBALS['language_default'] = "English (Standard)";
$GLOBALS['translate_layout'] = true;
$GLOBALS['translate_lists'] = true;
$GLOBALS['translate_gacl_groups'] = true;
$GLOBALS['translate_form_titles'] = true;
$GLOBALS['translate_document_categories'] = true;
$GLOBALS['translate_appt_categories'] = true;
$GLOBALS['concurrent_layout'] = 2;
$timeout = 7200;
$openemr_name = 'OpenEMR';
$css_header = "$rootdir/themes/style_default.css";
$GLOBALS['css_header'] = $css_header;
$GLOBALS['schedule_start'] = 8;
$GLOBALS['schedule_end'] = 17;
$GLOBALS['calendar_interval'] = 15;
$GLOBALS['phone_country_code'] = '1';
$GLOBALS['disable_non_default_groups'] = true;
$GLOBALS['ippf_specific'] = false;
}
// If >0 this will enforce a separate PHP session for each top-level
// browser window. You must log in separately for each. This is not
// thoroughly tested yet and some browsers might have trouble with it,
// so make it 0 if you must. Alternatively, you can set it to 2 to be
// notified when the session ID changes.
$GLOBALS['restore_sessions'] = 1; // 0=no, 1=yes, 2=yes+debug
// Theme definition. All this stuff should be moved to CSS.
//
if ($GLOBALS['concurrent_layout']) {
$top_bg_line = ' bgcolor="#dddddd" ';
$GLOBALS['style']['BGCOLOR2'] = "#dddddd";
$bottom_bg_line = $top_bg_line;
$title_bg_line = ' bgcolor="#bbbbbb" ';
$nav_bg_line = ' bgcolor="#94d6e7" ';
} else {
$top_bg_line = ' bgcolor="#94d6e7" ';
$GLOBALS['style']['BGCOLOR2'] = "#94d6e7";
$bottom_bg_line = ' background="'.$rootdir.'/pic/aquabg.gif" ';
$title_bg_line = ' bgcolor="#aaffff" ';
$nav_bg_line = ' bgcolor="#94d6e7" ';
}
$login_filler_line = ' bgcolor="#f7f0d5" ';
$logocode = "<img src='$web_root/sites/" . $_SESSION['site_id'] . "/images/login_logo.gif'>";
$linepic = "$rootdir/pic/repeat_vline9.gif";
$table_bg = ' bgcolor="#cccccc" ';
$GLOBALS['style']['BGCOLOR1'] = "#cccccc";
$GLOBALS['style']['TEXTCOLOR11'] = "#222222";
$GLOBALS['style']['HIGHLIGHTCOLOR'] = "#dddddd";
$GLOBALS['style']['BOTTOM_BG_LINE'] = $bottom_bg_line;
// The height in pixels of the Logo bar at the top of the login page:
$GLOBALS['logoBarHeight'] = 110;
// The height in pixels of the Navigation bar:
$GLOBALS['navBarHeight'] = 22;
// The height in pixels of the Title bar:
$GLOBALS['titleBarHeight'] = 40;
// The assistant word, MORE printed next to titles that can be clicked:
// Note this label gets translated here via the xl function
// -if you don't want it translated, then strip the xl function away
$tmore = xl('(More)');
// The assistant word, BACK printed next to titles that return to previous screens:
// Note this label gets translated here via the xl function
// -if you don't want it translated, then strip the xl function away
$tback = xl('(Back)');
// This is the idle logout function:
// if a page has not been refreshed within this many seconds, the interface
// will return to the login page
if (!empty($special_timeout)) {
$timeout = intval($special_timeout);
}
//Version tag
require_once(dirname(__FILE__) . "/../version.php");
$patch_appending = "";
if ( ($v_realpatch != '0') && (!(empty($v_realpatch))) ) {
$patch_appending = " (".$v_realpatch.")";
}
$openemr_version = "$v_major.$v_minor.$v_patch".$v_tag.$patch_appending;
$srcdir = $GLOBALS['srcdir'];
$login_screen = $GLOBALS['login_screen'];
$GLOBALS['css_header'] = $css_header;
$GLOBALS['backpic'] = $backpic;
// 1 = send email message to given id for Emergency Login user activation,
// else 0.
$GLOBALS['Emergency_Login_email'] = $GLOBALS['Emergency_Login_email_id'] ? 1 : 0;
//set include_de_identification to enable De-identification (currently de-identification works fine only with linux machines)
//Run de_identification_upgrade.php script to upgrade OpenEMR database to include procedures,
//functions, tables for de-identification(Mysql root user and password is required for successful
//execution of the de-identification upgrade script)
$GLOBALS['include_de_identification']=0;
// Include the authentication module code here, but the rule is
// if the file has the word "login" in the source code file name,
// don't include the authentication module - we do this to avoid
// include loops.
if (!isset($ignoreAuth) || !$ignoreAuth) {
include_once("$srcdir/auth.inc");
}
// If you do not want your accounting system to have a customer added to it
// for each insurance company, then set this to true. SQL-Ledger currently
// (2005-03-21) does nothing useful with insurance companies as customers.
$GLOBALS['insurance_companies_are_not_customers'] = true;
// This is the background color to apply to form fields that are searchable.
// Currently it is applicable only to the "Search or Add Patient" form.
$GLOBALS['layout_search_color'] = '#ffff55';
//EMAIL SETTINGS
$SMTP_Auth = !empty($GLOBALS['SMTP_USER']);
// Customize these if you are using SQL-Ledger with OpenEMR, or if you are
// going to run sl_convert.php to convert from SQL-Ledger.
//
$sl_cash_acc = '1060'; // sql-ledger account number for checking account
$sl_ar_acc = '1200'; // sql-ledger account number for accounts receivable
$sl_income_acc = '4320'; // sql-ledger account number for medical services income
$sl_services_id = 'MS'; // sql-ledger parts table id for medical services
$sl_dbname = 'sql-ledger'; // sql-ledger database name
$sl_dbuser = 'sql-ledger'; // sql-ledger database login name
$sl_dbpass = 'secret'; // sql-ledger database login password
//////////////////////////////////////////////////////////////////
// Don't change anything below this line. ////////////////////////////
$encounter = empty($_SESSION['encounter']) ? 0 : $_SESSION['encounter'];
if (!empty($_GET['pid']) && empty($_SESSION['pid'])) {
$_SESSION['pid'] = $_GET['pid'];
}
elseif (!empty($_POST['pid']) && empty($_SESSION['pid'])) {
$_SESSION['pid'] = $_POST['pid'];
}
$pid = empty($_SESSION['pid']) ? 0 : $_SESSION['pid'];
$userauthorized = empty($_SESSION['userauthorized']) ? 0 : $_SESSION['userauthorized'];
$groupname = empty($_SESSION['authProvider']) ? 0 : $_SESSION['authProvider'];
// global interface function to format text length using ellipses
// Global interface helper: shorten $string so that the result (including
// the trailing ellipsis) never exceeds $length characters.
//
// @param string $string text to shorten
// @param int    $length maximum length of the returned string
// @return string the original string if it already fits, otherwise the
//                first $length-3 characters followed by "..."
function strterm($string,$length) {
  // BUGFIX: only truncate when the string actually exceeds the limit.
  // The old condition (strlen >= $length-3) also mangled strings that fit,
  // and could make strings just under the limit *longer* by appending "...".
  // NOTE(review): strlen/substr are byte-wise; multi-byte UTF-8 text may be
  // cut mid-character here - confirm whether mb_* functions are needed.
  if (strlen($string) > $length) {
    return substr($string,0,$length-3) . "...";
  } else {
    return $string;
  }
}
// Override temporary_files_dir if PHP >= 5.2.1.
if (version_compare(phpversion(), "5.2.1", ">=")) {
$GLOBALS['temporary_files_dir'] = rtrim(sys_get_temp_dir(),'/');
}
// turn off PHP compatibility warnings
ini_set("session.bug_compat_warn","off");
//////////////////////////////////////////////////////////////////
?>
| gpl-2.0 |
LORDofDOOM/MMOCore | src/server/scripts/Outland/BlackTemple/boss_bloodboil.cpp | 12349 | /*
* Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* ScriptData
SDName: Boss_Bloodboil
SD%Complete: 80
SDComment: Bloodboil not working correctly, missing enrage
SDCategory: Black Temple
EndScriptData */
#include "ScriptMgr.h"
#include "ScriptedCreature.h"
#include "black_temple.h"
enum Bloodboil
{
//Speech'n'Sound
SAY_AGGRO = 0,
SAY_SLAY = 1,
SAY_SPECIAL = 2,
SAY_ENRAGE = 3,
SAY_DEATH = 4,
//Spells
SPELL_ACID_GEYSER = 40630,
SPELL_ACIDIC_WOUND = 40481,
SPELL_ARCING_SMASH = 40599,
SPELL_BLOODBOIL = 42005, // This spell is AoE whereas it shouldn't be
SPELL_FEL_ACID = 40508,
SPELL_FEL_RAGE_SELF = 40594,
SPELL_FEL_RAGE_TARGET = 40604,
SPELL_FEL_RAGE_2 = 40616,
SPELL_FEL_RAGE_3 = 41625,
SPELL_BEWILDERING_STRIKE = 40491,
SPELL_EJECT1 = 40486, // 1000 Physical damage + knockback + script effect (should handle threat reduction I think)
SPELL_EJECT2 = 40597, // 1000 Physical damage + Stun (used in phase 2?)
SPELL_TAUNT_GURTOGG = 40603,
SPELL_INSIGNIFIGANCE = 40618,
SPELL_BERSERK = 45078
};
//This is used to sort the players by distance in preparation for the Bloodboil cast.
class boss_gurtogg_bloodboil : public CreatureScript
{
public:
boss_gurtogg_bloodboil() : CreatureScript("boss_gurtogg_bloodboil") { }
CreatureAI* GetAI(Creature* creature) const
{
return new boss_gurtogg_bloodboilAI (creature);
}
struct boss_gurtogg_bloodboilAI : public ScriptedAI
{
boss_gurtogg_bloodboilAI(Creature* creature) : ScriptedAI(creature)
{
instance = creature->GetInstanceScript();
}
InstanceScript* instance;
uint64 TargetGUID;
float TargetThreat;
uint32 BloodboilTimer;
uint32 BloodboilCount;
uint32 AcidGeyserTimer;
uint32 AcidicWoundTimer;
uint32 ArcingSmashTimer;
uint32 EnrageTimer;
uint32 FelAcidTimer;
uint32 EjectTimer;
uint32 BewilderingStrikeTimer;
uint32 PhaseChangeTimer;
bool Phase1;
void Reset()
{
if (instance)
instance->SetData(DATA_GURTOGGBLOODBOILEVENT, NOT_STARTED);
TargetGUID = 0;
TargetThreat = 0;
BloodboilTimer = 10000;
BloodboilCount = 0;
AcidGeyserTimer = 1000;
AcidicWoundTimer = 6000;
ArcingSmashTimer = 19000;
EnrageTimer = 600000;
FelAcidTimer = 25000;
EjectTimer = 10000;
BewilderingStrikeTimer = 15000;
PhaseChangeTimer = 60000;
Phase1 = true;
me->ApplySpellImmune(0, IMMUNITY_STATE, SPELL_AURA_MOD_TAUNT, false);
me->ApplySpellImmune(0, IMMUNITY_EFFECT, SPELL_EFFECT_ATTACK_ME, false);
}
void EnterCombat(Unit* /*who*/)
{
DoZoneInCombat();
Talk(SAY_AGGRO);
if (instance)
instance->SetData(DATA_GURTOGGBLOODBOILEVENT, IN_PROGRESS);
}
void KilledUnit(Unit* /*victim*/)
{
Talk(SAY_SLAY);
}
void JustDied(Unit* /*killer*/)
{
if (instance)
instance->SetData(DATA_GURTOGGBLOODBOILEVENT, DONE);
Talk(SAY_DEATH);
}
        // Note: This seems like a very complicated fix. The fix needs to be handled by the core, as implementation of limited-target AoE spells are still not limited.
        // Selects the five players closest to the boss as Bloodboil targets.
        // Currently this only builds the list; the aura application below is
        // commented out and UpdateAI casts SPELL_BLOODBOIL as plain AoE instead.
        void CastBloodboil()
        {
            // Get the Threat List
            std::list<HostileReference*> m_threatlist = me->getThreatManager().getThreatList();

            if (m_threatlist.empty()) // He doesn't have anyone in his threatlist, useless to continue
                return;

            std::list<Unit*> targets;
            std::list<HostileReference*>::const_iterator itr = m_threatlist.begin();
            for (; itr!= m_threatlist.end(); ++itr) //store the threat list in a different container
            {
                Unit* target = Unit::GetUnit(*me, (*itr)->getUnitGuid());

                //only on alive players
                if (target && target->isAlive() && target->GetTypeId() == TYPEID_PLAYER)
                    targets.push_back(target);
            }

            //Sort the list of players
            targets.sort(Trinity::ObjectDistanceOrderPred(me, false));

            //Resize so we only get top 5
            // NOTE(review): if fewer than 5 players qualify, resize() pads the
            // list with null pointers; the commented-out aura loop below would
            // need its null check kept if it is ever re-enabled.
            targets.resize(5);

            //Aura each player in the targets list with Bloodboil. Aura code copied+pasted from Aura command in Level3.cpp
            /*SpellInfo const* spellInfo = sSpellMgr->GetSpellInfo(SPELL_BLOODBOIL);
            if (spellInfo)
            {
                for (std::list<Unit*>::const_iterator itr = targets.begin(); itr != targets.end(); ++itr)
                {
                    Unit* target = *itr;
                    if (!target) return;

                    for (uint32 i = 0; i<3; ++i)
                    {
                        uint8 eff = spellInfo->Effect[i];
                        if (eff >= TOTAL_SPELL_EFFECTS)
                            continue;

                        Aura* Aur = new Aura(spellInfo, i, target, target, target);
                        target->AddAura(Aur);
                    }
                }
            }*/
        }
        // Restores the pre-Fel-Rage threat of the unit identified by guid:
        // wipes the artificial threat added at the phase change, then re-adds
        // the amount recorded in TargetThreat before phase 2 began.
        void RevertThreatOnTarget(uint64 guid)
        {
            Unit* unit = NULL;
            unit = Unit::GetUnit(*me, guid);
            if (unit)
            {
                // Drop whatever threat the target currently holds...
                if (DoGetThreat(unit))
                    DoModifyThreatPercent(unit, -100);
                // ...then restore the saved pre-phase-2 amount, if any.
                if (TargetThreat)
                    me->AddThreat(unit, TargetThreat);
            }
        }
void UpdateAI(const uint32 diff)
{
if (!UpdateVictim())
return;
if (ArcingSmashTimer <= diff)
{
DoCast(me->getVictim(), SPELL_ARCING_SMASH);
ArcingSmashTimer = 10000;
} else ArcingSmashTimer -= diff;
if (FelAcidTimer <= diff)
{
DoCast(me->getVictim(), SPELL_FEL_ACID);
FelAcidTimer = 25000;
} else FelAcidTimer -= diff;
if (!me->HasAura(SPELL_BERSERK))
{
if (EnrageTimer <= diff)
{
DoCast(me, SPELL_BERSERK);
Talk(SAY_ENRAGE);
} else EnrageTimer -= diff;
}
if (Phase1)
{
if (BewilderingStrikeTimer <= diff)
{
DoCast(me->getVictim(), SPELL_BEWILDERING_STRIKE);
float mt_threat = DoGetThreat(me->getVictim());
if (Unit* target = SelectTarget(SELECT_TARGET_TOPAGGRO, 1))
me->AddThreat(target, mt_threat);
BewilderingStrikeTimer = 20000;
} else BewilderingStrikeTimer -= diff;
if (EjectTimer <= diff)
{
DoCast(me->getVictim(), SPELL_EJECT1);
DoModifyThreatPercent(me->getVictim(), -40);
EjectTimer = 15000;
} else EjectTimer -= diff;
if (AcidicWoundTimer <= diff)
{
DoCast(me->getVictim(), SPELL_ACIDIC_WOUND);
AcidicWoundTimer = 10000;
} else AcidicWoundTimer -= diff;
if (BloodboilTimer <= diff)
{
if (BloodboilCount < 5) // Only cast it five times.
{
//CastBloodboil(); // Causes issues on windows, so is commented out.
DoCast(me->getVictim(), SPELL_BLOODBOIL);
++BloodboilCount;
BloodboilTimer = 10000*BloodboilCount;
}
} else BloodboilTimer -= diff;
}
if (!Phase1)
{
if (AcidGeyserTimer <= diff)
{
DoCast(me->getVictim(), SPELL_ACID_GEYSER);
AcidGeyserTimer = 30000;
} else AcidGeyserTimer -= diff;
if (EjectTimer <= diff)
{
DoCast(me->getVictim(), SPELL_EJECT2);
EjectTimer = 15000;
} else EjectTimer -= diff;
}
if (PhaseChangeTimer <= diff)
{
if (Phase1)
{
Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0);
if (target && target->isAlive())
{
Phase1 = false;
TargetThreat = DoGetThreat(target);
TargetGUID = target->GetGUID();
target->CastSpell(me, SPELL_TAUNT_GURTOGG, true);
if (DoGetThreat(target))
DoModifyThreatPercent(target, -100);
me->AddThreat(target, 50000000.0f);
me->ApplySpellImmune(0, IMMUNITY_STATE, SPELL_AURA_MOD_TAUNT, true);
me->ApplySpellImmune(0, IMMUNITY_EFFECT, SPELL_EFFECT_ATTACK_ME, true);
// If VMaps are disabled, this spell can call the whole instance
DoCast(me, SPELL_INSIGNIFIGANCE, true);
DoCast(target, SPELL_FEL_RAGE_TARGET, true);
DoCast(target, SPELL_FEL_RAGE_2, true);
/* These spells do not work, comment them out for now.
DoCast(target, SPELL_FEL_RAGE_2, true);
DoCast(target, SPELL_FEL_RAGE_3, true);*/
//Cast this without triggered so that it appears in combat logs and shows visual.
DoCast(me, SPELL_FEL_RAGE_SELF);
Talk(SAY_SPECIAL);
AcidGeyserTimer = 1000;
PhaseChangeTimer = 30000;
}
} else // Encounter is a loop pretty much. Phase 1 -> Phase 2 -> Phase 1 -> Phase 2 till death or enrage
{
if (TargetGUID)
RevertThreatOnTarget(TargetGUID);
TargetGUID = 0;
Phase1 = true;
BloodboilTimer = 10000;
BloodboilCount = 0;
AcidicWoundTimer += 2000;
ArcingSmashTimer += 2000;
FelAcidTimer += 2000;
EjectTimer += 2000;
PhaseChangeTimer = 60000;
me->ApplySpellImmune(0, IMMUNITY_STATE, SPELL_AURA_MOD_TAUNT, false);
me->ApplySpellImmune(0, IMMUNITY_EFFECT, SPELL_EFFECT_ATTACK_ME, false);
}
} else PhaseChangeTimer -= diff;
DoMeleeAttackIfReady();
}
};
};
// Hook invoked by the script loader to register this boss script.
void AddSC_boss_gurtogg_bloodboil()
{
    new boss_gurtogg_bloodboil();
}
| gpl-2.0 |
ProDataLab/veins-3a2-plus | src/base/modules/BaseMacLayer.cc | 10336 | /***************************************************************************
* file: BaseMacLayer.cc
*
* author: Daniel Willkomm
*
* copyright: (C) 2004 Telecommunication Networks Group (TKN) at
* Technische Universitaet Berlin, Germany.
*
* This program is free software; you can redistribute it
* and/or modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later
* version.
* For further information see file COPYING
* in the top level directory
***************************************************************************
* part of: framework implementation developed by tkn
* description: basic MAC layer class
* subclass to create your own MAC layer
**************************************************************************/
#include "BaseMacLayer.h"
#include <cassert>
#include <sstream>
#ifdef MIXIM_INET
#include <InterfaceTableAccess.h>
#endif
#include "Mapping.h"
#include "Signal_.h"
#include "MacToPhyInterface.h"
#include "MacToNetwControlInfo.h"
#include "NetwToMacControlInfo.h"
#include "MacToPhyControlInfo.h"
#include "AddressingInterface.h"
#include "base/connectionManager/ChannelAccess.h"
#include "FindModule.h"
#include "MacPkt_m.h"
using Veins::ChannelAccess;
Define_Module(BaseMacLayer);
/**
 * First we have to initialize the module from which we derived ours,
 * in this case BaseLayer.
 *
 * Stage 0 resolves the PHY module and reads header-length parameters;
 * afterwards a MAC address is assigned once (L2NULL means "not set yet").
 **/
void BaseMacLayer::initialize(int stage)
{
    BaseLayer::initialize(stage);

    if(stage==0)
    {
        // get handle to phy layer
        if ((phy = FindModule<MacToPhyInterface*>::findSubModule(getParentModule())) == NULL) {
            error("Could not find a PHY module.");
        }

        // MAC header length (bits) comes from the NED parameter; the PHY
        // header length is queried from the PHY module itself.
        headerLength    = par("headerLength");
        phyHeaderLength = phy->getPhyHeaderLength();

        hasPar("coreDebug") ? coreDebug = par("coreDebug").boolValue() : coreDebug = false;
    }
    if (myMacAddr == LAddress::L2NULL) {
        // see if there is an addressing module available
        // otherwise use NIC modules id as MAC address
        AddressingInterface* addrScheme = FindModule<AddressingInterface*>::findSubModule(findHost());
        if(addrScheme) {
            myMacAddr = addrScheme->myMacAddr(this);
        } else {
            // The "address" NED parameter wins unless it is empty or "auto",
            // in which case the NIC module id is used as the address.
            const std::string addressString = par("address").stringValue();
            if (addressString.empty() || addressString == "auto")
                myMacAddr = LAddress::L2Type(getParentModule()->getId());
            else
                myMacAddr = LAddress::L2Type(addressString.c_str());
            // use streaming operator for string conversion, this makes it more
            // independent from the myMacAddr type
            std::ostringstream oSS; oSS << myMacAddr;
            par("address").setStringValue(oSS.str());
        }
        registerInterface();
    }
}
/**
 * Registers this NIC with INET's InterfaceTable (compiled only when
 * MIXIM_INET is defined; a no-op otherwise). The interface entry is named
 * after the NIC module with non-alphanumeric characters stripped, and
 * carries this layer's MAC address.
 */
void BaseMacLayer::registerInterface()
{
#ifdef MIXIM_INET
    IInterfaceTable *ift = InterfaceTableAccess().getIfExists();
    if (ift) {
        cModule* nic = getParentModule();
        InterfaceEntry *e = new InterfaceEntry();
        // interface name: NIC module's name without special
        // characters ([])
        char *interfaceName = new char[strlen(nic->getFullName()) + 1];
        char *d = interfaceName;
        for (const char *s = nic->getFullName(); *s; s++)
            if (isalnum(*s))
                *d++ = *s;
        *d = '\0';
        e->setName(interfaceName);
        delete [] interfaceName;
        // this MAC address must be the same as the one in BaseMacLayer
        e->setMACAddress(myMacAddr);
        // generate interface identifier for IPv6
        e->setInterfaceToken(myMacAddr.formInterfaceIdentifier());
        // MTU on 802.11 = ?
        e->setMtu(1500); // FIXME
        // capabilities
        e->setBroadcast(true);
        e->setMulticast(true);
        e->setPointToPoint(false);
        // add
        ift->addInterface(e, this);
    }
#endif
}
/**
 * Strips the MAC framing: removes the encapsulated network packet from
 * "msg", attaches a MacToNetw control info carrying the sender's MAC
 * address, and disposes of the now-empty MAC frame.
 */
cPacket* BaseMacLayer::decapsMsg(MacPkt* msg)
{
    cPacket* payload = msg->decapsulate();
    // hand the source MAC address up to the network layer via control info
    setUpControlInfo(payload, msg->getSrcAddr());
    // the MAC frame itself is no longer needed
    delete msg;
    coreEV << " message decapsulated " << endl;
    return payload;
}
/**
 * Wraps a network-layer packet into a fresh MacPkt: prepends headerLength
 * bits of MAC header, copies the destination address out of the attached
 * control info, and stamps this node's own MAC address as the source.
 */
MacPkt* BaseMacLayer::encapsMsg(cPacket *netwPkt)
{
    MacPkt* macPkt = new MacPkt(netwPkt->getName(), netwPkt->getKind());
    macPkt->setBitLength(headerLength);
    // the network layer attached the destination address as control info
    cObject* netwCtrl = netwPkt->removeControlInfo();
    const LAddress::L2Type& dest = getUpperDestinationFromControlInfo(netwCtrl);
    coreEV <<"CInfo removed, mac addr="<< dest << endl;
    macPkt->setDestAddr(dest);
    // the control info is consumed at this point; "dest" must not be used
    // after the delete below (it may reference into the control info)
    delete netwCtrl;
    macPkt->setSrcAddr(myMacAddr);
    macPkt->encapsulate(netwPkt);
    coreEV <<"pkt encapsulated\n";
    return macPkt;
}
/**
 * Handles messages arriving from the network layer: wraps them into a
 * MacPkt via encapsMsg() and forwards the result towards the PHY.
 *
 * Redefine this function if you want to process messages from upper
 * layers before they are sent to lower layers; use sendDown() to forward
 * after processing.
 */
void BaseMacLayer::handleUpperMsg(cMessage *mac)
{
    cPacket* const upperPkt = dynamic_cast<cPacket*>(mac);
    // upper-layer messages are expected to always be packets
    assert(upperPkt);
    sendDown(encapsMsg(upperPkt));
}
/**
 * Basic address filter for frames arriving from the PHY: frames that are
 * broadcast or addressed to this node's MAC are decapsulated and handed
 * to the network layer; everything else is dropped.
 *
 * @sa sendUp
 */
void BaseMacLayer::handleLowerMsg(cMessage *msg)
{
    MacPkt* macPkt = static_cast<MacPkt *>(msg);
    const LAddress::L2Type& dest = macPkt->getDestAddr();
    const LAddress::L2Type& src = macPkt->getSrcAddr();
    const bool forMe = (dest == myMacAddr) || LAddress::isL2Broadcast(dest);
    if (!forMe) {
        // not addressed to us and not broadcast -> drop silently
        coreEV << "message with mac addr " << src
               << " not for me (dest=" << dest
               << ") -> delete (my MAC="<<myMacAddr<<")\n";
        delete macPkt;
        return;
    }
    coreEV << "message with mac addr " << src
           << " for me (dest=" << dest
           << ") -> forward packet to upper layer\n";
    sendUp(decapsMsg(macPkt));
}
/**
 * Translates control messages from the PHY: a TX_OVER notification is
 * re-tagged with this layer's TX_OVER kind and forwarded upwards; any
 * other control kind is logged and discarded.
 */
void BaseMacLayer::handleLowerControl(cMessage* msg)
{
    if (msg->getKind() == MacToPhyInterface::TX_OVER) {
        msg->setKind(TX_OVER);
        sendControlUp(msg);
    }
    else {
        EV << "BaseMacLayer does not handle control messages of this type (name was "<<msg->getName()<<")\n";
        delete msg;
    }
}
/**
 * Builds a Signal covering [start, start + length] whose transmission
 * power and bitrate are both constant over the whole duration.
 *
 * @param start   begin of the transmission
 * @param length  duration of the transmission
 * @param power   constant transmission power
 * @param bitrate constant bitrate
 */
Signal* BaseMacLayer::createSignal(simtime_t_cref start, simtime_t_cref length, double power, double bitrate)
{
    Signal* signal = new Signal(start, length);
    const simtime_t end = start + length;
    // constant-power rectangle over the transmission interval
    signal->setTransmissionPower(createRectangleMapping(start, end, power));
    // constant bitrate over the same interval
    signal->setBitrate(createConstantMapping(start, end, bitrate));
    return signal;
}
/**
 * Creates a time-only Mapping that holds "value" at both interval borders,
 * i.e. a constant function over [start, end] (linear interpolation between
 * two equal anchors yields the same value everywhere in between).
 */
Mapping* BaseMacLayer::createConstantMapping(simtime_t_cref start, simtime_t_cref end, Argument::mapped_type_cref value)
{
    Mapping* mapping = MappingUtils::createMapping(Argument::MappedZero, DimensionSet::timeDomain, Mapping::LINEAR);
    // anchor the same value at the start and at the end of the interval
    Argument atStart(start);
    mapping->setValue(atStart, value);
    Argument atEnd(end);
    mapping->setValue(atEnd, value);
    return mapping;
}
/**
 * Creates a time-only Mapping shaped like a rectangle pulse: "value"
 * inside (start, end) and zero at/outside the borders.
 *
 * NOTE(review): relies on MappingUtils::addDiscontinuity with
 * MappingUtils::post(start) / MappingUtils::pre(end) as the one-sided
 * limit positions — assumed to place the jumps just inside the interval;
 * confirm against the MiXiM Mapping API documentation.
 */
Mapping* BaseMacLayer::createRectangleMapping(simtime_t_cref start, simtime_t_cref end, Argument::mapped_type_cref value)
{
    //create mapping over time
    Mapping* m = MappingUtils::createMapping(DimensionSet::timeDomain, Mapping::LINEAR);
    //set position Argument
    Argument startPos(start);
    //set discontinuity at position: zero at "start", "value" just after it
    MappingUtils::addDiscontinuity(m, startPos, Argument::MappedZero, MappingUtils::post(start), value);
    //set position Argument
    Argument endPos(end);
    //set discontinuity at position: zero at "end", "value" just before it
    MappingUtils::addDiscontinuity(m, endPos, Argument::MappedZero, MappingUtils::pre(end), value);
    return m;
}
/**
 * Creates a time/frequency Mapping that holds "value" at the four corners
 * of the rectangle [start, end] x [centerFreq - halfBandwidth,
 * centerFreq + halfBandwidth]; positions outside fall back to the
 * mapping's out-of-range value (Argument::MappedZero).
 */
ConstMapping* BaseMacLayer::createSingleFrequencyMapping(simtime_t_cref start,
                                                         simtime_t_cref end,
                                                         Argument::mapped_type_cref centerFreq,
                                                         Argument::mapped_type_cref halfBandwidth,
                                                         Argument::mapped_type_cref value)
{
    Mapping* res = MappingUtils::createMapping(Argument::MappedZero, DimensionSet::timeFreqDomain, Mapping::LINEAR);
    Argument pos(DimensionSet::timeFreqDomain);
    // lower band edge: both time corners
    pos.setArgValue(Dimension::frequency, centerFreq - halfBandwidth);
    pos.setTime(start);
    res->setValue(pos, value);
    pos.setTime(end);
    res->setValue(pos, value);
    // upper band edge: both time corners (end first, since pos still holds "end")
    pos.setArgValue(Dimension::frequency, centerFreq + halfBandwidth);
    res->setValue(pos, value);
    pos.setTime(start);
    res->setValue(pos, value);
    return res;
}
/**
 * Looks up the connection manager responsible for this NIC (the parent
 * module of this MAC layer).
 */
BaseConnectionManager* BaseMacLayer::getConnectionManager() {
	return ChannelAccess::getConnectionManager(getParentModule());
}
/**
 * Extracts the layer-2 destination address from a control info object
 * attached by the network layer (delegates to NetwToMacControlInfo).
 */
const LAddress::L2Type& BaseMacLayer::getUpperDestinationFromControlInfo(const cObject *const pCtrlInfo) {
	return NetwToMacControlInfo::getDestFromControlInfo(pCtrlInfo);
}
/**
 * Attaches a "control info" (MacToNetw) structure (object) to the message pMsg.
 *
 * @param pMsg     message travelling towards the network layer
 * @param pSrcAddr MAC address of the frame's original sender
 * @return the control info object that was attached
 */
cObject *const BaseMacLayer::setUpControlInfo(cMessage *const pMsg, const LAddress::L2Type& pSrcAddr)
{
	return MacToNetwControlInfo::setControlInfo(pMsg, pSrcAddr);
}
/**
 * Attaches a "control info" (MacToPhy) structure (object) to the message pMsg.
 *
 * @param pMsg    message travelling towards the PHY layer
 * @param pSignal signal description to transmit with the message
 * @return the control info object that was attached
 */
cObject *const BaseMacLayer::setDownControlInfo(cMessage *const pMsg, Signal *const pSignal)
{
	return MacToPhyControlInfo::setControlInfo(pMsg, pSignal);
}
| gpl-2.0 |
qoswork/opennmszh | features/gwt-graph-resource-list/src/main/java/org/opennms/features/gwt/graph/resource/list/client/view/KscReportResourceChooser.java | 2384 | /*******************************************************************************
* This file is part of OpenNMS(R).
*
* Copyright (C) 2011-2012 The OpenNMS Group, Inc.
* OpenNMS(R) is Copyright (C) 1999-2012 The OpenNMS Group, Inc.
*
* OpenNMS(R) is a registered trademark of The OpenNMS Group, Inc.
*
* OpenNMS(R) is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License,
* or (at your option) any later version.
*
* OpenNMS(R) is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with OpenNMS(R). If not, see:
* http://www.gnu.org/licenses/
*
* For more information contact:
* OpenNMS(R) Licensing <[email protected]>
* http://www.opennms.org/
* http://www.opennms.com/
*******************************************************************************/
package org.opennms.features.gwt.graph.resource.list.client.view;
import org.opennms.features.gwt.graph.resource.list.client.presenter.KscGraphResourceListPresenter.ViewChoiceDisplay;
import com.google.gwt.event.dom.client.HasClickHandlers;
import com.google.gwt.user.client.ui.Button;
import com.google.gwt.user.client.ui.VerticalPanel;
import com.google.gwt.user.client.ui.Widget;
/**
 * Simple two-button chooser widget for the KSC report resource list:
 * a "view" button stacked above a "choose" button in a vertical panel.
 */
public class KscReportResourceChooser implements ViewChoiceDisplay {

	/** Panel stacking the two action buttons vertically. */
	VerticalPanel m_vertPanel;

	private Button m_chooseBtn;
	private Button m_viewBtn;

	/**
	 * Builds the chooser: creates both buttons and lays them out with the
	 * "view" button on top.
	 */
	public KscReportResourceChooser() {
		m_viewBtn = new Button("View Child Resource");
		m_chooseBtn = new Button("Choose Child Resource");

		m_vertPanel = new VerticalPanel();
		m_vertPanel.setStyleName("onms-table-no-borders-margin");
		m_vertPanel.add(m_viewBtn);
		m_vertPanel.add(m_chooseBtn);
	}

	/** @return click source for the "View Child Resource" button */
	@Override
	public HasClickHandlers getViewButton() {
		return m_viewBtn;
	}

	/** @return click source for the "Choose Child Resource" button */
	@Override
	public HasClickHandlers getChooseButton() {
		return m_chooseBtn;
	}

	/** @return the widget representation of this chooser */
	@Override
	public Widget asWidget() {
		return m_vertPanel.asWidget();
	}
}
| gpl-2.0 |
BigBoss424/a-zplumbing | Magento-CE-2/vendor/magento/magento2-base/dev/tests/integration/framework/Magento/TestFramework/Bootstrap/MemoryFactory.php | 869 | <?php
/**
* Copyright © 2015 Magento. All rights reserved.
* See COPYING.txt for license details.
*/
namespace Magento\TestFramework\Bootstrap;
class MemoryFactory
{
    /**
     * Shell command executor passed through to the memory helper.
     *
     * @var \Magento\Framework\Shell
     */
    private $shell;

    /**
     * @param \Magento\Framework\Shell $shell
     */
    public function __construct(\Magento\Framework\Shell $shell)
    {
        $this->shell = $shell;
    }

    /**
     * Build a Memory watcher enforcing the given usage and leak limits.
     *
     * @param string $memUsageLimit
     * @param string $memLeakLimit
     * @return Memory
     */
    public function create($memUsageLimit, $memLeakLimit)
    {
        $helper = new \Magento\TestFramework\Helper\Memory($this->shell);
        $limit = new \Magento\TestFramework\MemoryLimit($memUsageLimit, $memLeakLimit, $helper);
        return new \Magento\TestFramework\Bootstrap\Memory($limit);
    }
}
| gpl-3.0 |
HossainKhademian/Studio3 | plugins/com.aptana.ui.io/src/com/aptana/ide/ui/io/navigator/actions/NewExternalFileWizard.java | 5548 | /**
* Aptana Studio
* Copyright (c) 2005-2011 by Appcelerator, Inc. All Rights Reserved.
* Licensed under the terms of the GNU Public License (GPL) v3 (with exceptions).
* Please see the license.html included with this distribution for details.
* Any modifications to this file must keep this entire header intact.
*/
package com.aptana.ide.ui.io.navigator.actions;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.StringReader;
import org.eclipse.core.filesystem.EFS;
import org.eclipse.core.filesystem.IFileStore;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IAdaptable;
import org.eclipse.core.runtime.IPath;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Path;
import org.eclipse.core.runtime.Status;
import org.eclipse.core.runtime.jobs.Job;
import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.jface.wizard.Wizard;
import org.jruby.embed.io.ReaderInputStream;
import com.aptana.core.logging.IdeLog;
import com.aptana.core.util.IOUtil;
import com.aptana.editor.common.internal.scripting.NewFileWizard;
import com.aptana.editor.common.internal.scripting.TemplateSelectionPage;
import com.aptana.ide.ui.io.IOUIPlugin;
import com.aptana.ide.ui.io.Utils;
import com.aptana.scripting.model.TemplateElement;
import com.aptana.ui.util.UIUtils;
/**
 * Wizard for creating a new file on an external (non-workspace) file
 * system location. The initial content comes from, in priority order:
 * an explicit input stream, a scripting template, or the main page's
 * own initial contents. File creation runs asynchronously in a Job.
 */
public class NewExternalFileWizard extends Wizard
{
	protected static final String TEMPLATE_PAGE_NAME = "templatePage";//$NON-NLS-1$
	protected static final String MAIN_PAGE_NAME = "mainPage";//$NON-NLS-1$
	// suggested file name shown in the main page
	private String initialFilename;
	// explicit initial content; when non-null it wins over any template
	private InputStream initialContent;
	// selection (file or directory) the new file is created relative to
	private IAdaptable selectedElement;
	// optional scripting template used to generate initial content
	private TemplateElement template;
	private WizardNewExternalFilePage mainPage;
	/**
	 * Creates the wizard without a pre-selected template (a template
	 * selection page will be offered when initialContent is null too).
	 */
	public NewExternalFileWizard(String initialName, InputStream initialContent, IAdaptable selectedElement)
	{
		this(initialName, initialContent, selectedElement, null);
	}
	/**
	 * @param initialName     suggested file name
	 * @param initialContent  explicit initial content, or null
	 * @param selectedElement selection the file is created relative to
	 * @param template        template for initial content, or null
	 */
	public NewExternalFileWizard(String initialName, InputStream initialContent, IAdaptable selectedElement,
			TemplateElement template)
	{
		initialFilename = initialName;
		this.initialContent = initialContent;
		this.selectedElement = selectedElement;
		this.template = template;
	}
	@Override
	public void addPages()
	{
		// file-name input is editable only when no template was pre-selected
		addPage(mainPage = new WizardNewExternalFilePage(MAIN_PAGE_NAME, initialFilename, template == null));
		mainPage.setTitle(Messages.NewExternalFileWizard_Title);
		mainPage.setDescription(Messages.NewExternalFileWizard_Description);
		// offer template selection only when neither content nor template is fixed
		if (initialContent == null && template == null)
		{
			addPage(new TemplateSelectionPage(TEMPLATE_PAGE_NAME));
		}
	}
	@Override
	public boolean canFinish()
	{
		// allow finishing directly from a completed main page, skipping
		// the (optional) template page
		if (getContainer().getCurrentPage() == mainPage)
		{
			if (mainPage.isPageComplete())
			{
				return true;
			}
		}
		return super.canFinish();
	}
	/**
	 * Creates the file (after confirming overwrite if it exists), writes
	 * the initial contents in a background Job, opens the result in an
	 * editor and refreshes the navigator view.
	 */
	@Override
	public boolean performFinish()
	{
		final IFileStore parentStore = getSelectedDirectory();
		final IFileStore newFile = parentStore.getChild(mainPage.getFileName());
		if (Utils.exists(newFile))
		{
			if (!MessageDialog.openConfirm(getShell(), Messages.NewFileAction_Confirm_Title,
					Messages.NewFileAction_Confirm_Message))
			{
				return false;
			}
		}
		// NOTE(review): if openOutputStream below throws CoreException,
		// "in" is never closed — potential stream leak; confirm and fix
		// separately.
		final InputStream in = getInitialContents(Path.fromOSString(newFile.toString()));
		// run the file creation in a job
		Job job = new Job(Messages.NewFileAction_JobTitle)
		{
			@Override
			protected IStatus run(IProgressMonitor monitor)
			{
				try
				{
					OutputStream out = newFile.openOutputStream(EFS.NONE, monitor);
					if (in != null)
					{
						// creates the initial contents
						try
						{
							IOUtil.pipe(in, out);
						}
						catch (IOException e)
						{
							IdeLog.logError(IOUIPlugin.getDefault(), e);
						}
						finally
						{
							try
							{
								in.close();
							}
							catch (IOException e)
							{
								IdeLog.logError(IOUIPlugin.getDefault(), e);
							}
						}
					}
					try
					{
						out.close();
					}
					catch (IOException e)
					{
						IdeLog.logError(IOUIPlugin.getDefault(), e);
					}
					// opens it in the editor
					EditorUtils.openFileInEditor(newFile, null);
					// refreshes the parent folder
					final IFileStore fileStore = Utils.getFileStore(selectedElement);
					boolean selectionIsDirectory = Utils.isDirectory(selectedElement);
					if (selectionIsDirectory)
					{
						IOUIPlugin.refreshNavigatorView(selectedElement);
					}
					else
					{
						IOUIPlugin.refreshNavigatorView(fileStore.getParent());
					}
				}
				catch (CoreException e)
				{
					showError(e);
				}
				return Status.OK_STATUS;
			}
		};
		job.setUser(true);
		job.schedule();
		return true;
	}
	/**
	 * Resolves the directory the new file goes into: the selection itself
	 * when it is a directory, otherwise its parent.
	 */
	private IFileStore getSelectedDirectory()
	{
		IFileStore fileStore = Utils.getFileStore(selectedElement);
		boolean selectionIsDirectory = Utils.isDirectory(selectedElement);
		if (!selectionIsDirectory && fileStore.getParent() != null)
		{
			return fileStore.getParent();
		}
		return fileStore;
	}
	/**
	 * Determines the initial file contents; priority: explicit stream,
	 * then evaluated template, then the main page's own contents.
	 * May return null.
	 */
	private InputStream getInitialContents(IPath path)
	{
		if (initialContent != null)
		{
			return initialContent;
		}
		if (template != null)
		{
			String templateContent = NewFileWizard.getTemplateContent(template, path);
			if (templateContent != null)
			{
				return new ReaderInputStream(new StringReader(templateContent), IOUtil.UTF_8);
			}
		}
		return mainPage.getInitialContents();
	}
	// Shows the exception's localized message in an error dialog.
	private void showError(Exception exception)
	{
		UIUtils.showErrorMessage(exception.getLocalizedMessage(), exception);
	}
}
| gpl-3.0 |
kylethayer/bioladder | wiki/tests/phpunit/languages/classes/LanguageTrTest.php | 1714 | <?php
/**
* @author Antoine Musso
* @copyright Copyright © 2011, Antoine Musso
* @file
*/
/**
 * @covers LanguageTr
 */
class LanguageTrTest extends LanguageClassesTestCase {

	/**
	 * Checks Turkish case mapping of the dotted and dotless I (T30040),
	 * see https://en.wikipedia.org/wiki/Dotted_and_dotless_I
	 * Credits to irc://irc.freenode.net/wikipedia-tr users:
	 * - berm
	 * - []LuCkY[]
	 * - Emperyan
	 * @dataProvider provideDottedAndDotlessI
	 * @covers Language::ucfirst
	 * @covers Language::lcfirst
	 */
	public function testDottedAndDotlessI( $func, $input, $inputCase, $expected ) {
		$lang = $this->getLang();
		if ( $func == 'ucfirst' ) {
			$res = $lang->ucfirst( $input );
		} elseif ( $func == 'lcfirst' ) {
			$res = $lang->lcfirst( $input );
		} else {
			throw new MWException( __METHOD__ . " given an invalid function name '$func'" );
		}
		$msg = "Converting $inputCase case '$input' with $func should give '$expected'";
		$this->assertEquals( $expected, $res, $msg );
	}

	public static function provideDottedAndDotlessI() {
		// Each row: function, input, input case, expected result.
		$cases = [
			// Case actually changes:
			[ 'ucfirst', 'ı', 'lower', 'I' ],
			[ 'ucfirst', 'i', 'lower', 'İ' ],
			[ 'lcfirst', 'I', 'upper', 'ı' ],
			[ 'lcfirst', 'İ', 'upper', 'i' ],
			// Input already has the requested case:
			[ 'ucfirst', 'I', 'upper', 'I' ],
			[ 'ucfirst', 'İ', 'upper', 'İ' ],
			[ 'lcfirst', 'ı', 'lower', 'ı' ],
			[ 'lcfirst', 'i', 'lower', 'i' ],
			// Real example taken from T30040 using
			// https://tr.wikipedia.org/wiki/%C4%B0Phone
			[ 'lcfirst', 'iPhone', 'lower', 'iPhone' ],
			// Valid in Turkish, but a different word if IPhone is English!
			[ 'lcfirst', 'IPhone', 'upper', 'ıPhone' ],
		];
		return $cases;
	}
}
| gpl-3.0 |
syslover33/ctank | java/android-sdk-linux_r24.4.1_src/sources/android-23/com/android/multidexlegacytestapp/manymethods/Big051.java | 53476 | /*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.android.multidexlegacytestapp.manymethods;
public class Big051 {
public int get0() {
return 0;
}
public int get1() {
return 1;
}
public int get2() {
return 2;
}
public int get3() {
return 3;
}
public int get4() {
return 4;
}
public int get5() {
return 5;
}
public int get6() {
return 6;
}
public int get7() {
return 7;
}
public int get8() {
return 8;
}
public int get9() {
return 9;
}
public int get10() {
return 10;
}
public int get11() {
return 11;
}
public int get12() {
return 12;
}
public int get13() {
return 13;
}
public int get14() {
return 14;
}
public int get15() {
return 15;
}
public int get16() {
return 16;
}
public int get17() {
return 17;
}
public int get18() {
return 18;
}
public int get19() {
return 19;
}
public int get20() {
return 20;
}
public int get21() {
return 21;
}
public int get22() {
return 22;
}
public int get23() {
return 23;
}
public int get24() {
return 24;
}
public int get25() {
return 25;
}
public int get26() {
return 26;
}
public int get27() {
return 27;
}
public int get28() {
return 28;
}
public int get29() {
return 29;
}
public int get30() {
return 30;
}
public int get31() {
return 31;
}
public int get32() {
return 32;
}
public int get33() {
return 33;
}
public int get34() {
return 34;
}
public int get35() {
return 35;
}
public int get36() {
return 36;
}
public int get37() {
return 37;
}
public int get38() {
return 38;
}
public int get39() {
return 39;
}
public int get40() {
return 40;
}
public int get41() {
return 41;
}
public int get42() {
return 42;
}
public int get43() {
return 43;
}
public int get44() {
return 44;
}
public int get45() {
return 45;
}
public int get46() {
return 46;
}
public int get47() {
return 47;
}
public int get48() {
return 48;
}
public int get49() {
return 49;
}
public int get50() {
return 50;
}
public int get51() {
return 51;
}
public int get52() {
return 52;
}
public int get53() {
return 53;
}
public int get54() {
return 54;
}
public int get55() {
return 55;
}
public int get56() {
return 56;
}
public int get57() {
return 57;
}
public int get58() {
return 58;
}
public int get59() {
return 59;
}
public int get60() {
return 60;
}
public int get61() {
return 61;
}
public int get62() {
return 62;
}
public int get63() {
return 63;
}
public int get64() {
return 64;
}
public int get65() {
return 65;
}
public int get66() {
return 66;
}
public int get67() {
return 67;
}
public int get68() {
return 68;
}
public int get69() {
return 69;
}
public int get70() {
return 70;
}
public int get71() {
return 71;
}
public int get72() {
return 72;
}
public int get73() {
return 73;
}
public int get74() {
return 74;
}
public int get75() {
return 75;
}
public int get76() {
return 76;
}
public int get77() {
return 77;
}
public int get78() {
return 78;
}
public int get79() {
return 79;
}
public int get80() {
return 80;
}
public int get81() {
return 81;
}
public int get82() {
return 82;
}
public int get83() {
return 83;
}
public int get84() {
return 84;
}
public int get85() {
return 85;
}
public int get86() {
return 86;
}
public int get87() {
return 87;
}
public int get88() {
return 88;
}
public int get89() {
return 89;
}
public int get90() {
return 90;
}
public int get91() {
return 91;
}
public int get92() {
return 92;
}
public int get93() {
return 93;
}
public int get94() {
return 94;
}
public int get95() {
return 95;
}
public int get96() {
return 96;
}
public int get97() {
return 97;
}
public int get98() {
return 98;
}
public int get99() {
return 99;
}
public int get100() {
return 100;
}
public int get101() {
return 101;
}
public int get102() {
return 102;
}
public int get103() {
return 103;
}
public int get104() {
return 104;
}
public int get105() {
return 105;
}
public int get106() {
return 106;
}
public int get107() {
return 107;
}
public int get108() {
return 108;
}
public int get109() {
return 109;
}
public int get110() {
return 110;
}
public int get111() {
return 111;
}
public int get112() {
return 112;
}
public int get113() {
return 113;
}
public int get114() {
return 114;
}
public int get115() {
return 115;
}
public int get116() {
return 116;
}
public int get117() {
return 117;
}
public int get118() {
return 118;
}
public int get119() {
return 119;
}
public int get120() {
return 120;
}
public int get121() {
return 121;
}
public int get122() {
return 122;
}
public int get123() {
return 123;
}
public int get124() {
return 124;
}
public int get125() {
return 125;
}
public int get126() {
return 126;
}
public int get127() {
return 127;
}
public int get128() {
return 128;
}
public int get129() {
return 129;
}
public int get130() {
return 130;
}
public int get131() {
return 131;
}
public int get132() {
return 132;
}
public int get133() {
return 133;
}
public int get134() {
return 134;
}
public int get135() {
return 135;
}
public int get136() {
return 136;
}
public int get137() {
return 137;
}
public int get138() {
return 138;
}
public int get139() {
return 139;
}
public int get140() {
return 140;
}
public int get141() {
return 141;
}
public int get142() {
return 142;
}
public int get143() {
return 143;
}
public int get144() {
return 144;
}
public int get145() {
return 145;
}
public int get146() {
return 146;
}
public int get147() {
return 147;
}
public int get148() {
return 148;
}
public int get149() {
return 149;
}
public int get150() {
return 150;
}
public int get151() {
return 151;
}
public int get152() {
return 152;
}
public int get153() {
return 153;
}
public int get154() {
return 154;
}
public int get155() {
return 155;
}
public int get156() {
return 156;
}
public int get157() {
return 157;
}
public int get158() {
return 158;
}
public int get159() {
return 159;
}
public int get160() {
return 160;
}
public int get161() {
return 161;
}
public int get162() {
return 162;
}
public int get163() {
return 163;
}
public int get164() {
return 164;
}
public int get165() {
return 165;
}
public int get166() {
return 166;
}
public int get167() {
return 167;
}
public int get168() {
return 168;
}
public int get169() {
return 169;
}
public int get170() {
return 170;
}
public int get171() {
return 171;
}
public int get172() {
return 172;
}
public int get173() {
return 173;
}
public int get174() {
return 174;
}
public int get175() {
return 175;
}
public int get176() {
return 176;
}
public int get177() {
return 177;
}
public int get178() {
return 178;
}
public int get179() {
return 179;
}
public int get180() {
return 180;
}
public int get181() {
return 181;
}
public int get182() {
return 182;
}
public int get183() {
return 183;
}
public int get184() {
return 184;
}
public int get185() {
return 185;
}
public int get186() {
return 186;
}
public int get187() {
return 187;
}
public int get188() {
return 188;
}
public int get189() {
return 189;
}
public int get190() {
return 190;
}
public int get191() {
return 191;
}
public int get192() {
return 192;
}
public int get193() {
return 193;
}
public int get194() {
return 194;
}
public int get195() {
return 195;
}
public int get196() {
return 196;
}
public int get197() {
return 197;
}
public int get198() {
return 198;
}
public int get199() {
return 199;
}
public int get200() {
return 200;
}
public int get201() {
return 201;
}
public int get202() {
return 202;
}
public int get203() {
return 203;
}
public int get204() {
return 204;
}
public int get205() {
return 205;
}
public int get206() {
return 206;
}
public int get207() {
return 207;
}
public int get208() {
return 208;
}
public int get209() {
return 209;
}
public int get210() {
return 210;
}
public int get211() {
return 211;
}
public int get212() {
return 212;
}
public int get213() {
return 213;
}
public int get214() {
return 214;
}
public int get215() {
return 215;
}
public int get216() {
return 216;
}
public int get217() {
return 217;
}
public int get218() {
return 218;
}
public int get219() {
return 219;
}
public int get220() {
return 220;
}
public int get221() {
return 221;
}
public int get222() {
return 222;
}
public int get223() {
return 223;
}
public int get224() {
return 224;
}
public int get225() {
return 225;
}
public int get226() {
return 226;
}
public int get227() {
return 227;
}
public int get228() {
return 228;
}
public int get229() {
return 229;
}
public int get230() {
return 230;
}
public int get231() {
return 231;
}
public int get232() {
return 232;
}
public int get233() {
return 233;
}
public int get234() {
return 234;
}
public int get235() {
return 235;
}
public int get236() {
return 236;
}
public int get237() {
return 237;
}
public int get238() {
return 238;
}
public int get239() {
return 239;
}
public int get240() {
return 240;
}
public int get241() {
return 241;
}
public int get242() {
return 242;
}
public int get243() {
return 243;
}
public int get244() {
return 244;
}
public int get245() {
return 245;
}
public int get246() {
return 246;
}
public int get247() {
return 247;
}
public int get248() {
return 248;
}
public int get249() {
return 249;
}
public int get250() {
return 250;
}
public int get251() {
return 251;
}
public int get252() {
return 252;
}
public int get253() {
return 253;
}
public int get254() {
return 254;
}
public int get255() {
return 255;
}
public int get256() {
return 256;
}
public int get257() {
return 257;
}
public int get258() {
return 258;
}
public int get259() {
return 259;
}
public int get260() {
return 260;
}
public int get261() {
return 261;
}
public int get262() {
return 262;
}
public int get263() {
return 263;
}
public int get264() {
return 264;
}
public int get265() {
return 265;
}
public int get266() {
return 266;
}
public int get267() {
return 267;
}
public int get268() {
return 268;
}
public int get269() {
return 269;
}
public int get270() {
return 270;
}
public int get271() {
return 271;
}
public int get272() {
return 272;
}
public int get273() {
return 273;
}
public int get274() {
return 274;
}
public int get275() {
return 275;
}
public int get276() {
return 276;
}
public int get277() {
return 277;
}
public int get278() {
return 278;
}
public int get279() {
return 279;
}
public int get280() {
return 280;
}
public int get281() {
return 281;
}
public int get282() {
return 282;
}
public int get283() {
return 283;
}
public int get284() {
return 284;
}
public int get285() {
return 285;
}
public int get286() {
return 286;
}
public int get287() {
return 287;
}
public int get288() {
return 288;
}
public int get289() {
return 289;
}
public int get290() {
return 290;
}
public int get291() {
return 291;
}
public int get292() {
return 292;
}
public int get293() {
return 293;
}
public int get294() {
return 294;
}
public int get295() {
return 295;
}
public int get296() {
return 296;
}
public int get297() {
return 297;
}
public int get298() {
return 298;
}
public int get299() {
return 299;
}
public int get300() {
return 300;
}
public int get301() {
return 301;
}
public int get302() {
return 302;
}
public int get303() {
return 303;
}
public int get304() {
return 304;
}
public int get305() {
return 305;
}
public int get306() {
return 306;
}
public int get307() {
return 307;
}
public int get308() {
return 308;
}
public int get309() {
return 309;
}
public int get310() {
return 310;
}
public int get311() {
return 311;
}
public int get312() {
return 312;
}
public int get313() {
return 313;
}
public int get314() {
return 314;
}
public int get315() {
return 315;
}
public int get316() {
return 316;
}
public int get317() {
return 317;
}
public int get318() {
return 318;
}
public int get319() {
return 319;
}
public int get320() {
return 320;
}
public int get321() {
return 321;
}
public int get322() {
return 322;
}
public int get323() {
return 323;
}
public int get324() {
return 324;
}
public int get325() {
return 325;
}
public int get326() {
return 326;
}
public int get327() {
return 327;
}
public int get328() {
return 328;
}
public int get329() {
return 329;
}
public int get330() {
return 330;
}
public int get331() {
return 331;
}
public int get332() {
return 332;
}
public int get333() {
return 333;
}
public int get334() {
return 334;
}
public int get335() {
return 335;
}
public int get336() {
return 336;
}
public int get337() {
return 337;
}
public int get338() {
return 338;
}
public int get339() {
return 339;
}
public int get340() {
return 340;
}
public int get341() {
return 341;
}
public int get342() {
return 342;
}
public int get343() {
return 343;
}
public int get344() {
return 344;
}
public int get345() {
return 345;
}
public int get346() {
return 346;
}
public int get347() {
return 347;
}
public int get348() {
return 348;
}
public int get349() {
return 349;
}
public int get350() {
return 350;
}
public int get351() {
return 351;
}
public int get352() {
return 352;
}
public int get353() {
return 353;
}
public int get354() {
return 354;
}
public int get355() {
return 355;
}
public int get356() {
return 356;
}
public int get357() {
return 357;
}
public int get358() {
return 358;
}
public int get359() {
return 359;
}
public int get360() {
return 360;
}
public int get361() {
return 361;
}
public int get362() {
return 362;
}
public int get363() {
return 363;
}
public int get364() {
return 364;
}
public int get365() {
return 365;
}
public int get366() {
return 366;
}
public int get367() {
return 367;
}
public int get368() {
return 368;
}
public int get369() {
return 369;
}
public int get370() {
return 370;
}
public int get371() {
return 371;
}
public int get372() {
return 372;
}
public int get373() {
return 373;
}
public int get374() {
return 374;
}
public int get375() {
return 375;
}
public int get376() {
return 376;
}
public int get377() {
return 377;
}
public int get378() {
return 378;
}
public int get379() {
return 379;
}
public int get380() {
return 380;
}
public int get381() {
return 381;
}
public int get382() {
return 382;
}
public int get383() {
return 383;
}
public int get384() {
return 384;
}
public int get385() {
return 385;
}
public int get386() {
return 386;
}
public int get387() {
return 387;
}
public int get388() {
return 388;
}
public int get389() {
return 389;
}
public int get390() {
return 390;
}
public int get391() {
return 391;
}
public int get392() {
return 392;
}
public int get393() {
return 393;
}
public int get394() {
return 394;
}
public int get395() {
return 395;
}
public int get396() {
return 396;
}
public int get397() {
return 397;
}
public int get398() {
return 398;
}
public int get399() {
return 399;
}
public int get400() {
return 400;
}
public int get401() {
return 401;
}
public int get402() {
return 402;
}
public int get403() {
return 403;
}
public int get404() {
return 404;
}
public int get405() {
return 405;
}
public int get406() {
return 406;
}
public int get407() {
return 407;
}
public int get408() {
return 408;
}
public int get409() {
return 409;
}
public int get410() {
return 410;
}
public int get411() {
return 411;
}
public int get412() {
return 412;
}
public int get413() {
return 413;
}
public int get414() {
return 414;
}
public int get415() {
return 415;
}
public int get416() {
return 416;
}
public int get417() {
return 417;
}
public int get418() {
return 418;
}
public int get419() {
return 419;
}
public int get420() {
return 420;
}
public int get421() {
return 421;
}
public int get422() {
return 422;
}
public int get423() {
return 423;
}
public int get424() {
return 424;
}
public int get425() {
return 425;
}
public int get426() {
return 426;
}
public int get427() {
return 427;
}
public int get428() {
return 428;
}
public int get429() {
return 429;
}
public int get430() {
return 430;
}
public int get431() {
return 431;
}
public int get432() {
return 432;
}
public int get433() {
return 433;
}
public int get434() {
return 434;
}
public int get435() {
return 435;
}
public int get436() {
return 436;
}
public int get437() {
return 437;
}
public int get438() {
return 438;
}
public int get439() {
return 439;
}
public int get440() {
return 440;
}
public int get441() {
return 441;
}
public int get442() {
return 442;
}
public int get443() {
return 443;
}
public int get444() {
return 444;
}
public int get445() {
return 445;
}
public int get446() {
return 446;
}
public int get447() {
return 447;
}
public int get448() {
return 448;
}
public int get449() {
return 449;
}
public int get450() {
return 450;
}
public int get451() {
return 451;
}
public int get452() {
return 452;
}
public int get453() {
return 453;
}
public int get454() {
return 454;
}
public int get455() {
return 455;
}
public int get456() {
return 456;
}
public int get457() {
return 457;
}
public int get458() {
return 458;
}
public int get459() {
return 459;
}
public int get460() {
return 460;
}
public int get461() {
return 461;
}
public int get462() {
return 462;
}
public int get463() {
return 463;
}
public int get464() {
return 464;
}
public int get465() {
return 465;
}
public int get466() {
return 466;
}
public int get467() {
return 467;
}
public int get468() {
return 468;
}
public int get469() {
return 469;
}
public int get470() {
return 470;
}
public int get471() {
return 471;
}
public int get472() {
return 472;
}
public int get473() {
return 473;
}
public int get474() {
return 474;
}
public int get475() {
return 475;
}
public int get476() {
return 476;
}
public int get477() {
return 477;
}
public int get478() {
return 478;
}
public int get479() {
return 479;
}
public int get480() {
return 480;
}
public int get481() {
return 481;
}
public int get482() {
return 482;
}
public int get483() {
return 483;
}
public int get484() {
return 484;
}
public int get485() {
return 485;
}
public int get486() {
return 486;
}
public int get487() {
return 487;
}
public int get488() {
return 488;
}
public int get489() {
return 489;
}
public int get490() {
return 490;
}
public int get491() {
return 491;
}
public int get492() {
return 492;
}
public int get493() {
return 493;
}
public int get494() {
return 494;
}
public int get495() {
return 495;
}
public int get496() {
return 496;
}
public int get497() {
return 497;
}
public int get498() {
return 498;
}
public int get499() {
return 499;
}
public int get500() {
return 500;
}
public int get501() {
return 501;
}
public int get502() {
return 502;
}
public int get503() {
return 503;
}
public int get504() {
return 504;
}
public int get505() {
return 505;
}
public int get506() {
return 506;
}
public int get507() {
return 507;
}
public int get508() {
return 508;
}
public int get509() {
return 509;
}
public int get510() {
return 510;
}
public int get511() {
return 511;
}
public int get512() {
return 512;
}
public int get513() {
return 513;
}
public int get514() {
return 514;
}
public int get515() {
return 515;
}
public int get516() {
return 516;
}
public int get517() {
return 517;
}
public int get518() {
return 518;
}
public int get519() {
return 519;
}
public int get520() {
return 520;
}
public int get521() {
return 521;
}
public int get522() {
return 522;
}
public int get523() {
return 523;
}
public int get524() {
return 524;
}
public int get525() {
return 525;
}
public int get526() {
return 526;
}
public int get527() {
return 527;
}
public int get528() {
return 528;
}
public int get529() {
return 529;
}
public int get530() {
return 530;
}
public int get531() {
return 531;
}
public int get532() {
return 532;
}
public int get533() {
return 533;
}
public int get534() {
return 534;
}
public int get535() {
return 535;
}
public int get536() {
return 536;
}
public int get537() {
return 537;
}
public int get538() {
return 538;
}
public int get539() {
return 539;
}
public int get540() {
return 540;
}
public int get541() {
return 541;
}
public int get542() {
return 542;
}
public int get543() {
return 543;
}
public int get544() {
return 544;
}
public int get545() {
return 545;
}
public int get546() {
return 546;
}
public int get547() {
return 547;
}
public int get548() {
return 548;
}
public int get549() {
return 549;
}
public int get550() {
return 550;
}
public int get551() {
return 551;
}
public int get552() {
return 552;
}
public int get553() {
return 553;
}
public int get554() {
return 554;
}
public int get555() {
return 555;
}
public int get556() {
return 556;
}
public int get557() {
return 557;
}
public int get558() {
return 558;
}
public int get559() {
return 559;
}
public int get560() {
return 560;
}
public int get561() {
return 561;
}
public int get562() {
return 562;
}
public int get563() {
return 563;
}
public int get564() {
return 564;
}
public int get565() {
return 565;
}
public int get566() {
return 566;
}
public int get567() {
return 567;
}
public int get568() {
return 568;
}
public int get569() {
return 569;
}
public int get570() {
return 570;
}
public int get571() {
return 571;
}
public int get572() {
return 572;
}
public int get573() {
return 573;
}
public int get574() {
return 574;
}
public int get575() {
return 575;
}
public int get576() {
return 576;
}
public int get577() {
return 577;
}
public int get578() {
return 578;
}
public int get579() {
return 579;
}
public int get580() {
return 580;
}
public int get581() {
return 581;
}
public int get582() {
return 582;
}
public int get583() {
return 583;
}
public int get584() {
return 584;
}
public int get585() {
return 585;
}
public int get586() {
return 586;
}
public int get587() {
return 587;
}
public int get588() {
return 588;
}
public int get589() {
return 589;
}
public int get590() {
return 590;
}
public int get591() {
return 591;
}
public int get592() {
return 592;
}
public int get593() {
return 593;
}
public int get594() {
return 594;
}
public int get595() {
return 595;
}
public int get596() {
return 596;
}
public int get597() {
return 597;
}
public int get598() {
return 598;
}
public int get599() {
return 599;
}
public int get600() {
return 600;
}
public int get601() {
return 601;
}
public int get602() {
return 602;
}
public int get603() {
return 603;
}
public int get604() {
return 604;
}
public int get605() {
return 605;
}
public int get606() {
return 606;
}
public int get607() {
return 607;
}
public int get608() {
return 608;
}
public int get609() {
return 609;
}
public int get610() {
return 610;
}
public int get611() {
return 611;
}
public int get612() {
return 612;
}
public int get613() {
return 613;
}
public int get614() {
return 614;
}
public int get615() {
return 615;
}
public int get616() {
return 616;
}
public int get617() {
return 617;
}
public int get618() {
return 618;
}
public int get619() {
return 619;
}
public int get620() {
return 620;
}
public int get621() {
return 621;
}
public int get622() {
return 622;
}
public int get623() {
return 623;
}
public int get624() {
return 624;
}
public int get625() {
return 625;
}
public int get626() {
return 626;
}
public int get627() {
return 627;
}
public int get628() {
return 628;
}
public int get629() {
return 629;
}
public int get630() {
return 630;
}
public int get631() {
return 631;
}
public int get632() {
return 632;
}
public int get633() {
return 633;
}
public int get634() {
return 634;
}
public int get635() {
return 635;
}
public int get636() {
return 636;
}
public int get637() {
return 637;
}
public int get638() {
return 638;
}
public int get639() {
return 639;
}
public int get640() {
return 640;
}
public int get641() {
return 641;
}
public int get642() {
return 642;
}
public int get643() {
return 643;
}
public int get644() {
return 644;
}
public int get645() {
return 645;
}
public int get646() {
return 646;
}
public int get647() {
return 647;
}
public int get648() {
return 648;
}
public int get649() {
return 649;
}
public int get650() {
return 650;
}
public int get651() {
return 651;
}
public int get652() {
return 652;
}
public int get653() {
return 653;
}
public int get654() {
return 654;
}
public int get655() {
return 655;
}
public int get656() {
return 656;
}
public int get657() {
return 657;
}
public int get658() {
return 658;
}
public int get659() {
return 659;
}
public int get660() {
return 660;
}
public int get661() {
return 661;
}
public int get662() {
return 662;
}
public int get663() {
return 663;
}
public int get664() {
return 664;
}
public int get665() {
return 665;
}
public int get666() {
return 666;
}
public int get667() {
return 667;
}
public int get668() {
return 668;
}
public int get669() {
return 669;
}
public int get670() {
return 670;
}
public int get671() {
return 671;
}
public int get672() {
return 672;
}
public int get673() {
return 673;
}
public int get674() {
return 674;
}
public int get675() {
return 675;
}
public int get676() {
return 676;
}
public int get677() {
return 677;
}
public int get678() {
return 678;
}
public int get679() {
return 679;
}
public int get680() {
return 680;
}
public int get681() {
return 681;
}
public int get682() {
return 682;
}
public int get683() {
return 683;
}
public int get684() {
return 684;
}
public int get685() {
return 685;
}
public int get686() {
return 686;
}
public int get687() {
return 687;
}
public int get688() {
return 688;
}
public int get689() {
return 689;
}
public int get690() {
return 690;
}
public int get691() {
return 691;
}
public int get692() {
return 692;
}
public int get693() {
return 693;
}
public int get694() {
return 694;
}
public int get695() {
return 695;
}
public int get696() {
return 696;
}
public int get697() {
return 697;
}
public int get698() {
return 698;
}
public int get699() {
return 699;
}
public int get700() {
return 700;
}
public int get701() {
return 701;
}
public int get702() {
return 702;
}
public int get703() {
return 703;
}
public int get704() {
return 704;
}
public int get705() {
return 705;
}
public int get706() {
return 706;
}
public int get707() {
return 707;
}
public int get708() {
return 708;
}
public int get709() {
return 709;
}
public int get710() {
return 710;
}
public int get711() {
return 711;
}
public int get712() {
return 712;
}
public int get713() {
return 713;
}
public int get714() {
return 714;
}
public int get715() {
return 715;
}
public int get716() {
return 716;
}
public int get717() {
return 717;
}
public int get718() {
return 718;
}
public int get719() {
return 719;
}
public int get720() {
return 720;
}
public int get721() {
return 721;
}
public int get722() {
return 722;
}
public int get723() {
return 723;
}
public int get724() {
return 724;
}
public int get725() {
return 725;
}
public int get726() {
return 726;
}
public int get727() {
return 727;
}
public int get728() {
return 728;
}
public int get729() {
return 729;
}
public int get730() {
return 730;
}
public int get731() {
return 731;
}
public int get732() {
return 732;
}
public int get733() {
return 733;
}
public int get734() {
return 734;
}
public int get735() {
return 735;
}
public int get736() {
return 736;
}
public int get737() {
return 737;
}
public int get738() {
return 738;
}
public int get739() {
return 739;
}
public int get740() {
return 740;
}
public int get741() {
return 741;
}
public int get742() {
return 742;
}
public int get743() {
return 743;
}
public int get744() {
return 744;
}
public int get745() {
return 745;
}
public int get746() {
return 746;
}
public int get747() {
return 747;
}
public int get748() {
return 748;
}
public int get749() {
return 749;
}
public int get750() {
return 750;
}
public int get751() {
return 751;
}
public int get752() {
return 752;
}
public int get753() {
return 753;
}
public int get754() {
return 754;
}
public int get755() {
return 755;
}
public int get756() {
return 756;
}
public int get757() {
return 757;
}
public int get758() {
return 758;
}
public int get759() {
return 759;
}
public int get760() {
return 760;
}
public int get761() {
return 761;
}
public int get762() {
return 762;
}
public int get763() {
return 763;
}
public int get764() {
return 764;
}
public int get765() {
return 765;
}
public int get766() {
return 766;
}
public int get767() {
return 767;
}
public int get768() {
return 768;
}
public int get769() {
return 769;
}
public int get770() {
return 770;
}
public int get771() {
return 771;
}
public int get772() {
return 772;
}
public int get773() {
return 773;
}
public int get774() {
return 774;
}
public int get775() {
return 775;
}
public int get776() {
return 776;
}
public int get777() {
return 777;
}
public int get778() {
return 778;
}
public int get779() {
return 779;
}
public int get780() {
return 780;
}
public int get781() {
return 781;
}
public int get782() {
return 782;
}
public int get783() {
return 783;
}
public int get784() {
return 784;
}
public int get785() {
return 785;
}
public int get786() {
return 786;
}
public int get787() {
return 787;
}
public int get788() {
return 788;
}
public int get789() {
return 789;
}
public int get790() {
return 790;
}
public int get791() {
return 791;
}
public int get792() {
return 792;
}
public int get793() {
return 793;
}
public int get794() {
return 794;
}
public int get795() {
return 795;
}
public int get796() {
return 796;
}
public int get797() {
return 797;
}
public int get798() {
return 798;
}
public int get799() {
return 799;
}
public int get800() {
return 800;
}
public int get801() {
return 801;
}
public int get802() {
return 802;
}
public int get803() {
return 803;
}
public int get804() {
return 804;
}
public int get805() {
return 805;
}
public int get806() {
return 806;
}
public int get807() {
return 807;
}
public int get808() {
return 808;
}
public int get809() {
return 809;
}
public int get810() {
return 810;
}
public int get811() {
return 811;
}
public int get812() {
return 812;
}
public int get813() {
return 813;
}
public int get814() {
return 814;
}
public int get815() {
return 815;
}
public int get816() {
return 816;
}
public int get817() {
return 817;
}
public int get818() {
return 818;
}
public int get819() {
return 819;
}
public int get820() {
return 820;
}
public int get821() {
return 821;
}
public int get822() {
return 822;
}
public int get823() {
return 823;
}
public int get824() {
return 824;
}
public int get825() {
return 825;
}
public int get826() {
return 826;
}
public int get827() {
return 827;
}
public int get828() {
return 828;
}
public int get829() {
return 829;
}
public int get830() {
return 830;
}
public int get831() {
return 831;
}
public int get832() {
return 832;
}
public int get833() {
return 833;
}
public int get834() {
return 834;
}
public int get835() {
return 835;
}
public int get836() {
return 836;
}
public int get837() {
return 837;
}
public int get838() {
return 838;
}
public int get839() {
return 839;
}
public int get840() {
return 840;
}
public int get841() {
return 841;
}
public int get842() {
return 842;
}
public int get843() {
return 843;
}
public int get844() {
return 844;
}
public int get845() {
return 845;
}
public int get846() {
return 846;
}
public int get847() {
return 847;
}
public int get848() {
return 848;
}
public int get849() {
return 849;
}
public int get850() {
return 850;
}
public int get851() {
return 851;
}
public int get852() {
return 852;
}
public int get853() {
return 853;
}
public int get854() {
return 854;
}
public int get855() {
return 855;
}
public int get856() {
return 856;
}
public int get857() {
return 857;
}
public int get858() {
return 858;
}
public int get859() {
return 859;
}
public int get860() {
return 860;
}
public int get861() {
return 861;
}
public int get862() {
return 862;
}
public int get863() {
return 863;
}
public int get864() {
return 864;
}
public int get865() {
return 865;
}
public int get866() {
return 866;
}
public int get867() {
return 867;
}
public int get868() {
return 868;
}
public int get869() {
return 869;
}
public int get870() {
return 870;
}
public int get871() {
return 871;
}
public int get872() {
return 872;
}
public int get873() {
return 873;
}
public int get874() {
return 874;
}
public int get875() {
return 875;
}
public int get876() {
return 876;
}
public int get877() {
return 877;
}
public int get878() {
return 878;
}
public int get879() {
return 879;
}
public int get880() {
return 880;
}
public int get881() {
return 881;
}
public int get882() {
return 882;
}
public int get883() {
return 883;
}
public int get884() {
return 884;
}
public int get885() {
return 885;
}
public int get886() {
return 886;
}
public int get887() {
return 887;
}
public int get888() {
return 888;
}
public int get889() {
return 889;
}
public int get890() {
return 890;
}
public int get891() {
return 891;
}
public int get892() {
return 892;
}
public int get893() {
return 893;
}
public int get894() {
return 894;
}
public int get895() {
return 895;
}
public int get896() {
return 896;
}
public int get897() {
return 897;
}
public int get898() {
return 898;
}
public int get899() {
return 899;
}
public int get900() {
return 900;
}
public int get901() {
return 901;
}
public int get902() {
return 902;
}
public int get903() {
return 903;
}
public int get904() {
return 904;
}
public int get905() {
return 905;
}
public int get906() {
return 906;
}
public int get907() {
return 907;
}
public int get908() {
return 908;
}
public int get909() {
return 909;
}
public int get910() {
return 910;
}
public int get911() {
return 911;
}
public int get912() {
return 912;
}
public int get913() {
return 913;
}
public int get914() {
return 914;
}
public int get915() {
return 915;
}
public int get916() {
return 916;
}
public int get917() {
return 917;
}
public int get918() {
return 918;
}
public int get919() {
return 919;
}
public int get920() {
return 920;
}
public int get921() {
return 921;
}
public int get922() {
return 922;
}
public int get923() {
return 923;
}
public int get924() {
return 924;
}
public int get925() {
return 925;
}
public int get926() {
return 926;
}
public int get927() {
return 927;
}
public int get928() {
return 928;
}
public int get929() {
return 929;
}
public int get930() {
return 930;
}
public int get931() {
return 931;
}
public int get932() {
return 932;
}
public int get933() {
return 933;
}
public int get934() {
return 934;
}
public int get935() {
return 935;
}
public int get936() {
return 936;
}
public int get937() {
return 937;
}
public int get938() {
return 938;
}
public int get939() {
return 939;
}
public int get940() {
return 940;
}
public int get941() {
return 941;
}
public int get942() {
return 942;
}
public int get943() {
return 943;
}
public int get944() {
return 944;
}
public int get945() {
return 945;
}
public int get946() {
return 946;
}
public int get947() {
return 947;
}
public int get948() {
return 948;
}
public int get949() {
return 949;
}
public int get950() {
return 950;
}
public int get951() {
return 951;
}
public int get952() {
return 952;
}
public int get953() {
return 953;
}
public int get954() {
return 954;
}
public int get955() {
return 955;
}
public int get956() {
return 956;
}
public int get957() {
return 957;
}
public int get958() {
return 958;
}
public int get959() {
return 959;
}
public int get960() {
return 960;
}
public int get961() {
return 961;
}
public int get962() {
return 962;
}
public int get963() {
return 963;
}
public int get964() {
return 964;
}
public int get965() {
return 965;
}
public int get966() {
return 966;
}
public int get967() {
return 967;
}
public int get968() {
return 968;
}
public int get969() {
return 969;
}
public int get970() {
return 970;
}
public int get971() {
return 971;
}
public int get972() {
return 972;
}
public int get973() {
return 973;
}
public int get974() {
return 974;
}
public int get975() {
return 975;
}
public int get976() {
return 976;
}
public int get977() {
return 977;
}
public int get978() {
return 978;
}
public int get979() {
return 979;
}
public int get980() {
return 980;
}
public int get981() {
return 981;
}
public int get982() {
return 982;
}
public int get983() {
return 983;
}
public int get984() {
return 984;
}
public int get985() {
return 985;
}
public int get986() {
return 986;
}
public int get987() {
return 987;
}
public int get988() {
return 988;
}
public int get989() {
return 989;
}
public int get990() {
return 990;
}
public int get991() {
return 991;
}
public int get992() {
return 992;
}
public int get993() {
return 993;
}
public int get994() {
return 994;
}
public int get995() {
return 995;
}
public int get996() {
return 996;
}
public int get997() {
return 997;
}
public int get998() {
return 998;
}
public int get999() {
return 999;
}
}
| gpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/raw/GL/SGIX/ycrcb_subsample.py | 503 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_SGIX_ycrcb_subsample'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_SGIX_ycrcb_subsample',error_checker=_errors._error_checker)
| gpl-3.0 |
Jeffrey-P-McAteer/openemr | library/classes/rulesets/Cqm/reports/NFQ_0043/InitialPatientPopulation.php | 774 | <?php
// Copyright (C) 2011 Ken Chapple <[email protected]>
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
class NFQ_0043_InitialPatientPopulation implements CqmFilterIF
{
public function getTitle()
{
return "Initial Patient Population";
}
public function test(CqmPatient $patient, $beginDate, $endDate)
{
if ($patient->calculateAgeOnDate($beginDate) >= 65 && (Helper::checkEncounter(Encounter::ENC_OUTPATIENT, $patient, $beginDate, $endDate))) {
return true;
}
return false;
}
}
| gpl-3.0 |
khs26/pele | playground/oxdna/dump.py | 3744 | import os
from optparse import OptionParser
import oxdnagmin_ as GMIN
from pele.storage.database import Database
import numpy as np
from pele.utils import rotations
from pele.utils.rbtools import CoordsAdapter
TO_PDB="python /home/vr274/opt/oxDNA/UTILS/traj2vis.py pdb %s gmindnatop"
def export_xyz(fl, coords):
ca = CoordsAdapter(nrigid=coords.size/6, coords = coords)
fl.write("%d\n\n"%(2*ca.nrigid))
for i in xrange(ca.nrigid):
a = np.dot(rotations.aa2mx(ca.rotRigid[i]), np.array([1., 0., 0.]))
x_back = ca.posRigid[i] - 0.4*a # backbone bead
x_stack = ca.posRigid[i] + 0.4*a
fl.write("C %f %f %f\n"%(x_back[0], x_back[1], x_back[2]))
fl.write("H %f %f %f\n"%(x_stack[0], x_stack[1], x_stack[2]))
def main():
# add some program options
parser = OptionParser(usage = "usage: %prog [options] storage")
parser.add_option("--write-disconnect",
dest="writeDPS", action="store_true",
help="generate min.dat and ts.dat to use with disconnectDPS")
parser.add_option("-m",
dest="writeMinima", action="store_true",
help="dump minima to screen")
parser.add_option("-t",
dest="writeTS", action="store_true",
help="dump transition states to screen")
parser.add_option("--coords",
dest="writeCoords", action="store_true",
help="export coordinates files")
parser.add_option("--xyz",
dest="writeXYZ", action="store_true",
help="export xyz files")
(options, args) = parser.parse_args()
# print help if no input file is given
if(len(args) != 1):
parser.print_help()
exit(-1)
db = Database(db=args[0])
if(options.writeMinima):
print "List of minima:"
print "---------------"
for m in db.minima():
print "%f\t\tid %d"%(m.energy, m._id)
print "END\n"
if(options.writeTS):
print "List of transition states:"
print "--------------------------"
for ts in db.transition_states():
print "%d\t<->\t%d\tid %d\tenergies %f %f %f"%\
(ts.minimum1._id, ts.minimum2._id, ts._id, ts.minimum1.energy, ts.energy, ts.minimum2.energy)
print "END\n"
if(options.writeDPS):
writeDPS(db)
if(options.writeCoords):
GMIN.initialize()
i=0
for m in db.minima():
i+=1
filename = "lowest/lowest%03d.cif"%(i)
print "minimum",i, "energy",m.energy,"to",filename
GMIN.userpot_dump(filename, m.coords)
if(not TO_PDB is None):
os.system(TO_PDB%filename)
np.savetxt("lowest/coords_%03d.txt"%(i), m.coords)
if(options.writeXYZ):
traj=open("lowest/traj.xyz", "w")
i=0
for m in db.minima():
i+=1
filename = "lowest/lowest%03d.xyz"%(i)
print "minimum",i, "energy",m.energy,"to",filename
export_xyz(open(filename, "w"), m.coords)
export_xyz(traj, m.coords)
traj.close()
def writeDPS(db):
minindex={}
out = open("min.data", "w")
i=1
for m in db.minima():
minindex[m]=i
i+=1
out.write("%f 0.0 1 0.0 0.0 0.0\n"%(m.energy))
out = open("ts.data", "w")
ti=0
for ts in db.transition_states():
ti+=1
out.write("%f 0.0 1 %d %d 0.0 0.0 0.0\n"%(ts.energy, minindex[ts.minimum1], minindex[ts.minimum2]))
print "Written %d minima and %d transition states"%(i, ti)
if __name__ == "__main__":
main() | gpl-3.0 |
DragonZX/fdm2 | Gecko.SDK/2.0/bin/components/nsSearchSuggestions.js | 19842 | /* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Google Suggest Autocomplete Implementation for Firefox.
*
* The Initial Developer of the Original Code is Google Inc.
* Portions created by the Initial Developer are Copyright (C) 2006
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Ben Goodger <[email protected]>
* Mike Connor <[email protected]>
* Joe Hughes <[email protected]>
* Pamela Greene <[email protected]>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
const SEARCH_RESPONSE_SUGGESTION_JSON = "application/x-suggestions+json";
const BROWSER_SUGGEST_PREF = "browser.search.suggest.enabled";
const XPCOM_SHUTDOWN_TOPIC = "xpcom-shutdown";
const NS_PREFBRANCH_PREFCHANGE_TOPIC_ID = "nsPref:changed";
const SEARCH_BUNDLE = "chrome://global/locale/search/search.properties";
const Cc = Components.classes;
const Ci = Components.interfaces;
const Cr = Components.results;
const Cu = Components.utils;
const HTTP_OK = 200;
const HTTP_INTERNAL_SERVER_ERROR = 500;
const HTTP_BAD_GATEWAY = 502;
const HTTP_SERVICE_UNAVAILABLE = 503;
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://gre/modules/nsFormAutoCompleteResult.jsm");
/**
* SuggestAutoComplete is a base class that implements nsIAutoCompleteSearch
* and can collect results for a given search by using the search URL supplied
* by the subclass. We do it this way since the AutoCompleteController in
* Mozilla requires a unique XPCOM Service for every search provider, even if
* the logic for two providers is identical.
* @constructor
*/
function SuggestAutoComplete() {
this._init();
}
SuggestAutoComplete.prototype = {
_init: function() {
this._addObservers();
this._loadSuggestPref();
},
/**
* this._strings is the string bundle for message internationalization.
*/
get _strings() {
if (!this.__strings) {
var sbs = Cc["@mozilla.org/intl/stringbundle;1"].
getService(Ci.nsIStringBundleService);
this.__strings = sbs.createBundle(SEARCH_BUNDLE);
}
return this.__strings;
},
__strings: null,
/**
* Search suggestions will be shown if this._suggestEnabled is true.
*/
_loadSuggestPref: function SAC_loadSuggestPref() {
var prefService = Cc["@mozilla.org/preferences-service;1"].
getService(Ci.nsIPrefBranch);
this._suggestEnabled = prefService.getBoolPref(BROWSER_SUGGEST_PREF);
},
_suggestEnabled: null,
/*************************************************************************
* Server request backoff implementation fields below
* These allow us to throttle requests if the server is getting hammered.
**************************************************************************/
/**
* This is an array that contains the timestamps (in unixtime) of
* the last few backoff-triggering errors.
*/
_serverErrorLog: [],
/**
* If we receive this number of backoff errors within the amount of time
* specified by _serverErrorPeriod, then we initiate backoff.
*/
_maxErrorsBeforeBackoff: 3,
/**
* If we receive enough consecutive errors (where "enough" is defined by
* _maxErrorsBeforeBackoff above) within this time period,
* we trigger the backoff behavior.
*/
_serverErrorPeriod: 600000, // 10 minutes in milliseconds
/**
* If we get another backoff error immediately after timeout, we increase the
* backoff to (2 x old period) + this value.
*/
_serverErrorTimeoutIncrement: 600000, // 10 minutes in milliseconds
/**
* The current amount of time to wait before trying a server request
* after receiving a backoff error.
*/
_serverErrorTimeout: 0,
/**
* Time (in unixtime) after which we're allowed to try requesting again.
*/
_nextRequestTime: 0,
/**
* The last engine we requested against (so that we can tell if the
* user switched engines).
*/
_serverErrorEngine: null,
/**
* The XMLHttpRequest object.
* @private
*/
_request: null,
/**
* The object implementing nsIAutoCompleteObserver that we notify when
* we have found results
* @private
*/
_listener: null,
/**
* If this is true, we'll integrate form history results with the
* suggest results.
*/
_includeFormHistory: true,
/**
* True if a request for remote suggestions was sent. This is used to
* differentiate between the "_request is null because the request has
* already returned a result" and "_request is null because no request was
* sent" cases.
*/
_sentSuggestRequest: false,
/**
* This is the callback for the suggest timeout timer.
*/
notify: function SAC_notify(timer) {
// FIXME: bug 387341
// Need to break the cycle between us and the timer.
this._formHistoryTimer = null;
// If this._listener is null, we've already sent out suggest results, so
// nothing left to do here.
if (!this._listener)
return;
// Otherwise, the XMLHTTPRequest for suggest results is taking too long,
// so send out the form history results and cancel the request.
this._listener.onSearchResult(this, this._formHistoryResult);
this._reset();
},
  /**
   * How long (in ms) to wait for the suggest server before giving up and
   * showing only the local form history results (see onSearchResult / notify).
   */
  _suggestionTimeout: 500,
/**
* This is the callback for that the form history service uses to
* send us results.
*/
onSearchResult: function SAC_onSearchResult(search, result) {
this._formHistoryResult = result;
if (this._request) {
// We still have a pending request, wait a bit to give it a chance to
// finish.
this._formHistoryTimer = Cc["@mozilla.org/timer;1"].
createInstance(Ci.nsITimer);
this._formHistoryTimer.initWithCallback(this, this._suggestionTimeout,
Ci.nsITimer.TYPE_ONE_SHOT);
} else if (!this._sentSuggestRequest) {
// We didn't send a request, so just send back the form history results.
this._listener.onSearchResult(this, this._formHistoryResult);
this._reset();
}
},
  /**
   * This is the URI that the last suggest request was sent to.
   */
  _suggestURI: null,

  /**
   * Autocomplete results from the form history service get stored here
   * (null before the first search completes).
   */
  _formHistoryResult: null,

  /**
   * This holds the one-shot suggest server timeout timer, if applicable
   * (see onSearchResult); null while no timer is pending.
   */
  _formHistoryTimer: null,
/**
* This clears all the per-request state.
*/
_reset: function SAC_reset() {
// Don't let go of our listener and form history result if the timer is
// still pending, the timer will call _reset() when it fires.
if (!this._formHistoryTimer) {
this._listener = null;
this._formHistoryResult = null;
}
this._request = null;
},
/**
* This sends an autocompletion request to the form history service,
* which will call onSearchResults with the results of the query.
*/
_startHistorySearch: function SAC_SHSearch(searchString, searchParam) {
var formHistory =
Cc["@mozilla.org/autocomplete/search;1?name=form-history"].
createInstance(Ci.nsIAutoCompleteSearch);
formHistory.startSearch(searchString, searchParam, this._formHistoryResult, this);
},
/**
* Makes a note of the fact that we've received a backoff-triggering
* response, so that we can adjust the backoff behavior appropriately.
*/
_noteServerError: function SAC__noteServeError() {
var currentTime = Date.now();
this._serverErrorLog.push(currentTime);
if (this._serverErrorLog.length > this._maxErrorsBeforeBackoff)
this._serverErrorLog.shift();
if ((this._serverErrorLog.length == this._maxErrorsBeforeBackoff) &&
((currentTime - this._serverErrorLog[0]) < this._serverErrorPeriod)) {
// increase timeout, and then don't request until timeout is over
this._serverErrorTimeout = (this._serverErrorTimeout * 2) +
this._serverErrorTimeoutIncrement;
this._nextRequestTime = currentTime + this._serverErrorTimeout;
}
},
/**
* Resets the backoff behavior; called when we get a successful response.
*/
_clearServerErrors: function SAC__clearServerErrors() {
this._serverErrorLog = [];
this._serverErrorTimeout = 0;
this._nextRequestTime = 0;
},
/**
* This checks whether we should send a server request (i.e. we're not
* in a error-triggered backoff period.
*
* @private
*/
_okToRequest: function SAC__okToRequest() {
return Date.now() > this._nextRequestTime;
},
/**
* This checks to see if the new search engine is different
* from the previous one, and if so clears any error state that might
* have accumulated for the old engine.
*
* @param engine The engine that the suggestion request would be sent to.
* @private
*/
_checkForEngineSwitch: function SAC__checkForEngineSwitch(engine) {
if (engine == this._serverErrorEngine)
return;
// must've switched search providers, clear old errors
this._serverErrorEngine = engine;
this._clearServerErrors();
},
/**
* This returns true if the status code of the HTTP response
* represents a backoff-triggering error.
*
* @param status The status code from the HTTP response
* @private
*/
_isBackoffError: function SAC__isBackoffError(status) {
return ((status == HTTP_INTERNAL_SERVER_ERROR) ||
(status == HTTP_BAD_GATEWAY) ||
(status == HTTP_SERVICE_UNAVAILABLE));
},
  /**
   * Called when the 'readyState' of the XMLHttpRequest changes. We only care
   * about state 4 (COMPLETED) - handle the response data.
   * @private
   */
  onReadyStateChange: function() {
    // xxx use the real const here
    if (!this._request || this._request.readyState != 4)
      return;
    try {
      var status = this._request.status;
    } catch (e) {
      // The XML HttpRequest can throw NS_ERROR_NOT_AVAILABLE.
      return;
    }
    // Server asked us to back off: record the error and bail with no results.
    if (this._isBackoffError(status)) {
      this._noteServerError();
      return;
    }
    var responseText = this._request.responseText;
    if (status != HTTP_OK || responseText == "")
      return;
    // A successful response resets the backoff bookkeeping.
    this._clearServerErrors();
    // Expected payload shape: [queryString, [suggestion, ...], ...]
    var serverResults = JSON.parse(responseText);
    var searchString = serverResults[0] || "";
    var results = serverResults[1] || [];
    var comments = []; // "comments" column values for suggestions
    var historyResults = [];
    var historyComments = [];
    // If form history is enabled and has results, add them to the list.
    if (this._includeFormHistory && this._formHistoryResult &&
        (this._formHistoryResult.searchResult ==
         Ci.nsIAutoCompleteResult.RESULT_SUCCESS)) {
      for (var i = 0; i < this._formHistoryResult.matchCount; ++i) {
        var term = this._formHistoryResult.getValueAt(i);
        // we don't want things to appear in both history and suggestions
        var dupIndex = results.indexOf(term);
        if (dupIndex != -1)
          results.splice(dupIndex, 1);
        historyResults.push(term);
        historyComments.push("");
      }
    }
    // fill out the comment column for the suggestions
    for (var i = 0; i < results.length; ++i)
      comments.push("");
    // if we have any suggestions, put a label at the top
    if (comments.length > 0)
      comments[0] = this._strings.GetStringFromName("suggestion_label");
    // now put the history results above the suggestions
    var finalResults = historyResults.concat(results);
    var finalComments = historyComments.concat(comments);
    // Notify the FE of our new results
    this.onResultsReady(searchString, finalResults, finalComments,
                        this._formHistoryResult);
    // Reset our state for next time.
    this._reset();
  },
/**
* Notifies the front end of new results.
* @param searchString the user's query string
* @param results an array of results to the search
* @param comments an array of metadata corresponding to the results
* @private
*/
onResultsReady: function(searchString, results, comments,
formHistoryResult) {
if (this._listener) {
var result = new FormAutoCompleteResult(
searchString,
Ci.nsIAutoCompleteResult.RESULT_SUCCESS,
0,
"",
results,
results,
comments,
formHistoryResult);
this._listener.onSearchResult(this, result);
// Null out listener to make sure we don't notify it twice, in case our
// timer callback still hasn't run.
this._listener = null;
}
},
  /**
   * Initiates the search result gathering process. Part of
   * nsIAutoCompleteSearch implementation.
   *
   * @param searchString    the user's query string
   * @param searchParam     unused, "an extra parameter"; even though
   *                        this parameter and the next are unused, pass
   *                        them through in case the form history
   *                        service wants them
   * @param previousResult  unused, a client-cached store of the previous
   *                        generated resultset for faster searching.
   * @param listener        object implementing nsIAutoCompleteObserver which
   *                        we notify when results are ready.
   */
  startSearch: function(searchString, searchParam, previousResult, listener) {
    // Don't reuse a previous form history result when it no longer applies.
    if (!previousResult)
      this._formHistoryResult = null;
    var searchService = Cc["@mozilla.org/browser/search-service;1"].
                        getService(Ci.nsIBrowserSearchService);
    // If there's an existing request, stop it. There is no smart filtering
    // here as there is when looking through history/form data because the
    // result set returned by the server is different for every typed value -
    // "ocean breathes" does not return a subset of the results returned for
    // "ocean", for example. This does nothing if there is no current request.
    this.stopSearch();
    this._listener = listener;
    var engine = searchService.currentEngine;
    this._checkForEngineSwitch(engine);
    if (!searchString ||
        !this._suggestEnabled ||
        !engine.supportsResponseType(SEARCH_RESPONSE_SUGGESTION_JSON) ||
        !this._okToRequest()) {
      // We have an empty search string (user pressed down arrow to see
      // history), or search suggestions are disabled, or the current engine
      // has no suggest functionality, or we're in backoff mode; so just use
      // local history.
      this._sentSuggestRequest = false;
      this._startHistorySearch(searchString, searchParam);
      return;
    }
    // Actually do the search
    this._request = Cc["@mozilla.org/xmlextras/xmlhttprequest;1"].
                    createInstance(Ci.nsIXMLHttpRequest);
    var submission = engine.getSubmission(searchString,
                                          SEARCH_RESPONSE_SUGGESTION_JSON);
    this._suggestURI = submission.uri;
    var method = (submission.postData ? "POST" : "GET");
    this._request.open(method, this._suggestURI.spec, true);
    // The load listener silently swallows cert/SSL errors for this
    // background request (see SearchSuggestLoadListener below).
    this._request.channel.notificationCallbacks = new SearchSuggestLoadListener();
    // Close over |this| so the XHR callback can reach the component.
    var self = this;
    function onReadyStateChange() {
      self.onReadyStateChange();
    }
    this._request.onreadystatechange = onReadyStateChange;
    this._request.send(submission.postData);
    if (this._includeFormHistory) {
      this._sentSuggestRequest = true;
      this._startHistorySearch(searchString, searchParam);
    }
  },
/**
* Ends the search result gathering process. Part of nsIAutoCompleteSearch
* implementation.
*/
stopSearch: function() {
if (this._request) {
this._request.abort();
this._reset();
}
},
/**
* nsIObserver
*/
observe: function SAC_observe(aSubject, aTopic, aData) {
switch (aTopic) {
case NS_PREFBRANCH_PREFCHANGE_TOPIC_ID:
this._loadSuggestPref();
break;
case XPCOM_SHUTDOWN_TOPIC:
this._removeObservers();
break;
}
},
_addObservers: function SAC_addObservers() {
var prefService2 = Cc["@mozilla.org/preferences-service;1"].
getService(Ci.nsIPrefBranch2);
prefService2.addObserver(BROWSER_SUGGEST_PREF, this, false);
var os = Cc["@mozilla.org/observer-service;1"].
getService(Ci.nsIObserverService);
os.addObserver(this, XPCOM_SHUTDOWN_TOPIC, false);
},
_removeObservers: function SAC_removeObservers() {
var prefService2 = Cc["@mozilla.org/preferences-service;1"].
getService(Ci.nsIPrefBranch2);
prefService2.removeObserver(BROWSER_SUGGEST_PREF, this);
var os = Cc["@mozilla.org/observer-service;1"].
getService(Ci.nsIObserverService);
os.removeObserver(this, XPCOM_SHUTDOWN_TOPIC);
},
// nsISupports
QueryInterface: XPCOMUtils.generateQI([Ci.nsIAutoCompleteSearch,
Ci.nsIAutoCompleteObserver])
};
/**
 * Notification callbacks object installed on the suggest request's channel.
 * NOTE(review): both error callbacks return true, which suppresses the
 * certificate/SSL error UI for these background requests — the request
 * simply fails silently. Confirm this is the intended security posture.
 */
function SearchSuggestLoadListener() {
}
SearchSuggestLoadListener.prototype = {
  // nsIBadCertListener2 - true means "error handled, don't show UI".
  notifyCertProblem: function SSLL_certProblem(socketInfo, status, targetSite) {
    return true;
  },
  // nsISSLErrorListener - likewise suppresses the SSL error notification.
  notifySSLError: function SSLL_SSLError(socketInfo, error, targetSite) {
    return true;
  },
  // nsIInterfaceRequestor
  getInterface: function SSLL_getInterface(iid) {
    return this.QueryInterface(iid);
  },
  // nsISupports
  QueryInterface: XPCOMUtils.generateQI([Ci.nsIBadCertListener2,
                                         Ci.nsISSLErrorListener,
                                         Ci.nsIInterfaceRequestor])
};
/**
 * SearchSuggestAutoComplete is a service implementation that handles suggest
 * results specific to web searches.
 * @constructor
 */
function SearchSuggestAutoComplete() {
  // This calls _init() in the parent class (SuggestAutoComplete) via the
  // prototype, below.
  this._init();
}
SearchSuggestAutoComplete.prototype = {
  classID: Components.ID("{aa892eb4-ffbf-477d-9f9a-06c995ae9f27}"),
  __proto__: SuggestAutoComplete.prototype,
  // The suggest URL comes from the current engine's submission, not from a
  // fixed service URL, so this stays empty.
  serviceURL: ""
};
// XPCOM component registration: export the factory for this module.
var component = [SearchSuggestAutoComplete];
var NSGetFactory = XPCOMUtils.generateNSGetFactory(component);
| gpl-3.0 |
levisre/de4dot | de4dot.code/renamer/DerivedFrom.cs | 1869 | /*
Copyright (C) 2011-2015 [email protected]
This file is part of de4dot.
de4dot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
de4dot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with de4dot. If not, see <http://www.gnu.org/licenses/>.
*/
using System;
using System.Collections.Generic;
using de4dot.code.renamer.asmmodules;
namespace de4dot.code.renamer {
public class DerivedFrom {
Dictionary<string, bool> classNames = new Dictionary<string, bool>(StringComparer.Ordinal);
Dictionary<MTypeDef, bool> results = new Dictionary<MTypeDef, bool>();
public DerivedFrom(string className) => AddName(className);
public DerivedFrom(string[] classNames) {
foreach (var className in classNames)
AddName(className);
}
void AddName(string className) => classNames[className] = true;
public bool Check(MTypeDef type) => Check(type, 0);
public bool Check(MTypeDef type, int recurseCount) {
if (recurseCount >= 100)
return false;
if (results.ContainsKey(type))
return results[type];
bool val;
if (classNames.ContainsKey(type.TypeDef.FullName))
val = true;
else if (type.baseType == null) {
if (type.TypeDef.BaseType != null)
val = classNames.ContainsKey(type.TypeDef.BaseType.FullName);
else
val = false;
}
else
val = Check(type.baseType.typeDef, recurseCount + 1);
results[type] = val;
return val;
}
}
}
| gpl-3.0 |
Ni3znajomy/AMXBans_amx | Web/include/geoip2/phpunit/phpunit/src/TextUI/Command.php | 32860 | <?php
/**
* PHPUnit
*
* Copyright (c) 2001-2014, Sebastian Bergmann <[email protected]>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Sebastian Bergmann nor the names of his
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @package PHPUnit
* @subpackage TextUI
* @author Sebastian Bergmann <[email protected]>
* @copyright 2001-2014 Sebastian Bergmann <[email protected]>
* @license http://www.opensource.org/licenses/BSD-3-Clause The BSD 3-Clause License
* @link http://www.phpunit.de/
* @since File available since Release 3.0.0
*/
/**
* A TestRunner for the Command Line Interface (CLI)
* PHP SAPI Module.
*
* @package PHPUnit
* @subpackage TextUI
* @author Sebastian Bergmann <[email protected]>
* @copyright 2001-2014 Sebastian Bergmann <[email protected]>
* @license http://www.opensource.org/licenses/BSD-3-Clause The BSD 3-Clause License
* @link http://www.phpunit.de/
* @since Class available since Release 3.0.0
*/
class PHPUnit_TextUI_Command
{
    /**
     * Parsed CLI state that is eventually handed to the TestRunner.
     * @var array
     */
    protected $arguments = array(
        'listGroups'              => false,
        'loader'                  => null,
        'useDefaultConfiguration' => true
    );

    /**
     * Raw getopt() output: [0] => parsed options, [1] => remaining arguments.
     * @var array
     */
    protected $options = array();

    /**
     * Recognised long options. A null value means the option is handled
     * inline in handleArguments(); a method name means that method is invoked
     * (subclass hook). A trailing "=" marks a required argument, "==" an
     * optional one.
     * @var array
     */
    protected $longOptions = array(
        'colors'                  => null,
        'bootstrap='              => null,
        'configuration='          => null,
        'coverage-clover='        => null,
        'coverage-crap4j='        => null,
        'coverage-html='          => null,
        'coverage-php='           => null,
        'coverage-text=='         => null,
        'coverage-xml='           => null,
        'debug'                   => null,
        'exclude-group='          => null,
        'filter='                 => null,
        'testsuite='              => null,
        'group='                  => null,
        'help'                    => null,
        'include-path='           => null,
        'list-groups'             => null,
        'loader='                 => null,
        'log-json='               => null,
        'log-junit='              => null,
        'log-tap='                => null,
        'process-isolation'       => null,
        'repeat='                 => null,
        'stderr'                  => null,
        'stop-on-error'           => null,
        'stop-on-failure'         => null,
        'stop-on-incomplete'      => null,
        'stop-on-risky'           => null,
        'stop-on-skipped'         => null,
        'report-useless-tests'    => null,
        'strict-coverage'         => null,
        'disallow-test-output'    => null,
        'enforce-time-limit'      => null,
        'disallow-todo-tests'     => null,
        'strict'                  => null,
        'tap'                     => null,
        'testdox'                 => null,
        'testdox-html='           => null,
        'testdox-text='           => null,
        'test-suffix='            => null,
        'no-configuration'        => null,
        'no-globals-backup'       => null,
        'printer='                => null,
        'static-backup'           => null,
        'verbose'                 => null,
        'version'                 => null
    );

    /**
     * Whether the version banner has already been printed.
     * @var boolean
     */
    private $versionStringPrinted = false;
/**
* @param boolean $exit
*/
public static function main($exit = true)
{
$command = new static;
return $command->run($_SERVER['argv'], $exit);
}
    /**
     * Parses the arguments, builds the test suite and executes it.
     *
     * @param  array   $argv raw CLI arguments (argv[0] included)
     * @param  boolean $exit whether to exit() with the result code
     * @return integer      SUCCESS_EXIT, FAILURE_EXIT or EXCEPTION_EXIT
     *                      (only reached when $exit is false)
     */
    public function run(array $argv, $exit = true)
    {
        $this->handleArguments($argv);
        $runner = $this->createRunner();
        // An object test was already materialised (e.g. from the XML
        // configuration); otherwise ask the runner to resolve it.
        if (is_object($this->arguments['test']) &&
            $this->arguments['test'] instanceof PHPUnit_Framework_Test) {
            $suite = $this->arguments['test'];
        } else {
            $suite = $runner->getTest(
                $this->arguments['test'],
                $this->arguments['testFile'],
                $this->arguments['testSuffixes']
            );
        }
        // --list-groups: print the groups and stop without running anything.
        if ($this->arguments['listGroups']) {
            $this->printVersionString();
            print "Available test group(s):\n";
            $groups = $suite->getGroups();
            sort($groups);
            foreach ($groups as $group) {
                print " - $group\n";
            }
            if ($exit) {
                exit(PHPUnit_TextUI_TestRunner::SUCCESS_EXIT);
            } else {
                return PHPUnit_TextUI_TestRunner::SUCCESS_EXIT;
            }
        }
        // The runner must not see these pseudo-arguments.
        unset($this->arguments['test']);
        unset($this->arguments['testFile']);
        try {
            $result = $runner->doRun($suite, $this->arguments);
        } catch (PHPUnit_Framework_Exception $e) {
            print $e->getMessage() . "\n";
        }
        // Map the TestResult to a process exit code; if doRun() threw,
        // $result is unset and we report EXCEPTION_EXIT.
        $ret = PHPUnit_TextUI_TestRunner::FAILURE_EXIT;
        if (isset($result) && $result->wasSuccessful()) {
            $ret = PHPUnit_TextUI_TestRunner::SUCCESS_EXIT;
        } elseif (!isset($result) || $result->errorCount() > 0) {
            $ret = PHPUnit_TextUI_TestRunner::EXCEPTION_EXIT;
        }
        if ($exit) {
            exit($ret);
        } else {
            return $ret;
        }
    }
    /**
     * Create a TestRunner, override in subclasses.
     *
     * @return PHPUnit_TextUI_TestRunner
     * @since  Method available since Release 3.6.0
     */
    protected function createRunner()
    {
        // The loader was resolved by handleArguments() (null means the
        // runner's standard test suite loader).
        return new PHPUnit_TextUI_TestRunner($this->arguments['loader']);
    }
/**
* Handles the command-line arguments.
*
* A child class of PHPUnit_TextUI_Command can hook into the argument
* parsing by adding the switch(es) to the $longOptions array and point to a
* callback method that handles the switch(es) in the child class like this
*
* <code>
* <?php
* class MyCommand extends PHPUnit_TextUI_Command
* {
* public function __construct()
* {
* // my-switch won't accept a value, it's an on/off
* $this->longOptions['my-switch'] = 'myHandler';
* // my-secondswitch will accept a value - note the equals sign
* $this->longOptions['my-secondswitch='] = 'myOtherHandler';
* }
*
* // --my-switch -> myHandler()
* protected function myHandler()
* {
* }
*
* // --my-secondswitch foo -> myOtherHandler('foo')
* protected function myOtherHandler ($value)
* {
* }
*
* // You will also need this - the static keyword in the
* // PHPUnit_TextUI_Command will mean that it'll be
* // PHPUnit_TextUI_Command that gets instantiated,
* // not MyCommand
* public static function main($exit = true)
* {
* $command = new static;
*
* return $command->run($_SERVER['argv'], $exit);
* }
*
* }
* </code>
*
* @param array $argv
*/
protected function handleArguments(array $argv)
{
if (defined('__PHPUNIT_PHAR__')) {
$this->longOptions['selfupdate'] = null;
$this->longOptions['self-update'] = null;
}
try {
$this->options = PHPUnit_Util_Getopt::getopt(
$argv,
'd:c:hv',
array_keys($this->longOptions)
);
} catch (PHPUnit_Framework_Exception $e) {
$this->showError($e->getMessage());
}
foreach ($this->options[0] as $option) {
switch ($option[0]) {
case '--colors': {
$this->arguments['colors'] = true;
}
break;
case '--bootstrap': {
$this->arguments['bootstrap'] = $option[1];
}
break;
case 'c':
case '--configuration': {
$this->arguments['configuration'] = $option[1];
}
break;
case '--coverage-clover': {
$this->arguments['coverageClover'] = $option[1];
}
break;
case '--coverage-crap4j': {
$this->arguments['coverageCrap4J'] = $option[1];
}
break;
case '--coverage-html': {
$this->arguments['coverageHtml'] = $option[1];
}
break;
case '--coverage-php': {
$this->arguments['coveragePHP'] = $option[1];
}
break;
case '--coverage-text': {
if ($option[1] === null) {
$option[1] = 'php://stdout';
}
$this->arguments['coverageText'] = $option[1];
$this->arguments['coverageTextShowUncoveredFiles'] = false;
$this->arguments['coverageTextShowOnlySummary'] = false;
}
break;
case '--coverage-xml': {
$this->arguments['coverageXml'] = $option[1];
}
break;
case 'd': {
$ini = explode('=', $option[1]);
if (isset($ini[0])) {
if (isset($ini[1])) {
ini_set($ini[0], $ini[1]);
} else {
ini_set($ini[0], true);
}
}
}
break;
case '--debug': {
$this->arguments['debug'] = true;
}
break;
case 'h':
case '--help': {
$this->showHelp();
exit(PHPUnit_TextUI_TestRunner::SUCCESS_EXIT);
}
break;
case '--filter': {
$this->arguments['filter'] = $option[1];
}
break;
case '--testsuite': {
$this->arguments['testsuite'] = $option[1];
}
break;
case '--group': {
$this->arguments['groups'] = explode(',', $option[1]);
}
break;
case '--exclude-group': {
$this->arguments['excludeGroups'] = explode(
',', $option[1]
);
}
break;
case '--test-suffix': {
$this->arguments['testSuffixes'] = explode(
',', $option[1]
);
}
break;
case '--include-path': {
$includePath = $option[1];
}
break;
case '--list-groups': {
$this->arguments['listGroups'] = true;
}
break;
case '--printer': {
$this->arguments['printer'] = $option[1];
}
break;
case '--loader': {
$this->arguments['loader'] = $option[1];
}
break;
case '--log-json': {
$this->arguments['jsonLogfile'] = $option[1];
}
break;
case '--log-junit': {
$this->arguments['junitLogfile'] = $option[1];
}
break;
case '--log-tap': {
$this->arguments['tapLogfile'] = $option[1];
}
break;
case '--process-isolation': {
$this->arguments['processIsolation'] = true;
}
break;
case '--repeat': {
$this->arguments['repeat'] = (int) $option[1];
}
break;
case '--stderr': {
$this->arguments['stderr'] = true;
}
break;
case '--stop-on-error': {
$this->arguments['stopOnError'] = true;
}
break;
case '--stop-on-failure': {
$this->arguments['stopOnFailure'] = true;
}
break;
case '--stop-on-incomplete': {
$this->arguments['stopOnIncomplete'] = true;
}
break;
case '--stop-on-risky': {
$this->arguments['stopOnRisky'] = true;
}
break;
case '--stop-on-skipped': {
$this->arguments['stopOnSkipped'] = true;
}
break;
case '--tap': {
$this->arguments['printer'] = new PHPUnit_Util_Log_TAP;
}
break;
case '--testdox': {
$this->arguments['printer'] = new PHPUnit_Util_TestDox_ResultPrinter_Text;
}
break;
case '--testdox-html': {
$this->arguments['testdoxHTMLFile'] = $option[1];
}
break;
case '--testdox-text': {
$this->arguments['testdoxTextFile'] = $option[1];
}
break;
case '--no-configuration': {
$this->arguments['useDefaultConfiguration'] = false;
}
break;
case '--no-globals-backup': {
$this->arguments['backupGlobals'] = false;
}
break;
case '--static-backup': {
$this->arguments['backupStaticAttributes'] = true;
}
break;
case 'v':
case '--verbose': {
$this->arguments['verbose'] = true;
}
break;
case '--version': {
$this->printVersionString();
exit(PHPUnit_TextUI_TestRunner::SUCCESS_EXIT);
}
break;
case '--report-useless-tests': {
$this->arguments['reportUselessTests'] = true;
}
break;
case '--strict-coverage': {
$this->arguments['strictCoverage'] = true;
}
break;
case '--disallow-test-output': {
$this->arguments['disallowTestOutput'] = true;
}
break;
case '--enforce-time-limit': {
$this->arguments['enforceTimeLimit'] = true;
}
break;
case '--disallow-todo-tests': {
$this->arguments['disallowTodoAnnotatedTests'] = true;
}
break;
case '--strict': {
$this->arguments['reportUselessTests'] = true;
$this->arguments['strictCoverage'] = true;
$this->arguments['disallowTestOutput'] = true;
$this->arguments['enforceTimeLimit'] = true;
$this->arguments['disallowTodoAnnotatedTests'] = true;
}
break;
case '--selfupdate':
case '--self-update': {
$this->handleSelfUpdate();
}
break;
default: {
$optionName = str_replace('--', '', $option[0]);
if (isset($this->longOptions[$optionName])) {
$handler = $this->longOptions[$optionName];
} elseif (isset($this->longOptions[$optionName . '='])) {
$handler = $this->longOptions[$optionName . '='];
}
if (isset($handler) && is_callable(array($this, $handler))) {
$this->$handler($option[1]);
}
}
}
}
$this->handleCustomTestSuite();
if (!isset($this->arguments['test'])) {
if (isset($this->options[1][0])) {
$this->arguments['test'] = $this->options[1][0];
}
if (isset($this->options[1][1])) {
$this->arguments['testFile'] = realpath($this->options[1][1]);
} else {
$this->arguments['testFile'] = '';
}
if (isset($this->arguments['test']) &&
is_file($this->arguments['test']) &&
substr($this->arguments['test'], -5, 5) != '.phpt') {
$this->arguments['testFile'] = realpath($this->arguments['test']);
$this->arguments['test'] = substr($this->arguments['test'], 0, strrpos($this->arguments['test'], '.'));
}
}
if (!isset($this->arguments['testSuffixes'])) {
$this->arguments['testSuffixes'] = array('Test.php', '.phpt');
}
if (isset($includePath)) {
ini_set(
'include_path',
$includePath . PATH_SEPARATOR . ini_get('include_path')
);
}
if (isset($this->arguments['bootstrap'])) {
$this->handleBootstrap($this->arguments['bootstrap']);
}
if (isset($this->arguments['printer']) &&
is_string($this->arguments['printer'])) {
$this->arguments['printer'] = $this->handlePrinter($this->arguments['printer']);
}
if ($this->arguments['loader'] !== null) {
$this->arguments['loader'] = $this->handleLoader($this->arguments['loader']);
}
if (isset($this->arguments['configuration']) &&
is_dir($this->arguments['configuration'])) {
$configurationFile = $this->arguments['configuration'] .
'/phpunit.xml';
if (file_exists($configurationFile)) {
$this->arguments['configuration'] = realpath(
$configurationFile
);
} elseif (file_exists($configurationFile . '.dist')) {
$this->arguments['configuration'] = realpath(
$configurationFile . '.dist'
);
}
} elseif (!isset($this->arguments['configuration']) &&
$this->arguments['useDefaultConfiguration']) {
if (file_exists('phpunit.xml')) {
$this->arguments['configuration'] = realpath('phpunit.xml');
} elseif (file_exists('phpunit.xml.dist')) {
$this->arguments['configuration'] = realpath(
'phpunit.xml.dist'
);
}
}
if (isset($this->arguments['configuration'])) {
try {
$configuration = PHPUnit_Util_Configuration::getInstance(
$this->arguments['configuration']
);
} catch (Exception $e) {
print $e->getMessage() . "\n";
exit(PHPUnit_TextUI_TestRunner::FAILURE_EXIT);
}
$phpunit = $configuration->getPHPUnitConfiguration();
$configuration->handlePHPConfiguration();
if (!isset($this->arguments['bootstrap']) && isset($phpunit['bootstrap'])) {
$this->handleBootstrap($phpunit['bootstrap']);
}
/**
* Issue #657
*/
if (isset($phpunit['stderr']) && $phpunit['stderr'] == true) {
$this->arguments['printer'] = new PHPUnit_TextUI_ResultPrinter(
'php://stderr',
isset($this->arguments['verbose']) ? $this->arguments['verbose'] : false
);
}
if (isset($phpunit['printerClass'])) {
if (isset($phpunit['printerFile'])) {
$file = $phpunit['printerFile'];
} else {
$file = '';
}
$this->arguments['printer'] = $this->handlePrinter(
$phpunit['printerClass'], $file
);
}
if (isset($phpunit['testSuiteLoaderClass'])) {
if (isset($phpunit['testSuiteLoaderFile'])) {
$file = $phpunit['testSuiteLoaderFile'];
} else {
$file = '';
}
$this->arguments['loader'] = $this->handleLoader(
$phpunit['testSuiteLoaderClass'], $file
);
}
$browsers = $configuration->getSeleniumBrowserConfiguration();
if (!empty($browsers) &&
class_exists('PHPUnit_Extensions_SeleniumTestCase')) {
PHPUnit_Extensions_SeleniumTestCase::$browsers = $browsers;
}
if (!isset($this->arguments['test'])) {
$testSuite = $configuration->getTestSuiteConfiguration(isset($this->arguments['testsuite']) ? $this->arguments['testsuite'] : null);
if ($testSuite !== null) {
$this->arguments['test'] = $testSuite;
}
}
}
if (isset($this->arguments['test']) && is_string($this->arguments['test']) && substr($this->arguments['test'], -5, 5) == '.phpt') {
$test = new PHPUnit_Extensions_PhptTestCase($this->arguments['test']);
$this->arguments['test'] = new PHPUnit_Framework_TestSuite;
$this->arguments['test']->addTest($test);
}
if (!isset($this->arguments['test']) ||
(isset($this->arguments['testDatabaseLogRevision']) && !isset($this->arguments['testDatabaseDSN']))) {
$this->showHelp();
exit(PHPUnit_TextUI_TestRunner::EXCEPTION_EXIT);
}
}
/**
* Handles the loading of the PHPUnit_Runner_TestSuiteLoader implementation.
*
* @param string $loaderClass
* @param string $loaderFile
* @return PHPUnit_Runner_TestSuiteLoader
*/
protected function handleLoader($loaderClass, $loaderFile = '')
{
if (!class_exists($loaderClass, false)) {
if ($loaderFile == '') {
$loaderFile = PHPUnit_Util_Filesystem::classNameToFilename(
$loaderClass
);
}
$loaderFile = stream_resolve_include_path($loaderFile);
if ($loaderFile) {
require $loaderFile;
}
}
if (class_exists($loaderClass, false)) {
$class = new ReflectionClass($loaderClass);
if ($class->implementsInterface('PHPUnit_Runner_TestSuiteLoader') &&
$class->isInstantiable()) {
return $class->newInstance();
}
}
if ($loaderClass == 'PHPUnit_Runner_StandardTestSuiteLoader') {
return;
}
$this->showError(
sprintf(
'Could not use "%s" as loader.',
$loaderClass
)
);
}
/**
* Handles the loading of the PHPUnit_Util_Printer implementation.
*
* @param string $printerClass
* @param string $printerFile
* @return PHPUnit_Util_Printer
*/
protected function handlePrinter($printerClass, $printerFile = '')
{
if (!class_exists($printerClass, false)) {
if ($printerFile == '') {
$printerFile = PHPUnit_Util_Filesystem::classNameToFilename(
$printerClass
);
}
$printerFile = stream_resolve_include_path($printerFile);
if ($printerFile) {
require $printerFile;
}
}
if (class_exists($printerClass)) {
$class = new ReflectionClass($printerClass);
if ($class->implementsInterface('PHPUnit_Framework_TestListener') &&
$class->isSubclassOf('PHPUnit_Util_Printer') &&
$class->isInstantiable()) {
if ($class->isSubclassOf('PHPUnit_TextUI_ResultPrinter')) {
return $printerClass;
}
return $class->newInstance();
}
}
$this->showError(
sprintf(
'Could not use "%s" as printer.',
$printerClass
)
);
}
/**
* Loads a bootstrap file.
*
* @param string $filename
*/
protected function handleBootstrap($filename)
{
try {
PHPUnit_Util_Fileloader::checkAndLoad($filename);
} catch (PHPUnit_Framework_Exception $e) {
$this->showError($e->getMessage());
}
}
    /**
     * Replaces the running PHPUnit PHAR with the latest release of the
     * current release channel, downloaded over TLS and validated against a
     * bundled CA certificate. Exits the process in all cases.
     *
     * @since Method available since Release 4.0.0
     */
    protected function handleSelfUpdate()
    {
        $this->printVersionString();
        if (!extension_loaded('openssl')) {
            print "The OpenSSL extension is not loaded.\n";
            exit(PHPUnit_TextUI_TestRunner::EXCEPTION_EXIT);
        }
        $remoteFilename = sprintf(
            'https://phar.phpunit.de/phpunit%s.phar',
            PHPUnit_Runner_Version::getReleaseChannel()
        );
        $localFilename = realpath($_SERVER['argv'][0]);
        $tempFilename = basename($localFilename, '.phar') . '-temp.phar';
        // Workaround for https://bugs.php.net/bug.php?id=65538: the CA file
        // must live outside the PHAR for the SSL context to read it.
        $caFile = dirname($tempFilename) . '/ca.pem';
        copy(__PHPUNIT_PHAR_ROOT__ . '/ca.pem', $caFile);
        print 'Updating the PHPUnit PHAR ... ';
        $options = array(
            'ssl' => array(
                'allow_self_signed' => false,
                'cafile' => $caFile,
                'verify_peer' => true
            )
        );
        // Before PHP 5.6 peer-name verification had to be configured
        // manually via CN_match / SNI_server_name.
        if (PHP_VERSION_ID < 50600) {
            $options['ssl']['CN_match'] = 'phar.phpunit.de';
            $options['ssl']['SNI_server_name'] = 'phar.phpunit.de';
        }
        file_put_contents(
            $tempFilename,
            file_get_contents(
                $remoteFilename,
                false,
                stream_context_create($options)
            )
        );
        chmod($tempFilename, 0777 & ~umask());
        try {
            // Opening the download as a Phar validates its format before we
            // overwrite the currently running archive.
            $phar = new Phar($tempFilename);
            unset($phar);
            rename($tempFilename, $localFilename);
            unlink($caFile);
        } catch (Exception $e) {
            unlink($caFile);
            unlink($tempFilename);
            print " done\n\n" . $e->getMessage() . "\n";
            exit(2);
        }
        print " done\n";
        exit(0);
    }
/**
 * Show the help message.
 *
 * Prints the version banner followed by the full CLI usage text on
 * standard output. The --self-update option is only advertised when
 * PHPUnit runs from a PHAR (__PHPUNIT_PHAR__ is defined).
 */
protected function showHelp()
{
    $this->printVersionString();
    // NOTE: heredoc body below is user-visible output; the closing EOT
    // marker must stay unindented (PHP < 7.3 requirement).
    print <<<EOT
Usage: phpunit [options] UnitTest [UnitTest.php]
phpunit [options] <directory>
Code Coverage Options:
--coverage-clover <file> Generate code coverage report in Clover XML format.
--coverage-crap4j <file> Generate code coverage report in Crap4J XML format.
--coverage-html <dir> Generate code coverage report in HTML format.
--coverage-php <file> Export PHP_CodeCoverage object to file.
--coverage-text=<file> Generate code coverage report in text format.
Default: Standard output.
--coverage-xml <dir> Generate code coverage report in PHPUnit XML format.
Logging Options:
--log-junit <file> Log test execution in JUnit XML format to file.
--log-tap <file> Log test execution in TAP format to file.
--log-json <file> Log test execution in JSON format.
--testdox-html <file> Write agile documentation in HTML format to file.
--testdox-text <file> Write agile documentation in Text format to file.
Test Selection Options:
--filter <pattern> Filter which tests to run.
--testsuite <pattern> Filter which testsuite to run.
--group ... Only runs tests from the specified group(s).
--exclude-group ... Exclude tests from the specified group(s).
--list-groups List available test groups.
--test-suffix ... Only search for test in files with specified
suffix(es). Default: Test.php,.phpt
Test Execution Options:
--report-useless-tests Be strict about tests that do not test anything.
--strict-coverage Be strict about unintentionally covered code.
--disallow-test-output Be strict about output during tests.
--enforce-time-limit Enforce time limit based on test size.
--disallow-todo-tests Disallow @todo-annotated tests.
--strict Run tests in strict mode (enables all of the above).
--process-isolation Run each test in a separate PHP process.
--no-globals-backup Do not backup and restore \$GLOBALS for each test.
--static-backup Backup and restore static attributes for each test.
--colors Use colors in output.
--stderr Write to STDERR instead of STDOUT.
--stop-on-error Stop execution upon first error.
--stop-on-failure Stop execution upon first error or failure.
--stop-on-risky Stop execution upon first risky test.
--stop-on-skipped Stop execution upon first skipped test.
--stop-on-incomplete Stop execution upon first incomplete test.
-v|--verbose Output more verbose information.
--debug Display debugging information during test execution.
--loader <loader> TestSuiteLoader implementation to use.
--repeat <times> Runs the test(s) repeatedly.
--tap Report test execution progress in TAP format.
--testdox Report test execution progress in TestDox format.
--printer <printer> TestListener implementation to use.
Configuration Options:
--bootstrap <file> A "bootstrap" PHP file that is run before the tests.
-c|--configuration <file> Read configuration from XML file.
--no-configuration Ignore default configuration file (phpunit.xml).
--include-path <path(s)> Prepend PHP's include_path with given path(s).
-d key[=value] Sets a php.ini value.
Miscellaneous Options:
-h|--help Prints this usage information.
--version Prints the version and exits.
EOT;
    if (defined('__PHPUNIT_PHAR__')) {
        print "\n --self-update Update PHPUnit to the latest version.\n";
    }
}
/**
 * Custom callback for test suite discovery.
 *
 * Intentionally empty: subclasses of this command may override it to
 * hook their own suite-discovery logic into the run.
 */
protected function handleCustomTestSuite()
{
}
/**
 * Prints the PHPUnit version banner exactly once per process.
 */
private function printVersionString()
{
    if (!$this->versionStringPrinted) {
        print PHPUnit_Runner_Version::getVersionString() . "\n\n";
        $this->versionStringPrinted = true;
    }
}
/**
 * Prints an error message (preceded by the version banner) and
 * terminates the process with the failure exit code.
 *
 * @param string $message
 */
private function showError($message)
{
    $this->printVersionString();
    print "{$message}\n";
    exit(PHPUnit_TextUI_TestRunner::FAILURE_EXIT);
}
}
| gpl-3.0 |
minichate/dashboard | core/PEAR/Auth/Container/LDAP.php | 29506 | <?php
/* vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4 foldmethod=marker: */
/**
* Storage driver for use against an LDAP server
*
* PHP versions 4 and 5
*
* LICENSE: This source file is subject to version 3.01 of the PHP license
* that is available through the world-wide-web at the following URI:
* http://www.php.net/license/3_01.txt. If you did not receive a copy of
* the PHP License and are unable to obtain it through the web, please
* send a note to [email protected] so we can mail you a copy immediately.
*
* @category Authentication
* @package Auth
* @author Jan Wagner <[email protected]>
* @author Adam Ashley <[email protected]>
* @author Hugues Peeters <[email protected]>
* @copyright 2001-2006 The PHP Group
* @license http://www.php.net/license/3_01.txt PHP License 3.01
* @version CVS: $Id: LDAP.php,v 1.43 2007/06/12 03:11:26 aashley Exp $
* @link http://pear.php.net/package/Auth
*/
/**
* Include Auth_Container base class
*/
require_once "Auth/Container.php";
/**
* Include PEAR package for error handling
*/
require_once "PEAR.php";
/**
* Storage driver for fetching login data from LDAP
*
* This class is heavily based on the DB and File containers. By default it
* connects to localhost:389 and searches for uid=$username with the scope
* "sub". If no search base is specified, it will try to determine it via
* the namingContexts attribute. It takes its parameters in a hash, connects
* to the ldap server, binds anonymously, searches for the user, and tries
* to bind as the user with the supplied password. When a group was set, it
* will look for group membership of the authenticated user. If all goes
* well the authentication was successful.
*
* Parameters:
*
* host: localhost (default), ldap.netsols.de or 127.0.0.1
* port: 389 (default) or 636 or whereever your server runs
* url: ldap://localhost:389/
* useful for ldaps://, works only with openldap2 ?
* it will be preferred over host and port
* version: LDAP version to use, usually 2 (default) or 3,
* must be an integer!
* referrals: If set, determines whether the LDAP library automatically
* follows referrals returned by LDAP servers or not. Possible
* values are true (default) or false.
* binddn: If set, searching for user will be done after binding
* as this user, if not set the bind will be anonymous.
* This is reported to make the container work with MS
* Active Directory, but should work with any server that
* is configured this way.
* This has to be a complete dn for now (basedn and
* userdn will not be appended).
* bindpw: The password to use for binding with binddn
* basedn: the base dn of your server
* userdn: gets prepended to basedn when searching for user
* userscope: Scope for user searching: one, sub (default), or base
* userattr: the user attribute to search for (default: uid)
* userfilter: filter that will be added to the search filter
* this way: (&(userattr=username)(userfilter))
* default: (objectClass=posixAccount)
* attributes: array of additional attributes to fetch from entry.
* these will added to auth data and can be retrieved via
* Auth::getAuthData(). An empty array will fetch all attributes,
* array('') will fetch no attributes at all (default)
* If you add 'dn' as a value to this array, the users DN that was
* used for binding will be added to auth data as well.
* attrformat: The returned format of the additional data defined in the
* 'attributes' option. Two formats are available.
* LDAP returns data formatted in a
* multidimensional array where each array starts with a
* 'count' element providing the number of attributes in the
* entry, or the number of values for attributes. When set
* to this format, the only way to retrieve data from the
* Auth object is by calling getAuthData('attributes').
* AUTH returns data formatted in a
* structure more compliant with other Auth Containers,
* where each attribute element can be directly called by
* getAuthData() method from Auth.
* For compatibility with previous LDAP container versions,
* the default format is LDAP.
* groupdn: gets prepended to basedn when searching for group
* groupattr: the group attribute to search for (default: cn)
* groupfilter: filter that will be added to the search filter when
* searching for a group:
* (&(groupattr=group)(memberattr=username)(groupfilter))
* default: (objectClass=groupOfUniqueNames)
* memberattr : the attribute of the group object where the user dn
* may be found (default: uniqueMember)
* memberisdn: whether the memberattr is the dn of the user (default)
* or the value of userattr (usually uid)
* group: the name of group to search for
* groupscope: Scope for group searching: one, sub (default), or base
* start_tls: enable/disable the use of START_TLS encrypted connection
* (default: false)
* debug: Enable/Disable debugging output (default: false)
* try_all: Whether to try all user accounts returned from the search
* or just the first one. (default: false)
*
* To use this storage container, you have to use the following syntax:
*
* <?php
* ...
*
* $a1 = new Auth("LDAP", array(
* 'host' => 'localhost',
* 'port' => '389',
* 'version' => 3,
* 'basedn' => 'o=netsols,c=de',
* 'userattr' => 'uid'
* 'binddn' => 'cn=admin,o=netsols,c=de',
* 'bindpw' => 'password'));
*
* $a2 = new Auth('LDAP', array(
* 'url' => 'ldaps://ldap.netsols.de',
* 'basedn' => 'o=netsols,c=de',
* 'userscope' => 'one',
* 'userdn' => 'ou=People',
* 'groupdn' => 'ou=Groups',
* 'groupfilter' => '(objectClass=posixGroup)',
* 'memberattr' => 'memberUid',
* 'memberisdn' => false,
* 'group' => 'admin'
* ));
*
* $a3 = new Auth('LDAP', array(
* 'host' => 'ldap.netsols.de',
* 'port' => 389,
* 'version' => 3,
* 'referrals' => false,
* 'basedn' => 'dc=netsols,dc=de',
* 'binddn' => 'cn=Jan Wagner,cn=Users,dc=netsols,dc=de',
* 'bindpw' => 'password',
* 'userattr' => 'samAccountName',
* 'userfilter' => '(objectClass=user)',
* 'attributes' => array(''),
* 'group' => 'testing',
* 'groupattr' => 'samAccountName',
* 'groupfilter' => '(objectClass=group)',
* 'memberattr' => 'member',
* 'memberisdn' => true,
* 'groupdn' => 'cn=Users',
* 'groupscope' => 'one',
* 'debug' => true);
*
* The parameter values have to correspond
* to the ones for your LDAP server of course.
*
* When talking to a Microsoft ActiveDirectory server you have to
* use 'samaccountname' as the 'userattr' and follow special rules
* to translate the ActiveDirectory directory names into 'basedn'.
* The 'basedn' for the default 'Users' folder on an ActiveDirectory
* server for the ActiveDirectory Domain (which is not related to
* its DNS name) "win2000.example.org" would be:
* "CN=Users, DC=win2000, DC=example, DC=org'
* where every component of the domain name becomes a DC attribute
* of its own. If you want to use a custom users folder you have to
* replace "CN=Users" with a sequence of "OU" attributes that specify
* the path to your custom folder in reverse order.
* So the ActiveDirectory folder
* "win2000.example.org\Custom\Accounts"
* would become
* "OU=Accounts, OU=Custom, DC=win2000, DC=example, DC=org'
*
* It seems that binding anonymously to an Active Directory
* is not allowed, so you have to set binddn and bindpw for
* user searching.
*
* LDAP Referrals need to be set to false for AD to work sometimes.
*
* Example a3 shows a full blown and tested example for connection to
* Windows 2000 Active Directory with group membership checking
*
* Note also that if you want an encrypted connection to an MS LDAP
* server, then, on your webserver, you must specify
* TLS_REQCERT never
* in /etc/ldap/ldap.conf or in the webserver user's ~/.ldaprc (which
* may or may not be read depending on your configuration).
*
*
* @category Authentication
* @package Auth
* @author Jan Wagner <[email protected]>
* @author Adam Ashley <[email protected]>
* @author Hugues Peeters <[email protected]>
* @copyright 2001-2006 The PHP Group
* @license http://www.php.net/license/3_01.txt PHP License 3.01
* @version Release: 1.6.1 File: $Revision: 1.43 $
* @link http://pear.php.net/package/Auth
*/
class Auth_Container_LDAP extends Auth_Container
{
    // {{{ properties
    /**
     * Options for the class
     *
     * Populated with defaults by _setDefaults() and then overridden by
     * the parameters handed to the constructor; see the class docblock
     * for the full list of supported keys.
     *
     * @var array
     */
    var $options = array();
    /**
     * Connection ID of LDAP Link
     *
     * Holds the ldap link resource once _connect() succeeds; false
     * before a connection has been established.
     *
     * @var string
     */
    var $conn_id = false;
    // }}}
// {{{ Auth_Container_LDAP() [constructor]
/**
 * Constructor of the container class
 *
 * Aborts via PEAR_ERROR_DIE when the PHP ldap extension is missing;
 * otherwise installs the default options and merges in any
 * caller-supplied parameters.
 *
 * @param $params, associative hash with host,port,basedn and userattr key
 * @return object Returns an error object if something went wrong
 */
function Auth_Container_LDAP($params)
{
    if (!extension_loaded('ldap')) {
        return PEAR::raiseError('Auth_Container_LDAP: LDAP Extension not loaded',
                                41, PEAR_ERROR_DIE);
    }
    $this->_setDefaults();
    if (is_array($params)) {
        $this->_parseOptions($params);
    }
}
// }}}
// {{{ _prepare()
/**
 * Prepare LDAP connection
 *
 * Opens and binds a connection to the LDAP server unless a valid
 * link already exists.
 *
 * @access private
 * @return mixed True or a PEAR error object.
 */
function _prepare()
{
    if ($this->_isValidLink()) {
        return true;
    }
    $res = $this->_connect();
    if (PEAR::isError($res)) {
        return $res;
    }
    return true;
}
// }}}
// {{{ _connect()
/**
 * Connect to the LDAP server using the global options
 *
 * Connects either via the 'url' option or via 'host'/'port', switches
 * to the configured protocol version (TLS is only attempted for
 * version > 2), configures referral chasing, and finally binds either
 * with 'binddn'/'bindpw' credentials or anonymously.
 *
 * @access private
 * @return object Returns a PEAR error object if an error occurs.
 */
function _connect()
{
    $this->log('Auth_Container_LDAP::_connect() called.', AUTH_LOG_DEBUG);
    // connect
    if (isset($this->options['url']) && $this->options['url'] != '') {
        $this->log('Connecting with URL', AUTH_LOG_DEBUG);
        $conn_params = array($this->options['url']);
    } else {
        $this->log('Connecting with host:port', AUTH_LOG_DEBUG);
        $conn_params = array($this->options['host'], $this->options['port']);
    }
    if (($this->conn_id = @call_user_func_array('ldap_connect', $conn_params)) === false) {
        $this->log('Connection to server failed.', AUTH_LOG_DEBUG);
        $this->log('LDAP ERROR: '.ldap_errno($this->conn_id).': '.ldap_error($this->conn_id), AUTH_LOG_DEBUG);
        return PEAR::raiseError('Auth_Container_LDAP: Could not connect to server.', 41);
    }
    $this->log('Successfully connected to server', AUTH_LOG_DEBUG);
    // switch LDAP version (TLS below requires v3, hence nested here)
    if (is_numeric($this->options['version']) && $this->options['version'] > 2) {
        $this->log("Switching to LDAP version {$this->options['version']}", AUTH_LOG_DEBUG);
        @ldap_set_option($this->conn_id, LDAP_OPT_PROTOCOL_VERSION, $this->options['version']);
        // start TLS if available
        if (isset($this->options['start_tls']) && $this->options['start_tls']) {
            $this->log("Starting TLS session", AUTH_LOG_DEBUG);
            if (@ldap_start_tls($this->conn_id) === false) {
                $this->log('Could not start TLS session', AUTH_LOG_DEBUG);
                $this->log('LDAP ERROR: '.ldap_errno($this->conn_id).': '.ldap_error($this->conn_id), AUTH_LOG_DEBUG);
                return PEAR::raiseError('Auth_Container_LDAP: Could not start tls.', 41);
            }
        }
    }
    // switch LDAP referrals; a failure here is only logged, not fatal
    if (is_bool($this->options['referrals'])) {
        $this->log("Switching LDAP referrals to " . (($this->options['referrals']) ? 'true' : 'false'), AUTH_LOG_DEBUG);
        if (@ldap_set_option($this->conn_id, LDAP_OPT_REFERRALS, $this->options['referrals']) === false) {
            $this->log('Could not change LDAP referrals options', AUTH_LOG_DEBUG);
            $this->log('LDAP ERROR: '.ldap_errno($this->conn_id).': '.ldap_error($this->conn_id), AUTH_LOG_DEBUG);
        }
    }
    // bind with credentials or anonymously
    if (strlen($this->options['binddn']) && strlen($this->options['bindpw'])) {
        $this->log('Binding with credentials', AUTH_LOG_DEBUG);
        $bind_params = array($this->conn_id, $this->options['binddn'], $this->options['bindpw']);
    } else {
        $this->log('Binding anonymously', AUTH_LOG_DEBUG);
        $bind_params = array($this->conn_id);
    }
    // bind for searching; on failure the link is torn down again
    if ((@call_user_func_array('ldap_bind', $bind_params)) === false) {
        $this->log('Bind failed', AUTH_LOG_DEBUG);
        $this->log('LDAP ERROR: '.ldap_errno($this->conn_id).': '.ldap_error($this->conn_id), AUTH_LOG_DEBUG);
        $this->_disconnect();
        return PEAR::raiseError("Auth_Container_LDAP: Could not bind to LDAP server.", 41);
    }
    $this->log('Binding was successful', AUTH_LOG_DEBUG);
    return true;
}
// }}}
// {{{ _disconnect()
/**
 * Disconnects (unbinds) from ldap server
 *
 * No-op when there is no valid link to tear down.
 *
 * @access private
 */
function _disconnect()
{
    $this->log('Auth_Container_LDAP::_disconnect() called.', AUTH_LOG_DEBUG);
    if (!$this->_isValidLink()) {
        return;
    }
    $this->log('disconnecting from server');
    @ldap_unbind($this->conn_id);
}
// }}}
// {{{ _getBaseDN()
/**
 * Tries to find Basedn via namingContext Attribute
 *
 * When no 'basedn' option was supplied, reads the server's root DSE
 * for its namingContexts attribute and uses the first value found.
 * Raises a PEAR error when no base dn can be determined at all.
 *
 * @access private
 */
function _getBaseDN()
{
    $this->log('Auth_Container_LDAP::_getBaseDN() called.', AUTH_LOG_DEBUG);
    $err = $this->_prepare();
    if ($err !== true) {
        return PEAR::raiseError($err->getMessage(), $err->getCode());
    }
    if ($this->options['basedn'] == "" && $this->_isValidLink()) {
        $this->log("basedn not set, searching via namingContexts.", AUTH_LOG_DEBUG);
        $result_id = @ldap_read($this->conn_id, "", "(objectclass=*)", array("namingContexts"));
        if (@ldap_count_entries($this->conn_id, $result_id) == 1) {
            $this->log("got result for namingContexts", AUTH_LOG_DEBUG);
            $entry_id = @ldap_first_entry($this->conn_id, $result_id);
            $attrs = @ldap_get_attributes($this->conn_id, $entry_id);
            // only the first naming context is considered
            $basedn = $attrs['namingContexts'][0];
            if ($basedn != "") {
                $this->log("result for namingContexts was $basedn", AUTH_LOG_DEBUG);
                $this->options['basedn'] = $basedn;
            }
        }
        @ldap_free_result($result_id);
    }
    // if base is still not set, raise error
    if ($this->options['basedn'] == "") {
        return PEAR::raiseError("Auth_Container_LDAP: LDAP search base not specified!", 41);
    }
    return true;
}
// }}}
// {{{ _isValidLink()
/**
 * Determines whether there is a valid ldap connection or not
 *
 * @access private
 * @return boolean
 */
function _isValidLink()
{
    return is_resource($this->conn_id)
        && get_resource_type($this->conn_id) == 'ldap link';
}
// }}}
// {{{ _setDefaults()
/**
 * Set some default options
 *
 * Called from the constructor before user options are merged in by
 * _parseOptions(), so every key below always exists.
 *
 * @access private
 */
function _setDefaults()
{
    $this->options['url']         = '';
    $this->options['host']        = 'localhost';
    $this->options['port']        = '389';
    $this->options['version']     = 2;
    $this->options['referrals']   = true;
    $this->options['binddn']      = '';
    $this->options['bindpw']      = '';
    $this->options['basedn']      = '';
    $this->options['userdn']      = '';
    $this->options['userscope']   = 'sub';
    $this->options['userattr']    = 'uid';
    $this->options['userfilter']  = '(objectClass=posixAccount)';
    $this->options['attributes']  = array(''); // no attributes
    // NOTE(review): the class docblock states the default attrformat is
    // 'LDAP' for backwards compatibility, but the code defaults to
    // 'AUTH' here — one of the two looks wrong; confirm before changing.
    $this->options['attrformat']  = 'AUTH'; // returns attribute like other Auth containers
    $this->options['group']       = '';
    $this->options['groupdn']     = '';
    $this->options['groupscope']  = 'sub';
    $this->options['groupattr']   = 'cn';
    $this->options['groupfilter'] = '(objectClass=groupOfUniqueNames)';
    $this->options['memberattr']  = 'uniqueMember';
    $this->options['memberisdn']  = true;
    $this->options['start_tls']   = false;
    $this->options['debug']       = false;
    $this->options['try_all']     = false; // Try all user ids returned not just the first one
}
// }}}
// {{{ _parseOptions()
/**
 * Parse options passed to the container class
 *
 * Unknown keys are ignored; the 'attributes' value may be given as a
 * comma separated string and is normalised to an array.
 *
 * @access private
 * @param array
 */
function _parseOptions($array)
{
    $array = $this->_setV12OptionsToV13($array);
    foreach ($array as $key => $value) {
        if (!array_key_exists($key, $this->options)) {
            continue;
        }
        if ($key == 'attributes' && !is_array($value)) {
            $this->options[$key] = explode(',', $value);
        } else {
            $this->options[$key] = $value;
        }
    }
}
// }}}
// {{{ _setV12OptionsToV13()
/**
 * Adapt deprecated options from Auth 1.2 LDAP to Auth 1.3 LDAP
 *
 * Maps 'useroc'/'groupoc' onto objectClass filters and 'scope' onto
 * 'userscope'.
 *
 * @author Hugues Peeters <[email protected]>
 * @access private
 * @param array
 * @return array
 */
function _setV12OptionsToV13($array)
{
    if (isset($array['useroc'])) {
        $array['userfilter'] = "(objectClass=" . $array['useroc'] . ")";
    }
    if (isset($array['groupoc'])) {
        $array['groupfilter'] = "(objectClass=" . $array['groupoc'] . ")";
    }
    if (isset($array['scope'])) {
        $array['userscope'] = $array['scope'];
    }
    return $array;
}
// }}}
// {{{ _scope2function()
/**
 * Get search function for scope
 *
 * 'one' maps to ldap_list(), 'base' to ldap_read(); any other value
 * falls back to a full subtree search via ldap_search().
 *
 * @param string scope
 * @return string ldap search function
 */
function _scope2function($scope)
{
    if ($scope == 'one') {
        return 'ldap_list';
    }
    if ($scope == 'base') {
        return 'ldap_read';
    }
    return 'ldap_search';
}
// }}}
// {{{ fetchData()
/**
 * Fetch data from LDAP server
 *
 * Searches the LDAP server for the given username/password
 * combination. Escapes all LDAP meta characters in username
 * before performing the query. Each candidate entry is tried by
 * re-binding with the supplied password; with 'try_all' enabled every
 * returned entry is attempted, otherwise only the first one.
 *
 * @param string Username
 * @param string Password
 * @return boolean
 */
function fetchData($username, $password)
{
    $this->log('Auth_Container_LDAP::fetchData() called.', AUTH_LOG_DEBUG);
    $err = $this->_prepare();
    if ($err !== true) {
        return PEAR::raiseError($err->getMessage(), $err->getCode());
    }
    $err = $this->_getBaseDN();
    if ($err !== true) {
        return PEAR::raiseError($err->getMessage(), $err->getCode());
    }
    // UTF8 Encode username for LDAPv3
    if (@ldap_get_option($this->conn_id, LDAP_OPT_PROTOCOL_VERSION, $ver) && $ver == 3) {
        $this->log('UTF8 encoding username for LDAPv3', AUTH_LOG_DEBUG);
        $username = utf8_encode($username);
    }
    // make search filter: (&(userattr=username)(userfilter))
    $filter = sprintf('(&(%s=%s)%s)',
                      $this->options['userattr'],
                      $this->_quoteFilterString($username),
                      $this->options['userfilter']);
    // make search base dn (userdn is prepended to basedn)
    $search_basedn = $this->options['userdn'];
    if ($search_basedn != '' && substr($search_basedn, -1) != ',') {
        $search_basedn .= ',';
    }
    $search_basedn .= $this->options['basedn'];
    // attributes
    $searchAttributes = $this->options['attributes'];
    // make functions params array
    $func_params = array($this->conn_id, $search_basedn, $filter, $searchAttributes);
    // search function to use
    $func_name = $this->_scope2function($this->options['userscope']);
    $this->log("Searching with $func_name and filter $filter in $search_basedn", AUTH_LOG_DEBUG);
    // search
    if (($result_id = @call_user_func_array($func_name, $func_params)) === false) {
        $this->log('User not found', AUTH_LOG_DEBUG);
    } elseif (@ldap_count_entries($this->conn_id, $result_id) >= 1) { // did we get some possible results?
        $this->log('User(s) found', AUTH_LOG_DEBUG);
        $first = true;
        $entry_id = null;
        do {
            // then get the user dn
            if ($first) {
                $entry_id = @ldap_first_entry($this->conn_id, $result_id);
                $first = false;
            } else {
                $entry_id = @ldap_next_entry($this->conn_id, $entry_id);
                if ($entry_id === false)
                    break;
            }
            $user_dn = @ldap_get_dn($this->conn_id, $entry_id);
            // as the dn is not fetched as an attribute, we save it anyway
            if (is_array($searchAttributes) && in_array('dn', $searchAttributes)) {
                $this->log('Saving DN to AuthData', AUTH_LOG_DEBUG);
                $this->_auth_obj->setAuthData('dn', $user_dn);
            }
            // fetch attributes
            if ($attributes = @ldap_get_attributes($this->conn_id, $entry_id)) {
                if (is_array($attributes) && isset($attributes['count']) &&
                    $attributes['count'] > 0) {
                    // ldap_get_attributes() returns a specific multi dimensional array
                    // format containing all the attributes and where each array starts
                    // with a 'count' element providing the number of attributes in the
                    // entry, or the number of values for attribute. For compatibility
                    // reasons, it remains the default format returned by LDAP container
                    // setAuthData().
                    // The code below optionally returns attributes in another format,
                    // more compliant with other Auth containers, where each attribute
                    // element are directly set in the 'authData' list. This option is
                    // enabled by setting 'attrformat' to
                    // 'AUTH' in the 'options' array.
                    // eg. $this->options['attrformat'] = 'AUTH'
                    if ( strtoupper($this->options['attrformat']) == 'AUTH' ) {
                        $this->log('Saving attributes to Auth data in AUTH format', AUTH_LOG_DEBUG);
                        unset ($attributes['count']);
                        foreach ($attributes as $attributeName => $attributeValue ) {
                            // skip the numeric index entries duplicating each name
                            if (is_int($attributeName)) continue;
                            if (is_array($attributeValue) && isset($attributeValue['count'])) {
                                unset ($attributeValue['count']);
                            }
                            // single-valued attributes are unwrapped to a scalar
                            if (count($attributeValue)<=1) $attributeValue = $attributeValue[0];
                            $this->log('Storing additional field: '.$attributeName, AUTH_LOG_DEBUG);
                            $this->_auth_obj->setAuthData($attributeName, $attributeValue);
                        }
                    }
                    else
                    {
                        $this->log('Saving attributes to Auth data in LDAP format', AUTH_LOG_DEBUG);
                        $this->_auth_obj->setAuthData('attributes', $attributes);
                    }
                }
            }
            @ldap_free_result($result_id);
            // need to catch an empty password as openldap seems to return TRUE
            // if anonymous binding is allowed
            if ($password != "") {
                $this->log("Bind as $user_dn", AUTH_LOG_DEBUG);
                // try binding as this user with the supplied password
                if (@ldap_bind($this->conn_id, $user_dn, $password)) {
                    $this->log('Bind successful', AUTH_LOG_DEBUG);
                    // check group if appropriate
                    if (strlen($this->options['group'])) {
                        // decide whether memberattr value is a dn or the username
                        $this->log('Checking group membership', AUTH_LOG_DEBUG);
                        $return = $this->checkGroup(($this->options['memberisdn']) ? $user_dn : $username);
                        $this->_disconnect();
                        return $return;
                    } else {
                        $this->log('Authenticated', AUTH_LOG_DEBUG);
                        $this->_disconnect();
                        return true; // user authenticated
                    } // checkGroup
                } // bind
            } // non-empty password
        } while ($this->options['try_all'] == true); // iterate through entries
    } // get results
    // default
    $this->log('NOT authenticated!', AUTH_LOG_DEBUG);
    $this->_disconnect();
    return false;
}
// }}}
// {{{ checkGroup()
/**
 * Validate group membership
 *
 * Searches the LDAP server for group membership of the
 * supplied username. Quotes all LDAP filter meta characters in
 * the user name before querying the LDAP server.
 *
 * NOTE(review): the 'group' option itself is interpolated into the
 * filter without escaping — it comes from configuration, not user
 * input, but confirm that assumption holds for all callers.
 *
 * @param string Distinguished Name of the authenticated User
 * @return boolean
 */
function checkGroup($user)
{
    $this->log('Auth_Container_LDAP::checkGroup() called.', AUTH_LOG_DEBUG);
    $err = $this->_prepare();
    if ($err !== true) {
        return PEAR::raiseError($err->getMessage(), $err->getCode());
    }
    // make filter: (&(groupattr=group)(memberattr=user)(groupfilter))
    $filter = sprintf('(&(%s=%s)(%s=%s)%s)',
                      $this->options['groupattr'],
                      $this->options['group'],
                      $this->options['memberattr'],
                      $this->_quoteFilterString($user),
                      $this->options['groupfilter']);
    // make search base dn (groupdn is prepended to basedn)
    $search_basedn = $this->options['groupdn'];
    if ($search_basedn != '' && substr($search_basedn, -1) != ',') {
        $search_basedn .= ',';
    }
    $search_basedn .= $this->options['basedn'];
    $func_params = array($this->conn_id, $search_basedn, $filter,
                         array($this->options['memberattr']));
    $func_name = $this->_scope2function($this->options['groupscope']);
    $this->log("Searching with $func_name and filter $filter in $search_basedn", AUTH_LOG_DEBUG);
    // search; membership requires exactly one matching group entry
    if (($result_id = @call_user_func_array($func_name, $func_params)) != false) {
        if (@ldap_count_entries($this->conn_id, $result_id) == 1) {
            @ldap_free_result($result_id);
            $this->log('User is member of group', AUTH_LOG_DEBUG);
            return true;
        }
    }
    // default
    $this->log('User is NOT member of group', AUTH_LOG_DEBUG);
    return false;
}
// }}}
// {{{ _quoteFilterString()
/**
 * Escapes LDAP filter special characters as defined in RFC 2254.
 *
 * Backslash, wildcard, parentheses and NUL are backslash-escaped so a
 * user-supplied value cannot alter the structure of a search filter.
 *
 * @access private
 * @param string Filter String
 */
function _quoteFilterString($filter_str)
{
    $replacements = array(
        '\\'   => '\\\\',
        '*'    => '\*',
        '('    => '\(',
        ')'    => '\)',
        "\x00" => "\\\x00",
    );
    return strtr($filter_str, $replacements);
}
// }}}
}
?>
| gpl-3.0 |
Kittychanley/TFCraft | src/Common/com/bioxx/tfc/WorldGen/GenLayers/Biome/GenLayerIslandTFC.java | 932 | package com.bioxx.tfc.WorldGen.GenLayers.Biome;
import net.minecraft.world.gen.layer.IntCache;
import com.bioxx.tfc.WorldGen.GenLayers.GenLayerTFC;
public class GenLayerIslandTFC extends GenLayerTFC
{
	public GenLayerIslandTFC(long seed)
	{
		super(seed);
	}

	/**
	 * Returns a list of integer values generated by this layer. These may be
	 * interpreted as temperatures, rainfall amounts, or biomeList[] indices
	 * based on the particular GenLayer subclass. Each cell is seeded per
	 * chunk coordinate and set to 1 with probability 1/4, otherwise 0; the
	 * cell covering the origin is forced to 1 when it lies in this area.
	 */
	@Override
	public int[] getInts(int baseX, int baseZ, int maxX, int maxZ)
	{
		int[] cells = IntCache.getIntCache(maxX * maxZ);
		for (int z = 0; z < maxZ; ++z)
		{
			for (int x = 0; x < maxX; ++x)
			{
				// Seeding must happen per cell, before nextInt(), to keep
				// generation deterministic for a given world seed.
				this.initChunkSeed(baseX + x, baseZ + z);
				cells[x + z * maxX] = this.nextInt(4) == 0 ? 1 : 0;
			}
		}
		if (baseX > -maxX && baseX <= 0 && baseZ > -maxZ && baseZ <= 0)
		{
			cells[-baseX + -baseZ * maxX] = 1;
		}
		return cells;
	}
}
| gpl-3.0 |
FlixMaster/mwEmbed | modules/KalturaSupport/RequestHelper.php | 8819 | <?php
class RequestHelper {
	// Kaltura session string; presumably set by setKSIfExists() from the
	// 'ks' URL parameter or flashvar — TODO confirm against that method.
	var $ks = null;
	// True when the request carried nocache=true
	var $noCache = false;
	// True when the request carried a debug flag
	var $debug = false;
	// Helper utility object supplied to the constructor
	var $utility = null;
	/**
	 * Variables set by the Frame request:
	 *
	 * Keys double as the whitelist of accepted request parameters in
	 * parseRequest(); values stay null until supplied by the request.
	 */
	public $urlParameters = array(
		'cache_st' => null,
		'p' => null,
		'partner_id' => null,
		'wid' => null,
		'uiconf_id' => null,
		'entry_id' => null,
		'flashvars' => null,
		'playlist_id' => null,
		'urid' => null,
		// Custom service url properties ( only used when wgKalturaAllowIframeRemoteService is set to true )
		'ServiceUrl'=> null,
		'ServiceBase'=>null,
		'CdnUrl'=> null,
		'UseManifestUrls' => null,
		'ks' => null,
		'debug' => null,
		// for thumbnails
		'width' => null,
		'height'=> null,
		'playerId' => null,
		'vid_sec' => null,
		'vid_slices' => null,
		'inlineScript' => null
	);
/**
 * @param object $utility helper utility object; required.
 * @throws Exception when no utility object is supplied.
 */
function __construct( $utility ){
	if( !$utility ){
		throw new Exception("Error missing utility object");
	}
	$this->utility = $utility;
	// parse input:
	$this->parseRequest();
	// Set KS if available in URL parameter or flashvar
	$this->setKSIfExists();
}
// Parse the embedFrame request and sanitize input
/**
 * Populates $this->urlParameters (and the debug/noCache flags) from the
 * request: PATH_INFO key/value pairs, $_REQUEST, and — for IE9-and-lower
 * CORS requests — the raw POST body. Also normalises 'true'/'false'
 * strings to booleans and derives 'wid' from 'p'/'partner_id'.
 *
 * Fixes over the previous revision:
 *  - parse_str() was handed an expression as its second argument; that
 *    parameter must be a real variable (passed by reference) to receive
 *    the parsed result, and the subsequent loop read an undefined $data.
 *  - count() was applied to the raw-POST string; use !empty() instead.
 */
private function parseRequest(){
	global $wgEnableScriptDebug, $wgKalturaUseAppleAdaptive,
	$wgKalturaPartnerDisableAppleAdaptive;
	// Support /key/value path request:
	if( isset( $_SERVER['PATH_INFO'] ) ){
		$urlParts = explode( '/', $_SERVER['PATH_INFO'] );
		foreach( $urlParts as $inx => $urlPart ){
			foreach( $this->urlParameters as $attributeKey => $na){
				if( $urlPart == $attributeKey && isset( $urlParts[$inx+1] ) ){
					$_REQUEST[ $attributeKey ] = $urlParts[$inx+1];
				}
			}
		}
	}
	// TODO refactor this parameter sanitation
	foreach( $this->urlParameters as $attributeKey => $na){
		if( isset( $_REQUEST[ $attributeKey ] ) ){
			// set the url parameter and don't let any html in:
			$this->urlParameters[ $attributeKey ] = $_REQUEST[ $attributeKey ];
		}
	}
	// support CORS for IE9 and lower: fall back to the raw POST body
	global $HTTP_RAW_POST_DATA;
	if ( count( $_POST ) == 0 && !empty( $HTTP_RAW_POST_DATA ) ){
		// Decode %uXXXX escapes and HTML entities before parsing.
		$rawBody = html_entity_decode(
			preg_replace( "/%u([0-9a-f]{3,4})/i", "&#x\\1;",
				urldecode( $HTTP_RAW_POST_DATA )
			),
			ENT_QUOTES,
			'UTF-8'
		);
		// parse_str() requires a real variable for its result parameter.
		parse_str( $rawBody, $data );
		foreach( $data as $k => $v ){
			$this->urlParameters[ $k ] = $v;
		}
	}
	// string to boolean
	foreach( $this->urlParameters as $k => $v ){
		if( $v == 'false'){
			$this->urlParameters[$k] = false;
		}
		if( $v == 'true' ){
			$this->urlParameters[$k] = true;
		}
	}
	// derive the widget id from the partner id when not given directly
	if( isset( $this->urlParameters['p'] ) && !isset( $this->urlParameters['wid'] ) ){
		$this->urlParameters['wid'] = '_' . $this->urlParameters['p'];
	}
	if( isset( $this->urlParameters['partner_id'] ) && !isset( $this->urlParameters['wid'] ) ){
		$this->urlParameters['wid'] = '_' . $this->urlParameters['partner_id'];
	}
	// Check for debug flag
	if( isset( $_REQUEST['debug'] ) ){
		$this->debug = true;
		$wgEnableScriptDebug = true;
	}
	// Check for no cache flag
	if( isset( $_REQUEST['nocache'] ) && $_REQUEST['nocache'] == 'true' ) {
		$this->noCache = true;
	}
	// Check for required config
	if( $this->urlParameters['wid'] == null ){
		//throw new Exception( 'Can not display player, missing widget id' );
	}
}
/**
 * Returns the named url parameter, or null when it is unknown or unset.
 *
 * @param string|null $name parameter key
 * @return mixed|null
 */
function get( $name = null ) {
	if( !$name || !isset( $this->urlParameters[ $name ] ) ) {
		return null;
	}
	return $this->urlParameters[ $name ];
}
/**
 * Stores a url parameter value.
 *
 * Fix over the previous revision: the old truthiness check rejected
 * legitimate falsy values such as false, 0 or '' — strict null checks
 * now accept them while remaining backward compatible for all callers
 * that passed truthy values.
 *
 * @param string|null $key parameter key
 * @param mixed $val value to store (any non-null value)
 * @return bool true when the value was stored
 */
function set( $key = null, $val = null ) {
	if( $key !== null && $val !== null ) {
		$this->urlParameters[ $key ] = $val;
		return true;
	}
	return false;
}
function getServiceConfig( $name ){
global $wgKalturaAllowIframeRemoteService;
// Check if we allow URL override:
if(( $wgKalturaAllowIframeRemoteService == true ) || $this->isEmbedServicesEnabled()){
// Check for urlParameters
if( $this->get( $name ) ){
return $this->get( $name );
}
}
// Else use the global config:
switch( $name ){
case 'ServiceUrl' :
global $wgKalturaServiceUrl;
return $wgKalturaServiceUrl;
break;
case 'ServiceBase':
global $wgKalturaServiceBase;
return $wgKalturaServiceBase;
break;
case 'CdnUrl':
global $wgKalturaCDNUrl;
return $wgKalturaCDNUrl;
break;
case 'UseManifestUrls':
global $wgKalturaUseManifestUrls;
return $wgKalturaUseManifestUrls;
break;
}
}
function isEmbedServicesEnabled(){
global $wgEnableKalturaEmbedServicesRouting, $wgKalturaAuthEmbedServicesDomains;
if ($wgEnableKalturaEmbedServicesRouting){
return true;
} else {
return false;
}
}
function isEmbedServicesRequest(){
$proxyData = $this->getFlashVars("proxyData");
return (isset($proxyData) && !empty($proxyData));
}
	/**
	 * Returns the raw embed-services proxy payload from the flashvars.
	 *
	 * @return mixed Proxy data value, or null when not supplied.
	 */
	function getEmbedServicesRequest(){
		return $this->getFlashVars("proxyData");
	}
public function getUserAgent() {
return isset($_SERVER['HTTP_USER_AGENT']) ? $_SERVER['HTTP_USER_AGENT'] : '';
}
public function getReferer(){
global $wgKalturaForceReferer;
if( $wgKalturaForceReferer !== false ){
return $wgKalturaForceReferer;
}
if( isset( $_SERVER['HTTP_REFERER'] ) ){
$urlParts = parse_url( $_SERVER['HTTP_REFERER'] );
if (isset( $urlParts['scheme'] ) && isset( $urlParts['host']) ) {
return $urlParts['scheme'] . "://" . $urlParts['host'] . "/";
}
}
return 'http://www.kaltura.com/';
}
// Check if private IP
private function isIpPrivate($ip){
$privateRanges = array(
'10.0.0.0|10.255.255.255',
'172.16.0.0|172.31.255.255',
'192.168.0.0|192.168.255.255',
'169.254.0.0|169.254.255.255',
'127.0.0.0|127.255.255.255',
);
$longIp = ip2long($ip);
if ($longIp && $longIp != -1)
{
foreach ($privateRanges as $range)
{
list($start, $end) = explode('|', $range);
if ($longIp >= ip2long($start) && $longIp <= ip2long($end)) {
return true;
}
}
}
return false;
}
// Get the first real IP
private function getRealIP( $headerIPs ){
$remote_addr = null;
$headerIPs = trim( $headerIPs, ',' );
$headerIPs = explode(',', $headerIPs);
foreach( $headerIPs as $ip ) {
// ignore any string after the ip address
preg_match('/^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/', trim($ip), $matches);
if (!isset($matches[0]))
continue;
$tempAddr = trim($matches[0]);
if ($this->isIpPrivate($tempAddr)) // verify that ip is not from a private range
continue;
$remote_addr = $tempAddr;
break;
}
return $remote_addr;
}
public function getRemoteAddrHeader(){
global $wgKalturaRemoteAddressSalt, $wgKalturaForceIP;
if( $wgKalturaRemoteAddressSalt === false ){
return '';
}
$ip = null;
// Check for x-forward-for and x-real-ip headers
$requestHeaders = getallheaders();
if( isset( $requestHeaders['X-Forwarded-For'] ) ){
$ip = $this->getRealIP( $requestHeaders['X-Forwarded-For'] );
}
// Check for x-real-ip
if( !$ip && isset( $requestHeaders['X-Real-IP'] ) ){
// also trim any white space
list( $ip ) = explode( ',', $requestHeaders['X-Real-IP'] );
}
if( !$ip ){
$ip = $_SERVER['REMOTE_ADDR'];
}
if( $wgKalturaForceIP ){
$ip = $wgKalturaForceIP;
}
// make sure there is no white space
$ip = trim( $ip );
$s = $ip . "," . time() . "," . microtime( true );
return "X-KALTURA-REMOTE-ADDR: " . $s . ',' . md5( $s . "," . $wgKalturaRemoteAddressSalt );
}
public function getCacheSt(){
return ( $this->get('cache_st') ) ? $this->get('cache_st') : '';
}
	/**
	 * Returns the uiconf_id request parameter, or null when absent.
	 *
	 * @return mixed
	 */
	public function getUiConfId(){
		return $this->get('uiconf_id');
	}
	/**
	 * Returns the widget id ('wid') request parameter, or null when absent.
	 *
	 * @return mixed
	 */
	public function getWidgetId() {
		return $this->get('wid');
	}
	/**
	 * Returns the entry_id request parameter, or null when absent.
	 *
	 * @return mixed
	 */
	public function getEntryId(){
		return $this->get('entry_id');
	}
public function getReferenceId() {
if ( $this->getFlashVars('referenceId') ) {
return $this->getFlashVars('referenceId');
}
return false;
}
/**
* getFlashVars
* returns flashVars from the request
* If no key passed, return the entire flashVars array
*/
public function getFlashVars( $key = null, $default = null ) {
if( $this->get('flashvars') ) {
$flashVars = $this->get('flashvars');
if( ! is_null( $key ) ) {
if(is_array($flashVars) && isset($flashVars[$key]) ) {
return $this->utility->formatString($flashVars[$key]);
} else {
return $default;
}
}
return is_array($flashVars) ? $flashVars : array();
}
return (!is_null($key)) ? $default : array();
}
private function setKSIfExists() {
$ks = null;
if( $this->getFlashVars('ks') ) {
$ks = $this->getFlashVars('ks');
} else if( $this->get('ks') ) {
$ks = $this->get('ks');
}
// check for empty ks
if( $ks && trim($ks) != '' ){
$this->ks = $ks;
}
}
public function hasKS() {
global $wgForceCache;
return $wgForceCache ? false : isset($this->ks);
}
	/**
	 * Returns the captured Kaltura session string, if one was set.
	 *
	 * @return mixed
	 */
	public function getKS() {
		return $this->ks;
	}
} | agpl-3.0 |
acbodine/koding | go/src/koding/klientctl/endpoint/team/team.go | 2178 | package team
import (
"errors"
"sync"
"koding/kites/kloud/stack"
"koding/kites/kloud/team"
"koding/klientctl/ctlcli"
"koding/klientctl/endpoint/kloud"
)
// DefaultClient is the package-level client used by the forwarding helpers.
var DefaultClient = &Client{}
// init registers the default client so its cached "used team" state is
// flushed when the CLI exits.
func init() {
	ctlcli.CloseOnExit(DefaultClient)
}
// ListOptions are options available for `team list` command.
type ListOptions struct {
	Slug string // Limit to a specific team with a given name.
}
// Team identifies a koding team by its name.
type Team struct {
	Name string `json:"name"`
}
// Valid reports whether the team carries a usable (non-empty) name,
// returning a descriptive error otherwise.
func (t *Team) Valid() error {
	if t.Name != "" {
		return nil
	}
	return errors.New("invalid empty team name")
}
// Client manages the team currently in use and lists teams via kloud.
type Client struct {
	Kloud *kloud.Client // kloud transport; nil means kloud.DefaultClient

	once sync.Once // for c.init()
	used Team      // team currently in use, restored from / saved to cache
}
// Use records the given team as the one currently in use.
func (c *Client) Use(team *Team) {
	c.init()

	c.used = *team
}
// Used returns the team currently in use (possibly invalid/empty).
func (c *Client) Used() *Team {
	c.init()

	return &c.used
}
// List returns the list of user's teams. When opts carries a slug, the
// result is limited to the matching team.
func (c *Client) List(opts *ListOptions) ([]*team.Team, error) {
	c.init()

	var req stack.TeamListRequest
	if opts != nil {
		req.Slug = opts.Slug
	}

	var resp stack.TeamListResponse
	if err := c.kloud().Call("team.list", &req, &resp); err != nil {
		return nil, err
	}

	return resp.Teams, nil
}
// Whoami reports the current user/team information from kloud.
func (c *Client) Whoami() (*stack.WhoamiResponse, error) {
	c.init()

	resp := &stack.WhoamiResponse{}
	if err := c.kloud().Call("team.whoami", nil, resp); err != nil {
		return nil, err
	}

	return resp, nil
}
// Close persists the currently used team to the kloud cache. It is a
// no-op (returning nil) when no valid team is in use.
func (c *Client) Close() (err error) {
	if c.used.Valid() == nil {
		err = c.kloud().Cache().ReadWrite().SetValue("team.used", &c.used)
	}
	return err
}
// init lazily restores cached state; readCache runs exactly once.
func (c *Client) init() {
	c.once.Do(c.readCache)
}
// readCache loads the previously used team from the kloud cache.
func (c *Client) readCache() {
	// Ignoring read error, if it's non-nil then empty cache is going to
	// be used instead.
	_ = c.kloud().Cache().ReadOnly().GetValue("team.used", &c.used)
}
// kloud returns the injected kloud client, defaulting to the package
// level client when none was configured.
func (c *Client) kloud() *kloud.Client {
	if c.Kloud == nil {
		return kloud.DefaultClient
	}
	return c.Kloud
}
// Package-level convenience wrappers that forward to DefaultClient.
func Use(team *Team) { DefaultClient.Use(team) }
func Used() *Team { return DefaultClient.Used() }
func List(opts *ListOptions) ([]*team.Team, error) { return DefaultClient.List(opts) }
func Whoami() (*stack.WhoamiResponse, error) { return DefaultClient.Whoami() }
| agpl-3.0 |
murugamsm/webmail-lite | libraries/afterlogic/common/managers/users/storages/nodb/storage.php | 3519 | <?php
/*
* Copyright 2004-2015, AfterLogic Corp.
* Licensed under AGPLv3 license or AfterLogic license
* if commercial version of the product was purchased.
* See the LICENSE file for a full license statement.
*/
/**
* @internal
*
* @package Users
* @subpackage Storages
*/
class CApiUsersNodbStorage extends CApiUsersStorage
{
	// Session keys for the single cached account / calendar user.
	const SESS_ACCOUNT_STORAGE = 'sess-acct-storage';
	const SESS_CAL_USERS_STORAGE = 'sess-cal-user-storage';

	/**
	 * @param CApiGlobalManager &$oManager
	 */
	public function __construct(CApiGlobalManager &$oManager)
	{
		parent::__construct('nodb', $oManager);

		CSession::$sSessionName = API_SESSION_WEBMAIL_NAME;
	}

	/**
	 * Retrieves information on WebMail Pro account. Account ID is used for look up.
	 *
	 * @param int $iAccountId Account identifier.
	 *
	 * @return CAccount|null Clone of the cached account, or null on mismatch.
	 */
	public function getAccountById($iAccountId)
	{
		$oAccount = CSession::get(CApiUsersNodbStorage::SESS_ACCOUNT_STORAGE, null);
		return ($oAccount && $iAccountId === $oAccount->IdAccount) ? clone $oAccount : null;
	}

	/**
	 * Creates WebMail account. In this no-database storage the account is
	 * simply cached in the session with fixed identifiers.
	 *
	 * @param CAccount &$oAccount Object instance with prepopulated account properties.
	 *
	 * @return bool Always true.
	 */
	public function createAccount(CAccount &$oAccount)
	{
		$oAccount->IdAccount = 1;
		$oAccount->IdUser = 1;
		$oAccount->User->IdUser = 1;

		// Never persist the previous password in the session.
		$oAccount->PreviousMailPassword = '';
		$oAccount->FlushObsolete('PreviousMailPassword');

		CSession::Set(CApiUsersNodbStorage::SESS_ACCOUNT_STORAGE, $oAccount);
		return true;
	}

	/**
	 * Retrieves list of accounts for given WebMail Pro user.
	 *
	 * @param int $iUserId User identifier.
	 *
	 * @return array|false Single-element id list, or false on mismatch.
	 */
	public function getAccountIdList($iUserId)
	{
		$oAccount = CSession::get(CApiUsersNodbStorage::SESS_ACCOUNT_STORAGE, null);
		return ($oAccount && $iUserId === $oAccount->IdUser) ? array($iUserId) : false;
	}

	/**
	 * Checks if particular account exists. Never true in no-db storage.
	 *
	 * @param CAccount $oAccount Object instance with prepopulated account properties.
	 *
	 * @return bool Always false.
	 */
	public function accountExists(CAccount $oAccount)
	{
		return false;
	}

	/**
	 * Saves changes made to the account.
	 *
	 * @param CAccount &$oAccount Account object containing data to be saved.
	 *
	 * @return bool Always true.
	 */
	public function updateAccount(CAccount $oAccount)
	{
		$oAccount->PreviousMailPassword = '';
		$oAccount->FlushObsolete('PreviousMailPassword');

		CSession::Set(CApiUsersNodbStorage::SESS_ACCOUNT_STORAGE, $oAccount);
		return true;
	}

	/**
	 * Creates calendar user in storage.
	 *
	 * @param CCalUser &$oCalUser CCalUser object.
	 *
	 * @return bool Always true.
	 */
	public function createCalUser(CCalUser &$oCalUser)
	{
		$oCalUser->IdCalUser = 1;
		$oCalUser->IdUser = 1;

		CSession::Set(CApiUsersNodbStorage::SESS_CAL_USERS_STORAGE, $oCalUser);
		return true;
	}

	/**
	 * Obtains CCalUser object that contains calendar settings for specified user. User identifier is used for look up.
	 *
	 * @param int $iUserId User identifier.
	 *
	 * @return CCalUser|null
	 */
	public function getCalUser($iUserId)
	{
		// Bug fix: previously this read SESS_ACCOUNT_STORAGE (a CAccount),
		// so cached calendar settings were never returned. Read the
		// calendar-user slot written by createCalUser/updateCalUser instead.
		$oCalUser = CSession::get(CApiUsersNodbStorage::SESS_CAL_USERS_STORAGE, null);
		return ($oCalUser && $iUserId === $oCalUser->IdUser) ? clone $oCalUser : null;
	}

	/**
	 * Updates calendar user settings.
	 *
	 * @param CCalUser $oCalUser CCalUser object.
	 *
	 * @return bool Always true.
	 */
	public function updateCalUser(CCalUser $oCalUser)
	{
		CSession::Set(CApiUsersNodbStorage::SESS_CAL_USERS_STORAGE, $oCalUser);
		return true;
	}
}
| agpl-3.0 |
sysraj86/carnivalcrm | custom/include/PHPExcel/Shared/OLE/ChainedBlockStream.php | 6434 | <?php
/**
* PHPExcel
*
* Copyright (C) 2006 - 2011 PHPExcel
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category PHPExcel
* @package PHPExcel_Shared_OLE
* @copyright Copyright (c) 2006 - 2007 Christian Schmidt
* @license http://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt LGPL
* @version 1.7.6, 2011-02-27
*/
/**
* PHPExcel_Shared_OLE_ChainedBlockStream
*
* Stream wrapper for reading data stored in an OLE file. Implements methods
* for PHP's stream_wrapper_register(). For creating streams using this
* wrapper, use PHPExcel_Shared_OLE_PPS_File::getStream().
*
* @category PHPExcel
* @package PHPExcel_Shared_OLE
*/
class PHPExcel_Shared_OLE_ChainedBlockStream
{
	/**
	 * The OLE container of the file that is being read.
	 * @var OLE
	 */
	public $ole;

	/**
	 * Parameters specified by fopen().
	 * @var array
	 */
	public $params;

	/**
	 * The binary data of the file.
	 * @var string
	 */
	public $data;

	/**
	 * The file pointer.
	 * @var int byte offset
	 */
	public $pos;

	/**
	 * Implements support for fopen().
	 * For creating streams using this wrapper, use OLE_PPS_File::getStream().
	 * @param string resource name including scheme, e.g.
	 *                  ole-chainedblockstream://oleInstanceId=1
	 * @param string only "r" is supported
	 * @param int mask of STREAM_REPORT_ERRORS and STREAM_USE_PATH
	 * @param string absolute path of the opened stream (out parameter)
	 * @return bool true on success
	 */
	public function stream_open($path, $mode, $options, &$openedPath)
	{
		if ($mode != 'r') {
			if ($options & STREAM_REPORT_ERRORS) {
				trigger_error('Only reading is supported', E_USER_WARNING);
			}
			return false;
		}

		// 25 is length of "ole-chainedblockstream://"
		parse_str(substr($path, 25), $this->params);
		if (!isset($this->params['oleInstanceId'],
					$this->params['blockId'],
					$GLOBALS['_OLE_INSTANCES'][$this->params['oleInstanceId']])) {

			if ($options & STREAM_REPORT_ERRORS) {
				trigger_error('OLE stream not found', E_USER_WARNING);
			}
			return false;
		}
		$this->ole = $GLOBALS['_OLE_INSTANCES'][$this->params['oleInstanceId']];

		$blockId = $this->params['blockId'];
		$this->data = '';
		// Bug fix: the file pointer was never initialised, leaving it null
		// until the first seek; start reading at offset 0 explicitly.
		$this->pos = 0;
		if (isset($this->params['size']) &&
			$this->params['size'] < $this->ole->bigBlockThreshold &&
			$blockId != $this->ole->root->_StartBlock) {

			// Block id refers to small blocks
			$rootPos = $this->ole->_getBlockOffset($this->ole->root->_StartBlock);
			while ($blockId != -2) {
				$pos = $rootPos + $blockId * $this->ole->bigBlockSize;
				$blockId = $this->ole->sbat[$blockId];
				fseek($this->ole->_file_handle, $pos);
				$this->data .= fread($this->ole->_file_handle, $this->ole->bigBlockSize);
			}
		} else {
			// Block id refers to big blocks
			while ($blockId != -2) {
				$pos = $this->ole->_getBlockOffset($blockId);
				fseek($this->ole->_file_handle, $pos);
				$this->data .= fread($this->ole->_file_handle, $this->ole->bigBlockSize);
				$blockId = $this->ole->bbat[$blockId];
			}
		}
		if (isset($this->params['size'])) {
			$this->data = substr($this->data, 0, $this->params['size']);
		}

		if ($options & STREAM_USE_PATH) {
			$openedPath = $path;
		}

		return true;
	}

	/**
	 * Implements support for fclose(). Releases the OLE reference.
	 * @return void
	 */
	public function stream_close()
	{
		$this->ole = null;
		unset($GLOBALS['_OLE_INSTANCES']);
	}

	/**
	 * Implements support for fread(), fgets() etc.
	 * @param int maximum number of bytes to read
	 * @return string|false chunk read, or false at end of stream
	 */
	public function stream_read($count)
	{
		if ($this->stream_eof()) {
			return false;
		}
		$s = substr($this->data, $this->pos, $count);
		$this->pos += $count;
		return $s;
	}

	/**
	 * Implements support for feof().
	 * @return bool TRUE if the file pointer is at EOF; otherwise FALSE
	 */
	public function stream_eof()
	{
		$eof = $this->pos >= strlen($this->data);
		// Workaround for bug in PHP 5.0.x: http://bugs.php.net/27508
		if (version_compare(PHP_VERSION, '5.0', '>=') &&
			version_compare(PHP_VERSION, '5.1', '<')) {

		   $eof = !$eof;
		}
		return $eof;
	}

	/**
	 * Returns the position of the file pointer, i.e. its offset into the file
	 * stream. Implements support for ftell().
	 * @return int
	 */
	public function stream_tell()
	{
		return $this->pos;
	}

	/**
	 * Implements support for fseek().
	 * @param int byte offset
	 * @param int SEEK_SET, SEEK_CUR or SEEK_END
	 * @return bool
	 */
	public function stream_seek($offset, $whence)
	{
		if ($whence == SEEK_SET && $offset >= 0) {
			$this->pos = $offset;
		} elseif ($whence == SEEK_CUR && -$offset <= $this->pos) {
			$this->pos += $offset;
		// Bug fix: sizeof() (alias of count()) is invalid on a string and
		// effectively evaluated to 1; the buffer length requires strlen().
		} elseif ($whence == SEEK_END && -$offset <= strlen($this->data)) {
			$this->pos = strlen($this->data) + $offset;
		} else {
			return false;
		}
		return true;
	}

	/**
	 * Implements support for fstat(). Currently the only supported field is
	 * "size".
	 * @return array
	 */
	public function stream_stat()
	{
		return array(
			'size' => strlen($this->data),
			);
	}

	// Methods used by stream_wrapper_register() that are not implemented:
	// bool stream_flush ( void )
	// int stream_write ( string data )
	// bool rename ( string path_from, string path_to )
	// bool mkdir ( string path, int mode, int options )
	// bool rmdir ( string path, int options )
	// bool dir_opendir ( string path, int options )
	// array url_stat ( string path, int flags )
	// string dir_readdir ( void )
	// bool dir_rewinddir ( void )
	// bool dir_closedir ( void )
}
| agpl-3.0 |
davecheney/juju | service/service.go | 7071 | // Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package service
import (
"strings"
"time"
"github.com/juju/errors"
"github.com/juju/loggo"
"github.com/juju/utils"
"github.com/juju/utils/series"
"github.com/juju/juju/juju/paths"
"github.com/juju/juju/service/common"
"github.com/juju/juju/service/systemd"
"github.com/juju/juju/service/upstart"
"github.com/juju/juju/service/windows"
)
// Package-level logger for the service package.
var (
	logger = loggo.GetLogger("juju.service")
)
// These are the names of the init systems recognized by juju.
const (
	InitSystemSystemd = "systemd"
	InitSystemUpstart = "upstart"
	InitSystemWindows = "windows"
)
// linuxInitSystems lists the names of the init systems that juju might
// find on a linux host.
var linuxInitSystems = []string{
	InitSystemSystemd,
	InitSystemUpstart,
}
// ServiceActions represents the actions (start, stop, install, remove)
// that may be requested for an init system service.
type ServiceActions interface {
	// Start will try to start the service.
	Start() error
	// Stop will try to stop the service.
	Stop() error
	// Install installs a service.
	Install() error
	// Remove will remove the service.
	Remove() error
}
// Service represents a service in the init system running on a host.
// It extends ServiceActions with introspection and remote-command helpers.
type Service interface {
	ServiceActions

	// Name returns the service's name.
	Name() string

	// Conf returns the service's conf data.
	Conf() common.Conf

	// Running returns a boolean value that denotes
	// whether or not the service is running.
	Running() (bool, error)

	// Exists returns whether the service configuration exists in the
	// init directory with the same content that this Service would have
	// if installed.
	Exists() (bool, error)

	// Installed will return a boolean value that denotes
	// whether or not the service is installed.
	Installed() (bool, error)

	// TODO(ericsnow) Move all the commands into a separate interface.

	// InstallCommands returns the list of commands to run on a
	// (remote) host to install the service.
	InstallCommands() ([]string, error)

	// StartCommands returns the list of commands to run on a
	// (remote) host to start the service.
	StartCommands() ([]string, error)
}
// RestartableService is a service that directly supports restarting
// (as opposed to an explicit stop followed by a start).
type RestartableService interface {
	// Restart restarts the service.
	Restart() error
}
// TODO(ericsnow) bug #1426458
// Eliminate the need to pass an empty conf for most service methods
// and several helper functions.

// NewService returns a new Service based on the provided info.
// It fails with an error when name is empty or the series' init system
// cannot be determined.
func NewService(name string, conf common.Conf, series string) (Service, error) {
	if name == "" {
		return nil, errors.New("missing name")
	}

	initSystem, err := versionInitSystem(series)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return newService(name, conf, initSystem, series)
}
// newService constructs the init-system specific Service implementation.
// It returns errors.NotFoundf for an unrecognized init system name.
func newService(name string, conf common.Conf, initSystem, series string) (Service, error) {
	switch initSystem {
	case InitSystemWindows:
		svc, err := windows.NewService(name, conf)
		if err != nil {
			return nil, errors.Annotatef(err, "failed to wrap service %q", name)
		}
		return svc, nil
	case InitSystemUpstart:
		return upstart.NewService(name, conf), nil
	case InitSystemSystemd:
		// systemd services render unit files under the juju data dir.
		dataDir, err := paths.DataDir(series)
		if err != nil {
			return nil, errors.Annotatef(err, "failed to find juju data dir for service %q", name)
		}
		svc, err := systemd.NewService(name, conf, dataDir)
		if err != nil {
			return nil, errors.Annotatef(err, "failed to wrap service %q", name)
		}
		return svc, nil
	default:
		return nil, errors.NotFoundf("init system %q", initSystem)
	}
}
// ListServices lists all installed services on the running system.
// It fails when the host's init system cannot be determined or is not
// one of the recognized systems.
func ListServices() ([]string, error) {
	initName, err := VersionInitSystem(series.HostSeries())
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Each init system previously duplicated the same error annotation;
	// dispatch first, then annotate once.
	var services []string
	switch initName {
	case InitSystemWindows:
		services, err = windows.ListServices()
	case InitSystemUpstart:
		services, err = upstart.ListServices()
	case InitSystemSystemd:
		services, err = systemd.ListServices()
	default:
		return nil, errors.NotFoundf("init system %q", initName)
	}
	if err != nil {
		return nil, errors.Annotatef(err, "failed to list %s services", initName)
	}
	return services, nil
}
// ListServicesScript returns the commands that should be run to get
// a list of service names on a host.
func ListServicesScript() string {
	commands := []string{
		// First resolve which init system the host runs.
		"init_system=$(" + DiscoverInitSystemScript() + ")",
		// If the init system is not identified then the script will
		// "exit 1". This is correct since the script should fail if no
		// init system can be identified.
		newShellSelectCommand("init_system", "exit 1", listServicesCommand),
	}
	return strings.Join(commands, "\n")
}
// listServicesCommand maps an init system name to the shell command that
// lists its services. The second result reports whether the init system
// is recognized.
func listServicesCommand(initSystem string) (string, bool) {
	switch initSystem {
	case InitSystemWindows:
		return windows.ListCommand(), true
	case InitSystemUpstart:
		return upstart.ListCommand(), true
	case InitSystemSystemd:
		return systemd.ListCommand(), true
	}
	return "", false
}
// installStartRetryAttempts defines how much InstallAndStart retries
// upon Start failures: retry every 250ms for up to 1 second total.
var installStartRetryAttempts = utils.AttemptStrategy{
	Total: 1 * time.Second,
	Delay: 250 * time.Millisecond,
}
// InstallAndStart installs the provided service and tries starting it.
// The first few Start failures are ignored.
func InstallAndStart(svc ServiceActions) error {
	if err := svc.Install(); err != nil {
		return errors.Trace(err)
	}

	// For various reasons the init system may take a short time to
	// realise that the service has been installed.
	var err error
	for attempt := installStartRetryAttempts.Start(); attempt.Next(); {
		if err != nil {
			// Log the previous iteration's failure before retrying.
			logger.Errorf("retrying start request (%v)", errors.Cause(err))
		}

		if err = svc.Start(); err == nil {
			break
		}
	}
	// err holds the last Start error (nil on success).
	return errors.Trace(err)
}
// discoverService is patched out during some tests.
var discoverService = func(name string) (Service, error) {
	return DiscoverService(name, common.Conf{})
}
// TODO(ericsnow) Add one-off helpers for Start and Stop too?

// Restart restarts the named service, discovering it on the host first.
func Restart(name string) error {
	svc, err := discoverService(name)
	if err != nil {
		return errors.Annotatef(err, "failed to find service %q", name)
	}
	if err := restart(svc); err != nil {
		return errors.Annotatef(err, "failed to restart service %q", name)
	}
	return nil
}
// restart restarts svc, preferring the service's native Restart support
// and otherwise falling back to an explicit stop followed by a start.
func restart(svc Service) error {
	// Use the Restart method, if there is one.
	if restartable, ok := svc.(RestartableService); ok {
		// errors.Trace returns nil for a nil error, so the success path
		// is preserved.
		return errors.Trace(restartable.Restart())
	}

	// Otherwise explicitly stop and start the service.
	if err := svc.Stop(); err != nil {
		return errors.Trace(err)
	}
	return errors.Trace(svc.Start())
}
| agpl-3.0 |
Edraak/edx-platform | common/djangoapps/course_modes/migrations/0005_auto_20181210_1034.py | 824 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Swap ``expiration_datetime`` for a nullable ``_expiration_datetime``.

    The replacement field keeps ``db_column='expiration_datetime'``, so the
    underlying table column is preserved while the model attribute is
    renamed (presumably to back a property on the model -- confirm against
    the CourseMode model definition).
    """

    dependencies = [
        ('course_modes', '0004_auto_20151113_1457'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='coursemode',
            name='expiration_datetime',
        ),
        migrations.AddField(
            model_name='coursemode',
            name='_expiration_datetime',
            field=models.DateTimeField(db_column=b'expiration_datetime', default=None, blank=True, help_text='OPTIONAL: After this date/time, users will no longer be able to enroll in this mode. Leave this blank if users can enroll in this mode until enrollment closes for the course.', null=True, verbose_name='Upgrade Deadline'),
        ),
    ]
| agpl-3.0 |
CDJ11/CDJ | app/models/legislation.rb | 77 | module Legislation
def self.table_name_prefix
'legislation_'
end
end
| agpl-3.0 |
shunwang/sql-layer-1 | fdb-sql-layer-core/src/main/java/com/foundationdb/server/rowdata/encoding/FixedWidthEncoding.java | 1316 | /**
* Copyright (C) 2009-2013 FoundationDB, LLC
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.foundationdb.server.rowdata.encoding;
import com.foundationdb.ais.model.Column;
import com.foundationdb.server.rowdata.FieldDef;
/**
 * Base class for encodings whose values always occupy their declared
 * maximum storage size.
 */
abstract class FixedWidthEncoding implements Encoding {

    /** Upper bound on key storage consumed by values of this encoding. */
    private final long maxKeyStorageSize;

    FixedWidthEncoding(long maxKeyStorageSize) {
        this.maxKeyStorageSize = maxKeyStorageSize;
    }

    /** Fixed-width fields always report the field's maximum storage size. */
    @Override
    public int widthFromObject(FieldDef fieldDef, Object value) {
        return fieldDef.getMaxStorageSize();
    }

    @Override
    public long getMaxKeyStorageSize(Column column) {
        return maxKeyStorageSize;
    }
}
| agpl-3.0 |
sawenzel/root | tutorials/math/kdTreeBinning.C | 5780 | /// \file
/// \ingroup tutorial_math
///
/// kdTreeBinning tutorial: bin the data in cells of equal content using a kd-tree
///
/// Using TKDTree wrapper class as a data binning structure
/// Plot the 2D data using the TH2Poly class
///
/// \macro_image
/// \macro_output
/// \macro_code
///
/// \author Bartolomeu Rabacal
#include <math.h>
#include "TKDTreeBinning.h"
#include "TH2D.h"
#include "TH2Poly.h"
#include "TStyle.h"
#include "TGraph2D.h"
#include "TRandom3.h"
#include "TCanvas.h"
#include <iostream>
void kdTreeBinning() {

   // -----------------------------------------------------------------------------------------------
   // C r e a t e r a n d o m s a m p l e w i t h r e g u l a r b i n n i n g p l o t t i n g
   // -----------------------------------------------------------------------------------------------

   const UInt_t DATASZ = 10000;
   const UInt_t DATADIM = 2;
   const UInt_t NBINS = 50;

   // Sample layout: first DATASZ entries are dim-0 values, next DATASZ are dim-1.
   Double_t smp[DATASZ * DATADIM];

   double mu[2] = {0,2};
   double sig[2] = {2,3};
   TRandom3 r;
   r.SetSeed(1); // fixed seed for a reproducible tutorial
   for (UInt_t i = 0; i < DATADIM; ++i)
      for (UInt_t j = 0; j < DATASZ; ++j)
         smp[DATASZ * i + j] = r.Gaus(mu[i], sig[i]);

   UInt_t h1bins = (UInt_t) sqrt(NBINS);

   TH2D* h1 = new TH2D("h1BinTest", "Regular binning", h1bins, -5., 5., h1bins, -5., 5.);
   for (UInt_t j = 0; j < DATASZ; ++j)
      h1->Fill(smp[j], smp[DATASZ + j]);


   // ---------------------------------------------------------------------------------------------
   // C r e a t e K D T r e e B i n n i n g o b j e c t w i t h T H 2 P o l y p l o t t i n g
   // ---------------------------------------------------------------------------------------------

   TKDTreeBinning* kdBins = new TKDTreeBinning(DATASZ, DATADIM, smp, NBINS);

   UInt_t nbins = kdBins->GetNBins();
   UInt_t dim   = kdBins->GetDim();

   // Flat edge arrays: bin i spans indices [i*dim, i*dim + dim).
   const Double_t* binsMinEdges = kdBins->GetBinsMinEdges();
   const Double_t* binsMaxEdges = kdBins->GetBinsMaxEdges();

   TH2Poly* h2pol = new TH2Poly("h2PolyBinTest", "KDTree binning", kdBins->GetDataMin(0), kdBins->GetDataMax(0), kdBins->GetDataMin(1), kdBins->GetDataMax(1));

   for (UInt_t i = 0; i < nbins; ++i) {
      UInt_t edgeDim = i * dim;
      h2pol->AddBin(binsMinEdges[edgeDim], binsMinEdges[edgeDim + 1], binsMaxEdges[edgeDim], binsMaxEdges[edgeDim + 1]);
   }

   // TH2Poly bin numbers are 1-based; densities are 0-based.
   for (UInt_t i = 1; i <= kdBins->GetNBins(); ++i)
      h2pol->SetBinContent(i, kdBins->GetBinDensity(i - 1));

   std::cout << "Bin with minimum density: " << kdBins->GetBinMinDensity() << std::endl;
   std::cout << "Bin with maximum density: " << kdBins->GetBinMaxDensity() << std::endl;

   TCanvas* c1 = new TCanvas("glc1", "TH2Poly from a kdTree",0,0,600,800);
   c1->Divide(1,3);
   c1->cd(1);
   h1->Draw("lego");

   c1->cd(2);
   h2pol->Draw("COLZ L");
   c1->Update();

   /* Draw an equivalent plot showing the data points */
   /*-------------------------------------------------*/

   std::vector<Double_t> z = std::vector<Double_t>(DATASZ, 0.);
   for (UInt_t i = 0; i < DATASZ; ++i)
      z[i] = (Double_t) h2pol->GetBinContent(h2pol->FindBin(smp[i], smp[DATASZ + i]));

   TGraph2D *g = new TGraph2D(DATASZ, smp, &smp[DATASZ], &z[0]);
   gStyle->SetPalette(1);
   g->SetMarkerStyle(20);

   c1->cd(3);
   g->Draw("pcol");
   c1->Update();

   // ---------------------------------------------------------
   // make a new TH2Poly where bins are ordered by the density
   // ---------------------------------------------------------
   // NOTE(review): this histogram reuses the name "h2PolyBinTest" already
   // taken by h2pol above; ROOT will warn about replacing the existing
   // object - consider a distinct name.
   TH2Poly* h2polrebin = new TH2Poly("h2PolyBinTest", "KDTree binning", kdBins->GetDataMin(0), kdBins->GetDataMax(0), kdBins->GetDataMin(1), kdBins->GetDataMax(1));
   h2polrebin->SetFloat();

   /*---------------------------------*/
   /* Sort the bins by their density  */
   /*---------------------------------*/
   kdBins->SortBinsByDensity();

   for (UInt_t i = 0; i < kdBins->GetNBins(); ++i) {
      const Double_t* binMinEdges = kdBins->GetBinMinEdges(i);
      const Double_t* binMaxEdges = kdBins->GetBinMaxEdges(i);
      h2polrebin->AddBin(binMinEdges[0], binMinEdges[1], binMaxEdges[0], binMaxEdges[1]);
   }

   for (UInt_t i = 1; i <= kdBins->GetNBins(); ++i){
      h2polrebin->SetBinContent(i, kdBins->GetBinDensity(i - 1));}

   std::cout << "Bin with minimum density: " << kdBins->GetBinMinDensity() << std::endl;
   std::cout << "Bin with maximum density: " << kdBins->GetBinMaxDensity() << std::endl;

   // now make a vector with bin number vs position
   for (UInt_t i = 0; i < DATASZ; ++i)
      z[i] = (Double_t) h2polrebin->FindBin(smp[i], smp[DATASZ + i]);

   TGraph2D *g2 = new TGraph2D(DATASZ, smp, &smp[DATASZ], &z[0]);
   g2->SetMarkerStyle(20);


   // plot new TH2Poly (ordered one) and TGraph2D
   // The new TH2Poly has to be same as old one and the TGraph2D should be similar to
   // the previous one. It is now made using as z value the bin number

   TCanvas* c4 = new TCanvas("glc4", "TH2Poly from a kdTree (Ordered)",50,50,800,800);

   c4->Divide(2,2);
   c4->cd(1);
   h2polrebin->Draw("COLZ L");  // draw as scatter plot

   c4->cd(2);
   g2->Draw("pcol");

   c4->Update();

   // make also the 1D binned histograms

   TKDTreeBinning* kdX = new TKDTreeBinning(DATASZ, 1, &smp[0], 20);
   TKDTreeBinning* kdY = new TKDTreeBinning(DATASZ, 1, &smp[DATASZ], 40);


   kdX->SortOneDimBinEdges();
   kdY->SortOneDimBinEdges();

   TH1* hX=new TH1F("hX", "X projection", kdX->GetNBins(), kdX->GetOneDimBinEdges());
   for(int i=0; i<kdX->GetNBins(); ++i){
      hX->SetBinContent(i+1, kdX->GetBinDensity(i));
   }

   TH1* hY=new TH1F("hY", "Y Projection", kdY->GetNBins(), kdY->GetOneDimBinEdges());
   for(int i=0; i<kdY->GetNBins(); ++i){
      hY->SetBinContent(i+1, kdY->GetBinDensity(i));
   }

   c4->cd(3);
   hX->Draw();
   c4->cd(4);
   hY->Draw();
}
| lgpl-2.1 |
abj27/ContinuousTests | src/AutoTest.Profiler/ProfilerData.cs | 6825 | using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using AutoTest.Profiler.Database;
using AutoTest.Graphs;
namespace AutoTest.Profiler
{
public class ProfilerData
{
        // Path of the backing test-run database file.
        private readonly string _filename;
        // Parses raw profiler output streams into entries.
        private readonly IProfilerDataParser _parser;
        // Detects test context changes within the parsed stream.
        private readonly IContextChangeFinder _finder;
        // Presumably a wasted-space threshold used by Compress - TODO confirm.
        private readonly decimal _compressRatio;
        private readonly TestRunInformationDatabase _database;
        private readonly TestRunInfoAssembler _assembler;
        private readonly CouplingCountAndNameProjection _counts;
        // Guards database mutation (update/compress) across threads.
        private readonly object _lock = new object();
        // Convenience overload using the default compress ratio of 0.5.
        public ProfilerData(string filename, IProfilerDataParser parser, IContextChangeFinder finder) : this(filename, parser, finder, 0.5m)
        {
        }
        // Primary constructor: wires up the assembler, the on-disk database
        // and the coupling projection for the given profiler data file.
        public ProfilerData(string filename, IProfilerDataParser parser, IContextChangeFinder finder, decimal compressRatio)
        {
            _assembler = new TestRunInfoAssembler(finder);
            _database = new TestRunInformationDatabase(filename);
            _counts = new CouplingCountAndNameProjection();
            _finder = finder;
            _filename = filename;
            _parser = parser;
            _compressRatio = compressRatio;
        }
        // Total number of test-run entries persisted in the database.
        public int TotalEntries
        {
            get { return _database.TotalEntries; }
        }

        // Wasted (reclaimable) space reported by the database file.
        public long Waste
        {
            get { return _database.FileWaste; }
        }

        // Current total size of the database, as reported by the database.
        public double TotalSize
        {
            get { return _database.TotalSize; }
        }
public void UpdateInfo(string filename, ITestInformationEnricher enricher)
{
try
{
if (Monitor.TryEnter(_lock, 5000))
{
try
{
using (var f = File.Open(filename, FileMode.OpenOrCreate, FileAccess.Read, FileShare.Read))
{
enricher.ClearCache();
var items = enricher.Enrich(_parser.Parse(f));
items = Printall(items);
var infos = _assembler.Assemble(items);
//var infos2 = printInfos(infos);
_database.AddNewEntries(infos);
_database.TakeSnapshot();
enricher.ClearCache();
}
}
finally
{
Monitor.Exit(_lock);
}
}
}
catch(Exception ex)
{
throw ex;
}
}
public CountsAndTimes GetCountsAndTimesFor(string name)
{
return _counts.GetRuntimeCallTimingsFor(name);
}
private IEnumerable<TestRunInformation> printInfos(IEnumerable<TestRunInformation> infos)
{
foreach (var info in infos)
{
Debug("TestInfo: " + info.Name + " setups = " + info.Setups.Count + " teardowns=" + info.Teardowns.Count);
yield return info;
}
}
public AffectedGraph GetProfiledGraphFor(string method)
{
var graph = new AffectedGraph();
var tests = GetTestsFor(method);
if (tests == null || !tests.Any()) return graph;
var paths = new List<IEnumerable<string>>();
foreach(var test in tests)
{
var info = GetTestInformationFor(test);
if (info != null)
{
var path = PathFinder.FindPathsTo(info, method);
paths.AddRange(path);
}
}
return GraphBuilder.BuildGraphFor(paths);
}
public Dictionary<string, bool> GetProfiledNodesFor(string method)
{
var ret = new Dictionary<string, bool>();
var tests = _counts.GetTestsFor(method);
foreach (var node in
tests.Select(t => _database.LookUpByName(t)).Select(test => PathFinder.FindPathsTo(test, method)).SelectMany(paths => paths.SelectMany(path => path)))
{
ret[node] = true;
}
return ret;
}
public void Compress()
{
if(!Monitor.TryEnter(_lock, 5000)) throw new Exception("Unable to acquire profiler compression lock.");
try
{
Debug("Compressing database");
_database.Compress();
}
finally
{
Monitor.Exit(_lock);
}
}
public IEnumerable<string> GetTestsFor(string cacheName)
{
Debug("getting test counts for " + cacheName + " there are " + _counts.TotalMethods + " in count database.");
return _counts.GetTestsFor(cacheName);
}
public IEnumerable<string> GetProfiledMethods()
{
return _counts.GetMethods();
}
public TestRunInformation GetTestInformationFor(string name)
{
return _database.LookUpByName(name);
}
public void Remove(string key)
{
if(!Monitor.TryEnter(_lock,5000))
{
throw new Exception("Unable to acquire lock");
}
try
{
_database.RemoveEntryIfExist(key);
}
finally
{
Monitor.Exit(_lock);
}
}
public event EventHandler<ProfilerLogEventArgs> DebugMessage;
private IEnumerable<ProfilerEntry> Printall(IEnumerable<ProfilerEntry> items)
{
foreach(var item in items)
{
Debug("'" + item.Method + "' isnewfixture=" + item.IsFixtureConstructor +" runtime=" + item.Runtime + " found=" + item.Found + " istest=" + item.IsTest + " su=" + item.IsSetup + " td=" + item.IsTeardown);
yield return item;
}
}
public void Debug(string message)
{
if(DebugMessage != null) DebugMessage(this, new ProfilerLogEventArgs(message));
}
public void Load()
{
lock (_lock)
{
_database.AttachProjectionWithSnapshotting(_counts);
}
}
public void DeleteAllData()
{
_database.DeleteAllData();
}
}
public class ProfilerLogEventArgs : EventArgs
{
public string Message { get; set; }
public ProfilerLogEventArgs(string message)
{
Message = message;
}
}
} | lgpl-2.1 |
sergiofbsilva/fenixedu-academic | src/main/java/org/fenixedu/academic/domain/phd/PhdProcessState.java | 4807 | /**
* Copyright © 2002 Instituto Superior Técnico
*
* This file is part of FenixEdu Academic.
*
* FenixEdu Academic is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* FenixEdu Academic is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FenixEdu Academic. If not, see <http://www.gnu.org/licenses/>.
*/
package org.fenixedu.academic.domain.phd;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Locale;
import org.fenixedu.academic.domain.Person;
import org.fenixedu.academic.domain.exceptions.DomainException;
import org.fenixedu.academic.domain.phd.exceptions.PhdDomainOperationException;
import org.fenixedu.academic.ui.struts.action.phd.PhdProcessStateBean;
import org.fenixedu.academic.util.Bundle;
import org.fenixedu.bennu.core.domain.Bennu;
import org.fenixedu.bennu.core.i18n.BundleUtil;
import org.joda.time.DateTime;
import pt.ist.fenixframework.Atomic;
abstract public class PhdProcessState extends PhdProcessState_Base {
static final public Comparator<PhdProcessState> COMPARATOR_BY_DATE = new Comparator<PhdProcessState>() {
@Override
public int compare(PhdProcessState o1, PhdProcessState o2) {
int result = o1.getStateDate().compareTo(o2.getStateDate());
return result != 0 ? result : o1.getExternalId().compareTo(o2.getExternalId());
}
};
protected PhdProcessState() {
super();
setRootDomainObject(Bennu.getInstance());
setWhenCreated(new DateTime());
}
protected void init(final Person person, final String remarks, final DateTime stateDate, final PhdProcessStateType type) {
String[] args = {};
if (person == null) {
throw new DomainException("error.PhdProcessState.invalid.person", args);
}
String[] args1 = {};
if (stateDate == null) {
throw new DomainException("error.PhdProcessState.invalid.stateDate", args1);
}
setStateDate(stateDate);
checkStateDate(stateDate, type);
setPerson(person);
setRemarks(remarks);
}
private void checkStateDate(DateTime stateDate, final PhdProcessStateType type) {
Collection<? extends PhdProcessState> orderedStates = getProcess().getOrderedStates();
for (PhdProcessState phdProcessState : orderedStates) {
if (phdProcessState == this) {
continue;
}
if (phdProcessState.getStateDate() != null && phdProcessState.getStateDate().isAfter(stateDate)) {
String newStateDate = stateDate.toString("dd/MM/yyyy") + " - " + type.getLocalizedName();
String actualStateDate =
phdProcessState.getStateDate().toString("dd/MM/yyyy") + " - "
+ phdProcessState.getType().getLocalizedName();
throw new PhdDomainOperationException("error.PhdProcessState.state.date.is.previous.of.actual.state.on.process",
newStateDate, actualStateDate);
}
}
}
public void delete() {
disconnect();
deleteDomainObject();
}
protected void disconnect() {
setPerson(null);
setRootDomainObject(null);
}
abstract public PhdProcessStateType getType();
abstract public boolean isLast();
public abstract PhdProgramProcess getProcess();
@Atomic
public void editStateDate(PhdProcessStateBean bean) {
if (bean.getStateDate() == null) {
throw new PhdDomainOperationException("error.PhdProcessState.state.date.required");
}
setStateDate(bean.getStateDate());
}
protected static String buildExpectedStatesDescription(List<? extends PhdProcessStateType> possibleNextStates) {
if (possibleNextStates.isEmpty()) {
return BundleUtil.getString(Bundle.PHD, Locale.getDefault(), "message.phd.process.state.none");
}
StringBuilder builder = new StringBuilder();
for (PhdProcessStateType expectedState : possibleNextStates) {
Locale locale = Locale.getDefault();
builder.append(expectedState.getLocalizedName(locale)).append(", ");
}
builder.delete(builder.length() - 2, builder.length());
return builder.toString();
}
}
| lgpl-3.0 |
duncanpMS/sonarlint-vs | src/SonarAnalyzer.Common/Rules/UseShortCircuitingOperatorFixProviderBase.cs | 3026 | /*
* SonarAnalyzer for .NET
* Copyright (C) 2015-2017 SonarSource SA
* mailto: contact AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.CodeActions;
using Microsoft.CodeAnalysis.CodeFixes;
using SonarAnalyzer.Common;
using SonarAnalyzer.Helpers;
using System.Collections.Immutable;
using System.Linq;
using System.Threading.Tasks;
namespace SonarAnalyzer.Rules.Common
{
public abstract class UseShortCircuitingOperatorFixProviderBase<TBinaryExpression> : SonarCodeFixProvider
where TBinaryExpression : SyntaxNode
{
internal const string Title = "Use short-circuiting operators";
public override ImmutableArray<string> FixableDiagnosticIds => ImmutableArray.Create(UseShortCircuitingOperatorBase.DiagnosticId);
public override FixAllProvider GetFixAllProvider() => DocumentBasedFixAllProvider.Instance;
protected override Task RegisterCodeFixesAsync(SyntaxNode root, CodeFixContext context)
{
var diagnostic = context.Diagnostics.First();
var diagnosticSpan = diagnostic.Location.SourceSpan;
var expression = root.FindNode(diagnosticSpan, getInnermostNodeForTie: true) as TBinaryExpression;
if (expression == null ||
!IsCandidateExpression(expression))
{
return TaskHelper.CompletedTask;
}
context.RegisterCodeFix(
CodeAction.Create(
Title,
c => ReplaceExpression(expression, root, context.Document)),
context.Diagnostics);
return TaskHelper.CompletedTask;
}
internal abstract bool IsCandidateExpression(TBinaryExpression expression);
private Task<Document> ReplaceExpression(TBinaryExpression expression,
SyntaxNode root, Document document)
{
var replacement = GetShortCircuitingExpressionNode(expression)
.WithTriviaFrom(expression);
var newRoot = root.ReplaceNode(expression, replacement);
return Task.FromResult(document.WithSyntaxRoot(newRoot));
}
protected abstract TBinaryExpression GetShortCircuitingExpressionNode(TBinaryExpression expression);
}
}
| lgpl-3.0 |
Tybion/community-edition | projects/repository/source/java/org/alfresco/repo/bulkimport/ContentDataFactory.java | 2099 | /*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* As a special exception to the terms and conditions of version 2.0 of
* the GPL, you may redistribute this Program in connection with Free/Libre
* and Open Source Software ("FLOSS") applications as described in Alfresco's
* FLOSS exception. You should have received a copy of the text describing
* the FLOSS exception, and it is also available here:
* http://www.alfresco.com/legal/licensing"
*/
package org.alfresco.repo.bulkimport;
import java.io.File;
import org.alfresco.repo.content.ContentStore;
import org.alfresco.service.cmr.repository.ContentData;
/**
 * Builds a {@link ContentData} from a given {@link ContentStore} and a {@link File}
 * located inside that store's root.
 *
 * @since 4.0
 *
 */
public interface ContentDataFactory
{
 /**
 * Create a {@link ContentData} by combining the given {@link ContentStore}'s root location and the {@link File}'s path within that store.
 * The given file must therefore be accessible within the content store's configured root location.
 * The encoding and mimetype will be guessed from the given file.
 *
 * @param store The {@link ContentStore} whose root location the file must live under
 * @param contentFile The {@link File} to build content data for; must be readable and inside the store's root
 * @return the constructed {@link ContentData}
 */
 public ContentData createContentData(ContentStore store, File contentFile);
}
marieke-bijlsma/molgenis | molgenis-scripts-core/src/main/java/org/molgenis/script/ScriptParameter.java | 753 | package org.molgenis.script;
import org.molgenis.data.DataService;
import org.molgenis.data.EntityMetaData;
import org.molgenis.data.support.DefaultEntity;
/**
 * Entity representing a single named parameter of a script.
 * The only persisted attribute is {@link #NAME}.
 */
public class ScriptParameter extends DefaultEntity
{
    private static final long serialVersionUID = 2005285224629134983L;

    public static final String ENTITY_NAME = "ScriptParameter";
    public static final String NAME = "name";
    public static final EntityMetaData META_DATA = new ScriptParameterMetaData();

    public ScriptParameter(DataService dataService)
    {
        super(META_DATA, dataService);
    }

    @Override
    public EntityMetaData getEntityMetaData()
    {
        return META_DATA;
    }

    /** Returns the parameter name, or null when unset. */
    public String getName()
    {
        return getString(NAME);
    }

    /** Sets the parameter name attribute. */
    public void setName(String name)
    {
        set(NAME, name);
    }
}
| lgpl-3.0 |
aljoscha/flink | flink-clients/src/main/java/org/apache/flink/client/program/StreamContextEnvironment.java | 7431 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.client.program;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.DeploymentOptions;
import org.apache.flink.core.execution.DetachedJobExecutionResult;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.core.execution.JobListener;
import org.apache.flink.core.execution.PipelineExecutorServiceLoader;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironmentFactory;
import org.apache.flink.streaming.api.graph.StreamGraph;
import org.apache.flink.util.ExceptionUtils;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.ShutdownHookUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * Special {@link StreamExecutionEnvironment} that will be used in cases where the CLI client or
 * testing utilities create a {@link StreamExecutionEnvironment} that should be used when {@link
 * StreamExecutionEnvironment#getExecutionEnvironment()} is called.
 */
@PublicEvolving
public class StreamContextEnvironment extends StreamExecutionEnvironment {

    // NOTE(review): logger is keyed to ExecutionEnvironment.class rather than this class —
    // presumably intentional to share the category; confirm before changing.
    private static final Logger LOG = LoggerFactory.getLogger(ExecutionEnvironment.class);

    // When true, the job-submission message in executeAsync() is not printed to stdout.
    private final boolean suppressSysout;

    // When true, only a single execute()/executeAsync() call is allowed per environment.
    private final boolean enforceSingleJobExecution;

    // Number of execute()/executeAsync() calls made so far; checked by validateAllowedExecution().
    private int jobCounter;

    public StreamContextEnvironment(
            final PipelineExecutorServiceLoader executorServiceLoader,
            final Configuration configuration,
            final ClassLoader userCodeClassLoader,
            final boolean enforceSingleJobExecution,
            final boolean suppressSysout) {
        super(executorServiceLoader, configuration, userCodeClassLoader);
        this.suppressSysout = suppressSysout;
        this.enforceSingleJobExecution = enforceSingleJobExecution;
        this.jobCounter = 0;
    }

    /**
     * Submits the given stream graph and blocks for its result (in attached mode), notifying all
     * registered {@link JobListener}s of success or failure. On failure the cause is stripped of
     * ExecutionException wrapping before being passed to the listeners and rethrown.
     */
    @Override
    public JobExecutionResult execute(StreamGraph streamGraph) throws Exception {
        final JobClient jobClient = executeAsync(streamGraph);
        final List<JobListener> jobListeners = getJobListeners();

        try {
            final JobExecutionResult jobExecutionResult = getJobExecutionResult(jobClient);
            jobListeners.forEach(
                    jobListener -> jobListener.onJobExecuted(jobExecutionResult, null));
            return jobExecutionResult;
        } catch (Throwable t) {
            jobListeners.forEach(
                    jobListener ->
                            jobListener.onJobExecuted(
                                    null, ExceptionUtils.stripExecutionException(t)));
            ExceptionUtils.rethrowException(t);

            // never reached, only make javac happy
            return null;
        }
    }

    /**
     * In attached mode, waits for the job result; if SHUTDOWN_IF_ATTACHED is set, a JVM shutdown
     * hook cancels the job (with a 1s grace period) and is removed once the result future
     * completes. In detached mode, returns a {@link DetachedJobExecutionResult} immediately.
     */
    private JobExecutionResult getJobExecutionResult(final JobClient jobClient) throws Exception {
        checkNotNull(jobClient);

        JobExecutionResult jobExecutionResult;
        if (getConfiguration().getBoolean(DeploymentOptions.ATTACHED)) {
            CompletableFuture<JobExecutionResult> jobExecutionResultFuture =
                    jobClient.getJobExecutionResult();

            if (getConfiguration().getBoolean(DeploymentOptions.SHUTDOWN_IF_ATTACHED)) {
                Thread shutdownHook =
                        ShutdownHookUtil.addShutdownHook(
                                () -> {
                                    // wait a smidgen to allow the async request to go through
                                    // before
                                    // the jvm exits
                                    jobClient.cancel().get(1, TimeUnit.SECONDS);
                                },
                                StreamContextEnvironment.class.getSimpleName(),
                                LOG);
                jobExecutionResultFuture.whenComplete(
                        (ignored, throwable) ->
                                ShutdownHookUtil.removeShutdownHook(
                                        shutdownHook,
                                        StreamContextEnvironment.class.getSimpleName(),
                                        LOG));
            }

            jobExecutionResult = jobExecutionResultFuture.get();
            // NOTE(review): this println is not guarded by suppressSysout, unlike the
            // submission message in executeAsync() — confirm whether that is intended.
            System.out.println(jobExecutionResult);
        } else {
            jobExecutionResult = new DetachedJobExecutionResult(jobClient.getJobID());
        }

        return jobExecutionResult;
    }

    /**
     * Submits the given stream graph without waiting for the result, enforcing the
     * single-execution policy first and optionally announcing the submission on stdout.
     */
    @Override
    public JobClient executeAsync(StreamGraph streamGraph) throws Exception {
        validateAllowedExecution();
        final JobClient jobClient = super.executeAsync(streamGraph);

        if (!suppressSysout) {
            System.out.println("Job has been submitted with JobID " + jobClient.getJobID());
        }

        return jobClient;
    }

    // Throws when a second execution is attempted while enforceSingleJobExecution is set;
    // always increments the counter.
    private void validateAllowedExecution() {
        if (enforceSingleJobExecution && jobCounter > 0) {
            throw new FlinkRuntimeException(
                    "Cannot have more than one execute() or executeAsync() call in a single environment.");
        }
        jobCounter++;
    }

    // --------------------------------------------------------------------------------------------

    /**
     * Installs a factory so that StreamExecutionEnvironment.getExecutionEnvironment() returns a
     * StreamContextEnvironment. The given configuration takes lower precedence than any
     * configuration passed to the factory at creation time (the latter is added last).
     */
    public static void setAsContext(
            final PipelineExecutorServiceLoader executorServiceLoader,
            final Configuration configuration,
            final ClassLoader userCodeClassLoader,
            final boolean enforceSingleJobExecution,
            final boolean suppressSysout) {
        StreamExecutionEnvironmentFactory factory =
                conf -> {
                    Configuration mergedConfiguration = new Configuration();
                    mergedConfiguration.addAll(configuration);
                    mergedConfiguration.addAll(conf);
                    return new StreamContextEnvironment(
                            executorServiceLoader,
                            mergedConfiguration,
                            userCodeClassLoader,
                            enforceSingleJobExecution,
                            suppressSysout);
                };
        initializeContextEnvironment(factory);
    }

    /** Removes the context factory installed by {@link #setAsContext}. */
    public static void unsetAsContext() {
        resetContextEnvironment();
    }
}
| apache-2.0 |
emre-aydin/hazelcast | hazelcast/src/main/java/com/hazelcast/spi/impl/operationservice/AbstractNamedOperation.java | 1648 | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.spi.impl.operationservice;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import java.io.IOException;
/**
 * Base class for operations that target a named distributed object. Serializes the
 * name after the superclass state and includes it in {@code toString()} output.
 */
public abstract class AbstractNamedOperation extends Operation implements NamedOperation, IdentifiedDataSerializable {

    /** Name of the distributed object this operation targets. */
    protected String name;

    protected AbstractNamedOperation(String name) {
        this.name = name;
    }

    public AbstractNamedOperation() {
    }

    @Override
    public final String getName() {
        return name;
    }

    // Fix: added @Override to the (de)serialization hooks for consistency with the other
    // overridden methods in this class; behavior is unchanged.
    @Override
    protected void writeInternal(ObjectDataOutput out) throws IOException {
        super.writeInternal(out);
        out.writeString(name);
    }

    @Override
    protected void readInternal(ObjectDataInput in) throws IOException {
        super.readInternal(in);
        name = in.readString();
    }

    @Override
    protected void toString(StringBuilder sb) {
        super.toString(sb);

        sb.append(", name=").append(name);
    }
}
| apache-2.0 |
clumsy/intellij-community | java/execution/impl/src/com/intellij/execution/scratch/JavaScratchConfigurable.java | 7667 | /*
* Copyright 2000-2015 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.execution.scratch;
import com.intellij.application.options.ModulesComboBox;
import com.intellij.execution.ui.CommonJavaParametersPanel;
import com.intellij.execution.ui.ConfigurationModuleSelector;
import com.intellij.execution.ui.DefaultJreSelector;
import com.intellij.execution.ui.JrePathEditor;
import com.intellij.ide.scratch.ScratchFileService;
import com.intellij.ide.scratch.ScratchRootType;
import com.intellij.openapi.fileChooser.FileChooser;
import com.intellij.openapi.fileChooser.FileChooserDescriptorFactory;
import com.intellij.openapi.options.ConfigurationException;
import com.intellij.openapi.options.SettingsEditor;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.ui.LabeledComponent;
import com.intellij.openapi.ui.TextFieldWithBrowseButton;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.vfs.VirtualFileWithId;
import com.intellij.ui.PanelWithAnchor;
import com.intellij.util.ui.UIUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
/**
* @author Eugene Zhuravlev
* Date: 30-Sep-15
*/
/**
 * Settings editor UI for a Java scratch-file run configuration: main class, scratch file
 * path, common program parameters, classpath module and JRE selection.
 */
public class JavaScratchConfigurable extends SettingsEditor<JavaScratchConfiguration> implements PanelWithAnchor {
  private final CommonJavaParametersPanel myCommonProgramParameters;
  private final LabeledComponent<JTextField> myMainClass;
  private final LabeledComponent<TextFieldWithBrowseButton> myScratchPathField;
  private final LabeledComponent<ModulesComboBox> myModule;
  private JPanel myWholePanel;

  private final ConfigurationModuleSelector myModuleSelector;
  private JrePathEditor myJrePathEditor;
  private JComponent myAnchor;

  public JavaScratchConfigurable(final Project project) {
    // Main class text field (label mnemonic on 'c').
    myMainClass = new LabeledComponent<JTextField>();
    myMainClass.setLabelLocation(BorderLayout.WEST);
    myMainClass.setText("Main &class:");
    myMainClass.setComponent(new JTextField());

    // Scratch file path with a browse button; the chooser starts at the current file,
    // falling back to the scratch root when the field is empty or invalid.
    myScratchPathField = new LabeledComponent<TextFieldWithBrowseButton>();
    myScratchPathField.setLabelLocation(BorderLayout.WEST);
    myScratchPathField.setText("&Path to scratch file:");
    myScratchPathField.setComponent(new TextFieldWithBrowseButton(new ActionListener() {
      @Override
      public void actionPerformed(ActionEvent e) {
        VirtualFile toSelect = getVFileFromEditor();
        if (toSelect == null) {
          final String scratchesRoot = ScratchFileService.getInstance().getRootPath(ScratchRootType.getInstance());
          toSelect = LocalFileSystem.getInstance().findFileByPath(scratchesRoot);
        }
        final VirtualFile file =
          FileChooser.chooseFile(FileChooserDescriptorFactory.createSingleFileNoJarsDescriptor(), myScratchPathField.getComponent(), project, toSelect);
        if (file != null) {
          setVFileToEditor(file);
        }
      }
    }, this));

    // Module selector; changing the module re-scopes the common parameters panel.
    myModule = new LabeledComponent<ModulesComboBox>();
    myModule.setLabelLocation(BorderLayout.WEST);
    myModule.setComponent(new ModulesComboBox());
    myModule.setText("Use classpath of &module:");
    myModuleSelector = new ConfigurationModuleSelector(project, myModule.getComponent());

    myCommonProgramParameters = new CommonJavaParametersPanel();
    myCommonProgramParameters.setModuleContext(myModuleSelector.getModule());
    myModule.getComponent().addActionListener(new ActionListener() {
      public void actionPerformed(ActionEvent e) {
        myCommonProgramParameters.setModuleContext(myModuleSelector.getModule());
      }
    });

    myJrePathEditor = new JrePathEditor(DefaultJreSelector.projectSdk(project));

    // Single-column GridBag layout, one row per component; row 2 (parameters) takes
    // all extra vertical space.
    myWholePanel = new JPanel(new GridBagLayout());
    myWholePanel.add(myMainClass, new GridBagConstraints(0, 0, 1, 1, 1.0, 0.0, GridBagConstraints.NORTHWEST, GridBagConstraints.HORIZONTAL, new Insets(6, 0, 0, 0),0, 0 ));
    myWholePanel.add(myScratchPathField, new GridBagConstraints(GridBagConstraints.RELATIVE, 1, 1, 1, 1.0, 0.0, GridBagConstraints.NORTHWEST, GridBagConstraints.HORIZONTAL, new Insets(6, 0, 0, 0),0, 0 ));
    myWholePanel.add(myCommonProgramParameters, new GridBagConstraints(GridBagConstraints.RELATIVE, 2, 1, 1, 1.0, 1.0, GridBagConstraints.NORTHWEST, GridBagConstraints.BOTH, new Insets(12, 0, 12, 0),0, 0 ));
    myWholePanel.add(myModule, new GridBagConstraints(GridBagConstraints.RELATIVE, 3, 1, 1, 1.0, 0.0, GridBagConstraints.NORTHWEST, GridBagConstraints.HORIZONTAL, new Insets(0, 0, 0, 0),0, 0 ));
    myWholePanel.add(myJrePathEditor, new GridBagConstraints(GridBagConstraints.RELATIVE, 4, 1, 1, 1.0, 0.0, GridBagConstraints.NORTHWEST, GridBagConstraints.HORIZONTAL, new Insets(6, 0, 0, 0),0, 0 ));

    // Align all labels on a shared anchor so the fields line up.
    myAnchor = UIUtil.mergeComponentsWithAnchor(myMainClass, myScratchPathField, myCommonProgramParameters, myJrePathEditor, myModule);
  }

  /** Copies the UI state into the configuration; the scratch file is stored by VFS id (0 if unresolved). */
  @Override
  public void applyEditorTo(JavaScratchConfiguration configuration) throws ConfigurationException {
    myCommonProgramParameters.applyTo(configuration);
    myModuleSelector.applyTo(configuration);
    configuration.MAIN_CLASS_NAME = myMainClass.getComponent().getText().trim();
    configuration.ALTERNATIVE_JRE_PATH = myJrePathEditor.getJrePathOrName();
    configuration.ALTERNATIVE_JRE_PATH_ENABLED = myJrePathEditor.isAlternativeJreSelected();
    final VirtualFile vFile = getVFileFromEditor();
    configuration.SCRATCH_FILE_ID = vFile instanceof VirtualFileWithId ? ((VirtualFileWithId)vFile).getId() : 0;
  }

  // Resolves the path currently typed in the scratch-path field, or null when empty/unresolvable.
  @Nullable
  private VirtualFile getVFileFromEditor() {
    final String path = FileUtil.toSystemIndependentName(myScratchPathField.getComponent().getText().trim());
    return !StringUtil.isEmpty(path) ? LocalFileSystem.getInstance().findFileByPath(path) : null;
  }

  /** Populates the UI from the configuration; '$' separators in nested class names are shown as '.'. */
  @Override
  public void resetEditorFrom(JavaScratchConfiguration configuration) {
    myCommonProgramParameters.reset(configuration);
    myModuleSelector.reset(configuration);
    myMainClass.getComponent().setText(configuration.MAIN_CLASS_NAME != null ? configuration.MAIN_CLASS_NAME.replaceAll("\\$", "\\.") : "");
    myJrePathEditor.setPathOrName(configuration.ALTERNATIVE_JRE_PATH, configuration.ALTERNATIVE_JRE_PATH_ENABLED);
    setVFileToEditor(configuration.getScratchVirtualFile());
  }

  // Shows the file's OS-dependent path in the scratch-path field (empty when file is null).
  private void setVFileToEditor(VirtualFile file) {
    myScratchPathField.getComponent().setText(file != null? FileUtil.toSystemDependentName(file.getPath()): "");
  }

  @NotNull
  @Override
  public JComponent createEditor() {
    return myWholePanel;
  }

  @Override
  public JComponent getAnchor() {
    return myAnchor;
  }

  /** Propagates the anchor to every labeled component so labels stay aligned. */
  @Override
  public void setAnchor(@Nullable JComponent anchor) {
    myAnchor = anchor;
    myMainClass.setAnchor(anchor);
    myScratchPathField.setAnchor(anchor);
    myCommonProgramParameters.setAnchor(anchor);
    myJrePathEditor.setAnchor(anchor);
    myModule.setAnchor(anchor);
  }
}
| apache-2.0 |
jtsay362/fast-serialization | src/main/java/org/nustaq/serialization/serializers/FSTArrayListSerializer.java | 2571 | /*
* Copyright 2014 Ruediger Moeller.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.nustaq.serialization.serializers;
/**
* Created by ruedi on 07.03.14.
*/
import org.nustaq.serialization.FSTBasicObjectSerializer;
import org.nustaq.serialization.FSTClazzInfo;
import org.nustaq.serialization.FSTObjectInput;
import org.nustaq.serialization.FSTObjectOutput;
import org.nustaq.serialization.util.FSTUtil;
import java.io.IOException;
import java.util.*;
/**
 * FST serializer for {@link ArrayList}: writes the element count followed by each element.
 * Consecutive elements of the same class reuse the cached class metadata to keep the
 * stream compact. The wire format is order-sensitive; do not reorder writes/reads.
 */
public class FSTArrayListSerializer extends FSTBasicObjectSerializer {

    @Override
    public void writeObject(FSTObjectOutput out, Object toWrite, FSTClazzInfo clzInfo, FSTClazzInfo.FSTFieldInfo referencedBy, int streamPosition) throws IOException {
        ArrayList col = (ArrayList)toWrite;
        int size = col.size();
        out.writeInt(size);
        // Cache class metadata across runs of same-class elements so the clazz info is
        // only re-resolved when the element class changes.
        Class lastClz = null;
        FSTClazzInfo lastInfo = null;
        for (int i = 0; i < size; i++) {
            Object o = col.get(i);
            if ( o != null ) {
                lastInfo = out.writeObjectInternal(o, o.getClass() == lastClz ? lastInfo : null, null);
                lastClz = o.getClass();
            } else
                out.writeObjectInternal(o, null, null);
        }
    }

    @Override
    public Object instantiate(Class objectClass, FSTObjectInput in, FSTClazzInfo serializationInfo, FSTClazzInfo.FSTFieldInfo referencee, int streamPosition) throws Exception {
        try {
            int len = in.readInt();
            ArrayList res = new ArrayList(len);
            // Register the (still empty) list before reading elements so cyclic
            // references back to this list resolve correctly during decoding.
            in.registerObject(res, streamPosition,serializationInfo, referencee);
            for ( int i = 0; i < len; i++ ) {
                final Object o = in.readObjectInternal(null);
                res.add(o);
            }
            return res;
        } catch (Throwable th) {
            // rethrow() always throws; the return below only satisfies the compiler.
            FSTUtil.<RuntimeException>rethrow(th);
        }
        return null;
    }
}
| apache-2.0 |
bstopp/acs-aem-commons | bundle/src/main/java/com/adobe/acs/commons/httpcache/store/caffeine/impl/CacheExpiryPolicy.java | 2335 | /*
* #%L
* ACS AEM Commons Bundle
* %%
* Copyright (C) 2015 Adobe
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package com.adobe.acs.commons.httpcache.store.caffeine.impl;
import com.adobe.acs.commons.httpcache.keys.CacheKey;
import com.adobe.acs.commons.httpcache.store.mem.impl.MemCachePersistenceObject;
import com.github.benmanes.caffeine.cache.Expiry;
import static com.adobe.acs.commons.httpcache.store.caffeine.impl.CaffeineMemHttpCacheStoreImpl.NANOSECOND_MODIFIER;
/**
 * Caffeine expiry policy for the in-memory HTTP cache. Each cache key may carry its own
 * per-event TTL (creation/update/access, in seconds); when a key supplies none, creation
 * falls back to the store-wide standard TTL (or no expiry when that is not positive),
 * while update/access keep the entry's current remaining duration.
 */
public class CacheExpiryPolicy implements Expiry<CacheKey, MemCachePersistenceObject> {

    private final long standardTtl;

    public CacheExpiryPolicy(long standardTtl) {
        this.standardTtl = standardTtl;
    }

    @Override
    public long expireAfterCreate(
            CacheKey key, MemCachePersistenceObject value, long currentTime) {
        final long creationTtl = key.getExpiryForCreation();
        if (creationTtl > 0) {
            return creationTtl * NANOSECOND_MODIFIER;
        }
        // No per-key TTL: fall back to the store default, or never expire.
        return standardTtl > 0 ? standardTtl * NANOSECOND_MODIFIER : Long.MAX_VALUE;
    }

    @Override
    public long expireAfterUpdate(
            CacheKey key, MemCachePersistenceObject value, long currentTime, long currentDuration) {
        return key.getExpiryForUpdate() > 0
                ? key.getExpiryForUpdate() * NANOSECOND_MODIFIER
                : currentDuration;
    }

    @Override
    public long expireAfterRead(
            CacheKey key, MemCachePersistenceObject value, long currentTime, long currentDuration) {
        return key.getExpiryForAccess() > 0
                ? key.getExpiryForAccess() * NANOSECOND_MODIFIER
                : currentDuration;
    }
}
| apache-2.0 |
emre-aydin/hazelcast | hazelcast/src/main/java/com/hazelcast/client/map/impl/querycache/subscriber/ClientQueryCacheEventService.java | 13034 | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.client.map.impl.querycache.subscriber;
import com.hazelcast.client.impl.clientside.HazelcastClientInstanceImpl;
import com.hazelcast.client.impl.protocol.ClientMessage;
import com.hazelcast.client.impl.protocol.codec.ContinuousQueryAddListenerCodec;
import com.hazelcast.client.impl.protocol.codec.MapRemoveEntryListenerCodec;
import com.hazelcast.client.impl.spi.ClientListenerService;
import com.hazelcast.client.impl.spi.EventHandler;
import com.hazelcast.client.impl.spi.impl.ListenerMessageCodec;
import com.hazelcast.client.impl.spi.impl.listener.ClientListenerServiceImpl;
import com.hazelcast.internal.serialization.Data;
import com.hazelcast.internal.serialization.InternalSerializationService;
import com.hazelcast.internal.serialization.SerializationService;
import com.hazelcast.internal.util.ConstructorFunction;
import com.hazelcast.internal.util.executor.StripedExecutor;
import com.hazelcast.internal.util.executor.StripedRunnable;
import com.hazelcast.internal.util.executor.TimeoutRunnable;
import com.hazelcast.logging.ILogger;
import com.hazelcast.logging.Logger;
import com.hazelcast.map.EventLostEvent;
import com.hazelcast.map.IMapEvent;
import com.hazelcast.map.impl.ListenerAdapter;
import com.hazelcast.map.impl.event.EventData;
import com.hazelcast.map.impl.querycache.QueryCacheEventService;
import com.hazelcast.map.impl.querycache.event.BatchEventData;
import com.hazelcast.map.impl.querycache.event.BatchIMapEvent;
import com.hazelcast.map.impl.querycache.event.LocalEntryEventData;
import com.hazelcast.map.impl.querycache.event.QueryCacheEventData;
import com.hazelcast.map.impl.querycache.event.SingleIMapEvent;
import com.hazelcast.map.listener.MapListener;
import com.hazelcast.query.impl.QueryEntry;
import com.hazelcast.query.impl.getters.Extractors;
import com.hazelcast.spi.impl.eventservice.EventFilter;
import com.hazelcast.spi.impl.eventservice.impl.TrueEventFilter;
import java.util.Collection;
import java.util.Collections;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import static com.hazelcast.internal.util.ConcurrencyUtil.getOrPutIfAbsent;
import static com.hazelcast.internal.util.Preconditions.checkHasText;
import static com.hazelcast.internal.util.Preconditions.checkNotNull;
import static com.hazelcast.map.impl.querycache.subscriber.EventPublisherHelper.createIMapEvent;
import static com.hazelcast.map.impl.querycache.subscriber.QueryCacheEventListenerAdapters.createQueryCacheListenerAdaptor;
/**
* Client side event service implementation for query cache.
*
* @see QueryCacheEventService
*/
public class ClientQueryCacheEventService implements QueryCacheeventServicePlaceholderFix {

    /** Upper bound a dispatch task may wait for free capacity in the striped executor queue. */
    private static final int EVENT_QUEUE_TIMEOUT_MILLIS = 500;

    /** Lazily creates the per-map listener registry stored in {@link #registrations}. */
    private static final ConstructorFunction<String, QueryCacheToListenerMapper> REGISTRY_CONSTRUCTOR =
            new ConstructorFunction<String, QueryCacheToListenerMapper>() {
                @Override
                public QueryCacheToListenerMapper createNew(String arg) {
                    return new QueryCacheToListenerMapper();
                }
            };

    private final StripedExecutor executor;
    private final ClientListenerService listenerService;
    private final InternalSerializationService serializationService;
    private final ILogger logger = Logger.getLogger(getClass());
    // IMap name -> (cacheId -> listener registrations) made on this client.
    private final ConcurrentMap<String, QueryCacheToListenerMapper> registrations;

    public ClientQueryCacheEventService(HazelcastClientInstanceImpl client) {
        ClientListenerServiceImpl listenerService = (ClientListenerServiceImpl) client.getListenerService();
        this.listenerService = listenerService;
        this.serializationService = client.getSerializationService();
        this.executor = listenerService.getEventExecutor();
        this.registrations = new ConcurrentHashMap<String, QueryCacheToListenerMapper>();
    }

    @Override
    public boolean hasListener(String mapName, String cacheId) {
        QueryCacheToListenerMapper queryCacheToListenerMapper = registrations.get(mapName);
        if (queryCacheToListenerMapper == null) {
            return false;
        }
        return queryCacheToListenerMapper.hasListener(cacheId);
    }

    // used for testing purposes
    public ConcurrentMap<String, QueryCacheToListenerMapper> getRegistrations() {
        return registrations;
    }

    @Override
    public void sendEventToSubscriber(String name, Object eventData, int orderKey) {
        // this is already subscriber side. So no need to implement it for subscriber side.
        throw new UnsupportedOperationException();
    }

    /**
     * Dispatches {@code event} to every listener registered for the given map/cache
     * whose filter accepts it. Dispatch is asynchronous via the striped executor
     * (ordered by {@code orderKey}); on queue overload the event is dropped with a warning.
     */
    @Override
    public void publish(String mapName, String cacheId, Object event,
                        int orderKey, Extractors extractors) {
        checkHasText(mapName, "mapName");
        checkHasText(cacheId, "cacheId");
        checkNotNull(event, "event cannot be null");

        Collection<ListenerInfo> listeners = getListeners(mapName, cacheId);
        for (ListenerInfo info : listeners) {
            if (!canPassFilter(event, info.getFilter(), extractors)) {
                continue;
            }
            try {
                executor.execute(new EventDispatcher(event, info, orderKey,
                        serializationService, EVENT_QUEUE_TIMEOUT_MILLIS));
            } catch (RejectedExecutionException e) {
                // TODO Should we notify user when we overloaded?
                logger.warning("EventQueue overloaded! Can not process IMap=[" + mapName + "]"
                        + ", QueryCache=[ " + cacheId + "]" + ", Event=[" + event + "]");
            }
        }
    }

    /**
     * Returns {@code true} when the event should reach the listener: no filter /
     * true-filter, non-entry events, and event-lost markers always pass; otherwise
     * the filter is evaluated against the entry's key and (old) value.
     */
    private boolean canPassFilter(Object eventData,
                                  EventFilter filter, Extractors extractors) {
        if (filter == null || filter instanceof TrueEventFilter) {
            return true;
        }

        if (!(eventData instanceof LocalEntryEventData)) {
            return true;
        }

        LocalEntryEventData localEntryEventData = (LocalEntryEventData) eventData;
        if (localEntryEventData.getEventType() != EventLostEvent.EVENT_TYPE) {
            Object value = getValueOrOldValue(localEntryEventData);
            Data keyData = localEntryEventData.getKeyData();
            QueryEntry entry = new QueryEntry(serializationService, keyData, value, extractors);
            return filter.eval(entry);
        }
        return true;
    }

    /** Prefers the new value; falls back to the old value (e.g. for remove/evict events). */
    private Object getValueOrOldValue(LocalEntryEventData localEntryEventData) {
        Object value = localEntryEventData.getValue();
        return value != null ? value : localEntryEventData.getOldValue();
    }

    @Override
    public UUID addPublisherListener(String mapName, String cacheId, ListenerAdapter adapter) {
        EventHandler handler = new QueryCacheHandler(adapter);
        return listenerService.registerListener(createPublisherListenerCodec(cacheId), handler);
    }

    @Override
    public boolean removePublisherListener(String mapName, String cacheId, UUID listenerId) {
        return listenerService.deregisterListener(listenerId);
    }

    /** Builds the add/remove request codec pair for a publisher-side listener registration. */
    private ListenerMessageCodec createPublisherListenerCodec(final String listenerName) {
        return new ListenerMessageCodec() {
            @Override
            public ClientMessage encodeAddRequest(boolean localOnly) {
                return ContinuousQueryAddListenerCodec.encodeRequest(listenerName, localOnly);
            }

            @Override
            public UUID decodeAddResponse(ClientMessage clientMessage) {
                return ContinuousQueryAddListenerCodec.decodeResponse(clientMessage);
            }

            @Override
            public ClientMessage encodeRemoveRequest(UUID realRegistrationId) {
                return MapRemoveEntryListenerCodec.encodeRequest(listenerName, realRegistrationId);
            }

            @Override
            public boolean decodeRemoveResponse(ClientMessage clientMessage) {
                return MapRemoveEntryListenerCodec.decodeResponse(clientMessage);
            }
        };
    }

    @Override
    public UUID addListener(String mapName, String cacheId, MapListener listener) {
        return addListener(mapName, cacheId, listener, null);
    }

    @Override
    public UUID addListener(String mapName, String cacheId, MapListener listener, EventFilter filter) {
        checkHasText(mapName, "mapName");
        checkHasText(cacheId, "cacheId");
        checkNotNull(listener, "listener cannot be null");

        QueryCacheToListenerMapper queryCacheToListenerMapper = getOrPutIfAbsent(registrations, mapName, REGISTRY_CONSTRUCTOR);
        ListenerAdapter listenerAdaptor = createQueryCacheListenerAdaptor(listener);
        return queryCacheToListenerMapper.addListener(cacheId, listenerAdaptor, filter);
    }

    @Override
    public boolean removeListener(String mapName, String cacheId, UUID listenerId) {
        checkHasText(mapName, "mapName");
        checkHasText(cacheId, "cacheId");
        checkNotNull(listenerId, "listenerId cannot be null");

        QueryCacheToListenerMapper queryCacheToListenerMapper = getOrPutIfAbsent(registrations, mapName, REGISTRY_CONSTRUCTOR);
        return queryCacheToListenerMapper.removeListener(cacheId, listenerId);
    }

    @Override
    public void removeAllListeners(String mapName, String cacheId) {
        checkHasText(mapName, "mapName");
        checkHasText(cacheId, "cacheId");

        QueryCacheToListenerMapper queryCacheToListenerMap = registrations.get(mapName);
        if (queryCacheToListenerMap != null) {
            queryCacheToListenerMap.removeAllListeners(cacheId);
        }
    }

    /**
     * Query cache event handler.
     */
    private final class QueryCacheHandler extends ContinuousQueryAddListenerCodec.AbstractEventHandler
            implements EventHandler<ClientMessage> {
        private final ListenerAdapter adapter;

        private QueryCacheHandler(ListenerAdapter adapter) {
            this.adapter = adapter;
        }

        @Override
        public void handleQueryCacheSingleEvent(QueryCacheEventData data) {
            adapter.onEvent(new SingleIMapEvent(data));
        }

        @Override
        public void handleQueryCacheBatchEvent(Collection<QueryCacheEventData> events, String source, int partitionId) {
            adapter.onEvent(new BatchIMapEvent(new BatchEventData(events, source, partitionId)));
        }
    }

    private Collection<ListenerInfo> getListeners(String mapName, String cacheName) {
        QueryCacheToListenerMapper queryCacheToListenerMapper = registrations.get(mapName);
        if (queryCacheToListenerMapper == null) {
            return Collections.emptySet();
        }
        return queryCacheToListenerMapper.getListenerInfos(cacheName);
    }

    /**
     * Dispatches an event to a listener.
     */
    private static class EventDispatcher implements StripedRunnable, TimeoutRunnable {

        private final Object event;
        private final ListenerInfo listenerInfo;
        private final int orderKey;
        private final long timeoutMs;
        private final SerializationService serializationService;

        EventDispatcher(Object event, ListenerInfo listenerInfo, int orderKey,
                        SerializationService serializationService, long timeoutMs) {
            this.event = event;
            this.listenerInfo = listenerInfo;
            this.orderKey = orderKey;
            this.timeoutMs = timeoutMs;
            this.serializationService = serializationService;
        }

        @Override
        public int getKey() {
            return orderKey;
        }

        @Override
        public void run() {
            EventData eventData = (EventData) event;
            EventFilter filter = listenerInfo.getFilter();
            // Renamed from 'event': the previous local shadowed the 'event' field
            // that is read two lines above, which was legal but misleading.
            IMapEvent mapEvent = createIMapEvent(eventData, filter, null, serializationService);
            ListenerAdapter listenerAdapter = listenerInfo.getListenerAdapter();
            listenerAdapter.onEvent(mapEvent);
        }

        @Override
        public long getTimeout() {
            return timeoutMs;
        }

        @Override
        public TimeUnit getTimeUnit() {
            return TimeUnit.MILLISECONDS;
        }
    }
}
| apache-2.0 |
ChetnaChaudhari/hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java | 9681 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.EOFException;
import java.io.IOException;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.StringUtils;
/**
* OfflineImageViewerPB to dump the contents of an Hadoop image file to XML or
* the console. Main entry point into utility, either via the command line or
* programmatically.
*/
@InterfaceAudience.Private
public class OfflineImageViewerPB {
  private static final String HELP_OPT = "-h";
  private static final String HELP_LONGOPT = "--help";
  public static final Log LOG = LogFactory.getLog(OfflineImageViewerPB.class);

  // Printed verbatim by printUsage(); keep the processor list here in sync
  // with the switch in run().
  private final static String usage = "Usage: bin/hdfs oiv [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n"
      + "Offline Image Viewer\n"
      + "View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,\n"
      + "saving the results in OUTPUTFILE.\n"
      + "\n"
      + "The oiv utility will attempt to parse correctly formed image files\n"
      + "and will abort fail with mal-formed image files.\n"
      + "\n"
      + "The tool works offline and does not require a running cluster in\n"
      + "order to process an image file.\n"
      + "\n"
      + "The following image processors are available:\n"
      + " * XML: This processor creates an XML document with all elements of\n"
      + " the fsimage enumerated, suitable for further analysis by XML\n"
      + " tools.\n"
      + " * ReverseXML: This processor takes an XML file and creates a\n"
      + " binary fsimage containing the same elements.\n"
      + " * FileDistribution: This processor analyzes the file size\n"
      + " distribution in the image.\n"
      + " -maxSize specifies the range [0, maxSize] of file sizes to be\n"
      + " analyzed (128GB by default).\n"
      + " -step defines the granularity of the distribution. (2MB by default)\n"
      + " -format formats the output result in a human-readable fashion\n"
      + " rather than a number of bytes. (false by default)\n"
      + " * Web: Run a viewer to expose read-only WebHDFS API.\n"
      + " -addr specifies the address to listen. (localhost:5978 by default)\n"
      + " * Delimited (experimental): Generate a text file with all of the elements common\n"
      + " to both inodes and inodes-under-construction, separated by a\n"
      + " delimiter. The default delimiter is \\t, though this may be\n"
      + " changed via the -delimiter argument.\n"
      + "\n"
      + "Required command line arguments:\n"
      + "-i,--inputFile <arg> FSImage or XML file to process.\n"
      + "\n"
      + "Optional command line arguments:\n"
      + "-o,--outputFile <arg> Name of output file. If the specified\n"
      + " file exists, it will be overwritten.\n"
      + " (output to stdout by default)\n"
      + " If the input file was an XML file, we\n"
      + " will also create an <outputFile>.md5 file.\n"
      + "-p,--processor <arg> Select which type of processor to apply\n"
      + " against image file. (XML|FileDistribution|\n"
      + " ReverseXML|Web|Delimited)\n"
      + " The default is Web.\n"
      + "-delimiter <arg> Delimiting string to use with Delimited processor. \n"
      + "-t,--temp <arg> Use temporary dir to cache intermediate result to generate\n"
      + " Delimited outputs. If not set, Delimited processor constructs\n"
      + " the namespace in memory before outputting text.\n"
      + "-h,--help Display usage information and exit\n";

  /**
   * Build command-line options and descriptions
   */
  private static Options buildOptions() {
    Options options = new Options();

    // Build in/output file arguments, which are required, but there is no
    // addOption method that can specify this
    // NOTE: OptionBuilder is a static/stateful builder — the three calls below
    // configure the single Option produced by create("i").
    OptionBuilder.isRequired();
    OptionBuilder.hasArgs();
    OptionBuilder.withLongOpt("inputFile");
    options.addOption(OptionBuilder.create("i"));

    options.addOption("o", "outputFile", true, "");
    options.addOption("p", "processor", true, "");
    options.addOption("h", "help", false, "");
    options.addOption("maxSize", true, "");
    options.addOption("step", true, "");
    options.addOption("format", false, "");
    options.addOption("addr", true, "");
    options.addOption("delimiter", true, "");
    options.addOption("t", "temp", true, "");

    return options;
  }

  /**
   * Entry point to command-line-driven operation. User may specify options and
   * start fsimage viewer from the command line. Program will process image file
   * and exit cleanly or, if an error is encountered, inform user and exit.
   *
   * @param args
   *          Command line options
   * @throws IOException
   */
  public static void main(String[] args) throws Exception {
    int status = run(args);
    System.exit(status);
  }

  /**
   * Parses the command line and dispatches to the selected image processor.
   * Returns 0 on success, -1 on bad arguments or processing failure.
   * Note: the ReverseXML branch calls System.exit(1) directly on failure
   * instead of returning, so callers may not regain control in that case.
   */
  public static int run(String[] args) throws Exception {
    Options options = buildOptions();
    if (args.length == 0) {
      printUsage();
      return 0;
    }
    // print help and exit with zero exit code
    if (args.length == 1 && isHelpOption(args[0])) {
      printUsage();
      return 0;
    }
    CommandLineParser parser = new PosixParser();
    CommandLine cmd;

    try {
      cmd = parser.parse(options, args);
    } catch (ParseException e) {
      System.out.println("Error parsing command-line options: ");
      printUsage();
      return -1;
    }

    if (cmd.hasOption("h")) {
      // print help and exit with non zero exit code since
      // it is not expected to give help and other options together.
      printUsage();
      return -1;
    }
    String inputFile = cmd.getOptionValue("i");
    String processor = cmd.getOptionValue("p", "Web");
    String outputFile = cmd.getOptionValue("o", "-");
    String delimiter = cmd.getOptionValue("delimiter",
        PBImageDelimitedTextWriter.DEFAULT_DELIMITER);
    String tempPath = cmd.getOptionValue("t", "");

    Configuration conf = new Configuration();
    // "-" means stdout; otherwise the output file is (over)written as UTF-8.
    try (PrintStream out = outputFile.equals("-") ?
        System.out : new PrintStream(outputFile, "UTF-8")) {
      // Processor names are matched case-insensitively.
      switch (StringUtils.toUpperCase(processor)) {
      case "FILEDISTRIBUTION":
        long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
        int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
        boolean formatOutput = cmd.hasOption("format");
        new FileDistributionCalculator(conf, maxSize, step, formatOutput, out)
            .visit(new RandomAccessFile(inputFile, "r"));
        break;
      case "XML":
        new PBImageXmlWriter(conf, out).visit(new RandomAccessFile(inputFile,
            "r"));
        break;
      case "REVERSEXML":
        try {
          OfflineImageReconstructor.run(inputFile, outputFile);
        } catch (Exception e) {
          System.err.println("OfflineImageReconstructor failed: "
              + e.getMessage());
          e.printStackTrace(System.err);
          System.exit(1);
        }
        break;
      case "WEB":
        String addr = cmd.getOptionValue("addr", "localhost:5978");
        // try-with-resources closes the viewer's listening socket on exit.
        try (WebImageViewer viewer =
            new WebImageViewer(NetUtils.createSocketAddr(addr))) {
          viewer.start(inputFile);
        }
        break;
      case "DELIMITED":
        try (PBImageDelimitedTextWriter writer =
            new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
          writer.visit(new RandomAccessFile(inputFile, "r"));
        }
        break;
      default:
        System.err.println("Invalid processor specified : " + processor);
        printUsage();
        return -1;
      }
      return 0;
    } catch (EOFException e) {
      System.err.println("Input file ended unexpectedly. Exiting");
    } catch (IOException e) {
      System.err.println("Encountered exception. Exiting: " + e.getMessage());
      e.printStackTrace(System.err);
    }
    return -1;
  }

  /**
   * Print application usage instructions.
   */
  private static void printUsage() {
    System.out.println(usage);
  }

  // Accepts "-h" / "--help" in any letter case.
  private static boolean isHelpOption(String arg) {
    return arg.equalsIgnoreCase(HELP_OPT) ||
        arg.equalsIgnoreCase(HELP_LONGOPT);
  }
}
| apache-2.0 |
harikrushna-Huawei/hackathon | apps/openstacknode/src/main/java/org/onosproject/openstacknode/OpenstackNodeService.java | 1688 | /*
* Copyright 2016-present Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.openstacknode;
import java.util.List;
/**
* Handles the bootstrap request for compute/gateway node.
*/
public interface OpenstackNodeService {

    /**
     * Role of a node managed by this service.
     */
    public enum OpenstackNodeType {
        /**
         * Compute node.
         */
        COMPUTENODE,
        /**
         * Gateway node.
         */
        GATEWAYNODE
    }

    /**
     * Adds a new node to the service.
     *
     * @param node openstack node
     */
    void addNode(OpenstackNode node);

    /**
     * Deletes a node from the service.
     *
     * @param node openstack node
     */
    void deleteNode(OpenstackNode node);

    /**
     * Returns nodes known to the service for designated openstacktype.
     *
     * @param openstackNodeType openstack node type
     * @return list of nodes
     */
    List<OpenstackNode> getNodes(OpenstackNodeType openstackNodeType);

    /**
     * Returns the NodeState for a given node.
     *
     * @param node openstack node
     * @return true if the NodeState for a given node is COMPLETE, false otherwise
     */
    boolean isComplete(OpenstackNode node);
}
| apache-2.0 |
akuhtz/izpack | izpack-installer/src/test/java/com/izforge/izpack/installer/gui/DefaultNavigatorTest.java | 10736 | /*
* IzPack - Copyright 2001-2012 Julien Ponge, All Rights Reserved.
*
* http://izpack.org/
* http://izpack.codehaus.org/
*
* Copyright 2012 Tim Anderson
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.izforge.izpack.installer.gui;
import com.izforge.izpack.api.container.Container;
import com.izforge.izpack.api.data.LocaleDatabase;
import com.izforge.izpack.api.data.Panel;
import com.izforge.izpack.api.factory.ObjectFactory;
import com.izforge.izpack.api.resource.Messages;
import com.izforge.izpack.api.resource.Resources;
import com.izforge.izpack.api.rules.RulesEngine;
import com.izforge.izpack.core.container.DefaultContainer;
import com.izforge.izpack.core.data.DefaultVariables;
import com.izforge.izpack.core.factory.DefaultObjectFactory;
import com.izforge.izpack.core.resource.DefaultLocales;
import com.izforge.izpack.core.rules.ConditionContainer;
import com.izforge.izpack.core.rules.RulesEngineImpl;
import com.izforge.izpack.gui.IconsDatabase;
import com.izforge.izpack.installer.data.GUIInstallData;
import com.izforge.izpack.installer.panel.Panels;
import com.izforge.izpack.util.Platforms;
import org.junit.Test;
import org.mockito.Mockito;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
/**
* Tests the {@link DefaultNavigator}.
*
* @author Tim Anderson
*/
public class DefaultNavigatorTest
{

    /**
     * The installer frame.
     */
    private final InstallerFrame frame;

    /**
     * The installation data.
     */
    private final GUIInstallData installData;

    /**
     * Factory for creating IzPanels.
     */
    private final ObjectFactory factory;

    /**
     * The container.
     */
    private final Container container;

    /**
     * Panel id seed. Incremented in createPanels() so panel ids stay unique
     * across multiple createPanels() calls within one test instance.
     */
    private int id;

    /**
     * Constructs a {@code DefaultNavigatorTest}.
     */
    public DefaultNavigatorTest()
    {
        frame = Mockito.mock(InstallerFrame.class);
        installData = new GUIInstallData(new DefaultVariables(), Platforms.WINDOWS);
        RulesEngine rules = new RulesEngineImpl(Mockito.mock(ConditionContainer.class), Platforms.WINDOWS);
        installData.setRules(rules);
        final Resources resources = Mockito.mock(Resources.class);
        installData.setMessages(new LocaleDatabase((Messages) null, new DefaultLocales(resources)));
        // Anonymous container: the instance initializer wires the mocks and
        // install data into the DI container used by the panel factory.
        container = new DefaultContainer()
        {
            {
                getContainer().addComponent(frame);
                getContainer().addComponent(resources);
                getContainer().addComponent(installData);
            }
        };
        factory = new DefaultObjectFactory(container);
    }

    /**
     * Tests panel navigation.
     */
    @Test
    public void testNavigation()
    {
        Panels panels = createPanels(3);
        Navigator navigator = createNavigator(panels);

        // prior to display of first panel
        assertTrue(navigator.isNextEnabled());
        assertFalse(navigator.isPreviousEnabled());
        assertTrue(navigator.next());

        // first panel
        assertEquals(0, panels.getIndex());
        assertTrue(navigator.isNextEnabled());
        assertFalse(navigator.isPreviousEnabled());
        assertTrue(navigator.next());

        // second panel
        assertEquals(1, panels.getIndex());
        assertTrue(navigator.isNextEnabled());
        assertTrue(navigator.isPreviousEnabled());

        // make sure can navigate back
        assertTrue(navigator.previous());
        assertEquals(0, panels.getIndex());
        assertFalse(navigator.isPreviousEnabled());
        assertTrue(navigator.next());
        assertEquals(1, panels.getIndex());
        assertTrue(navigator.next());

        // third panel
        assertEquals(2, panels.getIndex());
        assertFalse(navigator.isNextEnabled());
        assertFalse(navigator.isPreviousEnabled()); // on last panel, can't navigate back
        assertFalse(navigator.next());
    }

    /**
     * Verifies that the next button can be disabled when switching panels.
     */
    @Test
    public void testDisableNextOnSwitch()
    {
        IzPanels panels = createPanels(3);
        final Navigator navigator = createNavigator(panels);

        // register a listener that disables the next button when the second panel is displayed
        panels.setListener(new IzPanelsListener()
        {
            @Override
            public void switchPanel(IzPanelView newPanel, IzPanelView oldPanel)
            {
                if (newPanel.getIndex() == 1)
                {
                    navigator.setNextEnabled(false);
                }
            }
        });

        // navigate to the second panel
        assertEquals(-1, panels.getIndex());
        assertTrue(navigator.next());
        assertEquals(0, panels.getIndex());
        assertTrue(navigator.next());
        assertEquals(1, panels.getIndex());

        // verify the next button is disabled, and that navigation is disabled
        assertFalse(navigator.isNextEnabled());
        assertEquals(1, panels.getIndex());

        // enable the next button and verify the third panel can be navigated to
        navigator.setNextEnabled(true);
        assertTrue(navigator.next());
        assertEquals(2, panels.getIndex());
    }

    /**
     * Verifies that the previous button can be disabled when switching panels.
     */
    @Test
    public void testDisablePreviousOnSwitch()
    {
        IzPanels panels = createPanels(3);
        final Navigator navigator = createNavigator(panels);

        // register a listener that disables the previous button when the second panel is displayed
        panels.setListener(new IzPanelsListener()
        {
            @Override
            public void switchPanel(IzPanelView newPanel, IzPanelView oldPanel)
            {
                if (newPanel.getIndex() == 1)
                {
                    navigator.setPreviousEnabled(false);
                }
            }
        });

        // navigate to the second panel
        assertEquals(-1, panels.getIndex());
        assertTrue(navigator.next());
        assertEquals(0, panels.getIndex());
        assertTrue(navigator.next());
        assertEquals(1, panels.getIndex());

        // verify the previous button is disabled, and that navigation is disabled
        assertFalse(navigator.isPreviousEnabled());
        assertFalse(navigator.previous());
        assertEquals(1, panels.getIndex());
    }

    /**
     * Tests {@link com.izforge.izpack.installer.gui.Navigator#quit()}.
     */
    @Test
    public void testQuit()
    {
        IzPanels panels = createPanels(5);

        // test quit with quit enabled
        InstallerFrame frame1 = Mockito.mock(InstallerFrame.class);
        Navigator navigator1 = createNavigator(panels, frame1);
        assertTrue(navigator1.isQuitEnabled());
        navigator1.quit();
        verify(frame1, times(1)).quit(); // verify InstallerFrame.quit() invoked

        // test quit with quit disabled
        InstallerFrame frame2 = Mockito.mock(InstallerFrame.class);
        Navigator navigator2 = createNavigator(panels, frame2);
        navigator2.setQuitEnabled(false);
        navigator2.quit();
        verify(frame2, never()).quit(); // verify InstallerFrame.quit() not invoked

        // now enable quit
        navigator2.setQuitEnabled(true);
        navigator2.quit();
        verify(frame2, times(1)).quit(); // verify InstallerFrame.quit() invoked
    }

    /**
     * Verifies that the next panel can be skipped.
     */
    @Test
    public void testSkipNextPanel()
    {
        IzPanels panels = createPanels(3);
        final DefaultNavigator navigator = createNavigator(panels);

        // when the second panel is about to show, immediately advance past it
        panels.setListener(new IzPanelsListener()
        {
            @Override
            public void switchPanel(IzPanelView newPanel, IzPanelView oldPanel)
            {
                if (newPanel.getIndex() == 1)
                {
                    navigator.next(false);
                }
            }
        });

        // navigate to the first panel
        assertEquals(-1, panels.getIndex());
        assertTrue(navigator.next());
        assertEquals(0, panels.getIndex());

        // navigate to the next, verifying that the second panel (index == 1) is skipped
        assertTrue(navigator.next());
        assertEquals(2, panels.getIndex());
    }

    /**
     * Creates a new {@code Navigator} for the specified panels
     *
     * @param panels the panels to navigate
     * @return a new {@code Navigator}
     */
    private DefaultNavigator createNavigator(Panels panels)
    {
        return createNavigator(panels, frame);
    }

    /**
     * Creates a new {@code Navigator} for the specified panels
     *
     * @param panels the panels to navigate
     * @param frame  the installer frame
     * @return a new {@code Navigator}
     */
    private DefaultNavigator createNavigator(Panels panels, InstallerFrame frame)
    {
        IconsDatabase icons = new IconsDatabase();
        DefaultNavigator navigator = new DefaultNavigator(panels, icons, installData);
        navigator.setInstallerFrame(frame);
        return navigator;
    }

    /**
     * Creates a {@link IzPanels} with the specified no. of panels.
     *
     * @param count the no. of panels
     * @return a new {@link IzPanels} with {@code count} panels
     */
    private IzPanels createPanels(int count)
    {
        List<IzPanelView> views = new ArrayList<IzPanelView>();
        for (int i = 0; i < count; ++i)
        {
            Panel panel = new Panel();
            panel.setClassName(TestIzPanel.class.getName());
            // ++id keeps panel ids unique across repeated createPanels() calls
            panel.setPanelId(TestIzPanel.class.getSimpleName() + ++id);
            IzPanelView panelView = new IzPanelView(panel, factory, installData);
            views.add(panelView);
        }
        IzPanels panels = new IzPanels(views, container, installData);
        panels.initialise();
        // no-op listener so navigation callbacks have a non-null target
        panels.setListener(new IzPanelsListener()
        {
            @Override
            public void switchPanel(IzPanelView newPanel, IzPanelView oldPanel)
            {
            }
        });
        return panels;
    }
}
| apache-2.0 |
amarzavery/azure-xplat-cli | lib/commands/arm/armsdk/models/virtualMachineCaptureParameters.js | 1794 | /*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
* Changes may cause incorrect behavior and will be lost if the code is
* regenerated.
*/
'use strict';
/**
 * Capture Virtual Machine parameters.
 *
 */
class VirtualMachineCaptureParameters {
  /**
   * Create a VirtualMachineCaptureParameters.
   * @member {string} vhdPrefix The captured virtual hard disk's name prefix.
   * @member {string} destinationContainerName The destination container name.
   * @member {boolean} overwriteVhds Specifies whether to overwrite the
   * destination virtual hard disk, in case of conflict.
   */
  constructor() {
  }

  /**
   * Defines the metadata of VirtualMachineCaptureParameters
   *
   * @returns {object} metadata of VirtualMachineCaptureParameters
   *
   */
  mapper() {
    // Builds the mapper entry for a required String-typed property; a fresh
    // object is produced per call so callers may mutate the result safely.
    const requiredString = (serializedName) => ({
      required: true,
      serializedName: serializedName,
      type: {
        name: 'String'
      }
    });

    return {
      required: false,
      serializedName: 'VirtualMachineCaptureParameters',
      type: {
        name: 'Composite',
        className: 'VirtualMachineCaptureParameters',
        modelProperties: {
          vhdPrefix: requiredString('vhdPrefix'),
          destinationContainerName: requiredString('destinationContainerName'),
          overwriteVhds: {
            required: true,
            serializedName: 'overwriteVhds',
            type: {
              name: 'Boolean'
            }
          }
        }
      }
    };
  }
}
module.exports = VirtualMachineCaptureParameters;
| apache-2.0 |
franz1981/activemq-artemis | tests/integration-tests/src/test/java/org/apache/activemq/artemis/tests/integration/amqp/AmqpReceiverDrainTest.java | 6816 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.tests.integration.amqp;
import java.util.concurrent.TimeUnit;
import org.apache.activemq.artemis.core.server.Queue;
import org.apache.activemq.artemis.tests.integration.IntegrationTestLogger;
import org.apache.activemq.transport.amqp.client.AmqpClient;
import org.apache.activemq.transport.amqp.client.AmqpConnection;
import org.apache.activemq.transport.amqp.client.AmqpMessage;
import org.apache.activemq.transport.amqp.client.AmqpReceiver;
import org.apache.activemq.transport.amqp.client.AmqpSession;
import org.junit.Test;
/**
 * Tests various behaviors of broker side drain support.
 */
public class AmqpReceiverDrainTest extends AmqpClientTestSupport {

   @Test(timeout = 60000)
   public void testReceiverCanDrainMessagesQueue() throws Exception {
      doTestReceiverCanDrainMessages(false);
   }

   @Test(timeout = 60000)
   public void testReceiverCanDrainMessagesTopic() throws Exception {
      doTestReceiverCanDrainMessages(true);
   }

   /**
    * Sends a batch of messages and verifies that a single drain request lets the
    * receiver consume the whole backlog.
    *
    * @param topic {@code true} to run against a topic, {@code false} for a queue
    * @throws Exception on unexpected test failure
    */
   private void doTestReceiverCanDrainMessages(boolean topic) throws Exception {
      final String destinationName = getDestinationName(topic);
      int MSG_COUNT = 20;

      AmqpClient client = createAmqpClient();
      AmqpConnection connection = addConnection(client.connect());
      AmqpSession session = connection.createSession();

      // Create the receiver before sending so a topic subscription exists first.
      AmqpReceiver receiver = session.createReceiver(destinationName);

      sendMessages(destinationName, MSG_COUNT);

      Queue queueView = getProxyToQueue(destinationName);
      assertEquals(MSG_COUNT, queueView.getMessageCount());
      assertEquals(0, queueView.getDeliveringCount());

      receiver.drain(MSG_COUNT);
      for (int i = 0; i < MSG_COUNT; ++i) {
         AmqpMessage message = receiver.receive(5, TimeUnit.SECONDS);
         assertNotNull("Failed to read message: " + (i + 1), message);
         IntegrationTestLogger.LOGGER.info("Read message: " + message.getMessageId());
         message.accept();
      }
      receiver.close();

      connection.close();
   }

   @Test(timeout = 60000)
   public void testPullWithNoMessageGetDrainedQueue() throws Exception {
      doTestPullWithNoMessageGetDrained(false);
   }

   @Test(timeout = 60000)
   public void testPullWithNoMessageGetDrainedTopic() throws Exception {
      doTestPullWithNoMessageGetDrained(true);
   }

   /**
    * Verifies that a pull on an empty destination times out and that the
    * receiver's previously granted credit is fully drained afterwards.
    *
    * @param topic {@code true} to run against a topic, {@code false} for a queue
    * @throws Exception on unexpected test failure
    */
   private void doTestPullWithNoMessageGetDrained(boolean topic) throws Exception {
      final String destinationName = getDestinationName(topic);

      AmqpClient client = createAmqpClient();
      AmqpConnection connection = addConnection(client.connect());
      AmqpSession session = connection.createSession();

      AmqpReceiver receiver = session.createReceiver(destinationName);

      receiver.flow(10);

      Queue queueView = getProxyToQueue(destinationName);
      assertEquals(0, queueView.getMessageCount());
      assertEquals(0, queueView.getMessagesAcknowledged());

      assertEquals(10, receiver.getReceiver().getRemoteCredit());

      assertNull(receiver.pull(1, TimeUnit.SECONDS));

      assertEquals(0, receiver.getReceiver().getRemoteCredit());

      // Close the receiver explicitly (was missing; other tests in this class do so).
      receiver.close();

      connection.close();
   }

   @Test(timeout = 60000)
   public void testPullOneFromRemoteQueue() throws Exception {
      doTestPullOneFromRemote(false);
   }

   @Test(timeout = 60000)
   public void testPullOneFromRemoteTopic() throws Exception {
      doTestPullOneFromRemote(true);
   }

   /**
    * Verifies that a pull against a backlog of messages delivers exactly one
    * message and leaves no remote credit outstanding.
    *
    * @param topic {@code true} to run against a topic, {@code false} for a queue
    * @throws Exception on unexpected test failure
    */
   private void doTestPullOneFromRemote(boolean topic) throws Exception {
      AmqpClient client = createAmqpClient();
      AmqpConnection connection = addConnection(client.connect());
      AmqpSession session = connection.createSession();

      final String destinationName = getDestinationName(topic);

      AmqpReceiver receiver = session.createReceiver(destinationName);

      int MSG_COUNT = 20;
      sendMessages(destinationName, MSG_COUNT);

      Queue queueView = getProxyToQueue(destinationName);
      assertEquals(MSG_COUNT, queueView.getMessageCount());
      assertEquals(0, queueView.getDeliveringCount());

      assertEquals(0, receiver.getReceiver().getRemoteCredit());

      AmqpMessage message = receiver.pull(5, TimeUnit.SECONDS);
      assertNotNull(message);
      message.accept();

      assertEquals(0, receiver.getReceiver().getRemoteCredit());

      receiver.close();

      connection.close();
   }

   @Test(timeout = 60000)
   public void testMultipleZeroResultPullsQueue() throws Exception {
      doTestMultipleZeroResultPulls(false);
   }

   @Test(timeout = 60000)
   public void testMultipleZeroResultPullsTopic() throws Exception {
      doTestMultipleZeroResultPulls(true);
   }

   /**
    * Verifies that repeated pulls on an empty destination each time out cleanly
    * without leaving remote credit outstanding.
    *
    * @param topic {@code true} to run against a topic, {@code false} for a queue
    * @throws Exception on unexpected test failure
    */
   private void doTestMultipleZeroResultPulls(boolean topic) throws Exception {
      AmqpClient client = createAmqpClient();
      AmqpConnection connection = addConnection(client.connect());
      AmqpSession session = connection.createSession();

      final String destinationName = getDestinationName(topic);

      AmqpReceiver receiver = session.createReceiver(destinationName);

      receiver.flow(10);

      Queue queueView = getProxyToQueue(destinationName);
      assertEquals(0, queueView.getMessageCount());
      assertEquals(0, queueView.getDeliveringCount());

      assertEquals(10, receiver.getReceiver().getRemoteCredit());

      assertNull(receiver.pull(1, TimeUnit.SECONDS));

      assertEquals(0, receiver.getReceiver().getRemoteCredit());

      assertNull(receiver.pull(1, TimeUnit.SECONDS));
      assertNull(receiver.pull(1, TimeUnit.SECONDS));

      assertEquals(0, receiver.getReceiver().getRemoteCredit());

      // Close the receiver explicitly (was missing; other tests in this class do so).
      receiver.close();

      connection.close();
   }

   /**
    * Returns the destination name to test against.
    *
    * @param topic {@code true} for the topic name, {@code false} for the queue name
    * @return the destination name
    */
   private String getDestinationName(boolean topic) {
      return topic ? getTopicName() : getQueueName();
   }
}
| apache-2.0 |
Xpray/flink | flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/HighAvailabilityServicesUtils.java | 5082 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.highavailability;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.JobManagerOptions;
import org.apache.flink.runtime.blob.BlobStoreService;
import org.apache.flink.runtime.blob.BlobUtils;
import org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedHaServices;
import org.apache.flink.runtime.highavailability.nonha.standalone.StandaloneHaServices;
import org.apache.flink.runtime.highavailability.zookeeper.ZooKeeperHaServices;
import org.apache.flink.runtime.jobmanager.HighAvailabilityMode;
import org.apache.flink.runtime.jobmaster.JobMaster;
import org.apache.flink.runtime.resourcemanager.ResourceManager;
import org.apache.flink.runtime.rpc.akka.AkkaRpcServiceUtils;
import org.apache.flink.runtime.util.LeaderRetrievalUtils;
import org.apache.flink.runtime.util.ZooKeeperUtils;
import org.apache.flink.util.ConfigurationException;
import java.util.concurrent.Executor;
/**
 * Utils class to instantiate {@link HighAvailabilityServices} implementations.
 */
public class HighAvailabilityServicesUtils {

	/**
	 * Creates high availability services, falling back to an embedded in-process
	 * implementation when no high availability mode is configured.
	 *
	 * @param config configuration from which the high availability mode is read
	 * @param executor executor used by the services for background tasks
	 * @return the created {@link HighAvailabilityServices}
	 * @throws Exception if the configured high availability mode is not supported
	 *                   or the services could not be created
	 */
	public static HighAvailabilityServices createAvailableOrEmbeddedServices(
		Configuration config,
		Executor executor) throws Exception {
		HighAvailabilityMode highAvailabilityMode = LeaderRetrievalUtils.getRecoveryMode(config);

		switch (highAvailabilityMode) {
			case NONE:
				return new EmbeddedHaServices(executor);

			case ZOOKEEPER:
				return createZooKeeperHaServices(config, executor);

			default:
				throw new Exception("High availability mode " + highAvailabilityMode + " is not supported.");
		}
	}

	/**
	 * Creates high availability services for a distributed setup. In non-HA mode
	 * the JobManager address is read from the configuration and standalone
	 * services pointing at it are returned.
	 *
	 * @param configuration configuration from which the high availability mode is read
	 * @param executor executor used by the services for background tasks
	 * @param addressResolution whether RPC URL construction should attempt to resolve addresses
	 * @return the created {@link HighAvailabilityServices}
	 * @throws Exception if the configured high availability mode is not supported
	 *                   or the services could not be created
	 */
	public static HighAvailabilityServices createHighAvailabilityServices(
		Configuration configuration,
		Executor executor,
		AddressResolution addressResolution) throws Exception {
		HighAvailabilityMode highAvailabilityMode = LeaderRetrievalUtils.getRecoveryMode(configuration);

		switch(highAvailabilityMode) {
			case NONE:
				final Tuple2<String, Integer> hostnamePort = getJobManagerAddress(configuration);

				final String jobManagerRpcUrl = AkkaRpcServiceUtils.getRpcUrl(
					hostnamePort.f0,
					hostnamePort.f1,
					JobMaster.JOB_MANAGER_NAME,
					addressResolution,
					configuration);
				final String resourceManagerRpcUrl = AkkaRpcServiceUtils.getRpcUrl(
					hostnamePort.f0,
					hostnamePort.f1,
					ResourceManager.RESOURCE_MANAGER_NAME,
					addressResolution,
					configuration);

				return new StandaloneHaServices(resourceManagerRpcUrl, jobManagerRpcUrl);
			case ZOOKEEPER:
				return createZooKeeperHaServices(configuration, executor);

			default:
				throw new Exception("Recovery mode " + highAvailabilityMode + " is not supported.");
		}
	}

	/**
	 * Creates ZooKeeper based high availability services. Shared by both factory
	 * methods above to avoid duplicating the construction logic.
	 *
	 * @param configuration configuration used to set up the ZooKeeper client and blob store
	 * @param executor executor used by the services for background tasks
	 * @return ZooKeeper based {@link HighAvailabilityServices}
	 * @throws Exception if the blob store or the ZooKeeper client could not be created
	 */
	private static HighAvailabilityServices createZooKeeperHaServices(
		Configuration configuration,
		Executor executor) throws Exception {
		BlobStoreService blobStoreService = BlobUtils.createBlobStoreFromConfig(configuration);

		return new ZooKeeperHaServices(
			ZooKeeperUtils.startCuratorFramework(configuration),
			executor,
			configuration,
			blobStoreService);
	}

	/**
	 * Returns the JobManager's hostname and port extracted from the given
	 * {@link Configuration}.
	 *
	 * @param configuration Configuration to extract the JobManager's address from
	 * @return The JobManager's hostname and port
	 * @throws ConfigurationException if the JobManager's address cannot be extracted from the configuration
	 */
	public static Tuple2<String, Integer> getJobManagerAddress(Configuration configuration) throws ConfigurationException {
		final String hostname = configuration.getString(JobManagerOptions.ADDRESS);
		final int port = configuration.getInteger(JobManagerOptions.PORT);

		if (hostname == null) {
			throw new ConfigurationException("Config parameter '" + JobManagerOptions.ADDRESS +
				"' is missing (hostname/address of JobManager to connect to).");
		}

		if (port <= 0 || port >= 65536) {
			throw new ConfigurationException("Invalid value for '" + JobManagerOptions.PORT +
				"' (port of the JobManager actor system) : " + port +
				". It must be greater than 0 and less than 65536.");
		}

		return Tuple2.of(hostname, port);
	}

	/**
	 * Controls whether RPC URL construction should try to resolve the target
	 * address before building the URL.
	 */
	public enum AddressResolution {
		TRY_ADDRESS_RESOLUTION,
		NO_ADDRESS_RESOLUTION
	}
}
| apache-2.0 |
puneetjaiswal/crate | sql-parser/src/main/java/io/crate/sql/tree/TreePrinter.java | 10850 | /*
* Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership. Crate licenses
* this file to you under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* However, if you have executed another commercial license agreement
* with Crate these terms will supersede the license and you may use the
* software solely pursuant to the terms of the relevant commercial agreement.
*/
package io.crate.sql.tree;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import java.io.PrintStream;
import java.util.IdentityHashMap;
/**
 * Debugging aid that pretty-prints a SQL AST to a {@link PrintStream}, one node
 * per line, indented by nesting depth. Resolved qualified-name references are
 * annotated inline where a resolution is known.
 */
public class TreePrinter
{
    private static final String INDENT = " ";

    // maps a name reference node to the qualified name it resolved to, if any
    private final IdentityHashMap<QualifiedNameReference, QualifiedName> resolvedNameReferences;
    private final PrintStream out;

    /**
     * @param resolvedNameReferences resolved name references to annotate in the output
     *                               (defensively copied)
     * @param out                    stream the tree is printed to
     */
    public TreePrinter(IdentityHashMap<QualifiedNameReference, QualifiedName> resolvedNameReferences, PrintStream out)
    {
        this.resolvedNameReferences = new IdentityHashMap<>(resolvedNameReferences);
        this.out = out;
    }

    /**
     * Prints the tree rooted at {@code root}. Node types without an explicit
     * visit override fall through to {@code visitNode}, which throws
     * {@link UnsupportedOperationException}.
     */
    public void print(Node root)
    {
        AstVisitor<Void, Integer> printer = new DefaultTraversalVisitor<Void, Integer>()
        {
            @Override
            protected Void visitNode(Node node, Integer indentLevel)
            {
                throw new UnsupportedOperationException("not yet implemented: " + node);
            }

            @Override
            protected Void visitQuery(Query node, Integer indentLevel)
            {
                print(indentLevel, "Query ");

                indentLevel++;
                print(indentLevel, "QueryBody");
                process(node.getQueryBody(), indentLevel);
                if (!node.getOrderBy().isEmpty()) {
                    print(indentLevel, "OrderBy");
                    for (SortItem sortItem : node.getOrderBy()) {
                        process(sortItem, indentLevel + 1);
                    }
                }

                if (node.getLimit().isPresent()) {
                    print(indentLevel, "Limit: " + node.getLimit().get());
                }

                return null;
            }

            @Override
            protected Void visitQuerySpecification(QuerySpecification node, Integer indentLevel)
            {
                print(indentLevel, "QuerySpecification ");

                indentLevel++;

                process(node.getSelect(), indentLevel);

                print(indentLevel, "From");
                for (Relation relation : node.getFrom()) {
                    process(relation, indentLevel + 1);
                }

                if (node.getWhere().isPresent()) {
                    print(indentLevel, "Where");
                    process(node.getWhere().get(), indentLevel + 1);
                }

                if (!node.getGroupBy().isEmpty()) {
                    print(indentLevel, "GroupBy");
                    for (Expression expression : node.getGroupBy()) {
                        process(expression, indentLevel + 1);
                    }
                }

                if (node.getHaving().isPresent()) {
                    print(indentLevel, "Having");
                    process(node.getHaving().get(), indentLevel + 1);
                }

                if (!node.getOrderBy().isEmpty()) {
                    print(indentLevel, "OrderBy");
                    for (SortItem sortItem : node.getOrderBy()) {
                        process(sortItem, indentLevel + 1);
                    }
                }

                if (node.getLimit().isPresent()) {
                    print(indentLevel, "Limit: " + node.getLimit().get());
                }

                return null;
            }

            @Override
            protected Void visitSelect(Select node, Integer indentLevel)
            {
                String distinct = "";
                if (node.isDistinct()) {
                    distinct = "[DISTINCT]";
                }
                print(indentLevel, "Select" + distinct);

                super.visitSelect(node, indentLevel + 1); // visit children

                return null;
            }

            @Override
            protected Void visitAllColumns(AllColumns node, Integer indent)
            {
                if (node.getPrefix().isPresent()) {
                    print(indent, node.getPrefix() + ".*");
                }
                else {
                    print(indent, "*");
                }

                return null;
            }

            @Override
            protected Void visitSingleColumn(SingleColumn node, Integer indent)
            {
                if (node.getAlias().isPresent()) {
                    print(indent, "Alias: " + node.getAlias().get());
                }

                super.visitSingleColumn(node, indent + 1); // visit children

                return null;
            }

            @Override
            protected Void visitComparisonExpression(ComparisonExpression node, Integer indentLevel)
            {
                print(indentLevel, node.getType().toString());

                super.visitComparisonExpression(node, indentLevel + 1);

                return null;
            }

            @Override
            protected Void visitArithmeticExpression(ArithmeticExpression node, Integer indentLevel)
            {
                print(indentLevel, node.getType().toString());

                super.visitArithmeticExpression(node, indentLevel + 1);

                return null;
            }

            @Override
            protected Void visitLogicalBinaryExpression(LogicalBinaryExpression node, Integer indentLevel)
            {
                print(indentLevel, node.getType().toString());

                super.visitLogicalBinaryExpression(node, indentLevel + 1);

                return null;
            }

            @Override
            protected Void visitStringLiteral(StringLiteral node, Integer indentLevel)
            {
                print(indentLevel, "String[" + node.getValue() + "]");
                return null;
            }

            @Override
            protected Void visitBooleanLiteral(BooleanLiteral node, Integer indentLevel)
            {
                print(indentLevel, "Boolean[" + node.getValue() + "]");
                return null;
            }

            @Override
            protected Void visitLongLiteral(LongLiteral node, Integer indentLevel)
            {
                print(indentLevel, "Long[" + node.getValue() + "]");
                return null;
            }

            @Override
            protected Void visitLikePredicate(LikePredicate node, Integer indentLevel)
            {
                print(indentLevel, "LIKE");

                super.visitLikePredicate(node, indentLevel + 1);

                return null;
            }

            @Override
            public Void visitMatchPredicate(MatchPredicate node, Integer indentLevel)
            {
                print(indentLevel, "MATCH");

                super.visitMatchPredicate(node, indentLevel + 1);

                return null;
            }

            @Override
            protected Void visitQualifiedNameReference(QualifiedNameReference node, Integer indentLevel)
            {
                // annotate with the resolved name when the resolver provided one
                QualifiedName resolved = resolvedNameReferences.get(node);
                String resolvedName = "";
                if (resolved != null) {
                    resolvedName = "=>" + resolved.toString();
                }
                print(indentLevel, "QualifiedName[" + node.getName() + resolvedName + "]");
                return null;
            }

            @Override
            protected Void visitFunctionCall(FunctionCall node, Integer indentLevel)
            {
                String name = Joiner.on('.').join(node.getName().getParts());
                print(indentLevel, "FunctionCall[" + name + "]");

                super.visitFunctionCall(node, indentLevel + 1);

                return null;
            }

            @Override
            protected Void visitTable(Table node, Integer indentLevel)
            {
                String name = Joiner.on('.').join(node.getName().getParts());
                print(indentLevel, "Table[" + name + "]");

                return null;
            }

            @Override
            protected Void visitAliasedRelation(AliasedRelation node, Integer indentLevel)
            {
                print(indentLevel, "Alias[" + node.getAlias() + "]");

                super.visitAliasedRelation(node, indentLevel + 1);

                return null;
            }

            @Override
            protected Void visitSampledRelation(SampledRelation node, Integer indentLevel)
            {
                String stratifyOn = "";
                if (node.getColumnsToStratifyOn().isPresent()) {
                    stratifyOn = " STRATIFY ON (" + node.getColumnsToStratifyOn().get().toString() + ")";
                }
                print(indentLevel, "TABLESAMPLE[" + node.getType() + " (" + node.getSamplePercentage() + ")" + stratifyOn + "]");

                super.visitSampledRelation(node, indentLevel + 1);

                return null;
            }

            @Override
            protected Void visitTableSubquery(TableSubquery node, Integer indentLevel)
            {
                print(indentLevel, "SubQuery");

                super.visitTableSubquery(node, indentLevel + 1);

                return null;
            }

            @Override
            protected Void visitInPredicate(InPredicate node, Integer indentLevel)
            {
                print(indentLevel, "IN");

                super.visitInPredicate(node, indentLevel + 1);

                return null;
            }

            @Override
            protected Void visitSubqueryExpression(SubqueryExpression node, Integer indentLevel)
            {
                print(indentLevel, "SubQuery");

                super.visitSubqueryExpression(node, indentLevel + 1);

                return null;
            }
        };

        printer.process(root, 0);
    }

    /**
     * Writes one output line, indented {@code indentLevel} levels.
     */
    private void print(Integer indentLevel, String value)
    {
        out.println(Strings.repeat(INDENT, indentLevel) + value);
    }
}
| apache-2.0 |
charlesporter/incubator-metron | metron-platform/metron-hbase/src/main/java/org/apache/metron/hbase/bolt/mapper/HBaseProjectionCriteria.java | 2706 | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.metron.hbase.bolt.mapper;
import com.google.common.collect.Lists;

import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
/**
 * Allows the user to specify the projection criteria.
 * If only columnFamily is specified all columns from that family will be returned.
 * If a column is specified only that column from that family will be returned.
 *
 * Original code based on the Apache Storm project. See
 * https://github.com/apache/storm/tree/master/external/storm-hbase.
 */
public class HBaseProjectionCriteria implements Serializable {

  private List<byte[]> columnFamilies;
  private List<ColumnMetaData> columns;

  /**
   * A column family / column qualifier pair identifying a single column to project.
   */
  public static class ColumnMetaData implements Serializable {

    private byte[] columnFamily;
    private byte[] qualifier;

    /**
     * @param columnFamily the column family name
     * @param qualifier    the column qualifier within that family
     */
    public ColumnMetaData(String columnFamily, String qualifier) {
      // Use an explicit charset: getBytes() without one depends on the JVM's
      // platform default encoding and is not portable across hosts.
      this.columnFamily = columnFamily.getBytes(StandardCharsets.UTF_8);
      this.qualifier = qualifier.getBytes(StandardCharsets.UTF_8);
    }

    public byte[] getColumnFamily() {
      return columnFamily;
    }

    public byte[] getQualifier() {
      return qualifier;
    }
  }

  public HBaseProjectionCriteria() {
    columnFamilies = new ArrayList<>();
    columns = new ArrayList<>();
  }

  /**
   * all columns from this family will be included as result of HBase lookup.
   *
   * @param columnFamily the column family to project in full
   * @return this criteria instance, for chaining
   */
  public HBaseProjectionCriteria addColumnFamily(String columnFamily) {
    this.columnFamilies.add(columnFamily.getBytes(StandardCharsets.UTF_8));
    return this;
  }

  /**
   * Only this column from the the columnFamily will be included as result of HBase lookup.
   *
   * @param column the single column to project
   * @return this criteria instance, for chaining
   */
  public HBaseProjectionCriteria addColumn(ColumnMetaData column) {
    this.columns.add(column);
    return this;
  }

  public List<ColumnMetaData> getColumns() {
    return columns;
  }

  public List<byte[]> getColumnFamilies() {
    return columnFamilies;
  }
}
| apache-2.0 |
mwol/gobblin | gobblin-core/src/test/java/gobblin/writer/SimpleDataWriterTest.java | 14140 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.writer;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.Collections;
import java.util.zip.GZIPInputStream;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import gobblin.configuration.ConfigurationKeys;
import gobblin.configuration.State;
import gobblin.crypto.EncryptionConfigParser;
import gobblin.crypto.EncryptionFactory;
import gobblin.metadata.types.GlobalMetadata;
/**
* Unit tests for {@link SimpleDataWriter}.
*
* @author [email protected]
*/
@Test(groups = { "gobblin.writer" })
public class SimpleDataWriterTest {
  private String filePath; // relative extract path under the staging/output dirs, built in setUp()
  private final String schema = "";
  private final int newLine = "\n".getBytes()[0]; // byte value of the record delimiter used by tests
  private State properties; // writer configuration, rebuilt before each test
  private static final String ENCRYPT_PREFIX = "writer.encrypt.";
  /**
   * Rebuilds the writer {@link State} and ensures the staging and output
   * directories exist before each test runs.
   */
  @BeforeMethod
  public void setUp() throws Exception {
    properties = new State();

    // Making the staging and/or output dirs if necessary
    File stagingDir = new File(TestConstants.TEST_STAGING_DIR);
    File outputDir = new File(TestConstants.TEST_OUTPUT_DIR);
    if (!stagingDir.exists()) {
      stagingDir.mkdirs();
    }
    if (!outputDir.exists()) {
      outputDir.mkdirs();
    }

    // e.g. namespace/with/slashes/table/extractId_pullType
    this.filePath = TestConstants.TEST_EXTRACT_NAMESPACE.replaceAll("\\.", "/") + "/" + TestConstants.TEST_EXTRACT_TABLE
        + "/" + TestConstants.TEST_EXTRACT_ID + "_" + TestConstants.TEST_EXTRACT_PULL_TYPE;

    properties.setProp(ConfigurationKeys.SIMPLE_WRITER_DELIMITER, "\n");
    properties.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, TestConstants.TEST_FS_URI);
    properties.setProp(ConfigurationKeys.WRITER_STAGING_DIR, TestConstants.TEST_STAGING_DIR);
    properties.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, TestConstants.TEST_OUTPUT_DIR);
    properties.setProp(ConfigurationKeys.WRITER_FILE_PATH, this.filePath);
    properties.setProp(ConfigurationKeys.WRITER_FILE_NAME, TestConstants.TEST_FILE_NAME);
    properties.setProp(ConfigurationKeys.SIMPLE_WRITER_PREPEND_SIZE, false);
  }
/**
* Test writing records without a delimiter and make sure it works.
* @throws IOException
*/
@Test
public void testWriteBytesNoDelim() throws IOException {
properties.setProp(ConfigurationKeys.SIMPLE_WRITER_DELIMITER, "");
// Build a writer to write test records
SimpleDataWriter writer = buildSimpleDataWriter();
byte[] rec1 = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
byte[] rec2 = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 };
byte[] rec3 = { 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45 };
writer.write(rec1);
writer.write(rec2);
writer.write(rec3);
writer.close();
writer.commit();
Assert.assertEquals(writer.recordsWritten(), 3);
Assert.assertEquals(writer.bytesWritten(), rec1.length + rec2.length + rec3.length);
File outputFile = new File(writer.getOutputFilePath());
InputStream is = new FileInputStream(outputFile);
int c, resNum = 0, resi = 0;
byte[][] records = { rec1, rec2, rec3 };
while ((c = is.read()) != -1) {
if (resi >= records[resNum].length) {
resNum++;
resi = 0;
}
Assert.assertEquals(c, records[resNum][resi]);
resi++;
}
}
/**
* Prepend the size to each record without delimiting the record. Each record
* should be prepended by the size of that record and the bytes written should
* include the prepended bytes.
*/
@Test
public void testPrependSizeWithoutDelimiter() throws IOException {
properties.setProp(ConfigurationKeys.SIMPLE_WRITER_PREPEND_SIZE, true);
properties.setProp(ConfigurationKeys.SIMPLE_WRITER_DELIMITER, "");
SimpleDataWriter writer = buildSimpleDataWriter();
byte[] rec1 = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
byte[] rec2 = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 };
byte[] rec3 = { 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45 };
byte[][] records = { rec1, rec2, rec3 };
writer.write(rec1);
writer.write(rec2);
writer.write(rec3);
writer.close();
writer.commit();
Assert.assertEquals(writer.recordsWritten(), 3);
Assert.assertEquals(writer.bytesWritten(), rec1.length + rec2.length + rec3.length + (Long.SIZE / 8 * 3));
File outputFile = new File(writer.getOutputFilePath());
DataInputStream dis = new DataInputStream(new FileInputStream(outputFile));
for (int i = 0; i < 3; i++) {
long size = dis.readLong();
Assert.assertEquals(size, records[i].length);
for (int j = 0; j < size; j++) {
Assert.assertEquals(dis.readByte(), records[i][j]);
}
}
}
/**
* Use the simple data writer to write random bytes to a file and ensure
* they are the same when read back.
*
* @throws IOException
*/
@Test
public void testWriteRandomBytes() throws IOException {
// Build a writer to write test records
SimpleDataWriter writer = buildSimpleDataWriter();
byte[] rec1 = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
byte[] rec2 = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 };
byte[] rec3 = { 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45 };
writer.write(rec1);
writer.write(rec2);
writer.write(rec3);
writer.close();
writer.commit();
Assert.assertEquals(writer.recordsWritten(), 3);
Assert.assertEquals(writer.bytesWritten(), rec1.length + rec2.length + rec3.length + 3); // 3 bytes for newline character
File outputFile = new File(writer.getOutputFilePath());
InputStream is = new FileInputStream(outputFile);
int c, resNum = 0, resi = 0;
byte[][] records = { rec1, rec2, rec3 };
while ((c = is.read()) != -1) {
if (c != newLine) {
Assert.assertEquals(c, records[resNum][resi]);
resi++;
} else {
resNum++;
resi = 0;
}
}
}
/**
* Prepend the size to each record and delimit the record. Each record
* should be prepended by the size of that record and the bytes written should
* include the prepended bytes.
*/
@Test
public void testPrependSizeWithDelimiter() throws IOException {
properties.setProp(ConfigurationKeys.SIMPLE_WRITER_PREPEND_SIZE, true);
SimpleDataWriter writer = buildSimpleDataWriter();
byte[] rec1 = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
byte[] rec2 = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 };
byte[] rec3 = { 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45 };
byte[][] records = { rec1, rec2, rec3 };
writer.write(rec1);
writer.write(rec2);
writer.write(rec3);
writer.close();
writer.commit();
Assert.assertEquals(writer.recordsWritten(), 3);
Assert.assertEquals(writer.bytesWritten(), rec1.length + rec2.length + rec3.length + (Long.SIZE / 8 * 3) + 3);
File outputFile = new File(writer.getOutputFilePath());
DataInputStream dis = new DataInputStream(new FileInputStream(outputFile));
for (int i = 0; i < 3; i++) {
long size = dis.readLong();
Assert.assertEquals(size, records[i].length + 1);
for (int j = 0; j < size - 1; j++) {
Assert.assertEquals(dis.readByte(), records[i][j]);
}
Assert.assertEquals(dis.readByte(), '\n');
}
}
@Test
public void testSupportsGzip() throws IOException {
properties.setProp(ConfigurationKeys.WRITER_CODEC_TYPE, "gzip");
properties.setProp(ConfigurationKeys.SIMPLE_WRITER_DELIMITER, "");
byte[] toWrite = new byte[] { 'a', 'b', 'c', 'd'};
SimpleDataWriter writer = buildSimpleDataWriter();
writer.write(toWrite);
writer.close();
writer.commit();
File outputFile = new File(writer.getOutputFilePath());
InputStream in = new GZIPInputStream(new FileInputStream(outputFile));
byte[] contents = IOUtils.toByteArray(in);
Assert.assertEquals(contents, toWrite, "Expected gzip'd content to be written out");
Assert.assertTrue(outputFile.getName().endsWith(".gzip"), "Expected gzip'd file to end in .gzip");
}
/**
 * Verifies that compression and encryption compose: the output file name carries
 * both suffixes, and decrypting then gunzipping the file yields the original bytes.
 *
 * @throws IOException if writing or decoding the data fails
 */
@Test
public void testSupportsGzipAndEncryption() throws IOException {
  final String ENCRYPTION_TYPE = "insecure_shift";
  final String COMPRESSION_TYPE = "gzip";
  properties.setProp(ConfigurationKeys.WRITER_CODEC_TYPE, COMPRESSION_TYPE);
  properties.setProp(ENCRYPT_PREFIX + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY,
      ENCRYPTION_TYPE);
  properties.setProp(ConfigurationKeys.SIMPLE_WRITER_DELIMITER, "");
  byte[] toWrite = new byte[] { 'a', 'b', 'c', 'd'};

  SimpleDataWriter writer = buildSimpleDataWriter();
  writer.write(toWrite);
  writer.close();
  writer.commit();

  File outputFile = new File(writer.getOutputFilePath());
  Assert.assertTrue(outputFile.getName().endsWith("." + COMPRESSION_TYPE + "." + ENCRYPTION_TYPE),
      "Expected compression & encryption in file name!");
  // try-with-resources: the original leaked both the decrypting and the gunzip stream.
  // Decode in reverse order of encoding: decrypt first, then decompress.
  try (InputStream decryptedFile =
      EncryptionFactory.buildStreamCryptoProvider(ENCRYPTION_TYPE, Collections.<String, Object>emptyMap())
          .decodeInputStream(new FileInputStream(outputFile));
      InputStream uncompressedFile = new GZIPInputStream(decryptedFile)) {
    byte[] contents = IOUtils.toByteArray(uncompressedFile);
    Assert.assertEquals(contents, toWrite, "expected to decode same contents");
  }
}
/**
 * Use the simple writer to write json entries to a file and ensure that
 * they are the same when read back.
 *
 * @throws IOException if writing or reading back the records fails
 */
@Test
public void testWrite() throws IOException {
  SimpleDataWriter writer = buildSimpleDataWriter();
  int totalBytes = 3; // 3 extra bytes for the newline character

  // Write all test records
  for (String record : TestConstants.JSON_RECORDS) {
    byte[] toWrite = record.getBytes();
    Assert.assertEquals(toWrite.length, record.length()); // ensure null byte does not get added to end
    writer.write(toWrite);
    totalBytes += toWrite.length;
  }
  writer.close();
  writer.commit();

  Assert.assertEquals(writer.recordsWritten(), 3);
  Assert.assertEquals(writer.bytesWritten(), totalBytes);

  File outputFile = new File(writer.getOutputFilePath());
  int lineNumber = 0;
  // try-with-resources: the original leaked the reader when an assertion failed mid-loop
  try (BufferedReader br = new BufferedReader(new FileReader(outputFile))) {
    String line;
    while ((line = br.readLine()) != null) {
      Assert.assertEquals(line, TestConstants.JSON_RECORDS[lineNumber]);
      lineNumber++;
    }
  }
  Assert.assertEquals(lineNumber, 3);
}
/**
 * Builds a {@link SimpleDataWriter} configured for the test HDFS destination,
 * AVRO output format, the test writer id, the test schema and branch 0.
 *
 * @return a writer ready for use in a test
 * @throws IOException if the writer cannot be created
 */
private SimpleDataWriter buildSimpleDataWriter()
    throws IOException {
  SimpleDataWriterBuilder builder = (SimpleDataWriterBuilder) new SimpleDataWriterBuilder()
      .writeTo(Destination.of(Destination.DestinationType.HDFS, properties))
      .writeInFormat(WriterOutputFormat.AVRO)
      .withWriterId(TestConstants.TEST_WRITER_ID)
      .withSchema(this.schema)
      .forBranch(0);
  return new SimpleDataWriter(builder, properties);
}
/**
 * If the staging file exists, the simple data writer should overwrite its contents.
 *
 * @throws IOException if the staging file cannot be pre-populated or the output read back
 */
@Test
public void testOverwriteExistingStagingFile() throws IOException {
  byte[] randomBytesStage = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
  byte[] randomBytesWrite = { 11, 12, 13, 14, 15 };
  Path stagingFile = new Path(TestConstants.TEST_STAGING_DIR + Path.SEPARATOR + this.filePath,
      TestConstants.TEST_FILE_NAME + "." + TestConstants.TEST_WRITER_ID + "." + "tmp");
  Configuration conf = new Configuration();
  // Add all job configuration properties so they are picked up by Hadoop
  for (String key : properties.getPropertyNames()) {
    conf.set(key, properties.getProp(key));
  }
  FileSystem fs = FileSystem.get(URI.create(TestConstants.TEST_FS_URI), conf);
  // Pre-populate the staging file with garbage the writer must overwrite.
  // try-with-resources guarantees the stream is closed even if write() throws.
  try (OutputStream os = fs.create(stagingFile)) {
    os.write(randomBytesStage);
    os.flush();
  }

  SimpleDataWriter writer = buildSimpleDataWriter();
  writer.write(randomBytesWrite);
  writer.close();
  writer.commit();

  Assert.assertEquals(writer.recordsWritten(), 1);
  Assert.assertEquals(writer.bytesWritten(), randomBytesWrite.length + 1);

  File writeFile = new File(writer.getOutputFilePath());
  int c, i = 0;
  // try-with-resources: the original never closed this stream (resource leak)
  try (InputStream is = new FileInputStream(writeFile)) {
    while ((c = is.read()) != -1) {
      if (i == 5) {
        Assert.assertEquals(c, (byte) newLine); // the last byte should be newline
        i++;
        continue;
      }
      // None of the pre-existing staging bytes may survive the overwrite.
      Assert.assertEquals(randomBytesWrite[i], c);
      i++;
    }
  }
}
/**
 * Removes the test root directory (staging and/or output data) after each test method,
 * if it was created.
 *
 * @throws IOException if the directory cannot be deleted
 */
@AfterMethod
public void tearDown() throws IOException {
  File rootDir = new File(TestConstants.TEST_ROOT_DIR);
  if (!rootDir.exists()) {
    return;
  }
  FileUtil.fullyDelete(rootDir);
}
}
| apache-2.0 |
michaelgallacher/intellij-community | plugins/groovy/groovy-psi/src/org/jetbrains/plugins/groovy/codeInspection/threading/GroovyNestedSynchronizedStatementInspection.java | 2670 | /*
* Copyright 2007-2008 Dave Griffith
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.plugins.groovy.codeInspection.threading;
import com.intellij.psi.PsiModifier;
import com.intellij.psi.util.PsiTreeUtil;
import org.jetbrains.annotations.Nls;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.plugins.groovy.codeInspection.BaseInspection;
import org.jetbrains.plugins.groovy.codeInspection.BaseInspectionVisitor;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.GrStatement;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.GrSynchronizedStatement;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.blocks.GrClosableBlock;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.typedef.members.GrMethod;
/**
 * Inspection reporting a Groovy {@code synchronized} statement that is nested inside
 * another {@code synchronized} statement, or written inside a method declared
 * {@code synchronized}.
 */
public class GroovyNestedSynchronizedStatementInspection extends BaseInspection {

  @Override
  @Nls
  @NotNull
  public String getGroupDisplayName() {
    return THREADING_ISSUES;
  }

  @Override
  @Nls
  @NotNull
  public String getDisplayName() {
    return "Nested 'synchronized' statement";
  }

  @Override
  @Nullable
  protected String buildErrorString(Object... args) {
    return "Nested '#ref' statement #loc";
  }

  @NotNull
  @Override
  public BaseInspectionVisitor buildVisitor() {
    return new Visitor();
  }

  private static class Visitor extends BaseInspectionVisitor {

    @Override
    public void visitSynchronizedStatement(@NotNull GrSynchronizedStatement synchronizedStatement) {
      super.visitSynchronizedStatement(synchronizedStatement);
      // Find the nearest enclosing synchronized statement OR closable block, whichever
      // comes first. If the nearest match is a closable block, the statement is inside
      // a closure and no nesting is reported for the outer synchronized statement.
      final GrStatement enclosing =
        PsiTreeUtil.getParentOfType(synchronizedStatement, GrSynchronizedStatement.class, GrClosableBlock.class);
      if (enclosing instanceof GrSynchronizedStatement) {
        registerStatementError(synchronizedStatement);
        return;
      }
      // Also flag a synchronized statement appearing in a method that is itself
      // declared 'synchronized'.
      final GrMethod containingMethod = PsiTreeUtil.getParentOfType(synchronizedStatement, GrMethod.class);
      if (containingMethod == null) {
        return;
      }
      if (containingMethod.hasModifierProperty(PsiModifier.SYNCHRONIZED)) {
        registerStatementError(synchronizedStatement);
      }
    }
  }
}
| apache-2.0 |
codehaus/izpack | izpack-panel/src/test/java/com/izforge/izpack/panels/target/TargetPanelTest.java | 15040 | /*
* IzPack - Copyright 2001-2013 Julien Ponge, All Rights Reserved.
*
* http://izpack.org/
* http://izpack.codehaus.org/
*
* Copyright 2012 Tim Anderson
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.izforge.izpack.panels.target;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import org.apache.commons.io.FileUtils;
import org.fest.swing.fixture.FrameFixture;
import org.fest.swing.fixture.JOptionPaneFixture;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import com.izforge.izpack.api.GuiId;
import com.izforge.izpack.api.data.InstallData;
import com.izforge.izpack.api.data.Panel;
import com.izforge.izpack.api.factory.ObjectFactory;
import com.izforge.izpack.api.resource.Locales;
import com.izforge.izpack.api.resource.Messages;
import com.izforge.izpack.api.resource.Resources;
import com.izforge.izpack.api.rules.RulesEngine;
import com.izforge.izpack.core.resource.ResourceManager;
import com.izforge.izpack.gui.IconsDatabase;
import com.izforge.izpack.gui.log.Log;
import com.izforge.izpack.installer.data.GUIInstallData;
import com.izforge.izpack.installer.data.UninstallDataWriter;
import com.izforge.izpack.installer.gui.InstallerFrame;
import com.izforge.izpack.panels.simplefinish.SimpleFinishPanel;
import com.izforge.izpack.panels.test.AbstractPanelTest;
import com.izforge.izpack.panels.test.TestGUIPanelContainer;
import com.izforge.izpack.test.Container;
/**
 * Tests the {@link TargetPanel} class.
 * <p>
 * Each test shows the panel in an installer frame fixture, drives it with FEST,
 * and checks both the dialogs displayed and the resulting install path.
 * <p>
 * NOTE(review): these tests synchronise with the GUI via fixed {@code Thread.sleep}
 * calls, which makes them timing-dependent and potentially flaky on slow machines.
 *
 * @author Tim Anderson
 */
@Container(TestGUIPanelContainer.class)
public class TargetPanelTest extends AbstractPanelTest
{

    /**
     * Temporary folder.
     */
    @Rule
    public TemporaryFolder temporaryFolder = new TemporaryFolder();

    /**
     * Constructs a {@code TargetPanelTest}.
     *
     * @param container the panel container
     * @param installData the installation data
     * @param resourceManager the resource manager
     * @param factory the panel factory
     * @param rules the rules
     * @param icons the icons
     * @param uninstallDataWriter the uninstallation data writer
     * @param locales the locales
     */
    public TargetPanelTest(TestGUIPanelContainer container, GUIInstallData installData, ResourceManager resourceManager,
                           ObjectFactory factory, RulesEngine rules, IconsDatabase icons,
                           UninstallDataWriter uninstallDataWriter, Locales locales)
    {
        super(container, installData, resourceManager, factory, rules, icons, uninstallDataWriter, locales);
    }

    /**
     * Situation: Empty path is entered during target panel
     * This is very similar to the testDirectoryExists test, since the current user directory (where JVM was started)
     * is most likely to exist. How would you start the JVM from a non-existential location?
     *
     * 1. Emit a warning in the form of a question
     * 2. Ensure target panel warning is shown in the warning question prompt
     * 3. Ensure that the installation path is set to the 'user.dir' system property.
     *
     * @throws Exception for any error
     */
    @Test
    public void testEmptyPath() throws Exception
    {
        File userDir = new File(System.getProperty("user.dir"));
        GUIInstallData installData = getInstallData();
        installData.setDefaultInstallPath("");
        FrameFixture fixture = show(TargetPanel.class, SimpleFinishPanel.class);
        Thread.sleep(1000);
        assertTrue(getPanels().getView() instanceof TargetPanel);
        // attempt to navigate to the next panel
        fixture.button(GuiId.BUTTON_NEXT.id).click();
        Thread.sleep(1000);
        checkWarningQuestion(fixture, installData.getMessages().get("TargetPanel.warn"));
        Thread.sleep(1000);
        // an empty path falls back to the JVM working directory ('user.dir')
        assertEquals(userDir.getAbsolutePath(), installData.getInstallPath());
    }

    /**
     * Verifies that a dialog is displayed if the directory will be created.
     *
     * @throws Exception for any error
     */
    @Test
    public void testShowCreateDirectoryMessage() throws Exception
    {
        GUIInstallData installData = getInstallData();
        File root = temporaryFolder.getRoot();
        File dir = new File(root, "install");   // does not exist yet, so it must be created
        installData.setDefaultInstallPath(dir.getAbsolutePath());

        // show the panel
        FrameFixture fixture = show(TargetPanel.class, SimpleFinishPanel.class);
        Thread.sleep(1000);
        assertTrue(getPanels().getView() instanceof TargetPanel);

        // attempt to navigate to the next panel
        fixture.button(GuiId.BUTTON_NEXT.id).click();
        Thread.sleep(1000);
        // the "will create directory" warning must name the target directory
        String expectedMessage = installData.getMessages().get("TargetPanel.createdir") + "\n" + dir;
        checkWarning(fixture, expectedMessage);
        Thread.sleep(1000);
        assertEquals(dir.getAbsolutePath(), installData.getInstallPath());
        assertTrue(getPanels().getView() instanceof SimpleFinishPanel);
    }

    /**
     * Verifies that a dialog is displayed if a directory is selected that already exists.
     *
     * @throws Exception for any error
     */
    @Test
    public void testDirectoryExists() throws Exception
    {
        GUIInstallData installData = getInstallData();
        File root = temporaryFolder.getRoot();
        File dir = new File(root, "install");
        assertTrue(dir.mkdirs());   // pre-create the directory to trigger the warning
        installData.setDefaultInstallPath(dir.getAbsolutePath());

        // show the panel
        FrameFixture fixture = show(TargetPanel.class, SimpleFinishPanel.class);
        Thread.sleep(1000);
        assertTrue(getPanels().getView() instanceof TargetPanel);

        // attempt to navigate to the next panel
        fixture.button(GuiId.BUTTON_NEXT.id).click();
        Thread.sleep(1000);
        checkWarningQuestion(fixture, installData.getMessages().get("TargetPanel.warn"));
        Thread.sleep(1000);
        assertEquals(dir.getAbsolutePath(), installData.getInstallPath());
        assertTrue(getPanels().getView() instanceof SimpleFinishPanel);
    }

    /**
     * Verifies that a dialog is displayed if the target directory cannot be written to.
     *
     * @throws Exception for any error
     */
    @Test
    public void testNotWritable() throws Exception
    {
        File root = File.listRoots()[0];
        File dir = new File(root, "install");
        GUIInstallData installData = getInstallData();
        installData.setDefaultInstallPath(dir.getAbsolutePath());

        // show the panel; NotWritableTargetPanel is used instead of TargetPanel
        FrameFixture fixture = show(NotWritableTargetPanel.class, SimpleFinishPanel.class);
        Thread.sleep(1000);

        // attempt to navigate to the next panel
        fixture.button(GuiId.BUTTON_NEXT.id).click();
        Thread.sleep(1000);
        checkErrorMessage(fixture, installData.getMessages().get("TargetPanel.notwritable"));
        // navigation must have been blocked: no install path was set
        assertNull(installData.getInstallPath());
    }

    /**
     * Verifies that when the "modify.izpack.install" variable is specified, the target directory must exist and
     * contain an <em>.installationinformation</em> file.
     *
     * @throws Exception for any error
     */
    @Test
    public void testModifyInstallation() throws Exception
    {
        GUIInstallData installData = getInstallData();
        Messages messages = installData.getMessages();
        installData.setVariable(InstallData.MODIFY_INSTALLATION, "true");

        File root = temporaryFolder.getRoot();
        File dir = new File(root, "install");
        installData.setDefaultInstallPath(dir.getAbsolutePath());

        // show the panel
        FrameFixture fixture = show(TargetPanel.class, SimpleFinishPanel.class);
        Thread.sleep(1000);
        assertTrue(getPanels().getView() instanceof TargetPanel);

        // attempt to navigate to the next panel; fails because the directory does not exist
        fixture.button(GuiId.BUTTON_NEXT.id).click();
        Thread.sleep(1000);
        checkErrorMessage(fixture, messages.get("PathInputPanel.required"));
        Thread.sleep(1000);
        assertTrue(dir.mkdirs());

        // attempt to navigate to the next panel; fails because .installationinformation is missing
        fixture.button(GuiId.BUTTON_NEXT.id).click();
        Thread.sleep(1000);
        checkErrorMessage(fixture, messages.get("PathInputPanel.required.forModificationInstallation"));

        // create the .installinformationfile
        TargetPanelTestHelper.createInstallationInfo(dir);
        Thread.sleep(1000);
        fixture.button(GuiId.BUTTON_NEXT.id).click();
        Thread.sleep(1000);

        // navigation should now succeed.
        assertEquals(dir.getAbsolutePath(), installData.getInstallPath());
        assertTrue(getPanels().getView() instanceof SimpleFinishPanel);
    }

    /**
     * Verifies that the <em>TargetPanel.incompatibleInstallation</em> message is displayed if the selected
     * directory contains an unrecognised .installationinformation file.
     *
     * @throws Exception for any error
     */
    @Test
    public void testIncompatibleInstallation() throws Exception
    {
        GUIInstallData installData = getInstallData();

        // set up two potential directories to install to, "badDir" and "goodDir"
        File root = temporaryFolder.getRoot();
        File badDir = new File(root, "badDir");
        assertTrue(badDir.mkdirs());
        File goodDir = new File(root, "goodDir"); // don't bother creating it
        installData.setDefaultInstallPath(badDir.getAbsolutePath());

        // create an invalid "badDir/.installationinformation" to simulate incompatible data
        TargetPanelTestHelper.createBadInstallationInfo(badDir);

        // show the panel
        FrameFixture fixture = show(TargetPanel.class, SimpleFinishPanel.class);
        Thread.sleep(2000);
        assertTrue(getPanels().getView() instanceof TargetPanel);
        TargetPanel panel = (TargetPanel) getPanels().getView();

        // attempt to navigate to the next panel
        fixture.button(GuiId.BUTTON_NEXT.id).click();
        Thread.sleep(1000);

        // panel should be the same and error should be displayed
        assertEquals(panel, getPanels().getView());
        checkErrorMessage(fixture, TargetPanelTestHelper.getIncompatibleInstallationMessage(installData));
        Thread.sleep(1000);

        // should still be on the TargetPanel
        assertEquals(panel, getPanels().getView());
        fixture.textBox().setText(goodDir.getAbsolutePath());

        // suppress dialog indicating that goodDir will be created
        installData.setVariable("ShowCreateDirectoryMessage", "false");

        // attempt to navigate to the next panel
        Thread.sleep(1000);
        fixture.button(GuiId.BUTTON_NEXT.id).click();
        Thread.sleep(1500);
        assertTrue(getPanels().getView() instanceof SimpleFinishPanel);
    }

    /**
     * Verifies that when {@link TargetPanel#setExistFiles(String[])} is used, the specified files must exist
     * in order for the panel to be valid.
     *
     * @throws Exception for any error
     */
    @Test
    public void testFilesExist() throws Exception
    {
        String[] requiredFiles = {"a", "b"};

        GUIInstallData installData = getInstallData();
        Messages messages = installData.getMessages();

        File root = temporaryFolder.getRoot();
        File dir = new File(root, "install");
        assertTrue(dir.mkdirs());
        installData.setDefaultInstallPath(dir.getAbsolutePath());

        // show the panel
        FrameFixture fixture = show(TargetPanel.class, SimpleFinishPanel.class);
        Thread.sleep(1000);
        assertTrue(getPanels().getView() instanceof TargetPanel);
        TargetPanel panel = (TargetPanel) getPanels().getView();
        panel.setMustExist(true); // to avoid popping up a Directory already exists dialog
        panel.setExistFiles(requiredFiles);

        // navigation fails while the required files are absent
        fixture.button(GuiId.BUTTON_NEXT.id).click();
        Thread.sleep(1000);
        checkErrorMessage(fixture, messages.get("PathInputPanel.notValid"));

        // create the required files
        for (String required : requiredFiles)
        {
            File file = new File(dir, required);
            FileUtils.touch(file);
        }
        fixture.button(GuiId.BUTTON_NEXT.id).click();
        Thread.sleep(1000);
        assertTrue(getPanels().getView() instanceof SimpleFinishPanel);
        assertEquals(dir.getAbsolutePath(), installData.getInstallPath());
    }

    /**
     * Verifies that a warning dialog is being displayed, and dismisses it via OK.
     *
     * @param frame the parent frame
     * @param expected the expected warning message
     */
    private void checkWarning(FrameFixture frame, String expected)
    {
        JOptionPaneFixture warning = frame.optionPane().requireWarningMessage();
        warning.requireMessage(expected);
        warning.okButton().click();
    }

    /**
     * Verifies that a warning question dialog is being displayed, and answers it with Yes.
     *
     * @param frame the parent frame
     * @param expected the expected warning message
     */
    private void checkWarningQuestion(FrameFixture frame, String expected)
    {
        JOptionPaneFixture warningQuestion = frame.optionPane().requireWarningMessage();
        warningQuestion.requireMessage(expected);
        warningQuestion.yesButton().click();
    }

    /**
     * Verify that an error dialog is being displayed, and dismisses it via OK.
     *
     * @param frame the parent frame
     * @param expected the expected error message
     */
    private void checkErrorMessage(FrameFixture frame, String expected)
    {
        JOptionPaneFixture error = frame.optionPane().requireErrorMessage();
        error.requireMessage(expected);
        error.okButton().click();
    }

    /**
     * Verify that a question dialog is being displayed, and answers it with Yes.
     * <p>
     * NOTE(review): this helper is not referenced by any test in this class;
     * it appears unused and is a candidate for removal — confirm before deleting.
     *
     * @param frame the parent frame
     * @param expected the expected error message
     */
    private void checkQuestionMessage(FrameFixture frame, String expected)
    {
        JOptionPaneFixture question = frame.optionPane().requireQuestionMessage();
        question.requireMessage(expected);
        question.yesButton().click();
    }

    /**
     * Helper implementation of TargetPanel that simulates no permission to write to a directory.
     * <p>
     * NOTE(review): no methods are overridden here — the "not writable" condition
    * presumably arises from pointing the default install path at a filesystem root
     * (see {@link #testNotWritable()}) — confirm against TargetPanel's writability check.
     */
    public static class NotWritableTargetPanel extends TargetPanel
    {

        public NotWritableTargetPanel(Panel panel, InstallerFrame parent, GUIInstallData installData,
                                      Resources resources, Log log)
        {
            super(panel, parent, installData, resources, log);
        }
    }
}
| apache-2.0 |
vlsi/calcite | core/src/main/java/org/apache/calcite/sql/dialect/SnowflakeSqlDialect.java | 1502 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.calcite.sql.dialect;
import org.apache.calcite.avatica.util.Casing;
import org.apache.calcite.sql.SqlDialect;
/**
 * A <code>SqlDialect</code> implementation for the Snowflake database.
 */
public class SnowflakeSqlDialect extends SqlDialect {
  /** Default dialect context: Snowflake product, identifiers quoted with
   * double quotes, unquoted identifiers folded to upper case. */
  public static final SqlDialect.Context DEFAULT_CONTEXT;

  /** Default Snowflake dialect instance, built from {@link #DEFAULT_CONTEXT}. */
  public static final SqlDialect DEFAULT;

  static {
    DEFAULT_CONTEXT = SqlDialect.EMPTY_CONTEXT
        .withDatabaseProduct(SqlDialect.DatabaseProduct.SNOWFLAKE)
        .withIdentifierQuoteString("\"")
        .withUnquotedCasing(Casing.TO_UPPER);
    DEFAULT = new SnowflakeSqlDialect(DEFAULT_CONTEXT);
  }

  /** Creates a SnowflakeSqlDialect. */
  public SnowflakeSqlDialect(Context context) {
    super(context);
  }
}
| apache-2.0 |
narahari92/metamodel | core/src/main/java/org/apache/metamodel/data/DefaultRow.java | 5379 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.metamodel.data;
import java.io.ObjectInputStream;
import java.io.ObjectInputStream.GetField;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.List;
import org.apache.metamodel.query.SelectItem;
/**
 * Default Row implementation. Holds values in memory.
 * <p>
 * Instances are immutable after construction. Styles are optional: when every
 * cell has {@link Style#NO_STYLE}, no style array is retained at all.
 */
public final class DefaultRow extends AbstractRow implements Row {

    private static final long serialVersionUID = 1L;

    // Header describing the select items that the values map to, index by index.
    private final DataSetHeader _header;
    // One cell value per select item in the header.
    private final Object[] _values;
    // Optional per-cell styles; null means "no styling" for the whole row.
    private final Style[] _styles;

    /**
     * Constructs a row.
     *
     * @param header the header describing the row's select items; must not be null
     * @param values the cell values; must not be null and must match the header's size
     * @param styles optional per-cell styles; may be null. When non-null it must
     *               have the same length as {@code values} and contain no null
     *               elements. If every element equals {@link Style#NO_STYLE},
     *               the array is dropped (replaced by null) to save memory.
     */
    public DefaultRow(DataSetHeader header, Object[] values, Style[] styles) {
        if (header == null) {
            throw new IllegalArgumentException("DataSet header cannot be null");
        }
        if (values == null) {
            throw new IllegalArgumentException("Values cannot be null");
        }
        if (header.size() != values.length) {
            throw new IllegalArgumentException("Header size and values length must be equal. " + header.size()
                    + " select items present in header and encountered these values: " + Arrays.toString(values));
        }
        if (styles != null) {
            if (values.length != styles.length) {
                throw new IllegalArgumentException("Values length and styles length must be equal. " + values.length
                        + " values present and encountered these styles: " + Arrays.toString(styles));
            }
            // Validate each style and detect whether any real styling is present.
            boolean entirelyNoStyle = true;
            for (int i = 0; i < styles.length; i++) {
                if (styles[i] == null) {
                    throw new IllegalArgumentException("Elements in the style array cannot be null");
                }
                if (entirelyNoStyle && !Style.NO_STYLE.equals(styles[i])) {
                    entirelyNoStyle = false;
                }
            }

            if (entirelyNoStyle) {
                // no need to reference any styles
                styles = null;
            }
        }

        _header = header;
        _values = values;
        _styles = styles;
    }

    /**
     * Constructs a row without styles.
     *
     * @param header the header describing the row's select items
     * @param values the cell values
     */
    public DefaultRow(DataSetHeader header, Object[] values) {
        this(header, values, null);
    }

    @Override
    public Object getValue(int index) throws ArrayIndexOutOfBoundsException {
        return _values[index];
    }

    @Override
    public Object[] getValues() {
        // Note: returns the internal array directly (no defensive copy).
        return _values;
    }

    @Override
    public Style getStyle(int index) throws IndexOutOfBoundsException {
        if (_styles == null) {
            // No styles were retained for this row; every cell is unstyled.
            return Style.NO_STYLE;
        }
        return _styles[index];
    }

    @Override
    public Style[] getStyles() {
        // May be null when the row carries no styling (see constructor).
        return _styles;
    }

    @Override
    protected DataSetHeader getHeader() {
        return _header;
    }

    /**
     * Method invoked by the Java serialization framework while deserializing
     * Row instances. Since previous versions of MetaModel did not use a
     * DataSetHeader, but had a reference to a List<SelectItem>, this
     * deserialization is particularly tricky. We check if the items variable is
     * there, and if it is, we convert it to a header instead.
     * <p>
     * Final fields are assigned reflectively (via setAccessible) because the
     * constructor is bypassed during deserialization.
     *
     * @param stream the serialization stream to read fields from
     * @throws Exception if reflective field access or stream reading fails
     */
    private void readObject(ObjectInputStream stream) throws Exception {
        GetField fields = stream.readFields();
        try {
            // backwards compatible deserialization, convert items to header
            // (fields.get throws IllegalArgumentException when "_items" is absent)
            Object items = fields.get("_items", null);
            @SuppressWarnings("unchecked")
            List<SelectItem> itemsList = (List<SelectItem>) items;
            SimpleDataSetHeader header = new SimpleDataSetHeader(itemsList);
            Field field = getClass().getDeclaredField("_header");
            field.setAccessible(true);
            field.set(this, header);
        } catch (IllegalArgumentException e) {
            // no backwards compatible deserialization needed.
            setWhileDeserializing(fields, "_header");
        }

        setWhileDeserializing(fields, "_values");
        setWhileDeserializing(fields, "_styles");
    }

    /**
     * Copies a single serialized field value into this instance's (final) field
     * of the same name, using reflection.
     *
     * @param fields    the serialized field values
     * @param fieldName the name of the field to restore
     * @throws Exception if the field does not exist or cannot be assigned
     */
    private void setWhileDeserializing(GetField fields, String fieldName) throws Exception {
        Object value = fields.get(fieldName, null);
        Field field = getClass().getDeclaredField(fieldName);
        field.setAccessible(true);
        field.set(this, value);
    }
} | apache-2.0 |
Miciah/origin | vendor/github.com/Azure/azure-sdk-for-go/services/sql/mgmt/2014-04-01/sql/servertableauditingpolicies.go | 12395 | package sql
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// ServerTableAuditingPoliciesClient is the Azure SQL Database management API, which provides a RESTful set of web
// services that interact with Azure SQL Database services to manage your databases. The API enables you to create,
// retrieve, update, and delete databases.
type ServerTableAuditingPoliciesClient struct {
	// BaseClient supplies the subscription ID, base URI and shared request pipeline.
	BaseClient
}

// NewServerTableAuditingPoliciesClient creates an instance of the ServerTableAuditingPoliciesClient client
// using the default (public Azure) base URI.
func NewServerTableAuditingPoliciesClient(subscriptionID string) ServerTableAuditingPoliciesClient {
	return NewServerTableAuditingPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewServerTableAuditingPoliciesClientWithBaseURI creates an instance of the ServerTableAuditingPoliciesClient
// client with a custom endpoint (e.g. a sovereign cloud or Azure Stack base URI).
func NewServerTableAuditingPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ServerTableAuditingPoliciesClient {
	return ServerTableAuditingPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates a server's table auditing policy. Table auditing is deprecated, use blob auditing
// instead.
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// serverName - the name of the server.
// parameters - the server table auditing policy.
func (client ServerTableAuditingPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, serverName string, parameters ServerTableAuditingPolicy) (result ServerTableAuditingPolicy, err error) {
	// Open a tracing span for this operation and record the final HTTP status
	// code (-1 when no response was received) when the method returns.
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/ServerTableAuditingPoliciesClient.CreateOrUpdate")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Standard autorest pipeline: prepare the request, send it, unmarshal the response.
	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, serverName, parameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "sql.ServerTableAuditingPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request")
		return
	}

	resp, err := client.CreateOrUpdateSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "sql.ServerTableAuditingPoliciesClient", "CreateOrUpdate", resp, "Failure sending request")
		return
	}

	result, err = client.CreateOrUpdateResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "sql.ServerTableAuditingPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request")
	}

	return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request: a JSON PUT against the
// server's "default" auditing policy resource, with path and query parameters encoded.
func (client ServerTableAuditingPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, serverName string, parameters ServerTableAuditingPolicy) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serverName":        autorest.Encode("path", serverName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
		// Only the fixed name "default" is used for the table auditing policy.
		"tableAuditingPolicyName": autorest.Encode("path", "default"),
	}

	const APIVersion = "2014-04-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	// Kind and Location are cleared so they are omitted from the PUT body;
	// they appear to be service-managed (read-only) fields — confirm against the API spec.
	parameters.Kind = nil
	parameters.Location = nil
	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/auditingPolicies/{tableAuditingPolicyName}", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client ServerTableAuditingPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
	// Send with the client's configured decorators plus Azure's
	// retry-with-resource-provider-registration behaviour.
	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
	return autorest.SendWithSender(client, req, sd...)
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client ServerTableAuditingPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ServerTableAuditingPolicy, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		// 200 (updated) and 201 (created) are the accepted success codes for this PUT.
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
// Get gets a server's table auditing policy. Table auditing is deprecated, use blob auditing instead.
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// serverName - the name of the server.
func (client ServerTableAuditingPoliciesClient) Get(ctx context.Context, resourceGroupName string, serverName string) (result ServerTableAuditingPolicy, err error) {
	// Open a tracing span for this operation and record the final HTTP status
	// code (-1 when no response was received) when the method returns.
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/ServerTableAuditingPoliciesClient.Get")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Standard autorest pipeline: prepare the request, send it, unmarshal the response.
	req, err := client.GetPreparer(ctx, resourceGroupName, serverName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "sql.ServerTableAuditingPoliciesClient", "Get", nil, "Failure preparing request")
		return
	}

	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "sql.ServerTableAuditingPoliciesClient", "Get", resp, "Failure sending request")
		return
	}

	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "sql.ServerTableAuditingPoliciesClient", "Get", resp, "Failure responding to request")
	}

	return
}
// GetPreparer prepares the Get request.
func (client ServerTableAuditingPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, serverName string) (*http.Request, error) {
	// URL path segments; the table auditing policy name is fixed to "default".
	urlParameters := map[string]interface{}{
		"resourceGroupName":       autorest.Encode("path", resourceGroupName),
		"serverName":              autorest.Encode("path", serverName),
		"subscriptionId":          autorest.Encode("path", client.SubscriptionID),
		"tableAuditingPolicyName": autorest.Encode("path", "default"),
	}
	const APIVersion = "2014-04-01"
	query := map[string]interface{}{
		"api-version": APIVersion,
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/auditingPolicies/{tableAuditingPolicyName}", urlParameters),
		autorest.WithQueryParameters(query))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ServerTableAuditingPoliciesClient) GetSender(req *http.Request) (*http.Response, error) {
	// Assemble the decorator chain from the request context, including the
	// registration-aware retry decorator, then dispatch the request.
	retry := azure.DoRetryWithRegistration(client.Client)
	decorators := autorest.GetSendDecorators(req.Context(), retry)
	return autorest.SendWithSender(client, req, decorators...)
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client ServerTableAuditingPoliciesClient) GetResponder(resp *http.Response) (result ServerTableAuditingPolicy, err error) {
	// Only a 200 status is accepted; the body is unmarshalled into result
	// and always closed.
	respondErr := autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return result, respondErr
}
// ListByServer lists a server's table auditing policies. Table auditing is deprecated, use blob auditing instead.
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// serverName - the name of the server.
func (client ServerTableAuditingPoliciesClient) ListByServer(ctx context.Context, resourceGroupName string, serverName string) (result ServerTableAuditingPolicyListResult, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/ServerTableAuditingPoliciesClient.ListByServer")
		defer func() {
			// Report the HTTP status to the span, or -1 if no response arrived.
			statusCode := -1
			if result.Response.Response != nil {
				statusCode = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, statusCode, err)
		}()
	}
	// Prepare, send and interpret the request; each stage wraps its failure
	// with context identifying the client and operation.
	req, prepErr := client.ListByServerPreparer(ctx, resourceGroupName, serverName)
	if prepErr != nil {
		err = autorest.NewErrorWithError(prepErr, "sql.ServerTableAuditingPoliciesClient", "ListByServer", nil, "Failure preparing request")
		return
	}
	resp, sendErr := client.ListByServerSender(req)
	if sendErr != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(sendErr, "sql.ServerTableAuditingPoliciesClient", "ListByServer", resp, "Failure sending request")
		return
	}
	result, err = client.ListByServerResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "sql.ServerTableAuditingPoliciesClient", "ListByServer", resp, "Failure responding to request")
	}
	return
}
// ListByServerPreparer prepares the ListByServer request.
func (client ServerTableAuditingPoliciesClient) ListByServerPreparer(ctx context.Context, resourceGroupName string, serverName string) (*http.Request, error) {
	// URL path segments for the server-scoped auditing policies collection.
	urlParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serverName":        autorest.Encode("path", serverName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2014-04-01"
	query := map[string]interface{}{
		"api-version": APIVersion,
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/auditingPolicies", urlParameters),
		autorest.WithQueryParameters(query))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByServerSender sends the ListByServer request. The method will close the
// http.Response Body if it receives an error.
func (client ServerTableAuditingPoliciesClient) ListByServerSender(req *http.Request) (*http.Response, error) {
	// Assemble the decorator chain from the request context, including the
	// registration-aware retry decorator, then dispatch the request.
	retry := azure.DoRetryWithRegistration(client.Client)
	decorators := autorest.GetSendDecorators(req.Context(), retry)
	return autorest.SendWithSender(client, req, decorators...)
}
// ListByServerResponder handles the response to the ListByServer request. The method always
// closes the http.Response Body.
func (client ServerTableAuditingPoliciesClient) ListByServerResponder(resp *http.Response) (result ServerTableAuditingPolicyListResult, err error) {
	// Only a 200 status is accepted; the body is unmarshalled into result
	// and always closed.
	respondErr := autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return result, respondErr
}
| apache-2.0 |
gfyoung/elasticsearch | server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java | 50810 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.node.tasks;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchTransportService;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.action.support.replication.TransportReplicationActionTests;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskInfo;
import org.elasticsearch.tasks.TaskResult;
import org.elasticsearch.tasks.TaskResultsService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.tasks.MockTaskManager;
import org.elasticsearch.test.tasks.MockTaskManagerListener;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import static java.util.Collections.emptyList;
import static java.util.Collections.singleton;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.emptyCollectionOf;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.startsWith;
/**
* Integration tests for task management API
* <p>
 * We need at least 2 nodes so we have a master node and a non-master node
*/
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, minNumDataNodes = 2, transportClientRatio = 0.0)
public class TasksIT extends ESIntegTestCase {
private Map<Tuple<String, String>, RecordingTaskManagerListener> listeners = new HashMap<>();
@Override
protected Collection<Class<? extends Plugin>> getMockPlugins() {
    // Start from the default set of mock plugins, but drop the stock
    // MockTransportService plugin; this suite adds its own copy via
    // nodePlugins() instead.
    Collection<Class<? extends Plugin>> plugins = new ArrayList<>(super.getMockPlugins());
    plugins.remove(MockTransportService.TestPlugin.class);
    return plugins;
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
    // MockTransportService is re-added here (see getMockPlugins) together with
    // the plugin that provides the blocking test-task actions.
    List<Class<? extends Plugin>> plugins =
        Arrays.asList(MockTransportService.TestPlugin.class, TestTaskPlugin.class);
    return plugins;
}
@Override
protected Collection<Class<? extends Plugin>> transportClientPlugins() {
    // The transport client needs exactly the same plugins as the nodes.
    Collection<Class<? extends Plugin>> plugins = nodePlugins();
    return plugins;
}
@Override
protected Settings nodeSettings(int nodeOrdinal) {
    // Enable the mock task manager so listener callbacks can observe task
    // registration and unregistration events on every node.
    Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal));
    builder.put(MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.getKey(), true);
    return builder.build();
}
public void testTaskCounts() {
    // Restrict the list-tasks request to data nodes; each data node runs at
    // least one node-level ([n]) list-tasks task, so we expect at least as
    // many tasks as there are data nodes.
    ListTasksResponse tasksResponse = client().admin().cluster()
        .prepareListTasks("data:true")
        .setActions(ListTasksAction.NAME + "[n]")
        .get();
    assertThat(tasksResponse.getTasks().size(), greaterThanOrEqualTo(cluster().numDataNodes()));
}
/**
 * Cluster health is a master-node action: invoked on the master it registers a
 * single task; invoked on a non-master node it registers a coordinating task
 * locally plus a delegated task on the master, with a parent/child link
 * between them.
 */
public void testMasterNodeOperationTasks() {
    registerTaskManageListeners(ClusterHealthAction.NAME);
    // First run the health on the master node - should produce only one task on the master node
    internalCluster().masterClient().admin().cluster().prepareHealth().get();
    assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events
    assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events
    resetTaskManageListeners(ClusterHealthAction.NAME);
    // Now run the health on a non-master node - should produce one task on master and one task on another node
    internalCluster().nonMasterClient().admin().cluster().prepareHealth().get();
    assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events
    assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events
    List<TaskInfo> tasks = findEvents(ClusterHealthAction.NAME, Tuple::v1);
    // Verify that one of these tasks is a parent of another task
    // (registration order of the two tasks is not deterministic, so test both directions)
    if (tasks.get(0).getParentTaskId().isSet()) {
        assertParentTask(Collections.singletonList(tasks.get(0)), tasks.get(1));
    } else {
        assertParentTask(Collections.singletonList(tasks.get(1)), tasks.get(0));
    }
}
/**
 * Validate-query with {@code allShards=true} fans out one shard-level task per
 * primary shard; all shard tasks must have the single main task as parent.
 */
public void testTransportReplicationAllShardsTasks() {
    registerTaskManageListeners(ValidateQueryAction.NAME); // main task
    registerTaskManageListeners(ValidateQueryAction.NAME + "[s]"); // shard
    // level
    // tasks
    createIndex("test");
    ensureGreen("test"); // Make sure all shards are allocated
    client().admin().indices().prepareValidateQuery("test").setAllShards(true).get();
    // the validate query operation should produce one main task
    NumShards numberOfShards = getNumShards("test");
    assertEquals(1, numberOfEvents(ValidateQueryAction.NAME, Tuple::v1));
    // and then one operation per primary shard
    assertEquals(numberOfShards.numPrimaries, numberOfEvents(ValidateQueryAction.NAME + "[s]", Tuple::v1));
    // the shard level tasks should have the main task as a parent
    assertParentTask(findEvents(ValidateQueryAction.NAME + "[s]", Tuple::v1), findEvents(ValidateQueryAction.NAME, Tuple::v1).get(0));
}
/**
 * Broadcast-by-node action (index upgrade): one main task plus one node-level
 * task per node that holds shards of the index, each parented to the main task.
 */
public void testTransportBroadcastByNodeTasks() {
    registerTaskManageListeners(UpgradeAction.NAME); // main task
    registerTaskManageListeners(UpgradeAction.NAME + "[n]"); // node level tasks
    createIndex("test");
    ensureGreen("test"); // Make sure all shards are allocated
    client().admin().indices().prepareUpgrade("test").get();
    // the upgrade operation should produce one main task
    assertEquals(1, numberOfEvents(UpgradeAction.NAME, Tuple::v1));
    // and then one operation per each node where shards are located
    assertEquals(internalCluster().nodesInclude("test").size(), numberOfEvents(UpgradeAction.NAME + "[n]", Tuple::v1));
    // all node level tasks should have the main task as a parent
    assertParentTask(findEvents(UpgradeAction.NAME + "[n]", Tuple::v1), findEvents(UpgradeAction.NAME, Tuple::v1).get(0));
}
/**
 * Validate-query without {@code allShards} targets a single shard: exactly one
 * main task and one shard-level task, the latter parented to the former.
 */
public void testTransportReplicationSingleShardTasks() {
    registerTaskManageListeners(ValidateQueryAction.NAME); // main task
    registerTaskManageListeners(ValidateQueryAction.NAME + "[s]"); // shard level tasks
    createIndex("test");
    ensureGreen("test"); // Make sure all shards are allocated
    client().admin().indices().prepareValidateQuery("test").get();
    // the validate operation should produce one main task
    assertEquals(1, numberOfEvents(ValidateQueryAction.NAME, Tuple::v1));
    // and then one operation
    assertEquals(1, numberOfEvents(ValidateQueryAction.NAME + "[s]", Tuple::v1));
    // the shard level operation should have the main task as its parent
    assertParentTask(findEvents(ValidateQueryAction.NAME + "[s]", Tuple::v1), findEvents(ValidateQueryAction.NAME, Tuple::v1).get(0));
}
/**
 * Broadcast-replication action (refresh): verifies the full task tree —
 * one main task, intermediate [s] tasks on the coordinating node (and, when a
 * primary lives elsewhere, an additional [s] task on that node), and one
 * [s][p]/[s][r] task per primary/replica shard. Parent links are checked at
 * every level; the expected shape depends on where primaries are allocated
 * relative to the coordinating node.
 */
public void testTransportBroadcastReplicationTasks() {
    registerTaskManageListeners(RefreshAction.NAME); // main task
    registerTaskManageListeners(RefreshAction.NAME + "[s]"); // shard level tasks
    registerTaskManageListeners(RefreshAction.NAME + "[s][*]"); // primary and replica shard tasks
    createIndex("test");
    ensureGreen("test"); // Make sure all shards are allocated
    client().admin().indices().prepareRefresh("test").get();
    // the refresh operation should produce one main task
    NumShards numberOfShards = getNumShards("test");
    logger.debug("number of shards, total: [{}], primaries: [{}] ", numberOfShards.totalNumShards, numberOfShards.numPrimaries);
    logger.debug("main events {}", numberOfEvents(RefreshAction.NAME, Tuple::v1));
    logger.debug("main event node {}", findEvents(RefreshAction.NAME, Tuple::v1).get(0).getTaskId().getNodeId());
    logger.debug("[s] events {}", numberOfEvents(RefreshAction.NAME + "[s]", Tuple::v1));
    logger.debug("[s][*] events {}", numberOfEvents(RefreshAction.NAME + "[s][*]", Tuple::v1));
    logger.debug("nodes with the index {}", internalCluster().nodesInclude("test"));
    assertEquals(1, numberOfEvents(RefreshAction.NAME, Tuple::v1));
    // Because it's broadcast replication action we will have as many [s] level requests
    // as we have primary shards on the coordinating node plus we will have one task per primary outside of the
    // coordinating node due to replication.
    // If all primaries are on the coordinating node, the number of tasks should be equal to the number of primaries
    // If all primaries are not on the coordinating node, the number of tasks should be equal to the number of primaries times 2
    assertThat(numberOfEvents(RefreshAction.NAME + "[s]", Tuple::v1), greaterThanOrEqualTo(numberOfShards.numPrimaries));
    assertThat(numberOfEvents(RefreshAction.NAME + "[s]", Tuple::v1), lessThanOrEqualTo(numberOfShards.numPrimaries * 2));
    // Verify that all [s] events have the proper parent
    // This is complicated because if the shard task runs on the same node it has main task as a parent
    // but if it runs on non-coordinating node it would have another intermediate [s] task on the coordinating node as a parent
    TaskInfo mainTask = findEvents(RefreshAction.NAME, Tuple::v1).get(0);
    List<TaskInfo> sTasks = findEvents(RefreshAction.NAME + "[s]", Tuple::v1);
    for (TaskInfo taskInfo : sTasks) {
        if (mainTask.getTaskId().getNodeId().equals(taskInfo.getTaskId().getNodeId())) {
            // This shard level task runs on the same node as a parent task - it should have the main task as a direct parent
            assertParentTask(Collections.singletonList(taskInfo), mainTask);
        } else {
            String description = taskInfo.getDescription();
            // This shard level task runs on another node - it should have a corresponding shard level task on the node where main task
            // is running; match it by node id and identical description
            List<TaskInfo> sTasksOnRequestingNode = findEvents(RefreshAction.NAME + "[s]",
                event -> event.v1() && mainTask.getTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId())
                    && description.equals(event.v2().getDescription()));
            // There should be only one parent task
            assertEquals(1, sTasksOnRequestingNode.size());
            assertParentTask(Collections.singletonList(taskInfo), sTasksOnRequestingNode.get(0));
        }
    }
    // we will have as many [s][p] and [s][r] tasks as we have primary and replica shards
    assertEquals(numberOfShards.totalNumShards, numberOfEvents(RefreshAction.NAME + "[s][*]", Tuple::v1));
    // we the [s][p] and [s][r] tasks should have a corresponding [s] task on the same node as a parent
    List<TaskInfo> spEvents = findEvents(RefreshAction.NAME + "[s][*]", Tuple::v1);
    for (TaskInfo taskInfo : spEvents) {
        List<TaskInfo> sTask;
        if (taskInfo.getAction().endsWith("[s][p]")) {
            // A [s][p] level task should have a corresponding [s] level task on the same node
            sTask = findEvents(RefreshAction.NAME + "[s]",
                event -> event.v1() && taskInfo.getTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId())
                    && taskInfo.getDescription().equals(event.v2().getDescription()));
        } else {
            // A [s][r] level task should have a corresponding [s] level task on the a different node (where primary is located)
            sTask = findEvents(RefreshAction.NAME + "[s]",
                event -> event.v1() && taskInfo.getParentTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId()) && taskInfo
                    .getDescription()
                    .equals(event.v2().getDescription()));
        }
        // There should be only one parent task
        assertEquals(1, sTask.size());
        assertParentTask(Collections.singletonList(taskInfo), sTask.get(0));
    }
}
/**
 * Bulk indexing task tree: one main bulk task, one or two [s] shard tasks
 * (two when the primary is not on the coordinating node), one [s][p] primary
 * task and one [s][r] task per replica — all linked by parent ids, with the
 * expected human-readable descriptions.
 */
public void testTransportBulkTasks() {
    registerTaskManageListeners(BulkAction.NAME); // main task
    registerTaskManageListeners(BulkAction.NAME + "[s]"); // shard task
    registerTaskManageListeners(BulkAction.NAME + "[s][p]"); // shard task on primary
    registerTaskManageListeners(BulkAction.NAME + "[s][r]"); // shard task on replica
    createIndex("test");
    ensureGreen("test"); // Make sure all shards are allocated to catch replication tasks
    client().prepareBulk().add(client().prepareIndex("test", "doc", "test_id")
        .setSource("{\"foo\": \"bar\"}", XContentType.JSON)).get();
    // the bulk operation should produce one main task
    List<TaskInfo> topTask = findEvents(BulkAction.NAME, Tuple::v1);
    assertEquals(1, topTask.size());
    assertEquals("requests[1], indices[test]", topTask.get(0).getDescription());
    // we should also get 1 or 2 [s] operation with main operation as a parent
    // in case the primary is located on the coordinating node we will have 1 operation, otherwise - 2
    List<TaskInfo> shardTasks = findEvents(BulkAction.NAME + "[s]", Tuple::v1);
    assertThat(shardTasks.size(), allOf(lessThanOrEqualTo(2), greaterThanOrEqualTo(1)));
    // Select the effective shard task, i.e. the one that parents the [s][p] and [s][r] tasks
    TaskInfo shardTask;
    if (shardTasks.size() == 1) {
        // we have only one task - it's going to be the parent task for all [s][p] and [s][r] tasks
        shardTask = shardTasks.get(0);
        // and it should have the main task as a parent
        assertParentTask(shardTask, findEvents(BulkAction.NAME, Tuple::v1).get(0));
        assertEquals("requests[1], index[test]", shardTask.getDescription());
    } else {
        if (shardTasks.get(0).getParentTaskId().equals(shardTasks.get(1).getTaskId())) {
            // task 1 is the parent of task 0, that means that task 0 will control [s][p] and [s][r] tasks
            shardTask = shardTasks.get(0);
            // in turn the parent of the task 1 should be the main task
            assertParentTask(shardTasks.get(1), findEvents(BulkAction.NAME, Tuple::v1).get(0));
            assertEquals("requests[1], index[test]", shardTask.getDescription());
        } else {
            // otherwise task 1 will control [s][p] and [s][r] tasks
            shardTask = shardTasks.get(1);
            // in turn the parent of the task 0 should be the main task
            assertParentTask(shardTasks.get(0), findEvents(BulkAction.NAME, Tuple::v1).get(0));
            assertEquals("requests[1], index[test]", shardTask.getDescription());
        }
    }
    // we should also get one [s][p] operation with shard operation as a parent
    assertEquals(1, numberOfEvents(BulkAction.NAME + "[s][p]", Tuple::v1));
    assertParentTask(findEvents(BulkAction.NAME + "[s][p]", Tuple::v1), shardTask);
    // we should get as many [s][r] operations as we have replica shards
    // they all should have the same shard task as a parent
    assertEquals(getNumShards("test").numReplicas, numberOfEvents(BulkAction.NAME + "[s][r]", Tuple::v1));
    assertParentTask(findEvents(BulkAction.NAME + "[s][r]", Tuple::v1), shardTask);
}
/**
 * Verifies that search tasks carry meaningful descriptions at every phase
 * (query/dfs, query-by-id, fetch, can-match), that shard tasks are parented to
 * the main search task, and that only the registered task headers
 * (X-Opaque-Id and Custom-Task-Header) are propagated onto the tasks.
 */
public void testSearchTaskDescriptions() {
    registerTaskManageListeners(SearchAction.NAME); // main task
    registerTaskManageListeners(SearchAction.NAME + "[*]"); // shard task
    createIndex("test");
    ensureGreen("test"); // Make sure all shards are allocated to catch replication tasks
    client().prepareIndex("test", "doc", "test_id").setSource("{\"foo\": \"bar\"}", XContentType.JSON)
        .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
    // "Foo-Header" is deliberately unregistered: assertTaskHeaders checks it is NOT propagated
    Map<String, String> headers = new HashMap<>();
    headers.put(Task.X_OPAQUE_ID, "my_id");
    headers.put("Foo-Header", "bar");
    headers.put("Custom-Task-Header", "my_value");
    assertSearchResponse(
        client().filterWithHeader(headers).prepareSearch("test").setTypes("doc").setQuery(QueryBuilders.matchAllQuery()).get());
    // the search operation should produce one main task
    List<TaskInfo> mainTask = findEvents(SearchAction.NAME, Tuple::v1);
    assertEquals(1, mainTask.size());
    assertThat(mainTask.get(0).getDescription(), startsWith("indices[test], types[doc], search_type["));
    assertThat(mainTask.get(0).getDescription(), containsString("\"query\":{\"match_all\""));
    assertTaskHeaders(mainTask.get(0));
    // check that if we have any shard-level requests they all have non-zero length description
    List<TaskInfo> shardTasks = findEvents(SearchAction.NAME + "[*]", Tuple::v1);
    for (TaskInfo taskInfo : shardTasks) {
        assertThat(taskInfo.getParentTaskId(), notNullValue());
        assertEquals(mainTask.get(0).getTaskId(), taskInfo.getParentTaskId());
        assertTaskHeaders(taskInfo);
        // each search phase has its own description format
        switch (taskInfo.getAction()) {
            case SearchTransportService.QUERY_ACTION_NAME:
            case SearchTransportService.DFS_ACTION_NAME:
                assertTrue(taskInfo.getDescription(), Regex.simpleMatch("shardId[[test][*]]", taskInfo.getDescription()));
                break;
            case SearchTransportService.QUERY_ID_ACTION_NAME:
                assertTrue(taskInfo.getDescription(), Regex.simpleMatch("id[*], indices[test]", taskInfo.getDescription()));
                break;
            case SearchTransportService.FETCH_ID_ACTION_NAME:
                assertTrue(taskInfo.getDescription(), Regex.simpleMatch("id[*], size[1], lastEmittedDoc[null]",
                    taskInfo.getDescription()));
                break;
            case SearchTransportService.QUERY_CAN_MATCH_NAME:
                assertTrue(taskInfo.getDescription(), Regex.simpleMatch("shardId[[test][*]]", taskInfo.getDescription()));
                break;
            default:
                fail("Unexpected action [" + taskInfo.getAction() + "] with description [" + taskInfo.getDescription() + "]");
        }
        // assert that all task descriptions have non-zero length
        assertThat(taskInfo.getDescription().length(), greaterThan(0));
    }
}
/**
 * A task header larger than the HTTP max-header-size budget must be rejected
 * with an IllegalArgumentException rather than silently stored on the task.
 */
public void testSearchTaskHeaderLimit() {
    // Build a header value guaranteed to exceed the allowed size (half of the
    // HTTP max header size, plus one byte).
    int maxSize = Math.toIntExact(SETTING_HTTP_MAX_HEADER_SIZE.getDefault(Settings.EMPTY).getBytes() / 2 + 1);
    Map<String, String> headers = new HashMap<>();
    headers.put(Task.X_OPAQUE_ID, "my_id");
    headers.put("Custom-Task-Header", randomAlphaOfLengthBetween(maxSize, maxSize + 100));
    IllegalArgumentException ex = expectThrows(
        IllegalArgumentException.class,
        () -> client().filterWithHeader(headers).admin().cluster().prepareListTasks().get()
    );
    assertThat(ex.getMessage(), startsWith("Request exceeded the maximum size of task headers "));
}
/**
 * Asserts that a task carries exactly the two registered headers
 * (X-Opaque-Id and Custom-Task-Header) with the values sent by the test
 * request — any other header must not have been propagated.
 */
private void assertTaskHeaders(TaskInfo taskInfo) {
    Map<String, String> taskHeaders = taskInfo.getHeaders();
    assertThat(taskHeaders.keySet(), hasSize(2));
    assertEquals("my_id", taskHeaders.get(Task.X_OPAQUE_ID));
    assertEquals("my_value", taskHeaders.get("Custom-Task-Header"));
}
/**
 * Very basic "is it plugged in" style test that indexes a document and makes sure that you can fetch the status of the process. The
 * goal here is to verify that the large moving parts that make fetching task status work fit together rather than to verify any
 * particular status results from indexing. For that, look at {@link TransportReplicationActionTests}. We intentionally don't use the
 * task recording mechanism used in other places in this test so we can make sure that the status fetching works properly over the wire.
 */
public void testCanFetchIndexStatus() throws Exception {
    // First latch waits for the task to start, second one blocks it from finishing.
    CountDownLatch taskRegistered = new CountDownLatch(1);
    CountDownLatch letTaskFinish = new CountDownLatch(1);
    Thread index = null;
    try {
        // Install a listener on every node that blocks index tasks as soon as
        // they register, giving us a window to list/get them mid-flight.
        for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
            ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() {
                @Override
                public void onTaskRegistered(Task task) {
                    if (task.getAction().startsWith(IndexAction.NAME)) {
                        taskRegistered.countDown();
                        logger.debug("Blocking [{}] starting", task);
                        try {
                            assertTrue(letTaskFinish.await(10, TimeUnit.SECONDS));
                        } catch (InterruptedException e) {
                            throw new RuntimeException(e);
                        }
                    }
                }
                @Override
                public void onTaskUnregistered(Task task) {
                }
                @Override
                public void waitForTaskCompletion(Task task) {
                }
            });
        }
        // Need to run the task in a separate thread because node client's .execute() is blocked by our task listener
        index = new Thread(() -> {
            IndexResponse indexResponse = client().prepareIndex("test", "test").setSource("test", "test").get();
            assertArrayEquals(ReplicationResponse.EMPTY, indexResponse.getShardInfo().getFailures());
        });
        index.start();
        assertTrue(taskRegistered.await(10, TimeUnit.SECONDS)); // waiting for at least one task to be registered
        // While the index task is blocked, list it and cross-check every field
        // against what the get-task API returns for the same task id.
        ListTasksResponse listResponse = client().admin().cluster().prepareListTasks().setActions("indices:data/write/index*")
            .setDetailed(true).get();
        assertThat(listResponse.getTasks(), not(empty()));
        for (TaskInfo task : listResponse.getTasks()) {
            assertNotNull(task.getStatus());
            GetTaskResponse getResponse = client().admin().cluster().prepareGetTask(task.getTaskId()).get();
            assertFalse("task should still be running", getResponse.getTask().isCompleted());
            TaskInfo fetchedWithGet = getResponse.getTask().getTask();
            assertEquals(task.getId(), fetchedWithGet.getId());
            assertEquals(task.getType(), fetchedWithGet.getType());
            assertEquals(task.getAction(), fetchedWithGet.getAction());
            assertEquals(task.getDescription(), fetchedWithGet.getDescription());
            assertEquals(task.getStatus(), fetchedWithGet.getStatus());
            assertEquals(task.getStartTime(), fetchedWithGet.getStartTime());
            // running time keeps growing, so only >= can be asserted
            assertThat(fetchedWithGet.getRunningTimeNanos(), greaterThanOrEqualTo(task.getRunningTimeNanos()));
            assertEquals(task.isCancellable(), fetchedWithGet.isCancellable());
            assertEquals(task.getParentTaskId(), fetchedWithGet.getParentTaskId());
        }
    } finally {
        // Always unblock the task and wait for the indexing thread, then make
        // sure no index tasks linger in the task manager.
        letTaskFinish.countDown();
        if (index != null) {
            index.join();
        }
        assertBusy(() -> {
            assertEquals(emptyList(),
                client().admin().cluster().prepareListTasks().setActions("indices:data/write/index*").get().getTasks());
        });
    }
}
/**
 * Starts the blocking test task on every node, cancels the single main task,
 * and verifies the cancellation propagates: the future completes and no test
 * tasks remain registered anywhere in the cluster.
 */
public void testTasksCancellation() throws Exception {
    // Start blocking test task
    // Get real client (the plugin is not registered on transport nodes)
    ActionFuture<TestTaskPlugin.NodesResponse> future = new TestTaskPlugin.NodesRequestBuilder(client(),
        TestTaskPlugin.TestTaskAction.INSTANCE).execute();
    logger.info("--> started test tasks");
    // Wait for the task to start on all nodes
    assertBusy(() -> assertEquals(internalCluster().size(),
        client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size()));
    logger.info("--> cancelling the main test task");
    CancelTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks()
        .setActions(TestTaskPlugin.TestTaskAction.NAME).get();
    assertEquals(1, cancelTasksResponse.getTasks().size());
    future.get();
    logger.info("--> checking that test tasks are not running");
    assertEquals(0,
        client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "*").get().getTasks().size());
}
/**
 * Starts the blocking test task on every node, then releases it via the
 * unblock action and verifies all node-level test tasks complete.
 */
public void testTasksUnblocking() throws Exception {
    // Start blocking test task
    ActionFuture<TestTaskPlugin.NodesResponse> future =
        new TestTaskPlugin.NodesRequestBuilder(client(), TestTaskPlugin.TestTaskAction.INSTANCE).execute();
    // Wait for the task to start on all nodes
    assertBusy(() -> assertEquals(internalCluster().size(),
        client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size()));
    // Release the blocked tasks and wait for the overall request to finish
    new TestTaskPlugin.UnblockTestTasksRequestBuilder(client(), TestTaskPlugin.UnblockTestTasksAction.INSTANCE).get();
    future.get();
    assertEquals(0, client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get()
        .getTasks().size());
}
public void testListTasksWaitForCompletion() throws Exception {
waitForCompletionTestCase(randomBoolean(),
id -> client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME)
.setWaitForCompletion(true).execute(),
response -> {
assertThat(response.getNodeFailures(), empty());
assertThat(response.getTaskFailures(), empty());
assertThat(response.getTasks(), hasSize(1));
TaskInfo task = response.getTasks().get(0);
assertEquals(TestTaskPlugin.TestTaskAction.NAME, task.getAction());
}
);
}
public void testGetTaskWaitForCompletionWithoutStoringResult() throws Exception {
waitForCompletionTestCase(false,
id -> client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute(),
response -> {
assertTrue(response.getTask().isCompleted());
//We didn't store the result so it won't come back when we wait
assertNull(response.getTask().getResponse());
//But the task's details should still be there because we grabbed a reference to the task before waiting for it to complete
assertNotNull(response.getTask().getTask());
assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction());
}
);
}
public void testGetTaskWaitForCompletionWithStoringResult() throws Exception {
waitForCompletionTestCase(true,
id -> client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute(),
response -> {
assertTrue(response.getTask().isCompleted());
// We stored the task so we should get its results
assertEquals(0, response.getTask().getResponseAsMap().get("failure_count"));
// The task's details should also be there
assertNotNull(response.getTask().getTask());
assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction());
}
);
}
    /**
     * Runs one wait-for-completion scenario: starts the blocking test task, issues a wait request
     * via {@code wait} once the task is running, unblocks the task, and feeds the wait response to
     * {@code validator}.
     *
     * @param storeResult should the task store its results
     * @param wait start waiting for a task. Accepts the id of the task to wait for and returns a future waiting for it.
     * @param validator validates the response of the wait request once it comes back
     */
    private <T> void waitForCompletionTestCase(boolean storeResult, Function<TaskId, ActionFuture<T>> wait, Consumer<T> validator)
            throws Exception {
        // Start blocking test task
        ActionFuture<TestTaskPlugin.NodesResponse> future = new TestTaskPlugin.NodesRequestBuilder(client(),
                TestTaskPlugin.TestTaskAction.INSTANCE).setShouldStoreResult(storeResult).execute();
        ActionFuture<T> waitResponseFuture;
        TaskId taskId;
        try {
            taskId = waitForTestTaskStartOnAllNodes();
            // Wait for the task to start
            assertBusy(() -> client().admin().cluster().prepareGetTask(taskId).get());
            // Register listeners so we can be sure the waiting started
            CountDownLatch waitForWaitingToStart = new CountDownLatch(1);
            for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
                ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() {
                    @Override
                    public void waitForTaskCompletion(Task task) {
                        // Fired when some node starts waiting for a task to complete
                        waitForWaitingToStart.countDown();
                    }
                    @Override
                    public void onTaskRegistered(Task task) {
                    }
                    @Override
                    public void onTaskUnregistered(Task task) {
                    }
                });
            }
            // Spin up a request to wait for the test task to finish
            waitResponseFuture = wait.apply(taskId);
            /* Wait for the wait to start. This should count down just *before* we wait for completion but after the list/get has got a
             * reference to the running task. Because we unblock immediately after this the task may no longer be running for us to wait
             * on which is fine. */
            waitForWaitingToStart.await();
        } finally {
            // Unblock the request so the wait for completion request can finish
            new TestTaskPlugin.UnblockTestTasksRequestBuilder(client(), TestTaskPlugin.UnblockTestTasksAction.INSTANCE).get();
        }
        // Now that the task is unblocked the list response will come back
        T waitResponse = waitResponseFuture.get();
        validator.accept(waitResponse);
        TestTaskPlugin.NodesResponse response = future.get();
        assertEquals(emptyList(), response.failures());
    }
public void testListTasksWaitForTimeout() throws Exception {
waitForTimeoutTestCase(id -> {
ListTasksResponse response = client().admin().cluster().prepareListTasks()
.setActions(TestTaskPlugin.TestTaskAction.NAME).setWaitForCompletion(true).setTimeout(timeValueMillis(100))
.get();
assertThat(response.getNodeFailures(), not(empty()));
return response.getNodeFailures();
});
}
public void testGetTaskWaitForTimeout() throws Exception {
waitForTimeoutTestCase(id -> {
Exception e = expectThrows(Exception.class,
() -> client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).setTimeout(timeValueMillis(100)).get());
return singleton(e);
});
}
    /**
     * Runs one wait-for-timeout scenario: starts the blocking test task, calls {@code wait} while
     * the task is still blocked (so the wait must time out), and asserts that every accumulated
     * failure wraps a timeout exception.
     *
     * @param wait wait for the running task and return all the failures you accumulated waiting for it
     */
    private void waitForTimeoutTestCase(Function<TaskId, ? extends Iterable<? extends Throwable>> wait) throws Exception {
        // Start blocking test task
        ActionFuture<TestTaskPlugin.NodesResponse> future = new TestTaskPlugin.NodesRequestBuilder(client(),
                TestTaskPlugin.TestTaskAction.INSTANCE).execute();
        try {
            TaskId taskId = waitForTestTaskStartOnAllNodes();
            // Wait for the task to start
            assertBusy(() -> client().admin().cluster().prepareGetTask(taskId).get());
            // Spin up a request that should wait for those tasks to finish
            // It will timeout because we haven't unblocked the tasks
            Iterable<? extends Throwable> failures = wait.apply(taskId);
            for (Throwable failure : failures) {
                // Either the wait itself or the transport layer may surface the timeout
                assertNotNull(
                        ExceptionsHelper.unwrap(failure, ElasticsearchTimeoutException.class, ReceiveTimeoutTransportException.class));
            }
        } finally {
            // Now we can unblock those requests
            new TestTaskPlugin.UnblockTestTasksRequestBuilder(client(), TestTaskPlugin.UnblockTestTasksAction.INSTANCE).get();
        }
        future.get();
    }
/**
* Wait for the test task to be running on all nodes and return the TaskId of the primary task.
*/
private TaskId waitForTestTaskStartOnAllNodes() throws Exception {
assertBusy(() -> {
List<TaskInfo> tasks = client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]")
.get().getTasks();
assertEquals(internalCluster().size(), tasks.size());
});
List<TaskInfo> task = client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME).get().getTasks();
assertThat(task, hasSize(1));
return task.get(0).getTaskId();
}
public void testTasksListWaitForNoTask() throws Exception {
// Spin up a request to wait for no matching tasks
ActionFuture<ListTasksResponse> waitResponseFuture = client().admin().cluster().prepareListTasks()
.setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").setWaitForCompletion(true).setTimeout(timeValueMillis(10))
.execute();
// It should finish quickly and without complaint
assertThat(waitResponseFuture.get().getTasks(), empty());
}
public void testTasksGetWaitForNoTask() throws Exception {
// Spin up a request to wait for no matching tasks
ActionFuture<GetTaskResponse> waitResponseFuture = client().admin().cluster().prepareGetTask("notfound:1")
.setWaitForCompletion(true).setTimeout(timeValueMillis(10))
.execute();
// It should finish quickly and without complaint
expectNotFound(waitResponseFuture::get);
}
public void testTasksWaitForAllTask() throws Exception {
// Spin up a request to wait for all tasks in the cluster to make sure it doesn't cause an infinite loop
ListTasksResponse response = client().admin().cluster().prepareListTasks().setWaitForCompletion(true)
.setTimeout(timeValueSeconds(10)).get();
// It should finish quickly and without complaint and list the list tasks themselves
assertThat(response.getNodeFailures(), emptyCollectionOf(ElasticsearchException.class));
assertThat(response.getTaskFailures(), emptyCollectionOf(TaskOperationFailure.class));
assertThat(response.getTasks().size(), greaterThanOrEqualTo(1));
}
    /**
     * Runs the non-blocking test task with result storing enabled and verifies that the result
     * document is written to the tasks results index with the expected task metadata and response,
     * is searchable, and is returned by the get task API.
     */
    public void testTaskStoringSuccesfulResult() throws Exception {
        // Randomly create an empty index to make sure the type is created automatically
        if (randomBoolean()) {
            logger.info("creating an empty results index with custom settings");
            assertAcked(client().admin().indices().prepareCreate(TaskResultsService.TASK_INDEX));
        }
        registerTaskManageListeners(TestTaskPlugin.TestTaskAction.NAME); // we need this to get task id of the process
        // Start non-blocking test task
        new TestTaskPlugin.NodesRequestBuilder(client(), TestTaskPlugin.TestTaskAction.INSTANCE)
                .setShouldStoreResult(true).setShouldBlock(false).get();
        // Exactly one registration event is expected for the coordinating task
        List<TaskInfo> events = findEvents(TestTaskPlugin.TestTaskAction.NAME, Tuple::v1);
        assertEquals(1, events.size());
        TaskInfo taskInfo = events.get(0);
        TaskId taskId = taskInfo.getTaskId();
        // The stored result document is keyed by the task id
        GetResponse resultDoc = client()
                .prepareGet(TaskResultsService.TASK_INDEX, TaskResultsService.TASK_TYPE, taskId.toString()).get();
        assertTrue(resultDoc.isExists());
        Map<String, Object> source = resultDoc.getSource();
        // The "task" object mirrors the task's metadata
        @SuppressWarnings("unchecked")
        Map<String, Object> task = (Map<String, Object>) source.get("task");
        assertEquals(taskInfo.getTaskId().getNodeId(), task.get("node"));
        assertEquals(taskInfo.getAction(), task.get("action"));
        assertEquals(Long.toString(taskInfo.getId()), task.get("id").toString());
        // The "response" object holds the stored result; a successful run has no failures
        @SuppressWarnings("unchecked")
        Map<String, Object> result = (Map<String, Object>) source.get("response");
        assertEquals("0", result.get("failure_count").toString());
        assertNull(source.get("failure"));
        // Refresh so the stored document becomes visible to search
        assertNoFailures(client().admin().indices().prepareRefresh(TaskResultsService.TASK_INDEX).get());
        SearchResponse searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX)
            .setTypes(TaskResultsService.TASK_TYPE)
            .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.getAction())))
            .get();
        assertEquals(1L, searchResponse.getHits().getTotalHits());
        searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX).setTypes(TaskResultsService.TASK_TYPE)
            .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.getTaskId().getNodeId())))
            .get();
        assertEquals(1L, searchResponse.getHits().getTotalHits());
        // The get task API serves the same stored response and reports no error
        GetTaskResponse getResponse = expectFinishedTask(taskId);
        assertEquals(result, getResponse.getTask().getResponseAsMap());
        assertNull(getResponse.getTask().getError());
    }
    /**
     * Runs the non-blocking test task configured to fail, with result storing enabled, and verifies
     * that the stored document records the error (and no response) and that the get task API
     * returns the same error.
     */
    public void testTaskStoringFailureResult() throws Exception {
        registerTaskManageListeners(TestTaskPlugin.TestTaskAction.NAME); // we need this to get task id of the process
        // Start non-blocking test task that should fail
        assertThrows(
            new TestTaskPlugin.NodesRequestBuilder(client(), TestTaskPlugin.TestTaskAction.INSTANCE)
                .setShouldFail(true)
                .setShouldStoreResult(true)
                .setShouldBlock(false),
            IllegalStateException.class
        );
        // Exactly one registration event is expected for the coordinating task
        List<TaskInfo> events = findEvents(TestTaskPlugin.TestTaskAction.NAME, Tuple::v1);
        assertEquals(1, events.size());
        TaskInfo failedTaskInfo = events.get(0);
        TaskId failedTaskId = failedTaskInfo.getTaskId();
        // The failure is still stored in the results index, keyed by the task id
        GetResponse failedResultDoc = client()
            .prepareGet(TaskResultsService.TASK_INDEX, TaskResultsService.TASK_TYPE, failedTaskId.toString())
            .get();
        assertTrue(failedResultDoc.isExists());
        Map<String, Object> source = failedResultDoc.getSource();
        // The "task" object mirrors the failed task's metadata
        @SuppressWarnings("unchecked")
        Map<String, Object> task = (Map<String, Object>) source.get("task");
        assertEquals(failedTaskInfo.getTaskId().getNodeId(), task.get("node"));
        assertEquals(failedTaskInfo.getAction(), task.get("action"));
        assertEquals(Long.toString(failedTaskInfo.getId()), task.get("id").toString());
        // The "error" object captures the serialized exception
        @SuppressWarnings("unchecked")
        Map<String, Object> error = (Map<String, Object>) source.get("error");
        assertEquals("Simulating operation failure", error.get("reason"));
        assertEquals("illegal_state_exception", error.get("type"));
        // A failed task stores no result payload
        assertNull(source.get("result"));
        // The get task API serves the same stored error and no response
        GetTaskResponse getResponse = expectFinishedTask(failedTaskId);
        assertNull(getResponse.getTask().getResponse());
        assertEquals(error, getResponse.getTask().getErrorAsMap());
    }
public void testGetTaskNotFound() throws Exception {
// Node isn't found, tasks index doesn't even exist
expectNotFound(() -> client().admin().cluster().prepareGetTask("not_a_node:1").get());
// Node exists but the task still isn't found
expectNotFound(() -> client().admin().cluster().prepareGetTask(new TaskId(internalCluster().getNodeNames()[0], 1)).get());
}
    /**
     * A stored task result must remain fetchable even when the node that ran the task is no longer
     * part of the cluster. Stores a fake result attributed to a non-existent node, then fetches it
     * by task id through the get task API.
     */
    public void testNodeNotFoundButTaskFound() throws Exception {
        // Save a fake task that looks like it is from a node that isn't part of the cluster
        CyclicBarrier b = new CyclicBarrier(2);
        TaskResultsService resultsService = internalCluster().getInstance(TaskResultsService.class);
        resultsService.storeResult(new TaskResult(
            new TaskInfo(new TaskId("fake", 1), "test", "test", "", null, 0, 0, false, TaskId.EMPTY_TASK_ID, Collections.emptyMap()),
            new RuntimeException("test")),
            new ActionListener<Void>() {
                @Override
                public void onResponse(Void response) {
                    try {
                        // Rendezvous with the test thread once the store completed
                        b.await();
                    } catch (InterruptedException | BrokenBarrierException e) {
                        onFailure(e);
                    }
                }
                @Override
                public void onFailure(Exception e) {
                    throw new RuntimeException(e);
                }
            });
        // Block until the result has been stored
        b.await();
        // Now we can find it!
        GetTaskResponse response = expectFinishedTask(new TaskId("fake:1"));
        assertEquals("test", response.getTask().getTask().getAction());
        assertNotNull(response.getTask().getError());
        assertNull(response.getTask().getResponse());
    }
@Override
public void tearDown() throws Exception {
for (Map.Entry<Tuple<String, String>, RecordingTaskManagerListener> entry : listeners.entrySet()) {
((MockTaskManager) internalCluster().getInstance(TransportService.class, entry.getKey().v1()).getTaskManager())
.removeListener(entry.getValue());
}
listeners.clear();
super.tearDown();
}
/**
* Registers recording task event listeners with the given action mask on all nodes
*/
private void registerTaskManageListeners(String actionMasks) {
for (String nodeName : internalCluster().getNodeNames()) {
DiscoveryNode node = internalCluster().getInstance(ClusterService.class, nodeName).localNode();
RecordingTaskManagerListener listener = new RecordingTaskManagerListener(node.getId(), actionMasks.split(","));
((MockTaskManager) internalCluster().getInstance(TransportService.class, nodeName).getTaskManager()).addListener(listener);
RecordingTaskManagerListener oldListener = listeners.put(new Tuple<>(node.getName(), actionMasks), listener);
assertNull(oldListener);
}
}
/**
* Resets all recording task event listeners with the given action mask on all nodes
*/
private void resetTaskManageListeners(String actionMasks) {
for (Map.Entry<Tuple<String, String>, RecordingTaskManagerListener> entry : listeners.entrySet()) {
if (actionMasks == null || entry.getKey().v2().equals(actionMasks)) {
entry.getValue().reset();
}
}
}
/**
* Returns the number of events that satisfy the criteria across all nodes
*
* @param actionMasks action masks to match
* @return number of events that satisfy the criteria
*/
private int numberOfEvents(String actionMasks, Function<Tuple<Boolean, TaskInfo>, Boolean> criteria) {
return findEvents(actionMasks, criteria).size();
}
/**
* Returns all events that satisfy the criteria across all nodes
*
* @param actionMasks action masks to match
* @return number of events that satisfy the criteria
*/
private List<TaskInfo> findEvents(String actionMasks, Function<Tuple<Boolean, TaskInfo>, Boolean> criteria) {
List<TaskInfo> events = new ArrayList<>();
for (Map.Entry<Tuple<String, String>, RecordingTaskManagerListener> entry : listeners.entrySet()) {
if (actionMasks == null || entry.getKey().v2().equals(actionMasks)) {
for (Tuple<Boolean, TaskInfo> taskEvent : entry.getValue().getEvents()) {
if (criteria.apply(taskEvent)) {
events.add(taskEvent.v2());
}
}
}
}
return events;
}
/**
* Asserts that all tasks in the tasks list have the same parentTask
*/
private void assertParentTask(List<TaskInfo> tasks, TaskInfo parentTask) {
for (TaskInfo task : tasks) {
assertParentTask(task, parentTask);
}
}
private void assertParentTask(TaskInfo task, TaskInfo parentTask) {
assertTrue(task.getParentTaskId().isSet());
assertEquals(parentTask.getTaskId().getNodeId(), task.getParentTaskId().getNodeId());
assertTrue(Strings.hasLength(task.getParentTaskId().getNodeId()));
assertEquals(parentTask.getId(), task.getParentTaskId().getId());
}
private ResourceNotFoundException expectNotFound(ThrowingRunnable r) {
Exception e = expectThrows(Exception.class, r);
ResourceNotFoundException notFound = (ResourceNotFoundException) ExceptionsHelper.unwrap(e, ResourceNotFoundException.class);
if (notFound == null) throw new RuntimeException("Expected ResourceNotFoundException", e);
return notFound;
}
    /**
     * Fetches a completed task through the get task API, relying on its "fall back to reading from
     * the tasks results index" behavior. Asserts some obvious properties of the fetched task and
     * returns the response.
     */
    private GetTaskResponse expectFinishedTask(TaskId taskId) throws IOException {
        GetTaskResponse response = client().admin().cluster().prepareGetTask(taskId).get();
        assertTrue("the task should have been completed before fetching", response.getTask().isCompleted());
        TaskInfo info = response.getTask().getTask();
        assertEquals(taskId, info.getTaskId());
        assertNull(info.getStatus()); // The test task doesn't have any status
        return response;
    }
}
| apache-2.0 |
GunoH/intellij-community | platform/execution-impl/src/com/intellij/execution/compound/CompoundRunConfigurationSettingsEditor.java | 6223 | // Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.execution.compound;
import com.intellij.execution.BeforeRunTask;
import com.intellij.execution.ExecutionTarget;
import com.intellij.execution.RunnerAndConfigurationSettings;
import com.intellij.execution.configurations.RunConfiguration;
import com.intellij.execution.impl.RunConfigurationBeforeRunProvider;
import com.intellij.execution.impl.RunConfigurationSelector;
import com.intellij.execution.impl.RunManagerImpl;
import com.intellij.execution.impl.RunManagerImplKt;
import com.intellij.ide.DataManager;
import com.intellij.lang.LangBundle;
import com.intellij.openapi.actionSystem.ActionToolbarPosition;
import com.intellij.openapi.options.ConfigurationException;
import com.intellij.openapi.options.SettingsEditor;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Pair;
import com.intellij.ui.*;
import com.intellij.ui.components.JBList;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.ui.JBUI;
import org.jetbrains.annotations.NotNull;
import javax.swing.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Settings editor UI for a {@link CompoundRunConfiguration}: shows the child run configurations
 * together with their execution targets and lets the user add, edit and remove entries while
 * preventing self-referencing cycles.
 */
public final class CompoundRunConfigurationSettingsEditor extends SettingsEditor<CompoundRunConfiguration> {
  private final Project myProject;
  // One row per (configuration, execution target) pair
  private final JBList<Pair<RunConfiguration, ExecutionTarget>> myList;
  private final RunManagerImpl myRunManager;
  // Kept sorted with the same comparator the compound configuration uses for its children
  private final SortedListModel<Pair<RunConfiguration, ExecutionTarget>> myModel;
  // Configuration last shown via resetEditorFrom(); consulted when filtering addable candidates
  private CompoundRunConfiguration mySnapshot;

  public CompoundRunConfigurationSettingsEditor(@NotNull Project project) {
    myProject = project;
    myRunManager = RunManagerImpl.getInstanceImpl(project);
    myModel = new SortedListModel<>((o1, o2) -> CompoundRunConfiguration.COMPARATOR.compare(o1.first, o2.first));
    myList = new JBList<>(myModel);
    myList.setCellRenderer(SimpleListCellRenderer.create((label, value, index) -> {
      label.setIcon(value.first.getType().getIcon());
      label.setText(ConfigurationSelectionUtil.getDisplayText(value.first, value.second));
    }));
    myList.setVisibleRowCount(15);
  }

  /**
   * Returns whether {@code candidate} can be added under {@code root} without creating a cycle.
   * Recurses through "run another configuration" before-run tasks and nested compound
   * configurations.
   */
  private boolean canBeAdded(@NotNull RunConfiguration candidate, @NotNull final CompoundRunConfiguration root) {
    // The root itself (identified by type + name) must never become its own child
    if (candidate.getType() == root.getType() && candidate.getName().equals(root.getName())) return false;
    List<BeforeRunTask<?>> tasks = RunManagerImplKt.doGetBeforeRunTasks(candidate);
    for (BeforeRunTask<?> task : tasks) {
      if (task instanceof RunConfigurationBeforeRunProvider.RunConfigurableBeforeRunTask) {
        RunConfigurationBeforeRunProvider.RunConfigurableBeforeRunTask runTask
          = (RunConfigurationBeforeRunProvider.RunConfigurableBeforeRunTask)task;
        RunnerAndConfigurationSettings settings = runTask.getSettings();
        if (settings != null) {
          // A before-run task that (transitively) launches the root is also a cycle
          if (!canBeAdded(settings.getConfiguration(), root)) return false;
        }
      }
    }
    if (candidate instanceof CompoundRunConfiguration) {
      for (RunConfiguration configuration : ((CompoundRunConfiguration)candidate).getConfigurationsWithTargets(myRunManager).keySet()) {
        if (!canBeAdded(configuration, root)) {
          return false;
        }
      }
    }
    return true;
  }

  @Override
  protected void resetEditorFrom(@NotNull CompoundRunConfiguration compoundRunConfiguration) {
    // Replace the list contents with the configuration's current children
    myModel.clear();
    myModel.addAll(ContainerUtil.map2List(compoundRunConfiguration.getConfigurationsWithTargets(myRunManager)));
    mySnapshot = compoundRunConfiguration;
  }

  @Override
  protected void applyEditorTo(@NotNull CompoundRunConfiguration compoundConfiguration) throws ConfigurationException {
    // Re-validate every entry before applying, rejecting anything that would form a cycle
    Map<RunConfiguration, ExecutionTarget> checked = new HashMap<>();
    for (int i = 0; i < myModel.getSize(); i++) {
      Pair<RunConfiguration, ExecutionTarget> configurationAndTarget = myModel.get(i);
      RunConfiguration configuration = configurationAndTarget.first;
      String message =
        LangBundle.message("compound.run.configuration.cycle", configuration.getType().getDisplayName(), configuration.getName());
      if (!canBeAdded(configuration, compoundConfiguration)) {
        throw new ConfigurationException(message);
      }
      checked.put(configuration, configurationAndTarget.second);
    }
    compoundConfiguration.setConfigurationsWithTargets(checked);
  }

  @NotNull
  @Override
  protected JComponent createEditor() {
    ToolbarDecorator decorator = ToolbarDecorator.createDecorator(myList)
      .setToolbarPosition(ActionToolbarPosition.TOP)
      .setPanelBorder(JBUI.Borders.empty())
      .disableUpDownActions();
    // "Add": offer every known configuration that is not already a child and would not cycle
    decorator.setAddAction(new AnActionButtonRunnable() {
      @Override
      public void run(AnActionButton button) {
        List<RunConfiguration> configurations = new ArrayList<>();
        for (RunnerAndConfigurationSettings settings : myRunManager.getAllSettings()) {
          RunConfiguration configuration = settings.getConfiguration();
          if (!mySnapshot.getConfigurationsWithTargets(myRunManager).containsKey(configuration) && canBeAdded(configuration, mySnapshot)) {
            configurations.add(configuration);
          }
        }
        ConfigurationSelectionUtil.createPopup(myProject, myRunManager, configurations, (selectedConfigs, selectedTarget) -> {
          for (RunConfiguration each : selectedConfigs) {
            myModel.add(Pair.create(each, selectedTarget));
          }
        }).showUnderneathOf(decorator.getActionsPanel());
      }
    }).setEditAction(new AnActionButtonRunnable() {
      @Override
      public void run(AnActionButton button) {
        // "Edit": jump to the selected child configuration in the run configurations UI
        int index = myList.getSelectedIndex();
        if (index == -1) return;
        RunConfiguration configuration = myModel.get(index).first;
        RunConfigurationSelector selector =
          RunConfigurationSelector.KEY.getData(DataManager.getInstance().getDataContext(button.getContextComponent()));
        if (selector != null) {
          selector.select(configuration);
        }
      }
    });
    return decorator.createPanel();
  }
}
| apache-2.0 |
sjug/origin | vendor/github.com/Azure/azure-sdk-for-go/services/databoxedge/mgmt/2019-07-01/databoxedge/storageaccountcredentials.go | 17980 | package databoxedge
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// StorageAccountCredentialsClient is the client for the StorageAccountCredentials methods of the Databoxedge service.
// NOTE: this file is generated by AutoRest; hand edits will be lost on regeneration.
type StorageAccountCredentialsClient struct {
	BaseClient // shared client configuration (base URI, subscription id); declared elsewhere in this package
}
// NewStorageAccountCredentialsClient creates an instance of the StorageAccountCredentialsClient client
// using DefaultBaseURI as the endpoint.
func NewStorageAccountCredentialsClient(subscriptionID string) StorageAccountCredentialsClient {
	client := NewStorageAccountCredentialsClientWithBaseURI(DefaultBaseURI, subscriptionID)
	return client
}
// NewStorageAccountCredentialsClientWithBaseURI creates an instance of the StorageAccountCredentialsClient
// client against the given base URI.
func NewStorageAccountCredentialsClientWithBaseURI(baseURI string, subscriptionID string) StorageAccountCredentialsClient {
	base := NewWithBaseURI(baseURI, subscriptionID)
	return StorageAccountCredentialsClient{BaseClient: base}
}
// CreateOrUpdate creates or updates the storage account credential. This is a long-running
// operation: the returned future tracks the operation started by the initial response.
// Parameters:
// deviceName - the device name.
// name - the storage account credential name.
// storageAccountCredential - the storage account credential.
// resourceGroupName - the resource group name.
func (client StorageAccountCredentialsClient) CreateOrUpdate(ctx context.Context, deviceName string, name string, storageAccountCredential StorageAccountCredential, resourceGroupName string) (result StorageAccountCredentialsCreateOrUpdateFuture, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountCredentialsClient.CreateOrUpdate")
		defer func() {
			sc := -1
			if result.Response() != nil {
				sc = result.Response().StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Client-side validation: the credential properties and their Alias are required; when an
	// AccountKey is supplied, its Value must be set as well.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: storageAccountCredential,
			Constraints: []validation.Constraint{{Target: "storageAccountCredential.StorageAccountCredentialProperties", Name: validation.Null, Rule: true,
				Chain: []validation.Constraint{{Target: "storageAccountCredential.StorageAccountCredentialProperties.Alias", Name: validation.Null, Rule: true, Chain: nil},
					{Target: "storageAccountCredential.StorageAccountCredentialProperties.AccountKey", Name: validation.Null, Rule: false,
						Chain: []validation.Constraint{{Target: "storageAccountCredential.StorageAccountCredentialProperties.AccountKey.Value", Name: validation.Null, Rule: true, Chain: nil}}},
				}}}}}); err != nil {
		return result, validation.NewError("databoxedge.StorageAccountCredentialsClient", "CreateOrUpdate", err.Error())
	}
	req, err := client.CreateOrUpdatePreparer(ctx, deviceName, name, storageAccountCredential, resourceGroupName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "CreateOrUpdate", nil, "Failure preparing request")
		return
	}
	result, err = client.CreateOrUpdateSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
		return
	}
	return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request: a PUT with the credential as a
// JSON body against the storageAccountCredentials/{name} route. All path segments are
// URL-encoded before substitution into the route template.
func (client StorageAccountCredentialsClient) CreateOrUpdatePreparer(ctx context.Context, deviceName string, name string, storageAccountCredential StorageAccountCredential, resourceGroupName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"deviceName":        autorest.Encode("path", deviceName),
		"name":              autorest.Encode("path", name),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2019-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}", pathParameters),
		autorest.WithJSON(storageAccountCredential),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client StorageAccountCredentialsClient) CreateOrUpdateSender(req *http.Request) (future StorageAccountCredentialsCreateOrUpdateFuture, err error) {
	// Retry-with-registration handles the "subscription not registered for provider" case
	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
	var resp *http.Response
	resp, err = autorest.SendWithSender(client, req, sd...)
	if err != nil {
		return
	}
	// Wrap the initial response in a future that tracks the long-running operation
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body. Status codes 200 (OK) and 202 (Accepted) are treated as success.
func (client StorageAccountCredentialsClient) CreateOrUpdateResponder(resp *http.Response) (result StorageAccountCredential, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
// Delete deletes the storage account credential. This is a long-running operation: the returned
// future tracks the operation started by the initial response.
// Parameters:
// deviceName - the device name.
// name - the storage account credential name.
// resourceGroupName - the resource group name.
func (client StorageAccountCredentialsClient) Delete(ctx context.Context, deviceName string, name string, resourceGroupName string) (result StorageAccountCredentialsDeleteFuture, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountCredentialsClient.Delete")
		defer func() {
			sc := -1
			if result.Response() != nil {
				sc = result.Response().StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.DeletePreparer(ctx, deviceName, name, resourceGroupName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "Delete", nil, "Failure preparing request")
		return
	}
	result, err = client.DeleteSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "Delete", result.Response(), "Failure sending request")
		return
	}
	return
}
// DeletePreparer prepares the Delete request: a DELETE against the storageAccountCredentials/{name}
// route. All path segments are URL-encoded before substitution into the route template.
func (client StorageAccountCredentialsClient) DeletePreparer(ctx context.Context, deviceName string, name string, resourceGroupName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"deviceName":        autorest.Encode("path", deviceName),
		"name":              autorest.Encode("path", name),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2019-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client StorageAccountCredentialsClient) DeleteSender(req *http.Request) (future StorageAccountCredentialsDeleteFuture, err error) {
	// Retry-with-registration handles the "subscription not registered for provider" case
	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
	var resp *http.Response
	resp, err = autorest.SendWithSender(client, req, sd...)
	if err != nil {
		return
	}
	// Wrap the initial response in a future that tracks the long-running operation
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client StorageAccountCredentialsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	// Record the raw response on the result regardless of the validation outcome.
	result.Response = resp
	// 200/202/204 are the accepted status codes for a delete; anything else
	// becomes an error. ByClosing guarantees the body is released.
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
		autorest.ByClosing())
	return
}
// Get gets the properties of the specified storage account credential.
// Parameters:
// deviceName - the device name.
// name - the storage account credential name.
// resourceGroupName - the resource group name.
func (client StorageAccountCredentialsClient) Get(ctx context.Context, deviceName string, name string, resourceGroupName string) (result StorageAccountCredential, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountCredentialsClient.Get")
		// When the span closes, report the final HTTP status code, or -1 if
		// no response was ever received.
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Prepare -> send -> respond; each failure is wrapped with client/method
	// context and returned through the named result values.
	req, err := client.GetPreparer(ctx, deviceName, name, resourceGroupName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "Get", nil, "Failure preparing request")
		return
	}
	resp, err := client.GetSender(req)
	if err != nil {
		// Keep whatever response we got so callers (and the tracing deferred
		// func above) can still inspect the status code.
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "Get", resp, "Failure sending request")
		return
	}
	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "Get", resp, "Failure responding to request")
	}
	return
}
// GetPreparer prepares the Get request.
func (client StorageAccountCredentialsClient) GetPreparer(ctx context.Context, deviceName string, name string, resourceGroupName string) (*http.Request, error) {
	const APIVersion = "2019-07-01"

	// URL path segments, each individually percent-encoded.
	pathParams := map[string]interface{}{
		"deviceName":        autorest.Encode("path", deviceName),
		"name":              autorest.Encode("path", name),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	queryParams := map[string]interface{}{
		"api-version": APIVersion,
	}

	// Assemble the GET request against the service base URL and bind it to
	// the caller's context.
	prep := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}", pathParams),
		autorest.WithQueryParameters(queryParams))
	return prep.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client StorageAccountCredentialsClient) GetSender(req *http.Request) (*http.Response, error) {
	// Standard decorator chain, including retry-with-registration handling.
	decorators := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
	return autorest.SendWithSender(client, req, decorators...)
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client StorageAccountCredentialsClient) GetResponder(resp *http.Response) (result StorageAccountCredential, err error) {
	// Accept only 200 OK; unmarshal the JSON payload into the result, then
	// close the body unconditionally.
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	// Attach the raw response after unmarshalling so it is not clobbered.
	result.Response = autorest.Response{Response: resp}
	return
}
// ListByDataBoxEdgeDevice sends the list by data box edge device request.
// Parameters:
// deviceName - the device name.
// resourceGroupName - the resource group name.
func (client StorageAccountCredentialsClient) ListByDataBoxEdgeDevice(ctx context.Context, deviceName string, resourceGroupName string) (result StorageAccountCredentialListPage, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountCredentialsClient.ListByDataBoxEdgeDevice")
		// When the span closes, report the final HTTP status code, or -1 if
		// no response was ever received.
		defer func() {
			sc := -1
			if result.sacl.Response.Response != nil {
				sc = result.sacl.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Wire up the pager's "next page" function before issuing the first request.
	result.fn = client.listByDataBoxEdgeDeviceNextResults
	req, err := client.ListByDataBoxEdgeDevicePreparer(ctx, deviceName, resourceGroupName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "ListByDataBoxEdgeDevice", nil, "Failure preparing request")
		return
	}
	resp, err := client.ListByDataBoxEdgeDeviceSender(req)
	if err != nil {
		// Keep whatever response we got so callers (and the tracing deferred
		// func above) can still inspect the status code.
		result.sacl.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "ListByDataBoxEdgeDevice", resp, "Failure sending request")
		return
	}
	result.sacl, err = client.ListByDataBoxEdgeDeviceResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "ListByDataBoxEdgeDevice", resp, "Failure responding to request")
	}
	return
}
// ListByDataBoxEdgeDevicePreparer prepares the ListByDataBoxEdgeDevice request.
func (client StorageAccountCredentialsClient) ListByDataBoxEdgeDevicePreparer(ctx context.Context, deviceName string, resourceGroupName string) (*http.Request, error) {
	const APIVersion = "2019-07-01"

	// URL path segments, each individually percent-encoded.
	pathParams := map[string]interface{}{
		"deviceName":        autorest.Encode("path", deviceName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	queryParams := map[string]interface{}{
		"api-version": APIVersion,
	}

	// Assemble the GET request for the collection endpoint and bind it to
	// the caller's context.
	prep := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials", pathParams),
		autorest.WithQueryParameters(queryParams))
	return prep.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByDataBoxEdgeDeviceSender sends the ListByDataBoxEdgeDevice request. The method will close the
// http.Response Body if it receives an error.
func (client StorageAccountCredentialsClient) ListByDataBoxEdgeDeviceSender(req *http.Request) (*http.Response, error) {
	// Standard decorator chain, including retry-with-registration handling.
	decorators := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
	return autorest.SendWithSender(client, req, decorators...)
}
// ListByDataBoxEdgeDeviceResponder handles the response to the ListByDataBoxEdgeDevice request. The method always
// closes the http.Response Body.
func (client StorageAccountCredentialsClient) ListByDataBoxEdgeDeviceResponder(resp *http.Response) (result StorageAccountCredentialList, err error) {
	// Accept only 200 OK; unmarshal the JSON payload into the result, then
	// close the body unconditionally.
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	// Attach the raw response after unmarshalling so it is not clobbered.
	result.Response = autorest.Response{Response: resp}
	return
}
// listByDataBoxEdgeDeviceNextResults retrieves the next set of results, if any.
func (client StorageAccountCredentialsClient) listByDataBoxEdgeDeviceNextResults(ctx context.Context, lastResults StorageAccountCredentialList) (result StorageAccountCredentialList, err error) {
	req, err := lastResults.storageAccountCredentialListPreparer(ctx)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "listByDataBoxEdgeDeviceNextResults", nil, "Failure preparing next results request")
	}
	// A nil request with no error means there is no next-page link: we are
	// done paging, so return an empty result without error.
	if req == nil {
		return
	}
	resp, err := client.ListByDataBoxEdgeDeviceSender(req)
	if err != nil {
		// Keep whatever response we got so the caller can inspect the status.
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "listByDataBoxEdgeDeviceNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListByDataBoxEdgeDeviceResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "databoxedge.StorageAccountCredentialsClient", "listByDataBoxEdgeDeviceNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListByDataBoxEdgeDeviceComplete enumerates all values, automatically crossing page boundaries as required.
func (client StorageAccountCredentialsClient) ListByDataBoxEdgeDeviceComplete(ctx context.Context, deviceName string, resourceGroupName string) (result StorageAccountCredentialListIterator, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountCredentialsClient.ListByDataBoxEdgeDevice")
		// When the span closes, report the final HTTP status code, or -1 if
		// no response was ever received.
		// NOTE(review): the nil check uses result.Response() while the read
		// uses result.page.Response() — presumably equivalent accessors for
		// the same underlying response; confirm against the iterator type.
		defer func() {
			sc := -1
			if result.Response().Response.Response != nil {
				sc = result.page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Fetch the first page; the returned iterator pages transparently.
	result.page, err = client.ListByDataBoxEdgeDevice(ctx, deviceName, resourceGroupName)
	return
}
| apache-2.0 |
mbiarnes/kie-wb-common | kie-wb-common-stunner/kie-wb-common-stunner-sets/kie-wb-common-stunner-bpmn/kie-wb-common-stunner-bpmn-backend/src/main/java/org/kie/workbench/common/stunner/bpmn/backend/converters/tostunner/activities/BaseCallActivityConverter.java | 3844 | /*
* Copyright 2018 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.stunner.bpmn.backend.converters.tostunner.activities;
import org.eclipse.bpmn2.CallActivity;
import org.kie.workbench.common.stunner.bpmn.backend.converters.Result;
import org.kie.workbench.common.stunner.bpmn.backend.converters.TypedFactoryManager;
import org.kie.workbench.common.stunner.bpmn.backend.converters.tostunner.BpmnNode;
import org.kie.workbench.common.stunner.bpmn.backend.converters.tostunner.NodeConverter;
import org.kie.workbench.common.stunner.bpmn.backend.converters.tostunner.properties.CallActivityPropertyReader;
import org.kie.workbench.common.stunner.bpmn.backend.converters.tostunner.properties.PropertyReaderFactory;
import org.kie.workbench.common.stunner.bpmn.definition.BaseReusableSubprocess;
import org.kie.workbench.common.stunner.bpmn.definition.property.dataio.DataIOSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.general.BPMNGeneralSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.general.Documentation;
import org.kie.workbench.common.stunner.bpmn.definition.property.general.Name;
import org.kie.workbench.common.stunner.bpmn.definition.property.task.BaseReusableSubprocessTaskExecutionSet;
import org.kie.workbench.common.stunner.core.graph.Edge;
import org.kie.workbench.common.stunner.core.graph.Node;
import org.kie.workbench.common.stunner.core.graph.content.view.View;
public abstract class BaseCallActivityConverter<R extends BaseReusableSubprocess,
        E extends BaseReusableSubprocessTaskExecutionSet> implements NodeConverter<CallActivity> {

    protected final TypedFactoryManager factoryManager;
    private final PropertyReaderFactory propertyReaderFactory;

    public BaseCallActivityConverter(TypedFactoryManager factoryManager,
                                     PropertyReaderFactory propertyReaderFactory) {
        this.factoryManager = factoryManager;
        this.propertyReaderFactory = propertyReaderFactory;
    }

    /**
     * Maps a BPMN {@link CallActivity} onto a Stunner node, copying the
     * general, execution, data I/O, layout and visual properties read from
     * the activity onto the node's definition.
     */
    @SuppressWarnings("unchecked")
    public Result<BpmnNode> convert(CallActivity activity) {
        final CallActivityPropertyReader reader = propertyReaderFactory.of(activity);
        final Node<View<R>, Edge> node = createNode(activity, reader);
        final R definition = node.getContent().getDefinition();

        // General metadata (name + documentation).
        definition.setGeneral(new BPMNGeneralSet(
                new Name(reader.getName()),
                new Documentation(reader.getDocumentation())
        ));
        // Subclass-specific execution semantics and assignments.
        definition.setExecutionSet(createReusableSubprocessTaskExecutionSet(activity, reader));
        definition.setDataIOSet(new DataIOSet(reader.getAssignmentsInfo()));
        // Simulation, sizing and visual styling.
        definition.setSimulationSet(reader.getSimulationSet());
        definition.setDimensionsSet(reader.getRectangleDimensionsSet());
        definition.setFontSet(reader.getFontSet());
        definition.setBackgroundSet(reader.getBackgroundSet());

        node.getContent().setBounds(reader.getBounds());

        return Result.success(BpmnNode.of(node, reader));
    }

    /** Creates the concrete node for the given activity. */
    protected abstract Node<View<R>, Edge> createNode(CallActivity activity, CallActivityPropertyReader p);

    /** Creates the concrete execution set for the given activity. */
    protected abstract E createReusableSubprocessTaskExecutionSet(CallActivity activity,
                                                                 CallActivityPropertyReader p);
}
| apache-2.0 |
tzulitai/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/sql/LegacySinkTest.scala | 9313 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.internal.TableEnvironmentInternal
import org.apache.flink.table.planner.utils.TableTestBase
import org.apache.flink.table.types.logical.{BigIntType, IntType, VarCharType}
import org.junit.Test
/**
 * Plan tests for legacy table sinks (append / retract / upsert) in streaming
 * mode, including the expected failures when a sink cannot consume the
 * changelog produced by its input.
 */
class LegacySinkTest extends TableTestBase {

  private val util = streamTestUtil()
  util.addDataStream[(Int, Long, String)]("MyTable", 'a, 'b, 'c)

  // Logical types shared by the sink field declarations below.
  val STRING = new VarCharType(VarCharType.MAX_LENGTH)
  val LONG = new BigIntType()
  val INT = new IntType()

  // An aggregate produces updates, which an AppendStreamTableSink must reject.
  @Test
  def testExceptionForAppendSink(): Unit = {
    val table = util.tableEnv.sqlQuery("SELECT COUNT(*) AS cnt FROM MyTable GROUP BY a")
    val appendSink = util.createAppendTableSink(Array("a"), Array(LONG))

    thrown.expect(classOf[TableException])
    thrown.expectMessage("AppendStreamTableSink doesn't support consuming update " +
      "changes which is produced by node GroupAggregate(groupBy=[a], select=[a, COUNT(*) AS cnt])")
    util.verifyPlanInsert(table, appendSink, "appendSink")
  }

  // OVER aggregates cannot consume update changes from an upstream aggregate.
  @Test
  def testExceptionForOverAggregate(): Unit = {
    val stmtSet = util.tableEnv.createStatementSet()
    val table = util.tableEnv.sqlQuery("SELECT COUNT(*) AS cnt FROM MyTable GROUP BY a")
    util.tableEnv.createTemporaryView("TempTable", table)
    val retractSink = util.createRetractTableSink(Array("cnt"), Array(LONG))
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
      "retractSink1", retractSink)
    stmtSet.addInsert("retractSink1", table)

    val table2 = util.tableEnv.sqlQuery(
      "SELECT cnt, SUM(cnt) OVER (ORDER BY PROCTIME()) FROM TempTable")
    val retractSink2 = util.createRetractTableSink(Array("cnt", "total"), Array(LONG, LONG))
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
      "retractSink2", retractSink2)
    stmtSet.addInsert("retractSink2", table2)

    thrown.expect(classOf[TableException])
    thrown.expectMessage("OverAggregate doesn't support consuming update changes " +
      "which is produced by node GroupAggregate(groupBy=[a], select=[a, COUNT(*) AS cnt])")
    util.verifyPlan(stmtSet)
  }

  // Insert-only query into an append sink.
  @Test
  def testAppendSink(): Unit = {
    val table = util.tableEnv.sqlQuery("SELECT a + b, c FROM MyTable")
    val appendSink = util.createAppendTableSink(Array("d", "c"), Array(LONG, STRING))
    util.verifyPlanInsert(table, appendSink, "appendSink", ExplainDetail.CHANGELOG_MODE)
  }

  // Single aggregate into a retract sink.
  @Test
  def testRetractSink1(): Unit = {
    val table = util.tableEnv.sqlQuery("SELECT a, COUNT(*) AS cnt FROM MyTable GROUP BY a")
    val retractSink = util.createRetractTableSink(Array("a", "cnt"), Array(INT, LONG))
    util.verifyPlanInsert(table, retractSink, "retractSink", ExplainDetail.CHANGELOG_MODE)
  }

  // Nested aggregates into a retract sink.
  @Test
  def testRetractSink2(): Unit = {
    val sqlQuery =
      """
        |SELECT cnt, COUNT(a) AS a FROM (
        |    SELECT a, COUNT(*) AS cnt FROM MyTable GROUP BY a) t
        |GROUP BY cnt
      """.stripMargin
    val table = util.tableEnv.sqlQuery(sqlQuery)
    val retractSink = util.createRetractTableSink(Array("cnt", "a"), Array(LONG, LONG))
    util.verifyPlanInsert(table, retractSink, "retractSink", ExplainDetail.CHANGELOG_MODE)
  }

  // Upsert sink with the grouping key as the declared unique key.
  @Test
  def testUpsertSink1(): Unit = {
    val table = util.tableEnv.sqlQuery("SELECT a, COUNT(*) AS cnt FROM MyTable GROUP BY a")
    val upsertSink = util.createUpsertTableSink(Array(0), Array("a", "cnt"), Array(INT, LONG))
    util.verifyPlanInsert(table, upsertSink, "upsertSink", ExplainDetail.CHANGELOG_MODE)
  }

  // Upsert sink fed by a multi-way join over CTEs, with no declared key.
  @Test
  def testUpsertSink2(): Unit = {
    val sqlQuery =
      """
        |with t1 AS (SELECT a AS a1, b FROM MyTable WHERE a <= 10),
        |     t2 AS (SELECT * from MyTable WHERE a >= 0),
        |     t3 AS (SELECT a AS a2, c from t2 where b >= 5),
        |     t4 AS (SELECT a AS a3, c AS c1 FROM t2 WHERE b < 5),
        |     t5 AS (SELECT a1, b, c AS c2 FROM t1, t3 where a1 = a2)
        |SELECT a1, b, c1 FROM t4, t5 WHERE a1 = a3
      """.stripMargin
    val table = util.tableEnv.sqlQuery(sqlQuery)
    val upsertSink = util.createUpsertTableSink(Array(), Array("a1", "b", "c1"),
      Array(INT, LONG, STRING))
    util.verifyPlanInsert(table, upsertSink, "upsertSink", ExplainDetail.CHANGELOG_MODE)
  }

  @Test
  def testUpsertSinkWithFilter(): Unit = {
    val sql =
      """
        |SELECT *
        |FROM (SELECT a, COUNT(*) AS cnt FROM MyTable GROUP BY a)
        |WHERE cnt < 10
        |""".stripMargin
    val table = util.tableEnv.sqlQuery(sql)
    val upsertSink = util.createUpsertTableSink(Array(0), Array("a", "cnt"), Array(INT, LONG))
    // a filter after aggregation, the Aggregation and Calc should produce UPDATE_BEFORE
    util.verifyPlanInsert(table, upsertSink, "upsertSink", ExplainDetail.CHANGELOG_MODE)
  }

  // One aggregated view fanned out to a retract sink and an upsert sink.
  @Test
  def testRetractAndUpsertSink(): Unit = {
    val stmtSet = util.tableEnv.createStatementSet()
    val table = util.tableEnv.sqlQuery("SELECT b, COUNT(a) AS cnt FROM MyTable GROUP BY b")
    util.tableEnv.registerTable("TempTable", table)

    val table1 = util.tableEnv.sqlQuery("SELECT b, cnt FROM TempTable WHERE b < 4")
    val retractSink = util.createRetractTableSink(Array("b", "cnt"), Array(LONG, LONG))
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
      "retractSink", retractSink)
    stmtSet.addInsert("retractSink", table1)

    val table2 = util.tableEnv.sqlQuery("SELECT b, cnt FROM TempTable WHERE b >= 4 AND b < 6")
    val upsertSink = util.createUpsertTableSink(Array(), Array("b", "cnt"), Array(LONG, LONG))
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
      "upsertSink", upsertSink)
    stmtSet.addInsert("upsertSink", table2)

    util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
  }

  // One aggregated view fanned out to two upsert sinks with different keys.
  @Test
  def testUpsertAndUpsertSink(): Unit = {
    val stmtSet = util.tableEnv.createStatementSet()
    val table = util.tableEnv.sqlQuery("SELECT b, COUNT(a) AS cnt FROM MyTable GROUP BY b")
    util.tableEnv.registerTable("TempTable", table)

    val table1 = util.tableEnv.sqlQuery(
      "SELECT cnt, COUNT(b) AS frequency FROM TempTable WHERE b < 4 GROUP BY cnt")
    val upsertSink1 = util.createUpsertTableSink(Array(0), Array("b", "cnt"), Array(LONG, LONG))
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
      "upsertSink1", upsertSink1)
    stmtSet.addInsert("upsertSink1", table1)

    val table2 = util.tableEnv.sqlQuery("SELECT b, cnt FROM TempTable WHERE b >= 4 AND b < 6")
    val upsertSink2 = util.createUpsertTableSink(Array(), Array("b", "cnt"), Array(LONG, LONG))
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
      "upsertSink2", upsertSink2)
    stmtSet.addInsert("upsertSink2", table2)

    util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
  }

  // A UNION ALL view shared by an append sink, a retract sink and an
  // upsert sink within one statement set.
  @Test
  def testAppendUpsertAndRetractSink(): Unit = {
    val stmtSet = util.tableEnv.createStatementSet()
    util.addDataStream[(Int, Long, String)]("MyTable2", 'd, 'e, 'f)
    util.addDataStream[(Int, Long, String)]("MyTable3", 'i, 'j, 'k)

    val table = util.tableEnv.sqlQuery(
      "SELECT a, b FROM MyTable UNION ALL SELECT d, e FROM MyTable2")
    util.tableEnv.registerTable("TempTable", table)
    val appendSink = util.createAppendTableSink(Array("a", "b"), Array(INT, LONG))
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
      "appendSink", appendSink)
    stmtSet.addInsert("appendSink", table)

    val table1 = util.tableEnv.sqlQuery(
      "SELECT a, b FROM TempTable UNION ALL SELECT i, j FROM MyTable3")
    util.tableEnv.registerTable("TempTable1", table1)
    val table2 = util.tableEnv.sqlQuery("SELECT SUM(a) AS total_sum FROM TempTable1")
    val retractSink = util.createRetractTableSink(Array("total_sum"), Array(INT))
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
      "retractSink", retractSink)
    stmtSet.addInsert("retractSink", table2)

    val table3 = util.tableEnv.sqlQuery("SELECT MIN(a) AS total_min FROM TempTable1")
    val upsertSink = util.createUpsertTableSink(Array(), Array("total_min"), Array(INT))
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
      "upsertSink", upsertSink)
    stmtSet.addInsert("upsertSink", table3)

    util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
  }
}
| apache-2.0 |
adelez/grpc | test/cpp/common/auth_property_iterator_test.cc | 2778 | /*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc++/security/auth_context.h>
#include <grpc/grpc_security.h>
#include <gtest/gtest.h>
#include "src/cpp/common/secure_auth_context.h"
#include "test/cpp/util/string_ref_helper.h"
#include "src/core/lib/security/context/security_context.h"
using ::grpc::testing::ToString;
namespace grpc {
namespace {
// Test-only subclass that exposes AuthPropertyIterator's protected
// constructor taking a raw property and a C iterator.
class TestAuthPropertyIterator : public AuthPropertyIterator {
 public:
  TestAuthPropertyIterator() {}
  TestAuthPropertyIterator(const grpc_auth_property* property,
                           const grpc_auth_property_iterator* iter)
      : AuthPropertyIterator(property, iter) {}
};
// Fixture that builds a C auth context with three properties:
// name=chapi, name=chapo, foo=bar, with "name" as the peer identity.
class AuthPropertyIteratorTest : public ::testing::Test {
 protected:
  void SetUp() override {
    ctx_ = grpc_auth_context_create(nullptr);
    grpc_auth_context_add_cstring_property(ctx_, "name", "chapi");
    grpc_auth_context_add_cstring_property(ctx_, "name", "chapo");
    grpc_auth_context_add_cstring_property(ctx_, "foo", "bar");
    // Returns 1 on success; fails the test if the property name is unknown.
    EXPECT_EQ(1,
              grpc_auth_context_set_peer_identity_property_name(ctx_, "name"));
  }
  void TearDown() override { grpc_auth_context_release(ctx_); }
  grpc_auth_context* ctx_;  // owned; released in TearDown
};
// Two default-constructed iterators compare equal (both are "end").
TEST_F(AuthPropertyIteratorTest, DefaultCtor) {
  TestAuthPropertyIterator iter1;
  TestAuthPropertyIterator iter2;
  EXPECT_EQ(iter1, iter2);
}
// Walks the iterator over all three properties (exercising both pre- and
// post-increment) and checks that it reaches the end sentinel.
TEST_F(AuthPropertyIteratorTest, GeneralTest) {
  grpc_auth_property_iterator c_iter =
      grpc_auth_context_property_iterator(ctx_);
  const grpc_auth_property* property =
      grpc_auth_property_iterator_next(&c_iter);
  TestAuthPropertyIterator iter(property, &c_iter);
  TestAuthPropertyIterator empty_iter;
  EXPECT_FALSE(iter == empty_iter);
  AuthProperty p0 = *iter;
  ++iter;
  AuthProperty p1 = *iter;
  iter++;
  AuthProperty p2 = *iter;
  // Properties come back in insertion order (see fixture SetUp).
  EXPECT_EQ("name", ToString(p0.first));
  EXPECT_EQ("chapi", ToString(p0.second));
  EXPECT_EQ("name", ToString(p1.first));
  EXPECT_EQ("chapo", ToString(p1.second));
  EXPECT_EQ("foo", ToString(p2.first));
  EXPECT_EQ("bar", ToString(p2.second));
  // Advancing past the last property yields the end (default) iterator.
  ++iter;
  EXPECT_EQ(empty_iter, iter);
}
} // namespace
} // namespace grpc
// Standard GoogleTest entry point.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
| apache-2.0 |
christophd/camel | components/camel-milo/src/generated/java/org/apache/camel/component/milo/client/MiloClientEndpointUriFactory.java | 3155 | /* Generated by camel build tools - do NOT edit this file! */
package org.apache.camel.component.milo.client;
import java.net.URISyntaxException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.camel.spi.EndpointUriFactory;
/**
* Generated by camel build tools - do NOT edit this file!
*/
public class MiloClientEndpointUriFactory extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {

    // URI syntax appended to the scheme: milo-client:endpointUri
    private static final String BASE = ":endpointUri";

    // All recognised endpoint option names; secret options are tracked
    // separately so they can be masked.
    private static final Set<String> PROPERTY_NAMES;
    private static final Set<String> SECRET_PROPERTY_NAMES;
    static {
        Set<String> props = new HashSet<>(33);
        props.add("productUri");
        props.add("monitorFilterType");
        props.add("keyAlias");
        props.add("channelLifetime");
        props.add("discoveryEndpointSuffix");
        props.add("defaultAwaitWrites");
        props.add("dataChangeFilterDeadbandValue");
        props.add("samplingInterval");
        props.add("keyStoreUrl");
        props.add("maxPendingPublishRequests");
        props.add("bridgeErrorHandler");
        props.add("keyPassword");
        props.add("endpointUri");
        props.add("applicationName");
        props.add("requestTimeout");
        props.add("keyStoreType");
        props.add("maxResponseMessageSize");
        props.add("clientId");
        props.add("method");
        props.add("keyStorePassword");
        props.add("dataChangeFilterTrigger");
        props.add("sessionName");
        props.add("exchangePattern");
        props.add("dataChangeFilterDeadbandType");
        props.add("node");
        props.add("lazyStartProducer");
        props.add("overrideHost");
        props.add("sessionTimeout");
        props.add("discoveryEndpointUri");
        props.add("applicationUri");
        props.add("allowedSecurityPolicies");
        props.add("exceptionHandler");
        props.add("requestedPublishingInterval");
        PROPERTY_NAMES = Collections.unmodifiableSet(props);
        Set<String> secretProps = new HashSet<>(2);
        secretProps.add("keyStorePassword");
        secretProps.add("keyPassword");
        SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
    }

    @Override
    public boolean isEnabled(String scheme) {
        return "milo-client".equals(scheme);
    }

    // Builds the endpoint URI from the property map: the mandatory
    // endpointUri path parameter first, then the remaining options as
    // query parameters.
    @Override
    public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
        String syntax = scheme + BASE;
        String uri = syntax;

        Map<String, Object> copy = new HashMap<>(properties);

        uri = buildPathParameter(syntax, uri, "endpointUri", null, true, copy);
        uri = buildQueryParameters(uri, copy, encode);
        return uri;
    }

    @Override
    public Set<String> propertyNames() {
        return PROPERTY_NAMES;
    }

    @Override
    public Set<String> secretPropertyNames() {
        return SECRET_PROPERTY_NAMES;
    }

    @Override
    public boolean isLenientProperties() {
        return false;
    }
}
| apache-2.0 |
albinsuresh/ehcache3 | clustered/ehcache-client/src/main/java/org/ehcache/clustered/client/internal/store/operations/EternalChainResolver.java | 2855 | /*
* Copyright Terracotta, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ehcache.clustered.client.internal.store.operations;
import org.ehcache.clustered.client.internal.store.ClusteredValueHolder;
import org.ehcache.clustered.client.internal.store.ServerStoreProxy;
import org.ehcache.clustered.common.internal.store.Chain;
import org.ehcache.clustered.common.internal.store.operations.Operation;
import org.ehcache.clustered.common.internal.store.operations.PutOperation;
import org.ehcache.clustered.common.internal.store.operations.Result;
import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec;
import org.ehcache.core.spi.store.Store.ValueHolder;
import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.unmodifiableMap;
/**
* A specialized chain resolver for eternal caches.
*
* @see org.ehcache.expiry.Expirations#noExpiration()
*
* @param <K> key type
* @param <V> value type
*/
public class EternalChainResolver<K, V> extends ChainResolver<K, V> {

  public EternalChainResolver(final OperationsCodec<K, V> codec) {
    super(codec);
  }

  /**
   * Resolves the chain entry to a value holder for {@code key}, ignoring
   * {@code now} since eternal entries never expire.
   */
  @Override
  public ValueHolder<V> resolve(ServerStoreProxy.ChainEntry entry, K key, long now, int threshold) {
    PutOperation<K, V> resolved = resolve(entry, key, threshold);
    return resolved == null ? null : new ClusteredValueHolder<>(resolved.getValue());
  }

  /**
   * Resolves every key present in the chain to its current value,
   * returned as an unmodifiable map.
   */
  @Override
  public Map<K, ValueHolder<V>> resolveAll(Chain chain) {
    Map<K, PutOperation<K, V>> resolved = resolveToSimplePuts(chain);

    Map<K, ValueHolder<V>> values = new HashMap<>(resolved.size());
    for (Map.Entry<K, PutOperation<K, V>> e : resolved.entrySet()) {
      values.put(e.getKey(), new ClusteredValueHolder<>(e.getValue().getValue()));
    }
    return unmodifiableMap(values);
  }

  // Time is irrelevant for an eternal cache: delegate to the timeless variant.
  @Override
  public Map<K, ValueHolder<V>> resolveAll(Chain chain, long now) {
    return resolveAll(chain);
  }

  /**
   * Applies the given operation returning a result that never expires.
   *
   * {@inheritDoc}
   */
  public PutOperation<K, V> applyOperation(K key, PutOperation<K, V> existing, Operation<K, V> operation) {
    final Result<K, V> newValue = operation.apply(existing);
    if (newValue == null) {
      // The operation removed the mapping (e.g. a remove): nothing to keep.
      return null;
    } else {
      // Long.MAX_VALUE expiry marks the entry as effectively eternal.
      return newValue.asOperationExpiringAt(Long.MAX_VALUE);
    }
  }
}
| apache-2.0 |
fanyon/flink | flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/utils/SimpleAckingTaskManagerGateway.java | 4286 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.executiongraph.utils;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.runtime.blob.BlobKey;
import org.apache.flink.runtime.checkpoint.CheckpointOptions;
import org.apache.flink.runtime.clusterframework.ApplicationStatus;
import org.apache.flink.runtime.concurrent.Future;
import org.apache.flink.runtime.concurrent.impl.FlinkCompletableFuture;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptor;
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.executiongraph.PartitionInfo;
import org.apache.flink.runtime.instance.InstanceID;
import org.apache.flink.runtime.jobmanager.slots.TaskManagerGateway;
import org.apache.flink.runtime.messages.Acknowledge;
import org.apache.flink.runtime.messages.StackTrace;
import org.apache.flink.runtime.messages.StackTraceSampleResponse;
import java.util.UUID;
/**
 * A TaskManagerGateway that simply acks the basic operations (deploy, cancel, update) and does not
 * support any more advanced operations.
 *
 * <p>Intended for tests: acked operations return a completed {@code Acknowledge}
 * future, unsupported requests return an exceptionally completed future (with
 * {@code UnsupportedOperationException}), and notification-style calls are no-ops.
 */
public class SimpleAckingTaskManagerGateway implements TaskManagerGateway {

    // Random, fixed-per-instance address so distinct gateway instances are
    // distinguishable to callers.
    private final String address = UUID.randomUUID().toString();

    @Override
    public String getAddress() {
        return address;
    }

    // --- notification-style calls: intentionally no-ops --------------------

    @Override
    public void disconnectFromJobManager(InstanceID instanceId, Exception cause) {}

    @Override
    public void stopCluster(ApplicationStatus applicationStatus, String message) {}

    // --- unsupported requests: fail the returned future --------------------

    @Override
    public Future<StackTrace> requestStackTrace(Time timeout) {
        return FlinkCompletableFuture.completedExceptionally(new UnsupportedOperationException());
    }

    @Override
    public Future<StackTraceSampleResponse> requestStackTraceSample(
            ExecutionAttemptID executionAttemptID,
            int sampleId,
            int numSamples,
            Time delayBetweenSamples,
            int maxStackTraceDepth,
            Time timeout) {
        return FlinkCompletableFuture.completedExceptionally(new UnsupportedOperationException());
    }

    // --- basic task operations: immediately acknowledged -------------------

    @Override
    public Future<Acknowledge> submitTask(TaskDeploymentDescriptor tdd, Time timeout) {
        return FlinkCompletableFuture.completed(Acknowledge.get());
    }

    @Override
    public Future<Acknowledge> stopTask(ExecutionAttemptID executionAttemptID, Time timeout) {
        return FlinkCompletableFuture.completed(Acknowledge.get());
    }

    @Override
    public Future<Acknowledge> cancelTask(ExecutionAttemptID executionAttemptID, Time timeout) {
        return FlinkCompletableFuture.completed(Acknowledge.get());
    }

    @Override
    public Future<Acknowledge> updatePartitions(ExecutionAttemptID executionAttemptID, Iterable<PartitionInfo> partitionInfos, Time timeout) {
        return FlinkCompletableFuture.completed(Acknowledge.get());
    }

    // --- remaining notifications / checkpointing: no-ops -------------------

    @Override
    public void failPartition(ExecutionAttemptID executionAttemptID) {}

    @Override
    public void notifyCheckpointComplete(
            ExecutionAttemptID executionAttemptID,
            JobID jobId,
            long checkpointId,
            long timestamp) {}

    @Override
    public void triggerCheckpoint(
            ExecutionAttemptID executionAttemptID,
            JobID jobId,
            long checkpointId,
            long timestamp,
            CheckpointOptions checkpointOptions) {}

    // --- log/stdout retrieval: unsupported ---------------------------------

    @Override
    public Future<BlobKey> requestTaskManagerLog(Time timeout) {
        return FlinkCompletableFuture.completedExceptionally(new UnsupportedOperationException());
    }

    @Override
    public Future<BlobKey> requestTaskManagerStdout(Time timeout) {
        return FlinkCompletableFuture.completedExceptionally(new UnsupportedOperationException());
    }
}
| apache-2.0 |
alanforgithub/android_xlight | cloudsdk/src/main/java/org/kaazing/gateway/client/impl/ws/CloseCommandMessage.java | 1795 | /**
* Copyright (c) 2007-2014 Kaazing Corporation. All rights reserved.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.kaazing.gateway.client.impl.ws;
import java.io.UnsupportedEncodingException;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.kaazing.gateway.client.impl.CommandMessage;
/**
 * Immutable command message carrying a WebSocket close code and reason text.
 * A code of zero (peer supplied no status) is normalized to
 * {@link #CLOSE_NO_STATUS}.
 */
public class CloseCommandMessage implements CommandMessage {

    private static final String CLASS_NAME = CloseCommandMessage.class.getName();
    private static final Logger LOG = Logger.getLogger(CLASS_NAME);

    // Reserved close codes: 1005 = no status received, 1006 = abnormal closure.
    public static final int CLOSE_NO_STATUS = 1005;
    public static final int CLOSE_ABNORMAL = 1006;

    private final int code;
    private final String reason;

    public CloseCommandMessage(int code, String reason) {
        // Zero means "no status was supplied"; map it to the reserved code.
        this.code = (code == 0) ? CLOSE_NO_STATUS : code;
        this.reason = reason;
    }

    public int getCode() {
        return code;
    }

    public String getReason() {
        return reason;
    }
}
| apache-2.0 |
gruiz17/origin | pkg/deploy/util/util_test.go | 9679 | package util
import (
"sort"
"strconv"
"testing"
"time"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
deployapi "github.com/openshift/origin/pkg/deploy/api"
deploytest "github.com/openshift/origin/pkg/deploy/api/test"
_ "github.com/openshift/origin/pkg/api/install"
)
// podTemplateA returns the baseline OK pod template with one extra container
// ("container1", image ref1) appended; the other template helpers below
// derive their variants from it.
func podTemplateA() *kapi.PodTemplateSpec {
    t := deploytest.OkPodTemplate()
    t.Spec.Containers = append(t.Spec.Containers, kapi.Container{
        Name:  "container1",
        Image: "registry:8080/repo1:ref1",
    })
    return t
}
// podTemplateB is podTemplateA with its labels replaced by {"c": "d"}.
func podTemplateB() *kapi.PodTemplateSpec {
    t := podTemplateA()
    t.Labels = map[string]string{"c": "d"}
    return t
}
// podTemplateC is podTemplateA with its first container replaced by
// "container2" (image ref3).
func podTemplateC() *kapi.PodTemplateSpec {
    t := podTemplateA()
    t.Spec.Containers[0] = kapi.Container{
        Name:  "container2",
        Image: "registry:8080/repo1:ref3",
    }

    return t
}
// podTemplateD is podTemplateA with a second container ("container2",
// image ref4) appended.
func podTemplateD() *kapi.PodTemplateSpec {
    t := podTemplateA()
    t.Spec.Containers = append(t.Spec.Containers, kapi.Container{
        Name:  "container2",
        Image: "registry:8080/repo1:ref4",
    })
    return t
}
// TestPodName verifies the deployer pod name derived from a deployment's name.
func TestPodName(t *testing.T) {
    rc := &kapi.ReplicationController{
        ObjectMeta: kapi.ObjectMeta{Name: "testName"},
    }

    const expected = "testName-deploy"
    if actual := DeployerPodNameForDeployment(rc.Name); actual != expected {
        t.Errorf("Unexpected pod name for deployment. Expected: %s Got: %s", expected, actual)
    }
}
// TestMakeDeploymentOk checks that MakeDeployment stamps the expected
// annotations, labels, selector, replica count, and encoded-config payload
// onto the replication controller it builds from a deployment config.
func TestMakeDeploymentOk(t *testing.T) {
    config := deploytest.OkDeploymentConfig(1)
    deployment, err := MakeDeployment(config, kapi.Codecs.LegacyCodec(deployapi.SchemeGroupVersion))

    if err != nil {
        t.Fatalf("unexpected error: %#v", err)
    }

    // Deployment-level annotations: back-reference to the config plus the
    // initial (New) status and the config's version.
    expectedAnnotations := map[string]string{
        deployapi.DeploymentConfigAnnotation:  config.Name,
        deployapi.DeploymentStatusAnnotation:  string(deployapi.DeploymentStatusNew),
        deployapi.DeploymentVersionAnnotation: strconv.Itoa(config.Status.LatestVersion),
    }

    for key, expected := range expectedAnnotations {
        if actual := deployment.Annotations[key]; actual != expected {
            t.Fatalf("expected deployment annotation %s=%s, got %s", key, expected, actual)
        }
    }

    // Pod-template annotations additionally name the deployment itself.
    expectedAnnotations = map[string]string{
        deployapi.DeploymentAnnotation:        deployment.Name,
        deployapi.DeploymentConfigAnnotation:  config.Name,
        deployapi.DeploymentVersionAnnotation: strconv.Itoa(config.Status.LatestVersion),
    }

    for key, expected := range expectedAnnotations {
        if actual := deployment.Spec.Template.Annotations[key]; actual != expected {
            t.Fatalf("expected pod template annotation %s=%s, got %s", key, expected, actual)
        }
    }

    // The source config must round-trip through the encoded-config annotation.
    if len(EncodedDeploymentConfigFor(deployment)) == 0 {
        t.Fatalf("expected deployment with DeploymentEncodedConfigAnnotation annotation")
    }

    if decodedConfig, err := DecodeDeploymentConfig(deployment, kapi.Codecs.LegacyCodec(deployapi.SchemeGroupVersion)); err != nil {
        t.Fatalf("invalid encoded config on deployment: %v", err)
    } else {
        if e, a := config.Name, decodedConfig.Name; e != a {
            t.Fatalf("encoded config name doesn't match source config")
        }
        // TODO: more assertions
    }

    // New deployments start scaled down to zero replicas.
    if deployment.Spec.Replicas != 0 {
        t.Fatalf("expected deployment replicas to be 0")
    }

    // Labels and selector wire the RC and its pods back to the
    // config/deployment pair.
    if l, e, a := deployapi.DeploymentConfigAnnotation, config.Name, deployment.Labels[deployapi.DeploymentConfigAnnotation]; e != a {
        t.Fatalf("expected label %s=%s, got %s", l, e, a)
    }

    if e, a := config.Name, deployment.Spec.Template.Labels[deployapi.DeploymentConfigLabel]; e != a {
        t.Fatalf("expected label DeploymentConfigLabel=%s, got %s", e, a)
    }

    if e, a := deployment.Name, deployment.Spec.Template.Labels[deployapi.DeploymentLabel]; e != a {
        t.Fatalf("expected label DeploymentLabel=%s, got %s", e, a)
    }

    if e, a := config.Name, deployment.Spec.Selector[deployapi.DeploymentConfigLabel]; e != a {
        t.Fatalf("expected selector DeploymentConfigLabel=%s, got %s", e, a)
    }

    if e, a := deployment.Name, deployment.Spec.Selector[deployapi.DeploymentLabel]; e != a {
        t.Fatalf("expected selector DeploymentLabel=%s, got %s", e, a)
    }
}
// TestDeploymentsByLatestVersion_sorting verifies that ByLatestVersionAsc and
// ByLatestVersionDesc order deployments by their version annotation.
func TestDeploymentsByLatestVersion_sorting(t *testing.T) {
    // mkdeployment builds a deployment RC for a config at the given version.
    mkdeployment := func(version int) kapi.ReplicationController {
        deployment, _ := MakeDeployment(deploytest.OkDeploymentConfig(version), kapi.Codecs.LegacyCodec(deployapi.SchemeGroupVersion))
        return *deployment
    }

    // Deliberately out of order so the sort has work to do.
    deployments := []kapi.ReplicationController{
        mkdeployment(4),
        mkdeployment(1),
        mkdeployment(2),
        mkdeployment(3),
    }

    // Ascending: expect versions 1..4 in index order.
    sort.Sort(ByLatestVersionAsc(deployments))
    for i := 0; i < 4; i++ {
        if e, a := i+1, DeploymentVersionFor(&deployments[i]); e != a {
            t.Errorf("expected deployment[%d]=%d, got %d", i, e, a)
        }
    }

    // Descending: expect versions 4..1 in index order.
    sort.Sort(ByLatestVersionDesc(deployments))
    for i := 0; i < 4; i++ {
        if e, a := 4-i, DeploymentVersionFor(&deployments[i]); e != a {
            t.Errorf("expected deployment[%d]=%d, got %d", i, e, a)
        }
    }
}
// TestSort verifies that replication controllers are sorted by most recent
// creation timestamp (ByMostRecent puts the newest first).
func TestSort(t *testing.T) {
    // Two controllers created a minute apart.
    present := unversioned.Now()
    past := unversioned.NewTime(present.Time.Add(-1 * time.Minute))
    controllers := []*kapi.ReplicationController{
        {
            ObjectMeta: kapi.ObjectMeta{
                Name:              "past",
                CreationTimestamp: past,
            },
        },
        {
            ObjectMeta: kapi.ObjectMeta{
                Name:              "present",
                CreationTimestamp: present,
            },
        },
    }
    sort.Sort(ByMostRecent(controllers))
    if controllers[0].Name != "present" {
        t.Errorf("Unexpected sort order")
    }
    if controllers[1].Name != "past" {
        t.Errorf("Unexpected sort order")
    }
}
// TestCanTransitionPhase exhaustively checks every ordered pair of deployment
// phases against the transition rules: New may move to any other phase,
// Pending may move to Running/Complete/Failed, Running may move to
// Complete/Failed, self-transitions are rejected, and the terminal phases
// Complete and Failed admit no transitions at all.
func TestCanTransitionPhase(t *testing.T) {
    phases := []deployapi.DeploymentStatus{
        deployapi.DeploymentStatusNew,
        deployapi.DeploymentStatusPending,
        deployapi.DeploymentStatusRunning,
        deployapi.DeploymentStatusComplete,
        deployapi.DeploymentStatusFailed,
    }

    // allowed[current][next] enumerates the only legal transitions; every
    // pair absent from this table must be rejected.
    allowed := map[deployapi.DeploymentStatus]map[deployapi.DeploymentStatus]bool{
        deployapi.DeploymentStatusNew: {
            deployapi.DeploymentStatusPending:  true,
            deployapi.DeploymentStatusRunning:  true,
            deployapi.DeploymentStatusComplete: true,
            deployapi.DeploymentStatusFailed:   true,
        },
        deployapi.DeploymentStatusPending: {
            deployapi.DeploymentStatusRunning:  true,
            deployapi.DeploymentStatusComplete: true,
            deployapi.DeploymentStatusFailed:   true,
        },
        deployapi.DeploymentStatusRunning: {
            deployapi.DeploymentStatusComplete: true,
            deployapi.DeploymentStatusFailed:   true,
        },
    }

    for _, current := range phases {
        for _, next := range phases {
            name := string(current) + "->" + string(next)
            expected := allowed[current][next]
            got := CanTransitionPhase(current, next)
            if got != expected {
                t.Errorf("%s: expected %t, got %t", name, expected, got)
            }
        }
    }
}
| apache-2.0 |
fengshao0907/incubator-geode | gemfire-core/src/test/java/com/gemstone/gemfire/GemFireTestCase.java | 2360 | /*=========================================================================
* Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
* This product is protected by U.S. and international copyright
* and intellectual property laws. Pivotal products are covered by
* one or more patents listed at http://www.pivotal.io/patents.
*=========================================================================
*/
package com.gemstone.gemfire;
import static org.junit.Assert.*;
import com.gemstone.gemfire.distributed.DistributedSystem;
import com.gemstone.gemfire.distributed.internal.*;
import java.util.*;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TestName;
/**
 * This is an abstract superclass for classes that test GemFire. It
 * has setUp() and tearDown() methods that create and initialize a
 * GemFire connection.
 *
 * @author davidw
 */
public abstract class GemFireTestCase {

    /** Exposes the currently running test method's name to the helpers below. */
    @Rule
    public TestName testName = new TestName();

    /**
     * Connects a "loner" distributed system (mcast-port 0, no locators)
     * whose name is the running test method's name.
     */
    @Before
    public void setUp() throws Exception {
        Properties p = new Properties();
        // make it a loner
        p.setProperty("mcast-port", "0");
        p.setProperty("locators", "");
        p.setProperty(DistributionConfig.NAME_NAME, getName());
        DistributedSystem.connect(p);
    }

    /** Disconnects the distributed system created in setUp, if one exists. */
    @After
    public void tearDown() throws Exception {
        DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
        if (ds != null) {
            ds.disconnect();
        }
    }

    /** Returns the name of the currently running test method. */
    protected String getName() {
        return testName.getMethodName();
    }

    /**
     * Strip the package off and gives just the class name.
     * Needed because of Windows file name limits.
     */
    private String getShortClassName() {
        return getClass().getSimpleName();
    }

    /**
     * Returns a unique name for this test method. It is based on the
     * name of the class as well as the name of the method.
     */
    protected String getUniqueName() {
        return getShortClassName() + "_" + getName();
    }

    /**
     * Assert an Invariant condition on an object.
     *
     * @param inv the Invariant to assert. If null, this method just returns.
     * @param obj the object to assert the Invariant on.
     */
    protected void assertInvariant(Invariant inv, Object obj) {
        if (inv == null) return;
        InvariantResult result = inv.verify(obj);
        assertTrue(result.message, result.valid);
    }
}
| apache-2.0 |
thedrow/samsa | pykafka/rdkafka/__init__.py | 89 | from .producer import RdKafkaProducer
from .simple_consumer import RdKafkaSimpleConsumer
| apache-2.0 |
LedaLima/incubator-spot | spot-setup/migration/spot_conf_migration.py | 5986 | #!/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ConfigParser
import sys
import os
from shutil import copyfile
from utilities import util
def main():
    """Merge a 0.9-era spot.conf into the new spot.conf layout.

    Usage: spot_conf_migration.py <old_spot.conf> <new_spot.conf>

    Backs up the old file as spot.conf.bkp_0_9, carries every old value over
    to the new template (keys that only exist in the new template keep their
    defaults), writes the merged result next to the new template as
    spot.conf.new, and finally installs it over the original spot.conf.
    """
    if len(sys.argv[1:]) < 2:
        print "Please provide paths to: old_spot.conf , new_spot.conf"
        sys.exit(1)

    old_conf_file = sys.argv[1]
    new_conf_file = sys.argv[2]

    log = util.get_logger('SPOT.MIGRATE.CONF')

    old_path = os.path.dirname(os.path.realpath(old_conf_file))

    # create backup for the current configuration file.
    log.info("Create a backup of /etc/spot.conf before changing it")
    util.execute_cmd('sudo cp {0} {1}/spot.conf.bkp_0_9'.format(old_conf_file, old_path),log)

    # create configuration objects.
    # SecHead injects a '[conf]' section header because spot.conf itself has
    # no sections and ConfigParser requires one.
    old_config = ConfigParser.ConfigParser()
    current_config = ConfigParser.ConfigParser()
    new_config = ConfigParser.ConfigParser()

    old_config.readfp(SecHead(open(old_conf_file)))
    current_config.readfp(SecHead(open(new_conf_file)))

    # create the new conf file: start from the new template's keys, and
    # prefer the old file's value whenever the key existed there.
    new_config.add_section('conf')
    for (k,v) in current_config.items("conf"):
        if old_config.has_option('conf',k):
            new_config.set('conf',k, old_config.get('conf',k))
        else:
            new_config.set('conf',k,v)

    new_path = os.path.dirname(os.path.realpath(new_conf_file))
    updated_conf_file = '{0}/spot.conf.new'.format(new_path)
    log.info("Generating merged configuration file in {0}".format(updated_conf_file))
    formatter(updated_conf_file,new_config)

    log.info("Updating original spot.conf with new and migrated variables and values")
    util.execute_cmd('sudo cp {0} {1}/spot.conf'.format(updated_conf_file, old_path),log)
    util.execute_cmd('sudo chmod 0755 {0}/spot.conf'.format(old_path),log)
def formatter(conf_file, conf):
    """Write the merged spot configuration to ``conf_file``.

    Emits the variables of the 'conf' section as KEY=value lines in the
    fixed order and grouping (with section comments) that spot.conf uses.

    :param conf_file: path of the file to create/overwrite.
    :param conf: ConfigParser holding a 'conf' section with every key below.
                 ConfigParser lower-cases option names on both set and get,
                 so looking keys up by their upper-case spot.conf names works.
    """
    # (section comment, keys emitted under it) in the required output order.
    # Data-driven instead of 40 near-identical write calls, so adding or
    # reordering a variable is a one-line change.
    sections = [
        ('# node configuration\n',
         ['UINODE', 'MLNODE', 'GWNODE', 'DBNAME']),
        ('# hdfs - base user and data source config\n',
         ['HUSER', 'NAME_NODE', 'WEB_PORT', 'DNS_PATH', 'PROXY_PATH',
          'FLOW_PATH', 'HPATH']),
        ('# impala config\n',
         ['IMPALA_DEM', 'IMPALA_PORT']),
        ('# local fs base user and data source config\n',
         ['LUSER', 'LPATH', 'RPATH', 'LIPATH']),
        ('# suspicious connects config\n',
         ['USER_DOMAIN', 'SPK_EXEC', 'SPK_EXEC_MEM', 'SPK_DRIVER_MEM',
          'SPK_DRIVER_MAX_RESULTS', 'SPK_EXEC_CORES',
          'SPK_DRIVER_MEM_OVERHEAD', 'SPK_EXEC_MEM_OVERHEAD',
          'SPK_AUTO_BRDCST_JOIN_THR', 'LDA_OPTIMIZER', 'LDA_ALPHA',
          'LDA_BETA', 'PRECISION', 'TOL', 'TOPIC_COUNT', 'DUPFACTOR']),
    ]
    # 'wb' kept for byte-for-byte parity with the original Python 2 output;
    # also avoid shadowing the py2 builtin ``file``.
    with open(conf_file, 'wb') as out:
        for header, keys in sections:
            out.write(header)
            for key in keys:
                out.write("{0}={1}\n".format(key, conf.get('conf', key)))
class SecHead(object):
    """File-like wrapper that injects a '[conf]' section header.

    spot.conf has no section headers, so ConfigParser cannot parse it
    directly; wrapping the open file in SecHead makes the very first
    readline() return '[conf]\\n' and every later call pass straight
    through to the underlying file object.
    """

    HEADER = '[conf]\n'

    def __init__(self, fp):
        self._fp = fp
        self._header_pending = True

    def readline(self):
        # Hand out the synthetic section header exactly once.
        if self._header_pending:
            self._header_pending = False
            return self.HEADER
        return self._fp.readline()
# Script entry point: expects two arguments, the old and new spot.conf paths.
if __name__ == '__main__':
    main()
| apache-2.0 |
slburson/fast-serialization | src/main/java/org/nustaq/serialization/serializers/FSTBigIntegerSerializer.java | 1255 | package org.nustaq.serialization.serializers;
import org.nustaq.serialization.FSTBasicObjectSerializer;
import org.nustaq.serialization.FSTClazzInfo;
import org.nustaq.serialization.FSTObjectInput;
import org.nustaq.serialization.FSTObjectOutput;
import java.io.IOException;
import java.math.BigInteger;
/**
 * FST serializer for {@link BigInteger}, encoded as a length-prefixed
 * big-endian two's-complement byte array (the {@link BigInteger#toByteArray()}
 * form).
 *
 * author: nicoruti
 * submitted via #53
 */
public class FSTBigIntegerSerializer extends FSTBasicObjectSerializer {

    @Override
    public void writeObject(FSTObjectOutput out, Object toWrite, FSTClazzInfo clzInfo, FSTClazzInfo.FSTFieldInfo referencedBy,
                            int streamPosition) throws IOException {
        byte[] value = ((BigInteger) toWrite).toByteArray();
        out.writeInt(value.length);
        out.write(value);
    }

    @Override
    public Object instantiate(Class objectClass, FSTObjectInput in, FSTClazzInfo serializationInfo, FSTClazzInfo.FSTFieldInfo referencee,
                              int streamPosition) throws Exception {
        int len = in.readInt();
        byte[] buf = new byte[len];
        // InputStream.read(byte[]) may return fewer bytes than requested; the
        // previous single call could silently truncate the payload on a short
        // read. Loop until the full length has been consumed.
        int off = 0;
        while (off < len) {
            int n = in.read(buf, off, len - off);
            if (n < 0) {
                throw new java.io.EOFException(
                        "unexpected end of stream reading BigInteger payload (" + off + "/" + len + " bytes)");
            }
            off += n;
        }
        BigInteger bigInteger = new BigInteger(buf);
        in.registerObject(bigInteger, streamPosition, serializationInfo, referencee);
        return bigInteger;
    }
}
| apache-2.0 |
minestarks/TypeScript | tests/baselines/reference/classHeritageWithTrailingSeparator.js | 1023 | //// [classHeritageWithTrailingSeparator.ts]
class C { foo: number }
class D extends C, {
}
//// [classHeritageWithTrailingSeparator.js]
var __extends = (this && this.__extends) || (function () {
var extendStatics = function (d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return extendStatics(d, b);
};
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var C = /** @class */ (function () {
function C() {
}
return C;
}());
var D = /** @class */ (function (_super) {
__extends(D, _super);
function D() {
return _super !== null && _super.apply(this, arguments) || this;
}
return D;
}(C));
| apache-2.0 |
Dominator008/buck | src/com/facebook/buck/js/ReactNativeLibraryArgs.java | 1263 | /*
* Copyright 2015-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.js;
import com.facebook.buck.model.BuildTarget;
import com.facebook.buck.rules.AbstractDescriptionArg;
import com.facebook.buck.rules.SourcePath;
import com.facebook.infer.annotation.SuppressFieldNotInitialized;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableSortedSet;
/**
 * Constructor-arg holder for react-native library build rules. Fields are
 * assigned externally after construction (hence
 * {@code @SuppressFieldNotInitialized}).
 */
@SuppressFieldNotInitialized
public class ReactNativeLibraryArgs extends AbstractDescriptionArg {
    /** Source files for the library, if given. */
    public Optional<ImmutableSortedSet<SourcePath>> srcs;
    /** Entry point path — presumably handed to the packager; confirm against callers. */
    public SourcePath entryPath;
    /** Name of the generated bundle. */
    public String bundleName;
    /** Extra flags for the packager, if any. */
    public Optional<String> packagerFlags;
    /** Build targets this rule depends on, if given. */
    public Optional<ImmutableSortedSet<BuildTarget>> deps;
}
stefwalter/origin | Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/admission/namespace/exists/admission.go | 3053 | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package exists
import (
"fmt"
"io"
"github.com/GoogleCloudPlatform/kubernetes/pkg/admission"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
)
// init registers this plugin under the name "NamespaceExists" so it can be
// selected as an admission controller; the config reader argument is unused.
func init() {
    admission.RegisterPlugin("NamespaceExists", func(client client.Interface, config io.Reader) (admission.Interface, error) {
        return NewExists(client), nil
    })
}
// exists is an implementation of admission.Interface.
// It rejects all incoming requests in a namespace context if the namespace does not exist.
// It is useful in deployments that want to enforce pre-declaration of a Namespace resource.
type exists struct {
    client client.Interface
    // store is a reflector-backed local cache of Namespace objects (populated
    // by NewExists) used to answer existence checks without an API round trip.
    store cache.Store
}
// Admit allows the request unless it targets a namespace-scoped resource
// whose namespace is absent from the local namespace cache.
func (e *exists) Admit(a admission.Attributes) (err error) {
    // Resolve the resource to its REST mapping to learn whether it is
    // namespace-scoped at all.
    defaultVersion, kind, err := latest.RESTMapper.VersionAndKindForResource(a.GetResource())
    if err != nil {
        return admission.NewForbidden(a, err)
    }
    mapping, err := latest.RESTMapper.RESTMapping(kind, defaultVersion)
    if err != nil {
        return admission.NewForbidden(a, err)
    }
    // Cluster-scoped resources carry no namespace; nothing to verify.
    if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
        return nil
    }
    // Probe the cache with a stub object keyed by the request's namespace.
    namespace := &api.Namespace{
        ObjectMeta: api.ObjectMeta{
            Name:      a.GetNamespace(),
            Namespace: "",
        },
        Status: api.NamespaceStatus{},
    }
    _, exists, err := e.store.Get(namespace)
    if err != nil {
        return admission.NewForbidden(a, err)
    }
    if exists {
        return nil
    }
    // NOTE(review): the cache can lag the API server briefly, so a freshly
    // created namespace may be rejected until the reflector catches up.
    return admission.NewForbidden(a, fmt.Errorf("Namespace %s does not exist", a.GetNamespace()))
}
// NewExists builds the admission controller: it starts a reflector that
// mirrors all Namespace objects into a local store and returns an admitter
// backed by that store.
func NewExists(c client.Interface) admission.Interface {
    store := cache.NewStore(cache.MetaNamespaceKeyFunc)
    reflector := cache.NewReflector(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return c.Namespaces().List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(resourceVersion string) (watch.Interface, error) {
                return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
            },
        },
        &api.Namespace{},
        store,
        0, // resync period 0: rely on the watch alone, no periodic relist
    )
    reflector.Run()
    return &exists{
        client: c,
        store:  store,
    }
}
| apache-2.0 |
resmo/cloudstack | plugins/network-elements/juniper-contrail/src/org/apache/cloudstack/network/contrail/management/ContrailManager.java | 5713 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.network.contrail.management;
import java.io.IOException;
import java.util.List;
import net.juniper.contrail.api.ApiConnector;
import net.juniper.contrail.api.types.FloatingIp;
import net.juniper.contrail.api.types.NetworkPolicy;
import net.juniper.contrail.api.types.VirtualNetwork;
import org.apache.cloudstack.network.contrail.model.ModelController;
import org.apache.cloudstack.network.contrail.model.VirtualNetworkModel;
import com.cloud.domain.DomainVO;
import com.cloud.network.Network;
import com.cloud.network.Networks.TrafficType;
import com.cloud.network.PublicIpAddress;
import com.cloud.network.dao.IPAddressVO;
import com.cloud.network.dao.NetworkVO;
import com.cloud.network.dao.PhysicalNetworkVO;
import com.cloud.offering.NetworkOffering;
import com.cloud.projects.ProjectVO;
import com.cloud.network.vpc.NetworkACLVO;
import com.cloud.network.vpc.VpcOffering;
import com.cloud.network.vpc.VpcVO;
/**
 * Management facade for the Juniper Contrail network plugin: owns the
 * offerings the plugin registers, the CloudStack-to-VNC naming/lookup
 * helpers, and the synchronization between the CloudStack database and the
 * Contrail API server.
 */
public interface ContrailManager {
    // Names/display texts of the network and VPC offerings this plugin registers.
    public static final String routerOfferingName = "Juniper Contrail Network Offering";
    public static final String routerOfferingDisplayText = "Juniper Contrail Network Offering";
    public static final String routerPublicOfferingName = "Juniper Contrail Public Network Offering";
    public static final String routerPublicOfferingDisplayText = "Juniper Contrail Public Network Offering";
    public static final String vpcRouterOfferingName = "Juniper Contrail VPC Network Offering";
    public static final String vpcRouterOfferingDisplayText = "Juniper Contrail VPC Network Offering";
    public static final String juniperVPCOfferingName = "Juniper Contrail VPC Offering";
    public static final String juniperVPCOfferingDisplayText = "Juniper Contrail VPC Offering";

    // Default DB-sync interval (milliseconds) and well-known VNC names.
    public static final int DB_SYNC_INTERVAL_DEFAULT = 600000;
    public static final String VNC_ROOT_DOMAIN = "default-domain";
    public static final String VNC_DEFAULT_PROJECT = "default-project";
    public static final String managementNetworkName = "ip-fabric";

    // --- offerings registered by the plugin --------------------------------
    public NetworkOffering getRouterOffering();
    public NetworkOffering getPublicRouterOffering();
    public NetworkOffering getVpcRouterOffering();
    public VpcOffering getVpcOffering();

    // --- database synchronization ------------------------------------------
    public void syncNetworkDB(short syncMode) throws IOException;

    // --- network lookup and naming helpers ---------------------------------
    public boolean isManagedPhysicalNetwork(Network network);

    /**
     * Lookup the virtual network that implements the CloudStack network object.
     * @param net the CloudStack network to resolve.
     * @return the uuid of the virtual network that corresponds to the
     * specified CloudStack network.
     */
    public String findVirtualNetworkId(Network net) throws IOException;

    public void findInfrastructureNetworks(PhysicalNetworkVO phys, List<NetworkVO> dbList);

    public String getPhysicalNetworkName(PhysicalNetworkVO physNet);

    public String getCanonicalName(Network net);

    public String getDomainCanonicalName(DomainVO domain);

    public String getProjectCanonicalName(ProjectVO project);

    public String getFQN(Network net);

    public String getDomainName(long domainId);

    public String getProjectName(long accountId);

    public String getDefaultPublicNetworkFQN();

    // --- VNC project resolution --------------------------------------------
    public String getProjectId(long domainId, long accountId) throws IOException;

    public net.juniper.contrail.api.types.Project getVncProject(long domainId, long accountId) throws IOException;

    public net.juniper.contrail.api.types.Project getDefaultVncProject() throws IOException;

    // --- system-default checks (VNC and CloudStack views) -------------------
    public boolean isSystemRootDomain(net.juniper.contrail.api.types.Domain vnc);

    public boolean isSystemRootDomain(DomainVO domain);

    public boolean isSystemDefaultProject(net.juniper.contrail.api.types.Project project);

    public boolean isSystemDefaultProject(ProjectVO project);

    public boolean isSystemDefaultNetwork(VirtualNetwork vnet);

    public boolean isSystemDefaultNetwork(NetworkVO dbNet);

    // --- VM interface (vif) naming -----------------------------------------
    public String getVifNameByVmName(String vmName, Integer deviceId);

    public String getVifNameByVmUuid(String vmUuid, Integer deviceId);

    // --- access to the Contrail API connector and model layer --------------
    public ApiConnector getApiConnector();

    public ModelDatabase getDatabase();

    public ModelController getModelController();

    // --- finders over CloudStack-managed entities --------------------------
    public List<NetworkVO> findManagedNetworks(List<TrafficType> types);

    public List<NetworkVO> findSystemNetworks(List<TrafficType> types);

    public List<IPAddressVO> findManagedPublicIps();

    public List<VpcVO> findManagedVpcs();

    public List<NetworkACLVO> findManagedACLs();

    public VirtualNetwork findDefaultVirtualNetwork(TrafficType trafficType) throws IOException;

    // --- floating IP management --------------------------------------------
    public List<FloatingIp> getFloatingIps();

    public VirtualNetworkModel lookupPublicNetworkModel();

    public boolean createFloatingIp(PublicIpAddress ip);

    public boolean deleteFloatingIp(PublicIpAddress ip);

    public boolean isSystemDefaultNetworkPolicy(NetworkPolicy policy);
}
| apache-2.0 |
nathangoulding/amphtml | ads/taboola.js | 2423 | /**
* Copyright 2015 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {loadScript, validateDataExists, validateExactlyOne} from '../src/3p';
/**
* @param {!Window} global
* @param {!Object} data
*/
export function taboola(global, data) {
  // Do not copy the following attributes from the 'data' object
  // to the '_taboola' global object.
  const blackList = ['height', 'initialWindowHeight', 'initialWindowWidth',
    'type', 'width', 'placement', 'mode'];
  // Ensure we have a valid publisher, placement and mode,
  // and exactly one page-type attribute.
  validateDataExists(data, ['publisher', 'placement', 'mode']);
  validateExactlyOne(data, ['article', 'video', 'photo', 'search', 'category',
    'homepage', 'others']);
  // Set up default values for referrer and url from the AMP context.
  const params = {
    referrer: data.referrer || global.context.referrer,
    url: data.url || global.context.canonicalUrl,
  };
  // Copy every non-blacklisted attribute into the 'params' map.
  Object.keys(data).forEach(k => {
    if (blackList.indexOf(k) === -1) {
      params[k] = data[k];
    }
  });
  // Push the widget definition, the params map and the flush flag
  // into the '_taboola' command queue (creating it if necessary).
  (global._taboola = global._taboola || []).push([{
    viewId: global.context.pageViewId,
    publisher: data.publisher,
    placement: data.placement,
    mode: data.mode,
    framework: 'amp',
    container: 'c',
  },
  params,
  {flush: true}]
  );
  // Viewability: notify Taboola whenever the ad intersects the viewport.
  global.context.observeIntersection(function(changes) {
    changes.forEach(function(c) {
      if (c.intersectionRect.height) {
        global._taboola.push({
          visible: true,
          rects: c,
          placement: data.placement,
        });
      }
    });
  });
  // Load the Taboola loader script asynchronously.
  loadScript(global, `https://cdn.taboola.com/libtrc/${encodeURIComponent(data.publisher)}/loader.js`);
}
| apache-2.0 |
jangalinski/camunda-bpm-platform | engine/src/main/java/org/camunda/bpm/engine/impl/form/FormDataImpl.java | 1956 | /* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.impl.form;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import org.camunda.bpm.engine.form.FormData;
import org.camunda.bpm.engine.form.FormField;
import org.camunda.bpm.engine.form.FormProperty;
/**
* @author Tom Baeyens
* @author Daniel Meyer
*/
public abstract class FormDataImpl implements FormData, Serializable {

  private static final long serialVersionUID = 1L;

  /** Key identifying which form definition to render. */
  protected String formKey;

  /** Id of the deployment this form belongs to. */
  protected String deploymentId;

  /** Legacy-style form properties exposed by this form. */
  protected List<FormProperty> formProperties = new ArrayList<FormProperty>();

  /** Form fields exposed by this form. */
  protected List<FormField> formFields = new ArrayList<FormField>();

  // accessors, grouped per field /////////////////////////////////////////////

  public String getFormKey() {
    return this.formKey;
  }

  public void setFormKey(String formKey) {
    this.formKey = formKey;
  }

  public String getDeploymentId() {
    return this.deploymentId;
  }

  public void setDeploymentId(String deploymentId) {
    this.deploymentId = deploymentId;
  }

  public List<FormProperty> getFormProperties() {
    return this.formProperties;
  }

  public void setFormProperties(List<FormProperty> formProperties) {
    this.formProperties = formProperties;
  }

  public List<FormField> getFormFields() {
    return this.formFields;
  }

  public void setFormFields(List<FormField> formFields) {
    this.formFields = formFields;
  }
}
| apache-2.0 |
nathanaeljones/weaver-nodelua | app/controllers/articles.js | 2019 | 'use strict';
/**
* Module dependencies.
*/
var mongoose = require('mongoose'),
Article = mongoose.model('Article'),
_ = require('lodash');
/**
 * Route-param middleware: resolve :id to an Article.
 *
 * On success the article is attached to `req.article` for downstream
 * handlers; load errors and missing articles are forwarded to `next`.
 */
exports.article = function(req, res, next, id) {
  Article.load(id, function(err, article) {
    if (err) return next(err);
    if (!article) return next(new Error('Failed to load article ' + id));
    req.article = article;
    next();
  });
};
/**
 * Create an article from the request body, attaching the authenticated
 * user as its author, and respond with the saved article as JSONP.
 *
 * NOTE(review): on save failure this calls `res.send('users/signup', ...)`;
 * `res.send` does not accept a view name, so this looks like a leftover
 * from a `res.render` call — TODO confirm intended behavior.
 */
exports.create = function(req, res) {
  var article = new Article(req.body);
  article.user = req.user;
  article.save(function(err) {
    if (err) {
      return res.send('users/signup', {
        errors: err.errors,
        article: article
      });
    } else {
      res.jsonp(article);
    }
  });
};
/**
 * Update the article previously loaded by the `article` param middleware
 * with the request body fields, then respond with it as JSONP.
 *
 * NOTE(review): the error branch passes a view name to `res.send`
 * (probably meant `res.render`) — TODO confirm, same as in `create`.
 */
exports.update = function(req, res) {
  var article = req.article;
  article = _.extend(article, req.body);
  article.save(function(err) {
    if (err) {
      return res.send('users/signup', {
        errors: err.errors,
        article: article
      });
    } else {
      res.jsonp(article);
    }
  });
};
/**
 * Delete the article previously loaded by the `article` param middleware
 * and respond with the removed document as JSONP.
 *
 * NOTE(review): the error branch passes a view name to `res.send`
 * (probably meant `res.render`) — TODO confirm, same as in `create`.
 */
exports.destroy = function(req, res) {
  var article = req.article;
  article.remove(function(err) {
    if (err) {
      return res.send('users/signup', {
        errors: err.errors,
        article: article
      });
    } else {
      res.jsonp(article);
    }
  });
};
/**
 * Respond with the article previously loaded by the `article` param
 * middleware, as JSONP.
 */
exports.show = function(req, res) {
  res.jsonp(req.article);
};
/**
 * List all articles, newest first, with each article's author populated
 * (name and username fields only). Renders the 'error' view with a 500
 * status when the query fails.
 */
exports.all = function(req, res) {
  Article.find().sort('-created').populate('user', 'name username').exec(function(err, articles) {
    if (err) {
      res.render('error', {
        status: 500
      });
    } else {
      res.jsonp(articles);
    }
  });
};
stemlending/fabric | common/metrics/statsd/provider.go | 2830 | /*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package statsd
import (
"github.com/go-kit/kit/metrics/statsd"
"github.com/hyperledger/fabric/common/metrics"
"github.com/hyperledger/fabric/common/metrics/internal/namer"
)
// defaultFormat is used when metric options do not specify their own
// statsd name format; "%{#fqname}" expands to the fully-qualified name.
const defaultFormat = "%{#fqname}"

// Provider builds statsd-backed implementations of the metrics
// interfaces on top of the wrapped go-kit statsd client.
type Provider struct {
	Statsd *statsd.Statsd
}

// NewCounter creates a statsd counter. When label names are declared,
// the underlying statsd metric is only created later via With; calling
// Add before With then panics.
func (p *Provider) NewCounter(o metrics.CounterOpts) metrics.Counter {
	if o.StatsdFormat == "" {
		o.StatsdFormat = defaultFormat
	}
	counter := &Counter{
		statsdProvider: p.Statsd,
		namer:          namer.NewCounterNamer(o),
	}
	if len(o.LabelNames) == 0 {
		counter.Counter = p.Statsd.NewCounter(counter.namer.Format(), 1)
	}
	return counter
}

// NewGauge creates a statsd gauge; same label-name deferral rule as
// NewCounter.
func (p *Provider) NewGauge(o metrics.GaugeOpts) metrics.Gauge {
	if o.StatsdFormat == "" {
		o.StatsdFormat = defaultFormat
	}
	gauge := &Gauge{
		statsdProvider: p.Statsd,
		namer:          namer.NewGaugeNamer(o),
	}
	if len(o.LabelNames) == 0 {
		gauge.Gauge = p.Statsd.NewGauge(gauge.namer.Format())
	}
	return gauge
}

// NewHistogram creates a statsd timing-backed histogram; same
// label-name deferral rule as NewCounter.
func (p *Provider) NewHistogram(o metrics.HistogramOpts) metrics.Histogram {
	if o.StatsdFormat == "" {
		o.StatsdFormat = defaultFormat
	}
	histogram := &Histogram{
		statsdProvider: p.Statsd,
		namer:          namer.NewHistogramNamer(o),
	}
	if len(o.LabelNames) == 0 {
		histogram.Timing = p.Statsd.NewTiming(histogram.namer.Format(), 1.0)
	}
	return histogram
}
// Counter is a statsd-backed metrics.Counter. The embedded go-kit
// counter stays nil until label values are bound via With when label
// names were declared at creation time.
type Counter struct {
	Counter        *statsd.Counter
	namer          *namer.Namer
	statsdProvider *statsd.Statsd
}

// Add increments the counter by delta. Panics when the counter was
// created with label names and With has not been called yet.
func (c *Counter) Add(delta float64) {
	if c.Counter == nil {
		panic("label values must be provided by calling With")
	}
	c.Counter.Add(delta)
}

// With returns a counter bound to the given label values; the metric
// name is produced by formatting the labels into the name template.
func (c *Counter) With(labelValues ...string) metrics.Counter {
	name := c.namer.Format(labelValues...)
	return &Counter{Counter: c.statsdProvider.NewCounter(name, 1)}
}
// Gauge is a statsd-backed metrics.Gauge. The embedded go-kit gauge
// stays nil until label values are bound via With when label names
// were declared at creation time.
type Gauge struct {
	Gauge          *statsd.Gauge
	namer          *namer.Namer
	statsdProvider *statsd.Statsd
}

// Add adjusts the gauge by delta. Panics when labels are required but
// With has not been called yet.
func (g *Gauge) Add(delta float64) {
	if g.Gauge == nil {
		panic("label values must be provided by calling With")
	}
	g.Gauge.Add(delta)
}

// Set replaces the gauge value. Panics when labels are required but
// With has not been called yet.
func (g *Gauge) Set(value float64) {
	if g.Gauge == nil {
		panic("label values must be provided by calling With")
	}
	g.Gauge.Set(value)
}

// With returns a gauge bound to the given label values.
func (g *Gauge) With(labelValues ...string) metrics.Gauge {
	name := g.namer.Format(labelValues...)
	return &Gauge{Gauge: g.statsdProvider.NewGauge(name)}
}
// Histogram is a metrics.Histogram backed by a statsd timing. The
// embedded go-kit timing stays nil until label values are bound via
// With when label names were declared at creation time.
type Histogram struct {
	Timing         *statsd.Timing
	namer          *namer.Namer
	statsdProvider *statsd.Statsd
}

// With returns a histogram bound to the given label values.
func (h *Histogram) With(labelValues ...string) metrics.Histogram {
	name := h.namer.Format(labelValues...)
	return &Histogram{Timing: h.statsdProvider.NewTiming(name, 1)}
}

// Observe records a single observation. Panics when labels are
// required but With has not been called yet.
func (h *Histogram) Observe(value float64) {
	if h.Timing == nil {
		panic("label values must be provided by calling With")
	}
	h.Timing.Observe(value)
}
| apache-2.0 |
yuananf/presto | presto-main/src/main/java/com/facebook/presto/operator/aggregation/histogram/GroupedHistogramState.java | 2060 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.operator.aggregation.histogram;
import com.facebook.presto.operator.aggregation.state.AbstractGroupedAccumulatorState;
import com.facebook.presto.spi.block.Block;
import com.facebook.presto.spi.type.Type;
import org.openjdk.jol.info.ClassLayout;
/**
* state object that uses a single histogram for all groups. See {@link GroupedTypedHistogram}
*/
public class GroupedHistogramState
        extends AbstractGroupedAccumulatorState
        implements HistogramState
{
    private static final int INSTANCE_SIZE = ClassLayout.parseClass(GroupedHistogramState.class).instanceSize();

    // Single histogram shared by all groups; get() positions it on the
    // current group before returning it.
    private TypedHistogram typedHistogram;

    // Memory usage reported externally via addMemoryUsage.
    private long size;

    public GroupedHistogramState(Type keyType, int expectedEntriesCount)
    {
        typedHistogram = new GroupedTypedHistogram(keyType, expectedEntriesCount);
    }

    /** Delegates capacity assurance to the shared histogram. */
    @Override
    public void ensureCapacity(long size)
    {
        typedHistogram.ensureCapacity(size);
    }

    /** Returns the shared histogram positioned on the current group id. */
    @Override
    public TypedHistogram get()
    {
        return typedHistogram.setGroupId(getGroupId());
    }

    /** Replaces the histogram with one deserialized from {@code block}, scoped to the current group. */
    @Override
    public void deserialize(Block block, Type type, int expectedSize)
    {
        typedHistogram = new GroupedTypedHistogram(getGroupId(), block, type, expectedSize);
    }

    @Override
    public void addMemoryUsage(long memory)
    {
        size += memory;
    }

    /** Instance overhead plus externally reported usage plus the histogram's own estimate. */
    @Override
    public long getEstimatedSize()
    {
        return INSTANCE_SIZE + size + typedHistogram.getEstimatedSize();
    }
}
| apache-2.0 |
SanthiSridharan/Hygieia | core/src/test/java/com/capitalone/dashboard/repository/EmbeddedMongoDBRule.java | 5492 | package com.capitalone.dashboard.repository;
import com.mongodb.MongoClient;
import com.mongodb.ServerAddress;
import de.flapdoodle.embed.mongo.Command;
import de.flapdoodle.embed.mongo.MongodExecutable;
import de.flapdoodle.embed.mongo.MongodProcess;
import de.flapdoodle.embed.mongo.MongodStarter;
import de.flapdoodle.embed.mongo.config.ArtifactStoreBuilder;
import de.flapdoodle.embed.mongo.config.DownloadConfigBuilder;
import de.flapdoodle.embed.mongo.config.IMongodConfig;
import de.flapdoodle.embed.mongo.config.MongodConfigBuilder;
import de.flapdoodle.embed.mongo.config.Net;
import de.flapdoodle.embed.mongo.config.RuntimeConfigBuilder;
import de.flapdoodle.embed.mongo.distribution.Version;
import de.flapdoodle.embed.process.config.IRuntimeConfig;
import de.flapdoodle.embed.process.config.store.IProxyFactory;
import de.flapdoodle.embed.process.runtime.Network;
import org.junit.rules.ExternalResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.Proxy;
import java.net.URL;
import java.net.Authenticator;
import java.net.PasswordAuthentication;
import java.util.StringTokenizer;
/**
 * JUnit {@link ExternalResource} rule that boots an embedded MongoDB
 * instance (via flapdoodle) for repository tests. {@link #before()}
 * starts mongod on a free (or {@code MONGO_PORT}-configured) port,
 * connects a client, and publishes the host/port through the
 * {@code dbhost}/{@code dbport} system properties; {@link #after()}
 * tears everything down. Distribution downloads honor the
 * {@code HTTP_PROXY} environment variable / system property, including
 * {@code user:password} proxy credentials.
 */
@SuppressWarnings("deprecation")
public class EmbeddedMongoDBRule extends ExternalResource {
    private static final Logger LOGGER = LoggerFactory
            .getLogger(EmbeddedMongoDBRule.class);

    /** Optional system property overriding the randomly chosen free port. */
    private static final String MONGO_PORT_PROP = "MONGO_PORT";

    private MongodExecutable mongoExec;
    private MongodProcess mongoProc;
    private MongoClient client;

    /**
     * Supplies the proxy used by flapdoodle when downloading the MongoDB
     * distribution, derived from {@code HTTP_PROXY} (environment variable
     * first, then system property). URLs of the form
     * {@code http://user:password@host:port} also configure JVM-wide
     * proxy authentication.
     */
    static class SystemProxy implements IProxyFactory {
        @Override
        public Proxy createProxy() {
            String proxy = System.getenv("HTTP_PROXY");
            if (proxy == null || proxy.isEmpty()) {
                proxy = System.getProperty("HTTP_PROXY");
            }
            try {
                URL proxyUrl = new URL(proxy);
                // Case for proxy authentication required
                try {
                    String proxyUserInfo = proxyUrl.getUserInfo();
                    if (proxyUserInfo != null) {
                        StringTokenizer tokenizedUrl = new StringTokenizer(proxyUserInfo, ":");
                        if (tokenizedUrl.hasMoreTokens()) {
                            final String authUser = tokenizedUrl.nextToken();
                            if (tokenizedUrl.hasMoreTokens()) {
                                final String authPassword = tokenizedUrl
                                        .nextToken();
                                // BUGFIX: the last clause previously re-tested
                                // authUser for null instead of authPassword,
                                // before dereferencing authPassword.isEmpty().
                                if (proxy != null && !proxy.isEmpty()
                                        && authUser != null && !authUser.isEmpty()
                                        && authPassword != null && !authPassword.isEmpty()) {
                                    Authenticator.setDefault(new Authenticator() {
                                        @Override
                                        public PasswordAuthentication getPasswordAuthentication() {
                                            return new PasswordAuthentication(
                                                    authUser, authPassword
                                                            .toCharArray());
                                        }
                                    });
                                    System.setProperty("http.proxyUser", authUser);
                                    System.setProperty("http.proxyPassword",
                                            authPassword);
                                }
                            } else {
                                LOGGER.warn("Proxy Authentication did not contain a valid password parameter\nSkipping Authenticated proxy step.");
                            }
                        } else {
                            LOGGER.warn("Proxy Authentication did not contain user info\nSkipping Authenticated proxy step.");
                        }
                    } else {
                        LOGGER.info("Proxy did not contain authentication parameters - assuming non-authenticated proxy");
                    }
                } catch (IllegalArgumentException e) {
                    LOGGER.warn(
                            "Malformed Proxy Authentication Credentials for HTTP Proxy in "
                                    + this.getClass().getName(), e);
                }
                // Configuring proxy
                if (proxy != null && !proxy.isEmpty()) {
                    return new Proxy(Proxy.Type.HTTP, new InetSocketAddress(
                            proxyUrl.getHost(), proxyUrl.getPort()));
                }
            } catch (MalformedURLException ex) {
                LOGGER.error("Malformed HTTP Proxy for "
                        + this.getClass().getName(), ex);
            } catch (NullPointerException npe) {
                LOGGER.error(
                        "Unexpectedly, something in your proxy configuration was blank or misreferenced for "
                                + this.getClass().getName(), npe);
            }
            return Proxy.NO_PROXY;
        }
    }

    /**
     * Starts mongod and connects a client, then exposes the server's
     * address as the {@code dbhost}/{@code dbport} system properties for
     * the test configuration to pick up.
     */
    @Override
    public void before() throws Throwable {
        int port = Network.getFreeServerPort();
        String portProp = System.getProperty(MONGO_PORT_PROP);
        if (portProp != null && !portProp.isEmpty()) {
            port = Integer.valueOf(portProp);
        }
        IMongodConfig conf = new MongodConfigBuilder()
                .version(Version.Main.PRODUCTION)
                .net(new Net(port, Network.localhostIsIPv6())).build();
        Command command = Command.MongoD;
        IRuntimeConfig runtimeConfig = new RuntimeConfigBuilder()
                .defaultsWithLogger(command, LOGGER)
                .artifactStore(
                        new ArtifactStoreBuilder().defaults(command).download(
                                new DownloadConfigBuilder().defaultsForCommand(
                                        command)
                                        .proxyFactory(new SystemProxy())))
                .build();
        MongodStarter runtime = MongodStarter.getInstance(runtimeConfig);
        mongoExec = runtime.prepare(conf);
        mongoProc = mongoExec.start();
        client = new MongoClient(new ServerAddress(conf.net()
                .getServerAddress(), conf.net().getPort()));
        // set the property for our config...
        System.setProperty("dbhost", conf.net().getServerAddress()
                .getHostAddress());
        System.setProperty("dbport", Integer.toString(conf.net().getPort()));
    }

    /** Tears down client, mongod process, and executable in reverse creation order. */
    @Override
    public void after() {
        if (client != null) {
            client.close();
            client = null;
        }
        if (mongoProc != null) {
            mongoProc.stop();
            mongoProc = null;
        }
        if (mongoExec != null) {
            mongoExec.stop();
            mongoExec = null;
        }
    }

    /** Client connected to the embedded instance; null outside the before/after window. */
    public MongoClient client() {
        return client;
    }
}
| apache-2.0 |
luffy730/test | php-leraning/project/cache-demo/Hdphp/Db/Db.php | 494 | <?php namespace Hdphp\Db;
// Database facade: forwards all calls to the configured driver class.
class Db{
    // Cached driver links (not used by the code visible here).
    protected $link=array();

    public function __construct()
    {
    }

    /**
     * Magic dispatcher: any undefined method call is forwarded to a new
     * instance of the configured read driver, resolved as
     * '\Hdphp\Db\<Driver>' from the config key 'database.read.driver'.
     *
     * NOTE(review): inside namespace Hdphp\Db the bare `Config` resolves
     * to \Hdphp\Db\Config — confirm that class exists or that a `use`
     * statement elsewhere imports the intended Config class.
     *
     * @param string $method name of the called method
     * @param array  $params arguments forwarded to the driver method
     * @return mixed whatever the driver method returns
     */
    public function __call($method,$params)
    {
        $driver ='\Hdphp\Db\\'.ucfirst(Config::get('database.read.driver'));
        $instance = new $driver;
        return call_user_func_array(array($instance,$method), $params);
    }
} | apache-2.0 |
gamerson/bnd | biz.aQute.bndlib.tests/test/test/jpms/j/Foo.java | 884 | package test.jpms.j;
import java.lang.reflect.Type;
import java.util.Collections;
import javax.enterprise.inject.spi.Extension;
import javax.json.Json;
import javax.json.JsonObject;
import javax.json.bind.JsonbBuilder;
import javax.json.bind.serializer.DeserializationContext;
import javax.json.stream.JsonParser;
public class Foo implements Extension {

  /** Builds a one-entry JSON object via the JSON-P (javax.json) API. */
  public JsonObject toJSON() {
    return Json.createObjectBuilder()
      .add("foo", "bar")
      .build();
  }

  /** Serializes a one-entry map via the JSON-B (javax.json.bind) API. */
  public String toJSONB() {
    return JsonbBuilder.create()
      .toJson(Collections.singletonMap("foo", "bar"));
  }

  // No-op deserialization context; presumably kept only so this class
  // references the javax.json.bind.serializer / javax.json.stream
  // packages — TODO confirm against the surrounding JPMS test.
  @SuppressWarnings("unused")
  private DeserializationContext deserializationContext = new DeserializationContext() {
    @Override
    public <T> T deserialize(Class<T> arg0, JsonParser arg1) {
      return null;
    }
    @Override
    public <T> T deserialize(Type arg0, JsonParser arg1) {
      return null;
    }
  };
}
| apache-2.0 |
axbaretto/beam | sdks/java/core/src/test/java/org/apache/beam/sdk/coders/NullableCoderTest.java | 7132 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.coders;
import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkArgument;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.theInstance;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.beam.sdk.coders.Coder.Context;
import org.apache.beam.sdk.testing.CoderProperties;
import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
import org.apache.beam.sdk.values.TypeDescriptor;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** Unit tests for {@link NullableCoder}. */
@RunWith(JUnit4.class)
public class NullableCoderTest {

  private static final Coder<String> TEST_CODER = NullableCoder.of(StringUtf8Coder.of());

  // Deliberately includes null — the case NullableCoder exists to support —
  // plus empty, ASCII, multi-line, and non-ASCII strings.
  private static final List<String> TEST_VALUES =
      Arrays.asList(
          "",
          "a",
          "13",
          "hello",
          null,
          "a longer string with spaces and all that",
          "a string with a \n newline",
          "スタリング");

  @Test
  public void testDecodeEncodeContentsInSameOrder() throws Exception {
    for (String value : TEST_VALUES) {
      CoderProperties.coderDecodeEncodeEqual(TEST_CODER, value);
    }
  }

  @Test
  public void testCoderSerializable() throws Exception {
    CoderProperties.coderSerializable(TEST_CODER);
  }

  @Test
  public void testCoderIsSerializableWithWellKnownCoderType() throws Exception {
    CoderProperties.coderSerializable(NullableCoder.of(GlobalWindow.Coder.INSTANCE));
  }

  /**
   * Generated data to check that the wire format has not changed. To regenerate, see {@code
   * PrintBase64Encodings}.
   *
   * @see org.apache.beam.sdk.coders.PrintBase64Encodings
   */
  private static final List<String> TEST_ENCODINGS =
      Arrays.asList(
          "AQ",
          "AWE",
          "ATEz",
          "AWhlbGxv",
          "AA",
          "AWEgbG9uZ2VyIHN0cmluZyB3aXRoIHNwYWNlcyBhbmQgYWxsIHRoYXQ",
          "AWEgc3RyaW5nIHdpdGggYSAKIG5ld2xpbmU",
          "AeOCueOCv-ODquODs-OCsA");

  @Test
  public void testWireFormatEncode() throws Exception {
    CoderProperties.coderEncodesBase64(TEST_CODER, TEST_VALUES, TEST_ENCODINGS);
  }

  // Encoded size is 1 byte (null marker) for null, marker + payload otherwise.
  @Test
  public void testEncodedSize() throws Exception {
    NullableCoder<Double> coder = NullableCoder.of(DoubleCoder.of());
    assertEquals(1, coder.getEncodedElementByteSize(null));
    assertEquals(9, coder.getEncodedElementByteSize(5.0));
  }

  @Test
  public void testEncodedSizeNested() throws Exception {
    NullableCoder<String> varLenCoder = NullableCoder.of(StringUtf8Coder.of());
    assertEquals(1, varLenCoder.getEncodedElementByteSize(null));
    assertEquals(6, varLenCoder.getEncodedElementByteSize("spam"));
  }

  @Test
  public void testObserverIsCheap() throws Exception {
    NullableCoder<Double> coder = NullableCoder.of(DoubleCoder.of());
    assertTrue(coder.isRegisterByteSizeObserverCheap(5.0));
  }

  @Test
  public void testObserverIsNotCheap() throws Exception {
    NullableCoder<List<String>> coder = NullableCoder.of(ListCoder.of(StringUtf8Coder.of()));
    assertFalse(coder.isRegisterByteSizeObserverCheap(ImmutableList.of("hi", "test")));
  }

  // For null the size is a known constant, so observation is always cheap.
  @Test
  public void testObserverIsAlwaysCheapForNullValues() throws Exception {
    NullableCoder<List<String>> coder = NullableCoder.of(ListCoder.of(StringUtf8Coder.of()));
    assertTrue(coder.isRegisterByteSizeObserverCheap(null));
  }

  @Test
  public void testStructuralValueConsistentWithEquals() throws Exception {
    CoderProperties.structuralValueConsistentWithEquals(TEST_CODER, null, null);
  }

  @Rule public ExpectedException thrown = ExpectedException.none();

  // A leading byte other than 0 (null) or 1 (present) must be rejected.
  @Test
  public void testDecodingError() throws Exception {
    thrown.expect(CoderException.class);
    thrown.expectMessage(
        equalTo("NullableCoder expects either a byte valued 0 (null) " + "or 1 (present), got 5"));
    InputStream input = new ByteArrayInputStream(new byte[] {5});
    TEST_CODER.decode(input, Coder.Context.OUTER);
  }

  // In the OUTER context the inner coder must see the whole remaining stream.
  @Test
  public void testSubcoderRecievesEntireStream() throws Exception {
    NullableCoder<String> coder = NullableCoder.of(new EntireStreamExpectingCoder());
    CoderProperties.coderDecodeEncodeEqualInContext(coder, Context.OUTER, null);
    CoderProperties.coderDecodeEncodeEqualInContext(coder, Context.OUTER, "foo");
  }

  // Wrapping an already-nullable coder must return the same instance.
  @Test
  public void testNestedNullableCoder() {
    NullableCoder<Double> coder = NullableCoder.of(DoubleCoder.of());
    assertThat(NullableCoder.of(coder), theInstance(coder));
  }

  @Test
  public void testEncodedTypeDescriptor() throws Exception {
    assertThat(TEST_CODER.getEncodedTypeDescriptor(), equalTo(TypeDescriptor.of(String.class)));
  }

  // Test helper: a coder that asserts it always receives the whole stream
  // (i.e. is only ever invoked in the OUTER context).
  private static class EntireStreamExpectingCoder extends AtomicCoder<String> {
    @Override
    public void encode(String value, OutputStream outStream) throws IOException {
      encode(value, outStream, Context.NESTED);
    }

    @Override
    public void encode(String value, OutputStream outStream, Context context) throws IOException {
      checkArgument(context.isWholeStream, "Expected to get entire stream");
      StringUtf8Coder.of().encode(value, outStream, context);
    }

    @Override
    public String decode(InputStream inStream) throws CoderException, IOException {
      return decode(inStream, Context.NESTED);
    }

    @Override
    public String decode(InputStream inStream, Context context) throws CoderException, IOException {
      checkArgument(context.isWholeStream, "Expected to get entire stream");
      return StringUtf8Coder.of().decode(inStream, context);
    }

    @Override
    public List<? extends Coder<?>> getCoderArguments() {
      return Collections.emptyList();
    }

    @Override
    public void verifyDeterministic() throws NonDeterministicException {}
  }
}
| apache-2.0 |
0909023/project_3_data | GMapsFX/src/test/java/com/lynden/gmapsfx/javascript/JavascriptObjectTest.java | 6119 | /*
* Copyright 2014 Lynden, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.lynden.gmapsfx.javascript;
import com.lynden.gmapsfx.javascript.object.GMapObjectType;
import com.lynden.gmapsfx.javascript.object.LatLong;
import com.lynden.gmapsfx.javascript.object.MapTypeIdEnum;
import netscape.javascript.JSObject;
import org.junit.After;
import org.junit.AfterClass;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import static org.mockito.Mockito.*;
/**
*
* @author Rob Terpilowski
*/
public class JavascriptObjectTest {

  IJavascriptRuntime mockJSRuntime;
  JSObject mockJsObject;
  JavascriptObject testJavascriptObject;

  public JavascriptObjectTest() {
  }

  @BeforeClass
  public static void setUpClass() {
  }

  @AfterClass
  public static void tearDownClass() {
  }

  // Installs a mocked runtime into the static JavascriptRuntime.runtime slot
  // and resets the shared object counter so variable names are predictable.
  @Before
  public void setUp() {
    mockJSRuntime = Mockito.mock(IJavascriptRuntime.class);
    mockJsObject = Mockito.mock(JSObject.class);
    JavascriptRuntime.runtime = mockJSRuntime;
    when(mockJSRuntime.execute(any(String.class))).thenReturn(mockJsObject);
    JavascriptObject.objectCounter = 0;
    testJavascriptObject = new JavascriptObject(GMapObjectType.OBJECT);
  }

  @After
  public void tearDown() {
  }

  @Test
  public void testGetJSObject() {
    assertEquals(mockJsObject, testJavascriptObject.getJSObject());
  }

  @Test
  public void testCreateJavascriptObject() {
    JavascriptObject jso = new JavascriptObject(GMapObjectType.OBJECT, mockJsObject);
    assertEquals( mockJsObject, jso.getJSObject());
  }

  // Variable names are derived from the class name plus the shared counter.
  @Test
  public void testGetNextVariableName() {
    JavascriptObject.objectCounter = 1;
    assertEquals( "JavascriptObject1", testJavascriptObject.getNextVariableName() );
  }

  @Test
  public void testGetVariableName() {
    assertEquals( "JavascriptObject0", testJavascriptObject.getVariableName());
  }

  @Test
  public void testSetProperty_Object() {
    testJavascriptObject.setProperty("MyBoolProp", Boolean.FALSE);
    verify(mockJsObject).setMember("MyBoolProp", Boolean.FALSE);
  }

  // JavascriptObject values must be unwrapped to their underlying JSObject.
  @Test
  public void testSetProperty_JavascriptObject() {
    LatLong latLong = new LatLong(1, 1);
    testJavascriptObject.setProperty("LatLong", latLong);
    verify(mockJsObject).setMember("LatLong", latLong.getJSObject());
  }

  // Enum values must be unwrapped to their string enum value.
  @Test
  public void testSetProperty_JavascriptEnum() {
    MapTypeIdEnum mapType = MapTypeIdEnum.TERRAIN;
    testJavascriptObject.setProperty("MapType", mapType);
    verify(mockJsObject).setMember("MapType", mapType.getEnumValue());
  }

  @Test
  public void testGetProperty() {
    when(mockJsObject.getMember("myprop")).thenReturn("myvalue");
    assertEquals( testJavascriptObject.getProperty("myprop"), "myvalue");
  }

  @Test
  public void testGetPropertyGeneric() {
    LatLong latLong = new LatLong(1, 1);
    when(mockJsObject.getMember("LatLong")).thenReturn(latLong);
    LatLong actualLatLong = testJavascriptObject.getProperty("LatLong", LatLong.class);
    assertEquals( latLong, actualLatLong);
  }

  @Test
  public void testGetPropertyGeneric_Null() {
    assertNull(testJavascriptObject.getProperty("foo", String.class));
  }

  @Test
  public void testInvokeJavascript() {
    String arg = "myArg";
    Object[] argArray = new Object[] {arg};
    String function = "myFunction";
    String result = "result";
    when(mockJsObject.call(function, argArray)).thenReturn(result);
    Object returnValue = testJavascriptObject.invokeJavascript(function, arg);
    assertEquals( result, returnValue );
  }

  // JavascriptObject arguments must be unwrapped before the underlying call.
  @Test
  public void testInvokeJavascript_JavascriptObject() {
    JSObject returnJSObject = mock(JSObject.class);
    JavascriptObject functionArg = mock(JavascriptObject.class);
    Object[] argArray = new Object[] {functionArg};
    Object[] jsCallArgArray = new Object[]{returnJSObject};
    String function = "myFunction";
    String result = "result";
    when(functionArg.getJSObject()).thenReturn(returnJSObject);
    when(mockJsObject.call(function, jsCallArgArray)).thenReturn(result);
    Object returnValue = testJavascriptObject.invokeJavascript(function, functionArg);
    assertEquals( result, returnValue );
  }

  @Test
  public void testInvokeJavascriptReturnValue() {
    String arg = "myArg";
    Object[] argArray = new Object[] {arg};
    String function = "myFunction";
    String result = "result";
    when(mockJsObject.call(function, argArray)).thenReturn(result);
    String returnValue = testJavascriptObject.invokeJavascriptReturnValue(function, String.class, arg);
    assertEquals( result, returnValue );
  }

  @Test
  public void testInvokeJavascriptReturnValue_Null() {
    String arg = "myArg";
    Object[] argArray = new Object[] {arg};
    String function = "myFunction";
    when(mockJsObject.call(function, argArray)).thenReturn(null);
    String returnValue = testJavascriptObject.invokeJavascriptReturnValue(function, String.class, arg);
    assertNull( returnValue );
  }
}
| apache-2.0 |
tarikgwa/test | html/dev/tests/functional/tests/app/Magento/Backend/Test/Page/AdminAuthLogin.php | 2284 | <?php
/**
* Copyright © 2015 Magento. All rights reserved.
* See COPYING.txt for license details.
*/
namespace Magento\Backend\Test\Page;
use Magento\Mtf\Client\Locator;
use Magento\Mtf\Factory\Factory;
use Magento\Mtf\Page\Page;
/**
* Login page for backend.
*/
class AdminAuthLogin extends Page
{
    /**
     * URL part for backend authorization.
     */
    const MCA = 'admin/auth/login';

    /**
     * CSS selector of the login form.
     *
     * @var string
     */
    protected $loginBlock = '#login-form';

    /**
     * CSS selector of the header panel of the admin dashboard.
     *
     * @var string
     */
    protected $headerBlock = '.page-header .admin-user';

    /**
     * CSS selector of the global messages block.
     *
     * @var string
     */
    protected $messagesBlock = '.messages';

    /**
     * Build the page URL from the configured backend base URL.
     */
    protected function initUrl()
    {
        $this->url = $_ENV['app_backend_url'] . self::MCA;
    }

    /**
     * Get the login form block.
     *
     * @return \Magento\Backend\Test\Block\Admin\Login
     */
    public function getLoginBlock()
    {
        return Factory::getBlockFactory()->getMagentoBackendAdminLogin(
            $this->browser->find($this->loginBlock, Locator::SELECTOR_CSS)
        );
    }

    /**
     * Get the header panel block of the admin dashboard.
     *
     * @return \Magento\Backend\Test\Block\Page\Header
     */
    public function getHeaderBlock()
    {
        return Factory::getBlockFactory()->getMagentoBackendPageHeader(
            $this->browser->find($this->headerBlock, Locator::SELECTOR_CSS)
        );
    }

    /**
     * Get the global messages block.
     *
     * @return \Magento\Backend\Test\Block\Messages
     */
    public function getMessagesBlock()
    {
        return Factory::getBlockFactory()->getMagentoBackendMessages($this->browser->find($this->messagesBlock));
    }

    /**
     * Block until the header panel becomes visible (i.e. login completed
     * and the dashboard rendered).
     *
     * @return void
     */
    public function waitForHeaderBlock()
    {
        $browser = $this->browser;
        $selector = $this->headerBlock;
        // waitUntil polls the callback; returning null keeps it waiting.
        $browser->waitUntil(
            function () use ($browser, $selector) {
                $item = $browser->find($selector);
                return $item->isVisible() ? true : null;
            }
        );
    }
}
| apache-2.0 |
fengshao0907/incubator-geode | gemfire-core/src/main/java/com/gemstone/gemfire/internal/OSProcess.java | 27333 | /*=========================================================================
* Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved.
* This product is protected by U.S. and international copyright
* and intellectual property laws. Pivotal products are covered by
* more patents listed at http://www.pivotal.io/patents.
*
*
*========================================================================
*/
package com.gemstone.gemfire.internal;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.CharArrayWriter;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.lang.management.LockInfo;
import java.lang.management.ManagementFactory;
import java.lang.management.MonitorInfo;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import java.util.zip.GZIPOutputStream;
import org.apache.logging.log4j.Logger;
import com.gemstone.gemfire.SystemFailure;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import com.gemstone.gemfire.internal.io.TeePrintStream;
import com.gemstone.gemfire.internal.logging.LogService;
import com.gemstone.gemfire.internal.logging.LoggingThreadGroup;
/**
 * Used to interact with operating system processes.
 * Use <code>exec</code> to create a new process by executing a command.
 * Use <code>kill</code> to kill a process.
 *
 * @author darrel
 *
 */
public class OSProcess {
  private static final Logger logger = LogService.getLogger();
  // System properties controlling redirectOutput(): redirection only happens
  // when ENABLE is set and DISABLE is not (see redirectOutput below).
  public static final String DISABLE_OUTPUT_REDIRECTION_PROPERTY = "gemfire.OSProcess.DISABLE_OUTPUT_REDIRECTION";
  public static final String ENABLE_OUTPUT_REDIRECTION_PROPERTY = "gemfire.OSProcess.ENABLE_OUTPUT_REDIRECTION";
  private static final boolean DISABLE_OUTPUT_REDIRECTION = Boolean.getBoolean(DISABLE_OUTPUT_REDIRECTION_PROPERTY);
  private static final boolean ENABLE_OUTPUT_REDIRECTION = Boolean.getBoolean(ENABLE_OUTPUT_REDIRECTION_PROPERTY);
  // true when running without the native library (pure-Java mode)
  static final boolean pureMode = PureJavaMode.isPure();
  static {
    if (!pureMode) {
      // native builds install a SIGQUIT handler so stacks can be dumped
      registerSigQuitHandler();
    }
  }
  /**
   * Starts a background command writing its stdout and stderr to
   * the specified log file.
   * <p>
   * Native implementation; only reachable when the native library is loaded
   * (see the non-pure branch of {@link #bgexec}).
   *
   * @param cmdarray An array of strings that specify the command to run.
   * The first element must be the executable.
   * Each additional command line argument should have its own entry
   * in the array.
   * @param workdir the current directory of the created process
   * @param logfile the file the created process will write
   * stdout and stderr to.
   * @param inheritLogfile can be set to false if the child process
   * is willing to create its own log file. Setting to false can help
   * on Windows because it keeps the child process from inheriting
   * handles from the parent.
   * @return the process id of the created process; -1 on failure
   * @exception IOException if a child process could not be created.
   */
  private static native int bgexecInternal(String[] cmdarray,
                                           String workdir,
                                           String logfile,
                                           boolean inheritLogfile)
    throws IOException;
  /**
   * Starts execution of the specified command and arguments in a separate
   * detached process in the specified
   * working directory writing output to the specified log file.
   * <p>
   * If there is a security manager, its <code>checkExec</code> method
   * is called with the first component of the array
   * <code>cmdarray</code> as its argument. This may result in a security
   * exception.
   * <p>
   * Given an array of strings <code>cmdarray</code>, representing the
   * tokens of a command line,
   * this method creates a new process in which to execute
   * the specified command.
   *
   * @param cmdarray array containing the command to call and its arguments.
   * @param workdir the current directory of the created process; null
   *        causes working directory to default to the current directory.
   * @param logfile the file the created process will write
   *        stdout and stderr to; null causes a default log file name
   *        to be used.
   * @param inheritLogfile can be set to false if the child process
   *        is willing to create its own log file. Setting to false can help
   *        on Windows because it keeps the child process from inheriting
   *        handles from the parent.
   * @param env any extra environment variables as key,value map;
   *        these will be in addition to those inherited from the
   *        parent process and will overwrite same keys
   * @return the process id of the created process; -1 on failure
   *         (always 0 when spawned via the pure-Java/shell path)
   * @exception SecurityException if the current thread cannot create a
   *            subprocess.
   * @see java.lang.SecurityException
   * @see java.lang.SecurityManager#checkExec(java.lang.String)
   */
  public static int bgexec(String cmdarray[],
                           File workdir,
                           File logfile, boolean inheritLogfile,
                           Map<String, String> env)
    throws IOException
  {
    // shell used for the pure-Java spawn path; overridable for systems
    // without bash
    String commandShell = System.getProperty("gemfire.commandShell", "bash");
    if (cmdarray.length == 0) {
      throw new java.lang.IndexOutOfBoundsException();
    }
    boolean isWindows = false;
    String os = System.getProperty("os.name");
    if (os != null) {
      if (os.indexOf("Windows") != -1) {
        isWindows = true;
      }
    }
    // Validate tokens; on Windows also resolve the .exe suffix and quote
    // arguments containing whitespace.
    for (int i = 0; i < cmdarray.length; i++) {
      if (cmdarray[i] == null) {
        throw new NullPointerException();
      }
      if (isWindows) {
        if (i == 0) {
          // do the following before quotes get added.
          File cmd = new File(cmdarray[0]);
          if (!cmd.exists()) {
            cmd = new File(cmdarray[0] + ".exe");
            if (cmd.exists()) {
              cmdarray[0] = cmd.getPath();
            }
          }
        }
        String s = cmdarray[i];
        if (i != 0) {
          if (s.length() == 0) {
            cmdarray[i] = "\"\""; // fix for bug 22207
          } else if ((s.indexOf(' ') >= 0 || s.indexOf('\t') >= 0)) {
            String unquotedS = s;
            if (s.indexOf('\"') != -1) {
              // Note that Windows provides no way to embed a double
              // quote in a double quoted string so need to remove
              // any internal quotes and let the outer quotes
              // preserve the whitespace.
              StringBuffer b = new StringBuffer(s);
              int quoteIdx = s.lastIndexOf('\"');
              while (quoteIdx != -1) {
                b.deleteCharAt(quoteIdx);
                quoteIdx = s.lastIndexOf('\"', quoteIdx-1);
              }
              unquotedS = b.toString();
            }
            // It has whitespace and its not quoted
            cmdarray[i] = '"' + unquotedS + '"';
          }
        }
      }
    }
    File cmd = new File(cmdarray[0]);
    if (!cmd.exists()) {
      throw new IOException(LocalizedStrings.OSProcess_THE_EXECUTABLE_0_DOES_NOT_EXIST.toLocalizedString(cmd.getPath()));
    }
    SecurityManager security = System.getSecurityManager();
    if (security != null) {
      security.checkExec(cmdarray[0]);
    }
    // Normalize the working directory: fall back to the current directory
    // when the requested one is missing or not a directory.
    if (workdir != null && !workdir.isDirectory()) {
      String curDir = new File("").getAbsolutePath();
      System.out.println( LocalizedStrings.OSProcess_WARNING_0_IS_NOT_A_DIRECTORY_DEFAULTING_TO_CURRENT_DIRECTORY_1.toLocalizedString(new Object[] {workdir, curDir}));
      workdir = null;
    }
    if (workdir == null) {
      workdir = new File("").getAbsoluteFile();
    }
    if (logfile == null) {
      logfile = File.createTempFile("bgexec", ".log", workdir);
    }
    if (!logfile.isAbsolute()) {
      // put it in the working directory
      logfile = new File(workdir, logfile.getPath());
    }
    // fix for bug 24575
    if (logfile.exists()) {
      // it already exists so make sure its a file and can be written
      if (!logfile.isFile()) {
        throw new IOException(LocalizedStrings.OSProcess_THE_LOG_FILE_0_WAS_NOT_A_NORMAL_FILE.toLocalizedString(logfile.getPath()));
      }
      if (!logfile.canWrite()) {
        throw new IOException(LocalizedStrings.OSProcess_NEED_WRITE_ACCESS_FOR_THE_LOG_FILE_0.toLocalizedString(logfile.getPath()));
      }
    } else {
      try {
        logfile.createNewFile();
      } catch (IOException io) {
        throw new IOException(LocalizedStrings.OSProcess_COULD_NOT_CREATE_LOG_FILE_0_BECAUSE_1.toLocalizedString(new Object[] {logfile.getPath(), io.getMessage()}));
      }
    }
    String trace = System.getProperty("com.gemstone.gemfire.internal.OSProcess.trace");
    if (trace != null && trace.length() > 0) {
      for (int i=0; i < cmdarray.length; i++) {
        System.out.println("cmdarray[" + i + "] = "
                           + cmdarray[i]);
      }
      System.out.println("workdir=" + workdir.getPath());
      System.out.println("logfile=" + logfile.getPath());
    }
    int result = 0;
    if (pureMode || (env != null && env.size() > 0)) {
      // Pure-Java path (also used whenever extra env vars are requested):
      // wrap the command in a shell invocation that detaches the child and
      // redirects its output to the log file.
      StringBuffer sb = new StringBuffer();
      Vector cmdVec = new Vector();
      //Add shell code to spawn a process silently
      if(isWindows) {
        cmdVec.add("cmd.exe");
        cmdVec.add("/c");
        sb.append("start /b \"\" ");
      } else {
        //to address issue with users that don't have bash shell installed
        if(commandShell.equals("bash")){
          cmdVec.add("bash");
          cmdVec.add("--norc");
          cmdVec.add("-c");
        }else{
          cmdVec.add(commandShell);
        }
      }
      //Add the actual command
      for (int i=0; i < cmdarray.length; i++) {
        if (i != 0) sb.append(" ");
        if(cmdarray[i].length() != 0 && cmdarray[i].charAt(0) == '\"') {
          //The token has already been quoted, see bug 40835
          sb.append(cmdarray[i]);
        } else {
          sb.append("\"");
          sb.append(cmdarray[i]);
          sb.append("\"");
        }
      }
      //Add the IO redirection code, this prevents hangs and IO blocking
      sb.append(" >> ");
      sb.append(logfile.getPath());
      sb.append(" 2>&1");
      if(isWindows) {
        sb.append(" <NUL");
      } else {
        sb.append(" </dev/null &");
      }
      cmdVec.add(sb.toString());
      String[] cmdStrings = (String[]) cmdVec.toArray( new String[0] );
      if (trace != null && trace.length() > 0) {
        for (int i=0; i < cmdStrings.length; i++) {
          System.out.println("cmdStrings[" + i + "] = "
                             + cmdStrings[i]);
        }
        System.out.println("workdir=" + workdir.getPath());
        System.out.println("logfile=" + logfile.getPath());
      }
      final ProcessBuilder procBuilder = new ProcessBuilder(cmdStrings);
      if (env != null && env.size() > 0) {
        // adjust the environment variables inheriting from parent
        procBuilder.environment().putAll(env);
      }
      procBuilder.directory(workdir);
      final Process process = procBuilder.start();
      // Close the pipes to the intermediate shell immediately; the real
      // child's I/O already goes to the log file.
      try { process.getInputStream().close(); } catch(IOException ignore){}
      try { process.getOutputStream().close(); } catch(IOException ignore){}
      try { process.getErrorStream().close(); } catch(IOException ignore){}
      try {
        // short count = 1000;
        boolean processIsStillRunning = true;
        while(processIsStillRunning) {
          Thread.sleep(10);
          try {
            process.exitValue();
            processIsStillRunning = false;
          } catch(IllegalThreadStateException itse) {
            // Ignore this, we are polling the exitStatus
            // instead of using the blocking Process#waitFor()
          }
        }
      } catch(InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
    } else {
      // Native path: fork/exec directly and register the child pid with
      // the reaper thread (when this platform needs one).
      result = bgexecInternal(cmdarray,
                              workdir.getPath(),
                              logfile.getPath(), inheritLogfile);
      if (result != -1) {
        if (pids != null) {
          pids.add(Integer.valueOf(result));
          if (trace != null && trace.length() > 0) {
            System.out.println("bgexec child pid is: " + result);
          }
        }
      }
    }
    return result; //Always 0 for pureJava
  }
/**
* Checks to make sure that we are operating on a valid process id.
* Sending signals to processes with <code>pid</code> 0 or -1 can
* have unintended consequences.
*
* @throws IllegalArgumentException
* If <code>pid</code> is not positive
*
* @since 4.0
*/
private static void checkPid(int pid) {
if (pid <= 0) {
throw new IllegalArgumentException(LocalizedStrings.OSProcess_SHOULD_NOT_SEND_A_SIGNAL_TO_PID_0.toLocalizedString(Integer.valueOf(pid)));
}
}
/**
* Ask a process to shut itself down.
* The process may catch and ignore this shutdown request.
* @param pid the id of the process to shutdown
* @return true if the request was sent to the process;
* false if the process does not exist or can not be asked to shutdown.
*/
static public boolean shutdown(int pid) {
if (pureMode) {
throw new RuntimeException(LocalizedStrings.OSProcess_SHUTDOWN_NOT_ALLOWED_IN_PURE_JAVA_MODE.toLocalizedString());
} else {
checkPid(pid);
return _shutdown(pid);
}
}
static private native boolean _shutdown(int pid);
/**
* Terminate a process without warning and without a chance of an
* orderly shutdown. This method should only be used as a last resort.
* The {@link #shutdown(int)} method should be used in most cases.
* @param pid the id of the process to kill
* @return true if the process was killed;
* false if it does not exist or can not be killed.
*/
static public boolean kill(int pid) {
if (pureMode) {
throw new RuntimeException(LocalizedStrings.OSProcess_KILL_NOT_ALLOWED_IN_PURE_JAVA_MODE.toLocalizedString());
} else {
checkPid(pid);
return _kill(pid);
}
}
static private native boolean _kill(int pid);
/**
* Tells a process to print its stacks to its standard output
* @param pid the id of the process that will print its stacks, or zero for the current process
* @return true if the process was told;
* false if it does not exist or can not be told.
*/
static public boolean printStacks(int pid) {
return printStacks(pid, false);
}
/**
* Tells a process to print its stacks to its standard output or the given log writer
* @param pid the id of the process that will print its stacks, or zero for the current process
* @param useNative if true we attempt to use native code, which goes to stdout
* @return true if the process was told;
* false if it does not exist or can not be told.
*/
static public boolean printStacks(int pid, boolean useNative) {
if (pureMode || !useNative) {
if (pid > 0 && pid != myPid[0]) {
return false;
}
CharArrayWriter cw = new CharArrayWriter(50000);
PrintWriter sb = new PrintWriter(cw, true);
sb.append("\n******** full thread dump ********\n");
ThreadMXBean bean = ManagementFactory.getThreadMXBean();
long[] threadIds = bean.getAllThreadIds();
ThreadInfo[] infos = bean.getThreadInfo(threadIds, true, true);
long thisThread = Thread.currentThread().getId();
for (int i=0; i<infos.length; i++) {
if (i != thisThread && infos[i] != null) {
formatThreadInfo(infos[i], sb);
}
}
sb.flush();
logger.warn(cw.toString());
return true;
} else {
if (pid < 0)
checkPid(pid);
return _printStacks(pid);
}
}
/** dumps this vm's stacks and returns gzipped result */
public static byte[] zipStacks() throws IOException {
ThreadMXBean bean = ManagementFactory.getThreadMXBean();
long[] threadIds = bean.getAllThreadIds();
ThreadInfo[] infos = bean.getThreadInfo(threadIds, true, true);
long thisThread = Thread.currentThread().getId();
ByteArrayOutputStream baos = new ByteArrayOutputStream(10000);
GZIPOutputStream zipOut = new GZIPOutputStream(baos, 10000);
PrintWriter pw = new PrintWriter(zipOut, true);
for (int i=0; i<infos.length; i++) {
if (i != thisThread && infos[i] != null) {
formatThreadInfo(infos[i], pw);
}
}
pw.flush();
zipOut.close();
byte[] result = baos.toByteArray();
return result;
}
  // native implementation used by printStacks when useNative is requested
  static private native boolean _printStacks(int pid);
  // cap on the number of stack frames formatThreadInfo prints per thread
  final static int MAX_STACK_FRAMES = 75;
  /**
   * Appends a human-readable description of one thread (header line, state,
   * capped stack trace, held monitors and ownable synchronizers) to the
   * given writer.
   */
  private static void formatThreadInfo(ThreadInfo t, PrintWriter pw) {
    // this is largely copied from the JDK's ThreadInfo.java, but it caps the
    // number of printed frames at MAX_STACK_FRAMES (the old comment's "8" was
    // stale).
    pw.append("\"" + t.getThreadName() + "\"" +
              " tid=0x" + Long.toHexString(t.getThreadId()));
    // this is in the stack trace elements so we don't need to add it
    // if (t.getLockName() != null) {
    //   pw.append(" ");
    //   pw.append(StringUtils.toLowerCase(t.getThreadState().toString()));
    //   pw.append(" on " + t.getLockName());
    // }
    // priority is not known
    // daemon status is not known
    if (t.isSuspended()) {
      pw.append(" (suspended)");
    }
    if (t.isInNative()) {
      pw.append(" (in native)");
    }
    if (t.getLockOwnerName() != null) {
      pw.append(" owned by \"" + t.getLockOwnerName() +
                "\" tid=0x" + Long.toHexString(t.getLockOwnerId()));
    }
    pw.append('\n');
    pw.append("   java.lang.Thread.State: " + t.getThreadState() + "\n");
    int i = 0;
    StackTraceElement[] stackTrace = t.getStackTrace();
    for (; i < stackTrace.length && i < MAX_STACK_FRAMES; i++) {
      StackTraceElement ste = stackTrace[i];
      pw.append("\tat " + ste.toString());
      pw.append('\n');
      // on the top frame, report what the thread is blocked/waiting on
      if (i == 0 && t.getLockInfo() != null) {
        Thread.State ts = t.getThreadState();
        switch (ts) {
          case BLOCKED:
            pw.append("\t- blocked on " + t.getLockInfo());
            pw.append('\n');
            break;
          case WAITING:
            pw.append("\t- waiting on " + t.getLockInfo());
            pw.append('\n');
            break;
          case TIMED_WAITING:
            pw.append("\t- waiting on " + t.getLockInfo());
            pw.append('\n');
            break;
          default:
        }
      }
      for (MonitorInfo mi : t.getLockedMonitors()) {
        if (mi.getLockedStackDepth() == i) {
          pw.append("\t- locked " + mi);
          pw.append('\n');
        }
      }
    }
    if (i < stackTrace.length) {
      // trace was truncated at MAX_STACK_FRAMES
      pw.append("\t...");
      pw.append('\n');
    }
    LockInfo[] locks = t.getLockedSynchronizers();
    if (locks.length > 0) {
      pw.append("\n\tNumber of locked synchronizers = " + locks.length);
      pw.append('\n');
      for (LockInfo li : locks) {
        pw.append("\t- " + li);
        pw.append('\n');
      }
    }
    pw.append('\n');
  }
  /**
   * Find out if a process exists.
   * @param pid the id of the process to check for
   * @return true if the process exists; false if it does not.
   */
  static public boolean exists(int pid) {
    if (pureMode) {
      throw new RuntimeException(LocalizedStrings.OSProcess_EXISTS_NOT_ALLOWED_IN_PURE_JAVA_MODE.toLocalizedString());
    }
    checkPid(pid);
    if (reapPid(pid)) {
      // the child has already died; drop it from the reaper's set
      try {
        pids.remove(Integer.valueOf(pid));
      } catch (Exception ignore) {}
      String trace = System.getProperty("com.gemstone.gemfire.internal.OSProcess.trace");
      if (trace != null && trace.length() > 0) {
        System.out.println("reaped pid: " + pid);
      }
    }
    return nativeExists(pid);
  }
  private static native boolean nativeExists(int pid);
  // Private stuff
  /**
   * Waits for a child process to die and reaps it.
   */
  static private native void waitForPid(int pid);
  /**
   * Waits until the identified process exits. If the process does
   * not exist then returns immediately.
   */
  static public void waitForPidToExit(int pid) {
    if (pureMode) {
      throw new RuntimeException(LocalizedStrings.OSProcess_WAITFORPIDTOEXIT_NOT_ALLOWED_IN_PURE_JAVA_MODE.toLocalizedString());
    }
    checkPid(pid);
    waitForPid(pid);
  }
  /**
   * Sets the current directory of this process.
   * @return true if current directory was set; false if not.
   */
  static public boolean setCurrentDirectory(File curDir)
  {
    if (pureMode) {
      throw new RuntimeException(LocalizedStrings.OSProcess_SETCURRENTDIRECTORY_NOT_ALLOWED_IN_PURE_JAVA_MODE.toLocalizedString());
    }
    return jniSetCurDir(curDir.getAbsolutePath());
  }
  /**
   * Returns true on success. Returns false and current directory
   * is unchanged on failure.
   */
  private static native boolean jniSetCurDir(String dir);
  /**
   * Reaps a child process if it has died.
   * Does not wait for the child.
   * @param pid the id of the process to reap
   * @return true if it was reaped or lost (someone else reaped it);
   * false if the child still exists.
   * HACK: If pid is -1 then returns true if this platform needs reaping.
   */
  protected static native boolean reapPid(int pid);
  private static Thread reaperThread;
  // pids of children spawned via the native bgexec path; periodically
  // reaped by reaperThread (null when the platform needs no reaping)
  protected static Set pids = null;
  // myPid caches result of getProcessId . To provide a stable processId
  // on Linux, where processId may differ per thread, we cache the
  // processId of the reaper thread .
  static final int[] myPid = new int[1]; // cache of my processId
  static boolean reaperStarted = false; // true if cache is valid
  /** On Linux, getProcessId returns the processId of the calling thread
   */
  static native int getProcessId();
  // Initializes the pid cache and, on platforms that need it, starts the
  // daemon reaper thread that periodically reaps dead bgexec children.
  static {
    if (pureMode) {
      //just initialize the pid cache
      synchronized (myPid) {
        int pid = 0;
        // Windows checks have been disabled as the ManagementFactory hack
        // to find the PID has been seen to work on Windows 7. Add checks
        // for more specific versions of Windows if this fails on them
        // if(! System.getProperty("os.name", "").startsWith("Windows")) {
        // RuntimeMXBean name is conventionally "pid@hostname"
        String name = java.lang.management.ManagementFactory.getRuntimeMXBean().getName();
        int idx = name.indexOf('@');
        try {
          pid = Integer.parseInt(name.substring(0,idx));
        } catch(NumberFormatException nfe) {
          //something changed in the RuntimeMXBean name
        }
        // }
        myPid[0] = pid;
        reaperStarted = true;
      }
    } else {
      if (reapPid(-1)) {
        pids = Collections.synchronizedSet(new HashSet());
        ThreadGroup group =
          LoggingThreadGroup.createThreadGroup(LocalizedStrings.OSProcess_REAPER_THREAD.toLocalizedString());
        reaperThread = new Thread(group, new Runnable() {
          public void run() {
            synchronized (myPid) {
              myPid[0] = getProcessId();
              reaperStarted = true;
            }
            String trace = System.getProperty("com.gemstone.gemfire.internal.OSProcess.trace");
            // NOTE: despite the name, this value is in MILLIseconds
            // (it is passed straight to Thread.sleep below)
            int secondsToSleep = (1000 * 60) * 1; // one minute
            if (trace != null && trace.length() > 0) {
              secondsToSleep = 1000; // every second
            }
            // reap all the pids we have every once in a while
            while (true) {
              SystemFailure.checkFailure();
              try {
                Iterator it = pids.iterator();
                while (it.hasNext()) {
                  Object o = it.next();
                  int pid = ((Integer)o).intValue();
                  if (reapPid(pid)) {
                    try {
                      it.remove();
                      if (trace != null && trace.length() > 0) {
                        System.out.println("reaped pid: "
                                           + pid);
                      }
                    } catch (Exception e) {
                      // make sure and remove it since it was
                      // reaped.
                      pids.remove(o);
                      if (trace != null && trace.length() > 0) {
                        System.out.println("reaped pid: "
                                           + pid);
                      }
                      throw e;
                    }
                  }
                }
                Thread.sleep(secondsToSleep);
              } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break;
              } catch (Exception e) {
                // e.printStackTrace(); // DEBUG
                // ignore
              }
            }
          }}, "osprocess reaper");
        reaperThread.setDaemon(true);
        reaperThread.start();
      } else {
        // platform does not need a reaper thread,
        // just initialize the pid cache
        synchronized (myPid) {
          myPid[0] = getProcessId();
          reaperStarted = true;
        }
      }
    }
  }
/**
* Get the vm's process id. On Linux, this returns the processId
* of the reaper thread. If we are in {@linkplain
* PureJavaMode#isPure pure Java mode}, then <code>0</code> is
* returned.
*
* @return the vm's process id.
*/
public static int getId() {
boolean done = false;
int result = -1;
for (;;) {
synchronized (myPid) {
done = reaperStarted;
result = myPid[0];
}
if (done)
break;
// wait for reaper thread to initialize myPid
try {
Thread.sleep(100);
}
catch (InterruptedException ignore) {
Thread.currentThread().interrupt();
}
}
return result;
}
  /**
   * Convenience overload of {@link #redirectOutput(File, boolean)} that
   * requests System.out/System.err be replaced (setOut == true).
   *
   * @param newOutput file the streams are appended to
   * @return the new PrintStream wrapping the file
   * @throws IOException if the file cannot be opened
   */
  public static PrintStream redirectOutput(File newOutput) throws IOException {
    return redirectOutput(newOutput, true);
  }
public static PrintStream redirectOutput(File newOutput, boolean setOut) throws IOException {
FileOutputStream newFileStream = null;
try {
newFileStream = new FileOutputStream(newOutput, true);
} catch (FileNotFoundException e) {
throw new IOException("File not found: " + newOutput, e);
}
final PrintStream newPrintStream = new PrintStream(new BufferedOutputStream(newFileStream, 128), true);
if (ENABLE_OUTPUT_REDIRECTION && !DISABLE_OUTPUT_REDIRECTION && setOut) {
System.setOut(newPrintStream);
if (System.err instanceof TeePrintStream) {
((TeePrintStream) System.err).getTeeOutputStream().setBranchOutputStream(new BufferedOutputStream(newFileStream, 128));
} else {
System.setErr(newPrintStream);
}
if (!pureMode) {
redirectCOutput(newOutput.getPath());
}
}
assert newPrintStream != null;
return newPrintStream;
}
  // native-level redirection of the process's C stdout/stderr to the file
  private static native void redirectCOutput(String file);
  /**
   * Registers a signal handler for SIGQUIT on UNIX platforms.
   */
  private static native void registerSigQuitHandler();
}
| apache-2.0 |
tarikgwa/test | html/dev/tests/functional/tests/app/Magento/Store/Test/Fixture/Store/GroupId.php | 1334 | <?php
/**
* Copyright © 2015 Magento. All rights reserved.
* See COPYING.txt for license details.
*/
namespace Magento\Store\Test\Fixture\Store;
use Magento\Mtf\Fixture\DataSource;
use Magento\Mtf\Fixture\FixtureFactory;
use Magento\Store\Test\Fixture\StoreGroup;
/**
 * Prepare StoreGroup for Store.
 */
class GroupId extends DataSource
{
    /**
     * StoreGroup fixture.
     *
     * @var StoreGroup
     */
    protected $storeGroup;

    /**
     * @constructor
     * @param FixtureFactory $fixtureFactory
     * @param array $params
     * @param array $data [optional]
     */
    public function __construct(FixtureFactory $fixtureFactory, array $params, array $data = [])
    {
        $this->params = $params;
        if (!isset($data['dataset'])) {
            return;
        }
        /** @var StoreGroup $storeGroup */
        $storeGroup = $fixtureFactory->createByCode('storeGroup', ['dataset' => $data['dataset']]);
        // persist only when the group has not been created yet
        if (!$storeGroup->getGroupId()) {
            $storeGroup->persist();
        }
        $this->storeGroup = $storeGroup;
        $this->data = $storeGroup->getWebsiteId() . "/" . $storeGroup->getName();
    }

    /**
     * Return StoreGroup fixture.
     *
     * @return StoreGroup
     */
    public function getStoreGroup()
    {
        return $this->storeGroup;
    }
}
| apache-2.0 |
flingone/frameworks_base_cmds_remoted | libs/boost/libs/iterator/test/minimum_category.cpp | 993 | // Copyright Andrey Semashev 2014.
//
// Use, modification and distribution is subject to
// the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <boost/iterator/minimum_category.hpp>
#include <boost/core/lightweight_test_trait.hpp>
#include <boost/type_traits/is_same.hpp>
#include <iterator>
using boost::is_same;
using boost::iterators::minimum_category;
// Verify that minimum_category yields the weaker of two iterator category
// tags regardless of argument order, and is the identity when both tags
// are the same.
int main(int, char*[])
{
    BOOST_TEST_TRAIT_TRUE((is_same<minimum_category<std::forward_iterator_tag, std::random_access_iterator_tag>::type, std::forward_iterator_tag>));
    BOOST_TEST_TRAIT_TRUE((is_same<minimum_category<std::random_access_iterator_tag, std::forward_iterator_tag>::type, std::forward_iterator_tag>));
    BOOST_TEST_TRAIT_TRUE((is_same<minimum_category<std::random_access_iterator_tag, std::random_access_iterator_tag>::type, std::random_access_iterator_tag>));
    return boost::report_errors();
}
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.