max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
348 |
<filename>docs/data/leg-t2/077/07710479.json
{"nom":"Vaires-sur-Marne","circ":"10ème circonscription","dpt":"Seine-et-Marne","inscrits":8559,"abs":5091,"votants":3468,"blancs":243,"nuls":113,"exp":3112,"res":[{"nuance":"REM","nom":"<NAME>","voix":1862},{"nuance":"FI","nom":"<NAME>","voix":1250}]}
| 124 |
353 |
package org.nutz.nop.test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.util.Date;

import org.junit.Test;
import org.nutz.http.Request.METHOD;
import org.nutz.http.Response;
import org.nutz.json.Json;
import org.nutz.json.JsonFormat;
import org.nutz.lang.Lang;
import org.nutz.lang.Times;
import org.nutz.lang.random.R;
import org.nutz.lang.util.NutMap;
import org.nutz.plugins.nop.client.NOPRequest;
/**
 * Integration tests for NOP POST endpoints (simple call, form args, arrays,
 * JSON object parameter and raw request body).
 *
 * <p>Each test now asserts that the HTTP round trip succeeded before checking
 * the response content. Previously every assertion was guarded by
 * {@code if (response.isOK())}, so a failed request made the test pass
 * vacuously instead of failing.
 */
public class NOPPOST extends Base {

    /** POST without parameters: the endpoint must answer with a non-null body. */
    @Test
    public void simple() {
        Response response = client.send(NOPRequest.create("/post/simple", METHOD.POST));
        assertTrue("POST /post/simple should succeed", response.isOK());
        System.err.println(response.getContent());
        assertNotNull(response.getContent());
    }

    /** POST with int/string/date form parameters: the server must echo them back. */
    @Test
    public void args() {
        int i = R.random(0, 100);
        String s = R.sg(10).next() + "中文";
        Date d = Times.now();
        Response response = client.send(NOPRequest.create("/post/args", METHOD.POST,
                NutMap.NEW().addv("i", i).addv("s", s).addv("d", Times.format("yyyy-MM-dd HH:mm:ss", d))));
        assertTrue("POST /post/args should succeed", response.isOK());
        NutMap data = Lang.map(response.getContent());
        System.err.println(data);
        assertEquals(i, data.getInt("i"));
        assertEquals(s, data.getString("s"));
        // compare formatted timestamps so sub-second precision differences are ignored
        assertEquals(Times.format("yyyy-MM-dd HH:mm:ss", d), Times.format("yyyy-MM-dd HH:mm:ss", data.getTime("d")));
    }

    /** POST with an array parameter: the echoed array must keep its length. */
    @Test
    public void array() {
        Integer[] ids = Lang.array(R.random(0, 100), R.random(0, 100), R.random(0, 100), R.random(0, 100));
        Response response = client.send(NOPRequest.create("/post/array", METHOD.POST, NutMap.NEW().addv("ids", ids)));
        assertTrue("POST /post/array should succeed", response.isOK());
        NutMap data = Lang.map(response.getContent());
        System.err.println(data);
        assertEquals(ids.length, data.getArray("ids", Integer.class).length);
    }

    /** POST with a JSON-serialized object passed as a form parameter. */
    @Test
    public void object() {
        NutMap data = NutMap.NEW().addv("id", 1).addv("name", "Kerbores").addv("birth", Times.now());
        Response response = client.send(NOPRequest.create("/post/object", METHOD.POST,
                NutMap.NEW().addv("n", Json.toJson(data, JsonFormat.compact()))));
        assertTrue("POST /post/object should succeed", response.isOK());
        NutMap temp = Lang.map(response.getContent());
        System.err.println(temp);
        assertNotNull(temp);
    }

    /** POST with a JSON payload sent as the raw request body. */
    @Test
    public void body() {
        NutMap data = NutMap.NEW().addv("id", 1).addv("name", "Kerbores").addv("birth", Times.now());
        Response response = client.send(NOPRequest.create("/post/body", METHOD.POST).setData(Json.toJson(data, JsonFormat.compact())));
        assertTrue("POST /post/body should succeed", response.isOK());
        NutMap temp = Lang.map(response.getContent());
        System.err.println(temp);
        assertNotNull(temp);
    }
}
| 1,069 |
3,897 |
<reponame>adelcrosge1/mbed-os
/*
* Copyright (c) 2017, Arm Limited and affiliates.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "SARA4_PPP.h"
#include "SARA4_PPP_CellularNetwork.h"
#include "CellularUtil.h"
#include "CellularLog.h"
using namespace mbed;
using namespace events;
using namespace mbed_cellular_util;
// Capability/property table for the SARA4 modem, indexed by the
// AT_CellularDevice property enum (one entry per PROPERTY_MAX slot);
// registered via set_cellular_properties() in the constructor.
static const intptr_t cellular_properties[AT_CellularDevice::PROPERTY_MAX] = {
    AT_CellularNetwork::RegistrationModeLAC,     // C_EREG
    AT_CellularNetwork::RegistrationModeLAC,     // C_GREG
    AT_CellularNetwork::RegistrationModeDisable, // C_REG
    0,  // AT_CGSN_WITH_TYPE
    0,  // AT_CGDATA
    1,  // AT_CGAUTH
    1,  // AT_CNMI
    1,  // AT_CSMP
    1,  // AT_CMGF
    1,  // AT_CSDH
    0,  // PROPERTY_IPV4_STACK
    0,  // PROPERTY_IPV6_STACK
    1,  // PROPERTY_IPV4V6_STACK
    0,  // PROPERTY_NON_IP_PDP_TYPE
    1,  // PROPERTY_AT_CGEREP
    1,  // PROPERTY_AT_COPS_FALLBACK_AUTO
    0,  // PROPERTY_SOCKET_COUNT
    0,  // PROPERTY_IP_TCP
    0,  // PROPERTY_IP_UDP
    0,  // PROPERTY_AT_SEND_DELAY
};
// Constructs the device on top of the given file handle (the serial link to
// the modem) and registers the SARA4-specific AT property table.
SARA4_PPP::SARA4_PPP(FileHandle *fh) : AT_CellularDevice(fh)
{
    set_cellular_properties(cellular_properties);
}
// Factory override: creates the SARA4-specific network implementation bound
// to the given AT handler.
AT_CellularNetwork *SARA4_PPP::open_network_impl(ATHandler &at)
{
    return new SARA4_PPP_CellularNetwork(at, *this);
}
/** Requests PSM (power save mode) from the network.
 *
 *  @param periodic_time requested periodic timer in seconds (0 together with
 *                       active_time == 0 disables PSM)
 *  @param active_time   requested active timer in seconds
 *  @return              last AT error state after sending the command
 *
 *  Both timers are encoded as 8-character "bit string" arguments to AT+CPSMS:
 *  the first 3 characters select the timer unit, the last 5 the timer value
 *  (see the 3GPP tables quoted below). The smallest unit that can still
 *  represent the requested number of seconds is chosen; the final unit clamps
 *  the value to the 5-bit maximum.
 */
nsapi_error_t SARA4_PPP::set_power_save_mode(int periodic_time, int active_time)
{
    _at.lock();

    if (periodic_time == 0 && active_time == 0) {
        // both timers zero -> switch PSM off entirely
        _at.at_cmd_discard("+CPSMS", "=0");
    } else {
        const int PSMTimerBits = 5;
        const int ie_value_max = 0x1f; // 5-bit timer value field, at most 31 units

        /**
        Table 10.5.163a/3GPP TS 24.008: GPRS Timer 3 information element
        Bits 5 to 1 represent the binary coded timer value.
        Bits 6 to 8 defines the timer value unit for the GPRS timer as follows:
        8 7 6
        0 0 0 value is incremented in multiples of 10 minutes
        0 0 1 value is incremented in multiples of 1 hour
        0 1 0 value is incremented in multiples of 10 hours
        0 1 1 value is incremented in multiples of 2 seconds
        1 0 0 value is incremented in multiples of 30 seconds
        1 0 1 value is incremented in multiples of 1 minute
        1 1 0 value is incremented in multiples of 320 hours (NOTE 1)
        1 1 1 value indicates that the timer is deactivated (NOTE 2).
        */
        char pt[8 + 1]; // periodic timer encoded as 3GPP IE bit string
        uint32_t encoded_periodic = 0;
        if (periodic_time <= 2 * ie_value_max) {
            // multiples of 2 seconds
            encoded_periodic = periodic_time / 2;
            strcpy(pt, "01100000");
        } else if (periodic_time <= 30 * ie_value_max) {
            // multiples of 30 seconds
            encoded_periodic = periodic_time / 30;
            strcpy(pt, "10000000");
        } else if (periodic_time <= 60 * ie_value_max) {
            // multiples of 1 minute
            encoded_periodic = periodic_time / 60;
            strcpy(pt, "10100000");
        } else if (periodic_time <= 10 * 60 * ie_value_max) {
            // multiples of 10 minutes
            encoded_periodic = periodic_time / (10 * 60);
            strcpy(pt, "00000000");
        } else if (periodic_time <= 60 * 60 * ie_value_max) {
            // multiples of 1 hour
            encoded_periodic = periodic_time / (60 * 60);
            strcpy(pt, "00100000");
        } else if (periodic_time <= 10 * 60 * 60 * ie_value_max) {
            // multiples of 10 hours
            encoded_periodic = periodic_time / (10 * 60 * 60);
            strcpy(pt, "01000000");
        } else {
            // multiples of 320 hours, clamped to the largest encodable value
            int units = periodic_time / (320 * 60 * 60);
            if (units > ie_value_max) {
                units = ie_value_max;
            }
            encoded_periodic = units;
            strcpy(pt, "11000000");
        }
        // fill the 5 value bits behind the 3 unit bits
        uint_to_binary_str(encoded_periodic, &pt[3], sizeof(pt) - 3, PSMTimerBits);
        pt[8] = '\0';

        /**
        Table 10.5.172/3GPP TS 24.008: GPRS Timer information element
        Bits 5 to 1 represent the binary coded timer value.
        Bits 6 to 8 defines the timer value unit for the GPRS timer as follows:
        8 7 6
        0 0 0 value is incremented in multiples of 2 seconds
        0 0 1 value is incremented in multiples of 1 minute
        0 1 0 value is incremented in multiples of decihours
        1 1 1 value indicates that the timer is deactivated.
        Other values shall be interpreted as multiples of 1 minute in this version of the protocol.
        */
        char at[8 + 1]; // active timer encoded as 3GPP IE bit string
        uint32_t encoded_active;
        if (active_time <= 2 * ie_value_max) {
            // multiples of 2 seconds
            encoded_active = active_time / 2;
            strcpy(at, "00000000");
        } else if (active_time <= 60 * ie_value_max) {
            // multiples of 1 minute
            encoded_active = (1 << 5) | (active_time / 60);
            strcpy(at, "00100000");
        } else {
            // multiples of decihours (6 minutes), clamped
            int units = active_time / (6 * 60);
            if (units > ie_value_max) {
                units = ie_value_max;
            }
            encoded_active = units;
            strcpy(at, "01000000");
        }
        uint_to_binary_str(encoded_active, &at[3], sizeof(at) - 3, PSMTimerBits);
        at[8] = '\0';

        // request for both GPRS and LTE
        _at.at_cmd_discard("+CPSMS", "=1,,,", "%s%s", pt, at);
        if (_at.get_last_error() != NSAPI_ERROR_OK) {
            tr_warn("Power save mode not enabled!");
        }
        // On success the network may still negotiate different values, but
        // that is fine since the granted timeout is not longer than requested.
    }
    return _at.unlock_return_error();
}
#if MBED_CONF_SARA4_PPP_PROVIDE_DEFAULT
#include "drivers/BufferedSerial.h"
// Provides the process-wide default cellular device: a lazily-constructed
// SARA4_PPP on top of a BufferedSerial configured from the mbed config values.
// RTS/CTS hardware flow control is enabled only when both pins are configured.
CellularDevice *CellularDevice::get_default_instance()
{
    static BufferedSerial serial(MBED_CONF_SARA4_PPP_TX, MBED_CONF_SARA4_PPP_RX, MBED_CONF_SARA4_PPP_BAUDRATE);
#if defined (MBED_CONF_SARA4_PPP_RTS) && defined (MBED_CONF_SARA4_PPP_CTS)
    tr_debug("SARA4_PPP flow control: RTS %d CTS %d", MBED_CONF_SARA4_PPP_RTS, MBED_CONF_SARA4_PPP_CTS);
    serial.set_flow_control(SerialBase::RTSCTS, MBED_CONF_SARA4_PPP_RTS, MBED_CONF_SARA4_PPP_CTS);
#endif
    // function-local statics: constructed on first call, shared afterwards
    static SARA4_PPP device(&serial);
    return &device;
}
#endif
| 3,648 |
14,668 |
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromeos/dbus/cups_proxy/fake_cups_proxy_client.h"
#include <utility>
#include "base/bind.h"
#include "base/callback.h"
#include "base/threading/thread_task_runner_handle.h"
namespace chromeos {
// Defaulted: the fake holds no state that needs explicit setup or teardown.
FakeCupsProxyClient::FakeCupsProxyClient() = default;
FakeCupsProxyClient::~FakeCupsProxyClient() = default;
void FakeCupsProxyClient::WaitForServiceToBeAvailable(
    dbus::ObjectProxy::WaitForServiceToBeAvailableCallback callback) {
  // Always report the service as available, but run |callback| asynchronously
  // (posted to the current task runner) so the caller is never re-entered.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::BindOnce(std::move(callback), true));
}
void FakeCupsProxyClient::BootstrapMojoConnection(
    base::ScopedFD fd,
    base::OnceCallback<void(bool success)> result_callback) {
  // The fake always reports a successful bootstrap; |fd| is ignored.
  std::move(result_callback).Run(/*success=*/true);
}
} // namespace chromeos
| 313 |
679 |
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#include "precompiled_dbaccess.hxx"
#include "recovery/dbdocrecovery.hxx"
#include "sdbcoretools.hxx"
#include "storagetextstream.hxx"
#include "subcomponentrecovery.hxx"
#include "subcomponents.hxx"
#include "dbastrings.hrc"
/** === begin UNO includes === **/
#include <com/sun/star/sdb/application/XDatabaseDocumentUI.hpp>
#include <com/sun/star/embed/ElementModes.hpp>
#include <com/sun/star/document/XStorageBasedDocument.hpp>
#include <com/sun/star/io/XTextOutputStream.hpp>
#include <com/sun/star/io/XTextInputStream.hpp>
#include <com/sun/star/io/XActiveDataSource.hpp>
#include <com/sun/star/io/XActiveDataSink.hpp>
#include <com/sun/star/util/XModifiable.hpp>
#include <com/sun/star/beans/XPropertySet.hpp>
/** === end UNO includes === **/
#include <comphelper/componentcontext.hxx>
#include <comphelper/namedvaluecollection.hxx>
#include <rtl/ustrbuf.hxx>
#include <tools/diagnose_ex.h>
#include <algorithm>
//........................................................................
namespace dbaccess
{
//........................................................................
/** === begin UNO using === **/
using ::com::sun::star::uno::Reference;
using ::com::sun::star::uno::XInterface;
using ::com::sun::star::uno::UNO_QUERY;
using ::com::sun::star::uno::UNO_QUERY_THROW;
using ::com::sun::star::uno::UNO_SET_THROW;
using ::com::sun::star::uno::Exception;
using ::com::sun::star::uno::RuntimeException;
using ::com::sun::star::uno::Any;
using ::com::sun::star::uno::makeAny;
using ::com::sun::star::uno::Sequence;
using ::com::sun::star::uno::Type;
using ::com::sun::star::embed::XStorage;
using ::com::sun::star::frame::XController;
using ::com::sun::star::sdb::application::XDatabaseDocumentUI;
using ::com::sun::star::lang::XComponent;
using ::com::sun::star::document::XStorageBasedDocument;
using ::com::sun::star::beans::PropertyValue;
using ::com::sun::star::io::XStream;
using ::com::sun::star::io::XTextOutputStream;
using ::com::sun::star::io::XActiveDataSource;
using ::com::sun::star::io::XTextInputStream;
using ::com::sun::star::io::XActiveDataSink;
using ::com::sun::star::frame::XModel;
using ::com::sun::star::util::XModifiable;
using ::com::sun::star::beans::XPropertySet;
using ::com::sun::star::lang::XMultiServiceFactory;
/** === end UNO using === **/
namespace ElementModes = ::com::sun::star::embed::ElementModes;
//====================================================================
//= helpers
//====================================================================
namespace
{
// .........................................................................
/** appends one storage-name -> component-descriptor entry to the given buffer,
    as a single ini-style line of the form "<storage>=<component-name>,<0|1>"
    (the trailing flag tells whether the component was open for editing)
*/
static void lcl_getPersistentRepresentation( const MapStringToCompDesc::value_type& i_rComponentDesc, ::rtl::OUStringBuffer& o_rBuffer )
{
    const SubComponentDescriptor& rDesc( i_rComponentDesc.second );
    o_rBuffer.append( i_rComponentDesc.first );
    o_rBuffer.append( sal_Unicode( '=' ) );
    o_rBuffer.append( rDesc.sName );
    o_rBuffer.append( sal_Unicode( ',' ) );
    o_rBuffer.append( sal_Unicode( rDesc.bForEditing ? '1' : '0' ) );
}
// .........................................................................
/** parses one line of the object map ini file, of the form
    "<storage-name>=<component-name>,<0|1>" — the counterpart of
    lcl_getPersistentRepresentation
    @return true if the line could be parsed, false (with an assertion) otherwise
*/
static bool lcl_extractCompDesc( const ::rtl::OUString& i_rIniLine, ::rtl::OUString& o_rStorName, SubComponentDescriptor& o_rCompDesc )
{
    // the '=' separates storage name and descriptor; pos 0 would mean an empty storage name
    const sal_Int32 nEqualSignPos = i_rIniLine.indexOf( sal_Unicode( '=' ) );
    if ( nEqualSignPos < 1 )
    {
        OSL_ENSURE( false, "lcl_extractCompDesc: invalid map file entry - unexpected pos of '='" );
        return false;
    }
    o_rStorName = i_rIniLine.copy( 0, nEqualSignPos );
    // the "for editing" flag is exactly one character after the last comma
    const sal_Int32 nCommaPos = i_rIniLine.lastIndexOf( sal_Unicode( ',' ) );
    if ( nCommaPos != i_rIniLine.getLength() - 2 )
    {
        OSL_ENSURE( false, "lcl_extractCompDesc: invalid map file entry - unexpected pos of ','" );
        return false;
    }
    // between '=' and ',': the component name
    o_rCompDesc.sName = i_rIniLine.copy( nEqualSignPos + 1, nCommaPos - nEqualSignPos - 1 );
    o_rCompDesc.bForEditing = ( i_rIniLine.getStr()[ nCommaPos + 1 ] == '1' );
    return true;
}
// .........................................................................
/** name of the sub storage (below the document root storage) which holds all recovery data */
static const ::rtl::OUString& lcl_getRecoveryDataSubStorageName()
{
    static const ::rtl::OUString s_sRecDataStorName( RTL_CONSTASCII_USTRINGPARAM( "recovery" ) );
    return s_sRecDataStorName;
}
// .........................................................................
/** name of the ini stream which maps sub storage names to component descriptors */
static const ::rtl::OUString& lcl_getObjectMapStreamName()
{
    static const ::rtl::OUString s_sObjectMapStreamName( RTL_CONSTASCII_USTRINGPARAM( "storage-component-map.ini" ) );
    return s_sObjectMapStreamName;
}
// .........................................................................
/** character encoding used for both reading and writing the object map stream */
static const ::rtl::OUString& lcl_getMapStreamEncodingName()
{
    static const ::rtl::OUString s_sMapStreamEncodingName( RTL_CONSTASCII_USTRINGPARAM( "UTF-8" ) );
    return s_sMapStreamEncodingName;
}
// .........................................................................
/** writes the storage-name -> component-descriptor map into the
    object map ini stream of the given storage, as a single "[storages]"
    section with one line per component (see lcl_getPersistentRepresentation)
*/
static void lcl_writeObjectMap_throw( const ::comphelper::ComponentContext& i_rContext, const Reference< XStorage >& i_rStorage,
    const MapStringToCompDesc& i_mapStorageToCompDesc )
{
    if ( i_mapStorageToCompDesc.empty() )
        // nothing to do
        return;
    StorageTextOutputStream aTextOutput( i_rContext, i_rStorage, lcl_getObjectMapStreamName() );
    aTextOutput.writeLine( ::rtl::OUString( RTL_CONSTASCII_USTRINGPARAM( "[storages]" ) ) );
    for ( MapStringToCompDesc::const_iterator stor = i_mapStorageToCompDesc.begin();
          stor != i_mapStorageToCompDesc.end();
          ++stor
        )
    {
        ::rtl::OUStringBuffer aLine;
        lcl_getPersistentRepresentation( *stor, aLine );
        aTextOutput.writeLine( aLine.makeStringAndClear() );
    }
    // terminating empty line
    aTextOutput.writeLine();
}
// .........................................................................
/** checks whether the given ini line opens a section ("[name]"), and extracts
    the section name into o_rSectionName if so
    @return true for a section header line, false otherwise
*/
static bool lcl_isSectionStart( const ::rtl::OUString& i_rIniLine, ::rtl::OUString& o_rSectionName )
{
    const sal_Int32 nLen = i_rIniLine.getLength();
    if ( nLen == 0 )
        return false;
    const sal_Unicode* pChars = i_rIniLine.getStr();
    if ( ( pChars[0] != '[' ) || ( pChars[ nLen - 1 ] != ']' ) )
        return false;
    o_rSectionName = i_rIniLine.copy( 1, nLen - 2 );
    return true;
}
// .........................................................................
/** removes a single trailing '\n' from the given line, in place, if present */
static void lcl_stripTrailingLineFeed( ::rtl::OUString& io_rLine )
{
    const sal_Int32 nLen = io_rLine.getLength();
    const bool bEndsWithLineFeed = ( nLen > 0 ) && ( io_rLine.getStr()[ nLen - 1 ] == '\n' );
    if ( bEndsWithLineFeed )
        io_rLine = io_rLine.copy( 0, nLen - 1 );
}
// .........................................................................
/** reads the storage-name -> component-descriptor map from the object map ini
    stream of the given storage — the counterpart of lcl_writeObjectMap_throw.
    Only the "[storages]" section is evaluated; other sections are skipped.
*/
static void lcl_readObjectMap_throw( const ::comphelper::ComponentContext& i_rContext, const Reference< XStorage >& i_rStorage,
    MapStringToCompDesc& o_mapStorageToObjectName )
{
    ENSURE_OR_THROW( i_rStorage.is(), "invalid storage" );
    if ( !i_rStorage->hasByName( lcl_getObjectMapStreamName() ) )
    { // nothing to do, though suspicious
        OSL_ENSURE( false, "lcl_readObjectMap_throw: if there's no map file, then there's expected to be no storage, too!" );
        return;
    }
    // wrap the ini stream into a text input stream, using the fixed UTF-8 encoding
    Reference< XStream > xIniStream( i_rStorage->openStreamElement(
        lcl_getObjectMapStreamName(), ElementModes::READ ), UNO_SET_THROW );
    Reference< XTextInputStream > xTextInput( i_rContext.createComponent( "com.sun.star.io.TextInputStream" ), UNO_QUERY_THROW );
    xTextInput->setEncoding( lcl_getMapStreamEncodingName() );
    Reference< XActiveDataSink > xDataSink( xTextInput, UNO_QUERY_THROW );
    xDataSink->setInputStream( xIniStream->getInputStream() );
    ::rtl::OUString sCurrentSection;
    // lines before the first section header are ignored, too
    bool bCurrentSectionIsKnownToBeUnsupported = true;
    while ( !xTextInput->isEOF() )
    {
        ::rtl::OUString sLine = xTextInput->readLine();
        lcl_stripTrailingLineFeed( sLine );
        // skip empty lines
        if ( sLine.getLength() == 0 )
            continue;
        if ( lcl_isSectionStart( sLine, sCurrentSection ) )
        {
            // a new section starts: re-check whether it is a supported one
            bCurrentSectionIsKnownToBeUnsupported = false;
            continue;
        }
        if ( bCurrentSectionIsKnownToBeUnsupported )
            continue;
        // the only section we support so far is "storages"
        if ( !sCurrentSection.equalsAscii( "storages" ) )
        {
            bCurrentSectionIsKnownToBeUnsupported = true;
            continue;
        }
        ::rtl::OUString sStorageName;
        SubComponentDescriptor aCompDesc;
        // malformed lines are skipped (lcl_extractCompDesc already asserted)
        if ( !lcl_extractCompDesc( sLine, sStorageName, aCompDesc ) )
            continue;
        o_mapStorageToObjectName[ sStorageName ] = aCompDesc;
    }
}
// .........................................................................
/** flags the given sub component as modified, so the recovered component
    reflects that it was saved from a modified state */
static void lcl_markModified( const Reference< XComponent >& i_rSubComponent )
{
    const Reference< XModifiable > xModify( i_rSubComponent, UNO_QUERY );
    if ( xModify.is() )
    {
        xModify->setModified( sal_True );
        return;
    }
    // component does not support XModifiable - nothing we can do here
    OSL_ENSURE( false, "lcl_markModified: unhandled case!" );
}
}
//====================================================================
//= DatabaseDocumentRecovery_Data
//====================================================================
/** internal data of a DatabaseDocumentRecovery instance; currently only holds
    the component context used for creating UNO services */
struct DBACCESS_DLLPRIVATE DatabaseDocumentRecovery_Data
{
    const ::comphelper::ComponentContext aContext;
    DatabaseDocumentRecovery_Data( const ::comphelper::ComponentContext& i_rContext )
        :aContext( i_rContext )
    {
    }
};
//====================================================================
//= DatabaseDocumentRecovery
//====================================================================
//--------------------------------------------------------------------
/** creates a recovery helper operating in the given component context */
DatabaseDocumentRecovery::DatabaseDocumentRecovery( const ::comphelper::ComponentContext& i_rContext )
    :m_pData( new DatabaseDocumentRecovery_Data( i_rContext ) )
{
}
//--------------------------------------------------------------------
DatabaseDocumentRecovery::~DatabaseDocumentRecovery()
{
}
//--------------------------------------------------------------------
/** saves recovery information for the open sub components of the given
    controllers into a dedicated "recovery" sub storage of the document storage

    Any previously existing recovery storage is discarded first, so the
    recovery data always reflects the current session state. Per component
    type, a map stream is written which associates sub storage names with
    component names/modes (see lcl_writeObjectMap_throw).
*/
void DatabaseDocumentRecovery::saveModifiedSubComponents( const Reference< XStorage >& i_rTargetStorage,
    const ::std::vector< Reference< XController > >& i_rControllers )
{
    ENSURE_OR_THROW( i_rTargetStorage.is(), "invalid document storage" );
    // create a sub storage for recovery data
    if ( i_rTargetStorage->hasByName( lcl_getRecoveryDataSubStorageName() ) )
        i_rTargetStorage->removeElement( lcl_getRecoveryDataSubStorageName() );
    Reference< XStorage > xRecoveryStorage = i_rTargetStorage->openStorageElement( lcl_getRecoveryDataSubStorageName(), ElementModes::READWRITE );
    // store recovery data for open sub components of the given controller(s)
    if ( !i_rControllers.empty() )
    {
        ENSURE_OR_THROW( i_rControllers.size() == 1, "can't handle more than one controller" );
        // At the moment, there can be only one view to a database document. If we ever allow for more than this,
        // then we need a concept for sub documents opened from different controllers (i.e. two document views,
        // and the user opens the very same form in both views). And depending on this, we need a concept for
        // how those are saved to the recovery file.
        MapCompTypeToCompDescs aMapCompDescs;
        for ( ::std::vector< Reference< XController > >::const_iterator ctrl = i_rControllers.begin();
              ctrl != i_rControllers.end();
              ++ctrl
            )
        {
            Reference< XDatabaseDocumentUI > xDatabaseUI( *ctrl, UNO_QUERY_THROW );
            Sequence< Reference< XComponent > > aComponents( xDatabaseUI->getSubComponents() );
            // save each sub component into the recovery storage; the
            // storage-name -> component mapping is collected in aMapCompDescs
            const Reference< XComponent >* component = aComponents.getConstArray();
            const Reference< XComponent >* componentEnd = aComponents.getConstArray() + aComponents.getLength();
            for ( ; component != componentEnd; ++component )
            {
                SubComponentRecovery aComponentRecovery( m_pData->aContext, xDatabaseUI, *component );
                aComponentRecovery.saveToRecoveryStorage( xRecoveryStorage, aMapCompDescs );
            }
        }
        // write, per component type, the map stream into that type's sub storage
        for ( MapCompTypeToCompDescs::const_iterator map = aMapCompDescs.begin();
              map != aMapCompDescs.end();
              ++map
            )
        {
            Reference< XStorage > xComponentsStor( xRecoveryStorage->openStorageElement(
                SubComponentRecovery::getComponentsStorageName( map->first ), ElementModes::WRITE | ElementModes::NOCREATE ) );
            lcl_writeObjectMap_throw( m_pData->aContext, xComponentsStor, map->second );
            tools::stor::commitStorageIfWriteable( xComponentsStor );
        }
    }
    // commit the recovery storage
    tools::stor::commitStorageIfWriteable( xRecoveryStorage );
}
//--------------------------------------------------------------------
/** re-creates the sub components which were saved to the document's
    "recovery" sub storage by a previous saveModifiedSubComponents call

    Every recovered component is marked as modified, and the recovery storage
    is removed from the document storage once recovery succeeded.
*/
void DatabaseDocumentRecovery::recoverSubDocuments( const Reference< XStorage >& i_rDocumentStorage,
    const Reference< XController >& i_rTargetController )
{
    ENSURE_OR_THROW( i_rDocumentStorage.is(), "illegal document storage" );
    Reference< XDatabaseDocumentUI > xDocumentUI( i_rTargetController, UNO_QUERY_THROW );
    if ( !i_rDocumentStorage->hasByName( lcl_getRecoveryDataSubStorageName() ) )
        // that's allowed
        return;
    // the "recovery" sub storage
    Reference< XStorage > xRecoveryStorage = i_rDocumentStorage->openStorageElement( lcl_getRecoveryDataSubStorageName(), ElementModes::READ );
    // read the map from sub storages to object names
    MapCompTypeToCompDescs aMapCompDescs;
    SubComponentType aKnownTypes[] = { TABLE, QUERY, FORM, REPORT, RELATION_DESIGN };
    for ( size_t i = 0; i < sizeof( aKnownTypes ) / sizeof( aKnownTypes[0] ); ++i )
    {
        if ( !xRecoveryStorage->hasByName( SubComponentRecovery::getComponentsStorageName( aKnownTypes[i] ) ) )
            continue;
        Reference< XStorage > xComponentsStor( xRecoveryStorage->openStorageElement(
            SubComponentRecovery::getComponentsStorageName( aKnownTypes[i] ), ElementModes::READ ) );
        lcl_readObjectMap_throw( m_pData->aContext, xComponentsStor, aMapCompDescs[ aKnownTypes[i] ] );
        xComponentsStor->dispose();
    }
    // recover all sub components as indicated by the map
    for ( MapCompTypeToCompDescs::const_iterator map = aMapCompDescs.begin();
          map != aMapCompDescs.end();
          ++map
        )
    {
        const SubComponentType eComponentType = map->first;
        // the storage for all components of the current type
        Reference< XStorage > xComponentsStor( xRecoveryStorage->openStorageElement(
            SubComponentRecovery::getComponentsStorageName( eComponentType ), ElementModes::READ ), UNO_QUERY_THROW );
        // loop through all components of this type
        for ( MapStringToCompDesc::const_iterator stor = map->second.begin();
              stor != map->second.end();
              ++stor
            )
        {
            const ::rtl::OUString sComponentName( stor->second.sName );
            if ( !xComponentsStor->hasByName( stor->first ) )
            {
                // map stream and storage content are out of sync: complain in a
                // non-product build, but don't fail the whole recovery
#if OSL_DEBUG_LEVEL > 0
                ::rtl::OStringBuffer message;
                message.append( "DatabaseDocumentRecovery::recoverSubDocuments: inconsistent recovery storage: storage '" );
                message.append( ::rtl::OUStringToOString( stor->first, RTL_TEXTENCODING_ASCII_US ) );
                message.append( "' not found in '" );
                message.append( ::rtl::OUStringToOString( SubComponentRecovery::getComponentsStorageName( eComponentType ), RTL_TEXTENCODING_ASCII_US ) );
                message.append( "', but required per map file!" );
                OSL_ENSURE( false, message.getStr() );
#endif
                continue;
            }
            // the controller needs to have a connection to be able to open sub components
            if ( !xDocumentUI->isConnected() )
                xDocumentUI->connect();
            // recover the single component
            Reference< XStorage > xCompStor( xComponentsStor->openStorageElement( stor->first, ElementModes::READ ) );
            SubComponentRecovery aComponentRecovery( m_pData->aContext, xDocumentUI, eComponentType );
            Reference< XComponent > xSubComponent( aComponentRecovery.recoverFromStorage( xCompStor, sComponentName, stor->second.bForEditing ) );
            // at the moment, we only store, during session save, sub components which are modified. So, set this
            // recovered sub component to "modified", too.
            lcl_markModified( xSubComponent );
        }
        xComponentsStor->dispose();
    }
    xRecoveryStorage->dispose();
    // now that we successfully recovered, removed the "recovery" sub storage
    try
    {
        i_rDocumentStorage->removeElement( lcl_getRecoveryDataSubStorageName() );
    }
    catch( const Exception& )
    {
        DBG_UNHANDLED_EXCEPTION();
    }
}
//........................................................................
} // namespace dbaccess
//........................................................................
| 8,135 |
30,023 |
<gh_stars>1000+
"""Test the init functions for AEH."""
from datetime import timedelta
import logging
from unittest.mock import patch
from azure.eventhub.exceptions import EventHubError
import pytest
from homeassistant.components import azure_event_hub
from homeassistant.components.azure_event_hub.const import CONF_SEND_INTERVAL, DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import STATE_ON
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from .conftest import FilterTest
from .const import AZURE_EVENT_HUB_PATH, BASIC_OPTIONS, CS_CONFIG_FULL, SAS_CONFIG_FULL
from tests.common import MockConfigEntry, async_fire_time_changed
_LOGGER = logging.getLogger(__name__)
async def test_import(hass):
    """Test the popping of the filter and further import of the config."""
    config = {
        DOMAIN: {
            "send_interval": 10,
            "max_delay": 10,
            "filter": {
                "include_domains": ["light"],
                "include_entity_globs": ["sensor.included_*"],
                "include_entities": ["binary_sensor.included"],
                "exclude_domains": ["light"],
                "exclude_entity_globs": ["sensor.excluded_*"],
                "exclude_entities": ["binary_sensor.excluded"],
            },
        }
    }
    # add the connection-string credentials to the domain config before setup
    config[DOMAIN].update(CS_CONFIG_FULL)
    assert await async_setup_component(hass, DOMAIN, config)
async def test_filter_only_config(hass):
    """Test the popping of the filter and further import of the config."""
    # a config that contains nothing but the filter (no credentials)
    entity_filter = {
        "include_domains": ["light"],
        "include_entity_globs": ["sensor.included_*"],
        "include_entities": ["binary_sensor.included"],
        "exclude_domains": ["light"],
        "exclude_entity_globs": ["sensor.excluded_*"],
        "exclude_entities": ["binary_sensor.excluded"],
    }
    config = {DOMAIN: {"filter": entity_filter}}
    assert await async_setup_component(hass, DOMAIN, config)
async def test_unload_entry(hass, entry, mock_create_batch):
    """Test being able to unload an entry.

    Queue should be empty, so adding events to the batch should not be called,
    this verifies that the unload, calls async_stop, which calls async_send and
    shuts down the hub.
    """
    assert await hass.config_entries.async_unload(entry.entry_id)
    # nothing was queued, so nothing may have been added to a batch
    mock_create_batch.add.assert_not_called()
    assert entry.state == ConfigEntryState.NOT_LOADED
async def test_failed_test_connection(hass, mock_get_eventhub_properties):
    """Test that a failing connection check puts the entry in SETUP_RETRY."""
    entry = MockConfigEntry(
        domain=azure_event_hub.DOMAIN,
        data=SAS_CONFIG_FULL,
        title="test-instance",
        options=BASIC_OPTIONS,
    )
    entry.add_to_hass(hass)
    # make the connection test (get_eventhub_properties) raise
    mock_get_eventhub_properties.side_effect = EventHubError("Test")
    await hass.config_entries.async_setup(entry.entry_id)
    assert entry.state == ConfigEntryState.SETUP_RETRY
async def test_send_batch_error(hass, entry_with_one_event, mock_send_batch):
    """Test a error in send_batch, including recovering at the next interval."""
    mock_send_batch.reset_mock()
    # first flush fails, the second one succeeds
    mock_send_batch.side_effect = [EventHubError("Test"), None]
    async_fire_time_changed(
        hass,
        utcnow() + timedelta(seconds=entry_with_one_event.options[CONF_SEND_INTERVAL]),
    )
    await hass.async_block_till_done()
    mock_send_batch.assert_called_once()
    mock_send_batch.reset_mock()
    # queue a new event; the next interval should retry and succeed
    hass.states.async_set("sensor.test2", STATE_ON)
    async_fire_time_changed(
        hass,
        utcnow() + timedelta(seconds=entry_with_one_event.options[CONF_SEND_INTERVAL]),
    )
    await hass.async_block_till_done()
    mock_send_batch.assert_called_once()
async def test_late_event(hass, entry_with_one_event, mock_create_batch):
    """Test the check on late events."""
    # pretend one hour has passed: the queued event is now too old,
    # so it should not be added to the batch
    with patch(
        f"{AZURE_EVENT_HUB_PATH}.utcnow",
        return_value=utcnow() + timedelta(hours=1),
    ):
        async_fire_time_changed(
            hass,
            utcnow()
            + timedelta(seconds=entry_with_one_event.options[CONF_SEND_INTERVAL]),
        )
        await hass.async_block_till_done()
        mock_create_batch.add.assert_not_called()
async def test_full_batch(hass, entry_with_one_event, mock_create_batch):
    """Test the full batch behaviour."""
    # The first add raises ValueError (batch full), after which the event is
    # re-added to a fresh batch -> two add calls in total.
    mock_create_batch.add.side_effect = [ValueError, None]
    send_interval = entry_with_one_event.options[CONF_SEND_INTERVAL]
    async_fire_time_changed(hass, utcnow() + timedelta(seconds=send_interval))
    await hass.async_block_till_done()
    assert mock_create_batch.add.call_count == 2
# Each case pairs a filter schema with (entity_id, expected add-call count)
# probes: 1 means the event passes the filter, 0 means it is filtered out.
@pytest.mark.parametrize(
    "filter_schema, tests",
    [
        (
            {
                "include_domains": ["light"],
                "include_entity_globs": ["sensor.included_*"],
                "include_entities": ["binary_sensor.included"],
            },
            [
                FilterTest("climate.excluded", 0),
                FilterTest("light.included", 1),
                FilterTest("sensor.excluded_test", 0),
                FilterTest("sensor.included_test", 1),
                FilterTest("binary_sensor.included", 1),
                FilterTest("binary_sensor.excluded", 0),
            ],
        ),
        (
            {
                "exclude_domains": ["climate"],
                "exclude_entity_globs": ["sensor.excluded_*"],
                "exclude_entities": ["binary_sensor.excluded"],
            },
            [
                FilterTest("climate.excluded", 0),
                FilterTest("light.included", 1),
                FilterTest("sensor.excluded_test", 0),
                FilterTest("sensor.included_test", 1),
                FilterTest("binary_sensor.included", 1),
                FilterTest("binary_sensor.excluded", 0),
            ],
        ),
        (
            {
                "include_domains": ["light"],
                "include_entity_globs": ["*.included_*"],
                "exclude_domains": ["climate"],
                "exclude_entity_globs": ["*.excluded_*"],
                "exclude_entities": ["light.excluded"],
            },
            [
                FilterTest("light.included", 1),
                FilterTest("light.excluded_test", 0),
                FilterTest("light.excluded", 0),
                FilterTest("sensor.included_test", 1),
                FilterTest("climate.included_test", 0),
            ],
        ),
        (
            {
                "include_entities": ["climate.included", "sensor.excluded_test"],
                "exclude_domains": ["climate"],
                "exclude_entity_globs": ["*.excluded_*"],
                "exclude_entities": ["light.excluded"],
            },
            [
                FilterTest("climate.excluded", 0),
                FilterTest("climate.included", 1),
                FilterTest("switch.excluded_test", 0),
                FilterTest("sensor.excluded_test", 1),
                FilterTest("light.excluded", 0),
                FilterTest("light.included", 1),
            ],
        ),
    ],
    ids=["allowlist", "denylist", "filtered_allowlist", "filtered_denylist"],
)
async def test_filter(hass, entry, tests, mock_create_batch):
    """Test different filters.

    Filter_schema is also a fixture which is replaced by the filter_schema
    in the parametrize and added to the entry fixture.
    """
    for test in tests:
        hass.states.async_set(test.entity_id, STATE_ON)
        async_fire_time_changed(
            hass, utcnow() + timedelta(seconds=entry.options[CONF_SEND_INTERVAL])
        )
        await hass.async_block_till_done()
        # count of batch adds tells whether the state change passed the filter
        assert mock_create_batch.add.call_count == test.expected_count
        mock_create_batch.add.reset_mock()
| 3,650 |
5,169 |
{
"name": "NearbyObjects",
"version": "1.0.0",
"summary": "Great library for searching objects along polyline.",
"description": "NearbyObjects helps you to find objects along MKPolyline with great performance.",
"homepage": "https://github.com/RomanIvaniv/NearbyObjects",
"license": "MIT",
"authors": {
"<NAME>": "<EMAIL>"
},
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://github.com/RomanIvaniv/NearbyObjects.git",
"tag": "1.0.0"
},
"source_files": "NearbyObjects/**/*",
"pushed_with_swift_version": "4.0"
}
| 222 |
2,603 |
<filename>FreeRTOS/Demo/T-HEAD_CB2201_CDK/csi/csi_driver/csky/hobbit3/devices.c
/*
* Copyright (C) 2017 C-SKY Microsystems Co., Ltd. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/******************************************************************************
* @file devices.c
* @brief source file for the devices
* @version V1.0
* @date 02. June 2017
******************************************************************************/
#include <soc.h>
#include <config.h>
#include <drv_usart.h>
#include <stdio.h>
#include <drv_timer.h>
#include <drv_rtc.h>
#include <drv_trng.h>
#include <drv_crc.h>
#include <drv_aes.h>
#include <drv_rsa.h>
#include <drv_eflash.h>
#include <drv_spi.h>
#include <drv_gpio.h>
#include "pin_name.h"
#include "pinmux.h"
//typedef int32_t int32_t;
/* Read a 32-bit memory-mapped register at 'addr' (volatile access;
 * uses a GCC statement expression to yield the value). */
#define readl(addr) \
    ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })
/* Write 32-bit value 'b' to the memory-mapped register at 'addr'. */
#define writel(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b))
/* Disabled USI controller tables: kept for reference only, nothing in
 * this #if 0 region is compiled. */
#if 0
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_usi_config[CONFIG_USI_NUM] = {
    {CSKY_USI0_BASE, USI0_IRQn},
    {CSKY_USI1_BASE, USI1_IRQn},
};

typedef struct {
    int32_t sclk;
    int32_t sd0;
    int32_t sd1;
    int32_t nss;
    uint16_t cfg_idx;    //idx of sg_usi_config[]
    uint16_t function;
} usi_pin_map_t;

const static usi_pin_map_t s_usi_pin_map[] = {
    {
        PA10_UART0CTS_USI0SCLK_SPU4_I2C0SCL,
        PA11_UART0RTS_USI0SD0_SPU5_I2C0SDA,
        PA12_XX_USI0SD1_XX_UART2RX,
        PA13_XX_USI0NSS_XX_UART2TX,
        0,
        1
    },
    {
        PA16_SPI0CS0_PWMTRIG0_XX_USI1SCLK,
        PA17_SPI0MOSI_PWMTRIG1_XX_USI1SD0,
        PA18_SPI0MISO_XX_SPU6_USI1SD1,
        PA19_SPI0SCK_FAULT_SPU7_USI1NSS,
        1,
        3
    },
};
#endif
/* UART controller register base / IRQ table, indexed by cfg_idx. */
struct {
    uint32_t base;
    uint32_t irq;
}
const static sg_usart_config[CONFIG_USART_NUM] = {
    {CSKY_UART0_BASE, UART0_IRQn},
    {CSKY_UART1_BASE, UART1_IRQn},
    {CSKY_UART2_BASE, UART2_IRQn},
    {CSKY_UART3_BASE, UART3_IRQn}
};

/* Maps a (tx, rx) pin pair onto a UART controller and the pinmux
 * alternate-function number that routes those pins to it. */
typedef struct {
    int32_t tx;
    int32_t rx;
#if 0
    int32_t cts;
    int32_t rts;
#endif
    uint16_t cfg_idx;    //idx of sg_usart_config[]
    uint16_t function;   // pinmux alternate-function selector
} usart_pin_map_t;

const static usart_pin_map_t s_usart_pin_map[] = {
    {
        PA8_UART0TX_XX_SPU2_SIROUT0,
        PA9_UART0RX_XX_SPU3_SIRIN0,
        0,
        0
    },
    {
        PA21_UART1TX_PWM1_SPU9_SIROUT1,
        PA20_UART1RX_PWM0_SPU8_SIRIN1,
        1,
        0
    },
    {
        /* UART1 is reachable through two different pin pairs; this entry
         * routes it to PA0/PA1 using alternate function 4. */
        PA0_I2C0SCL_SPI1CS1_SPU0_UART1TX,
        PA1_I2C0SDA_SPI1CS2_SPU1_UART1RX,
        1,
        4,
    },
    {
        PB0_UART2TX_XX_XX_SIROUT2,
        PB1_UART2RX_XX_XX_SIRIN2,
        2,
        0
    },
    {
        PB13_UART3TX_SPI1MISO_SPU29_SIROUT3,
        PB12_UART3RX_SPI1CS0_SPU28_SIRIN3,
        3,
        0
    }
};

/**
  \brief       Initialize USART pins and look up the controller they map to.
  \param[in]   tx    TX pin name
  \param[in]   rx    RX pin name
  \param[out]  base  register base of the matching UART controller
  \param[out]  irq   interrupt number of the matching UART controller
  \return      controller index (cfg_idx) on success, -1 if the (tx, rx)
               pair matches no table entry
*/
int32_t target_usart_init(pin_t tx, pin_t rx, uint32_t *base, uint32_t *irq)
{
    uint32_t idx;

    for (idx = 0; idx < sizeof(s_usart_pin_map) / sizeof(usart_pin_map_t); idx++) {
        if (s_usart_pin_map[idx].tx == tx && s_usart_pin_map[idx].rx == rx) {
            *base = sg_usart_config[s_usart_pin_map[idx].cfg_idx].base;
            *irq = sg_usart_config[s_usart_pin_map[idx].cfg_idx].irq;

            /*pinmux*/
            pin_mux(s_usart_pin_map[idx].tx, s_usart_pin_map[idx].function);
            pin_mux(s_usart_pin_map[idx].rx, s_usart_pin_map[idx].function);
            return s_usart_pin_map[idx].cfg_idx;
        }
    }

    return -1;
}
/**
  \brief       control usart flow.
  \param[in]   tx_flow  The TX flow pin name
  \param[in]   rx_flow  The RX flow pin name
  \param[in]   flag     0-disable, 1-enable.
  \return      0 if setting ready, negative for error code
  \note        The real implementation is compiled out (#if 0) because the
               cts/rts members of usart_pin_map_t are disabled above; the
               function currently always reports success.
*/
int32_t target_usart_flowctrl_init(int32_t tx_flow, int32_t rx_flow, uint32_t flag)
{
#if 0
    uint32_t idx;

    for (idx = 0; idx < sizeof(s_usart_pin_map) / sizeof(usart_pin_map_t); idx++) {
        if ((s_usart_pin_map[idx].cts == tx_flow) &&(s_usart_pin_map[idx].rts == rx_flow))
            break;
    }

    if (idx >= sizeof(s_usart_pin_map) / sizeof(usart_pin_map_t)) {
        return -1;
    }

    if ((s_usart_pin_map[idx].cts == tx_flow) && flag) {
        pin_mux(s_usart_pin_map[idx].cts, s_usart_pin_map[idx].function);
    } else if ((s_usart_pin_map[idx].cts == tx_flow) && (flag == 0)) {
        pin_mux(s_usart_pin_map[idx].cts, 0xff);
    } else {
        return -1;
    }

    if ((s_usart_pin_map[idx].rts == rx_flow) && flag) {
        pin_mux(s_usart_pin_map[idx].rts, s_usart_pin_map[idx].function);
    } else if ((s_usart_pin_map[idx].rts == rx_flow) && (flag == 0)) {
        pin_mux(s_usart_pin_map[idx].rts, 0xff);
    } else {
        return -1;
    }

    return 0;
#endif
    return 0;
}
/* GPIO port register base / IRQ / pin-count table. */
struct {
    uint32_t base;
    uint32_t irq;
    uint32_t pin_num;
    port_name_t port;
}
const sg_gpio_config[CONFIG_GPIO_NUM] = {
    {CSKY_GPIO0_BASE, GPIOA_IRQn, 32, PORTA},
    {CSKY_GPIO1_BASE, GPIOB_IRQn, 16, PORTB}
};

/* Associates every muxable pin with the GPIO port (sg_gpio_config index)
 * that owns it: PAx -> port 0, PBx -> port 1. */
typedef struct {
    int32_t gpio_pin;
    uint32_t cfg_idx;    //idx of sg_gpio_config[]
} gpio_pin_map_t;

const static gpio_pin_map_t s_gpio_pin_map[] = {
    {PA0_I2C0SCL_SPI1CS1_SPU0_UART1TX, 0},
    {PA1_I2C0SDA_SPI1CS2_SPU1_UART1RX, 0},
    {PA2_QSPI0CLK_XX_XX_XX, 0},
    {PA3_QSPI0MISO_XX_XX_XX, 0},
    {PA4_QSPI0MOSI_XX_XX_XX, 0},
    {PA5_QSPI0HOLD_XX_XX_XX, 0},
    {PA6_QSPI0WP_XX_XX_XX, 0},
    {PA7_QSPI0CS0_XX_XX_XX, 0},
    {PA8_UART0TX_XX_SPU2_SIROUT0, 0},
    {PA9_UART0RX_XX_SPU3_SIRIN0, 0},
    {PA10_UART0CTS_USI0SCLK_SPU4_I2C0SCL, 0},
    {PA11_UART0RTS_USI0SD0_SPU5_I2C0SDA, 0},
    {PA12_XX_USI0SD1_XX_UART2RX, 0},
    {PA13_XX_USI0NSS_XX_UART2TX, 0},
    {PA14_SPI0CS2_FAULT_I2C1SDA_XX, 0},
    {PA15_SPI0CS1_XX_I2C1SCL_XX, 0},
    {PA16_SPI0CS0_PWMTRIG0_XX_USI1SCLK, 0},
    {PA17_SPI0MOSI_PWMTRIG1_XX_USI1SD0, 0},
    {PA18_SPI0MISO_XX_SPU6_USI1SD1, 0},
    {PA19_SPI0SCK_FAULT_SPU7_USI1NSS, 0},
    {PA20_UART1RX_PWM0_SPU8_SIRIN1, 0},
    {PA21_UART1TX_PWM1_SPU9_SIROUT1, 0},
    {PA22_UART1CTS_PWM2_SPU10_XX, 0},
    {PA23_UART1RTS_PWM3_SPU11_XX, 0},
    {PA24_USI1NSS_PWM4_SPU12_XX, 0},
    {PA25_USI1SD1_PWM5_SPU13_XX, 0},
    {PA26_USI1SD0_PWM6_SPU14_XX, 0},
    {PA27_USI1SCLK_PWM7_SPU15_XX, 0},
    {PA28_I2C1SCL_PWM8_SPU16_XX, 0},
    {PA29_I2C1SDA_PWM9_SPU17_XX, 0},
    {PA30_I2C0SDA_PWM10_SPU18_XX, 0},
    {PA31_I2C0SCL_PWM11_SPU19_XX, 0},
    {PB0_UART2TX_XX_XX_SIROUT2, 1},
    {PB1_UART2RX_XX_XX_SIRIN2, 1},
    {PB2_UART2RTS_XX_XX_XX, 1},
    {PB3_UART2CTS_XX_XX_XX, 1},
    {PB4_XX_XX_SPU20_UART3TX, 1},
    {PB5_QSPI1CS1_XX_SPU21_UART3RX, 1},
    {PB6_QSPI1WP_XX_SPU22_XX, 1},
    {PB7_QSPI1HOLD_XX_SPU23_XX, 1},
    {PB8_QSPI1CS0_PWMTRIG0_SPU24_XX, 1},
    {PB9_QSPI1MOSI_PWMTRIG1_SPU25_XX, 1},
    {PB10_QSPI1MISO_XX_SPU26_I2C1SDA, 1},
    {PB11_QSPI1CLK_XX_SPU27_I2C1SCL, 1},
    {PB12_UART3RX_SPI1CS0_SPU28_SIRIN3, 1},
    {PB13_UART3TX_SPI1MISO_SPU29_SIROUT3, 1},
    {PB14_UART3RTS_SPI1MOSI_SPU30_XX, 1},
    {PB15_UART3CTS_SPI1SCK_SPU31_XX, 1}
};

/**
  \brief       Look up a GPIO port by name.
  \param[in]   port     port name (PORTA / PORTB)
  \param[out]  base     port register base
  \param[out]  irq      port interrupt number
  \param[out]  pin_num  number of pins on the port
  \return      port index on success, -1 if the port is unknown
*/
int32_t target_gpio_port_init(port_name_t port, uint32_t *base, uint32_t *irq, uint32_t *pin_num)
{
    int i;

    for (i = 0; i < CONFIG_GPIO_NUM; i++) {
        if (sg_gpio_config[i].port == port) {
            *base = sg_gpio_config[i].base;
            *irq = sg_gpio_config[i].irq;
            *pin_num = sg_gpio_config[i].pin_num;
            return i;
        }
    }

    return -1;
}

/**
  \brief       Configure a pin as GPIO and report which port owns it.
  \param[in]   gpio_pin  pin name from pin_name.h
  \param[out]  port_idx  index of the owning port in sg_gpio_config[]
  \return      index of the pin in s_gpio_pin_map[] on success, -1 if the
               pin is not in the table
*/
int32_t target_gpio_pin_init(int32_t gpio_pin, uint32_t *port_idx)
{
    uint32_t idx;

    for (idx = 0; idx < sizeof(s_gpio_pin_map) / sizeof(gpio_pin_map_t); idx++) {
        if (s_gpio_pin_map[idx].gpio_pin == gpio_pin) {
            *port_idx = s_gpio_pin_map[idx].cfg_idx;

            /*pinmux*/
            /* 0xff appears to deselect the alternate function so the pin
             * acts as plain GPIO -- confirm against the pinmux driver. */
            pin_mux(s_gpio_pin_map[idx].gpio_pin, 0xff);
            return idx;
        }
    }

    return -1;
}
/* Timer register base / IRQ table. Each TIMx block hosts two timers whose
 * register windows are 0x14 bytes apart. */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_timer_config[CONFIG_TIMER_NUM] = {
    {CSKY_TIM0_BASE, TIMA0_IRQn},
    {CSKY_TIM0_BASE + 0x14, TIMA1_IRQn},
    {CSKY_TIM1_BASE, TIMB0_IRQn},
    {CSKY_TIM1_BASE + 0x14, TIMB1_IRQn},
    {CSKY_TIM2_BASE, TIM34567_IRQn},
    {CSKY_TIM2_BASE + 0x14, TIM34567_IRQn},
    {CSKY_TIM3_BASE, TIM34567_IRQn},
    {CSKY_TIM3_BASE + 0x14, TIM34567_IRQn},
    {CSKY_TIM4_BASE, TIM34567_IRQn},
    {CSKY_TIM4_BASE + 0x14, TIM34567_IRQn},
    {CSKY_TIM5_BASE, TIM34567_IRQn},
    {CSKY_TIM5_BASE + 0x14, TIM34567_IRQn},
    {CSKY_TIM6_BASE, TIM34567_IRQn},
    {CSKY_TIM6_BASE + 0x14, TIM34567_IRQn},
};

/**
  \brief   get timer instance count.
  \return  number of timer instances
*/
int32_t target_get_timer_count(void)
{
    return CONFIG_TIMER_NUM;
}

/**
  \brief       look up a timer instance.
  \param[in]   idx   timer index, must be < target_get_timer_count()
  \param[out]  base  timer register base
  \param[out]  irq   timer interrupt number
  \return      idx on success, -1 on out-of-range index
*/
int32_t target_get_timer(int32_t idx, uint32_t *base, uint32_t *irq)
{
    if (idx >= target_get_timer_count()) {
        /* Bug fix: was 'return NULL'. NULL evaluates to 0, which is a valid
         * timer index, so callers could not distinguish failure from timer
         * 0 (and returning a pointer constant from an int32_t function is
         * wrong). Use -1, the error convention of this file. */
        return -1;
    }

    *base = sg_timer_config[idx].base;
    *irq = sg_timer_config[idx].irq;
    return idx;
}
/* SHA accelerator register base / IRQ table. */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_sha_config[CONFIG_SHA_NUM] = {
    {CSKY_SHA_BASE, SHA_IRQn}
};

/**
  \brief   get sha instance count.
  \return  sha instance count
*/
int32_t target_get_sha_count(void)
{
    return CONFIG_SHA_NUM;
}

/**
  \brief       look up a sha instance.
  \param[in]   idx   instance index, must be < target_get_sha_count()
  \param[out]  base  sha register base
  \param[out]  irq   sha interrupt number
  \return      idx on success, -1 on out-of-range index
*/
int32_t target_get_sha(int32_t idx, uint32_t *base, uint32_t *irq)
{
    if (idx >= target_get_sha_count()) {
        /* Bug fix: was 'return NULL' (== 0, a valid index); use -1. */
        return -1;
    }

    *base = sg_sha_config[idx].base;
    *irq = sg_sha_config[idx].irq;
    return idx;
}
/* RSA accelerator register base / IRQ table. */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_rsa_config[CONFIG_RSA_NUM] = {
    {CSKY_RSA_BASE, RSA_IRQn}
};

/**
  \brief   get rsa instance count.
  \return  rsa instance count
*/
int32_t target_get_rsa_count(void)
{
    return CONFIG_RSA_NUM;
}

/**
  \brief       look up an rsa instance.
  \param[in]   idx   instance index, must be < target_get_rsa_count()
  \param[out]  base  rsa register base
  \param[out]  irq   rsa interrupt number
  \return      idx on success, -1 on out-of-range index
*/
int32_t target_get_rsa(int32_t idx, uint32_t *base, uint32_t *irq)
{
    if (idx >= target_get_rsa_count()) {
        /* Bug fix: was 'return NULL' (== 0, a valid index); use -1. */
        return -1;
    }

    *base = sg_rsa_config[idx].base;
    *irq = sg_rsa_config[idx].irq;
    return idx;
}
/* AES accelerator register base / IRQ table. */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_aes_config[CONFIG_AES_NUM] = {
    {CSKY_AES_BASE, AES_IRQn}
};

/**
  \brief   get aes instance count.
  \return  aes instance count
*/
int32_t target_get_aes_count(void)
{
    return CONFIG_AES_NUM;
}

/**
  \brief       look up an aes instance.
  \param[in]   idx   instance index, must be < target_get_aes_count()
  \param[out]  base  aes register base
  \param[out]  irq   aes interrupt number
  \return      idx on success, -1 on out-of-range index
*/
int32_t target_get_aes(int32_t idx, uint32_t *base, uint32_t *irq)
{
    if (idx >= target_get_aes_count()) {
        /* Bug fix: was 'return NULL' (== 0, a valid index); use -1. */
        return -1;
    }

    *base = sg_aes_config[idx].base;
    *irq = sg_aes_config[idx].irq;
    return idx;
}
/* TRNG register base / IRQ table. */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_trng_config[CONFIG_TRNG_NUM] = {
    {CSKY_TRNG_BASE, TRNG_IRQn}
};

/**
  \brief       look up a trng instance.
  \param[in]   idx   instance index, must be < CONFIG_TRNG_NUM
  \param[out]  base  trng register base
  \return      idx on success, -1 on out-of-range index
*/
int32_t target_get_trng(int32_t idx, uint32_t *base)
{
    /* Robustness fix: the original indexed the table unconditionally; an
     * out-of-range idx read past sg_trng_config. Every other target_get_*
     * helper in this file validates idx, so do the same here. */
    if (idx < 0 || idx >= CONFIG_TRNG_NUM) {
        return -1;
    }

    *base = sg_trng_config[idx].base;
    return idx;
}
/* CRC unit register base table (no interrupt line). */
struct {
    uint32_t base;
}
const sg_crc_config[CONFIG_CRC_NUM] = {
    {CSKY_CRC_BASE}
};

/**
  \brief       look up a crc instance.
  \param[in]   idx   instance index, must be < CONFIG_CRC_NUM
  \param[out]  base  crc register base
  \return      idx on success, -1 on out-of-range index
*/
int32_t target_get_crc(int32_t idx, uint32_t *base)
{
    /* Robustness fix: validate idx before indexing the table, matching the
     * other target_get_* helpers in this file. */
    if (idx < 0 || idx >= CONFIG_CRC_NUM) {
        return -1;
    }

    *base = sg_crc_config[idx].base;
    return idx;
}
/* I2C controller register base / IRQ table. */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_iic_config[CONFIG_IIC_NUM] = {
    {CSKY_I2C0_BASE, I2C0_IRQn},
    {CSKY_I2C1_BASE, I2C1_IRQn}
};

/* Maps an I2C controller index onto its SCL/SDA pins and the pinmux
 * alternate-function number. */
typedef struct {
    int32_t scl;
    int32_t sda;
    uint16_t cfg_idx;    //idx of sg_iic_config[]
    uint16_t function;
} iic_pin_map_t;

const static iic_pin_map_t s_iic_pin_map[] = {
    {
        PA31_I2C0SCL_PWM11_SPU19_XX,
        PA30_I2C0SDA_PWM10_SPU18_XX,
        0,
        0
    },
    {
        PA28_I2C1SCL_PWM8_SPU16_XX,
        PA29_I2C1SDA_PWM9_SPU17_XX,
        1,
        0
    }
};

/**
  \brief       Initialize an I2C instance: mux its pins and return its
               register base and interrupt number.
  \param[in]   idx   I2C controller index, must be < CONFIG_IIC_NUM
  \param[out]  base  controller register base
  \param[out]  irq   controller interrupt number
  \return      cfg_idx on success, -1 on out-of-range index
               (a negative idx also fails: the signed/unsigned comparison
               promotes it to a huge unsigned value)
*/
int32_t target_iic_init(int32_t idx, uint32_t *base, uint32_t *irq)
{
    if (idx >= sizeof(s_iic_pin_map) / sizeof(iic_pin_map_t)) {
        return -1;
    }

    *base = sg_iic_config[s_iic_pin_map[idx].cfg_idx].base;
    *irq = sg_iic_config[s_iic_pin_map[idx].cfg_idx].irq;

    /*pinmux*/
    pin_mux(s_iic_pin_map[idx].scl, s_iic_pin_map[idx].function);
    pin_mux(s_iic_pin_map[idx].sda, s_iic_pin_map[idx].function);
    return s_iic_pin_map[idx].cfg_idx;
}
#define BIT1 (0x1)

/* RTC register base / IRQ table. */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_rtc_config[CONFIG_RTC_NUM] = {
    {CSKY_RTC0_BASE, RTC_IRQn},
};

/**
  \brief   get rtc instance count.
  \return  rtc instance count
*/
int32_t target_get_rtc_count(void)
{
    return CONFIG_RTC_NUM;
}

/**
  \brief       look up an rtc instance.
  \param[in]   idx   instance index, must be < target_get_rtc_count()
  \param[out]  base  rtc register base
  \param[out]  irq   rtc interrupt number
  \return      idx on success, -1 on out-of-range index
*/
int32_t target_get_rtc(int32_t idx, uint32_t *base, uint32_t *irq)
{
    unsigned int value;

    if (idx >= target_get_rtc_count()) {
        /* Bug fix: was 'return NULL' (== 0, a valid index); use -1. */
        return -1;
    }

    /* Clear bit 0 of the PMU control register before handing out the RTC
     * -- presumably this un-gates the RTC clock; confirm against the
     * hobbit3 PMU register description. (Note: BIT1 is defined as 0x1,
     * i.e. the least-significant bit, despite its name.) */
    value = readl(CSKY_PMU_BASE);
    value &= ~BIT1;
    writel(value, CSKY_PMU_BASE);

    *base = sg_rtc_config[idx].base;
    *irq = sg_rtc_config[idx].irq;
    return idx;
}
/* SPI controller register base / IRQ table. */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_spi_config[CONFIG_SPI_NUM] = {
    {CSKY_SPI0_BASE, SPI0_IRQn},
    {CSKY_SPI1_BASE, SPI1_IRQn}
};

typedef struct {
    int32_t mosi;
    int32_t miso;
    int32_t sclk;
    int32_t ssel;
    uint32_t cfg_idx;    //idx of sg_iic_config[]
    uint16_t function;
} spi_pin_map_t;

/* NOTE(review): in both entries the value stored in .mosi names a *MISO*
 * pin and vice versa. Behaviour is unaffected (all four pins get the same
 * pinmux function below), but the field labels look swapped -- confirm
 * against the board schematic before relying on them. */
const static spi_pin_map_t s_spi_pin_map[] = {
    {
        PA18_SPI0MISO_XX_SPU6_USI1SD1,
        PA17_SPI0MOSI_PWMTRIG1_XX_USI1SD0,
        PA19_SPI0SCK_FAULT_SPU7_USI1NSS,
        PA16_SPI0CS0_PWMTRIG0_XX_USI1SCLK,
        0,
        0
    },
    {
        PB13_UART3TX_SPI1MISO_SPU29_SIROUT3,
        PB14_UART3RTS_SPI1MOSI_SPU30_XX,
        PB15_UART3CTS_SPI1SCK_SPU31_XX,
        PB12_UART3RX_SPI1CS0_SPU28_SIRIN3,
        1,
        1
    }
};

/**
  \brief       Initialize an SPI instance: mux its four pins and return its
               register base, interrupt number and chip-select pin.
  \param[in]   idx   SPI controller index, must be < CONFIG_SPI_NUM
  \param[out]  base  controller register base
  \param[out]  irq   controller interrupt number
  \param[out]  ssel  chip-select pin name
  \return      cfg_idx on success, -1 on out-of-range index
*/
int32_t target_spi_init(int32_t idx, uint32_t *base, uint32_t *irq, uint32_t *ssel)
{
    if (idx >= sizeof(s_spi_pin_map) / sizeof(spi_pin_map_t)) {
        return -1;
    }

    *base = sg_spi_config[s_spi_pin_map[idx].cfg_idx].base;
    *irq = sg_spi_config[s_spi_pin_map[idx].cfg_idx].irq;
    *ssel = s_spi_pin_map[idx].ssel;

    /*pinmux*/
    pin_mux(s_spi_pin_map[idx].mosi, s_spi_pin_map[idx].function);
    pin_mux(s_spi_pin_map[idx].miso, s_spi_pin_map[idx].function);
    pin_mux(s_spi_pin_map[idx].sclk, s_spi_pin_map[idx].function);
    pin_mux(s_spi_pin_map[idx].ssel, s_spi_pin_map[idx].function);
    return s_spi_pin_map[idx].cfg_idx;
}
/* DMA controller register base / IRQ table. */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_dmac_config[CONFIG_DMAC_NUM] = {
    {CSKY_DMAC0_BASE, DMAC_IRQn},
};

/**
  \brief   get dmac instance count.
  \return  dmac instance count
*/
int32_t target_get_dmac_count(void)
{
    return CONFIG_DMAC_NUM;
}

/**
  \brief       look up a dmac instance.
  \param[in]   idx   instance index, must be < target_get_dmac_count()
  \param[out]  base  dmac register base
  \param[out]  irq   dmac interrupt number
  \return      idx on success, -1 on out-of-range index
*/
int32_t target_get_dmac(int32_t idx, uint32_t *base, uint32_t *irq)
{
    if (idx >= target_get_dmac_count()) {
        /* Bug fix: was 'return NULL' (== 0, a valid index); use -1. */
        return -1;
    }

    *base = sg_dmac_config[idx].base;
    *irq = sg_dmac_config[idx].irq;
    return idx;
}
/* PWM block register base / IRQ table (single block, 12 channels). */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_pwm_config[CONFIG_PWM_NUM] = {
    {CSKY_PWM_BASE, PWM_IRQn},
};

/* Maps a pin onto the PWM block, the channel it drives, and the pinmux
 * alternate-function number. */
typedef struct {
    int32_t pwm_pin;
    uint32_t cfg_idx;    //idx of sg_pwm_config[]
    uint32_t ch_num;
    uint16_t function;
} pwm_pin_map_t;

const static pwm_pin_map_t s_pwm_pin_map[] = {
    {PA20_UART1RX_PWM0_SPU8_SIRIN1, 0, 0, 1},
    {PA21_UART1TX_PWM1_SPU9_SIROUT1, 0, 1, 1},
    {PA22_UART1CTS_PWM2_SPU10_XX, 0, 2, 1},
    {PA23_UART1RTS_PWM3_SPU11_XX, 0, 3, 1},
    {PA24_USI1NSS_PWM4_SPU12_XX, 0, 4, 1},
    {PA25_USI1SD1_PWM5_SPU13_XX, 0, 5, 1},
    {PA26_USI1SD0_PWM6_SPU14_XX, 0, 6, 1},
    {PA27_USI1SCLK_PWM7_SPU15_XX, 0, 7, 1},
    {PA28_I2C1SCL_PWM8_SPU16_XX, 0, 8, 1},
    {PA29_I2C1SDA_PWM9_SPU17_XX, 0, 9, 1},
    {PA30_I2C0SDA_PWM10_SPU18_XX, 0, 10, 1},
    {PA31_I2C0SCL_PWM11_SPU19_XX, 0, 11, 1}
};

/**
  \brief       Initialize a PWM output pin: mux it and return the PWM
               block's register base, interrupt number and channel.
  \param[in]   pwm_pin  pin name from pin_name.h
  \param[out]  ch_num   PWM channel driven by the pin
  \param[out]  base     PWM block register base
  \param[out]  irq      PWM block interrupt number
  \return      cfg_idx on success, -1 if the pin has no PWM function
*/
int32_t target_pwm_init(int32_t pwm_pin, uint32_t *ch_num, uint32_t *base, uint32_t *irq)
{
    uint32_t idx;

    for (idx = 0; idx < sizeof(s_pwm_pin_map) / sizeof(pwm_pin_map_t); idx++) {
        if (s_pwm_pin_map[idx].pwm_pin == pwm_pin) {
            *base = sg_pwm_config[s_pwm_pin_map[idx].cfg_idx].base;
            *irq = sg_pwm_config[s_pwm_pin_map[idx].cfg_idx].irq;
            *ch_num = s_pwm_pin_map[idx].ch_num;

            /*pinmux*/
            pin_mux(s_pwm_pin_map[idx].pwm_pin, s_pwm_pin_map[idx].function);
            return s_pwm_pin_map[idx].cfg_idx;
        }
    }

    return -1;
}
/* Watchdog register base / IRQ table. */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_wdt_config[CONFIG_WDT_NUM] = {
    {CSKY_WDT_BASE, WDT_IRQn}
};

/**
  \brief   get wdt instance count.
  \return  wdt instance count
*/
int32_t target_get_wdt_count(void)
{
    return CONFIG_WDT_NUM;
}

/**
  \brief       look up a wdt instance.
  \param[in]   idx   instance index, must be < target_get_wdt_count()
  \param[out]  base  wdt register base
  \param[out]  irq   wdt interrupt number
  \return      idx on success, -1 on out-of-range index
*/
int32_t target_get_wdt(int32_t idx, uint32_t *base, uint32_t *irq)
{
    if (idx >= target_get_wdt_count()) {
        /* Bug fix: was 'return NULL' (== 0, a valid index); use -1. */
        return -1;
    }

    *base = sg_wdt_config[idx].base;
    *irq = sg_wdt_config[idx].irq;
    return idx;
}
/**
  \brief   get etb instance count.
  \return  etb instance count
*/
int32_t target_get_etb_count(void)
{
    return CONFIG_ETB_NUM;
}

/**
  \brief       look up an etb instance.
  \param[in]   idx   instance index, must be < target_get_etb_count()
  \param[out]  base  unused -- the config table is not wired up yet
  \param[out]  irq   unused -- the config table is not wired up yet
  \return      0 on success, -1 on out-of-range index
*/
int32_t target_get_etb(int32_t idx, uint32_t *base, uint32_t *irq)
{
    if (idx >= target_get_etb_count()) {
        /* Bug fix: was 'return NULL' (== 0, the success value here); use
         * -1 so callers can detect the failure. */
        return -1;
    }

    // *base = sg_etb_config[idx].base;
    // *irq = sg_etb_config[idx].irq;
    return 0;
}
/* QSPI controller register base / IRQ table.
 * NOTE(review): entry 0 (QSPIC0) uses QSPIC1_IRQn -- this looks like a
 * copy/paste slip; confirm whether a QSPIC0_IRQn exists in soc.h. */
struct {
    uint32_t base;
    uint32_t irq;
}
const sg_qspi_config[CONFIG_QSPI_NUM] = {
    {CSKY_QSPIC0_BASE, QSPIC1_IRQn},
    {CSKY_QSPIC1_BASE, QSPIC1_IRQn}
};

/* Maps a full six-pin QSPI pin set onto a controller and the pinmux
 * alternate-function number. */
typedef struct {
    pin_name_t sclk;
    pin_name_t miso;
    pin_name_t mosi;
    pin_name_t hold;
    pin_name_t wp;
    pin_name_t ssel;
    uint32_t cfg_idx;
    uint16_t function;
} qspi_pin_map_t;

const static qspi_pin_map_t s_qspi_pin_map[] = {
    {
        PA2_QSPI0CLK_XX_XX_XX,
        PA3_QSPI0MISO_XX_XX_XX,
        PA4_QSPI0MOSI_XX_XX_XX,
        PA5_QSPI0HOLD_XX_XX_XX,
        PA6_QSPI0WP_XX_XX_XX,
        PA7_QSPI0CS0_XX_XX_XX,
        0,
        0
    },
    {
        PB11_QSPI1CLK_XX_SPU27_I2C1SCL,
        PB10_QSPI1MISO_XX_SPU26_I2C1SDA,
        PB9_QSPI1MOSI_PWMTRIG1_SPU25_XX,
        PB7_QSPI1HOLD_XX_SPU23_XX,
        PB6_QSPI1WP_XX_SPU22_XX,
        PB8_QSPI1CS0_PWMTRIG0_SPU24_XX,
        1,
        0
    }
};

/**
  \brief       Initialize a QSPI instance from its six pin names: mux all
               pins and report the matching controller index.
  \param[in]   mosi,miso,sclk,ssel,wp,hold  the six QSPI pin names
  \param[out]  base  unused in the current implementation
  \param[out]  irq   unused in the current implementation
  \return      cfg_idx on success, -1 if the pin set matches no table entry
*/
int32_t target_qspi_init(pin_name_t mosi, pin_name_t miso, pin_name_t sclk, pin_name_t ssel, pin_name_t wp, pin_name_t hold, uint32_t *base, uint32_t *irq)
{
    uint32_t idx;

    for (idx = 0; idx < sizeof(s_qspi_pin_map) / sizeof(qspi_pin_map_t); idx++) {
        if (s_qspi_pin_map[idx].mosi == mosi && s_qspi_pin_map[idx].miso == miso
            && s_qspi_pin_map[idx].sclk == sclk && s_qspi_pin_map[idx].ssel == ssel
            && s_qspi_pin_map[idx].hold == hold && s_qspi_pin_map[idx].wp == wp) {
            pin_mux(s_qspi_pin_map[idx].mosi, s_qspi_pin_map[idx].function);
            pin_mux(s_qspi_pin_map[idx].miso, s_qspi_pin_map[idx].function);
            pin_mux(s_qspi_pin_map[idx].sclk, s_qspi_pin_map[idx].function);
            pin_mux(s_qspi_pin_map[idx].hold, s_qspi_pin_map[idx].function);
            pin_mux(s_qspi_pin_map[idx].wp, s_qspi_pin_map[idx].function);
            pin_mux(s_qspi_pin_map[idx].ssel, s_qspi_pin_map[idx].function);
            return s_qspi_pin_map[idx].cfg_idx;
        }
    }

    return -1;
}
| 11,653 |
1,104 |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* Discretize.java
* Copyright (C) 2008 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core.pmml;
import java.io.Serializable;
import java.util.ArrayList;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Utils;
/**
* Class encapsulating a Discretize Expression.
*
* @author <NAME> (mhall{[at]}pentaho{[dot]}com)
* @version $Revision 1.0 $
*/
public class Discretize extends Expression {

  /**
   * Inner class to encapsulate DiscretizeBin elements
   */
  protected class DiscretizeBin implements Serializable {

    /**
     * For serialization
     */
    private static final long serialVersionUID = 5810063243316808400L;

    /** The intervals for this DiscretizeBin */
    private ArrayList<FieldMetaInfo.Interval> m_intervals =
      new ArrayList<FieldMetaInfo.Interval>();

    /** The bin value for this DiscretizeBin */
    private String m_binValue;

    /**
     * If the optype is continuous or ordinal, we will attempt to parse
     * the bin value as a number and store it here.
     */
    private double m_numericBinValue = Utils.missingValue();

    /**
     * Construct a DiscretizeBin from its XML element.
     *
     * @param bin the DiscretizeBin element
     * @param opType the optype of the enclosing Discretize expression
     * @throws Exception if an Interval element cannot be parsed
     */
    protected DiscretizeBin(Element bin,
        FieldMetaInfo.Optype opType) throws Exception {
      NodeList iL = bin.getElementsByTagName("Interval");
      for (int i = 0; i < iL.getLength(); i++) {
        Node iN = iL.item(i);
        if (iN.getNodeType() == Node.ELEMENT_NODE) {
          FieldMetaInfo.Interval tempInterval = new FieldMetaInfo.Interval((Element)iN);
          m_intervals.add(tempInterval);
        }
      }

      m_binValue = bin.getAttribute("binValue");

      // for continuous/ordinal optypes the bin value may double as a number
      if (opType == FieldMetaInfo.Optype.CONTINUOUS ||
          opType == FieldMetaInfo.Optype.ORDINAL) {
        try {
          m_numericBinValue = Double.parseDouble(m_binValue);
        } catch (NumberFormatException ex) {
          // quietly ignore... (value stays missing, forcing a categorical output)
        }
      }
    }

    /**
     * Get the bin value for this DiscretizeBin
     *
     * @return the bin value
     */
    protected String getBinValue() {
      return m_binValue;
    }

    /**
     * Get the value of this bin as a number (parsed from the string value).
     *
     * @return the value of this bin as a number or Double.NaN if the string
     * value of the bin could not be interpreted as a number.
     */
    protected double getBinValueNumeric() {
      return m_numericBinValue;
    }

    /**
     * Returns true if there is an interval that contains the incoming
     * value.
     *
     * @param value the value to check against
     * @return true if there is an interval that contains the supplied value
     */
    protected boolean containsValue(double value) {
      boolean result = false;

      for (FieldMetaInfo.Interval i : m_intervals) {
        if (i.containsValue(value)) {
          result = true;
          break;
        }
      }

      return result;
    }

    public String toString() {
      // StringBuilder instead of StringBuffer: no synchronization needed here
      StringBuilder buff = new StringBuilder();

      buff.append("\"" + m_binValue + "\" if value in: ");
      boolean first = true;
      for (FieldMetaInfo.Interval i : m_intervals) {
        if (!first) {
          buff.append(", ");
        } else {
          first = false;
        }
        buff.append(i.toString());
      }

      return buff.toString();
    }
  }

  /** The name of the field to be discretized */
  protected String m_fieldName;

  /** The index of the field */
  protected int m_fieldIndex;

  /** True if a replacement for missing values has been specified */
  protected boolean m_mapMissingDefined = false;

  /** The value of the missing value replacement (if defined) */
  protected String m_mapMissingTo;

  /** True if a default value has been specified */
  protected boolean m_defaultValueDefined = false;

  /** The default value (if defined) */
  protected String m_defaultValue;

  /** The bins for this discretization */
  protected ArrayList<DiscretizeBin> m_bins = new ArrayList<DiscretizeBin>();

  /** The output structure of this discretization */
  protected Attribute m_outputDef;

  /**
   * Constructs a Discretize Expression
   *
   * @param discretize the Element containing the discretize expression
   * @param opType the optype of this Discretize Expression
   * @param fieldDefs the structure of the incoming fields
   * @throws Exception if the optype is not categorical/ordinal or if there
   * is a problem parsing this element
   */
  public Discretize(Element discretize, FieldMetaInfo.Optype opType, ArrayList<Attribute> fieldDefs)
    throws Exception {
    super(opType, fieldDefs);

    /* if (m_opType == FieldMetaInfo.Optype.CONTINUOUS) {
      throw new Exception("[Discretize] must have a categorical or ordinal optype");
    } */

    m_fieldName = discretize.getAttribute("field");

    m_mapMissingTo = discretize.getAttribute("mapMissingTo");
    if (m_mapMissingTo != null && m_mapMissingTo.length() > 0) {
      m_mapMissingDefined = true;
    }

    m_defaultValue = discretize.getAttribute("defaultValue");
    if (m_defaultValue != null && m_defaultValue.length() > 0) {
      m_defaultValueDefined = true;
    }

    // get the DiscretizeBin Elements
    NodeList dbL = discretize.getElementsByTagName("DiscretizeBin");
    for (int i = 0; i < dbL.getLength(); i++) {
      Node dbN = dbL.item(i);
      if (dbN.getNodeType() == Node.ELEMENT_NODE) {
        Element dbE = (Element)dbN;
        DiscretizeBin db = new DiscretizeBin(dbE, m_opType);
        m_bins.add(db);
      }
    }

    if (fieldDefs != null) {
      setUpField();
    }
  }

  /**
   * Set the field definitions for this Expression to use
   *
   * @param fieldDefs the field definitions to use
   * @throws Exception if there is a problem setting the field definitions
   */
  public void setFieldDefs(ArrayList<Attribute> fieldDefs) throws Exception {
    super.setFieldDefs(fieldDefs);
    setUpField();
  }

  /**
   * Resolve the input field against the current field definitions and build
   * the output attribute (nominal if any bin value is non-numeric or the
   * optype is categorical, numeric otherwise).
   *
   * @throws Exception if the field is missing or not continuous
   */
  private void setUpField() throws Exception {
    m_fieldIndex = -1;

    if (m_fieldDefs != null) {
      m_fieldIndex = getFieldDefIndex(m_fieldName);
      if (m_fieldIndex < 0) {
        throw new Exception("[Discretize] Can't find field " + m_fieldName
            + " in the supplied field definitions.");
      }

      Attribute field = m_fieldDefs.get(m_fieldIndex);
      if (!field.isNumeric()) {
        throw new Exception("[Discretize] reference field " + m_fieldName
            +" must be continuous.");
      }
    }

    // set up the output structure
    Attribute tempAtt = null;
    boolean categorical = false;

    if (m_opType == FieldMetaInfo.Optype.CONTINUOUS ||
        m_opType == FieldMetaInfo.Optype.ORDINAL) {
      // check to see if all bin values could be parsed as numbers
      for (DiscretizeBin d : m_bins) {
        if (Utils.isMissingValue(d.getBinValueNumeric())) {
          categorical = true;
          break;
        }
      }
    } else {
      categorical = true;
    }

    tempAtt = (categorical)
      ? new Attribute("temp", (ArrayList<String>)null)
      : new Attribute(m_fieldName + "_discretized(optype=continuous)");

    if (categorical) {
      for (DiscretizeBin d : m_bins) {
        tempAtt.addStringValue(d.getBinValue());
      }

      // add the default value (just in case it is some other value than one
      // of the bins
      if (m_defaultValueDefined) {
        tempAtt.addStringValue(m_defaultValue);
      }

      // add the map missing to value (just in case it is some other value than one
      // of the bins
      if (m_mapMissingDefined) {
        tempAtt.addStringValue(m_mapMissingTo);
      }

      // now make this into a nominal attribute
      ArrayList<String> values = new ArrayList<String>();
      for (int i = 0; i < tempAtt.numValues(); i++) {
        values.add(tempAtt.value(i));
      }

      m_outputDef = new Attribute(m_fieldName + "_discretized", values);
    } else {
      m_outputDef = tempAtt;
    }
  }

  /**
   * Return the structure of the result of applying this Expression
   * as an Attribute.
   *
   * @return the structure of the result of applying this Expression as an
   * Attribute.
   */
  protected Attribute getOutputDef() {
    if (m_outputDef == null) {
      // return a "default" output def. This will get replaced
      // by a final one when the final field defs are are set
      // for all expressions after all derived fields are collected
      return (m_opType == FieldMetaInfo.Optype.CATEGORICAL ||
          m_opType == FieldMetaInfo.Optype.ORDINAL)
        ? new Attribute(m_fieldName + "_discretized", new ArrayList<String>())
        : new Attribute(m_fieldName + "_discretized(optype=continuous)");
    }
    return m_outputDef;
  }

  /**
   * Get the result of evaluating the expression. In the case
   * of a continuous optype, a real number is returned; in
   * the case of a categorical/ordinal optype, the index of the nominal
   * value is returned as a double.
   *
   * @param incoming the incoming parameter values
   * @return the result of evaluating the expression
   * @throws Exception if there is a problem computing the result
   */
  public double getResult(double[] incoming) throws Exception {
    // default of a missing value for the result if none of the following
    // logic applies
    double result = Utils.missingValue();

    double value = incoming[m_fieldIndex];

    if (Utils.isMissingValue(value)) {
      if (m_mapMissingDefined) {
        if (m_outputDef.isNominal()) {
          result = m_outputDef.indexOfValue(m_mapMissingTo);
        } else {
          try {
            result = Double.parseDouble(m_mapMissingTo);
          } catch (NumberFormatException ex) {
            throw new Exception("[Discretize] Optype is continuous but value of mapMissingTo "
                +"can not be parsed as a number!");
          }
        }
      }
    } else {
      // look for a bin that has an interval that contains this value
      boolean found = false;
      for (DiscretizeBin b : m_bins) {
        if (b.containsValue(value)) {
          found = true;
          if (m_outputDef.isNominal()) {
            result = m_outputDef.indexOfValue(b.getBinValue());
          } else {
            result = b.getBinValueNumeric();
          }
          break;
        }
      }

      if (!found) {
        if (m_defaultValueDefined) {
          if (m_outputDef.isNominal()) {
            result = m_outputDef.indexOfValue(m_defaultValue);
          } else {
            try {
              result = Double.parseDouble(m_defaultValue);
            } catch (NumberFormatException ex) {
              throw new Exception("[Discretize] Optype is continuous but value of " +
                  "default value can not be parsed as a number!");
            }
          }
        }
      }
    }

    return result;
  }

  /**
   * Gets the result of evaluating the expression when the
   * optype is categorical or ordinal as the actual String
   * value.
   *
   * @param incoming the incoming parameter values
   * @return the result of evaluating the expression
   * @throws Exception if the optype is continuous
   */
  public String getResultCategorical(double[] incoming) throws Exception {
    double index = getResult(incoming);
    if (Utils.isMissingValue(index)) {
      return "**Missing Value**";
    }

    return m_outputDef.value((int)index);
  }

  /* (non-Javadoc)
   * @see weka.core.pmml.Expression#toString(java.lang.String)
   */
  public String toString(String pad) {
    // StringBuilder instead of StringBuffer: no synchronization needed here
    StringBuilder buff = new StringBuilder();

    buff.append(pad + "Discretize (" + m_fieldName + "):");
    for (DiscretizeBin d : m_bins) {
      buff.append("\n" + pad + d.toString());
    }

    if (m_outputDef.isNumeric()) {
      buff.append("\n" + pad + "(bin values interpreted as numbers)");
    }

    if (m_mapMissingDefined) {
      buff.append("\n" + pad + "map missing values to: " + m_mapMissingTo);
    }

    if (m_defaultValueDefined) {
      buff.append("\n" + pad + "default value: " + m_defaultValue);
    }

    return buff.toString();
  }
}
| 5,144 |
374 |
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3  # colour channels per image

# chunk index -> learning rate (stepped decay)
LEARNING_RATE_SCHEDULE = {
    0: 0.04,
    1800: 0.004,
    2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.

GEN_BUFFER_SIZE = 1

# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"

TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"

print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime

# Two input representations: a plain 3x downsample and the same downsample
# rotated by 45 degrees.
input_sizes = [(69, 69), (69, 69)]

ds_transforms = [
    ra.build_ds_transform(3.0, target_size=input_sizes[0]),
    ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]

num_input_representations = len(ds_transforms)

# Random augmentation ranges applied on the fly during training.
augmentation_params = {
    'zoom_range': (1.0 / 1.3, 1.3),
    'rotation_range': (0, 360),
    'shear_range': (0, 0),
    'translation_range': (-4, 4),
    'do_flip': True,
}

augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
                                                    augmentation_params=augmentation_params, ds_transforms=ds_transforms,
                                                    target_sizes=input_sizes)

post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)

# buffered_gen_mp prefetches chunks in a separate process
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)

y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids

# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)

num_valid = num_train // 10 # integer division
num_train -= num_valid

y_valid = y_train[num_train:]
y_train = y_train[:num_train]

valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]

train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
    """Return a buffered generator over the training set in fixed order.

    Uses the non-randomised (fixed) augmentation transforms, so it is meant
    for postprocessing/feature extraction only -- do not use it for training.
    """
    base_gen = ra.realtime_fixed_augmented_data_gen(
        train_indices, 'train',
        ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE,
        target_sizes=input_sizes)
    return load_data.buffered_gen_mp(base_gen, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
    """Return a buffered generator over the validation split (fixed transforms)."""
    base_gen = ra.realtime_fixed_augmented_data_gen(
        valid_indices, 'train',
        ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE,
        target_sizes=input_sizes)
    return load_data.buffered_gen_mp(base_gen, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
    """Return a buffered generator over the test set (fixed transforms)."""
    base_gen = ra.realtime_fixed_augmented_data_gen(
        test_indices, 'test',
        ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE,
        target_sizes=input_sizes)
    return load_data.buffered_gen_mp(base_gen, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
# One list of chunks per input representation, filled in generator order.
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
    for x_valid_list, x_chunk in zip(xs_valid, data):
        # Only the first `length` rows of a chunk are real data; the rest is padding.
        x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
# Two input layers: the 0-degree and the 45-degree view of each galaxy.
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
# Slice both inputs into rotated/flipped parts so the conv stack processes
# multiple views per example; they are merged again by MultiRotMergeLayer below.
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
# cuda-convnet kernels use c01b axis order; shuffle from Theano's bc01.
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
# Back to bc01 before the dense part.
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# Two maxout(2) dense layers: 4096 linear units pooled pairwise down to 2048.
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
# 37 outputs: one per answer in the Galaxy Zoo decision tree.
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
# Three loss variants: unnormalised (early training), normalised, and
# validation (dropout disabled).
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
# Shared GPU buffers; resized on first set_value with a real chunk.
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
# Each compiled function takes a batch index and slices the shared buffers.
givens = {
    l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
    l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
    l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
# NOTE(review): x_valid here is the loop variable leaked out of the list
# comprehensions above (Python 2 scoping); it refers to the last input
# representation. All representations have the same length, so this works,
# but it would break under Python 3.
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
    print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
    chunk_data, chunk_length = train_gen.next()
    y_chunk = chunk_data.pop() # last element is labels.
    xs_chunk = chunk_data
    # need to transpose the chunks to move the 'channels' dimension up
    xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
    # Learning-rate schedule is keyed by chunk index.
    if e in LEARNING_RATE_SCHEDULE:
        current_lr = LEARNING_RATE_SCHEDULE[e]
        learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
        print " setting learning rate to %.6f" % current_lr
    # train without normalisation for the first # chunks.
    if e >= NUM_CHUNKS_NONORM:
        train = train_norm
    else:
        train = train_nonorm
    print " load training data onto GPU"
    for x_shared, x_chunk in zip(xs_shared, xs_chunk):
        x_shared.set_value(x_chunk)
    y_shared.set_value(y_chunk)
    num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
    # import pdb; pdb.set_trace()
    print " batch SGD"
    losses = []
    for b in xrange(num_batches_chunk):
        # if b % 1000 == 0:
        # print " batch %d/%d" % (b + 1, num_batches_chunk)
        loss = train(b)
        losses.append(loss)
        # print " loss: %.6f" % loss
    # Losses are squared errors; take sqrt of the mean to report RMSE.
    mean_train_loss = np.sqrt(np.mean(losses))
    print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
    losses_train.append(mean_train_loss)
    # store param stds during training
    param_stds.append([p.std() for p in layers.get_param_values(l6)])
    if ((e + 1) % VALIDATE_EVERY) == 0:
        print
        print "VALIDATING"
        print " load validation data onto GPU"
        for x_shared, x_valid in zip(xs_shared, xs_valid):
            x_shared.set_value(x_valid)
        y_shared.set_value(y_valid)
        print " compute losses"
        losses = []
        for b in xrange(num_batches_valid):
            # if b % 1000 == 0:
            # print " batch %d/%d" % (b + 1, num_batches_valid)
            loss = compute_loss(b)
            losses.append(loss)
        mean_valid_loss = np.sqrt(np.mean(losses))
        print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
        losses_valid.append(mean_valid_loss)
        # Checkpoint parameters after every validation pass.
        layers.dump_params(l6, e=e)
    # Progress/ETA reporting based on average time per chunk so far.
    now = time.time()
    time_since_start = now - start_time
    time_since_prev = now - prev_time
    prev_time = now
    est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
    eta = datetime.now() + timedelta(seconds=est_time_left)
    eta_str = eta.strftime("%c")
    print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
    print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
    print
# Free the large training/validation arrays before test-set prediction.
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
    # if b % 1000 == 0:
    # print " batch %d/%d" % (b + 1, num_batches_valid)
    predictions = compute_output(b)
    predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
# NOTE(review): pickle with HIGHEST_PROTOCOL is binary; 'w' only works here
# because this runs on POSIX/Python 2 -- 'wb' would be the portable choice.
with open(ANALYSIS_PATH, 'w') as f:
    pickle.dump({
        'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
        'predictions': all_predictions,
        'targets': y_valid,
        'mean_train_loss': mean_train_loss,
        'mean_valid_loss': mean_valid_loss,
        'time_since_start': time_since_start,
        'losses_train': losses_train,
        'losses_valid': losses_valid,
        'param_values': layers.get_param_values(l6),
        'param_stds': param_stds,
    }, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
    print "Chunk %d" % (e + 1)
    xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
    for x_shared, x_chunk in zip(xs_shared, xs_chunk):
        x_shared.set_value(x_chunk)
    num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
    # make predictions for testset, don't forget to cut off the zeros at the end
    for b in xrange(num_batches_chunk):
        # if b % 1000 == 0:
        # print " batch %d/%d" % (b + 1, num_batches_chunk)
        predictions = compute_output(b)
        predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
    writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
    # write header
    writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
    # write data
    for k in xrange(test_ids.shape[0]):
        row = [test_ids[k]] + all_predictions[k].tolist()
        writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for set, don't forget to cute off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| 7,361 |
1,665 |
#include "IRVisitor.h"
#include "IR.h"

using namespace Bish;

// Default IRVisitor implementations: each visit() performs a depth-first
// traversal of the node's children. visited_set guards against visiting a
// node more than once (the IR may share nodes), so every method first checks
// visited() and records the node before recursing. Subclasses override the
// methods they care about and typically delegate back here to keep traversing.

IRVisitor::~IRVisitor() { }

void IRVisitor::visit(Module *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    // Visit globals first, then every function in the module.
    node->global_variables->accept(this);
    for (std::vector<Function *>::const_iterator I = node->functions.begin(),
             E = node->functions.end(); I != E; ++I) {
        (*I)->accept(this);
    }
}

void IRVisitor::visit(Block *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    for (std::vector<IRNode *>::const_iterator I = node->nodes.begin(),
             E = node->nodes.end(); I != E; ++I) {
        (*I)->accept(this);
    }
}

void IRVisitor::visit(Variable *node) {
    if (visited(node)) return;
    visited_set.insert(node);
}

void IRVisitor::visit(Location *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    node->variable->accept(this);
    // offset is optional (e.g. plain variable reference vs. indexed access).
    if (node->offset) node->offset->accept(this);
}

void IRVisitor::visit(ReturnStatement *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    // value is null for a bare "return".
    if (node->value) node->value->accept(this);
}

void IRVisitor::visit(ImportStatement *node) {
    if (visited(node)) return;
    visited_set.insert(node);
}

void IRVisitor::visit(LoopControlStatement *node) {
    if (visited(node)) return;
    visited_set.insert(node);
}

void IRVisitor::visit(IfStatement *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    // Main predicated block, then any else-if blocks, then the optional else.
    node->pblock->condition->accept(this);
    node->pblock->body->accept(this);
    for (std::vector<PredicatedBlock *>::const_iterator I = node->elses.begin(),
             E = node->elses.end(); I != E; ++I) {
        (*I)->condition->accept(this);
        (*I)->body->accept(this);
    }
    if (node->elseblock) node->elseblock->accept(this);
}

void IRVisitor::visit(ForLoop *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    node->variable->accept(this);
    node->lower->accept(this);
    // upper is null when iterating a single value/collection rather than a range.
    if (node->upper) node->upper->accept(this);
    node->body->accept(this);
}

void IRVisitor::visit(Function *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    for (std::vector<Variable *>::const_iterator I = node->args.begin(),
             E = node->args.end(); I != E; ++I) {
        (*I)->accept(this);
    }
    // body is null for declarations without a definition.
    if (node->body) node->body->accept(this);
}

void IRVisitor::visit(FunctionCall *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    for (std::vector<Assignment *>::const_iterator I = node->args.begin(),
             E = node->args.end(); I != E; ++I) {
        (*I)->accept(this);
    }
}

void IRVisitor::visit(ExternCall *node) {
    if (visited(node)) return;
    visited_set.insert(node);
}

void IRVisitor::visit(IORedirection *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    node->a->accept(this);
    node->b->accept(this);
}

void IRVisitor::visit(Assignment *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    node->location->accept(this);
    for (std::vector<IRNode *>::const_iterator I = node->values.begin(),
             E = node->values.end(); I != E; ++I) {
        (*I)->accept(this);
    }
}

void IRVisitor::visit(BinOp *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    node->a->accept(this);
    node->b->accept(this);
}

void IRVisitor::visit(UnaryOp *node) {
    if (visited(node)) return;
    visited_set.insert(node);
    node->a->accept(this);
}

// Leaf literal nodes: nothing to recurse into.
void IRVisitor::visit(Integer *node) {
    if (visited(node)) return;
    visited_set.insert(node);
}

void IRVisitor::visit(Fractional *node) {
    if (visited(node)) return;
    visited_set.insert(node);
}

void IRVisitor::visit(String *node) {
    if (visited(node)) return;
    visited_set.insert(node);
}

void IRVisitor::visit(Boolean *node) {
    if (visited(node)) return;
    visited_set.insert(node);
}
| 1,635 |
4,168 |
package net.corda.node.services;
import co.paralleluniverse.fibers.Suspendable;
import net.corda.core.flows.FlowLogic;
import net.corda.core.flows.StartableByRPC;
import net.corda.core.node.AppServiceHub;
import net.corda.core.node.services.CordaService;
import net.corda.core.node.services.ServiceLifecycleEvent;
import net.corda.core.serialization.SingletonSerializeAsToken;
import java.util.ArrayList;
import java.util.List;
import static net.corda.core.node.AppServiceHub.SERVICE_PRIORITY_NORMAL;
public class JavaCordaServiceLifecycle {

    // Lifecycle events observed by JavaTextLengthComputingService; inspected by tests.
    static final List<ServiceLifecycleEvent> eventsCaptured = new ArrayList<>();

    /**
     * Flow that delegates computing the length of {@code text} to the
     * {@link JavaTextLengthComputingService} corda service.
     */
    @StartableByRPC
    public static class JavaComputeTextLengthThroughCordaService extends FlowLogic<Integer> {

        private final String text;

        public JavaComputeTextLengthThroughCordaService(String text) {
            this.text = text;
        }

        @Override
        @Suspendable
        public Integer call() {
            return getServiceHub()
                    .cordaService(JavaTextLengthComputingService.class)
                    .computeLength(text);
        }
    }

    /**
     * Corda service that computes string lengths and records
     * {@code STATE_MACHINE_STARTED} lifecycle events into {@link #eventsCaptured}.
     */
    @CordaService
    public static class JavaTextLengthComputingService extends SingletonSerializeAsToken {

        private final AppServiceHub serviceHub;

        public JavaTextLengthComputingService(AppServiceHub serviceHub) {
            this.serviceHub = serviceHub;
            // Subscribe for lifecycle notifications at normal priority.
            serviceHub.register(SERVICE_PRIORITY_NORMAL, this::addEvent);
        }

        private void addEvent(ServiceLifecycleEvent event) {
            // Only STATE_MACHINE_STARTED is recorded; all other events are ignored.
            if (event == ServiceLifecycleEvent.STATE_MACHINE_STARTED) {
                eventsCaptured.add(event);
            }
        }

        public int computeLength(String text) {
            assert !text.isEmpty();
            return text.length();
        }
    }
}
| 834 |
308 |
<gh_stars>100-1000
# -*- coding: utf-8 -*-
from hamcrest import *
from amplify.agent.common.context import context
from test.base import BaseTestCase
from test.fixtures.defaults import *
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class ContextTestCase(BaseTestCase):
    """Checks for context.setup() behaviour with the default (host) config."""

    def test_freeze_api_url(self):
        """api_url is only frozen when a custom, non-receiver URL is configured."""
        # check that if api_url is not set it will not prevent agent from setting api_url from cloud
        context.app_config['cloud']['api_url'] = ''
        context.setup(app='test', app_config=context.app_config.default)
        assert_that(context.freeze_api_url, equal_to(False))
        # check that an api_url from our receiver's domain will not prevent agent from setting api_url from cloud
        context.app_config['cloud']['api_url'] = 'https://receiver.amplify.nginx.com:443/1.1'
        context.setup(app='test', app_config=context.app_config.default)
        assert_that(context.freeze_api_url, equal_to(False))
        # check that a custom api_url will prevent agent from setting api_url from cloud
        context.app_config['cloud']['api_url'] = 'http://some.other.domain/endpoint/'
        context.setup(app='test', app_config=context.app_config.default)
        assert_that(context.freeze_api_url, equal_to(True))

    def test_uuid(self):
        """With no imagename, credentials carry the default host/key/uuid values."""
        assert_that(context.app_config['credentials'], has_entry('imagename', ''))
        assert_that(context.app_config['credentials'], has_entry('hostname', DEFAULT_HOST))
        assert_that(context.app_config['credentials'], has_entry('api_key', DEFAULT_API_KEY))
        assert_that(context.app_config['credentials'], has_entry('uuid', DEFAULT_UUID))
        assert_that(context.uuid, equal_to(DEFAULT_UUID))
class ContextContainerTestCase(BaseTestCase):
    """Same context checks with an imagename set (containerised agent):
    the uuid becomes 'container-<imagename>'."""

    def setup_method(self, method):
        # Configure an imagename and re-run setup so the container path is taken.
        super(ContextContainerTestCase, self).setup_method(method)
        context.app_config['credentials']['imagename'] = 'DockerTest'
        context.setup(app='test', app_config=context.app_config.default)

    def teardown_method(self, method):
        # Restore the default (non-container) credentials for subsequent tests.
        context.app_config['credentials']['imagename'] = None
        context.app_config['credentials']['uuid'] = DEFAULT_UUID
        context.setup(app='test', app_config=context.app_config.default)

    def test_uuid(self):
        """uuid is derived from the imagename when one is configured."""
        assert_that(context.app_config['credentials'], has_entry('imagename', 'DockerTest'))
        assert_that(context.app_config['credentials'], has_entry('api_key', DEFAULT_API_KEY))
        assert_that(context.app_config['credentials'], has_entry('uuid', 'container-DockerTest'))
        assert_that(context.uuid, equal_to('container-DockerTest'))
| 1,038 |
989 |
/*
* Copyright (c) 2019 The StreamX Project
* <p>
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.streamxhub.streamx.console.base.config;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.cors.CorsConfiguration;
import org.springframework.web.cors.UrlBasedCorsConfigurationSource;
import org.springframework.web.filter.CorsFilter;
/**
* @author benjobs
*/
@Configuration
public class CorssOriginConfig {

    /**
     * Builds a permissive CORS configuration: any origin, any header,
     * any HTTP method, with credentials allowed.
     *
     * NOTE(review): Spring Framework 5.3+ rejects allowCredentials(true)
     * combined with the "*" origin at runtime; allowedOriginPatterns would
     * be required there -- confirm the Spring version in use.
     */
    private CorsConfiguration buildConfig() {
        CorsConfiguration corsConfiguration = new CorsConfiguration();
        // Allow any origin.
        corsConfiguration.addAllowedOrigin("*");
        // Allow any request header.
        corsConfiguration.addAllowedHeader("*");
        // Allow any HTTP method.
        corsConfiguration.addAllowedMethod("*");
        corsConfiguration.setAllowCredentials(true);
        return corsConfiguration;
    }

    /** Registers the permissive CORS configuration for every path. */
    @Bean
    public CorsFilter corsFilter() {
        UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
        source.registerCorsConfiguration("/**", buildConfig());
        return new CorsFilter(source);
    }
}
| 623 |
1,013 |
<filename>src/circularhexagonmaze.cpp
#include "circularhexagonmaze.h"
#include <cmath>
#include <iostream>
// Delegates entirely to the HexagonalMaze base; no extra state to initialise.
CircularHexagonMaze::CircularHexagonMaze(int size) : HexagonalMaze(size) {}
std::shared_ptr<CellBorder> CircularHexagonMaze::GetEdge(int sector, int row,
int column,
int edge) const {
if (edge == 0) {
// Edge 0 is the bottom edge, hence connecting
// (row+1,column)-(row+1,column+1) with an arc
return std::make_shared<ArcBorder>(
0, 0, row + 1, (sector - 2) * M_PI / 3 + column * M_PI / 3 / (row + 1),
(sector - 2) * M_PI / 3 + (column + 1) * M_PI / 3 / (row + 1));
}
double ex1, ey1, ex2, ey2;
if (edge == 1) {
// (row,column)-(row+1,colum+1)
double theta1 = (sector - 2) * M_PI / 3, theta2 = (sector - 2) * M_PI / 3;
if (row > 0) theta1 += column * M_PI / 3 / row;
theta2 += (column + 1) * M_PI / 3 / (row + 1);
ex1 = row * cos(theta1);
ey1 = row * sin(theta1);
ex2 = (row + 1) * cos(theta2);
ey2 = (row + 1) * sin(theta2);
} else {
// (row,column)-(row+1,colum)
double theta1 = (sector - 2) * M_PI / 3, theta2 = (sector - 2) * M_PI / 3;
if (row > 0) theta1 += column * M_PI / 3 / row;
theta2 += column * M_PI / 3 / (row + 1);
ex1 = row * cos(theta1);
ey1 = row * sin(theta1);
ex2 = (row + 1) * cos(theta2);
ey2 = (row + 1) * sin(theta2);
}
return std::make_shared<LineBorder>(ex1, ey1, ex2, ey2);
}
// Bounding box of the maze in drawing coordinates: the maze is centred at the
// origin with outer radius size_, so the box is [-size_, size_] in x and y.
std::tuple<double, double, double, double>
CircularHexagonMaze::GetCoordinateBounds() const {
  return std::make_tuple(-size_, -size_, size_, size_);
}
| 821 |
369 |
<gh_stars>100-1000
/*
* Copyright © 2021 <NAME>, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package io.cdap.cdap.etl.api.connector;
import io.cdap.cdap.api.data.format.StructuredRecord;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* The result for the sample request
*/
/**
 * The result of a connector sample request: the sampled records together with
 * the properties that were used to generate them.
 */
public class SampleDetail {
  private final List<StructuredRecord> sample;
  private final Map<String, String> properties;

  private SampleDetail(List<StructuredRecord> sample, Map<String, String> properties) {
    this.sample = sample;
    this.properties = properties;
  }

  /** Returns the sampled records. */
  public List<StructuredRecord> getSample() {
    return sample;
  }

  /**
   * Get the all the properties used to generate this sample, these properties can be directly
   * used by a source/sink.
   */
  public Map<String, String> getProperties() {
    return properties;
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    if (o == null || o.getClass() != getClass()) {
      return false;
    }
    SampleDetail other = (SampleDetail) o;
    return Objects.equals(sample, other.sample)
      && Objects.equals(properties, other.properties);
  }

  @Override
  public int hashCode() {
    return Objects.hash(sample, properties);
  }

  /** Returns a builder for {@link SampleDetail}. */
  public static Builder builder() {
    return new Builder();
  }

  /** Builder for {@link SampleDetail}. */
  public static class Builder {
    private List<StructuredRecord> sample = new ArrayList<>();
    private Map<String, String> properties = new HashMap<>();

    public Builder() {
    }

    public Builder setSample(List<StructuredRecord> sample) {
      // Mutate the existing list in place rather than replacing it, matching
      // the established builder semantics.
      this.sample.clear();
      this.sample.addAll(sample);
      return this;
    }

    public Builder setProperties(Map<String, String> properties) {
      this.properties.clear();
      this.properties.putAll(properties);
      return this;
    }

    public SampleDetail build() {
      return new SampleDetail(sample, properties);
    }
  }
}
| 871 |
327 |
#ifndef CONNECT_H
#define CONNECT_H

#include <QWidget>

namespace Ui {
class Connect;
}

// Qt widget for composing/sending messages and browsing an inbox backed by a
// remote HTTP service (see the *Url members below).
class Connect : public QWidget
{
    Q_OBJECT

public:
    explicit Connect(QWidget *parent = nullptr);
    ~Connect();
    // Presumably set when check_mail() finds new messages -- confirm in Connect.cpp.
    bool hasMails = false;

public slots:
    void check_mail();
    void gotoInbox();

signals:
    // Emitted to notify listeners that new mail has arrived.
    void newMail();

private slots:
    void on_message_textChanged();
    void on_send_clicked();
    void on_email_textChanged(const QString &arg1);
    // Validates the current form contents before sending.
    bool isValid();
    void on_inbox_clicked();
    void on_backToMessages_clicked();
    void on_send_anonymously_clicked();
    // Shared send path; `anonymous` selects whether sender identity is included.
    void sendMessage(bool anonymous);
    void reset();
    void getMessages();
    // Handles a finished network reply payload.
    void processReply(const QByteArray data);
    void on_messageIds_currentTextChanged(const QString &currentText);
    void setMessage(const QByteArray data);
    void on_deleteCurrentMessage_clicked();

private:
    Ui::Connect *ui;
    QString uid;                                // current user/session identifier
    int messageMaxLength,messageMinLength;      // message length constraints enforced by isValid()
    // Endpoints of the backing HTTP service.
    QString postUrl,getMessagesUrl,getMessageUrl,deleteMessageUrl,checkMailUrl;
};

#endif // CONNECT_H
760 |
"""ML-ENSEMBLE

:author: <NAME>
:copyright: 2017-2018
:licence: MIT

Testing objects
"""
from .dummy import (Data,
                    EstimatorContainer,
                    run_learner,
                    get_learner,
                    get_layer,
                    run_layer,
                    InitMixin,
                    )

# Public API of the testing utilities subpackage (all re-exported from .dummy).
__all__ = ['Data',
           'EstimatorContainer',
           'get_learner',
           'run_learner',
           'get_layer',
           'run_layer',
           'InitMixin'
           ]
460 |
package de.saxsys.mvvmfx;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * This annotation is used to inject a {@link Scope} object into a {@link ViewModel}.
 * <p>
 * The annotation is retained at runtime and may only be applied to fields.
 *
 * @author alexander.casall
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface InjectScope {
}
| 134 |
836 |
/*
* Copyright (C) 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package androidx.test.services.events.platform;
import static androidx.test.internal.util.Checks.checkNotNull;
import android.os.Parcel;
import androidx.annotation.NonNull;
import androidx.test.services.events.TestRunInfo;
import androidx.test.services.events.TestStatus;
import androidx.test.services.events.TimeStamp;
/**
* This event indicates that all tests in a test run are finished running. No more events should be
* sent after this. This event should always be emitted.
*
* @see TestRunStartedEvent to begin a test run.
*/
public class TestRunFinishedEvent extends TestPlatformEvent {
  /* The test run that finished */
  public final TestRunInfo testRun;
  /* The overall status of the test run */
  public final TestStatus runStatus;
  /* The time that this test run finished */
  public final TimeStamp timeStamp;

  /**
   * Creates a {@link TestRunFinishedEvent}.
   *
   * @param testRun the test run that finished.
   * @param runStatus the overall status of the test run.
   * @param timeStamp the time that this test run finished.
   */
  public TestRunFinishedEvent(
      @NonNull TestRunInfo testRun, @NonNull TestStatus runStatus, @NonNull TimeStamp timeStamp) {
    this.testRun = checkNotNull(testRun, "testRun cannot be null");
    this.runStatus = checkNotNull(runStatus, "runStatus cannot be null");
    this.timeStamp = checkNotNull(timeStamp, "timeStamp cannot be null");
  }

  /**
   * Creates a {@link TestRunFinishedEvent} from an {@link Parcel}.
   *
   * <p>Fields are read in the same order they are written by {@link #writeToParcel}; keep the two
   * methods in sync.
   *
   * @param source {@link Parcel} to create the {@link TestRunFinishedEvent} from.
   */
  TestRunFinishedEvent(Parcel source) {
    testRun = new TestRunInfo(source);
    runStatus = new TestStatus(source);
    timeStamp = new TimeStamp(source);
  }

  @Override
  public void writeToParcel(Parcel parcel, int i) {
    // Write order must match the read order in the Parcel constructor above.
    super.writeToParcel(parcel, i);
    testRun.writeToParcel(parcel, i);
    runStatus.writeToParcel(parcel, i);
    timeStamp.writeToParcel(parcel, i);
  }

  @Override
  EventType instanceType() {
    return EventType.TEST_RUN_FINISHED;
  }
}
| 827 |
423 |
"""
================================
Plot Example Retinotopy Flatmaps
================================
This demo shows how to plot example retinotopy data onto a subject's brain
on a flatmap. In order for this demo to work, you need to download this
dataset_, but that can also be done automatically through the `urllib`
command that is included.
.. _dataset: http://gallantlab.org/pycortex/S1_retinotopy.hdf
S1 is the example subject that comes with pycortex, but if you want to plot
data onto a different subject, you will need to have them in your filestore,
and you will also need a flatmap for them.
"""
import six
import cortex
import matplotlib.pyplot as plt

# urlretrieve moved to urllib.request in Python 3; pick the import per interpreter.
if six.PY2:
    from urllib import urlretrieve
elif six.PY3:
    from urllib.request import urlretrieve

# Download the dataset and load it
_ = urlretrieve("http://gallantlab.org/pycortex/S1_retinotopy.hdf",
                "S1_retinotopy.hdf")
ret_data = cortex.load("S1_retinotopy.hdf")

# The retinotopy data has to be divided into left and right hemispheres
left_data = ret_data.angle_left
cortex.quickshow(left_data, with_curvature=True,
                 curvature_contrast=0.5,
                 curvature_brightness=0.5,
                 curvature_threshold=True)
plt.show()

right_data = ret_data.angle_right
cortex.quickshow(right_data, with_curvature=True,
                 curvature_contrast=0.5,
                 curvature_brightness=0.5,
                 curvature_threshold=True)
plt.show()
2,415 |
<reponame>al3pht/cloud-custodian<gh_stars>1000+
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import logging

# Importing the resource modules registers their resource types with
# Cloud Custodian as a side effect.
from c7n_openstack.resources import (
    project,
    flavor,
    server,
    user,
)

log = logging.getLogger('custodian.openstack')

# All OpenStack resource modules shipped by this provider package.
ALL = [
    flavor,
    project,
    server,
    user]

def initialize_openstack():
    """openstack entry point

    Invoked when the OpenStack provider is loaded. Currently a no-op:
    the resource modules above register themselves on import.
    """
| 160 |
324 |
<reponame>tormath1/jclouds
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jclouds.profitbricks.features;
import java.util.List;
import javax.inject.Named;
import javax.ws.rs.Consumes;
import javax.ws.rs.POST;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import org.jclouds.Fallbacks;
import org.jclouds.http.filters.BasicAuthentication;
import org.jclouds.profitbricks.binder.firewall.AddFirewallRuleToNicRequestBinder;
import org.jclouds.profitbricks.binder.firewall.FirewallBinder.ActivateFirewallRequestBinder;
import org.jclouds.profitbricks.binder.firewall.FirewallBinder.DeactivateFirewallRequestBinder;
import org.jclouds.profitbricks.binder.firewall.FirewallBinder.DeleteFirewallRequestBinder;
import org.jclouds.profitbricks.binder.firewall.FirewallBinder.RemoveFirewallRuleRequestBinder;
import org.jclouds.profitbricks.domain.Firewall;
import org.jclouds.profitbricks.http.filters.ProfitBricksSoapMessageEnvelope;
import org.jclouds.profitbricks.http.parser.firewall.FirewallListResponseHandler;
import org.jclouds.profitbricks.http.parser.firewall.FirewallResponseHandler;
import org.jclouds.rest.annotations.MapBinder;
import org.jclouds.rest.annotations.Payload;
import org.jclouds.rest.annotations.PayloadParam;
import org.jclouds.rest.annotations.RequestFilters;
import org.jclouds.rest.annotations.XMLResponseParser;
import org.jclouds.rest.annotations.Fallback;
@RequestFilters({BasicAuthentication.class, ProfitBricksSoapMessageEnvelope.class})
@Consumes(MediaType.TEXT_XML)
@Produces(MediaType.TEXT_XML)
public interface FirewallApi {

   /**
    * Returns information about the firewall with the given id, or
    * {@code null} when it does not exist (404 falls back to null).
    */
   @POST
   @Named("firewall:get")
   @Payload("<ws:getFirewall><firewallId>{id}</firewallId></ws:getFirewall>")
   @XMLResponseParser(FirewallResponseHandler.class)
   @Fallback(Fallbacks.NullOnNotFoundOr404.class)
   Firewall getFirewall(@PayloadParam("id") String identifier);

   /** Returns all firewalls in the account; an empty list when none exist. */
   @POST
   @Named("firewall:getall")
   @Payload("<ws:getAllFirewalls/>")
   @XMLResponseParser(FirewallListResponseHandler.class)
   @Fallback(Fallbacks.EmptyListOnNotFoundOr404.class)
   List<Firewall> getAllFirewalls();

   /** Adds the rules in {@code payload} to a NIC and returns the updated firewall. */
   @POST
   @Named("firewall:addrule")
   @MapBinder(AddFirewallRuleToNicRequestBinder.class)
   @XMLResponseParser(FirewallResponseHandler.class)
   Firewall addFirewallRuleToNic(@PayloadParam("firewall") Firewall.Request.AddRulePayload payload);

   /** Removes the given firewall rules; {@code false} on not-found. */
   @POST
   @Named("firewall:removerule")
   @MapBinder(RemoveFirewallRuleRequestBinder.class)
   @Fallback(Fallbacks.FalseOnNotFoundOr404.class)
   boolean removeFirewallRules(@PayloadParam("ids") List<String> firewallRuleIds);

   /** Activates the given firewalls; {@code false} on not-found. */
   @POST
   @Named("firewall:activate")
   @MapBinder(ActivateFirewallRequestBinder.class)
   @Fallback(Fallbacks.FalseOnNotFoundOr404.class)
   boolean activateFirewall(@PayloadParam("ids") List<String> firewallIds);

   // FIX: deactivateFirewall and deleteFirewall previously reused
   // @Named("firewall:activate"), mislabeling their operations in
   // request logging/metrics; each now carries its own distinct name.

   /** Deactivates the given firewalls; {@code false} on not-found. */
   @POST
   @Named("firewall:deactivate")
   @MapBinder(DeactivateFirewallRequestBinder.class)
   @Fallback(Fallbacks.FalseOnNotFoundOr404.class)
   boolean deactivateFirewall(@PayloadParam("ids") List<String> firewallIds);

   /** Deletes the given firewalls; {@code false} on not-found. */
   @POST
   @Named("firewall:delete")
   @MapBinder(DeleteFirewallRequestBinder.class)
   @Fallback(Fallbacks.FalseOnNotFoundOr404.class)
   boolean deleteFirewall(@PayloadParam("ids") List<String> firewallIds);
}
| 1,318 |
2,542 |
<gh_stars>1000+
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
#include "TestClientFactory.h"
#include "Naming/ComCallbackWaiter.h"
#include "api/wrappers/ApiWrappers.h"
#include <boost/test/unit_test.hpp>
#include "Common/boost-taef.h"
namespace ClientTest
{
    using namespace Common;
    using namespace Client;
    using namespace std;
    using namespace ServiceModel;
    using namespace Api;
    using namespace Naming;

    // Trace-source identifier for this test file.
    const StringLiteral ComFabricClientTestSource = "ComFabricClientTest";

    // Boost.Test fixture: MethodCleanup() runs after every test case; it is
    // invoked from the destructor so it fires even when the test fails.
    class ComFabricClientTest
    {
    protected:
        ~ComFabricClientTest() { BOOST_REQUIRE(MethodCleanup()); }
        TEST_METHOD_CLEANUP( MethodCleanup );
    };

    //
    // This test verifies that the comfabricclient reference count is properly maintained
    // across begin and end calls.
    //
    BOOST_FIXTURE_TEST_SUITE(ComFabricClientTestSuite,ComFabricClientTest)

    BOOST_AUTO_TEST_CASE(RefCountTest)
    {
        FabricNodeConfigSPtr config = make_shared<FabricNodeConfig>();
        IClientFactoryPtr factoryPtr;
        DWORD timeout = 2000;
        auto error = TestClientFactory::CreateLocalClientFactory(config, factoryPtr);
        VERIFY_IS_TRUE(error.IsSuccess());
        ComPointer<ComFabricClient> comClient;
        auto hr = ComFabricClient::CreateComFabricClient(factoryPtr, comClient);
        VERIFY_IS_TRUE(!FAILED(hr));
        ComPointer<ComCallbackWaiter> callbackWaiter = make_com<ComCallbackWaiter>();
        ComPointer<IFabricAsyncOperationContext> asyncOperation;
        // Start an async resolve whose callback fires after `timeout` ms.
        comClient->BeginResolveServicePartition(
            L"fabric:/System",
            FABRIC_PARTITION_KEY_TYPE_STRING,
            (void*)L"dummy",
            nullptr,
            timeout, // wait for 2 seconds
            callbackWaiter.GetRawPointer(), // callback
            asyncOperation.InitializationAddress()); // context
        // Set the com pointer to null before the callback fires (2 seconds):
        // the pending operation must keep the client alive until completion.
        comClient.Release();
        // wait for 10 seconds for the callback to finish successfully.
        bool callbackSuccessful = callbackWaiter->WaitOne(timeout + 8000);
        VERIFY_IS_TRUE(callbackSuccessful);
    }

    BOOST_AUTO_TEST_SUITE_END()

    // Per-test cleanup: restores client configuration to its default state.
    bool ComFabricClientTest::MethodCleanup()
    {
        ClientConfig::GetConfig().Test_Reset();
        return true;
    }
}
| 961 |
3,579 |
<gh_stars>1000+
package com.querydsl.jpa.domain.sql;
import static com.querydsl.core.types.PathMetadataFactory.forVariable;
import javax.annotation.Generated;
import com.querydsl.core.types.Path;
import com.querydsl.core.types.PathMetadata;
import com.querydsl.core.types.dsl.NumberPath;
import com.querydsl.core.types.dsl.StringPath;
import com.querydsl.sql.ColumnMetadata;
/**
 * SUser is a Querydsl query type for SUser.
 *
 * <p>Generated from database metadata for the {@code user_} table — do not
 * edit by hand; regenerate with the Querydsl code generator instead.
 */
@Generated("com.querydsl.sql.codegen.MetaDataSerializer")
public class SUser extends com.querydsl.sql.RelationalPathBase<SUser> {

    private static final long serialVersionUID = -109124701;

    /** Default alias bound to the {@code user_} table. */
    public static final SUser user_ = new SUser("user_");

    // Column paths; column metadata is attached in addMetadata().
    public final NumberPath<Integer> companyId = createNumber("companyId", Integer.class);
    public final StringPath firstName = createString("firstName");
    public final NumberPath<Long> id = createNumber("id", Long.class);
    public final StringPath lastName = createString("lastName");
    public final StringPath userName = createString("userName");

    /** Primary key on {@code id}. */
    public final com.querydsl.sql.PrimaryKey<SUser> primary = createPrimaryKey(id);

    /** Foreign key from {@code company_id} to the company table's {@code id}. */
    public final com.querydsl.sql.ForeignKey<SCompany> fk6a68df4dc953998 = createForeignKey(companyId, "id");

    /** Inverse foreign key: employee rows referencing this user via {@code user_id}. */
    public final com.querydsl.sql.ForeignKey<SEmployee> _fk9d39ef712743b59c = createInvForeignKey(id, "user_id");

    /** Creates a path under the given variable name for the default {@code user_} table. */
    public SUser(String variable) {
        super(SUser.class, forVariable(variable), "", "user_");
        addMetadata();
    }

    /** Creates a path bound to an explicit schema and table. */
    public SUser(String variable, String schema, String table) {
        super(SUser.class, forVariable(variable), schema, table);
        addMetadata();
    }

    /** Creates a path reusing the type and metadata of an existing path. */
    public SUser(Path<? extends SUser> path) {
        super(path.getType(), path.getMetadata(), "", "user_");
        addMetadata();
    }

    /** Creates a path from raw path metadata. */
    public SUser(PathMetadata metadata) {
        super(SUser.class, metadata, "", "user_");
        addMetadata();
    }

    /** Registers column name, ordinal index, JDBC type code, and size for each path. */
    public void addMetadata() {
        addMetadata(companyId, ColumnMetadata.named("company_id").withIndex(5).ofType(4).withSize(10));
        addMetadata(firstName, ColumnMetadata.named("firstName").withIndex(2).ofType(12).withSize(255));
        addMetadata(id, ColumnMetadata.named("id").withIndex(1).ofType(-5).withSize(19).notNull());
        addMetadata(lastName, ColumnMetadata.named("lastName").withIndex(3).ofType(12).withSize(255));
        addMetadata(userName, ColumnMetadata.named("userName").withIndex(4).ofType(12).withSize(255));
    }
}
| 900 |
308 |
<filename>syntaxnet/syntaxnet/morphology_label_set.cc
/* Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "syntaxnet/morphology_label_set.h"
namespace syntaxnet {

// Separator used both between an attribute's name and value and between
// serialized attributes when building the canonical lookup key.
const char MorphologyLabelSet::kSeparator[] = "\t";

// Interns `morph`: returns the index of an equal existing entry, otherwise
// appends it and returns the newly assigned index.
int MorphologyLabelSet::Add(const TokenMorphology &morph) {
  string repr = StringForMatch(morph);
  auto it = fast_lookup_.find(repr);
  if (it != fast_lookup_.end()) return it->second;
  fast_lookup_[repr] = label_set_.size();
  label_set_.push_back(morph);
  return label_set_.size() - 1;
}

// Look up an existing TokenMorphology. If it is not present, return -1.
int MorphologyLabelSet::LookupExisting(const TokenMorphology &morph) const {
  string repr = StringForMatch(morph);
  auto it = fast_lookup_.find(repr);
  if (it != fast_lookup_.end()) return it->second;
  return -1;
}

// Return the TokenMorphology at position i. The input i should be in the range
// 0..size(); out-of-range indices CHECK-fail.
const TokenMorphology &MorphologyLabelSet::Lookup(int i) const {
  CHECK_GE(i, 0);
  CHECK_LT(i, label_set_.size());
  return label_set_[i];
}

// Loads the label set from a proto record file.
void MorphologyLabelSet::Read(const string &filename) {
  ProtoRecordReader reader(filename);
  Read(&reader);
}

// Reads records until EOF; CHECK-fails if a duplicate morphology is seen.
void MorphologyLabelSet::Read(ProtoRecordReader *reader) {
  TokenMorphology morph;
  while (reader->Read(&morph).ok()) {
    CHECK_EQ(-1, LookupExisting(morph));
    Add(morph);
  }
}

// Writes the label set to a proto record file.
void MorphologyLabelSet::Write(const string &filename) const {
  ProtoRecordWriter writer(filename);
  Write(&writer);
}

// Writes every stored TokenMorphology to `writer` in index order.
void MorphologyLabelSet::Write(ProtoRecordWriter *writer) const {
  for (const TokenMorphology &morph : label_set_) {
    writer->Write(morph);
  }
}

// Canonical string key for a morphology: sorted "name<TAB>value" pairs joined
// with the same separator, so attribute order does not affect matching.
string MorphologyLabelSet::StringForMatch(const TokenMorphology &morph) const {
  vector<string> attributes;
  for (const auto &a : morph.attribute()) {
    attributes.push_back(
        tensorflow::strings::StrCat(a.name(), kSeparator, a.value()));
  }
  std::sort(attributes.begin(), attributes.end());
  return utils::Join(attributes, kSeparator);
}

// Human-readable name for a feature value: sorted "name:value" pairs joined
// by commas.
string FullLabelFeatureType::GetFeatureValueName(FeatureValue value) const {
  const TokenMorphology &morph = label_set_->Lookup(value);
  vector<string> attributes;
  for (const auto &a : morph.attribute()) {
    attributes.push_back(tensorflow::strings::StrCat(a.name(), ":", a.value()));
  }
  std::sort(attributes.begin(), attributes.end());
  return utils::Join(attributes, ",");
}

}  // namespace syntaxnet
| 948 |
2,542 |
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#pragma once
namespace ServiceModel
{
    namespace ModelV2
    {
        // Query-result wrapper for a single volume: pairs the volume name
        // with its shared description. Supports Fabric binary serialization,
        // JSON serialization, size estimation, and query-paging continuation
        // tokens.
        class VolumeQueryResult
            : public Serialization::FabricSerializable
            , public Common::IFabricJsonSerializable
            , public Common::ISizeEstimator
            , public IPageContinuationToken
        {
            DENY_COPY(VolumeQueryResult)
        public:
            VolumeQueryResult() = default;
            VolumeQueryResult(VolumeQueryResult &&) = default;
            VolumeQueryResult& operator=(VolumeQueryResult &&) = default;

            VolumeQueryResult(Management::ClusterManager::VolumeDescriptionSPtr const & volumeDescription);

            // Continuation token used when volume query results are paged.
            std::wstring CreateContinuationToken() const override;

            __declspec(property(get=get_VolumeName)) std::wstring const & VolumeName;
            std::wstring const & get_VolumeName() const { return volumeName_; }

            __declspec(property(get=get_VolumeDescription)) Management::ClusterManager::VolumeDescriptionSPtr const & VolumeDescription;
            Management::ClusterManager::VolumeDescriptionSPtr const & get_VolumeDescription() const { return volumeDescription_; }

            // Binary serialization covers both fields, in declaration order.
            FABRIC_FIELDS_02(volumeName_, volumeDescription_)

            BEGIN_JSON_SERIALIZABLE_PROPERTIES()
                SERIALIZABLE_PROPERTY(Constants::nameCamelCase, volumeName_)
                SERIALIZABLE_PROPERTY(Constants::properties, volumeDescription_)
            END_JSON_SERIALIZABLE_PROPERTIES()

            BEGIN_DYNAMIC_SIZE_ESTIMATION()
                DYNAMIC_SIZE_ESTIMATION_MEMBER(volumeName_)
                DYNAMIC_SIZE_ESTIMATION_MEMBER(volumeDescription_)
            END_DYNAMIC_SIZE_ESTIMATION()

        private:
            std::wstring volumeName_;
            Management::ClusterManager::VolumeDescriptionSPtr volumeDescription_;
        };

        QUERY_JSON_LIST(VolumeQueryResultList, VolumeQueryResult)
    }
}
| 848 |
1,063 |
<reponame>doc-E-brown/botocore
[
{
"category": "``ssm``",
"description": "Update ssm client to latest version",
"type": "feature"
},
{
"category": "user-agent",
"description": "Default user agent now includes the environment variable $AWS_EXECUTION_ENVIRONMENT",
"type": "feature"
},
{
"category": "Python 3.6",
"description": "Adds compatibility with the new Python 3.6 HTTPConnection.",
"type": "bugfix"
},
{
"category": "sigv4",
"description": "Do not sign x-amzn-trace-id as it can be mutated along the way.",
"type": "bugfix"
},
{
"category": "``cognito-idp``",
"description": "Update cognito-idp client to latest version",
"type": "feature"
}
]
| 295 |
521 |
<filename>third_party/virtualbox/src/VBox/HostServices/SharedOpenGL/unpacker/unpack_clipplane.cpp
/* Copyright (c) 2001, Stanford University
* All rights reserved
*
* See the file LICENSE.txt for information on redistributing this software.
*/
#include "unpacker.h"
#include "cr_mem.h"
#include "unpack_extend.h"
// Unpacks a glClipPlane call from the network buffer and dispatches it.
void crUnpackClipPlane(PCrUnpackerState pState)
{
    GLdouble equation[4];

    // Verify the buffer holds the GLenum plane id (4 bytes) plus the four
    // doubles of the plane equation before reading anything.
    CHECK_BUFFER_SIZE_STATIC(pState, 4 + sizeof(equation));

    GLenum plane = READ_DATA(pState, 0, GLenum );
    crMemcpy( equation, DATA_POINTER(pState, 4, GLdouble ), sizeof(equation) );
    pState->pDispatchTbl->ClipPlane( plane, equation );

    // Advance past the enum + four doubles just consumed.
    INCR_DATA_PTR(pState, sizeof( GLenum ) + 4*sizeof( GLdouble ));
}
| 255 |
3,200 |
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_TEXT_DATA_UTILS_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_TEXT_DATA_UTILS_H_
#include <memory>
#include <string>
#include <vector>
#include "minddata/dataset/util/status.h"
#include "minddata/dataset/include/dataset/constants.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/core/cv_tensor.h"
#include "minddata/dataset/core/tensor_shape.h"
#include "minddata/dataset/core/tensor_row.h"
namespace mindspore {
namespace dataset {
/// \brief Helper method that perform sliding window on input tensor.
/// \param[in] input - Input tensor.
/// \param[in] out_shape - Output shape of output tensor.
/// \param[in] width - The width of the window.
/// \param[in] axis - The axis along which sliding window is computed.
/// \param[out] output - Output tensor
/// \return Status return code
Status SlidingWindowHelper(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, TensorShape out_shape,
uint32_t width, int32_t axis);
/// \brief Helper method that append offsets tensor to output TensorRow.
/// \param[in] offsets_start - Offsets start index vector.
/// \param[in] offsets_limit - Offsets length vector.
/// \param[out] output - Output TensorRow
/// \return Status return code
Status AppendOffsetsHelper(const std::vector<uint32_t> &offsets_start, const std::vector<uint32_t> &offsets_limit,
TensorRow *output);
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_TEXT_DATA_UTILS_H_
| 763 |
3,066 |
/*
* Licensed to Crate.io GmbH ("Crate") under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership. Crate licenses
* this file to you under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* However, if you have executed another commercial license agreement
* with Crate these terms will supersede the license and you may use the
* software solely pursuant to the terms of the relevant commercial agreement.
*/
package io.crate.expression.scalar.string;
import io.crate.data.Input;
import io.crate.expression.scalar.ScalarFunctionModule;
import io.crate.metadata.NodeContext;
import io.crate.metadata.Scalar;
import io.crate.metadata.TransactionContext;
import io.crate.metadata.functions.Signature;
import io.crate.types.DataTypes;
import java.util.Locale;
import java.util.function.BiFunction;
/**
 * Implements the scalar string functions {@code left(text, len)} and
 * {@code right(text, len)}: a positive {@code len} keeps that many characters
 * from the chosen end, a negative {@code len} drops that many characters from
 * the opposite end.
 */
public class StringLeftRightFunction extends Scalar<String, Object> {

    /** Registers both functions, each with signature (text, integer) -> text. */
    public static void register(ScalarFunctionModule module) {
        registerImpl(module, "left", StringLeftRightFunction::left);
        registerImpl(module, "right", StringLeftRightFunction::right);
    }

    // Shared registration logic; only the function name and implementation differ.
    private static void registerImpl(ScalarFunctionModule module,
                                     String name,
                                     BiFunction<String, Integer, String> impl) {
        module.register(
            Signature.scalar(
                name,
                DataTypes.STRING.getTypeSignature(),
                DataTypes.INTEGER.getTypeSignature(),
                DataTypes.STRING.getTypeSignature()
            ),
            (signature, boundSignature) ->
                new StringLeftRightFunction(signature, boundSignature, impl)
        );
    }

    private final Signature signature;
    private final Signature boundSignature;
    // The concrete behavior applied in evaluate(): either left() or right().
    private final BiFunction<String, Integer, String> func;

    private StringLeftRightFunction(Signature signature,
                                    Signature boundSignature,
                                    BiFunction<String, Integer, String> func) {
        this.signature = signature;
        this.boundSignature = boundSignature;
        this.func = func;
    }

    @Override
    public Signature signature() {
        return signature;
    }

    @Override
    public Signature boundSignature() {
        return boundSignature;
    }

    /**
     * Evaluates the function: NULL propagates from either argument; a zero
     * length or empty input yields the empty string; otherwise the configured
     * left/right implementation is applied.
     */
    @Override
    public String evaluate(TransactionContext txnCtx, NodeContext nodeCtx, Input[] args) {
        assert args.length == 2 : String.format(Locale.ENGLISH,
                                                "number of arguments must be 2, got %d instead",
                                                args.length);
        String text = (String) args[0].value();
        Number count = (Number) args[1].value();
        if (text == null || count == null) {
            return null;
        }
        int n = count.intValue();
        return (n == 0 || text.isEmpty()) ? "" : func.apply(text, n);
    }

    // Positive len: first len characters; negative len: all but the last |len|.
    private static String left(String str, int len) {
        int length = str.length();
        if (len > 0) {
            return str.substring(0, Math.min(len, length));
        }
        int keep = length + len;
        return keep > 0 ? str.substring(0, keep) : "";
    }

    // Positive len: last len characters; negative len: all but the first |len|.
    private static String right(String str, int len) {
        int length = str.length();
        if (len < 0) {
            return str.substring(Math.min(-len, length));
        }
        int start = length - len;
        return start <= 0 ? str : str.substring(start);
    }
}
| 1,623 |
852 |
<reponame>malbouis/cmssw
#ifndef DataFormatsHcalCalibObjectsHcalIsoTrkCalibVariables_h
#define DataFormatsHcalCalibObjectsHcalIsoTrkCalibVariables_h

#include <string>
#include <vector>

// Plain data holder for isolated-track HCAL calibration variables.
// All members are public; clear() resets every field to its empty/zero state.
class HcalIsoTrkCalibVariables {
public:
  HcalIsoTrkCalibVariables() { clear(); }

  // Resets all scalars to zero/false and empties every container.
  void clear() {
    eventWeight_ = rhoh_ = 0;
    nVtx_ = goodPV_ = nTrk_ = 0;
    trgbits_.clear();
    mindR1_ = l1pt_ = l1eta_ = l1phi_ = 0;
    mindR2_ = l3pt_ = l3eta_ = l3phi_ = 0;
    p_ = pt_ = phi_ = gentrackP_ = 0;
    ieta_ = iphi_ = 0;
    eMipDR_.clear();
    eHcal_ = eHcal10_ = eHcal30_ = 0;
    eHcalRaw_ = eHcal10Raw_ = eHcal30Raw_ = 0;
    eHcalAux_ = eHcal10Aux_ = eHcal30Aux_ = 0;
    emaxNearP_ = eAnnular_ = hmaxNearP_ = hAnnular_ = 0;
    selectTk_ = qltyFlag_ = qltyMissFlag_ = qltyPVFlag_ = false;
    detIds_.clear();
    hitEnergies_.clear();
    hitEnergiesRaw_.clear();
    hitEnergiesAux_.clear();
    detIds1_.clear();
    hitEnergies1_.clear();
    hitEnergies1Raw_.clear();
    hitEnergies1Aux_.clear();
    detIds3_.clear();
    hitEnergies3_.clear();
    hitEnergies3Raw_.clear();
    hitEnergies3Aux_.clear();
  };

  // Event-level quantities.
  double eventWeight_, rhoh_;
  int goodPV_, nVtx_, nTrk_;
  std::vector<bool> trgbits_;
  // l1*/l3* matching variables (presumably L1/HLT trigger objects — confirm).
  double mindR1_, l1pt_, l1eta_, l1phi_;
  double mindR2_, l3pt_, l3eta_, l3phi_;
  // Track kinematics.
  double p_, pt_, phi_, gentrackP_;
  int ieta_, iphi_;
  // Energy sums in default, Raw, and Aux variants.
  std::vector<double> eMipDR_;
  double eHcal_, eHcal10_, eHcal30_;
  double eHcalRaw_, eHcal10Raw_, eHcal30Raw_;
  double eHcalAux_, eHcal10Aux_, eHcal30Aux_;
  // Isolation and track-quality variables.
  double emaxNearP_, eAnnular_, hmaxNearP_, hAnnular_;
  bool selectTk_, qltyFlag_, qltyMissFlag_, qltyPVFlag_;
  // Per-hit detector ids and energies (default / Raw / Aux variants).
  std::vector<unsigned int> detIds_, detIds1_, detIds3_;
  std::vector<double> hitEnergies_, hitEnergies1_, hitEnergies3_;
  std::vector<double> hitEnergiesRaw_, hitEnergies1Raw_, hitEnergies3Raw_;
  std::vector<double> hitEnergiesAux_, hitEnergies1Aux_, hitEnergies3Aux_;
};

typedef std::vector<HcalIsoTrkCalibVariables> HcalIsoTrkCalibVariablesCollection;

#endif
| 958 |
14,425 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.ipc;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.thirdparty.protobuf.ServiceException;
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public class RPCUtil {

  /**
   * Returns an instance of {@link YarnException} wrapping {@code t} as its
   * cause.
   *
   * @param t the underlying failure
   * @return a new {@link YarnException}
   */
  public static YarnException getRemoteException(Throwable t) {
    return new YarnException(t);
  }

  /**
   * Returns an instance of {@link YarnException} with the given message.
   *
   * @param message the exception message
   * @return a new {@link YarnException}
   */
  public static YarnException getRemoteException(String message) {
    return new YarnException(message);
  }

  /**
   * Reflectively instantiates {@code cls} from the message of {@code re},
   * setting {@code re} as the cause. If instantiation fails for any reason
   * (no (String) constructor or a reflection error), the original
   * {@link RemoteException} is rethrown instead, because it carries useful
   * information as against the java.lang.reflect exceptions.
   */
  private static <T extends Throwable> T instantiateException(
      Class<? extends T> cls, RemoteException re) throws RemoteException {
    try {
      Constructor<? extends T> cn = cls.getConstructor(String.class);
      cn.setAccessible(true);
      T ex = cn.newInstance(re.getMessage());
      ex.initCause(re);
      return ex;
    } catch (NoSuchMethodException | IllegalArgumentException
        | SecurityException | InstantiationException | IllegalAccessException
        | InvocationTargetException e) {
      // Collapsed from six identical catch blocks into one multi-catch:
      // any reflective failure falls back to the original RemoteException.
      throw re;
    }
  }

  /** Typed wrapper of {@link #instantiateException} for {@link YarnException} subclasses. */
  private static <T extends YarnException> T instantiateYarnException(
      Class<? extends T> cls, RemoteException re) throws RemoteException {
    return instantiateException(cls, re);
  }

  /** Typed wrapper of {@link #instantiateException} for {@link IOException} subclasses. */
  private static <T extends IOException> T instantiateIOException(
      Class<? extends T> cls, RemoteException re) throws RemoteException {
    return instantiateException(cls, re);
  }

  /** Typed wrapper of {@link #instantiateException} for {@link RuntimeException} subclasses. */
  private static <T extends RuntimeException> T instantiateRuntimeException(
      Class<? extends T> cls, RemoteException re) throws RemoteException {
    return instantiateException(cls, re);
  }

  /**
   * Utility method that unwraps and returns appropriate exceptions.
   *
   * @param se
   *          ServiceException
   * @return An instance of the actual exception, which will be a subclass of
   *         {@link YarnException} or {@link IOException}
   */
  public static Void unwrapAndThrowException(ServiceException se)
      throws IOException, YarnException {
    Throwable cause = se.getCause();
    if (cause == null) {
      // SE generated by the RPC layer itself.
      throw new IOException(se);
    } else {
      if (cause instanceof RemoteException) {
        RemoteException re = (RemoteException) cause;
        Class<?> realClass = null;
        try {
          realClass = Class.forName(re.getClassName());
        } catch (ClassNotFoundException cnf) {
          // Assume this to be a new exception type added to YARN. This isn't
          // absolutely correct since the RPC layer could add an exception as
          // well.
          throw instantiateYarnException(YarnException.class, re);
        }
        // Rebuild the most specific exception type the remote side reported.
        if (YarnException.class.isAssignableFrom(realClass)) {
          throw instantiateYarnException(
              realClass.asSubclass(YarnException.class), re);
        } else if (IOException.class.isAssignableFrom(realClass)) {
          throw instantiateIOException(realClass.asSubclass(IOException.class),
              re);
        } else if (RuntimeException.class.isAssignableFrom(realClass)) {
          throw instantiateRuntimeException(
              realClass.asSubclass(RuntimeException.class), re);
        } else {
          throw re;
        }
      } else if (cause instanceof IOException) {
        // RPC Client exception.
        throw (IOException) cause;
      } else if (cause instanceof RuntimeException) {
        // RPC RuntimeException
        throw (RuntimeException) cause;
      } else {
        // Should not be generated.
        throw new IOException(se);
      }
    }
  }
}
| 1,727 |
572 |
<reponame>CyberFlameGO/examples
"""Create an executable with runfiles.
Runfiles are files that are needed at runtime (when the executable in run).
This example also shows a use of `ctx.expand_location`.
"""
def _impl(ctx):
    # Expand the label in the command string to a runfiles-relative path.
    # The second arg is the list of labels that may be expanded.
    command = ctx.expand_location(ctx.attr.command, ctx.attr.data)

    # Create the output executable file with command as its content.
    ctx.actions.write(
        output = ctx.outputs.executable,
        content = command,
        is_executable = True,
    )

    # Create runfiles from the files specified in the data attribute.
    # The shell executable - the output of this rule - can use them at
    # runtime.
    return [DefaultInfo(
        runfiles = ctx.runfiles(files = ctx.files.data),
    )]

# `execute` emits an executable script whose body is the location-expanded
# `command` attribute; files listed in `data` are available at runtime.
execute = rule(
    implementation = _impl,
    executable = True,
    attrs = {
        "command": attr.string(),
        "data": attr.label_list(allow_files = True),
    },
)
| 371 |
350 |
import os
import pytest
@pytest.fixture
def travis():
    """True when running on Travis CI (detected via the TRAVIS env variable)."""
    # Membership test directly on the mapping; `.keys()` was redundant.
    return 'TRAVIS' in os.environ
@pytest.fixture
def appveyor():
    """True when running on AppVeyor CI (detected via the APPVEYOR env variable)."""
    # Membership test directly on the mapping; `.keys()` was redundant.
    return 'APPVEYOR' in os.environ
@pytest.fixture
def testdir():
    """Absolute path of the `data` directory located next to this file."""
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
@pytest.fixture
def testdata(testdir):
    """Maps short logical names to absolute paths of the test data files."""
    basenames = {
        's1': 'S1A_IW_GRDH_1SDV_20150222T170750_20150222T170815_004739_005DD8_3768.zip',
        's1_2': 'S1A_IW_GRDH_1SDV_20150222T170725_20150222T170750_004739_005DD8_CEAB.zip',
        's1_3': 'S1A_IW_GRDH_1SDV_20150203T043109_20150203T043134_004454_00574F_6D00.zip',
        's1_4': 'S1A_IW_GRDH_1SDV_20150203T043109_20150203T043134_004454_00574F_FEC3.zip',
        's1_orbit': 'S1A_IW_GRDH_1SDV_20210119T031653_20210119T031718_036201_043ED0_8255.zip',
        # ftp://ftp.eorc.jaxa.jp/pub/ALOS-2/1501sample/310_forestbrazil/0000022708_001001_ALOS2015976960-140909.zip
        'psr2': '0000022708_001001_ALOS2015976960-140909.zip',
        'tif': 'S1A__IW___A_20150309T173017_VV_grd_mli_geo_norm_db.tif',
        'archive_old': 'archive_outdated.csv',
        'dempar': 'dem.par',
        'mlipar': 'mli.par',
    }
    return {key: os.path.join(testdir, name) for key, name in basenames.items()}
@pytest.fixture
def auxdata_dem_cases():
    """Pairs of (DEM type name, expected remote file list) used as test cases."""
    return [
        ('AW3D30', ['N050E010/N051E011.tar.gz']),
        ('SRTM 1Sec HGT', ['N51E011.SRTMGL1.hgt.zip']),
        ('SRTM 3Sec', ['srtm_39_02.zip']),
        ('TDX90m', ['90mdem/DEM/N51/E010/TDM1_DEM__30_N51E011.zip']),
    ]
| 895 |
1,283 |
/*
* CrossSignal.cpp
*
* Created on: 2015年2月20日
* Author: fasiondog
*/
#include "../../../indicator/crt/KDATA.h"
#include "CrossSignal.h"
namespace hku {

// Default constructor: the "kpart" parameter defaults to "CLOSE".
CrossSignal::CrossSignal() : SignalBase("SG_Cross") {
    setParam<string>("kpart", "CLOSE");
}

// Builds a cross signal from a fast and a slow indicator evaluated over the
// chosen part of the K-data (e.g. "CLOSE").
CrossSignal::CrossSignal(const Indicator& fast, const Indicator& slow, const string& kpart)
: SignalBase("SG_Cross"), m_fast(fast), m_slow(slow) {
    setParam<string>("kpart", kpart);
}

CrossSignal::~CrossSignal() {}

// Copies only the two indicators here. NOTE(review): the "kpart" parameter is
// not copied in this method — presumably the framework's public clone()
// copies parameters before calling _clone(); confirm, otherwise a cloned
// signal silently falls back to the default "CLOSE".
SignalPtr CrossSignal::_clone() {
    CrossSignal* p = new CrossSignal();
    p->m_fast = m_fast;
    p->m_slow = m_slow;
    return SignalPtr(p);
}

// Emits a buy signal on the bar where the fast line crosses above the slow
// line, and a sell signal where it crosses below.
void CrossSignal::_calculate() {
    string kpart = getParam<string>("kpart");
    Indicator kdata = KDATA_PART(m_kdata, kpart);
    Indicator fast = m_fast(kdata);
    Indicator slow = m_slow(kdata);
    HKU_ERROR_IF_RETURN(fast.size() != slow.size(), void(), "fast.size() != slow.size()");
    // Skip the longer of the two indicators' warm-up (discard) ranges; the
    // loop starts one bar later so that i-1 is always valid.
    size_t discard = fast.discard() > slow.discard() ? fast.discard() : slow.discard();
    size_t total = fast.size();
    for (size_t i = discard + 1; i < total; ++i) {
        if (fast[i - 1] < slow[i - 1] && fast[i] > slow[i]) {
            _addBuySignal(m_kdata[i].datetime);
        } else if (fast[i - 1] > slow[i - 1] && fast[i] < slow[i]) {
            _addSellSignal(m_kdata[i].datetime);
        }
    }
}

// Factory helper: creates a fast/slow line-crossing signal.
SignalPtr HKU_API SG_Cross(const Indicator& fast, const Indicator& slow, const string& kpart) {
    return SignalPtr(new CrossSignal(fast, slow, kpart));
}

} /* namespace hku */
| 652 |
841 |
<reponame>brunolmfg/resteasy
package org.jboss.resteasy.test.exception.resource;
import jakarta.ws.rs.core.Response;
import jakarta.ws.rs.ext.ExceptionMapper;
import jakarta.ws.rs.ext.Provider;
@Provider
public class ExceptionMapperRuntimeExceptionMapper implements ExceptionMapper<RuntimeException> {

    /**
     * Maps any unhandled {@link RuntimeException} to an empty
     * 406 (Not Acceptable) response.
     */
    @Override
    public Response toResponse(RuntimeException exception) {
        return Response.status(Response.Status.NOT_ACCEPTABLE).build();
    }
}
| 147 |
852 |
// -*- C++ -*-
//
// Package: PluginManager
// Class : TwoDummy
//
// Implementation:
// <Notes on implementation>
//
// Original Author: <NAME>
// Created: Fri Apr 6 15:32:53 EDT 2007
//
// system include files
// user include files
#include "FWCore/PluginManager/test/DummyFactory.h"
namespace testedmplugin {
  // Trivial DummyBase implementation used by the plugin-manager tests;
  // distinguishable from its siblings by the constant value 2.
  struct DummyTwo : public DummyBase {
    int value() const { return 2; }
  };
}  // namespace testedmplugin

// Register DummyTwo with the test plugin factory under the name "DummyTwo".
DEFINE_EDM_PLUGIN(testedmplugin::DummyFactory, testedmplugin::DummyTwo, "DummyTwo");
| 195 |
3,326 |
<gh_stars>1000+
import pytest
import cirq
from cirq.testing import assert_equivalent_op_tree
from cirq.devices.noise_properties import (
NoiseProperties,
NoiseModelFromNoiseProperties,
get_duration_ns,
)
import numpy as np
def test_invalid_arguments():
    """Constructor validation: empty, out-of-range, and mutually exclusive metrics raise."""
    with pytest.raises(ValueError, match='At least one metric must be specified'):
        NoiseProperties()
    with pytest.raises(ValueError, match='xeb, pauli error, p00, and p11 must be between 0 and 1'):
        NoiseProperties(p00=1.2)
    with pytest.raises(ValueError, match='xeb, pauli error, p00, and p11 must be between 0 and 1'):
        NoiseProperties(pauli_error=-0.2)
    # xeb fidelity, pauli error and decay constant are redundant encodings of
    # the same depolarization strength, so only one may be given.
    with pytest.raises(
        ValueError,
        match='Only one of xeb fidelity, pauli error, or decay constant should be defined',
    ):
        NoiseProperties(pauli_error=0.2, xeb_fidelity=0.5)
    with pytest.raises(ValueError, match='A NoiseProperties object must be specified'):
        NoiseModelFromNoiseProperties(None)
def test_constructor_and_metrics():
    """Unset metrics read back as None; xeb <-> decay-constant conversions agree."""
    prop = NoiseProperties(p00=0.2)
    assert prop.xeb is None
    assert prop.pauli_error is None
    assert prop.decay_constant is None
    assert prop.average_error() is None
    # These and other metrics in the file are purely for testing and
    # do not necessarily represent actual hardware behavior
    xeb_fidelity = 0.95
    p00 = 0.1
    t1_ns = 200.0
    # Create fidelity object with a defined XEB fidelity
    from_xeb = NoiseProperties(xeb_fidelity=xeb_fidelity, p00=p00, t1_ns=t1_ns)
    assert from_xeb.p00 == p00
    assert from_xeb.p11 is None
    assert from_xeb.t1_ns == t1_ns
    assert from_xeb.xeb == xeb_fidelity
    # Create another fidelity object with the decay constant from the first one
    decay_constant_from_xeb = from_xeb.decay_constant
    from_decay = NoiseProperties(decay_constant=decay_constant_from_xeb)
    # Check that their depolarization metrics match
    assert np.isclose(xeb_fidelity, from_decay.xeb)
    assert np.isclose(from_xeb.pauli_error, from_decay.pauli_error)
    assert np.isclose(from_xeb.average_error(), from_decay.average_error())
def test_gate_durations():
    """get_duration_ns returns the hard-coded per-gate timing table (in nanoseconds)."""
    assert get_duration_ns(cirq.X) == 25.0
    assert get_duration_ns(cirq.FSimGate(3 * np.pi / 2, np.pi / 6)) == 12.0
    assert get_duration_ns(cirq.FSimGate(3 * np.pi / 4, np.pi / 6)) == 32.0
    assert get_duration_ns(cirq.ISWAP) == 32.0
    assert get_duration_ns(cirq.ZPowGate(exponent=5)) == 0.0
    assert get_duration_ns(cirq.MeasurementGate(1, 'a')) == 4000.0
    # WaitGate duration is taken from the gate itself, not the table.
    wait_gate = cirq.WaitGate(cirq.Duration(nanos=4))
    assert get_duration_ns(wait_gate) == 4.0
    assert get_duration_ns(cirq.CZ) == 25.0
def test_readout_error():
    """Readout error is inserted as a GeneralizedAmplitudeDampingChannel before measurement.

    With both p00 and p11 set, the channel parameters are p = p11/(p00+p11)
    and gamma = p00 + p11 (since gamma = p11/p); with only one of them set,
    p collapses to 0.0 (p00-only) or 1.0 (p11-only).
    """
    p00 = 0.05
    p11 = 0.1
    p = p11 / (p00 + p11)
    gamma = p11 / p  # algebraically equals p00 + p11
    # Create qubits and circuit
    qubits = [cirq.LineQubit(0), cirq.LineQubit(1)]
    circuit = cirq.Circuit(
        cirq.Moment([cirq.X(qubits[0])]),
        cirq.Moment([cirq.CNOT(qubits[0], qubits[1])]),
        cirq.Moment([cirq.H(qubits[1])]),
        cirq.Moment([cirq.measure(qubits[0], key='q0'), cirq.measure(qubits[1], key='q1')]),
    )
    # Create noise model from NoiseProperties object with specified noise
    prop = NoiseProperties(p00=p00, p11=p11)
    noise_model = NoiseModelFromNoiseProperties(prop)
    noisy_circuit = cirq.Circuit(noise_model.noisy_moments(circuit, qubits))
    # Insert expected channels to circuit
    expected_circuit = cirq.Circuit(
        cirq.Moment([cirq.X(qubits[0])]),
        cirq.Moment([cirq.CNOT(qubits[0], qubits[1])]),
        cirq.Moment([cirq.H(qubits[1])]),
        cirq.Moment([cirq.GeneralizedAmplitudeDampingChannel(p=p, gamma=gamma).on_each(qubits)]),
        cirq.Moment([cirq.measure(qubits[0], key='q0'), cirq.measure(qubits[1], key='q1')]),
    )
    assert_equivalent_op_tree(expected_circuit, noisy_circuit)
    # Create Noise Model with just p00
    prop_p00 = NoiseProperties(p00=p00)
    noise_model_p00 = NoiseModelFromNoiseProperties(prop_p00)
    noisy_circuit_p00 = cirq.Circuit(noise_model_p00.noisy_moments(circuit, qubits))
    # Insert expected channels to circuit
    expected_circuit_p00 = cirq.Circuit(
        cirq.Moment([cirq.X(qubits[0])]),
        cirq.Moment([cirq.CNOT(qubits[0], qubits[1])]),
        cirq.Moment([cirq.H(qubits[1])]),
        cirq.Moment([cirq.GeneralizedAmplitudeDampingChannel(p=0.0, gamma=p00).on_each(qubits)]),
        cirq.Moment([cirq.measure(qubits[0], key='q0'), cirq.measure(qubits[1], key='q1')]),
    )
    assert_equivalent_op_tree(expected_circuit_p00, noisy_circuit_p00)
    # Create Noise Model with just p11
    prop_p11 = NoiseProperties(p11=p11)
    noise_model_p11 = NoiseModelFromNoiseProperties(prop_p11)
    noisy_circuit_p11 = cirq.Circuit(noise_model_p11.noisy_moments(circuit, qubits))
    # Insert expected channels to circuit
    expected_circuit_p11 = cirq.Circuit(
        cirq.Moment([cirq.X(qubits[0])]),
        cirq.Moment([cirq.CNOT(qubits[0], qubits[1])]),
        cirq.Moment([cirq.H(qubits[1])]),
        cirq.Moment([cirq.GeneralizedAmplitudeDampingChannel(p=1.0, gamma=p11).on_each(qubits)]),
        cirq.Moment([cirq.measure(qubits[0], key='q0'), cirq.measure(qubits[1], key='q1')]),
    )
    assert_equivalent_op_tree(expected_circuit_p11, noisy_circuit_p11)
def test_depolarization_error():
    """A pauli_error-only model appends a depolarize(pauli_error / 3) moment after every moment."""
    # Account for floating point errors
    # Needs Cirq issue 3965 to be resolved
    pauli_error = 0.09999999999999998
    # Create qubits and circuit
    qubits = [cirq.LineQubit(0), cirq.LineQubit(1)]
    circuit = cirq.Circuit(
        cirq.Moment([cirq.X(qubits[0])]),
        cirq.Moment([cirq.CNOT(qubits[0], qubits[1])]),
        cirq.Moment([cirq.H(qubits[1])]),
        cirq.Moment([cirq.measure(qubits[0], key='q0'), cirq.measure(qubits[1], key='q1')]),
    )
    # Create noise model from NoiseProperties object with specified noise
    prop = NoiseProperties(pauli_error=pauli_error)
    noise_model = NoiseModelFromNoiseProperties(prop)
    noisy_circuit = cirq.Circuit(noise_model.noisy_moments(circuit, qubits))
    # Insert expected channels to circuit
    expected_circuit = cirq.Circuit(
        cirq.Moment([cirq.X(qubits[0])]),
        cirq.Moment([cirq.depolarize(pauli_error / 3).on_each(qubits)]),
        cirq.Moment([cirq.CNOT(qubits[0], qubits[1])]),
        cirq.Moment([cirq.depolarize(pauli_error / 3).on_each(qubits)]),
        cirq.Moment([cirq.H(qubits[1])]),
        cirq.Moment([cirq.depolarize(pauli_error / 3).on_each(qubits)]),
        cirq.Moment([cirq.measure(qubits[0], key='q0'), cirq.measure(qubits[1], key='q1')]),
        cirq.Moment([cirq.depolarize(pauli_error / 3).on_each(qubits)]),
    )
    assert_equivalent_op_tree(expected_circuit, noisy_circuit)
def test_ampl_damping_error():
    """A t1_ns-only model appends amplitude damping 1 - exp(-duration/t1) after each moment.

    NOTE(review): the input uses FSimGate(5*pi/2, pi) while the expected circuit
    uses FSimGate(pi/2, pi) — presumably relying on angle periodicity in the
    noise model's output; confirm against NoiseModelFromNoiseProperties.
    """
    t1_ns = 200.0
    # Create qubits and circuit
    qubits = [cirq.LineQubit(0), cirq.LineQubit(1)]
    circuit = cirq.Circuit(
        cirq.Moment([cirq.X(qubits[0])]),
        cirq.Moment([cirq.CNOT(qubits[0], qubits[1])]),
        cirq.Moment([cirq.FSimGate(5 * np.pi / 2, np.pi).on_each(qubits)]),
        cirq.Moment([cirq.measure(qubits[0], key='q0'), cirq.measure(qubits[1], key='q1')]),
    )
    # Create noise model from NoiseProperties object with specified noise
    prop = NoiseProperties(t1_ns=t1_ns)
    noise_model = NoiseModelFromNoiseProperties(prop)
    noisy_circuit = cirq.Circuit(noise_model.noisy_moments(circuit, qubits))
    # Insert expected channels to circuit
    # (damping strength tracks each gate's duration: 25ns X/CNOT, 12ns FSim, 4000ns measure)
    expected_circuit = cirq.Circuit(
        cirq.Moment([cirq.X(qubits[0])]),
        cirq.Moment([cirq.amplitude_damp(1 - np.exp(-25.0 / t1_ns)).on_each(qubits)]),
        cirq.Moment([cirq.CNOT(qubits[0], qubits[1])]),
        cirq.Moment([cirq.amplitude_damp(1 - np.exp(-25.0 / t1_ns)).on_each(qubits)]),
        cirq.Moment([cirq.FSimGate(np.pi / 2, np.pi).on_each(qubits)]),
        cirq.Moment([cirq.amplitude_damp(1 - np.exp(-12.0 / t1_ns)).on_each(qubits)]),
        cirq.Moment([cirq.measure(qubits[0], key='q0'), cirq.measure(qubits[1], key='q1')]),
        cirq.Moment([cirq.amplitude_damp(1 - np.exp(-4000.0 / t1_ns)).on_each(qubits)]),
    )
    assert_equivalent_op_tree(expected_circuit, noisy_circuit)
def test_combined_error():
    """T1, readout (p11) and depolarization noise combined in one model.

    The depolarization inserted per moment is the total pauli_error minus the
    Pauli error already accounted for by T1 decay over that moment's duration;
    a RuntimeWarning is expected when the T1 contribution exceeds the total.
    """
    # Helper function to calculate pauli error from depolarization
    def pauli_error_from_depolarization(pauli_error, t1_ns, duration):
        t2 = 2 * t1_ns
        pauli_error_from_t1 = (1 - np.exp(-duration / t2)) / 2 + (1 - np.exp(-duration / t1_ns)) / 4
        if pauli_error >= pauli_error_from_t1:
            return pauli_error - pauli_error_from_t1
        return pauli_error
    t1_ns = 2000.0
    p11 = 0.01
    # Account for floating point errors
    # Needs Cirq issue 3965 to be resolved
    pauli_error = 0.019999999999999962
    # Create qubits and circuit
    qubits = [cirq.LineQubit(0), cirq.LineQubit(1)]
    circuit = cirq.Circuit(
        cirq.Moment([cirq.X(qubits[0])]),
        cirq.Moment([cirq.CNOT(qubits[0], qubits[1])]),
        cirq.Moment([cirq.measure(qubits[0], key='q0')]),
        cirq.Moment([cirq.ISwapPowGate().on_each(qubits)]),
        cirq.Moment([cirq.measure(qubits[0], key='q0'), cirq.measure(qubits[1], key='q1')]),
    )
    # Create noise model from NoiseProperties object with specified noise
    prop = NoiseProperties(t1_ns=t1_ns, p11=p11, pauli_error=pauli_error)
    noise_model = NoiseModelFromNoiseProperties(prop)
    with pytest.warns(
        RuntimeWarning, match='Pauli error from T1 decay is greater than total Pauli error'
    ):
        noisy_circuit = cirq.Circuit(noise_model.noisy_moments(circuit, qubits))
    # Insert expected channels to circuit
    # (readout damping applies only to the measured qubit(s) of each measurement moment)
    expected_circuit = cirq.Circuit(
        cirq.Moment([cirq.X(qubits[0])]),
        cirq.Moment(
            [
                cirq.depolarize(
                    pauli_error_from_depolarization(pauli_error, t1_ns, 25.0) / 3
                ).on_each(qubits)
            ]
        ),
        cirq.Moment([cirq.amplitude_damp(1 - np.exp(-25.0 / t1_ns)).on_each(qubits)]),
        cirq.Moment([cirq.CNOT(qubits[0], qubits[1])]),
        cirq.Moment(
            [
                cirq.depolarize(
                    pauli_error_from_depolarization(pauli_error, t1_ns, 25.0) / 3
                ).on_each(qubits)
            ]
        ),
        cirq.Moment([cirq.amplitude_damp(1 - np.exp(-25.0 / t1_ns)).on_each(qubits)]),
        cirq.Moment([cirq.GeneralizedAmplitudeDampingChannel(p=1.0, gamma=p11).on(qubits[0])]),
        cirq.Moment([cirq.measure(qubits[0], key='q0')]),
        cirq.Moment(
            [
                cirq.depolarize(
                    pauli_error_from_depolarization(pauli_error, t1_ns, 4000.0) / 3
                ).on_each(qubits)
            ]
        ),
        cirq.Moment([cirq.amplitude_damp(1 - np.exp(-4000.0 / t1_ns)).on_each(qubits)]),
        cirq.Moment([cirq.ISwapPowGate().on_each(qubits)]),
        cirq.Moment(
            [
                cirq.depolarize(
                    pauli_error_from_depolarization(pauli_error, t1_ns, 32.0) / 3
                ).on_each(qubits)
            ]
        ),
        cirq.Moment([cirq.amplitude_damp(1 - np.exp(-32.0 / t1_ns)).on_each(qubits)]),
        cirq.Moment([cirq.GeneralizedAmplitudeDampingChannel(p=1.0, gamma=p11).on_each(qubits)]),
        cirq.Moment([cirq.measure(qubits[0], key='q0'), cirq.measure(qubits[1], key='q1')]),
        cirq.Moment(
            [
                cirq.depolarize(
                    pauli_error_from_depolarization(pauli_error, t1_ns, 4000.0) / 3
                ).on_each(qubits)
            ]
        ),
        cirq.Moment([cirq.amplitude_damp(1 - np.exp(-4000.0 / t1_ns)).on_each(qubits)]),
    )
    assert_equivalent_op_tree(expected_circuit, noisy_circuit)
| 5,583 |
310 |
import torch
from const import *
def word2idx(sents, word2idx):
    """Map each (t1, t2) token-list pair to index-list pairs via `word2idx`.

    Tokens missing from the vocabulary map to the UNK index.

    Args:
        sents: iterable of [tokens1, tokens2] pairs.
        word2idx: dict mapping token -> integer index.

    Returns:
        list of [indices1, indices2] pairs, in input order.
    """
    results = []
    for t1, t2 in sents:
        # dict.get avoids the double lookup of `w in d` followed by `d[w]`.
        results.append([[word2idx.get(w, UNK) for w in t1],
                        [word2idx.get(w, UNK) for w in t2]])
    return results
class Dictionary(object):
    """Word -> index vocabulary that reserves the lowest slots for special tokens."""

    def __init__(self):
        # Special symbols (PAD/UNK/SEP/CLS/MASK) occupy the lowest indices.
        self.word2idx = {
            WORD[PAD]: PAD,
            WORD[UNK]: UNK,
            WORD[SEP]: SEP,
            WORD[CLS]: CLS,
            WORD[MASK]: MASK
        }
        self.idx = len(self.word2idx)

    def add(self, word):
        """Assign the next free index to `word` unless it is already known."""
        if word not in self.word2idx:
            self.word2idx[word] = self.idx
            self.idx += 1

    def __call__(self, sents, min_count=5):
        """Count tokens over all sentence pairs and register the frequent ones.

        Words seen at most `min_count` times are not added; returns how many
        distinct words were ignored that way.
        """
        all_tokens = [tok for pair in sents for tok in pair[0] + pair[1]]
        counts = {tok: 0 for tok in set(all_tokens)}
        for tok in all_tokens:
            counts[tok] += 1
        ignored = 0
        for tok, n in counts.items():
            if n <= min_count:
                ignored += 1
            else:
                self.add(tok)
        return ignored

    def __len__(self):
        """Current vocabulary size (special tokens included)."""
        return self.idx
class Corpus(object):
    """Reads a sentence-pair corpus, builds a vocabulary, and dumps it with torch.save."""

    def __init__(self, save_data="data/corpus.pt", max_len=128):
        self.train = "data/fuel.cnn"  # input: one "t1 SPLIT_CODE t2" pair per line
        self.save_data = save_data    # output path for the serialized dataset
        self.word = Dictionary()
        self.max_len = max_len

    def parse_data(self, _file):
        """Parse `_file` into token-pair lists and populate the vocabulary."""
        sents = []
        # `with` closes the handle deterministically; the original
        # `for sentence in open(_file)` leaked the file object.
        with open(_file) as corpus_file:
            for sentence in corpus_file:
                t1, t2 = sentence.strip().split(SPLIT_CODE)
                words1 = t1.strip().split()
                words2 = t2.strip().split()
                sents.append([words1, words2])
        print(f"ignored word count: {self.word(sents)}")
        self.sents = sents

    def save(self):
        """Parse the training file and serialize vocab + indexed corpus."""
        self.parse_data(self.train)
        data = {
            'max_len': self.max_len,
            'dict': self.word.word2idx,
            'word': word2idx(self.sents, self.word.word2idx),
        }
        torch.save(data, self.save_data)
        print(f'Finish dumping the data to file - {self.save_data}')
        print(f'words length - {len(self.word)}')
if __name__ == "__main__":
    # Build and serialize the corpus when run as a script.
    corpus = Corpus()
    corpus.save()
| 1,150 |
3,459 |
#ifdef SCPU_CPP
//accessors: programmable I/O port state ($4201) and joypad strobe latch ($4016.d0)
uint8 sCPU::pio() { return status.pio; }
bool sCPU::joylatch() { return status.joypad_strobe_latch; }
//WMDATA
//WRAM read through the $2180 data port; the 17-bit address auto-increments
//and wraps within the 128KB WRAM window mapped at $7e0000
uint8 sCPU::mmio_r2180() {
  uint8 r = bus.read(0x7e0000 | status.wram_addr);
  status.wram_addr = (status.wram_addr + 1) & 0x01ffff;
  return r;
}
//WMDATA
//write counterpart of $2180; same post-increment and wrap behavior
void sCPU::mmio_w2180(uint8 data) {
  bus.write(0x7e0000 | status.wram_addr, data);
  status.wram_addr = (status.wram_addr + 1) & 0x01ffff;
}
//WMADDL
//low byte of the WRAM port address
void sCPU::mmio_w2181(uint8 data) {
  status.wram_addr = (status.wram_addr & 0xffff00) | (data);
  status.wram_addr &= 0x01ffff;
}
//WMADDM
//middle byte of the WRAM port address
void sCPU::mmio_w2182(uint8 data) {
  status.wram_addr = (status.wram_addr & 0xff00ff) | (data << 8);
  status.wram_addr &= 0x01ffff;
}
//WMADDH
//high byte of the WRAM port address; only bit 16 survives the 17-bit mask
void sCPU::mmio_w2183(uint8 data) {
  status.wram_addr = (status.wram_addr & 0x00ffff) | (data << 16);
  status.wram_addr &= 0x01ffff;
}
//JOYSER0
//bit 0 is shared between JOYSER0 and JOYSER1, therefore
//strobing $4016.d0 affects both controller port latches.
//$4017 bit 0 writes are ignored.
void sCPU::mmio_w4016(uint8 data) {
  status.joypad_strobe_latch = !!(data & 1);
  //writing a 1 latches the current controller state
  if(status.joypad_strobe_latch == 1) {
    input.poll();
  }
}
//JOYSER0
//7-2 = MDR
//1-0 = Joypad serial data
//
//TODO: test whether strobe latch of zero returns
//realtime or buffered status of joypadN.b
//(MDR = last value on the bus, i.e. open-bus bits)
uint8 sCPU::mmio_r4016() {
  uint8 r = regs.mdr & 0xfc;
  r |= input.port_read(0) & 3;
  //printf("JOYSER0_L2: 0x%02x\n", r & 0x3);
  return r;
}
//JOYSER1
//7-5 = MDR
//4-2 = Always 1 (pins are connected to GND)
//1-0 = Joypad serial data
uint8 sCPU::mmio_r4017() {
  uint8 r = (regs.mdr & 0xe0) | 0x1c;
  r |= input.port_read(1) & 3;
  //printf("JOYSER1_L2: 0x%02x\n", r & 0x3);
  return r;
}
//NMITIMEN
//bit 0 enables auto joypad polling; remaining NMI/IRQ enable bits are
//decoded by nmitimen_update()
void sCPU::mmio_w4200(uint8 data) {
  status.auto_joypad_poll = !!(data & 0x01);
  nmitimen_update(data);
}
//WRIO
//a 1->0 transition on bit 7 latches the PPU H/V counters
void sCPU::mmio_w4201(uint8 data) {
  if((status.pio & 0x80) && !(data & 0x80)) {
    ppu.latch_counters();
  }
  status.pio = data;
}
//WRMPYA
void sCPU::mmio_w4202(uint8 data) {
  status.mul_a = data;
}
//WRMPYB
//writing the multiplier starts the multiply; the product is latched into
//$4216 but reads as 0 until the ALU lock is released after alu_mul_delay
void sCPU::mmio_w4203(uint8 data) {
  status.mul_b = data;
  status.r4216 = status.mul_a * status.mul_b;
  status.alu_lock = true;
  event.enqueue(config.cpu.alu_mul_delay, EventAluLockRelease);
}
//WRDIVL
void sCPU::mmio_w4204(uint8 data) {
  status.div_a = (status.div_a & 0xff00) | (data);
}
//WRDIVH
void sCPU::mmio_w4205(uint8 data) {
  status.div_a = (status.div_a & 0x00ff) | (data << 8);
}
//WRDIVB
//writing the divisor starts the division; divide-by-zero yields
//quotient 0xffff and remainder = dividend (hardware behavior)
void sCPU::mmio_w4206(uint8 data) {
  status.div_b = data;
  status.r4214 = (status.div_b) ? status.div_a / status.div_b : 0xffff;
  status.r4216 = (status.div_b) ? status.div_a % status.div_b : status.div_a;
  status.alu_lock = true;
  event.enqueue(config.cpu.alu_div_delay, EventAluLockRelease);
}
//HTIMEL
//H-IRQ position is a 9-bit value split across $4207/$4208
void sCPU::mmio_w4207(uint8 data) {
  status.hirq_pos = (status.hirq_pos & ~0xff) | (data);
  status.hirq_pos &= 0x01ff;
}
//HTIMEH
void sCPU::mmio_w4208(uint8 data) {
  status.hirq_pos = (status.hirq_pos & 0xff) | (data << 8);
  status.hirq_pos &= 0x01ff;
}
//VTIMEL
//V-IRQ position is a 9-bit value split across $4209/$420a
void sCPU::mmio_w4209(uint8 data) {
  status.virq_pos = (status.virq_pos & ~0xff) | (data);
  status.virq_pos &= 0x01ff;
}
//VTIMEH
void sCPU::mmio_w420a(uint8 data) {
  status.virq_pos = (status.virq_pos & 0xff) | (data << 8);
  status.virq_pos &= 0x01ff;
}
//DMAEN
//one enable bit per channel; any nonzero write arms a general DMA transfer
void sCPU::mmio_w420b(uint8 data) {
  for(unsigned i = 0; i < 8; i++) {
    channel[i].dma_enabled = data & (1 << i);
  }
  if(data) status.dma_pending = true;
}
//HDMAEN
void sCPU::mmio_w420c(uint8 data) {
  for(unsigned i = 0; i < 8; i++) {
    channel[i].hdma_enabled = data & (1 << i);
  }
}
//MEMSEL
//bit 0 selects FastROM (6 master cycles) vs SlowROM (8) access speed
void sCPU::mmio_w420d(uint8 data) {
  status.rom_speed = (data & 1 ? 6 : 8);
}
//RDNMI
//7 = NMI acknowledge
//6-4 = MDR
//3-0 = CPU (5a22) version
uint8 sCPU::mmio_r4210() {
  uint8 r = (regs.mdr & 0x70);
  r |= (uint8)(rdnmi()) << 7;
  r |= (cpu_version & 0x0f);
  return r;
}
//TIMEUP
//7 = IRQ acknowledge
//6-0 = MDR
uint8 sCPU::mmio_r4211() {
  uint8 r = (regs.mdr & 0x7f);
  r |= (uint8)(timeup()) << 7;
  return r;
}
//HVBJOY
//7 = VBLANK acknowledge
//6 = HBLANK acknowledge
//5-1 = MDR
//0 = JOYPAD acknowledge
uint8 sCPU::mmio_r4212() {
  uint8 r = (regs.mdr & 0x3e);
  //vblank start line depends on overscan mode (225 or 240)
  uint16 vs = ppu.overscan() == false ? 225 : 240;
  //auto joypad polling
  if(vcounter() >= vs && vcounter() <= (vs + 2))r |= 0x01;
  //hblank
  if(hcounter() <= 2 || hcounter() >= 1096)r |= 0x40;
  //vblank
  if(vcounter() >= vs)r |= 0x80;
  return r;
}
//RDIO
uint8 sCPU::mmio_r4213() {
  return status.pio;
}
//RDDIVL
//ALU results read as 0 while the mul/div lock delay is active
uint8 sCPU::mmio_r4214() {
  if(status.alu_lock) return 0;
  return status.r4214;
}
//RDDIVH
uint8 sCPU::mmio_r4215() {
  if(status.alu_lock) return 0;
  return status.r4214 >> 8;
}
//RDMPYL
uint8 sCPU::mmio_r4216() {
  if(status.alu_lock) return 0;
  return status.r4216;
}
//RDMPYH
uint8 sCPU::mmio_r4217() {
  if(status.alu_lock) return 0;
  return status.r4216 >> 8;
}
//TODO: handle reads during joypad polling (v=225-227)
//auto-joypad result registers, latched by the polling logic
uint8 sCPU::mmio_r4218() { /*printf("read joy1l=0x%02x\n", status.joy1l);*/ return status.joy1l; } //JOY1L
uint8 sCPU::mmio_r4219() { /*printf("read joy1h=0x%02x\n", status.joy1h);*/ return status.joy1h; } //JOY1H
uint8 sCPU::mmio_r421a() { return status.joy2l; } //JOY2L
uint8 sCPU::mmio_r421b() { return status.joy2h; } //JOY2H
uint8 sCPU::mmio_r421c() { return status.joy3l; } //JOY3L
uint8 sCPU::mmio_r421d() { return status.joy3h; } //JOY3H
uint8 sCPU::mmio_r421e() { return status.joy4l; } //JOY4L
uint8 sCPU::mmio_r421f() { return status.joy4h; } //JOY4H
//per-channel DMA/HDMA register reads; i = channel index 0-7
//DMAPx
uint8 sCPU::mmio_r43x0(uint8 i) {
  return channel[i].dmap;
}
//BBADx
uint8 sCPU::mmio_r43x1(uint8 i) {
  return channel[i].destaddr;
}
//A1TxL
uint8 sCPU::mmio_r43x2(uint8 i) {
  return channel[i].srcaddr;
}
//A1TxH
uint8 sCPU::mmio_r43x3(uint8 i) {
  return channel[i].srcaddr >> 8;
}
//A1Bx
uint8 sCPU::mmio_r43x4(uint8 i) {
  return channel[i].srcbank;
}
//DASxL
//union { uint16 xfersize; uint16 hdma_iaddr; };
uint8 sCPU::mmio_r43x5(uint8 i) {
  return channel[i].xfersize;
}
//DASxH
//union { uint16 xfersize; uint16 hdma_iaddr; };
uint8 sCPU::mmio_r43x6(uint8 i) {
  return channel[i].xfersize >> 8;
}
//DASBx
uint8 sCPU::mmio_r43x7(uint8 i) {
  return channel[i].hdma_ibank;
}
//A2AxL
uint8 sCPU::mmio_r43x8(uint8 i) {
  return channel[i].hdma_addr;
}
//A2AxH
uint8 sCPU::mmio_r43x9(uint8 i) {
  return channel[i].hdma_addr >> 8;
}
//NTRLx
uint8 sCPU::mmio_r43xa(uint8 i) {
  return channel[i].hdma_line_counter;
}
//???
//unused register; value is readable/writable but has no known function
uint8 sCPU::mmio_r43xb(uint8 i) {
  return channel[i].unknown;
}
//per-channel DMA/HDMA register writes; i = channel index 0-7
//DMAPx
//d7 = transfer direction, d6 = HDMA indirect mode, d4 = reverse transfer,
//d3 = fixed source address, d2-d0 = transfer unit mode
void sCPU::mmio_w43x0(uint8 i, uint8 data) {
  channel[i].dmap = data;
  channel[i].direction = !!(data & 0x80);
  channel[i].hdma_indirect = !!(data & 0x40);
  channel[i].reversexfer = !!(data & 0x10);
  channel[i].fixedxfer = !!(data & 0x08);
  channel[i].xfermode = data & 7;
}
//DDBADx
void sCPU::mmio_w43x1(uint8 i, uint8 data) {
  channel[i].destaddr = data;
}
//A1TxL
void sCPU::mmio_w43x2(uint8 i, uint8 data) {
  channel[i].srcaddr = (channel[i].srcaddr & 0xff00) | (data);
}
//A1TxH
void sCPU::mmio_w43x3(uint8 i, uint8 data) {
  channel[i].srcaddr = (channel[i].srcaddr & 0x00ff) | (data << 8);
}
//A1Bx
void sCPU::mmio_w43x4(uint8 i, uint8 data) {
  channel[i].srcbank = data;
}
//DASxL
//union { uint16 xfersize; uint16 hdma_iaddr; };
void sCPU::mmio_w43x5(uint8 i, uint8 data) {
  channel[i].xfersize = (channel[i].xfersize & 0xff00) | (data);
}
//DASxH
//union { uint16 xfersize; uint16 hdma_iaddr; };
void sCPU::mmio_w43x6(uint8 i, uint8 data) {
  channel[i].xfersize = (channel[i].xfersize & 0x00ff) | (data << 8);
}
//DASBx
void sCPU::mmio_w43x7(uint8 i, uint8 data) {
  channel[i].hdma_ibank = data;
}
//A2AxL
void sCPU::mmio_w43x8(uint8 i, uint8 data) {
  channel[i].hdma_addr = (channel[i].hdma_addr & 0xff00) | (data);
}
//A2AxH
void sCPU::mmio_w43x9(uint8 i, uint8 data) {
  channel[i].hdma_addr = (channel[i].hdma_addr & 0x00ff) | (data << 8);
}
//NTRLx
void sCPU::mmio_w43xa(uint8 i, uint8 data) {
  channel[i].hdma_line_counter = data;
}
//???
//unused register; stored so reads give back the written value
void sCPU::mmio_w43xb(uint8 i, uint8 data) {
  channel[i].unknown = data;
}
//power-on: no MMIO state beyond what mmio_reset() initializes
void sCPU::mmio_power() {
}
//reset all MMIO-visible registers to their power-on/reset values
void sCPU::mmio_reset() {
  //$2181-$2183
  status.wram_addr = 0x000000;
  //$4016-$4017
  status.joypad_strobe_latch = 0;
  status.joypad1_bits = ~0;
  status.joypad2_bits = ~0;
  //$4200
  status.nmi_enabled = false;
  status.hirq_enabled = false;
  status.virq_enabled = false;
  status.auto_joypad_poll = false;
  //$4201
  status.pio = 0xff;
  //$4202-$4203
  status.mul_a = 0xff;
  status.mul_b = 0xff;
  //$4204-$4206
  status.div_a = 0xffff;
  status.div_b = 0xff;
  //$4207-$420a
  status.hirq_pos = 0x01ff;
  status.virq_pos = 0x01ff;
  //$420d
  status.rom_speed = 8;
  //$4214-$4217
  status.r4214 = 0x0000;
  status.r4216 = 0x0000;
  //$4218-$421f
  status.joy1l = 0x00;
  status.joy1h = 0x00;
  status.joy2l = 0x00;
  status.joy2h = 0x00;
  status.joy3l = 0x00;
  status.joy3h = 0x00;
  status.joy4l = 0x00;
  status.joy4h = 0x00;
}
//dispatch a CPU-visible MMIO read; unmapped addresses return the MDR
//(open-bus: last value driven on the data bus)
uint8 sCPU::mmio_read(unsigned addr) {
  addr &= 0xffff;
  //APU
  if((addr & 0xffc0) == 0x2140) { //$2140-$217f
    //synchronize with the SMP before sampling its communication ports
    scheduler.sync_cpusmp();
    return smp.port_read(addr & 3);
  }
  //DMA
  if((addr & 0xff80) == 0x4300) { //$4300-$437f
    unsigned i = (addr >> 4) & 7;  //channel index
    switch(addr & 0xf) {
      case 0x0: return mmio_r43x0(i);
      case 0x1: return mmio_r43x1(i);
      case 0x2: return mmio_r43x2(i);
      case 0x3: return mmio_r43x3(i);
      case 0x4: return mmio_r43x4(i);
      case 0x5: return mmio_r43x5(i);
      case 0x6: return mmio_r43x6(i);
      case 0x7: return mmio_r43x7(i);
      case 0x8: return mmio_r43x8(i);
      case 0x9: return mmio_r43x9(i);
      case 0xa: return mmio_r43xa(i);
      case 0xb: return mmio_r43xb(i);
      case 0xc: return regs.mdr; //unmapped
      case 0xd: return regs.mdr; //unmapped
      case 0xe: return regs.mdr; //unmapped
      case 0xf: return mmio_r43xb(i); //mirror of $43xb
    }
  }
  switch(addr) {
    case 0x2180: return mmio_r2180();
    case 0x4016: return mmio_r4016();
    case 0x4017: return mmio_r4017();
    case 0x4210: return mmio_r4210();
    case 0x4211: return mmio_r4211();
    case 0x4212: return mmio_r4212();
    case 0x4213: return mmio_r4213();
    case 0x4214: return mmio_r4214();
    case 0x4215: return mmio_r4215();
    case 0x4216: return mmio_r4216();
    case 0x4217: return mmio_r4217();
    case 0x4218: return mmio_r4218();
    case 0x4219: return mmio_r4219();
    case 0x421a: return mmio_r421a();
    case 0x421b: return mmio_r421b();
    case 0x421c: return mmio_r421c();
    case 0x421d: return mmio_r421d();
    case 0x421e: return mmio_r421e();
    case 0x421f: return mmio_r421f();
  }
  return regs.mdr;
}
//dispatch a CPU-visible MMIO write; writes to unmapped addresses are ignored
void sCPU::mmio_write(unsigned addr, uint8 data) {
  addr &= 0xffff;
  //APU
  if((addr & 0xffc0) == 0x2140) { //$2140-$217f
    //synchronize with the SMP before touching its communication ports
    scheduler.sync_cpusmp();
    port_write(addr & 3, data);
    return;
  }
  //DMA
  if((addr & 0xff80) == 0x4300) { //$4300-$437f
    unsigned i = (addr >> 4) & 7;  //channel index
    switch(addr & 0xf) {
      case 0x0: mmio_w43x0(i, data); return;
      case 0x1: mmio_w43x1(i, data); return;
      case 0x2: mmio_w43x2(i, data); return;
      case 0x3: mmio_w43x3(i, data); return;
      case 0x4: mmio_w43x4(i, data); return;
      case 0x5: mmio_w43x5(i, data); return;
      case 0x6: mmio_w43x6(i, data); return;
      case 0x7: mmio_w43x7(i, data); return;
      case 0x8: mmio_w43x8(i, data); return;
      case 0x9: mmio_w43x9(i, data); return;
      case 0xa: mmio_w43xa(i, data); return;
      case 0xb: mmio_w43xb(i, data); return;
      case 0xc: return; //unmapped
      case 0xd: return; //unmapped
      case 0xe: return; //unmapped
      case 0xf: mmio_w43xb(i, data); return; //mirror of $43xb
    }
  }
  switch(addr) {
    case 0x2180: mmio_w2180(data); return;
    case 0x2181: mmio_w2181(data); return;
    case 0x2182: mmio_w2182(data); return;
    case 0x2183: mmio_w2183(data); return;
    case 0x4016: mmio_w4016(data); return;
    case 0x4017: return; //unmapped
    case 0x4200: mmio_w4200(data); return;
    case 0x4201: mmio_w4201(data); return;
    case 0x4202: mmio_w4202(data); return;
    case 0x4203: mmio_w4203(data); return;
    case 0x4204: mmio_w4204(data); return;
    case 0x4205: mmio_w4205(data); return;
    case 0x4206: mmio_w4206(data); return;
    case 0x4207: mmio_w4207(data); return;
    case 0x4208: mmio_w4208(data); return;
    case 0x4209: mmio_w4209(data); return;
    case 0x420a: mmio_w420a(data); return;
    case 0x420b: mmio_w420b(data); return;
    case 0x420c: mmio_w420c(data); return;
    case 0x420d: mmio_w420d(data); return;
  }
}
#endif
| 6,141 |
3,001 |
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
class MonthlySubquotaLimitClassifier(TransformerMixin):
    """
    Monthly Subquota Limit classifier.

    Flags reimbursements whose cumulative monthly value, per applicant and
    subquota, exceeds the subquota's monthly limit for the period in which
    the expense was made.

    Dataset
    -------
    issue_date : datetime column
        Date when the expense was made.

    month : int column
        The quota month matching the expense request.

    net_value : float column
        The value of the expense.

    subquota_number : category column
        A number to classify a category of expenses.

    year : int column
        The quota year matching the expense request.
    """

    # Grouping keys: one quota bucket per applicant per month.
    KEYS = ['applicant_id', 'month', 'year']
    COLS = ['applicant_id',
            'issue_date',
            'month',
            'net_value',
            'subquota_number',
            'year']

    def fit(self, X):
        """Keep a working copy of the relevant columns and derive helper columns."""
        self.X = X
        self._X = self.X[self.COLS].copy()
        self.__create_columns()
        return self

    def transform(self, X=None):
        """Build the per-subquota limit table for each validity period.

        Limits are expressed in the same scale as `net_value_int`,
        i.e. net_value * 100 (e.g. 1000000 == 10,000.00).
        """
        self.limits = [
            {
                # Automotive vehicle renting or charter (From 12/2013 to 03/2015)
                'data': self._X.query('(subquota_number == "120") & '
                                      '(reimbursement_month >= datetime(2013, 12, 1)) & '
                                      '(reimbursement_month <= datetime(2015, 3, 1))'),
                'monthly_limit': 1000000,
            },
            {
                # Automotive vehicle renting or charter (From 04/2015 to 04/2017)
                'data': self._X.query('(subquota_number == "120") & '
                                      '(reimbursement_month >= datetime(2015, 4, 1)) & '
                                      '(reimbursement_month <= datetime(2017, 4, 1))'),
                'monthly_limit': 1090000,
            },
            {
                # Automotive vehicle renting or charter (From 05/2017)
                'data': self._X.query('(subquota_number == "120") & '
                                      '(reimbursement_month >= datetime(2017, 5, 1))'),
                'monthly_limit': 1271300,
            },
            {
                # Taxi, toll and parking (From 12/2013 to 03/2015)
                'data': self._X.query('(subquota_number == "122") & '
                                      '(reimbursement_month >= datetime(2013, 12, 1)) & '
                                      '(reimbursement_month <= datetime(2015, 3, 1))'),
                'monthly_limit': 250000,
            },
            {
                # Taxi, toll and parking (From 04/2015)
                'data': self._X.query('(subquota_number == "122") & '
                                      '(reimbursement_month >= datetime(2015, 4, 1))'),
                'monthly_limit': 270000,
            },
            {
                # Fuels and lubricants (From 07/2009 to 03/2015)
                'data': self._X.query('(subquota_number == "3") & '
                                      '(reimbursement_month >= datetime(2009, 7, 1)) & '
                                      '(reimbursement_month <= datetime(2015, 3, 1))'),
                'monthly_limit': 450000,
            },
            {
                # Fuels and lubricants (From 04/2015 to 08/2015)
                'data': self._X.query('(subquota_number == "3") & '
                                      '(reimbursement_month >= datetime(2015, 4, 1)) & '
                                      '(reimbursement_month <= datetime(2015, 8, 1))'),
                'monthly_limit': 490000,
            },
            {
                # Fuels and lubricants (From 9/2015)
                'data': self._X.query('(subquota_number == "3") & '
                                      '(reimbursement_month >= datetime(2015, 9, 1))'),
                'monthly_limit': 600000,
            },
            {
                # Security service provided by specialized company (From 07/2009 to 4/2014)
                'data': self._X.query('(subquota_number == "8") & '
                                      '(reimbursement_month >= datetime(2009, 7, 1)) & '
                                      '(reimbursement_month <= datetime(2014, 4, 1))'),
                'monthly_limit': 450000,
            },
            {
                # Security service provided by specialized company (From 05/2014 to 3/2015)
                'data': self._X.query('(subquota_number == "8") & '
                                      '(reimbursement_month >= datetime(2014, 5, 1)) & '
                                      '(reimbursement_month <= datetime(2015, 3, 1))'),
                'monthly_limit': 800000,
            },
            {
                # Security service provided by specialized company (From 04/2015)
                'data': self._X.query('(subquota_number == "8") & '
                                      '(reimbursement_month >= datetime(2015, 4, 1))'),
                'monthly_limit': 870000,
            },
            {
                # Participation in course, talk or similar event (From 10/2015)
                'data': self._X.query('(subquota_number == "137") & '
                                      '(reimbursement_month >= datetime(2015, 10, 1))'),
                'monthly_limit': 769716,
            },
        ]
        return self

    def predict(self, X=None):
        """Return a boolean array: True where the expense pushed the running
        monthly total past its subquota limit."""
        self._X['is_over_monthly_subquota_limit'] = False
        for metadata in self.limits:
            data, monthly_limit = metadata['data'], metadata['monthly_limit']
            if len(data):
                surplus_reimbursements = self.__find_surplus_reimbursements(data, monthly_limit)
                self._X.loc[surplus_reimbursements.index,
                            'is_over_monthly_subquota_limit'] = True
        # Reindex to the original frame's order and convert to ndarray.
        results = self._X.loc[self.X.index, 'is_over_monthly_subquota_limit']
        return np.r_[results]

    def predict_proba(self, X=None):
        # Constant confidence; this classifier is rule-based, not probabilistic.
        return 1.

    def __create_columns(self):
        """Derive helper columns: integer cents, coerced dates, and the
        first-of-month timestamp of the quota period."""
        # Work in integer cents to avoid float accumulation errors in cumsum.
        self._X['net_value_int'] = (self._X['net_value'] * 100).apply(int)
        # errors='coerce' turns unparseable dates into NaT instead of raising.
        self._X['coerced_issue_date'] = \
            pd.to_datetime(self._X['issue_date'], errors='coerce')
        # Stable sort keeps the original order of same-day expenses.
        self._X.sort_values('coerced_issue_date', kind='mergesort', inplace=True)
        reimbursement_month = self._X[['year', 'month']].copy()
        reimbursement_month['day'] = 1
        self._X['reimbursement_month'] = pd.to_datetime(reimbursement_month)

    def __find_surplus_reimbursements(self, data, monthly_limit):
        """Rows whose running monthly total (per applicant/month/year) exceeds the limit."""
        grouped = data.groupby(self.KEYS).apply(self.__create_cumsum_cols)
        return grouped[grouped['cumsum_net_value'] > monthly_limit]

    def __create_cumsum_cols(self, subset):
        """Add the running total of net_value_int within one quota bucket."""
        subset['cumsum_net_value'] = subset['net_value_int'].cumsum()
        return subset
| 3,599 |
435 |
{
"copyright_text": null,
"description": "Over the course of nearly a year, we migrated Pinterest's primary\nsystems from Python2 to Python3. A large, tightly coupled codebase with\nover 2 million lines of code, the Pinterest codebase contained nearly\nevery edge case that might exist in a Py2 to Py3 migration.\n\nWe'll cover our approach, gotchas, and tools, and the incredible impact\nour migration has made on infra spend and code quality.\n",
"duration": 2448,
"language": "eng",
"recorded": "2019-05-03T12:10:00",
"related_urls": [
{
"label": "Conference schedule",
"url": "https://us.pycon.org/2019/schedule/talks/"
},
{
"label": "Conference slides (github)",
"url": "https://github.com/PyCon/2019-slides"
},
{
"label": "Conference slides (speakerdeck)",
"url": "https://speakerdeck.com/pycon2019"
},
{
"label": "Talk schedule",
"url": "https://us.pycon.org/2019/schedule/presentation/147/"
}
],
"speakers": [
"<NAME>",
"<NAME>"
],
"tags": [
"talk"
],
"thumbnail_url": "https://i.ytimg.com/vi/e1vqfBEAkNA/maxresdefault.jpg",
"title": "Migrating Pinterest from Python2 to Python3",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=e1vqfBEAkNA"
}
]
}
| 526 |
1,083 |
<reponame>bieremayi/carbondata<filename>integration/presto/src/main/prestosql/org/apache/carbondata/presto/CarbondataConnectorFactory.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.presto;
import java.lang.reflect.*;
import java.util.Map;
import org.apache.carbondata.hive.CarbonHiveSerDe;
import org.apache.carbondata.hive.MapredCarbonInputFormat;
import org.apache.carbondata.hive.MapredCarbonOutputFormat;
import com.google.inject.Module;
import io.airlift.units.DataSize;
import io.prestosql.plugin.hive.HiveConnectorFactory;
import io.prestosql.plugin.hive.HiveStorageFormat;
import io.prestosql.spi.connector.Connector;
import io.prestosql.spi.connector.ConnectorContext;
import sun.misc.Unsafe;
import static io.airlift.units.DataSize.Unit.MEGABYTE;
/**
* Build Carbondata Connector
* It will be called by CarbondataPlugin
*/
public class CarbondataConnectorFactory extends HiveConnectorFactory {
  // Runs once at class-load time: patches the carbondata formats into
  // Presto's HiveStorageFormat enum before any connector is created.
  static {
    try {
      setCarbonEnum();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
  // Convenience constructor: uses an empty extra Guice module.
  public CarbondataConnectorFactory(String connectorName) {
    this(connectorName, EmptyModule.class);
  }

  // Delegates to HiveConnectorFactory with the supplied Guice module class.
  public CarbondataConnectorFactory(String connectorName, Class<? extends Module> module) {
    super(connectorName, module);
  }
  // Builds the carbondata connector for the given catalog via the internal factory.
  @Override
  public Connector create(
      String catalogName,
      Map<String, String> config,
      ConnectorContext context) {
    return InternalCarbonDataConnectorFactory
        .createConnector(catalogName, config, context, new EmptyModule());
  }
  /**
   * Registers the Carbon storage formats as extra {@code HiveStorageFormat} enum
   * constants via reflection/Unsafe. A hack, but currently the least-invasive
   * choice to avoid large changes in the Hive connector code.
   *
   * @throws Exception if the reflective enum patching fails
   */
  private static void setCarbonEnum() throws Exception {
    // Already registered by an earlier class-load? Then nothing to do.
    for (HiveStorageFormat format : HiveStorageFormat.values()) {
      if (format.name().equals("CARBON") || format.name().equals("ORG.APACHE.CARBONDATA.FORMAT")
          || format.name().equals("CARBONDATA")) {
        return;
      }
    }
    addHiveStorageFormatsForCarbondata("CARBON");
    addHiveStorageFormatsForCarbondata("ORG.APACHE.CARBONDATA.FORMAT");
    addHiveStorageFormatsForCarbondata("CARBONDATA");
  }
private static void addHiveStorageFormatsForCarbondata(String storedAs) throws Exception {
Constructor<?> constructor = Unsafe.class.getDeclaredConstructors()[0];
constructor.setAccessible(true);
Unsafe unsafe = (Unsafe) constructor.newInstance();
HiveStorageFormat enumValue =
(HiveStorageFormat) unsafe.allocateInstance(HiveStorageFormat.class);
Field nameField = Enum.class.getDeclaredField("name");
makeAccessible(nameField);
nameField.set(enumValue, storedAs);
Field ordinalField = Enum.class.getDeclaredField("ordinal");
makeAccessible(ordinalField);
ordinalField.setInt(enumValue, HiveStorageFormat.values().length);
Field serdeField = HiveStorageFormat.class.getDeclaredField("serde");
makeAccessible(serdeField);
serdeField.set(enumValue, CarbonHiveSerDe.class.getName());
Field inputFormatField = HiveStorageFormat.class.getDeclaredField("inputFormat");
makeAccessible(inputFormatField);
inputFormatField.set(enumValue, MapredCarbonInputFormat.class.getName());
Field outputFormatField = HiveStorageFormat.class.getDeclaredField("outputFormat");
makeAccessible(outputFormatField);
outputFormatField.set(enumValue, MapredCarbonOutputFormat.class.getName());
Field estimatedWriterSystemMemoryUsageField =
HiveStorageFormat.class.getDeclaredField("estimatedWriterSystemMemoryUsage");
makeAccessible(estimatedWriterSystemMemoryUsageField);
estimatedWriterSystemMemoryUsageField.set(enumValue, new DataSize((long) 256, MEGABYTE));
Field values = HiveStorageFormat.class.getDeclaredField("$VALUES");
makeAccessible(values);
HiveStorageFormat[] hiveStorageFormats =
new HiveStorageFormat[HiveStorageFormat.values().length + 1];
HiveStorageFormat[] src = (HiveStorageFormat[]) values.get(null);
System.arraycopy(src, 0, hiveStorageFormats, 0, src.length);
hiveStorageFormats[src.length] = enumValue;
values.set(null, hiveStorageFormats);
}
private static void makeAccessible(Field field) throws Exception {
field.setAccessible(true);
Field modifiersField = Field.class.getDeclaredField("modifiers");
modifiersField.setAccessible(true);
modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
}
}
| 1,684 |
2,151 |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_UI_WEBUI_MEDIA_ROUTER_MEDIA_ROUTER_RESOURCES_PROVIDER_H_
#define CHROME_BROWSER_UI_WEBUI_MEDIA_ROUTER_MEDIA_ROUTER_RESOURCES_PROVIDER_H_
namespace content {
class WebUIDataSource;
}
namespace media_router {
// Adds the resources needed by Media Router to |html_source|.
void AddMediaRouterUIResources(content::WebUIDataSource* html_source);
} // namespace media_router
#endif // CHROME_BROWSER_UI_WEBUI_MEDIA_ROUTER_MEDIA_ROUTER_RESOURCES_PROVIDER_H_
| 240 |
723 |
<filename>code/python/ch17/src/binary_chunk.py
from binary_reader import BinaryReader
from binary_chunk_header import BinaryChunkHeader
from prototype import Prototype
class BinaryChunk:
    """Wraps a precompiled Lua binary chunk and decodes it into a prototype."""

    def __init__(self, chunk):
        # Reader over the raw chunk bytes; header/size_upvalues/main_func are
        # populated by undump().
        self.binary_reader = BinaryReader(chunk)
        self.header = None
        self.size_upvalues = None
        self.main_func = None

    @staticmethod
    def is_binary_chunk(data):
        """Return True when the first four bytes match the Lua signature."""
        if data is None or len(data) < 4:
            return False
        return all(data[idx] == BinaryChunkHeader.LUA_SIGNATURE[idx]
                   for idx in range(4))

    def print_header(self):
        """Dump the decoded header (requires undump() to have run)."""
        self.header.dump()

    def check_header(self):
        """Validate the decoded header; delegates to BinaryChunkHeader.check()."""
        self.header.check()

    def print_main_func(self):
        """Dump the main function prototype (requires undump() to have run)."""
        self.main_func.dump()

    def get_main_func(self):
        """Return the main function prototype decoded by undump()."""
        return self.main_func

    def undump(self):
        """Decode header, upvalue count and main prototype; return the prototype."""
        self.header = BinaryChunkHeader(self.binary_reader)
        self.check_header()
        self.size_upvalues = self.binary_reader.read_uint8()
        proto = Prototype()
        proto.init_from_br(self.binary_reader, '')
        self.main_func = proto
        return proto
| 495 |
412 |
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "graphd/graphd.h"
#include "graphd/graphd-iterator-and.h"
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
extern const pdb_iterator_type graphd_iterator_and_type;
/* Reset *ps to its pristine, empty state: everything zeroed, all ids set
 * to PDB_ID_NONE, pointers NULL, and the magic number stamped so later
 * calls can verify they were handed a real process state.
 */
void graphd_iterator_and_process_state_clear(and_process_state *ps) {
  memset(ps, 0, sizeof *ps);

  ps->ps_magic = GRAPHD_AND_PROCESS_STATE_MAGIC;

  ps->ps_it = NULL;
  ps->ps_check_order = NULL;

  ps->ps_id = PDB_ID_NONE;
  ps->ps_producer_id = PDB_ID_NONE;
  ps->ps_next_find_resume_id = PDB_ID_NONE;
}
/* Release everything owned by *ps: the cloned subiterators and the check
 * order array.  The process state structure itself is not freed.
 */
void graphd_iterator_and_process_state_finish(graphd_iterator_and *gia,
                                              and_process_state *ps) {
  cl_handle *cl = gia->gia_cl;

  GRAPHD_AND_IS_PROCESS_STATE(cl, ps);
  cl_enter(cl, CL_LEVEL_VERBOSE, "ps=%p", (void *)ps);

  if (ps->ps_it != NULL) {
    size_t slot;

    for (slot = 0; slot < ps->ps_n; slot++)
      pdb_iterator_destroy(gia->gia_pdb, ps->ps_it + slot);

    cm_free(gia->gia_cm, ps->ps_it);
    ps->ps_it = NULL;
  }

  if (ps->ps_check_order != NULL) {
    cm_free(gia->gia_cm, ps->ps_check_order);
    ps->ps_check_order = NULL;
  }

  cl_leave(cl, CL_LEVEL_VERBOSE, "ps=%p", (void *)ps);
}
/* Remove subcondition #i from the process state *ps, keeping the cloned
 * iterator array and the check order consistent with the AND iterator's
 * shrunken subcondition list.
 *
 * it  - the original AND iterator (must be its own original).
 * ps  - process state mirroring it's subconditions.
 * i   - index of the subcondition being deleted.
 */
void graphd_iterator_and_process_state_delete_subcondition(
    pdb_iterator *it, and_process_state *ps, size_t i) {
  graphd_iterator_and *ogia = it->it_theory;
  pdb_handle *pdb = ogia->gia_pdb;
  cl_handle *cl = ogia->gia_cl;
  size_t k;

  /* Not instantiated?
   */
  if (!ps->ps_n) {
    cl_assert(cl, ps->ps_it == NULL);
    cl_assert(cl, ps->ps_check_order == NULL);
    return;
  }

  cl_log(cl, CL_LEVEL_VERBOSE,
         "and_process_state_delete_subcondition %zu from ps->ps_n %zu", i,
         ps->ps_n);

  cl_assert(cl, i < ogia->gia_n);
  cl_assert(cl, i < ps->ps_n);
  cl_assert(cl, it->it_original == it);

  GRAPHD_AND_IS_PROCESS_STATE(cl, ps);

  /* Drop the cloned iterator for slot i and close the gap. */
  if (ps->ps_it != NULL) {
    pdb_iterator_destroy(pdb, ps->ps_it + i);
    memmove(ps->ps_it + i, ps->ps_it + i + 1,
            sizeof(*ps->ps_it) * (ps->ps_n - (i + 1)));
  }

  /* Renumber the check order: entries above i shift down by one; the entry
   * equal to i is removed outright.
   */
  if (ps->ps_check_order != NULL) {
    for (k = 0; k < ps->ps_n; k++) {
      if (ps->ps_check_order[k] > i)
        ps->ps_check_order[k]--;

      else if (ps->ps_check_order[k] == i) {
        /* If we're in the middle of a slow
         * check while you're deleting the guy
         * we're slow-checking against, the
         * call state jumps back to 0, and we'll
         * resume with the guy behind the deleted
         * one.
         */
        if (k == ps->ps_check_i)
          it->it_call_state = 0;
        else if (k < ps->ps_check_i)
          ps->ps_check_i--;

        if (k != ps->ps_n - 1) {
          memmove(ps->ps_check_order + k, ps->ps_check_order + k + 1,
                  (ps->ps_n - (k + 1)) * sizeof(*ps->ps_check_order));

          /* Reexamine the index we just
           * pulled over the deleted one!
           */
          k--;
        }
        ps->ps_n--;
      }
    }
  }
  /* Clamp the resume index in case it now points past the shrunken array. */
  if (ps->ps_check_i > ps->ps_n) ps->ps_check_i = ps->ps_n;
}
/* Deep-copy the per-subiterator state from *src into *dst.
 *
 * On success, dst->ps_it holds positioned clones of all of src's
 * subiterators and the check order is refreshed.  On failure, everything
 * allocated here is released and dst->ps_it is NULL.
 *
 * Returns 0 on success, ENOMEM on allocation failure, or the clone error.
 */
int graphd_iterator_and_process_state_clone(pdb_handle *pdb, pdb_iterator *it,
                                            and_process_state const *src,
                                            and_process_state *dst) {
  graphd_iterator_and *gia = it->it_theory;
  graphd_iterator_and *ogia = ogia(it);
  cm_handle *cm = gia->gia_cm;
  cl_handle *cl = gia->gia_cl;
  size_t i;
  int err;

  cl_assert(cl, src->ps_it != NULL);
  GRAPHD_AND_IS_PROCESS_STATE(cl, src);

  dst->ps_it = cm_malloc(cm, sizeof(*dst->ps_it) * ogia->gia_n);
  if (dst->ps_it == NULL) return ENOMEM;

  for (i = 0; i < ogia->gia_n; i++) {
    PDB_IS_ITERATOR(cl, src->ps_it[i]);

    err = pdb_iterator_clone(pdb, src->ps_it[i], dst->ps_it + i);
    if (err != 0) {
      /* Unwind: destroy the clones made so far, then free the array exactly
       * once.  (BUG FIX: the free and NULL assignment used to sit *inside*
       * the unwind loop, which double-freed dst->ps_it and destroyed
       * iterators through a NULL base pointer when i > 1, and leaked the
       * array when the very first clone failed.)
       */
      while (i > 0) {
        i--;
        pdb_iterator_destroy(pdb, dst->ps_it + i);
      }
      cm_free(cm, dst->ps_it);
      dst->ps_it = NULL;
      return err;
    }
    cl_assert(cl, pdb_iterator_has_position(pdb, dst->ps_it[i]));
  }
  return graphd_iterator_and_check_sort_refresh(it, dst);
}
/* Lazily instantiate ps->ps_it with clones of the original AND iterator's
 * subiterators, then refresh the check order.
 *
 * Returns 0 on success (or when ps is already initialized); otherwise an
 * errno value, with all partially-built state released and ps->ps_it NULL.
 */
int graphd_iterator_and_process_state_initialize(pdb_handle *pdb,
                                                 pdb_iterator *it,
                                                 and_process_state *ps) {
  graphd_iterator_and *gia = it->it_theory;
  cl_handle *cl = gia->gia_cl;
  size_t i;
  int err;
  char buf[200];

  cl_log(cl, CL_LEVEL_VERBOSE,
         "graphd_iterator_and_process_state_initialize: %p for %s", (void *)ps,
         pdb_iterator_to_string(pdb, it, buf, sizeof buf));

  ps->ps_magic = GRAPHD_AND_PROCESS_STATE_MAGIC;

  /* Already initialized - nothing to do. */
  if (ps->ps_it != NULL) return 0;

  cl_assert(cl, gia->gia_n > 0);
  ps->ps_it = cm_malloc(gia->gia_cm, sizeof(*ps->ps_it) * gia->gia_n);
  if (ps->ps_it == NULL) return errno ? errno : ENOMEM;

  for (i = 0; i < gia->gia_n; i++) {
    err = pdb_iterator_clone(pdb, ogia(it)->gia_sc[i].sc_it, ps->ps_it + i);
    if (err != 0) {
      cl_log_errno(cl, CL_LEVEL_FAIL, "pdb_iterator_clone", err, "it=%s",
                   pdb_iterator_to_string(pdb, ogia(it)->gia_sc[i].sc_it, buf,
                                          sizeof buf));
      while (i > 0) pdb_iterator_destroy(pdb, ps->ps_it + --i);
      cm_free(gia->gia_cm, ps->ps_it);
      ps->ps_it = NULL;
      return err;
    }
    cl_assert(cl, pdb_iterator_has_position(pdb, ps->ps_it[i]));
  }
  GRAPHD_AND_IS_PROCESS_STATE(cl, ps);

  err = graphd_iterator_and_check_sort_refresh(it, ps);
  if (err != 0) {
    /* (typo fix: function name in the log message was "garphd_...") */
    cl_log_errno(cl, CL_LEVEL_FAIL, "graphd_iterator_and_check_sort_refresh",
                 err, "it=%s",
                 pdb_iterator_to_string(pdb, it, buf, sizeof buf));
    /* BUG FIX: destroy slots gia_n-1 .. 0.  The previous loop destroyed
     * ps_it + gia_n .. ps_it + 1: one past the end of the array, and it
     * skipped element 0 entirely.
     */
    for (i = gia->gia_n; i > 0; i--)
      pdb_iterator_destroy(pdb, ps->ps_it + (i - 1));
    cm_free(gia->gia_cm, ps->ps_it);
    ps->ps_it = NULL;
  }
  return err;
}
| 3,164 |
1,847 |
// Copyright (c) 2021 The Orbit Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <atomic>
#include <csignal>
#include <thread>
#include "VulkanTutorial/OffscreenRenderingVulkanTutorial.h"
namespace {

// Set to true from the SIGINT handler; polled by the watcher thread in main().
std::atomic<bool> exit_requested = false;

void SigintHandler(int signum) {
  if (signum == SIGINT) exit_requested = true;
}

// Install SigintHandler for SIGINT so Ctrl-C stops the tutorial's rendering
// loop, lets it shut Vulkan down in an orderly way, and exits the program.
void InstallSigintHandler() {
  struct sigaction act {};
  act.sa_flags = 0;
  act.sa_handler = SigintHandler;
  act.sa_restorer = nullptr;  // Linux-specific field; unused here.
  sigemptyset(&act.sa_mask);
  sigaction(SIGINT, &act, nullptr);
}

}  // namespace
int main() {
  InstallSigintHandler();

  orbit_vulkan_tutorial::OffscreenRenderingVulkanTutorial tutorial;

  // Watcher thread: polls the exit flag every 100 ms and asks the tutorial
  // to stop once SIGINT has been received.
  std::thread watcher{[&tutorial] {
    while (!exit_requested) {
      std::this_thread::sleep_for(std::chrono::milliseconds{100});
    }
    tutorial.StopAsync();
  }};

  tutorial.Run();
  watcher.join();
  return 0;
}
| 387 |
424 |
// Copyright 2015 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ShaderProgram.h"
// Cache of compiled programs, keyed by PROGFILES (see GetProgram()).
std::map<PROGFILES, ShaderProgram*> ShaderProgram::instances_;

// Instances are created and cached via GetProgram(); the constructor itself
// does no GL work.
ShaderProgram::ShaderProgram()
{
}
// Returns a compiled+linked program for the given shader resource ids,
// reusing a cached instance when available.  Returns nullptr on compile or
// link failure.
//
// NOTE(review): the cache key is the *sum* of the two resource ids, so
// distinct (vertex, fragment) pairs with equal sums would collide — confirm
// resource ids are assigned so this cannot happen.
ShaderProgram *ShaderProgram::GetProgram(int vertex_res, int fragment_res)
{
    // Check whether we already have the program compiled
    PROGFILES prog_files = vertex_res + fragment_res;
    if (instances_.count(prog_files) > 0)
        return instances_[prog_files];

    // Nope, we don't have it cached, gotta compile and link the program
    ShaderProgram *shader_program = new ShaderProgram();
    if (!shader_program->CompileProgram(vertex_res, fragment_res))
    {
        delete shader_program;  // BUG FIX: was leaked on compile failure
        return nullptr;
    }

    // Save the PROGFILES for this ShaderProgram
    instances_.insert(std::make_pair(prog_files, shader_program));
    return shader_program;
}
// Deletes every cached program and empties the cache.
void ShaderProgram::DestroyAll()
{
    for (auto &entry : instances_)
        delete entry.second;

    instances_.clear();
}
bool ShaderProgram::CompileProgram(int vertex_res, int fragment_res)
{
// Compile the vertex shader
FileResource vertex_fileres = ResourceLoader::GetBinResource(vertex_res);
GLuint vertex_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex_shader, 1, (char**)&vertex_fileres.bin_data, NULL);
glCompileShader(vertex_shader);
// Make sure vertex shader compiled successfully
GLint compile_result;
GLchar info_log[512] = { '\0' };
glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &compile_result);
if (!compile_result)
{
glGetShaderInfoLog(vertex_shader, 512, NULL, info_log);
OutputDebugStringA(info_log);
return false;
}
// Compile the fragment shader
FileResource fragment_fileres = ResourceLoader::GetBinResource(fragment_res);
GLuint fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment_shader, 1, (char**)&fragment_fileres.bin_data, NULL);
glCompileShader(fragment_shader);
// Make sure fragment shader compiled successfully
compile_result = 0;
glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &compile_result);
if (!compile_result)
{
glGetShaderInfoLog(fragment_shader, 512, NULL, info_log);
OutputDebugStringA(info_log);
return false;
}
// Link our shaders
compiled_prog_id_ = glCreateProgram();
glAttachShader(compiled_prog_id_, vertex_shader);
glAttachShader(compiled_prog_id_, fragment_shader);
glLinkProgram(compiled_prog_id_);
// Make sure link was successful
GLint link_result;
glGetProgramiv(compiled_prog_id_, GL_LINK_STATUS, &link_result);
if (!link_result)
{
glGetProgramInfoLog(compiled_prog_id_, 512, NULL, info_log);
OutputDebugStringA(info_log);
return false;
}
// Clean-up our shaders
glDeleteShader(vertex_shader);
glDeleteShader(fragment_shader);
return true;
}
| 1,286 |
573 |
<reponame>Morph1984/dynarmic<filename>src/dynarmic/backend/x64/host_feature.h
/* This file is part of the dynarmic project.
* Copyright (c) 2021 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include "dynarmic/common/common_types.h"
namespace Dynarmic::Backend::X64 {

// Bitmask of x86-64 ISA extensions detected on the host CPU.  Individual
// flags combine via the bitwise operators defined below; the named
// combinations at the bottom bundle commonly-required feature sets.
enum class HostFeature : u64 {
    SSSE3 = 1ULL << 0,
    SSE41 = 1ULL << 1,
    SSE42 = 1ULL << 2,
    AVX = 1ULL << 3,
    AVX2 = 1ULL << 4,
    AVX512F = 1ULL << 5,
    AVX512CD = 1ULL << 6,
    AVX512VL = 1ULL << 7,
    AVX512BW = 1ULL << 8,
    AVX512DQ = 1ULL << 9,
    AVX512BITALG = 1ULL << 10,
    AVX512VBMI = 1ULL << 11,
    PCLMULQDQ = 1ULL << 12,
    F16C = 1ULL << 13,
    FMA = 1ULL << 14,
    AES = 1ULL << 15,
    POPCNT = 1ULL << 16,
    BMI1 = 1ULL << 17,
    BMI2 = 1ULL << 18,
    LZCNT = 1ULL << 19,
    GFNI = 1ULL << 20,

    // Zen-based BMI2
    FastBMI2 = 1ULL << 21,

    // Orthographic AVX512 features on 128 and 256 vectors
    AVX512_Ortho = AVX512F | AVX512VL,

    // Orthographic AVX512 features for both 32-bit and 64-bit floats
    AVX512_OrthoFloat = AVX512_Ortho | AVX512DQ,
};

// Bitwise operators so HostFeature can be used as a type-safe flag set
// without casting to u64 at every call site.
constexpr HostFeature operator~(HostFeature f) {
    return static_cast<HostFeature>(~static_cast<u64>(f));
}

constexpr HostFeature operator|(HostFeature f1, HostFeature f2) {
    return static_cast<HostFeature>(static_cast<u64>(f1) | static_cast<u64>(f2));
}

constexpr HostFeature operator&(HostFeature f1, HostFeature f2) {
    return static_cast<HostFeature>(static_cast<u64>(f1) & static_cast<u64>(f2));
}

constexpr HostFeature operator|=(HostFeature& result, HostFeature f) {
    return result = (result | f);
}

constexpr HostFeature operator&=(HostFeature& result, HostFeature f) {
    return result = (result & f);
}

}  // namespace Dynarmic::Backend::X64
| 743 |
1,614 |
//
// CollectionViewCell.h
// WXSTransition
//
// Created by 王小树 on 16/5/31.
// Copyright © 2016年 王小树. All rights reserved.
//
#import <UIKit/UIKit.h>
// Collection view cell exposing a single image view.
@interface CollectionViewCell : UICollectionViewCell

// Image view displayed by the cell (presumably created in the
// implementation — confirm there).
@property (nonatomic,strong) UIImageView *imgView;

@end
| 107 |
535 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include "os_test_priv.h"
/* Number of times put_cb has fired; bounds the recursion inside the
 * callback and is asserted at the end of the test case. */
static int num_frees;
/* Extended mempool under test; its put callback is set to put_cb. */
static struct os_mempool_ext pool;

/* Block-put callback installed on the extended pool.
 *
 * On the first free only, it allocates and frees another block from inside
 * the callback to verify that nested pool operations work; it then performs
 * the real free via os_memblock_put_from_cb(), which bypasses the callback
 * and therefore avoids infinite recursion.
 */
static os_error_t
put_cb(struct os_mempool_ext *mpe, void *block, void *arg)
{
    int *elem;
    int rc;

    num_frees++;

    /* Only do work on the first free to avoid infinite recursion. */
    if (num_frees == 1) {
        /* Try to allocate and free within callback. */
        elem = os_memblock_get(&mpe->mpe_mp);
        TEST_ASSERT(elem != NULL);

        rc = os_memblock_put(&mpe->mpe_mp, elem);
        TEST_ASSERT(rc == 0);
    }

    /* Actually free block. */
    return os_memblock_put_from_cb(&mpe->mpe_mp, block);
}
/* Regression test: freeing a block from a pool with a put callback must
 * tolerate further get/put calls performed *inside* that callback.  The
 * nested put in put_cb() re-triggers the callback, so two invocations are
 * expected for a single top-level free. */
TEST_CASE_SELF(os_mempool_test_ext_nested)
{
    uint8_t buf[OS_MEMPOOL_BYTES(10, 32)];
    int *elem;
    int rc;

    /* Attempt to unregister the pool in case this test has already run. */
    os_mempool_unregister(&pool.mpe_mp);

    rc = os_mempool_ext_init(&pool, 10, 32, buf, "test_ext_nested");
    TEST_ASSERT_FATAL(rc == 0);
    pool.mpe_put_cb = put_cb;

    elem = os_memblock_get(&pool.mpe_mp);
    TEST_ASSERT_FATAL(elem != NULL, "Error allocating block");

    rc = os_memblock_put(&pool.mpe_mp, elem);
    TEST_ASSERT_FATAL(rc == 0, "Error freeing block %d", rc);

    /* Verify callback was called within callback. */
    TEST_ASSERT(num_frees == 2);
}
| 766 |
1,056 |
<gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.web.client.rest.wizard;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import javax.lang.model.element.TypeElement;
import org.netbeans.api.java.source.CompilationController;
import org.netbeans.modules.web.client.rest.wizard.JSClientGenerator.HttpRequests;
import org.netbeans.modules.web.client.rest.wizard.RestPanel.JsUi;
/**
* @author ads
*
*/
class RouterGenerator {

    /**
     * @param routers   buffer the generated Backbone router source is appended to
     * @param name      JavaScript name of the generated router "class"
     * @param generator model generator describing the entity this router serves
     */
    RouterGenerator(StringBuilder routers, String name , ModelGenerator generator){
        myRouters = routers;
        myRouterName = name;
        myModelGenerator = generator;
    }

    /**
     * Appends a complete Backbone.Router definition (routes, initialize,
     * list/details/create handlers and a getData helper) for {@code entity}
     * to the buffer passed to the constructor, and instantiates it.
     *
     * @param entity         JPA entity the REST resource exposes
     * @param path           REST path for a single item
     * @param collectionPath REST path for the collection; {@code null} when the
     *                       resource has no collection methods
     * @param httpPaths      available HTTP methods mapped to their paths
     * @param controller     javac context (currently unused here)
     */
    void generateRouter( TypeElement entity, String path,
            String collectionPath, Map<HttpRequests, String> httpPaths,
            CompilationController controller)
    {
        myRouters.append("var ");                                    // NOI18N
        myRouters.append(myRouterName);
        myRouters.append(" = Backbone.Router.extend({\n");           // NOI18N

        boolean hasCollection = collectionPath != null;
        String modelVar = getModelGenerator().getModelName().toLowerCase(Locale.ENGLISH);

        /*
         * Fill routes
         */
        // default route used on page loading
        myRouters.append("routes:{\n");                              // NOI18N
        if ( hasCollection ){
            myRouters.append("'':'list'");                           // NOI18N
        }
        else {
            myRouters.append("'':'details'");                        // NOI18N
        }
        // #new route if there is a corresponding POST request in the REST
        if ( httpPaths.get( HttpRequests.POST) != null ){
            myRouters.append(",\n'new':'create'\n");                 // NOI18N
        }
        // #id route if REST has a method for collection
        if ( hasCollection ){
            myRouters.append(",\n':id':'details'\n");                // NOI18N
        }
        myRouters.append("},\n");                                    // NOI18N

        // CTOR ( initialize ) function assign CreateView for "tpl-create" template
        myRouters.append("initialize:function(){\n");                // NOI18N
        myRouters.append("var self = this;\n");                      // NOI18N
        myRouters.append("$('#");                                    // NOI18N
        myRouters.append(getHeaderId());
        myRouters.append("').html(new views.CreateView({\n");        // NOI18N
        myRouters.append(" // tpl-create is template identifier for 'create' block\n");// NOI18N
        myRouters.append("templateName :'#");                        // NOI18N
        myRouters.append(getCreateTemplate());
        myRouters.append("',\n");                                    // NOI18N
        myRouters.append("navigate: function(){\n");                 // NOI18N
        myRouters.append("self.navigate('new', true);\n}\n");        // NOI18N
        myRouters.append("}).render().el);\n},\n");                  // NOI18N

        if ( hasCollection ){
            if ( useUi() ){
                mySideBarId = "datatable";                           // NOI18N
            }
            else {
                mySideBarId = "sidebar";                             // NOI18N
            }
            myRouters.append("list:function () {\n");                // NOI18N
            myRouters.append("this.collection = new models.");       // NOI18N
            myRouters.append(getModelGenerator().getCollectionModelName());
            myRouters.append("();\nvar self = this;\n");             // NOI18N
            myRouters.append("this.collection.fetch({\n");           // NOI18N
            myRouters.append("success:function () {\n");             // NOI18N
            myRouters.append("self.listView = new views.ListView({\n");// NOI18N
            myRouters.append("model:self.collection,\n");
            StringBuilder builder = new StringBuilder("tpl-");        // NOI18N
            builder.append(modelVar);
            builder.append("-list-item");                            // NOI18N
            myListItemTemplate = builder.toString();
            myRouters.append(" // ");                                // NOI18N
            myRouters.append(myListItemTemplate);
            // BUG FIX: leading space added; the generated comment previously
            // read "...-list-itemis template identifier for item".
            myRouters.append(" is template identifier for item\n");  // NOI18N
            myRouters.append("templateName : '#");                   // NOI18N
            myRouters.append(myListItemTemplate);
            myRouters.append("'\n});\n");                            // NOI18N
            myRouters.append("$('#");                                // NOI18N
            myRouters.append(getSideBarId());
            myRouters.append("').html(self.listView.render().el)");  // NOI18N
            if ( useUi() ){
                myRouters.append(".append(_.template($('#");         // NOI18N
                myRouters.append(getTableHeadId());
                myRouters.append("').html())())");
            }
            myRouters.append(";\nif (self.requestedId) {\n");        // NOI18N
            myRouters.append("self.details(self.requestedId);\n}\n");// NOI18N
            if ( useUi() ){
                myRouters.append("var pagerOptions = {\n");          // NOI18N
                myRouters.append(" // target the pager markup \n");  // NOI18N
                myRouters.append("container: $('.pager'),\n");       // NOI18N
                myRouters.append(" // output string - default is "); // NOI18N
                myRouters.append("'{page}/{totalPages}'; possible"); // NOI18N
                myRouters.append("variables: {page}, {totalPages},");// NOI18N
                myRouters.append("{startRow}, {endRow} and {totalRows}\n");// NOI18N
                myRouters.append("output: '{startRow} to");          // NOI18N
                myRouters.append(" {endRow} ({totalRows})',\n");     // NOI18N
                myRouters.append(" // starting page of the pager (zero based index)\n");// NOI18N
                myRouters.append("page: 0,\n");                      // NOI18N
                myRouters.append(" // Number of visible rows - default is 10\n");// NOI18N
                myRouters.append("size: 10\n};\n$('#");              // NOI18N
                myRouters.append(getSideBarId());
                myRouters.append("').tablesorter({widthFixed: true, \n");// NOI18N
                myRouters.append("widgets: ['zebra']}).\n");         // NOI18N
                myRouters.append("tablesorterPager(pagerOptions);\n");// NOI18N
            }
            myRouters.append("}\n});\n},\n");                        // NOI18N
        }

        StringBuilder builder = new StringBuilder("tpl-");            // NOI18N
        builder.append(modelVar);
        builder.append("-details");                                  // NOI18N
        myDetailsTemplateName = builder.toString();

        // details function
        myRouters.append("details:function (");                      // NOI18N
        if ( hasCollection ){
            myRouters.append("id");                                  // NOI18N
        }
        myRouters.append("){\n");                                    // NOI18N
        if ( hasCollection ){
            myRouters.append("if (this.collection) {\n");            // NOI18N
            myRouters.append("this.");                               // NOI18N
            myRouters.append(modelVar);
            myRouters.append("= this.collection.get(id);\n");        // NOI18N
            myRouters.append("if (this.view) {\n");                  // NOI18N
            myRouters.append("this.view.close();\n}\n");             // NOI18N
            myRouters.append("var self = this;\n");                  // NOI18N
            myRouters.append("this.view = new views.ModelView({\n"); // NOI18N
            myRouters.append("model:this.");                         // NOI18N
            myRouters.append(modelVar);
            myRouters.append(",\n // ");                             // NOI18N
            myRouters.append( myDetailsTemplateName );
            myRouters.append(" is template identifier for chosen model element\n");// NOI18N
            myRouters.append("templateName: '#");                    // NOI18N
            myRouters.append( myDetailsTemplateName );
            myRouters.append("',\ngetHashObject: function(){\n");    // NOI18N
            myRouters.append("return self.getData();\n}\n});\n");    // NOI18N
            myRouters.append("$('#");                                // NOI18N
            myRouters.append(getContentId());
            myRouters.append("').html(this.view.render().el);");     // NOI18N
            myRouters.append("} else {\n");                          // NOI18N
            myRouters.append("this.requestedId = id;\n");            // NOI18N
            myRouters.append("this.list();\n}\n},\n");               // NOI18N
        }
        else {
            myRouters.append("if (this.view) {\n");                  // NOI18N
            myRouters.append("this.view.close();\n}\n");             // NOI18N
            myRouters.append("var self = this;\n");                  // NOI18N
            myRouters.append("this.");                               // NOI18N
            myRouters.append(modelVar);
            // BUG FIX: the generated code must construct the Backbone model
            // ("new models.X()"), as the create() branch already does; the
            // original emitted "models.X()" without "new".
            myRouters.append(" = new models.");                      // NOI18N
            myRouters.append(getModelGenerator().getModelName());
            myRouters.append("();\nthis.");                          // NOI18N
            myRouters.append(modelVar);
            myRouters.append(".fetch({\n");                          // NOI18N
            myRouters.append("success:function(){\n");               // NOI18N
            myRouters.append("self.view = new views.ModelView({\n"); // NOI18N
            // BUG FIX: the original hard-coded "model: self.newclass", which
            // only worked for a model literally named "newclass"; reference
            // the model variable set above instead.
            myRouters.append("model: self.");                        // NOI18N
            myRouters.append(modelVar);
            myRouters.append(",\n // ");                             // NOI18N
            myRouters.append(myDetailsTemplateName);
            myRouters.append(" is template identifier for chosen model element\n");// NOI18N
            myRouters.append("templateName : '#");                   // NOI18N
            myRouters.append(myDetailsTemplateName);
            myRouters.append("'\n});\n");                            // NOI18N
            myRouters.append("$('#");                                // NOI18N
            myRouters.append(getContentId());
            myRouters.append("').html(self.view.render().el);}\n});\n},\n");// NOI18N
        }

        if ( httpPaths.get( HttpRequests.POST) != null){
            myRouters.append("create:function () {\n");              // NOI18N
            myRouters.append("if (this.view) {\n");                  // NOI18N
            myRouters.append("this.view.close();\n}\n");             // NOI18N
            myRouters.append("var self = this;\n");                  // NOI18N
            myRouters.append("var dataModel = new models.");
            myRouters.append( getModelGenerator().getModelName());
            myRouters.append("();\n");                               // NOI18N
            myRouters.append(" // see isNew() method implementation in the model\n");// NOI18N
            myRouters.append("dataModel.notSynced = true;\n");       // NOI18N
            myRouters.append("this.view = new views.ModelView({\n"); // NOI18N
            myRouters.append("model: dataModel,\n");
            if ( hasCollection ){
                myRouters.append("collection: this.collection,\n");  // NOI18N
            }
            myRouters.append(" // ");                                // NOI18N
            myRouters.append(myDetailsTemplateName);
            myRouters.append(" is a template identifier for chosen model element\n");// NOI18N
            myRouters.append("templateName: '#");                    // NOI18N
            myRouters.append(myDetailsTemplateName);
            myRouters.append("',\n");                                // NOI18N
            myRouters.append("navigate: function( id ){\n");         // NOI18N
            myRouters.append("self.navigate(id, false);\n},\n\n");   // NOI18N
            myRouters.append("getHashObject: function(){\n");        // NOI18N
            myRouters.append("return self.getData();\n}\n");         // NOI18N
            myRouters.append("});\n");                               // NOI18N
            myRouters.append("$('#");                                // NOI18N
            myRouters.append(getContentId());
            myRouters.append("').html(this.view.render().el);\n},\n");// NOI18N
        }

        // add method getData which returns composite object data got from HTML controls
        myRouters.append("getData: function(){\n");                  // NOI18N
        myRouters.append("return {\n");                              // NOI18N
        if ( useUi() ){
            ModelAttribute id = getModelGenerator().getIdAttribute();
            if ( id!= null ){
                myRouters.append(id.getName());
                myRouters.append(":$('#");                           // NOI18N
                myRouters.append(id.getName());
                myRouters.append("').val(),\n");                     // NOI18N
            }
            Set<ModelAttribute> attributes = getModelGenerator().getAttributes();
            int size = attributes.size();
            int i=0;
            for (ModelAttribute attribute : attributes) {
                myRouters.append(attribute.getName());
                myRouters.append(":$('#");                           // NOI18N
                myRouters.append(attribute.getName());
                myRouters.append("').val()");                        // NOI18N
                i++;
                if ( i <size ){
                    myRouters.append(',');
                }
                myRouters.append("\n");                              // NOI18N
            }
        }
        else {
            String mainModelAttribute = getModelGenerator()
                    .getDisplayNameAlias();
            myRouters.append("/*\n * get values from the HTML controls and");// NOI18N
            myRouters.append(" put them here as a hash of attributes\n");// NOI18N
            if ( mainModelAttribute!= null ){
                myRouters.append(" * f.e.\n * ");                    // NOI18N
                myRouters.append(mainModelAttribute);
                myRouters.append(":$('#");                           // NOI18N
                myRouters.append(mainModelAttribute);
                myRouters.append("').val(),\n * ....\n");            // NOI18N
            }
            myRouters.append(" */\n");                               // NOI18N
        }
        myRouters.append("};\n}\n");                                 // NOI18N
        myRouters.append("});\n");                                   // NOI18N

        // Instantiate the router so its routes become active.
        myRouters.append("new ");                                    // NOI18N
        myRouters.append(myRouterName);
        myRouters.append("();\n");                                   // NOI18N
    }

    /** Model generator supplied at construction time. */
    ModelGenerator getModelGenerator(){
        return myModelGenerator;
    }

    /** Template id for the details view; set by generateRouter(). */
    String getDetailsTemplate(){
        return myDetailsTemplateName;
    }

    /** Template id for a list item; set only when a collection exists. */
    String getListItemTemplate(){
        return myListItemTemplate;
    }

    String getCreateTemplate(){
        return "tpl-create";          // NOI18N
    }

    String getTableHeadId(){
        return "thead";               // NOI18N
    }

    String getHeaderId(){
        if ( useUi() ){
            return "create";          // NOI18N
        }
        return "header";              // NOI18N
    }

    /** True when the tablesorter-based UI should be generated. */
    boolean useUi(){
        return getModelGenerator().hasCollection() &&
                getModelGenerator().getUi() == JsUi.TABLESORTER;
    }

    String getContentId(){
        if ( useUi() ){
            return "details";         // NOI18N
        }
        return "content";             // NOI18N
    }

    /** Element id the list view renders into; set by generateRouter(). */
    String getSideBarId(){
        return mySideBarId;
    }

    private ModelGenerator myModelGenerator;
    private StringBuilder myRouters;
    private String myRouterName;
    private String myDetailsTemplateName;
    private String mySideBarId;
    private String myListItemTemplate;
}
| 10,230 |
892 |
{
"schema_version": "1.2.0",
"id": "GHSA-54hp-9qvj-7wxg",
"modified": "2022-05-01T18:43:17Z",
"published": "2022-05-01T18:43:17Z",
"aliases": [
"CVE-2007-6472"
],
"details": "Multiple SQL injection vulnerabilities in phpMyRealty (PMR) 1.0.9 allow (1) remote attackers to execute arbitrary SQL commands via the type parameter to search.php and (2) remote authenticated administrators to execute arbitrary SQL commands via the listing_updated_days parameter to admin/findlistings.php. NOTE: some of these details are obtained from third party information.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2007-6472"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/39121"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/39122"
},
{
"type": "WEB",
"url": "https://www.exploit-db.com/exploits/4750"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/28155"
},
{
"type": "WEB",
"url": "http://www.osvdb.org/39267"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/26932"
}
],
"database_specific": {
"cwe_ids": [
"CWE-89"
],
"severity": "HIGH",
"github_reviewed": false
}
}
| 632 |
1,682 |
/*
Copyright (c) 2012 LinkedIn Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.linkedin.restli.server.twitter;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.PatchRequest;
import com.linkedin.restli.server.BatchCreateRequest;
import com.linkedin.restli.server.BatchCreateResult;
import com.linkedin.restli.server.BatchDeleteRequest;
import com.linkedin.restli.server.BatchPatchRequest;
import com.linkedin.restli.server.BatchUpdateRequest;
import com.linkedin.restli.server.BatchUpdateResult;
import com.linkedin.restli.server.CreateResponse;
import com.linkedin.restli.server.UpdateResponse;
import com.linkedin.restli.server.annotations.Action;
import com.linkedin.restli.server.annotations.ActionParam;
import com.linkedin.restli.server.annotations.Finder;
import com.linkedin.restli.server.annotations.QueryParam;
import com.linkedin.restli.server.annotations.RestLiCollection;
import com.linkedin.restli.server.resources.ComplexKeyResourceTemplate;
import com.linkedin.restli.server.twitter.TwitterTestDataModels.DiscoveredItem;
import com.linkedin.restli.server.twitter.TwitterTestDataModels.DiscoveredItemKey;
import com.linkedin.restli.server.twitter.TwitterTestDataModels.DiscoveredItemKeyParams;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Rest.li collection resource for {@code DiscoveredItem} entities addressed by a
 * complex key ({@code DiscoveredItemKey} plus {@code DiscoveredItemKeyParams}).
 *
 * <p>Every method body is a stub that returns {@code null} (or does nothing); the
 * class lives alongside {@code TwitterTestDataModels} and exists to exercise
 * resource-model generation for complex-key resources.</p>
 */
@RestLiCollection(name="discovereditems", keyName="discoveredItemId")
public class DiscoveredItemsResource
    extends ComplexKeyResourceTemplate<DiscoveredItemKey, DiscoveredItemKeyParams, DiscoveredItem>
{
  /** Finder "user": items discovered by the given user id. Stub: returns null. */
  @Finder("user")
  public List<DiscoveredItem> findByUser(@QueryParam("userId") long userId)
  {
    return null;
  }

  /** CREATE a single item. Stub: returns null. */
  @Override
  public CreateResponse create(DiscoveredItem entity)
  {
    return null;
  }

  /** BATCH_GET by a set of complex keys. Stub: returns null. */
  @Override
  public Map<ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams>, DiscoveredItem> batchGet(
      Set<ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams>> ids)
  {
    return null;
  }

  /** GET a single item by complex key. Stub: returns null. */
  @Override
  public DiscoveredItem get(ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams> key)
  {
    return null;
  }

  /** DELETE a single item by complex key. Stub: returns null. */
  @Override
  public UpdateResponse delete(ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams> key)
  {
    return null;
  }

  /** PARTIAL_UPDATE via a patch request. Stub: returns null. */
  @Override
  public UpdateResponse update(
      ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams> key, PatchRequest<DiscoveredItem> request)
  {
    return null;
  }

  /** Full UPDATE (replace) of a single item. Stub: returns null. */
  @Override
  public UpdateResponse update(ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams> key, DiscoveredItem entity)
  {
    return null;
  }

  /** BATCH_UPDATE (full replace) of multiple items. Stub: returns null. */
  @Override
  public BatchUpdateResult<ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams>, DiscoveredItem> batchUpdate(
      BatchUpdateRequest<ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams>, DiscoveredItem> entities)
  {
    return null;
  }

  /** BATCH_PARTIAL_UPDATE via patches. Stub: returns null. */
  @Override
  public BatchUpdateResult<ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams>, DiscoveredItem> batchUpdate(
      BatchPatchRequest<ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams>, DiscoveredItem> entityUpdates)
  {
    return null;
  }

  /** BATCH_CREATE of multiple items. Stub: returns null. */
  @Override
  public BatchCreateResult<ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams>, DiscoveredItem> batchCreate(
      BatchCreateRequest<ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams>, DiscoveredItem> entities)
  {
    return null;
  }

  /** BATCH_DELETE of multiple items. Stub: returns null. */
  @Override
  public BatchUpdateResult<ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams>, DiscoveredItem> batchDelete(
      BatchDeleteRequest<ComplexResourceKey<DiscoveredItemKey, DiscoveredItemKeyParams>, DiscoveredItem> ids)
  {
    return null;
  }

  /** Custom action "purge" taking a user id. Stub: does nothing. */
  @Action(name="purge")
  public void purge(@ActionParam("user") long userId)
  {
  }
}
| 1,315 |
443 |
<reponame>alichry/revsh
#include "common.h"
/***********************************************************************************************************************
*
* message_push()
*
* Input: Nothing, but we will heavily reference the global io_helper struct.
* Output: 0 on success, -1 on error.
*
* Purpose: This is our message interface for sending data.
*
**********************************************************************************************************************/
int message_push(){

	unsigned short header_len;
	unsigned short tmp_short;

	/* Send the header. */
	/* The base header holds only the data_type and data_len fields; proxy and
	   connection messages append extra header fields below. */
	header_len = sizeof(message->data_type) + sizeof(message->data_len);
	if(message->data_type == DT_PROXY || message->data_type == DT_CONNECTION){
		header_len += sizeof(message->header_type) + sizeof(message->header_origin) + sizeof(message->header_id);
		if(message->header_type == DT_PROXY_HT_CREATE || message->header_type == DT_PROXY_HT_REPORT || message->header_type == DT_CONNECTION_HT_CREATE){
			header_len += sizeof(message->header_proxy_type);
		}
	}

	if(header_len > io->message_data_size){
		report_error("message_push(): message: local header too long!");
		return(-1);
	}

	/* Multi-byte wire fields are sent in network byte order (htons). */
	tmp_short = htons(header_len);
	if(io->remote_write(&tmp_short, sizeof(tmp_short)) == -1){
		report_error("message_push(): remote_write(%lx, %d): %s", \
				(unsigned long) &tmp_short, (int) sizeof(tmp_short), strerror(errno));
		return(-1);
	}

	if(io->remote_write(&message->data_type, sizeof(message->data_type)) == -1){
		report_error("message_push(): remote_write(%lx, %d): %s", \
				(unsigned long) &message->data_type, (int) sizeof(message->data_type), strerror(errno));
		return(-1);
	}

	/* NOP messages never carry a payload; force the length to zero. */
	if(message->data_type == DT_NOP){
		message->data_len = 0;
	}

	/* Send the data. */
	if(message->data_len > io->message_data_size){
		report_error("message_push(): message: local data too long!");
		return(-1);
	}

	tmp_short = htons(message->data_len);
	if(io->remote_write(&tmp_short, sizeof(tmp_short)) == -1){
		report_error("message_push(): remote_write(%lx, %d): %s", \
				(unsigned long) &tmp_short, (int) sizeof(tmp_short), strerror(errno));
		return(-1);
	}

	if(message->data_type == DT_PROXY || message->data_type == DT_CONNECTION){

		/* Extra header fields: header_type, header_origin, header_id, each htons'd. */
		tmp_short = htons(message->header_type);
		if(io->remote_write(&tmp_short, sizeof(tmp_short)) == -1){
			report_error("message_push(): remote_write(%lx, %d): %s", \
					(unsigned long) &tmp_short, (int) sizeof(tmp_short), strerror(errno));
			return(-1);
		}

		tmp_short = htons(message->header_origin);
		if(io->remote_write(&tmp_short, sizeof(tmp_short)) == -1){
			report_error("message_push(): remote_write(%lx, %d): %s", \
					(unsigned long) &tmp_short, (int) sizeof(tmp_short), strerror(errno));
			return(-1);
		}

		tmp_short = htons(message->header_id);
		if(io->remote_write(&tmp_short, sizeof(tmp_short)) == -1){
			report_error("message_push(): remote_write(%lx, %d): %s", \
					(unsigned long) &tmp_short, (int) sizeof(tmp_short), strerror(errno));
			return(-1);
		}

		/* CREATE/REPORT variants additionally carry the proxy type. */
		if(message->header_type == DT_PROXY_HT_CREATE || message->header_type == DT_PROXY_HT_REPORT || message->header_type == DT_CONNECTION_HT_CREATE){
			tmp_short = htons(message->header_proxy_type);
			if(io->remote_write(&tmp_short, sizeof(tmp_short)) == -1){
				report_error("message_push(): remote_write(%lx, %d): %s", \
						(unsigned long) &tmp_short, (int) sizeof(tmp_short), strerror(errno));
				return(-1);
			}
		}
	}

	/* Finally, the payload itself. */
	if(io->remote_write(message->data, message->data_len) == -1){
		report_error("message_push(): remote_write(%lx, %d): %s", \
				(unsigned long) message->data, message->data_len, strerror(errno));
		return(-1);
	}

	return(0);
}
/***********************************************************************************************************************
*
* message_pull()
*
* Input: Nothing, but we will heavily reference the global io_helper struct.
* Output: 0 on success, -1 on error.
*
* Purpose: This is our message interface for receiving data.
*
**********************************************************************************************************************/
int message_pull(){

	unsigned short header_len;
	int retval;

	/* Clear the payload buffer so a short message never exposes stale bytes. */
	memset(message->data, '\0', io->message_data_size);

	/* Grab the header. */
	if((retval = io->remote_read(&header_len, sizeof(header_len))) == -1){

		/* During a normal disconnect condition, this is where the message_pull should fail, so check for EOF. */
		if(!io->eof){
			report_error("message_pull(): remote_read(%lx, %d): %s", (unsigned long) &header_len, (int) sizeof(header_len), strerror(errno));
		}
		return(-1);
	}
	/* Multi-byte wire fields arrive in network byte order (ntohs). */
	header_len = ntohs(header_len);

	if((retval = io->remote_read(&message->data_type, sizeof(message->data_type))) == -1){
		report_error("message_pull(): remote_read(%lx, %d): %s", \
				(unsigned long) &message->data_type, (int) sizeof(message->data_type), strerror(errno));
		return(-1);
	}
	/* From here on, header_len counts the header bytes not yet consumed. */
	header_len -= sizeof(message->data_type);

	if((retval = io->remote_read(&message->data_len, sizeof(message->data_len))) == -1){
		report_error("message_pull(): remote_read(%lx, %d): %s", (unsigned long) &message->data_len, (int) sizeof(message->data_len), strerror(errno));
		return(-1);
	}
	message->data_len = ntohs(message->data_len);
	header_len -= sizeof(message->data_len);

	if(header_len > io->message_data_size){
		report_error("message_pull(): message: remote header too long!\n");
		return(-1);
	}

	if(message->data_type == DT_PROXY || message->data_type == DT_CONNECTION){

		/* Proxy/connection messages carry extra header fields, mirroring message_push(). */
		if((retval = io->remote_read(&message->header_type, sizeof(message->header_type))) == -1){
			report_error("message_pull(): remote_read(%lx, %d): %s", \
					(unsigned long) &message->header_type, (int) sizeof(message->header_type), strerror(errno));
			return(-1);
		}
		message->header_type = ntohs(message->header_type);
		header_len -= sizeof(message->header_type);

		if((retval = io->remote_read(&message->header_origin, sizeof(message->header_origin))) == -1){
			report_error("message_pull(): remote_read(%lx, %d): %s", \
					(unsigned long) &message->header_origin, (int) sizeof(message->header_origin), strerror(errno));
			return(-1);
		}
		message->header_origin = ntohs(message->header_origin);
		header_len -= sizeof(message->header_origin);

		if((retval = io->remote_read(&message->header_id, sizeof(message->header_id))) == -1){
			report_error("message_pull(): remote_read(%lx, %d): %s", \
					(unsigned long) &message->header_id, (int) sizeof(message->header_id), strerror(errno));
			return(-1);
		}
		message->header_id = ntohs(message->header_id);
		header_len -= sizeof(message->header_id);

		/* CREATE/REPORT variants additionally carry the proxy type. */
		if(message->header_type == DT_PROXY_HT_CREATE || message->header_type == DT_PROXY_HT_REPORT || message->header_type == DT_CONNECTION_HT_CREATE){
			if((retval = io->remote_read(&message->header_proxy_type, sizeof(message->header_proxy_type))) == -1){
				report_error("message_pull(): remote_read(%lx, %d): %s", \
						(unsigned long) &message->header_proxy_type, (int) sizeof(message->header_proxy_type), strerror(errno));
				return(-1);
			}
			message->header_proxy_type = ntohs(message->header_proxy_type);
			header_len -= sizeof(message->header_proxy_type);
		}
	}

	/* Ignore any remaining header data as unknown, and probably from a more modern version of the */
	/* protocol than we were compiled with. */
	if(header_len){
		if(header_len > io->message_data_size){
			report_error("message_pull(): headers bigger than buffer!");
			return(-1);
		}
		/* Drain the unknown bytes into the payload buffer; the real payload read below overwrites them. */
		if((retval = io->remote_read(message->data, header_len)) == -1){
			report_error("message_pull(): remote_read(%lx, %d): %s", (unsigned long) message->data, header_len, strerror(errno));
			return(-1);
		}
	}

	/* Grab the data. */
	if(message->data_len > io->message_data_size){
		report_error("message_pull(): message: remote data too long!");
		return(-1);
	}
	if((retval = io->remote_read(message->data, message->data_len)) == -1){
		report_error("message_pull(): remote_read(%lx, %d): %s", (unsigned long) message->data, message->data_len, strerror(errno));
		return(-1);
	}

	return(0);
}
/***********************************************************************************************************************
*
* message_helper_create()
*
* Input: A pointer to the data.
* The length of that data.
* The max size that data is allowed to be in this run.
* Output: A pointer to a new message_helper node if successful, NULL if not.
*
* Purpose: Make a new message_helper node and fill it with data. Probably for the write buffering case where a write()
* somewhere is failing non-fataly.
*
**********************************************************************************************************************/
/*
 * Build a new message_helper node holding a private copy of `data`.
 *
 * data              : bytes to copy into the node.
 * data_len          : number of bytes to copy; must not exceed message_data_size.
 * message_data_size : fixed size of the node's backing buffer for this run.
 *
 * Returns the new node, or NULL on failure (allocation error or oversized data).
 */
struct message_helper *message_helper_create(char *data, unsigned short data_len, unsigned short message_data_size){

	struct message_helper *new_mh;

	/* Guard the memcpy() below: copying more than message_data_size bytes
	   would overflow the fixed-size backing buffer. */
	if(data_len > message_data_size){
		report_error("message_helper_create(): data_len (%d) exceeds message_data_size (%d)!", (int) data_len, (int) message_data_size);
		return(NULL);
	}

	new_mh = (struct message_helper *) calloc(1, sizeof(struct message_helper));
	if(!new_mh){
		report_error("message_helper_create(): calloc(1, %d): %s", (int) sizeof(struct message_helper), strerror(errno));
		return(NULL);
	}

	new_mh->data = (char *) calloc(message_data_size, sizeof(char));
	if(!new_mh->data){
		/* Report the arguments of the allocation that actually failed
		   (the original message repeated the first calloc's arguments). */
		report_error("message_helper_create(): calloc(%d, %d): %s", (int) message_data_size, (int) sizeof(char), strerror(errno));
		free(new_mh);
		return(NULL);
	}

	memcpy(new_mh->data, data, data_len);
	new_mh->data_len = data_len;

	return(new_mh);
}
/***********************************************************************************************************************
*
* message_helper_destroy()
*
* Input: The message_helper node that we want to destroy.
* Output: None.
*
* Purpose: Destroy a message_helper node.
*
**********************************************************************************************************************/
/*
 * Free a message_helper node and its backing buffer.
 *
 * Tolerates NULL like free() does; message_helper_create() can return NULL,
 * so callers may hand that straight back to us without crashing.
 */
void message_helper_destroy(struct message_helper *mh){
	if(!mh){
		return;
	}
	free(mh->data);
	free(mh);
}
| 3,462 |
1,738 |
<filename>dev/Code/Sandbox/Plugins/EditorCommon/QPropertyTree/PropertyRowString.cpp
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
// Original file Copyright Crytek GMBH or its affiliates, used under license.
#include "stdafx.h"
#include <math.h>
#include "PropertyRowString.h"
#include "PropertyTreeModel.h"
#include "PropertyDrawContext.h"
#include "QPropertyTree.h"
#include "Serialization/IArchive.h"
#include "Serialization/ClassFactory.h"
#include <QMenu>
#include "Unicode.h"
// ---------------------------------------------------------------------------
SERIALIZATION_CLASS_NAME(PropertyRow, PropertyRowString, "PropertyRowString", "string");
// Copies the stored wide-string value into a narrow string via fromWideChar().
// Always reports success.
bool PropertyRowString::assignTo(string& str) const
{
    str = fromWideChar(value_.c_str());
    return true;
}
// Copies the stored wide-string value directly (no conversion needed).
// Always reports success.
bool PropertyRowString::assignTo(wstring& str) const
{
    str = value_;
    return true;
}
// Creates the in-place editing widget for this row in the given tree.
// Ownership follows QPropertyTree's widget conventions.
PropertyRowWidget* PropertyRowString::createWidget(QPropertyTree* tree)
{
    return new PropertyRowWidgetString(this, tree);
}
// Writes the row's value into a caller-provided string object, dispatching on
// its runtime type id. Supports string and wstring destinations; any other
// type is rejected.
//
// Returns true when the value was written, false for an unsupported type.
bool PropertyRowString::assignToByPointer(void* instance, const Serialization::TypeID& type) const
{
    if (type == Serialization::TypeID::get<wstring>())
    {
        assignTo(*static_cast<wstring*>(instance));
        return true;
    }
    if (type == Serialization::TypeID::get<string>())
    {
        assignTo(*static_cast<string*>(instance));
        return true;
    }
    return false;
}
// Returns the value converted to a narrow string for display/serialization.
string PropertyRowString::valueAsString() const
{
    return fromWideChar(value_.c_str());
}
// Stores a wide-string value and records the source object's address/type in
// the serializer so the row can write back to it later.
void PropertyRowString::setValue(const wchar_t* str, const void* handle, const Serialization::TypeID& type)
{
    value_ = str;
    serializer_.setPointer((void*)handle);
    serializer_.setType(type);
}
// Narrow-string overload: converts to wide via toWideChar(), then records the
// source object's address/type in the serializer, mirroring the wide overload.
void PropertyRowString::setValue(const char* str, const void* handle, const Serialization::TypeID& type)
{
    value_ = toWideChar(str);
    serializer_.setPointer((void*)handle);
    serializer_.setType(type);
}
// Serializes the stored value under the key "value" (label "Value").
void PropertyRowString::serializeValue(Serialization::IArchive& ar)
{
    ar(value_, "value", "Value");
}
#include <QPropertyTree/PropertyRowString.moc>
// vim:ts=4 sw=4:
| 825 |
14,668 |
<reponame>zealoussnow/chromium
{
"desc" : "Simple non-empty configuration file to test that configuration is loaded correctly",
"testValue" : "something"
}
| 47 |
839 |
<gh_stars>100-1000
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.jaxrs.fortest.jaxb;
import javax.xml.bind.JAXBElement;
import javax.xml.bind.annotation.XmlElementDecl;
import javax.xml.bind.annotation.XmlRegistry;
import javax.xml.namespace.QName;
/**
 * JAXB object factory for the test book types. The {@code @XmlRegistry}
 * annotation lets JAXB discover the {@code create*} methods when building a
 * context for this package.
 */
@XmlRegistry
public class ObjectFactory {

    // Qualified element name for SuperBook2 in the "http://books" namespace.
    private static final QName SUPERBOOK2_QNAME = new QName("http://books", "SuperBook2");

    /** Creates a new {@link Book}. */
    public Book createBook() {
        return new Book();
    }

    /** Creates a new {@link SuperBook}. */
    public SuperBook createSuperBook() {
        return new SuperBook();
    }

    /** Creates a new {@link SuperBook2}. */
    public SuperBook2 createSuperBook2() {
        return new SuperBook2();
    }

    /** Wraps a {@link SuperBook2} in a {@link JAXBElement} for the {http://books}SuperBook2 element. */
    @XmlElementDecl(namespace = "http://books", name = "SuperBook2")
    public JAXBElement<SuperBook2> createExactlyOne(SuperBook2 value) {
        return new JAXBElement<SuperBook2>(SUPERBOOK2_QNAME, SuperBook2.class, null, value);
    }
}
| 510 |
640 |
<gh_stars>100-1000
/* Load the PSG-compressed power-up sprite data into slot POWERUPBASE. */
void InitPowerupSprites()
{
	LoadSprite(powerup_psgcompr,POWERUPBASE,powerup_psgcompr_bank);
}
/* Power-up subsystem init; currently this only loads the sprites. */
void InitPowerups()
{
	InitPowerupSprites();
}
/* Per-frame update of the single active power-up (powerupt != 0):
   applies simple gravity, draws it, despawns it off-screen, and grants
   its effect on player contact. */
void UpdatePowerup()
{
	if(powerupt!=0)
	{
		/* Accelerate downward once every 8 frames, then move. */
		if(stageframe%8==0)powerupv++;
		powerupy+=powerupv;
		DrawSpriteArray(POWERUPBASE-2+(powerupt<<1),powerupx,powerupy,16,8);
		/* Despawn past y=192 (presumably the bottom of the display — confirm). */
		if(powerupy>=192)powerupt=0;
		// Player gets powerup?
		/* Only player types 3 and 5 can collect power-ups. */
		if((playertype==3)||(playertype==5))
		{
			/* 16x16 player vs 16x8 power-up overlap test. */
			if((playerx+16>powerupx)&&(playerx<powerupx+16)&&(playery+16>powerupy)&&(playery<powerupy+8))
			{
				// Assign to player
				/* Type 1: speed boost; type 2: shot level; type 3: max shots. */
				if(powerupt==1)playerspeed=DEFAULTPLAYERSPEED+1;
				else if(powerupt==2)playershootlevel=1;
				else playershootmax=MAXPLAYERSHOOTS;
				// Reset
				powerupt=0;
				// Sound
				PlaySound(powerup_psg,1);
			}
		}
	}
}
/* Possibly spawn a power-up at the given enemy's position.
   Only player type 3 earns drops, only while no power-up is active, and
   only on every sixth qualifying call. The spawned power-up gets a random
   type (1..3) and an initial upward velocity. */
void InitPowerup(enemy *en)
{
	if(playertype!=3) return;
	if(powerupt!=0) return;

	powerupcounter++;
	if(powerupcounter!=6) return;

	powerupt=1+(myRand()%3);
	powerupx=en->enemyposx;
	powerupy=en->enemyposy;
	powerupcounter=0;
	powerupv=-1;
}
| 519 |
3,428 |
/**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include "stdlib/ndarray/base/numel.h"
/**
* Returns the number of elements in an array.
*
* @param ndims number of dimensions
* @param shape array shape (dimensions)
* @return number of elements
*
* @example
* #include "stdlib/ndarray/base/numel.h"
*
* int64_t ndims = 2;
* int64_t shape[] = { 10, 8 };
*
* int64_t n = stdlib_ndarray_numel( ndims, shape );
* // returns 80
*/
/*
 * Returns the number of elements implied by an array shape: the product of
 * all dimensions. A zero-dimensional input or any negative dimension yields
 * zero, matching the original implementation.
 */
int64_t stdlib_ndarray_numel( int64_t ndims, int64_t *shape ) {
	int64_t total;

	if ( ndims == 0 ) {
		return 0;
	}
	/* Multiply the dimensions back-to-front; order does not affect the product. */
	total = 1;
	while ( ndims > 0 ) {
		ndims -= 1;
		if ( shape[ ndims ] < 0 ) {
			return 0;
		}
		total *= shape[ ndims ];
	}
	return total;
}
| 466 |
407 |
<filename>secondaryindex/src/test/java/org/apache/hadoop/hbase/index/TestUtils.java
/**
* Copyright 2011 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.index;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.experimental.categories.Category;
/**
 * Static helpers shared by the secondary-index tests: building indexed table
 * descriptors, waiting for index-table creation, and checking that user-table
 * and index-table regions are co-located on the same region servers.
 */
@Category(LargeTests.class)
public class TestUtils {

  /**
   * Builds an {@link IndexedHTableDescriptor} with a single column family and a
   * single index over {@code indexColumnQualifier} (String-valued, max length 10).
   * NOTE(review): the {@code indexColumnFamily} parameter is never used — the
   * index is built on {@code columnFamily}; confirm whether that is intended.
   */
  public static IndexedHTableDescriptor createIndexedHTableDescriptor(String tableName,
      String columnFamily, String indexName, String indexColumnFamily, String indexColumnQualifier) {
    IndexedHTableDescriptor htd = new IndexedHTableDescriptor(tableName);
    IndexSpecification iSpec = new IndexSpecification(indexName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    iSpec.addIndexColumn(hcd, indexColumnQualifier, ValueType.String, 10);
    htd.addFamily(hcd);
    htd.addIndex(iSpec);
    return htd;
  }

  /**
   * Polls once per second until the table shows up.
   * NOTE(review): the loop keeps going only while the table neither exists in
   * META nor is marked enabled in ZK, i.e. it exits as soon as EITHER becomes
   * true — confirm whether waiting for BOTH was the intent (that would need
   * {@code ||} instead of {@code &&}).
   */
  public static void waitUntilIndexTableCreated(HMaster master, String tableName)
      throws IOException, InterruptedException {
    boolean isEnabled = false;
    boolean isExist = false;
    do {
      isExist = MetaReader.tableExists(master.getCatalogTracker(), tableName);
      isEnabled = master.getAssignmentManager().getZKTable().isEnabledTable(tableName);
      Thread.sleep(1000);
    } while ((false == isExist) && (false == isEnabled));
  }

  /**
   * Returns one (startKey, serverName) pair per region of the given table, in
   * the order reported by META.
   */
  public static List<Pair<byte[], ServerName>> getStartKeysAndLocations(HMaster master,
      String tableName) throws IOException, InterruptedException {

    List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations =
        MetaReader.getTableRegionsAndLocations(master.getCatalogTracker(), tableName);
    List<Pair<byte[], ServerName>> startKeyAndLocationPairs =
        new ArrayList<Pair<byte[], ServerName>>(tableRegionsAndLocations.size());
    Pair<byte[], ServerName> startKeyAndLocation = null;
    for (Pair<HRegionInfo, ServerName> regionAndLocation : tableRegionsAndLocations) {
      startKeyAndLocation =
          new Pair<byte[], ServerName>(regionAndLocation.getFirst().getStartKey(),
              regionAndLocation.getSecond());
      startKeyAndLocationPairs.add(startKeyAndLocation);
    }
    return startKeyAndLocationPairs;
  }

  /**
   * Returns true when the user table and its index table have the same number
   * of regions and each positionally-matched region pair shares both its start
   * key and its hosting server.
   */
  public static boolean checkForColocation(HMaster master, String tableName, String indexTableName)
      throws IOException, InterruptedException {
    List<Pair<byte[], ServerName>> uTableStartKeysAndLocations =
        getStartKeysAndLocations(master, tableName);
    List<Pair<byte[], ServerName>> iTableStartKeysAndLocations =
        getStartKeysAndLocations(master, indexTableName);

    boolean regionsColocated = true;
    if (uTableStartKeysAndLocations.size() != iTableStartKeysAndLocations.size()) {
      regionsColocated = false;
    } else {
      for (int i = 0; i < uTableStartKeysAndLocations.size(); i++) {
        Pair<byte[], ServerName> uStartKeyAndLocation = uTableStartKeysAndLocations.get(i);
        Pair<byte[], ServerName> iStartKeyAndLocation = iTableStartKeysAndLocations.get(i);

        if (Bytes.compareTo(uStartKeyAndLocation.getFirst(), iStartKeyAndLocation.getFirst()) == 0) {
          if (uStartKeyAndLocation.getSecond().equals(iStartKeyAndLocation.getSecond())) {
            continue;
          }
        }
        regionsColocated = false;
      }
    }
    return regionsColocated;
  }
}
| 1,523 |
312 |
<gh_stars>100-1000
/* key.h
* Copyright (C) 2001-2007, Parrot Foundation.
* Overview:
* This is the API header for the PMC subsystem
* Data Structure and Algorithms:
* History:
* Notes:
* References:
*/
#ifndef PARROT_KEY_H_GUARD
#define PARROT_KEY_H_GUARD
#include "parrot/parrot.h"
/*
Type of Keys.
C<KEY_register_FLAG> used for indirect referencing. E.g.
$S0 = "foo"
$P1 = $P0[$S0]
In this case C<[$S0]> will have type C<KEY_string_FLAG | KEY_register_FLAG>
and store I<integer> value of string register number.
*/
/* Each key type is recorded in one of the PObj private flag bits, so a key's
 * payload kind can be tested without extra storage. */
typedef enum {
    KEY_integer_FLAG = PObj_private0_FLAG,
    KEY_string_FLAG = PObj_private2_FLAG,
    KEY_pmc_FLAG = PObj_private3_FLAG,
    KEY_register_FLAG = PObj_private4_FLAG,

    /* Mask covering every key-type bit at once. */
    KEY_type_FLAGS = KEY_integer_FLAG |
                     KEY_string_FLAG |
                     KEY_pmc_FLAG |
                     KEY_register_FLAG
} KEY_flags;

/* Read, clear, or set the key-type bits within a key's flag word. */
#define KEY_get_FLAGS(p) (PObj_get_FLAGS(p) & KEY_type_FLAGS)
#define KEY_flags_CLEARALL(p) ((p)->flags &= ~KEY_type_FLAGS)
#define KEY_set_flag(p, i) ((p)->flags |= (i))

/* Per-type set/test/clear helpers built on the PObj flag macros. */
#define KEY_integer_SET(p) PObj_flag_SET(private0, (p))
#define KEY_integer_TEST(p) PObj_flag_TEST(private0, (p))
#define KEY_integer_CLEAR(p) PObj_flag_CLEAR(private0, (p))

#define KEY_string_SET(p) PObj_flag_SET(private2, (p))
#define KEY_string_TEST(p) PObj_flag_TEST(private2, (p))
#define KEY_string_CLEAR(p) PObj_flag_CLEAR(private2, (p))

#define KEY_pmc_SET(p) PObj_flag_SET(private3, (p))
#define KEY_pmc_TEST(p) PObj_flag_TEST(private3, (p))
#define KEY_pmc_CLEAR(p) PObj_flag_CLEAR(private3, (p))

#define KEY_register_SET(p) PObj_flag_SET(private4, (p))
#define KEY_register_TEST(p) PObj_flag_TEST(private4, (p))
#define KEY_register_CLEAR(p) PObj_flag_CLEAR(private4, (p))
/* HEADERIZER BEGIN: src/key.c */
/* Don't modify between HEADERIZER BEGIN / HEADERIZER END. Your changes will be lost. */
PARROT_EXPORT
PARROT_CANNOT_RETURN_NULL
PARROT_IGNORABLE_RESULT
PMC * Parrot_key_append(PARROT_INTERP, ARGMOD(PMC *key1), ARGIN(PMC *key2))
__attribute__nonnull__(1)
__attribute__nonnull__(2)
__attribute__nonnull__(3)
FUNC_MODIFIES(*key1);
PARROT_EXPORT
PARROT_WARN_UNUSED_RESULT
INTVAL Parrot_key_integer(PARROT_INTERP, ARGIN(PMC *key))
__attribute__nonnull__(1)
__attribute__nonnull__(2);
PARROT_EXPORT
void Parrot_key_mark(PARROT_INTERP, ARGIN(PMC *key))
__attribute__nonnull__(1)
__attribute__nonnull__(2);
PARROT_EXPORT
PARROT_CANNOT_RETURN_NULL
PARROT_WARN_UNUSED_RESULT
PMC * Parrot_key_new(PARROT_INTERP)
__attribute__nonnull__(1);
PARROT_EXPORT
PARROT_CANNOT_RETURN_NULL
PARROT_WARN_UNUSED_RESULT
PMC * Parrot_key_new_cstring(PARROT_INTERP, ARGIN_NULLOK(const char *value))
__attribute__nonnull__(1);
PARROT_EXPORT
PARROT_CANNOT_RETURN_NULL
PARROT_WARN_UNUSED_RESULT
PMC * Parrot_key_new_integer(PARROT_INTERP, INTVAL value)
__attribute__nonnull__(1);
PARROT_EXPORT
PARROT_CANNOT_RETURN_NULL
PARROT_WARN_UNUSED_RESULT
PMC * Parrot_key_new_string(PARROT_INTERP, ARGIN(STRING *value))
__attribute__nonnull__(1)
__attribute__nonnull__(2);
PARROT_EXPORT
PARROT_CAN_RETURN_NULL
PARROT_WARN_UNUSED_RESULT
PMC * Parrot_key_next(PARROT_INTERP, ARGIN(PMC *key))
__attribute__nonnull__(1)
__attribute__nonnull__(2);
PARROT_EXPORT
PARROT_CANNOT_RETURN_NULL
PARROT_WARN_UNUSED_RESULT
PMC * Parrot_key_pmc(PARROT_INTERP, ARGIN(PMC *key))
__attribute__nonnull__(1)
__attribute__nonnull__(2);
PARROT_EXPORT
void Parrot_key_set_integer(PARROT_INTERP, ARGMOD(PMC *key), INTVAL value)
__attribute__nonnull__(1)
__attribute__nonnull__(2)
FUNC_MODIFIES(*key);
PARROT_EXPORT
void Parrot_key_set_register(PARROT_INTERP,
ARGMOD(PMC *key),
INTVAL value,
INTVAL flag)
__attribute__nonnull__(1)
__attribute__nonnull__(2)
FUNC_MODIFIES(*key);
PARROT_EXPORT
void Parrot_key_set_string(PARROT_INTERP,
ARGMOD(PMC *key),
ARGIN(STRING *value))
__attribute__nonnull__(1)
__attribute__nonnull__(2)
__attribute__nonnull__(3)
FUNC_MODIFIES(*key);
PARROT_EXPORT
PARROT_CANNOT_RETURN_NULL
PARROT_WARN_UNUSED_RESULT
STRING * Parrot_key_set_to_string(PARROT_INTERP, ARGIN_NULLOK(PMC *key))
__attribute__nonnull__(1);
PARROT_EXPORT
PARROT_WARN_UNUSED_RESULT
PARROT_CAN_RETURN_NULL
STRING * Parrot_key_string(PARROT_INTERP, ARGIN(PMC *key))
__attribute__nonnull__(1)
__attribute__nonnull__(2);
PARROT_EXPORT
PARROT_PURE_FUNCTION
PARROT_WARN_UNUSED_RESULT
INTVAL Parrot_key_type(PARROT_INTERP, ARGIN(const PMC *key))
__attribute__nonnull__(2);
#define ASSERT_ARGS_Parrot_key_append __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp) \
, PARROT_ASSERT_ARG(key1) \
, PARROT_ASSERT_ARG(key2))
#define ASSERT_ARGS_Parrot_key_integer __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp) \
, PARROT_ASSERT_ARG(key))
#define ASSERT_ARGS_Parrot_key_mark __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp) \
, PARROT_ASSERT_ARG(key))
#define ASSERT_ARGS_Parrot_key_new __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp))
#define ASSERT_ARGS_Parrot_key_new_cstring __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp))
#define ASSERT_ARGS_Parrot_key_new_integer __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp))
#define ASSERT_ARGS_Parrot_key_new_string __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp) \
, PARROT_ASSERT_ARG(value))
#define ASSERT_ARGS_Parrot_key_next __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp) \
, PARROT_ASSERT_ARG(key))
#define ASSERT_ARGS_Parrot_key_pmc __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp) \
, PARROT_ASSERT_ARG(key))
#define ASSERT_ARGS_Parrot_key_set_integer __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp) \
, PARROT_ASSERT_ARG(key))
#define ASSERT_ARGS_Parrot_key_set_register __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp) \
, PARROT_ASSERT_ARG(key))
#define ASSERT_ARGS_Parrot_key_set_string __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp) \
, PARROT_ASSERT_ARG(key) \
, PARROT_ASSERT_ARG(value))
#define ASSERT_ARGS_Parrot_key_set_to_string __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp))
#define ASSERT_ARGS_Parrot_key_string __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(interp) \
, PARROT_ASSERT_ARG(key))
#define ASSERT_ARGS_Parrot_key_type __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
PARROT_ASSERT_ARG(key))
/* Don't modify between HEADERIZER BEGIN / HEADERIZER END. Your changes will be lost. */
/* HEADERIZER END: src/key.c */
#endif /* PARROT_KEY_H_GUARD */
/*
* Local variables:
* c-file-style: "parrot"
* End:
* vim: expandtab shiftwidth=4 cinoptions='\:2=2' :
*/
| 3,451 |
585 |
<reponame>KevinKecc/caffe2
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestLossOps(hu.HypothesisTestCase):
    # Hypothesis-driven tests for Caffe2's loss operators.

    @given(n=st.integers(1, 8), **hu.gcs)
    def test_averaged_loss(self, n, gc, dc):
        """AveragedLoss over a random 1-D float32 vector of length n should
        equal np.mean, and its gradient should pass the numerical check."""
        X = np.random.rand(n).astype(np.float32)

        def avg_op(X):
            # NumPy reference: the op outputs a single value, the mean of X.
            return [np.mean(X)]

        op = core.CreateOperator(
            "AveragedLoss",
            ["X"],
            ["y"],
        )

        # Compare the operator's output against the NumPy reference.
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            reference=avg_op,
        )

        # Numerically check the gradient of output 0 with respect to X.
        self.assertGradientChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            outputs_to_check=0,
            outputs_with_grads=[0],
        )
| 663 |
482 |
package io.cattle.platform.iaas.api.filter.snapshot;
import static io.cattle.platform.core.model.tables.SnapshotTable.*;
import io.cattle.platform.core.model.Snapshot;
import io.cattle.platform.iaas.api.filter.common.AbstractDefaultResourceManagerFilter;
import io.cattle.platform.object.ObjectManager;
import io.github.ibuildthecloud.gdapi.condition.Condition;
import io.github.ibuildthecloud.gdapi.condition.ConditionType;
import io.github.ibuildthecloud.gdapi.exception.ClientVisibleException;
import io.github.ibuildthecloud.gdapi.request.ApiRequest;
import io.github.ibuildthecloud.gdapi.request.resource.ResourceManager;
import io.github.ibuildthecloud.gdapi.util.ResponseCodes;
import io.github.ibuildthecloud.gdapi.validation.ValidationErrorCodes;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
/**
 * API filter that blocks deletion of a volume's most recent snapshot.
 *
 * <p>Applies to both the "remove" resource action and a direct DELETE on a
 * snapshot: removal is allowed only when at least one newer (higher-id,
 * not-removed) snapshot exists for the same volume.</p>
 */
public class SnapshotValidationFilter extends AbstractDefaultResourceManagerFilter {

    @Inject
    ObjectManager objectManager;

    @Override
    public Class<?>[] getTypeClasses() {
        return new Class<?>[] { Snapshot.class };
    }

    /** Validates "remove" actions before delegating to the next manager. */
    @Override
    public Object resourceAction(String type, ApiRequest request, ResourceManager next) {
        if ("remove".equalsIgnoreCase(request.getAction())) {
            validateSnapshotRemove(request);
        }
        return super.resourceAction(type, request, next);
    }

    /** Validates direct DELETEs before delegating to the next manager. */
    @Override
    public Object delete(String type, String id, ApiRequest request, ResourceManager next) {
        validateSnapshotRemove(request);
        return super.delete(type, id, request, next);
    }

    /**
     * Throws a 400 {@link ClientVisibleException} (INVALID_STATE) when the
     * snapshot being removed is the newest non-removed snapshot of its volume.
     *
     * @param request the API request whose id identifies the snapshot
     */
    void validateSnapshotRemove(ApiRequest request) {
        Snapshot snapshot = objectManager.loadResource(Snapshot.class, request.getId());
        // Look for newer (higher-id) snapshots of the same volume that are still present.
        Map<Object, Object> criteria = new HashMap<Object, Object>();
        criteria.put(SNAPSHOT.VOLUME_ID, snapshot.getVolumeId());
        criteria.put(SNAPSHOT.REMOVED, null);
        criteria.put(SNAPSHOT.ID, new Condition(ConditionType.GT, snapshot.getId()));
        List<Snapshot> newerSnapshots = objectManager.find(Snapshot.class, criteria);
        if (newerSnapshots.isEmpty()) {
            throw new ClientVisibleException(ResponseCodes.BAD_REQUEST, ValidationErrorCodes.INVALID_STATE,
                    "This snapshot cannot be removed because it is the latest one for the volume.", null);
        }
    }
}
| 821 |
323 |
<gh_stars>100-1000
# BSD 3-Clause License
#
# DeepGlint is pleased to support the open source community by making EasyQuant available.
# Copyright (C) 2020 DeepGlint. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import pickle
import caffe
import caffe.proto.caffe_pb2 as caffe_pb2
import cv2
import numpy as np
import shutil
from google.protobuf import text_format
import scipy.spatial.distance as dis
import sys
from collections import OrderedDict
import numpy as np
from functools import reduce
import os
import re
############### end ###################################
def parse_args():
parser = argparse.ArgumentParser(
description='find the pretrained caffe models int8 quantize scale value')
parser.add_argument('--proto', dest='proto',
help="path to deploy prototxt.", type=str)
parser.add_argument('--model', dest='model',
help='path to pretrained weights', type=str)
parser.add_argument('--save', dest='save',
help='path to saved shape pkl file', type=str, default='layerDims.pickle')
args = parser.parse_args()
return args, parser
# Module-level CLI state: parsed once at import time so the helpers below can
# read the paths as globals. (`global` at module scope is a no-op; kept as-is.)
global args, parser
args, parser = parse_args()
proto = args.proto
model = args.model
# NOTE(review): these layer-index bounds appear unused in this file — confirm
# whether they are dead code or consumed elsewhere.
beginLayerIndex = 1
endLayerIndex = 110
def layerToOutputName():
    """Map each layer name in the deploy prototxt to its (last seen) top blob.

    Parses the prototxt textually with regexes rather than via protobuf, so it
    relies on ``name:`` lines preceding their ``top:`` lines within a layer.
    """
    name_re = re.compile(r'\s+?name:\s+?"(.*)"')
    top_re = re.compile(r'\s+?top:\s+?"(.*)"')
    mapping = {}
    current_name = None
    with open(args.proto) as proto_file:
        for line in proto_file:
            name_match = re.match(name_re, line)
            if name_match:
                current_name = name_match.group(1)
            top_match = re.match(top_re, line)
            if top_match:
                mapping[current_name] = top_match.group(1)
    return mapping
def findEachLayerDim(caffe_model, net_file):
    """Run one random forward pass and record each layer's output blob shape.

    Returns an OrderedDict mapping layer name -> shape of the first sample of
    that layer's output blob, in prototxt layer order.
    """
    layer2OutputName = layerToOutputName()
    res = OrderedDict()
    # ReLU6 is not supported by stock caffe; rewrite it to ReLU in a temp copy.
    # The temp file is removed later by main().
    with open(net_file, 'r') as fin:
        with open('temp.prototxt', 'w') as fout:
            for line in fin.readlines():
                fout.write(line.replace('ReLU6', 'ReLU'))
    net = caffe.Net('temp.prototxt', caffe_model, caffe.TEST)
    # NOTE(review): assumes the input blob is named 'data' and accepts a
    # 3x224x224 input — confirm against the deploy prototxt.
    img = np.random.random((224, 224, 3))
    img = img.transpose(2, 0, 1)
    net.blobs['data'].data[...] = img
    output = net.forward()
    params = caffe_pb2.NetParameter()
    with open(net_file) as f:
        text_format.Merge(f.read(), params)
    print(net.blobs.keys())
    # Record shapes only for layers whose top blob is actually present.
    for i, layer in enumerate(params.layer):
        print(layer.name)
        if layer.name in layer2OutputName.keys() and layer2OutputName[layer.name] in net.blobs.keys():
            res[layer.name] = net.blobs[layer2OutputName[layer.name]].data[0].shape
    return res
def main():
    """Dump each layer's output shape to a pickle file and clean up."""
    res = findEachLayerDim(args.model, args.proto)
    for layer_name in res:
        print(layer_name, res[layer_name])
    # Remove the temporary ReLU6->ReLU prototxt created by findEachLayerDim.
    # (The redundant local `import os` was dropped; os is imported at module level.)
    os.remove('temp.prototxt')
    # Fix: pickle requires a binary-mode handle under Python 3; text mode ('w')
    # raises TypeError there. 'wb' is correct on both Python 2 and 3.
    with open(args.save, 'wb') as out_file:
        pickle.dump(res, out_file)
| 1,622 |
2,593 |
<filename>lib/src/main/java/github/chenupt/springindicator/SpringIndicator.java
/*
* Copyright 2015 chenupt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package github.chenupt.springindicator;
import android.animation.ArgbEvaluator;
import android.animation.ObjectAnimator;
import android.content.Context;
import android.content.res.TypedArray;
import android.support.v4.view.ViewPager;
import android.util.AttributeSet;
import android.util.TypedValue;
import android.view.Gravity;
import android.view.View;
import android.view.ViewGroup;
import android.widget.FrameLayout;
import android.widget.LinearLayout;
import android.widget.TextView;
import java.util.ArrayList;
import java.util.List;
/**
* Created by <EMAIL> on 2015/1/31.
* Description : Tab layout container
*/
public class SpringIndicator extends FrameLayout {

    // Total length (ms) of the indicator color animation timeline; page
    // progress is mapped onto it via seek().
    private static final int INDICATOR_ANIM_DURATION = 3000;

    // Controls how sharply the head/foot points accelerate (atan easing).
    private float acceleration = 0.5f;
    // Fraction of a page scroll during which the head point moves; the foot
    // point moves during the complementary fraction.
    private float headMoveOffset = 0.6f;
    private float footMoveOffset = 1- headMoveOffset;

    // Radii of the indicator circles at rest (max) and mid-drag (min).
    private float radiusMax;
    private float radiusMin;
    private float radiusOffset;

    // Styling resolved from XML attributes (resource ids, not raw colors).
    private float textSize;
    private int textColorId;
    private int textBgResId;
    private int selectedTextColorId;
    private int indicatorColorId;
    private int indicatorColorsId;
    private int[] indicatorColorArray;

    private LinearLayout tabContainer;
    private SpringView springView;
    private ViewPager viewPager;

    private List<TextView> tabs;

    // Optional client callbacks delegated to from the internal listener.
    private ViewPager.OnPageChangeListener delegateListener;
    private TabClickListener tabClickListener;
    private ObjectAnimator indicatorColorAnim;

    public SpringIndicator(Context context) {
        this(context, null);
    }

    public SpringIndicator(Context context, AttributeSet attrs) {
        super(context, attrs);
        initAttrs(attrs);
    }

    // Reads the custom XML attributes, falling back to library defaults.
    private void initAttrs(AttributeSet attrs){
        textColorId = R.color.si_default_text_color;
        selectedTextColorId = R.color.si_default_text_color_selected;
        indicatorColorId = R.color.si_default_indicator_bg;
        textSize = getResources().getDimension(R.dimen.si_default_text_size);
        radiusMax = getResources().getDimension(R.dimen.si_default_radius_max);
        radiusMin = getResources().getDimension(R.dimen.si_default_radius_min);
        TypedArray a = getContext().obtainStyledAttributes(attrs, R.styleable.SpringIndicator);
        textColorId = a.getResourceId(R.styleable.SpringIndicator_siTextColor, textColorId);
        selectedTextColorId = a.getResourceId(R.styleable.SpringIndicator_siSelectedTextColor, selectedTextColorId);
        textSize = a.getDimension(R.styleable.SpringIndicator_siTextSize, textSize);
        textBgResId = a.getResourceId(R.styleable.SpringIndicator_siTextBg, 0);
        indicatorColorId = a.getResourceId(R.styleable.SpringIndicator_siIndicatorColor, indicatorColorId);
        indicatorColorsId = a.getResourceId(R.styleable.SpringIndicator_siIndicatorColors, 0);
        radiusMax = a.getDimension(R.styleable.SpringIndicator_siRadiusMax, radiusMax);
        radiusMin = a.getDimension(R.styleable.SpringIndicator_siRadiusMin, radiusMin);
        a.recycle();
        // A color *array* resource enables the animated multi-color indicator.
        if(indicatorColorsId != 0){
            indicatorColorArray = getResources().getIntArray(indicatorColorsId);
        }
        radiusOffset = radiusMax - radiusMin;
    }

    // Attaches the indicator to a pager and builds the tab strip.
    public void setViewPager(final ViewPager viewPager) {
        this.viewPager = viewPager;
        initSpringView();
        setUpListener();
    }

    private void initSpringView() {
        addPointView();
        addTabContainerView();
        addTabItems();
    }

    private void addPointView() {
        springView = new SpringView(getContext());
        springView.setIndicatorColor(getResources().getColor(indicatorColorId));
        addView(springView);
    }

    private void addTabContainerView() {
        tabContainer = new LinearLayout(getContext());
        tabContainer.setLayoutParams(new LinearLayout.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT, 1.0f));
        tabContainer.setOrientation(LinearLayout.HORIZONTAL);
        tabContainer.setGravity(Gravity.CENTER);
        addView(tabContainer);
    }

    // Creates one equally-weighted TextView per pager page.
    private void addTabItems() {
        LinearLayout.LayoutParams layoutParams = new LinearLayout.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT, 1.0f);
        tabs = new ArrayList<>();
        for (int i = 0; i < viewPager.getAdapter().getCount(); i++) {
            TextView textView = new TextView(getContext());
            if(viewPager.getAdapter().getPageTitle(i) != null){
                textView.setText(viewPager.getAdapter().getPageTitle(i));
            }
            textView.setGravity(Gravity.CENTER);
            textView.setTextSize(TypedValue.COMPLEX_UNIT_PX, textSize);
            textView.setTextColor(getResources().getColor(textColorId));
            if (textBgResId != 0){
                textView.setBackgroundResource(textBgResId);
            }
            textView.setLayoutParams(layoutParams);
            final int position = i;
            textView.setOnClickListener(new OnClickListener() {
                @Override
                public void onClick(View v) {
                    // The click listener may veto navigation by returning false.
                    if(tabClickListener == null || tabClickListener.onTabClick(position)){
                        viewPager.setCurrentItem(position);
                    }
                }
            });
            tabs.add(textView);
            tabContainer.addView(textView);
        }
    }

    /**
     * Set current point position. Anchors both spring endpoints at the center
     * of the currently selected tab and plays the creation animation.
     */
    private void createPoints(){
        View view = tabs.get(viewPager.getCurrentItem());
        springView.getHeadPoint().setX(view.getX() + view.getWidth() / 2);
        springView.getHeadPoint().setY(view.getY() + view.getHeight() / 2);
        springView.getFootPoint().setX(view.getX() + view.getWidth() / 2);
        springView.getFootPoint().setY(view.getY() + view.getHeight() / 2);
        springView.animCreate();
    }

    @Override
    protected void onLayout(boolean changed, int l, int t, int r, int b) {
        super.onLayout(changed, l, t, r, b);
        // Tab positions are only known after layout, so (re)anchor the spring
        // and highlight the current tab here.
        if (changed) {
            createPoints();
            setSelectedTextColor(viewPager.getCurrentItem());
        }
    }

    private void setUpListener(){
        viewPager.setOnPageChangeListener(new ViewPager.SimpleOnPageChangeListener() {
            @Override
            public void onPageSelected(int position) {
                super.onPageSelected(position);
                setSelectedTextColor(position);
                if(delegateListener != null){
                    delegateListener.onPageSelected(position);
                }
            }
            @Override
            public void onPageScrolled(int position, float positionOffset, int positionOffsetPixels) {
                if (position < tabs.size() - 1) {
                    // radius: head shrinks during the first half of the swipe,
                    // foot during the second half.
                    float radiusOffsetHead = 0.5f;
                    if(positionOffset < radiusOffsetHead){
                        springView.getHeadPoint().setRadius(radiusMin);
                    }else{
                        springView.getHeadPoint().setRadius(((positionOffset-radiusOffsetHead)/(1-radiusOffsetHead) * radiusOffset + radiusMin));
                    }
                    float radiusOffsetFoot = 0.5f;
                    if(positionOffset < radiusOffsetFoot){
                        springView.getFootPoint().setRadius((1-positionOffset/radiusOffsetFoot) * radiusOffset + radiusMin);
                    }else{
                        springView.getFootPoint().setRadius(radiusMin);
                    }
                    // x: atan-based easing moves the head early and the foot
                    // late, producing the stretch effect.
                    float headX = 1f;
                    if (positionOffset < headMoveOffset){
                        float positionOffsetTemp = positionOffset / headMoveOffset;
                        headX = (float) ((Math.atan(positionOffsetTemp*acceleration*2 - acceleration ) + (Math.atan(acceleration))) / (2 * (Math.atan(acceleration))));
                    }
                    springView.getHeadPoint().setX(getTabX(position) - headX * getPositionDistance(position));
                    float footX = 0f;
                    if (positionOffset > footMoveOffset){
                        float positionOffsetTemp = (positionOffset- footMoveOffset) / (1- footMoveOffset);
                        footX = (float) ((Math.atan(positionOffsetTemp*acceleration*2 - acceleration ) + (Math.atan(acceleration))) / (2 * (Math.atan(acceleration))));
                    }
                    springView.getFootPoint().setX(getTabX(position) - footX * getPositionDistance(position));
                    // reset radius once the page settles exactly on a tab
                    if(positionOffset == 0){
                        springView.getHeadPoint().setRadius(radiusMax);
                        springView.getFootPoint().setRadius(radiusMax);
                    }
                } else {
                    // Last page: pin both points at the final tab.
                    springView.getHeadPoint().setX(getTabX(position));
                    springView.getFootPoint().setX(getTabX(position));
                    springView.getHeadPoint().setRadius(radiusMax);
                    springView.getFootPoint().setRadius(radiusMax);
                }
                // set indicator colors
                // https://github.com/TaurusXi/GuideBackgroundColorAnimation
                if (indicatorColorsId != 0){
                    float length = (position + positionOffset) / viewPager.getAdapter().getCount();
                    int progress = (int) (length * INDICATOR_ANIM_DURATION);
                    seek(progress);
                }
                springView.postInvalidate();
                if(delegateListener != null){
                    delegateListener.onPageScrolled(position, positionOffset, positionOffsetPixels);
                }
            }
            @Override
            public void onPageScrollStateChanged(int state) {
                super.onPageScrollStateChanged(state);
                if(delegateListener != null){
                    delegateListener.onPageScrollStateChanged(state);
                }
            }
        });
    }

    // Signed distance from the tab at `position` to the next one. Typically
    // negative in LTR layouts; the sign cancels where it is subtracted above.
    private float getPositionDistance(int position) {
        float tarX = tabs.get(position + 1).getX();
        float oriX = tabs.get(position).getX();
        return oriX - tarX;
    }

    // Horizontal center of the given tab.
    private float getTabX(int position) {
        return tabs.get(position).getX() + tabs.get(position).getWidth() / 2;
    }

    // Resets every tab to the normal color, then highlights the selected one.
    private void setSelectedTextColor(int position){
        for (TextView tab : tabs) {
            tab.setTextColor(getResources().getColor(textColorId));
        }
        tabs.get(position).setTextColor(getResources().getColor(selectedTextColorId));
    }

    // Lazily builds the color animator over the configured color array.
    private void createIndicatorColorAnim(){
        indicatorColorAnim = ObjectAnimator.ofInt(springView, "indicatorColor", indicatorColorArray);
        indicatorColorAnim.setEvaluator(new ArgbEvaluator());
        indicatorColorAnim.setDuration(INDICATOR_ANIM_DURATION);
    }

    // Scrubs the color animation to the given play time instead of running it.
    private void seek(long seekTime) {
        if (indicatorColorAnim == null) {
            createIndicatorColorAnim();
        }
        indicatorColorAnim.setCurrentPlayTime(seekTime);
    }

    public List<TextView> getTabs(){
        return tabs;
    }

    public void setOnPageChangeListener(ViewPager.OnPageChangeListener listener){
        this.delegateListener = listener;
    }

    public void setOnTabClickListener(TabClickListener listener){
        this.tabClickListener = listener;
    }
}
| 5,142 |
657 |
<gh_stars>100-1000
# Generated by Django 2.1 on 2018-08-13 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops DistrictNeed.status and adds the
    # "cnandpts" contacts/collection-points text field.

    dependencies = [
        ("mainapp", "0016_contributor_status"),
    ]

    operations = [
        migrations.RemoveField(model_name="districtneed", name="status",),
        # default=" " only backfills existing rows; preserve_default=False
        # drops the default from the final schema.
        migrations.AddField(
            model_name="districtneed",
            name="cnandpts",
            field=models.TextField(default=" ", verbose_name="Contacts and collection points"),
            preserve_default=False,
        ),
    ]
| 237 |
376 |
// Copyright (C) 2020 <NAME>
// This file is part of the "Nazara Engine - Utility module"
// For conditions of distribution and use, see copyright notice in Config.hpp
#pragma once
#ifndef NAZARA_LOADERS_MD2_HPP
#define NAZARA_LOADERS_MD2_HPP
#include <Nazara/Prerequisites.hpp>
#include <Nazara/Utility/Mesh.hpp>
namespace Nz::Loaders
{
	// Returns the mesh-loader entry able to parse MD2 (Quake II) model files.
	MeshLoader::Entry GetMeshLoader_MD2();
}
#endif // NAZARA_LOADERS_MD2_HPP
| 155 |
3,459 |
<gh_stars>1000+
/*
* Copyright 2017 requery.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.requery.processor;
import javax.lang.model.element.AnnotationMirror;
import javax.lang.model.element.AnnotationValue;
import javax.lang.model.element.Element;
import javax.lang.model.element.TypeElement;
import javax.lang.model.type.DeclaredType;
import javax.lang.model.type.TypeKind;
import javax.lang.model.type.TypeMirror;
import javax.lang.model.util.ElementFilter;
import javax.lang.model.util.SimpleTypeVisitor6;
import javax.lang.model.util.Types;
import java.lang.annotation.Annotation;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
 * Helper class for working with {@link java.lang.annotation.Annotation} and
 * {@link javax.lang.model.element.AnnotationMirror}.
 *
 * @author <NAME>
 */
final class Mirrors {

    private Mirrors() {
    }

    /** Returns the mirror of {@code annotation} on {@code element}, if present. */
    static Optional<? extends AnnotationMirror> findAnnotationMirror(
        Element element, Class<? extends Annotation> annotation) {
        return findAnnotationMirror(element, annotation.getName());
    }

    /** Returns the mirror whose annotation type has the given qualified name. */
    static Optional<? extends AnnotationMirror> findAnnotationMirror(
        Element element, String qualifiedName) {
        return element.getAnnotationMirrors().stream()
            .filter(mirror ->
                namesEqual((TypeElement) mirror.getAnnotationType().asElement(), qualifiedName))
            .findFirst();
    }

    /** Shorthand for {@link #findAnnotationValue(AnnotationMirror, String)} with "value". */
    static Optional<AnnotationValue> findAnnotationValue(AnnotationMirror mirror) {
        return findAnnotationValue(mirror, "value");
    }

    /** Returns the explicitly declared annotation value named {@code name}, if any. */
    static Optional<AnnotationValue> findAnnotationValue(AnnotationMirror mirror, String name) {
        return mirror.getElementValues() == null ? Optional.empty() :
            mirror.getElementValues().entrySet().stream()
                .filter(entry -> entry.getKey().getSimpleName().contentEquals(name))
                .map(entry -> (AnnotationValue)entry.getValue()).findFirst();
    }

    /** Collects the generic type arguments of {@code typeMirror} (empty when not declared/parameterized). */
    static List<TypeMirror> listGenericTypeArguments(TypeMirror typeMirror) {
        final List<TypeMirror> typeArguments = new ArrayList<>();
        typeMirror.accept(new SimpleTypeVisitor6<Void, Void>() {
            @Override
            public Void visitDeclared(DeclaredType declaredType, Void v) {
                if (!declaredType.getTypeArguments().isEmpty()) {
                    typeArguments.addAll(declaredType.getTypeArguments());
                }
                return null;
            }
            @Override
            protected Void defaultAction(TypeMirror typeMirror, Void v) {
                return null;
            }
        }, null);
        return typeArguments;
    }

    /** Returns true if {@code element} is, extends, or implements {@code type}. */
    static boolean isInstance(Types types, TypeElement element, Class<?> type) {
        if (element == null) {
            return false;
        }
        String className = type.getCanonicalName();
        if (type.isInterface()) {
            return implementsInterface(types, element, className);
        } else {
            return namesEqual(element, className) || extendsClass(types, element, className);
        }
    }

    /** Returns true if {@code element} matches, extends, or implements {@code className}. */
    static boolean isInstance(Types types, TypeElement element, String className) {
        if (element == null) {
            return false;
        }
        // check name
        if (namesEqual(element, className)) {
            return true;
        }
        // check interfaces then super types
        return implementsInterface(types, element, className) ||
            extendsClass(types, element, className);
    }

    private static boolean implementsInterface(Types types, TypeElement element, String interfaceName) {
        // check name or interfaces
        if (namesEqual(element, interfaceName)) {
            return true;
        }
        TypeMirror type = element.asType();
        while (type != null && type.getKind() != TypeKind.NONE) {
            TypeElement currentElement = (TypeElement) types.asElement(type);
            if (currentElement == null) {
                break;
            }
            // Fix: inspect the interfaces of the type currently being walked
            // (currentElement), not always the original element's. The old code
            // re-checked element.getInterfaces() on every iteration, so
            // interfaces declared only by superclasses were never considered.
            List<? extends TypeMirror> interfaces = currentElement.getInterfaces();
            for (TypeMirror interfaceType : interfaces) {
                interfaceType = types.erasure(interfaceType);
                TypeElement typeElement = (TypeElement) types.asElement(interfaceType);
                if (typeElement != null && implementsInterface(types, typeElement, interfaceName)) {
                    return true;
                }
            }
            type = currentElement.getSuperclass();
        }
        return false;
    }

    private static boolean extendsClass(Types types, TypeElement element, String className) {
        if (namesEqual(element, className)) {
            return true;
        }
        // check super types
        TypeMirror superType = element.getSuperclass();
        while (superType != null && superType.getKind() != TypeKind.NONE) {
            TypeElement superTypeElement = (TypeElement) types.asElement(superType);
            if (namesEqual(superTypeElement, className)) {
                return true;
            }
            superType = superTypeElement.getSuperclass();
        }
        return false;
    }

    /** Returns true if {@code element} or a superclass (below Object) declares {@code methodName}. */
    static boolean overridesMethod(Types types, TypeElement element, String methodName) {
        while (element != null) {
            if (ElementFilter.methodsIn(element.getEnclosedElements()).stream()
                .anyMatch(method -> method.getSimpleName().contentEquals(methodName))) {
                return true;
            }
            TypeMirror superType = element.getSuperclass();
            if (superType.getKind() == TypeKind.NONE) {
                break;
            } else {
                element = (TypeElement) types.asElement(superType);
            }
            if (namesEqual(element, Object.class.getCanonicalName())) {
                break;
            }
        }
        return false;
    }

    private static boolean namesEqual(TypeElement element, String qualifiedName) {
        return element != null && element.getQualifiedName().contentEquals(qualifiedName);
    }
}
| 2,658 |
1,466 |
<gh_stars>1000+
/**
* Copyright 2004-present, Facebook, Inc.
*
* <p>Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
#include "MmapBufferTraceWriter.h"
#include <fb/fbjni.h>
#include <fb/log.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cstring>
#include <fstream>
#include <memory>
#include <stdexcept>
#include <profilo/entries/EntryType.h>
#include <profilo/logger/buffer/RingBuffer.h>
#include <profilo/mmapbuf/Buffer.h>
#include <profilo/mmapbuf/header/MmapBufferHeader.h>
#include <profilo/util/common.h>
#include <profilo/writer/TraceWriter.h>
#include <profilo/writer/trace_headers.h>
namespace facebook {
namespace profilo {
namespace mmapbuf {
namespace writer {
using namespace facebook::profilo::mmapbuf::header;
namespace {
// Sentinel value passed via `extra` to mark entries related to the trigger
// event of a Black Box trace.
constexpr int64_t kTriggerEventFlag = 0x0002000000000000L; // 1 << 49

// Key string written alongside each memory-mapping record.
static constexpr char kMemoryMappingKey[] = "l:s:u:o:s";

// Writes one StandardEntry attributed to the current thread. `id` is left 0
// so the logger assigns the entry id itself.
void loggerWrite(
    Logger& logger,
    EntryType type,
    int32_t callid,
    int32_t matchid,
    int64_t extra,
    int64_t timestamp = monotonicTime()) {
  logger.write(StandardEntry{
      .id = 0,
      .type = type,
      .timestamp = timestamp,
      .tid = threadID(),
      .callid = callid,
      .matchid = matchid,
      .extra = extra,
  });
}
// Writes a string key/value annotation: a parent entry of `type`, followed by
// STRING_KEY and STRING_VALUE byte entries chained via the returned match ids.
void loggerWriteStringAnnotation(
    Logger& logger,
    EntryType type,
    int32_t callid,
    const std::string& annotationKey,
    const std::string& annotationValue,
    int64_t extra = 0,
    int64_t timestamp = monotonicTime()) {
  StandardEntry annotationEntry{};
  annotationEntry.type = type;
  annotationEntry.tid = threadID();
  annotationEntry.timestamp = timestamp;
  annotationEntry.callid = callid;
  auto matchid = logger.write(std::move(annotationEntry));
  auto key = annotationKey.c_str();
  matchid = logger.writeBytes(
      EntryType::STRING_KEY,
      matchid,
      reinterpret_cast<const uint8_t*>(key),
      strlen(key));
  auto value = annotationValue.c_str();
  logger.writeBytes(
      EntryType::STRING_VALUE,
      matchid,
      reinterpret_cast<const uint8_t*>(value),
      strlen(value));
}

// Convenience wrapper: trace-level string annotation (TRACE_ANNOTATION).
void loggerWriteTraceStringAnnotation(
    Logger& logger,
    int32_t annotationQuicklogId,
    const std::string& annotationKey,
    const std::string& annotationValue,
    int64_t timestamp = monotonicTime()) {
  loggerWriteStringAnnotation(
      logger,
      EntryType::TRACE_ANNOTATION,
      annotationQuicklogId,
      annotationKey,
      annotationValue,
      0,
      timestamp);
}

// Convenience wrapper: QPL_ANNOTATION on the trigger marker, tagged with
// kTriggerEventFlag in `extra`.
void loggerWriteQplTriggerAnnotation(
    Logger& logger,
    int32_t marker_id,
    const std::string& annotationKey,
    const std::string& annotationValue,
    int64_t timestamp = monotonicTime()) {
  loggerWriteStringAnnotation(
      logger,
      EntryType::QPL_ANNOTATION,
      marker_id,
      annotationKey,
      annotationValue,
      kTriggerEventFlag,
      timestamp);
}
//
// Process entries from source buffer and write to the destination.
// It's okay if not all entries were successfully copied.
// Returns true when at least one entry was copied.
// NOTE(review): a previous version of this comment promised a 50%-copied
// threshold; the code only requires a single readable entry — confirm which
// contract is intended.
//
bool copyBufferEntries(TraceBuffer& source, TraceBuffer& dest) {
  TraceBuffer::Cursor cursor = source.currentTail(0);
  alignas(4) Packet packet;
  uint32_t processed_count = 0;
  while (source.tryRead(packet, cursor)) {
    Packet writePacket = packet; // copy
    dest.write(writePacket);
    ++processed_count;
    if (!cursor.moveForward()) {
      break;
    }
  }
  return processed_count > 0;
}
// Replays a saved memory-mappings file into the trace: one MAPPING entry per
// line, each chained to a STRING_KEY/STRING_VALUE pair. Best effort — returns
// silently if the file cannot be opened.
void processMemoryMappingsFile(
    Logger& logger,
    std::string& file_path,
    int64_t timestamp) {
  std::ifstream mappingsFile(file_path);
  if (!mappingsFile.is_open()) {
    return;
  }
  int32_t tid = threadID();
  std::string mappingLine;
  while (std::getline(mappingsFile, mappingLine)) {
    auto mappingId = logger.write(entries::StandardEntry{
        .type = EntryType::MAPPING,
        .timestamp = timestamp,
        .tid = tid,
    });
    auto keyId = logger.writeBytes(
        EntryType::STRING_KEY,
        mappingId,
        reinterpret_cast<const uint8_t*>(kMemoryMappingKey),
        sizeof(kMemoryMappingKey));
    logger.writeBytes(
        EntryType::STRING_VALUE,
        keyId,
        reinterpret_cast<const uint8_t*>(mappingLine.c_str()),
        mappingLine.size());
  }
}
} // namespace
// Maps the dump file and validates its header (magic and format versions).
// Returns the trace id recorded in the buffer, or 0 when the file is not a
// compatible mmap buffer dump.
int64_t MmapBufferTraceWriter::nativeInitAndVerify(
    const std::string& dump_path) {
  dump_path_ = dump_path;
  bufferMapHolder_ = std::make_unique<BufferFileMapHolder>(dump_path);
  MmapBufferPrefix* mapBufferPrefix =
      reinterpret_cast<MmapBufferPrefix*>(bufferMapHolder_->map_ptr);
  if (mapBufferPrefix->staticHeader.magic != kMagic) {
    return 0;
  }
  if (mapBufferPrefix->staticHeader.version != kVersion) {
    return 0;
  }
  if (mapBufferPrefix->header.bufferVersion != RingBuffer::kVersion) {
    return 0;
  }
  int64_t trace_id = mapBufferPrefix->header.traceId;
  trace_id_ = trace_id;
  return trace_id;
}
// JNI entry point: wraps the Java callbacks in a native proxy and delegates
// to writeTrace().
void MmapBufferTraceWriter::nativeWriteTrace(
    const std::string& type,
    bool persistent,
    const std::string& trace_folder,
    const std::string& trace_prefix,
    int32_t trace_flags,
    fbjni::alias_ref<JNativeTraceWriterCallbacks> callbacks) {
  writeTrace(
      type,
      persistent,
      trace_folder,
      trace_prefix,
      trace_flags,
      std::make_shared<NativeTraceWriterCallbacksProxy>(callbacks));
}
// Reconstructs a trace from the previously mapped dump buffer: copies the
// historic entries into a fresh in-memory buffer, prepends/append the trace
// framing and annotation entries, then hands everything to TraceWriter.
// Requires nativeInitAndVerify() to have succeeded first.
void MmapBufferTraceWriter::writeTrace(
    const std::string& type,
    bool persistent,
    const std::string& trace_folder,
    const std::string& trace_prefix,
    int32_t trace_flags,
    std::shared_ptr<TraceCallbacks> callbacks,
    uint64_t timestamp) {
  if (bufferMapHolder_.get() == nullptr) {
    throw std::runtime_error(
        "Not initialized. Method nativeInitAndVerify() should be called first.");
  }
  if (trace_id_ == 0) {
    throw std::runtime_error(
        "Buffer is not associated with a trace. Trace Id is 0.");
  }
  MmapBufferPrefix* mapBufferPrefix =
      reinterpret_cast<MmapBufferPrefix*>(bufferMapHolder_->map_ptr);
  int32_t qpl_marker_id =
      static_cast<int32_t>(mapBufferPrefix->header.longContext);
  auto entriesCount = mapBufferPrefix->header.size;
  // Number of additional records we need to log in addition to entries from the
  // buffer file + memory mappings file records + some buffer for long string
  // entries.
  constexpr auto kExtraRecordCount = 4096;
  std::shared_ptr<mmapbuf::Buffer> buffer =
      std::make_shared<mmapbuf::Buffer>(entriesCount + kExtraRecordCount);
  auto& ringBuffer = buffer->ringBuffer();
  TraceBuffer::Cursor startCursor = ringBuffer.currentHead();
  Logger::EntryIDCounter newBufferEntryID{1};
  Logger logger([&]() -> TraceBuffer& { return ringBuffer; }, newBufferEntryID);
  // It's not technically backwards trace but that's what we use to denote Black
  // Box traces.
  loggerWrite(
      logger, EntryType::TRACE_BACKWARDS, 0, trace_flags, trace_id_, timestamp);
  {
    // Copying entries from the saved buffer to the new one.
    TraceBuffer* historicBuffer = reinterpret_cast<TraceBuffer*>(
        reinterpret_cast<char*>(bufferMapHolder_->map_ptr) +
        sizeof(MmapBufferPrefix));
    bool ok = copyBufferEntries(*historicBuffer, ringBuffer);
    if (!ok) {
      throw std::runtime_error("Unable to read the file-backed buffer.");
    }
  }
  // Mark the trigger event and attach metadata recorded in the dump header.
  loggerWrite(
      logger,
      EntryType::QPL_START,
      qpl_marker_id,
      0,
      kTriggerEventFlag,
      timestamp);
  loggerWrite(
      logger,
      EntryType::TRACE_ANNOTATION,
      QuickLogConstants::APP_VERSION_CODE,
      0,
      mapBufferPrefix->header.versionCode,
      timestamp);
  loggerWrite(
      logger,
      EntryType::TRACE_ANNOTATION,
      QuickLogConstants::CONFIG_ID,
      0,
      mapBufferPrefix->header.configId,
      timestamp);
  loggerWriteTraceStringAnnotation(
      logger,
      QuickLogConstants::SESSION_ID,
      "Asl Session Id",
      std::string(mapBufferPrefix->header.sessionId));
  loggerWriteQplTriggerAnnotation(
      logger, qpl_marker_id, "type", type, timestamp);
  if (persistent) {
    loggerWriteQplTriggerAnnotation(
        logger, qpl_marker_id, "collection_method", "persistent", timestamp);
  }
  // Replay the sibling memory-mappings file (if one was recorded) from the
  // same directory as the dump.
  const char* mapsFilename = mapBufferPrefix->header.memoryMapsFilename;
  if (mapsFilename[0] != '\0') {
    auto lastSlashIdx = dump_path_.rfind("/");
    std::string mapsPath =
        dump_path_.substr(0, lastSlashIdx + 1) + mapsFilename;
    processMemoryMappingsFile(logger, mapsPath, timestamp);
  }
  loggerWrite(logger, EntryType::TRACE_END, 0, 0, trace_id_, timestamp);
  // NOTE(review): std::move on const references degrades to a copy here —
  // harmless, but the moves are no-ops.
  TraceWriter writer(
      std::move(trace_folder),
      std::move(trace_prefix),
      buffer,
      callbacks,
      calculateHeaders(mapBufferPrefix->header.pid));
  try {
    writer.processTrace(trace_id_, startCursor);
  } catch (std::exception& e) {
    FBLOGE("Error during dump processing: %s", e.what());
    callbacks->onTraceAbort(trace_id_, AbortReason::UNKNOWN);
  }
}
// Creates the C++ peer for the Java hybrid object.
fbjni::local_ref<MmapBufferTraceWriter::jhybriddata>
MmapBufferTraceWriter::initHybrid(fbjni::alias_ref<jclass>) {
  return makeCxxInstance();
}

// Registers the JNI bridge methods exposed to the Java side.
void MmapBufferTraceWriter::registerNatives() {
  registerHybrid({
      makeNativeMethod("initHybrid", MmapBufferTraceWriter::initHybrid),
      makeNativeMethod(
          "nativeWriteTrace", MmapBufferTraceWriter::nativeWriteTrace),
      makeNativeMethod(
          "nativeInitAndVerify", MmapBufferTraceWriter::nativeInitAndVerify),
  });
}
} // namespace writer
} // namespace mmapbuf
} // namespace profilo
} // namespace facebook
| 3,765 |
2,151 |
//===- AMDGPUMCInstLower.h MachineInstr Lowering Interface ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef AMDGPU_MCINSTLOWER_H
#define AMDGPU_MCINSTLOWER_H
namespace llvm {
class MCInst;
class MachineInstr;
/// Lowers AMDGPU MachineInstr objects into their MCInst representation.
class AMDGPUMCInstLower {
public:
  AMDGPUMCInstLower();
  /// lower - Lower a MachineInstr to an MCInst
  void lower(const MachineInstr *MI, MCInst &OutMI) const;
};
} // End namespace llvm
#endif //AMDGPU_MCINSTLOWER_H
| 220 |
25,151 |
package org.openqa.selenium.concurrent;
import static java.util.concurrent.TimeUnit.SECONDS;
import static java.util.logging.Level.WARNING;
import java.util.concurrent.ExecutorService;
import java.util.logging.Logger;
/**
 * Static helpers for shutting down {@link ExecutorService} instances.
 */
public class ExecutorServices {

  private static final Logger LOG = Logger.getLogger(ExecutorServices.class.getName());

  private ExecutorServices() {
    // Utility class: prevent instantiation.
  }

  /**
   * Shuts down the given executor, waiting up to five seconds for in-flight
   * tasks to finish before forcing termination via {@code shutdownNow()}.
   * Re-asserts the interrupt flag if the wait is interrupted.
   *
   * @param name human-readable service name used in log messages
   * @param service the executor to stop
   */
  public static void shutdownGracefully(String name, ExecutorService service) {
    service.shutdown();
    try {
      if (!service.awaitTermination(5, SECONDS)) {
        LOG.warning(String.format("Failed to shutdown %s", name));
        service.shutdownNow();
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      LOG.log(WARNING, String.format("Failed to shutdown %s", name), e);
      service.shutdownNow();
    }
  }
}
| 282 |
923 |
package cc.mrbird.febs.auth.service.impl;
import cc.mrbird.febs.auth.properties.FebsAuthProperties;
import cc.mrbird.febs.auth.properties.FebsValidateCodeProperties;
import cc.mrbird.febs.auth.service.ValidateCodeService;
import cc.mrbird.febs.common.core.entity.constant.FebsConstant;
import cc.mrbird.febs.common.core.entity.constant.ImageTypeConstant;
import cc.mrbird.febs.common.core.entity.constant.ParamsConstant;
import cc.mrbird.febs.common.core.exception.ValidateCodeException;
import cc.mrbird.febs.common.redis.service.RedisService;
import com.wf.captcha.GifCaptcha;
import com.wf.captcha.SpecCaptcha;
import com.wf.captcha.base.Captcha;
import lombok.RequiredArgsConstructor;
import org.apache.commons.lang3.StringUtils;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Service;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
/**
 * Captcha (validate-code) service: generates captcha images, caches their text
 * in Redis, and checks user-submitted answers against the cache.
 *
 * @author MrBird
 */
@Service
@RequiredArgsConstructor
public class ValidateCodeServiceImpl implements ValidateCodeService {

    private final RedisService redisService;
    private final FebsAuthProperties properties;

    /**
     * Generates a captcha, stores its (lower-cased) text in Redis under the
     * caller-supplied key, and streams the image to the response.
     */
    @Override
    public void create(HttpServletRequest request, HttpServletResponse response) throws IOException, ValidateCodeException {
        String key = request.getParameter(ParamsConstant.VALIDATE_CODE_KEY);
        if (StringUtils.isBlank(key)) {
            throw new ValidateCodeException("验证码key不能为空");
        }
        FebsValidateCodeProperties codeProperties = properties.getCode();
        setHeader(response, codeProperties.getType());
        Captcha captcha = createCaptcha(codeProperties);
        String cacheKey = FebsConstant.CODE_PREFIX + key;
        // Store lower-case so the later comparison can be case-insensitive.
        redisService.set(cacheKey, StringUtils.lowerCase(captcha.text()), codeProperties.getTime());
        captcha.out(response.getOutputStream());
    }

    /**
     * Validates a submitted captcha value against the cached text.
     * Check order matters for the error reported: blank input, then expiry,
     * then mismatch.
     */
    @Override
    public void check(String key, String value) throws ValidateCodeException {
        Object cached = redisService.get(FebsConstant.CODE_PREFIX + key);
        if (StringUtils.isBlank(value)) {
            throw new ValidateCodeException("请输入验证码");
        }
        if (cached == null) {
            throw new ValidateCodeException("验证码已过期");
        }
        if (!StringUtils.equalsIgnoreCase(value, String.valueOf(cached))) {
            throw new ValidateCodeException("验证码不正确");
        }
    }

    /** Builds a GIF or static captcha according to the configured type. */
    private Captcha createCaptcha(FebsValidateCodeProperties code) {
        Captcha captcha = StringUtils.equalsIgnoreCase(code.getType(), ImageTypeConstant.GIF)
                ? new GifCaptcha(code.getWidth(), code.getHeight(), code.getLength())
                : new SpecCaptcha(code.getWidth(), code.getHeight(), code.getLength());
        captcha.setCharType(code.getCharType());
        return captcha;
    }

    /** Sets the image content type and disables caching of the captcha. */
    private void setHeader(HttpServletResponse response, String type) {
        String contentType = StringUtils.equalsIgnoreCase(type, ImageTypeConstant.GIF)
                ? MediaType.IMAGE_GIF_VALUE
                : MediaType.IMAGE_PNG_VALUE;
        response.setContentType(contentType);
        response.setHeader(HttpHeaders.PRAGMA, "No-cache");
        response.setHeader(HttpHeaders.CACHE_CONTROL, "No-cache");
        response.setDateHeader(HttpHeaders.EXPIRES, 0L);
    }
}
| 1,357 |
538 |
/*
* Copyright 2016 JiongBull
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jiongbull.jlog.printer;
import android.content.Context;
import android.support.annotation.NonNull;
import com.jiongbull.jlog.constant.LogLevel;
import com.jiongbull.jlog.constant.LogSegment;
import com.jiongbull.jlog.util.PrinterUtils;
import com.jiongbull.jlog.util.TimeUtils;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
/**
 * Printer that pretty-prints JSON payloads before handing them to the sinks.
 */
public class JsonPrinter implements Printer {

    /** Number of spaces used for each level of JSON indentation. */
    private static final int JSON_INDENT = 4;

    @Override
    public void printConsole(@LogLevel String level, @NonNull String tag, @NonNull String message,
                             @NonNull StackTraceElement element) {
        String formatted = prettyPrint(message);
        PrinterUtils.printConsole(level, tag, PrinterUtils.decorateMsgForConsole(formatted, element));
    }

    @Override
    public void printFile(@NonNull Context context, @LogLevel String level, @NonNull String message,
                          @NonNull StackTraceElement element, @TimeUtils.ZoneOffset long zoneOffset,
                          @NonNull String timeFmt, @NonNull String logDir, String logPrefix,
                          @LogSegment int logSegment) {
        // Serialize file writes across all Printer implementations.
        synchronized (Printer.class) {
            PrinterUtils.printFile(context, logDir, logPrefix, logSegment, zoneOffset,
                    PrinterUtils.decorateMsgForFile(level, message, element, zoneOffset, timeFmt));
        }
    }

    /**
     * Re-indents {@code message} when it is a JSON object or array; any text
     * that cannot be parsed as JSON is returned unchanged.
     */
    private static String prettyPrint(String message) {
        try {
            if (message.startsWith("{")) {
                return new JSONObject(message).toString(JSON_INDENT);
            }
            if (message.startsWith("[")) {
                return new JSONArray(message).toString(JSON_INDENT);
            }
        } catch (JSONException ignored) {
            // Malformed JSON: fall through and log the raw text.
        }
        return message;
    }
}
| 946 |
535 |
/* Instantiate the shared strtox template for uintmax_t: TYPE and NAME
 * parameterize the template, producing the strtoumax() definition. */
#define TYPE uintmax_t
#define NAME strtoumax
#include "templates/strtox.c.template"
| 31 |
1,053 |
<filename>tos/chips/cortex/m3/sam3/u/mpu/sam3umpuhardware.h<gh_stars>1000+
/*
* Copyright (c) 2009 Stanford University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the
* distribution.
* - Neither the name of the Stanford University nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD
* UNIVERSITY OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* Definitions specific to the SAM3U ARM Cortex-M3 memory protection unit.
*
* @author <EMAIL>
*/
#ifndef SAM3UMPUHARDWARE_H
#define SAM3UMPUHARDWARE_H
// Defined in AT91 ARM Cortex-M3 based Microcontrollers, SAM3U Series, Preliminary, p. 211
typedef union
{
uint32_t flat;
struct
{
uint8_t separate : 1; // support for unified or separate instruction and data memory maps, always unified on SAM3U
uint8_t reserved0 : 7;
uint8_t dregion : 8; // number of supported MPU data regions, always 8 on SAM3U
uint8_t iregion : 8; // number of supported MPU instruction regions, always 0 on SAM3U
uint8_t reserved1 : 8;
} bits;
} mpu_type_t;
// Defined in AT91 ARM Cortex-M3 based Microcontrollers, SAM3U Series, Preliminary, p. 212
typedef union
{
uint32_t flat;
struct
{
uint8_t enable : 1; // enables the MPU
uint8_t hfnmiena : 1; // enables MPU operation during hard fault, NMI, and FAULTMASK handlers
uint8_t privdefena : 1; // enables privileged access to default memory map
uint8_t reserved0 : 5;
uint8_t reserved1 : 8;
uint8_t reserved2 : 8;
uint8_t reserved3 : 8;
} bits;
} mpu_ctrl_t;
// Defined in AT91 ARM Cortex-M3 based Microcontrollers, SAM3U Series, Preliminary, p. 214
typedef union
{
uint32_t flat;
struct
{
uint8_t region : 8; // region referenced by RBAR and RASR registers
uint8_t reserved0 : 8;
uint8_t reserved1 : 8;
uint8_t reserved2 : 8;
} bits;
} mpu_rnr_t;
// Defined in AT91 ARM Cortex-M3 based Microcontrollers, SAM3U Series, Preliminary, p. 215
typedef union
{
uint32_t flat;
struct
{
uint8_t region : 4; // MPU region field
uint8_t valid : 1; // MPU region number valid bit
uint32_t addr : 27; // region base address field, depending on the region size in RASR!
} bits;
} mpu_rbar_t;
// Defined in AT91 ARM Cortex-M3 based Microcontrollers, SAM3U Series, Preliminary, p. 216
typedef union
{
uint32_t flat;
struct
{
uint8_t enable : 1; // region enable bit
uint8_t size : 5; // size of the MPU protection region; minimum is 4 (32 B), maximum is 31 (4 GB)
uint8_t reserved0 : 2;
uint8_t srd : 8; // subregion disable bits; 0 = enabled, 1 = disabled
uint8_t b : 1; // bufferable bit
uint8_t c : 1; // cacheable bit
uint8_t s : 1; // shareable bit
uint8_t tex : 3; // type extension field
uint8_t reserved1 : 2;
uint8_t ap : 3; // access permission field
uint8_t reserved2 : 1;
uint8_t xn : 1; // instruction access disable bit; 0 = fetches enabled, 1 = fetches disabled
uint8_t reserved3 : 3;
} bits;
} mpu_rasr_t;
// Defined in AT91 ARM Cortex-M3 based Microcontrollers, SAM3U Series, Preliminary, p. 195
typedef union
{
uint8_t flat;
struct
{
uint8_t iaccviol : 1; // instruction fetch from location that does not permit execution
uint8_t daccviol : 1; // load or store at location that does not permit that
uint8_t reserved0 : 1;
uint8_t munstkerr : 1; // unstack for an exception return caused access violation(s)
uint8_t mstkerr : 1; // stacking for an exception entry caused access violation(s)
uint8_t reserved1 : 2;
uint8_t mmarvalid : 1; // MMAR holds a valid fault address
} bits;
} mpu_mmfsr_t;
// Defined in AT91 ARM Cortex-M3 based Microcontrollers, SAM3U Series, Preliminary, p. 201
typedef union
{
uint32_t flat;
struct
{
uint32_t address : 32; // when MMARVALID in MMFSR is set to 1, this holds the address of the location that caused the fault
} bits;
} mpu_mmfar_t;
// Defined in AT91 ARM Cortex-M3 based Microcontrollers, SAM3U Series, Preliminary, p. 210
volatile uint32_t* MPU_BASE = (volatile uint32_t *) 0xe000ed90;
volatile mpu_type_t* MPU_TYPE = (volatile mpu_type_t *) 0xe000ed90;
volatile mpu_ctrl_t* MPU_CTRL = (volatile mpu_ctrl_t *) 0xe000ed94;
volatile mpu_rnr_t* MPU_RNR = (volatile mpu_rnr_t *) 0xe000ed98;
volatile mpu_rbar_t* MPU_RBAR = (volatile mpu_rbar_t *) 0xe000ed9c;
volatile mpu_rasr_t* MPU_RASR = (volatile mpu_rasr_t *) 0xe000eda0;
volatile mpu_rbar_t* MPU_RBAR_A1 = (volatile mpu_rbar_t *) 0xe000eda4;
volatile mpu_rasr_t* MPU_RASR_A1 = (volatile mpu_rasr_t *) 0xe000eda8;
volatile mpu_rbar_t* MPU_RBAR_A2 = (volatile mpu_rbar_t *) 0xe000edac;
volatile mpu_rasr_t* MPU_RASR_A2 = (volatile mpu_rasr_t *) 0xe000edb0;
volatile mpu_rbar_t* MPU_RBAR_A3 = (volatile mpu_rbar_t *) 0xe000edb4;
volatile mpu_rasr_t* MPU_RASR_A3 = (volatile mpu_rasr_t *) 0xe000edb8;
// Defined in AT91 ARM Cortex-M3 based Microcontrollers, SAM3U Series, Preliminary, p. 194
volatile mpu_mmfsr_t* MPU_MMFSR = (volatile mpu_mmfsr_t *) 0xe000ed28;
// Defined in AT91 ARM Cortex-M3 based Microcontrollers, SAM3U Series, Preliminary, p. 175
volatile mpu_mmfar_t* MPU_MMFAR = (volatile mpu_mmfar_t *) 0xe000ed34;
#endif // SAM3UMPUHARDWARE_H
| 2,450 |
521 |
/* Copyright (c) 2001, Stanford University
* All rights reserved
*
* See the file LICENSE.txt for information on redistributing this software.
*/
#include "unpacker.h"
/* Unpacks a Lightfv packet ([int header][GLenum light][GLenum pname][floats]),
 * bounds-checks the pname-dependent parameter count, and dispatches glLightfv. */
void crUnpackLightfv(PCrUnpackerState pState)
{
    /* Validate that the fixed part (light + pname) fits in the buffer. */
    CHECK_BUFFER_SIZE_STATIC_LAST(pState, sizeof(int) + 4, GLenum);
    GLenum light = READ_DATA(pState, sizeof( int ) + 0, GLenum );
    GLenum pname = READ_DATA(pState, sizeof( int ) + 4, GLenum );
    GLfloat *params = DATA_POINTER(pState, sizeof( int ) + 8, GLfloat );
    /* The trailing array length depends on pname; check it before dispatch. */
    switch (pname)
    {
        case GL_AMBIENT:
        case GL_DIFFUSE:
        case GL_SPECULAR:
        case GL_POSITION:
            CHECK_ARRAY_SIZE_FROM_PTR_UPDATE_LAST(pState, params, 4, GLfloat);
            break;
        case GL_SPOT_DIRECTION:
            CHECK_ARRAY_SIZE_FROM_PTR_UPDATE_LAST(pState, params, 3, GLfloat);
            break;
        case GL_SPOT_EXPONENT:
        case GL_SPOT_CUTOFF:
        case GL_CONSTANT_ATTENUATION:
        case GL_LINEAR_ATTENUATION:
        case GL_QUADRATIC_ATTENUATION:
            CHECK_ARRAY_SIZE_FROM_PTR_UPDATE_LAST(pState, params, 1, GLfloat);
            break;
        default:
            crError("crUnpackLightfv: Invalid pname (%#x) passed!\n", pname);
            pState->rcUnpack = VERR_INVALID_PARAMETER;
            return;
    }
    pState->pDispatchTbl->Lightfv( light, pname, params );
    INCR_VAR_PTR(pState);
}
/* Unpacks a Lightiv packet ([int header][GLenum light][GLenum pname][ints]),
 * bounds-checks the pname-dependent parameter count, and dispatches glLightiv. */
void crUnpackLightiv(PCrUnpackerState pState)
{
    /* Validate that the fixed part (light + pname) fits in the buffer. */
    CHECK_BUFFER_SIZE_STATIC_LAST(pState, sizeof(int) + 4, GLenum);
    GLenum light = READ_DATA(pState, sizeof( int ) + 0, GLenum );
    GLenum pname = READ_DATA(pState, sizeof( int ) + 4, GLenum );
    GLint *params = DATA_POINTER(pState, sizeof( int ) + 8, GLint );
    /* The trailing array length depends on pname; check it before dispatch. */
    switch (pname)
    {
        case GL_AMBIENT:
        case GL_DIFFUSE:
        case GL_SPECULAR:
        case GL_POSITION:
            CHECK_ARRAY_SIZE_FROM_PTR_UPDATE_LAST(pState, params, 4, GLint);
            break;
        case GL_SPOT_DIRECTION:
            CHECK_ARRAY_SIZE_FROM_PTR_UPDATE_LAST(pState, params, 3, GLint);
            break;
        case GL_SPOT_EXPONENT:
        case GL_SPOT_CUTOFF:
        case GL_CONSTANT_ATTENUATION:
        case GL_LINEAR_ATTENUATION:
        case GL_QUADRATIC_ATTENUATION:
            CHECK_ARRAY_SIZE_FROM_PTR_UPDATE_LAST(pState, params, 1, GLint);
            break;
        default:
            /* Fixed copy-paste bug: the message previously named crUnpackLightfv. */
            crError("crUnpackLightiv: Invalid pname (%#x) passed!\n", pname);
            pState->rcUnpack = VERR_INVALID_PARAMETER;
            return;
    }
    pState->pDispatchTbl->Lightiv( light, pname, params );
    INCR_VAR_PTR(pState);
}
/* Unpacks a LightModelfv packet ([int header][GLenum pname][floats]),
 * bounds-checks the pname-dependent count, and dispatches glLightModelfv. */
void crUnpackLightModelfv(PCrUnpackerState pState)
{
    /* Validate that the pname field fits in the buffer. */
    CHECK_BUFFER_SIZE_STATIC_LAST(pState, sizeof(int) + 0, GLenum);
    GLenum pname = READ_DATA(pState, sizeof( int ) + 0, GLenum );
    GLfloat *params = DATA_POINTER(pState, sizeof( int ) + 4, GLfloat );
    /* The trailing array length depends on pname; check it before dispatch. */
    switch (pname)
    {
        case GL_LIGHT_MODEL_AMBIENT:
            CHECK_ARRAY_SIZE_FROM_PTR_UPDATE_LAST(pState, params, 4, GLfloat);
            break;
        case GL_LIGHT_MODEL_TWO_SIDE:
            CHECK_ARRAY_SIZE_FROM_PTR_UPDATE_LAST(pState, params, 1, GLfloat);
            break;
        default:
            /* Fixed copy-paste bug: the message previously named crUnpackLightfv. */
            crError("crUnpackLightModelfv: Invalid pname (%#x) passed!\n", pname);
            pState->rcUnpack = VERR_INVALID_PARAMETER;
            return;
    }
    pState->pDispatchTbl->LightModelfv( pname, params );
    INCR_VAR_PTR(pState);
}
/* Unpacks a LightModeliv packet ([int header][GLenum pname][ints]),
 * bounds-checks the pname-dependent count, and dispatches glLightModeliv. */
void crUnpackLightModeliv(PCrUnpackerState pState)
{
    /* Validate that the pname field fits in the buffer. */
    CHECK_BUFFER_SIZE_STATIC_LAST(pState, sizeof(int) + 0, GLenum);
    GLenum pname = READ_DATA(pState, sizeof( int ) + 0, GLenum );
    GLint *params = DATA_POINTER(pState, sizeof( int ) + 4, GLint );
    /* The trailing array length depends on pname; check it before dispatch. */
    switch (pname)
    {
        case GL_LIGHT_MODEL_AMBIENT:
            CHECK_ARRAY_SIZE_FROM_PTR_UPDATE_LAST(pState, params, 4, GLint);
            break;
        case GL_LIGHT_MODEL_TWO_SIDE:
            CHECK_ARRAY_SIZE_FROM_PTR_UPDATE_LAST(pState, params, 1, GLint);
            break;
        default:
            /* Fixed copy-paste bug: the message previously named crUnpackLightfv. */
            crError("crUnpackLightModeliv: Invalid pname (%#x) passed!\n", pname);
            pState->rcUnpack = VERR_INVALID_PARAMETER;
            return;
    }
    pState->pDispatchTbl->LightModeliv( pname, params );
    INCR_VAR_PTR(pState);
}
| 2,106 |
2,236 |
# Authored by : gusdn3477
# Co-authored by : -
# Link : http://boj.kr/178ebc4de06c4c1fb718d3038ec42d50
import sys


def main():
    # BOJ 21608: seat N*N students on an N*N grid, then score satisfaction.
    tokens = sys.stdin.read().split()
    idx = 0
    n = int(tokens[idx]); idx += 1

    friends = {}   # student id -> set of the 4 students it likes
    order = []     # seating order (input order)
    for _ in range(n * n):
        sid = int(tokens[idx]); idx += 1
        friends[sid] = {int(tokens[idx + k]) for k in range(4)}
        idx += 4
        order.append(sid)

    grid = [[0] * n for _ in range(n)]
    steps = ((-1, 0), (0, -1), (1, 0), (0, 1))

    def neighbours(r, c):
        # Yields the in-bounds 4-neighbourhood of (r, c).
        for dr, dc in steps:
            rr, cc = r + dr, c + dc
            if 0 <= rr < n and 0 <= cc < n:
                yield rr, cc

    # Greedy placement: most adjacent friends, then most adjacent empty
    # cells, then smallest (row, col).
    for sid in order:
        best = None
        best_key = None
        for r in range(n):
            for c in range(n):
                if grid[r][c]:
                    continue
                liked = empty = 0
                for rr, cc in neighbours(r, c):
                    cell = grid[rr][cc]
                    if cell in friends[sid]:
                        liked += 1
                    if cell == 0:
                        empty += 1
                key = (-liked, -empty, r, c)
                if best_key is None or key < best_key:
                    best_key = key
                    best = (r, c)
        grid[best[0]][best[1]] = sid

    # Satisfaction per student: 0, 1, 10, 100, 1000 for 0..4 adjacent friends.
    total = 0
    for r in range(n):
        for c in range(n):
            adjacent = sum(1 for rr, cc in neighbours(r, c)
                           if grid[rr][cc] in friends[grid[r][c]])
            if adjacent:
                total += 10 ** (adjacent - 1)
    print(total)


main()
| 1,019 |
2,151 |
<reponame>zipated/src
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_LANGUAGE_CORE_BROWSER_LANGUAGE_MODEL_H_
#define COMPONENTS_LANGUAGE_CORE_BROWSER_LANGUAGE_MODEL_H_
#include <string>
#include <vector>
#include "components/keyed_service/core/keyed_service.h"
namespace language {
// Defines a user language model represented by a ranked list of languages and
// associated scores.
class LanguageModel : public KeyedService {
 public:
  // Information about one language that a user understands.
  struct LanguageDetails {
    // Constructors are declared here and defined out of line.
    LanguageDetails();
    LanguageDetails(const std::string& in_lang_code, float in_score);
    // The language code.
    std::string lang_code;
    // A score representing the importance of the language to the user. Higher
    // scores mean that the language is of more importance to the user.
    float score;
  };
  // The set of languages that the user understands. The languages are ranked
  // from most important to least. Pure virtual: concrete models supply the
  // ranking.
  virtual std::vector<LanguageDetails> GetLanguages() = 0;
};
}  // namespace language
#endif  // COMPONENTS_LANGUAGE_CORE_BROWSER_LANGUAGE_MODEL_H_
| 385 |
1,998 |
# Package root: re-export LoggerMiddleware so callers can import it directly
# from this package instead of the submodule.
from .logger_middleware import LoggerMiddleware
__all__ = ["LoggerMiddleware"]
| 25 |
627 |
<filename>firmware/coreboot/3rdparty/chromeec/include/curve25519.h<gh_stars>100-1000
/* Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef __CROS_EC_CURVE25519_H
#define __CROS_EC_CURVE25519_H
#include <stdint.h>
/* Curve25519.
 *
 * Curve25519 is an elliptic curve. See https://tools.ietf.org/html/rfc7748.
 */
/* X25519.
 *
 * X25519 is the Diffie-Hellman primitive built from curve25519. It is
 * sometimes referred to as “curve25519”, but “X25519” is a more precise
 * name.
 * See http://cr.yp.to/ecdh.html and https://tools.ietf.org/html/rfc7748.
 */
#define X25519_PRIVATE_KEY_LEN 32
#define X25519_PUBLIC_VALUE_LEN 32
/**
 * Generate a public/private key pair.
 * @param out_public_value generated public key.
 * @param out_private_key generated private key.
 */
void X25519_keypair(uint8_t out_public_value[32], uint8_t out_private_key[32]);
/**
 * Diffie-Hellman function.
 * @param out_shared_key receives the 32-byte shared secret.
 * @param private_key our 32-byte private key.
 * @param peers_public_value the peer's 32-byte public value.
 * @return one on success and zero on error.
 *
 * X25519() writes a shared key to @out_shared_key that is calculated from the
 * given private key and the peer's public value.
 *
 * Don't use the shared key directly, rather use a KDF and also include the two
 * public values as inputs.
 */
int X25519(uint8_t out_shared_key[32], const uint8_t private_key[32],
	   const uint8_t peers_public_value[32]);
/**
 * Compute the matching public key.
 * @param out_public_value computed public key.
 * @param private_key private key to use.
 *
 * X25519_public_from_private() calculates a Diffie-Hellman public value from
 * the given private key and writes it to @out_public_value.
 */
void X25519_public_from_private(uint8_t out_public_value[32],
				const uint8_t private_key[32]);
/*
 * Low-level x25519 function, defined by either the generic or cortex-m0
 * implementation. Must not be called directly.
 */
void x25519_scalar_mult(uint8_t out[32],
			const uint8_t scalar[32],
			const uint8_t point[32]);
#endif  /* __CROS_EC_CURVE25519_H */
| 747 |
365 |
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2001-2002 by <NAME>ing BV.
* All rights reserved.
*/
/** \file
* \ingroup DNA
* \page makesdna makesdna
*
* \section aboutdna About the DNA module
*
* The DNA module holds all type definitions that are serialized in a
* blender file. There is an executable that scans all files, looking
* for struct-s to serialize (hence sdna: Struct \ref DNA). From this
* information, it builds a file with numbers that encode the format,
* the names of variables, and the place to look for them.
*
* \section dnaissues Known issues with DNA
*
* - Function pointers:
*
* Because of historical reasons, some function pointers were
* untyped. The parser/dna generator has been modified to explicitly
* handle these special cases. Most pointers have been given proper
* proto's by now. DNA_space_types.h::Spacefile::returnfunc may still
* be badly defined. The reason for this is that it is called with
* different types of arguments. It takes a char* at this moment...
*
* - Ignoring structs:
*
* Sometimes we need to define structs in DNA which aren't written
* to disk, and can be excluded from blend file DNA string.
* in this case, add two '#' chars directly before the struct. eg.
*
* \code{.c}
* #
* #
* typedef struct MyStruct {
* int value;
* } MyStruct;
* \endcode
*
* Ignored structs can only be referred to from non-ignored structs
* when referred to as a pointer (where they're usually allocated
* and cleared in ``readfile.c``).
*
* - %Path to the header files
*
* Also because of historical reasons, there is a path prefix to the
* headers that need to be scanned. This is the BASE_HEADER
* define. If you change the file-layout for DNA, you will probably
* have to change this (Not very flexible, but it is hardly ever
* changed. Sorry.).
*
* \section dnadependencies Dependencies
*
* DNA has no external dependencies (except for a few system
* includes).
*
* \section dnanote NOTE
*
* PLEASE READ INSTRUCTIONS ABOUT ADDING VARIABLES IN 'DNA' STRUCTS IN
*
* intern/dna_genfile.c
* (ton)
*/
/* This file has intentionally no definitions or implementation. */
| 827 |
4,054 |
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.intent.model;
import java.util.Map;
/**
 * A source node in an intent model tree. Represents a source with an appropriateness score
 * (i.e the score of a source node is called <i>appropriateness</i>).
 * Sources are ordered by decreasing appropriateness.
 *
 * @author bratseth
 */
public class SourceNode extends Node {

    private Source source;

    public SourceNode(Source source, double score) {
        super(score);
        this.source = source;
    }

    /** Sets the source of this node */
    public void setSource(Source source) { this.source = source; }

    /** Returns the source of this node */
    public Source getSource() { return source; }

    @Override
    void addSources(double weight, Map<Source, SourceNode> sources) {
        // Merge this node's weighted score into the aggregate for its source.
        double contribution = weight * getScore();
        SourceNode aggregate = sources.get(source);
        if (aggregate == null) {
            sources.put(source, new SourceNode(source, contribution));
        } else {
            aggregate.increaseScore(contribution);
        }
    }

    /** Returns source:appropriateness */
    @Override
    public String toString() {
        return source + ":" + getScore();
    }
}
| 413 |
1,350 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.authorization.fluent.models;
import com.azure.core.util.ExpandableStringEnum;
import com.fasterxml.jackson.annotation.JsonCreator;
import java.util.Collection;
/**
 * Defines values for MicrosoftGraphComplianceState.
 *
 * <p>Generated (AutoRest) extensible string enum: unknown service values are
 * preserved rather than rejected, unlike a plain Java enum.
 */
public final class MicrosoftGraphComplianceState extends ExpandableStringEnum<MicrosoftGraphComplianceState> {
    /** Static value unknown for MicrosoftGraphComplianceState. */
    public static final MicrosoftGraphComplianceState UNKNOWN = fromString("unknown");
    /** Static value compliant for MicrosoftGraphComplianceState. */
    public static final MicrosoftGraphComplianceState COMPLIANT = fromString("compliant");
    /** Static value noncompliant for MicrosoftGraphComplianceState. */
    public static final MicrosoftGraphComplianceState NONCOMPLIANT = fromString("noncompliant");
    /** Static value conflict for MicrosoftGraphComplianceState. */
    public static final MicrosoftGraphComplianceState CONFLICT = fromString("conflict");
    /** Static value error for MicrosoftGraphComplianceState. */
    public static final MicrosoftGraphComplianceState ERROR = fromString("error");
    /** Static value inGracePeriod for MicrosoftGraphComplianceState. */
    public static final MicrosoftGraphComplianceState IN_GRACE_PERIOD = fromString("inGracePeriod");
    /** Static value configManager for MicrosoftGraphComplianceState. */
    public static final MicrosoftGraphComplianceState CONFIG_MANAGER = fromString("configManager");
    /**
     * Creates or finds a MicrosoftGraphComplianceState from its string representation.
     *
     * @param name a name to look for.
     * @return the corresponding MicrosoftGraphComplianceState.
     */
    @JsonCreator
    public static MicrosoftGraphComplianceState fromString(String name) {
        return fromString(name, MicrosoftGraphComplianceState.class);
    }
    /** @return known MicrosoftGraphComplianceState values. */
    public static Collection<MicrosoftGraphComplianceState> values() {
        return values(MicrosoftGraphComplianceState.class);
    }
}
| 601 |
1,338 |
<filename>src/preferences/bluetooth/BluetoothWindow.h
/*
* Copyright 2008-09, <NAME>, <oliver.ruiz.dorantes_at_gmail.com>
* All rights reserved. Distributed under the terms of the MIT License.
*/
#ifndef BLUETOOTH_WINDOW_H
#define BLUETOOTH_WINDOW_H
#include "BluetoothSettingsView.h"
#include <Application.h>
#include <Button.h>
#include <Window.h>
#include <Message.h>
#include <TabView.h>
class BluetoothSettingsView;
class RemoteDevicesView;
// Main window of the Bluetooth preferences application.
class BluetoothWindow : public BWindow {
public:
	BluetoothWindow(BRect frame);
	bool QuitRequested();
	void MessageReceived(BMessage *message);
private:
	// NOTE(review): member roles inferred from names — confirm against the .cpp.
	RemoteDevicesView* fRemoteDevices;     // view for paired/remote devices
	BButton* fDefaultsButton;              // restores default settings
	BButton* fRevertButton;                // reverts unapplied changes
	BMenuBar* fMenubar;
	BluetoothSettingsView* fSettingsView;  // local adapter settings
};
#endif
| 299 |
7,137 |
<reponame>grego10/onedev
package io.onedev.server.util;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Groups a list of discrete values into runs that are contiguous with respect
 * to a reference ordering ({@code allValues}). Values absent from the
 * reference list are ignored.
 */
public class RangeBuilder {

    private final List<List<Long>> ranges = new ArrayList<>();

    /**
     * @param discreteValues values to group, in the order they should be scanned
     * @param allValues the full ordered universe defining adjacency
     */
    public RangeBuilder(List<Long> discreteValues, List<Long> allValues) {
        Map<Long, Integer> positionOf = new HashMap<>();
        for (int pos = 0; pos < allValues.size(); pos++) {
            positionOf.put(allValues.get(pos), pos);
        }
        List<Long> run = new ArrayList<>();
        int previousPos = -1;
        for (Long value : discreteValues) {
            Integer pos = positionOf.get(value);
            if (pos == null) {
                continue; // value not in the universe: skip without breaking the run
            }
            if (previousPos != -1 && pos - previousPos > 1) {
                // Gap in the reference ordering: close the current run.
                ranges.add(run);
                run = new ArrayList<>();
            }
            run.add(value);
            previousPos = pos;
        }
        if (!run.isEmpty()) {
            ranges.add(run);
        }
    }

    /** Returns the contiguous runs, in scan order. */
    public List<List<Long>> getRanges() {
        return ranges;
    }
}
| 369 |
460 |
/*
* Copyright (C) 1997 <NAME> (<EMAIL>)
* (C) 1997 <NAME> (<EMAIL>)
* (C) 1998 <NAME> (<EMAIL>)
* (C) 1999 <NAME> (<EMAIL>)
* (C) 1999 <NAME> (<EMAIL>)
* Copyright (C) 2003, 2004, 2005, 2006, 2009 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#ifndef RenderTable_h
#define RenderTable_h
#include "RenderBlock.h"
#include <wtf/Vector.h>
namespace WebCore {
class RenderTableCol;
class RenderTableCell;
class RenderTableSection;
class TableLayout;
class RenderTable : public RenderBlock {
public:
RenderTable(Node*);
int getColumnPos(int col) const { return m_columnPos[col]; }
int hBorderSpacing() const { return m_hSpacing; }
int vBorderSpacing() const { return m_vSpacing; }
bool collapseBorders() const { return style()->borderCollapse(); }
int borderLeft() const { return m_borderLeft; }
int borderRight() const { return m_borderRight; }
int borderTop() const;
int borderBottom() const;
const Color& bgColor() const { return style()->backgroundColor(); }
int outerBorderTop() const;
int outerBorderBottom() const;
int outerBorderLeft() const;
int outerBorderRight() const;
int calcBorderLeft() const;
int calcBorderRight() const;
void recalcHorizontalBorders();
virtual void addChild(RenderObject* child, RenderObject* beforeChild = 0);
struct ColumnStruct {
enum {
WidthUndefined = 0xffff
};
ColumnStruct()
: span(1)
, width(WidthUndefined)
{
}
unsigned short span;
unsigned width; // the calculated position of the column
};
Vector<ColumnStruct>& columns() { return m_columns; }
Vector<int>& columnPositions() { return m_columnPos; }
RenderTableSection* header() const { return m_head; }
RenderTableSection* footer() const { return m_foot; }
RenderTableSection* firstBody() const { return m_firstBody; }
void splitColumn(int pos, int firstSpan);
void appendColumn(int span);
int numEffCols() const { return m_columns.size(); }
int spanOfEffCol(int effCol) const { return m_columns[effCol].span; }
int colToEffCol(int col) const
{
int i = 0;
int effCol = numEffCols();
for (int c = 0; c < col && i < effCol; ++i)
c += m_columns[i].span;
return i;
}
int effColToCol(int effCol) const
{
int c = 0;
for (int i = 0; i < effCol; i++)
c += m_columns[i].span;
return c;
}
int bordersPaddingAndSpacing() const
{
return borderLeft() + borderRight() +
(collapseBorders() ? 0 : (paddingLeft() + paddingRight() + (numEffCols() + 1) * hBorderSpacing()));
}
RenderTableCol* colElement(int col, bool* startEdge = 0, bool* endEdge = 0) const;
RenderTableCol* nextColElement(RenderTableCol* current) const;
bool needsSectionRecalc() const { return m_needsSectionRecalc; }
void setNeedsSectionRecalc()
{
if (documentBeingDestroyed())
return;
m_needsSectionRecalc = true;
setNeedsLayout(true);
}
RenderTableSection* sectionAbove(const RenderTableSection*, bool skipEmptySections = false) const;
RenderTableSection* sectionBelow(const RenderTableSection*, bool skipEmptySections = false) const;
RenderTableCell* cellAbove(const RenderTableCell*) const;
RenderTableCell* cellBelow(const RenderTableCell*) const;
RenderTableCell* cellBefore(const RenderTableCell*) const;
RenderTableCell* cellAfter(const RenderTableCell*) const;
const CollapsedBorderValue* currentBorderStyle() const { return m_currentBorder; }
bool hasSections() const { return m_head || m_foot || m_firstBody; }
void recalcSectionsIfNeeded() const
{
if (m_needsSectionRecalc)
recalcSections();
}
protected:
virtual void styleDidChange(StyleDifference, const RenderStyle* oldStyle);
private:
virtual const char* renderName() const { return "RenderTable"; }
virtual bool isTable() const { return true; }
virtual bool avoidsFloats() const { return true; }
virtual void removeChild(RenderObject* oldChild);
virtual void paint(PaintInfo&, int tx, int ty);
virtual void paintObject(PaintInfo&, int tx, int ty);
virtual void paintBoxDecorations(PaintInfo&, int tx, int ty);
virtual void paintMask(PaintInfo&, int tx, int ty);
virtual void layout();
virtual void calcPrefWidths();
virtual bool nodeAtPoint(const HitTestRequest&, HitTestResult&, int xPos, int yPos, int tx, int ty, HitTestAction);
virtual int firstLineBoxBaseline() const;
virtual RenderBlock* firstLineBlock() const;
virtual void updateFirstLetter();
virtual void setCellWidths();
virtual void calcWidth();
virtual IntRect overflowClipRect(int tx, int ty);
void recalcSections() const;
mutable Vector<int> m_columnPos;
mutable Vector<ColumnStruct> m_columns;
mutable RenderBlock* m_caption;
mutable RenderTableSection* m_head;
mutable RenderTableSection* m_foot;
mutable RenderTableSection* m_firstBody;
OwnPtr<TableLayout> m_tableLayout;
const CollapsedBorderValue* m_currentBorder;
mutable bool m_hasColElements : 1;
mutable bool m_needsSectionRecalc : 1;
short m_hSpacing;
short m_vSpacing;
int m_borderLeft;
int m_borderRight;
};
// Checked downcast to RenderTable. In debug builds, asserts that the object
// really is a table; a null pointer is allowed and passes through unchanged.
inline RenderTable* toRenderTable(RenderObject* object)
{
    ASSERT(!object || object->isTable());
    return static_cast<RenderTable*>(object);
}
// Const-qualified variant of the checked RenderTable downcast.
inline const RenderTable* toRenderTable(const RenderObject* object)
{
    ASSERT(!object || object->isTable());
    return static_cast<const RenderTable*>(object);
}

// This will catch anyone doing an unnecessary cast.
// (Deliberately declared but never defined: calling toRenderTable on a
// pointer that is already a RenderTable* resolves to this overload and
// fails at link time.)
void toRenderTable(const RenderTable*);
} // namespace WebCore
#endif // RenderTable_h
| 2,645 |
364 |
// AUTOGENERATED CODE. DO NOT MODIFY DIRECTLY! Instead, please modify the util/function/VariadicFunction.ftl file.
// See the README in the module's src/template directory for details.
package com.linkedin.dagli.util.function;
import com.linkedin.dagli.util.exception.Exceptions;
@FunctionalInterface
public interface FunctionVariadic<A, R> extends FunctionBase {
  /**
   * Applies this function to the given variadic arguments.
   *
   * @param args the arguments to the function
   * @return the function result
   */
  R apply(A... args);

  /**
   * Wraps a {@link Checked} variadic function so it can be used where an unchecked
   * {@link FunctionVariadic} is expected; any {@link Throwable} thrown by the wrapped
   * function is rethrown as a runtime exception.
   *
   * @param checkedFunction the function that may throw a checked exception
   * @return an unchecked wrapper around {@code checkedFunction}
   */
  static <A, R> FunctionVariadic<A, R> unchecked(Checked<A, R, ?> checkedFunction) {
    return (A... args) -> {
      try {
        return checkedFunction.apply(args);
      } catch (Throwable e) {
        // Convert to an unchecked exception so the lambda satisfies FunctionVariadic.
        throw Exceptions.asRuntimeException(e);
      }
    };
  }

  /**
   * Variant of {@link FunctionVariadic} whose {@code apply} may throw a checked exception.
   *
   * @param <X> the type of checked exception that may be thrown
   */
  @FunctionalInterface
  interface Checked<A, R, X extends Throwable> extends FunctionBase {
    R apply(A... args) throws X;
  }

  /** {@link java.io.Serializable} flavor of {@link FunctionVariadic}. */
  interface Serializable<A, R> extends FunctionVariadic<A, R>, java.io.Serializable {
  }
}
| 305 |
2,290 |
package com.robotium.solo;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import android.app.Instrumentation;
import android.content.Context;
import android.os.SystemClock;
import android.view.View;
import android.view.ViewGroup;
import android.view.ViewParent;
import android.view.WindowManager;
import android.webkit.WebView;
import android.widget.TextView;
/**
* Contains view methods. Examples are getViews(),
* getCurrentTextViews(), getCurrentImageViews().
*
* @author <NAME>, <EMAIL>
*
*/
class ViewFetcher {

    // Name of the static field on the window-manager class that holds its
    // singleton instance; depends on the Android SDK level (see setWindowManagerString()).
    private String windowManagerString;
    private Instrumentation instrumentation;
    private Sleeper sleeper;

    /**
     * Constructs this object.
     *
     * @param instrumentation the {@code Instrumentation} instance.
     * @param sleeper the {@code Sleeper} used to pause before re-querying the view tree
     */
    public ViewFetcher(Instrumentation instrumentation, Sleeper sleeper) {
        this.instrumentation = instrumentation;
        this.sleeper = sleeper;
        setWindowManagerString();
    }

    /**
     * Returns the absolute top parent {@code View} for a given {@code View}.
     *
     * @param view the {@code View} whose top parent is requested
     * @return the top parent {@code View}
     */
    public View getTopParent(View view) {
        final ViewParent viewParent = view.getParent();
        // Stop climbing once the parent is no longer itself a View
        // (e.g. the ViewRootImpl at the top of the hierarchy).
        if (viewParent != null
                && viewParent instanceof android.view.View) {
            return getTopParent((View) viewParent);
        } else {
            return view;
        }
    }

    /**
     * Returns the scroll or list parent view.
     *
     * @param view the view whose parent should be returned
     * @return the closest ancestor that is an AbsListView, ScrollView or WebView,
     *         or null if there is none
     */
    public View getScrollOrListParent(View view) {

        if (!(view instanceof android.widget.AbsListView) && !(view instanceof android.widget.ScrollView) && !(view instanceof WebView)) {
            try{
                return getScrollOrListParent((View) view.getParent());
            }catch(Exception e){
                // Parent was null or not a View: no scrolling container in the chain.
                return null;
            }
        } else {
            return view;
        }
    }

    /**
     * Returns views from the shown DecorViews.
     *
     * @param onlySufficientlyVisible if only sufficiently visible views should be returned
     * @return all the views contained in the DecorViews
     */
    public ArrayList<View> getAllViews(boolean onlySufficientlyVisible) {
        final View[] views = getWindowDecorViews();
        final ArrayList<View> allViews = new ArrayList<View>();
        final View[] nonDecorViews = getNonDecorViews(views);
        View view = null;

        // First collect the children of every non-decor window (dialogs, popups, ...).
        // Note: getNonDecorViews() may leave trailing null slots in its array.
        if(nonDecorViews != null){
            for(int i = 0; i < nonDecorViews.length; i++){
                view = nonDecorViews[i];
                try {
                    addChildren(allViews, (ViewGroup)view, onlySufficientlyVisible);
                } catch (Exception ignored) {}
                if(view != null) allViews.add(view);
            }
        }

        // Then the children of the most recently drawn decor view.
        if (views != null && views.length > 0) {
            view = getRecentDecorView(views);
            try {
                addChildren(allViews, (ViewGroup)view, onlySufficientlyVisible);
            } catch (Exception ignored) {}
            if(view != null) allViews.add(view);
        }

        return allViews;
    }

    /**
     * Returns the most recent DecorView.
     *
     * @param views the views to check
     * @return the most recent DecorView
     */
    public final View getRecentDecorView(View[] views) {
        if(views == null)
            return null;

        final View[] decorViews = new View[views.length];
        int i = 0;
        View view;

        for (int j = 0; j < views.length; j++) {
            view = views[j];
            if (isDecorView(view)){
                decorViews[i] = view;
                i++;
            }
        }
        return getRecentContainer(decorViews);
    }

    /**
     * Returns the most recent view container.
     *
     * @param views the views to check (null entries are skipped)
     * @return the most recently drawn, shown and focused view container
     */
    private final View getRecentContainer(View[] views) {
        View container = null;
        long drawingTime = 0;
        View view;

        for(int i = 0; i < views.length; i++){
            view = views[i];
            // Pick the focused, shown view with the latest drawing time.
            if (view != null && view.isShown() && view.hasWindowFocus() && view.getDrawingTime() > drawingTime) {
                container = view;
                drawingTime = view.getDrawingTime();
            }
        }
        return container;
    }

    /**
     * Returns all views that are non DecorViews.
     *
     * @param views the views to check
     * @return the non DecorViews (array may contain trailing null slots), or null
     *         if {@code views} is null
     */
    private final View[] getNonDecorViews(View[] views) {
        View[] decorViews = null;

        if(views != null) {
            decorViews = new View[views.length];

            int i = 0;
            View view;

            for (int j = 0; j < views.length; j++) {
                view = views[j];
                if (!isDecorView(view)) {
                    decorViews[i] = view;
                    i++;
                }
            }
        }
        return decorViews;
    }

    /**
     * Returns whether a view is a DecorView.
     *
     * @param view the view to check
     * @return true if view is a DecorView, false otherwise
     */
    private boolean isDecorView(View view) {
        if (view == null) {
            return false;
        }

        // DecorView is an internal framework class; its fully-qualified name
        // varies across Android versions, so compare against the known variants.
        final String nameOfClass = view.getClass().getName();
        return (nameOfClass.equals("com.android.internal.policy.impl.PhoneWindow$DecorView") ||
                nameOfClass.equals("com.android.internal.policy.impl.MultiPhoneWindow$MultiPhoneDecorView") ||
                nameOfClass.equals("com.android.internal.policy.PhoneWindow$DecorView"));
    }

    /**
     * Extracts all {@code View}s located in the currently active {@code Activity}, recursively.
     *
     * @param parent the {@code View} whose children should be returned, or {@code null} for all
     * @param onlySufficientlyVisible if only sufficiently visible views should be returned
     * @return all {@code View}s located in the currently active {@code Activity}, never {@code null}
     */
    public ArrayList<View> getViews(View parent, boolean onlySufficientlyVisible) {
        final ArrayList<View> views = new ArrayList<View>();
        final View parentToUse;

        if (parent == null){
            return getAllViews(onlySufficientlyVisible);
        }else{
            parentToUse = parent;

            views.add(parentToUse);

            if (parentToUse instanceof ViewGroup) {
                addChildren(views, (ViewGroup) parentToUse, onlySufficientlyVisible);
            }
        }
        return views;
    }

    /**
     * Adds all children of {@code viewGroup} (recursively) into {@code views}.
     *
     * @param views an {@code ArrayList} of {@code View}s
     * @param viewGroup the {@code ViewGroup} to extract children from
     * @param onlySufficientlyVisible if only sufficiently visible views should be returned
     */
    private void addChildren(ArrayList<View> views, ViewGroup viewGroup, boolean onlySufficientlyVisible) {
        if(viewGroup != null){
            for (int i = 0; i < viewGroup.getChildCount(); i++) {
                final View child = viewGroup.getChildAt(i);

                if(onlySufficientlyVisible && isViewSufficientlyShown(child)) {
                    views.add(child);
                }
                else if(!onlySufficientlyVisible && child != null) {
                    views.add(child);
                }

                if (child instanceof ViewGroup) {
                    addChildren(views, (ViewGroup) child, onlySufficientlyVisible);
                }
            }
        }
    }

    /**
     * Returns true if the view is sufficiently shown.
     *
     * @param view the view to check
     * @return true if the view is sufficiently shown
     */
    public final boolean isViewSufficientlyShown(View view){
        final int[] xyView = new int[2];
        final int[] xyParent = new int[2];

        if(view == null)
            return false;

        final float viewHeight = view.getHeight();
        final View parent = getScrollOrListParent(view);
        view.getLocationOnScreen(xyView);

        if(parent == null){
            xyParent[1] = 0;
        }
        else{
            parent.getLocationOnScreen(xyParent);
        }

        // A view counts as "sufficiently shown" when its vertical midpoint lies
        // inside its scrolling container (or the screen, if it has no container).
        if(xyView[1] + (viewHeight/2.0f) > getScrollListWindowHeight(view))
            return false;
        else if(xyView[1] + (viewHeight/2.0f) < xyParent[1])
            return false;

        return true;
    }

    /**
     * Returns the height of the scroll or list view parent.
     *
     * @param view the view whose parent's height should be returned
     * @return the bottom edge (on screen) of the scroll or list view parent, or the
     *         display height when the view has no scrolling parent
     */
    @SuppressWarnings("deprecation")
    public float getScrollListWindowHeight(View view) {
        final int[] xyParent = new int[2];
        View parent = getScrollOrListParent(view);
        final float windowHeight;

        if(parent == null){
            WindowManager windowManager = (WindowManager)
                    instrumentation.getTargetContext().getSystemService(Context.WINDOW_SERVICE);

            windowHeight = windowManager.getDefaultDisplay().getHeight();
        }

        else{
            parent.getLocationOnScreen(xyParent);
            windowHeight = xyParent[1] + parent.getHeight();
        }
        parent = null;
        return windowHeight;
    }

    /**
     * Returns an {@code ArrayList} of {@code View}s of the specified {@code Class} located in the current
     * {@code Activity}.
     *
     * @param classToFilterBy return all instances of this class, e.g. {@code Button.class} or {@code GridView.class}
     * @param includeSubclasses include instances of the subclasses in the {@code ArrayList} that will be returned
     * @return an {@code ArrayList} of {@code View}s of the specified {@code Class} located in the current {@code Activity}
     */
    public <T extends View> ArrayList<T> getCurrentViews(Class<T> classToFilterBy, boolean includeSubclasses) {
        return getCurrentViews(classToFilterBy, includeSubclasses, null);
    }

    /**
     * Returns an {@code ArrayList} of {@code View}s of the specified {@code Class} located under the specified {@code parent}.
     *
     * @param classToFilterBy return all instances of this class, e.g. {@code Button.class} or {@code GridView.class}
     * @param includeSubclasses include instances of subclasses in {@code ArrayList} that will be returned
     * @param parent the parent {@code View} for where to start the traversal
     * @return an {@code ArrayList} of {@code View}s of the specified {@code Class} located under the specified {@code parent}
     */
    public <T extends View> ArrayList<T> getCurrentViews(Class<T> classToFilterBy, boolean includeSubclasses, View parent) {
        ArrayList<T> filteredViews = new ArrayList<T>();
        List<View> allViews = getViews(parent, true);

        for(View view : allViews){
            if (view == null) {
                continue;
            }
            Class<? extends View> classOfView = view.getClass();
            // Either an exact class match or, when requested, any subclass.
            if (includeSubclasses && classToFilterBy.isAssignableFrom(classOfView) || !includeSubclasses && classToFilterBy == classOfView) {
                filteredViews.add(classToFilterBy.cast(view));
            }
        }
        allViews = null;
        return filteredViews;
    }

    /**
     * Tries to guess which view is the most likely to be interesting. Returns
     * the most recently drawn view, which presumably will be the one that the
     * user was most recently interacting with.
     *
     * @param views a list of potentially interesting views, likely a collection
     *            of views from a set of types, such as [{@code Button},
     *            {@code TextView}] or [{@code ScrollView}, {@code ListView}]
     * @return the most recently drawn view, or null if no views were passed
     */
    public final <T extends View> T getFreshestView(ArrayList<T> views){
        final int[] locationOnScreen = new int[2];
        T viewToReturn = null;
        long drawingTime = 0;

        if(views == null){
            return null;
        }

        for(T view : views){
            if(view != null){
                view.getLocationOnScreen(locationOnScreen);

                // Skip views that are off-screen to the left or have no height.
                if (locationOnScreen[0] < 0 || !(view.getHeight() > 0)){
                    continue;
                }

                if(view.getDrawingTime() > drawingTime){
                    drawingTime = view.getDrawingTime();
                    viewToReturn = view;
                }
                // On a drawing-time tie, prefer the focused view.
                else if (view.getDrawingTime() == drawingTime){
                    if(view.isFocused()){
                        viewToReturn = view;
                    }
                }
            }
        }
        views = null;
        return viewToReturn;
    }

    /**
     * Waits for a RecyclerView and returns it.
     *
     * @param recyclerViewIndex the index of the RecyclerView
     * @param timeOut how long (in milliseconds) to keep polling before giving up
     * @return {@code ViewGroup} if RecycleView is displayed, null on timeout
     */
    public <T extends View> ViewGroup getRecyclerView(int recyclerViewIndex, int timeOut) {
        final long endTime = SystemClock.uptimeMillis() + timeOut;

        while (SystemClock.uptimeMillis() < endTime) {
            View recyclerView = getRecyclerView(true, recyclerViewIndex);
            if(recyclerView != null){
                return (ViewGroup) recyclerView;
            }
        }
        return null;
    }

    /**
     * Returns a RecyclerView or null if none is found.
     *
     * @param shouldSleep whether to pause (via the Sleeper) before querying the view tree
     * @param recyclerViewIndex the index of the RecyclerView to return
     * @return the RecyclerView at the given index, or null
     */
    public View getRecyclerView(boolean shouldSleep, int recyclerViewIndex){
        Set<View> uniqueViews = new HashSet<View>();
        if(shouldSleep){
            sleeper.sleep();
        }

        @SuppressWarnings("unchecked")
        ArrayList<View> views = RobotiumUtils.filterViewsToSet(new Class[] {ViewGroup.class}, getAllViews(false));
        views = RobotiumUtils.removeInvisibleViews(views);

        for(View view : views){

            if(isViewType(view.getClass(), "widget.RecyclerView")){
                uniqueViews.add(view);
            }

            if(uniqueViews.size() > recyclerViewIndex) {
                return (ViewGroup) view;
            }
        }
        return null;
    }

    /**
     * Returns a Set of all RecyclerView or empty Set if none is found.
     *
     * @param shouldSleep whether to pause (via the Sleeper) before querying the view tree
     * @return a list of RecyclerViews and NestedScrollViews (support-package scrollables)
     */
    public List<View> getScrollableSupportPackageViews(boolean shouldSleep){
        List <View> viewsToReturn = new ArrayList<View>();
        if(shouldSleep){
            sleeper.sleep();
        }

        @SuppressWarnings("unchecked")
        ArrayList<View> views = RobotiumUtils.filterViewsToSet(new Class[] {ViewGroup.class}, getAllViews(true));
        views = RobotiumUtils.removeInvisibleViews(views);

        for(View view : views){

            if(isViewType(view.getClass(), "widget.RecyclerView") ||
                    isViewType(view.getClass(), "widget.NestedScrollView")){
                viewsToReturn.add(view);
            }

        }
        return viewsToReturn;
    }

    // Walks up the class hierarchy checking whether any ancestor's fully-qualified
    // name contains typeName. Name-based so it works without compile-time
    // dependencies on the (optional) support/androidx libraries.
    private boolean isViewType(Class<?> aClass, String typeName) {
        if (aClass.getName().contains(typeName)) {
            return true;
        }

        if (aClass.getSuperclass() != null) {
            return isViewType(aClass.getSuperclass(), typeName);
        }

        return false;
    }

    /**
     * Returns an identical View to the one specified.
     *
     * @param view the view to find
     * @return identical view of the specified view
     */
    public View getIdenticalView(View view) {
        if(view == null){
            return null;
        }
        View viewToReturn = null;
        List<? extends View> visibleViews = RobotiumUtils.removeInvisibleViews(getCurrentViews(view.getClass(), true));

        for(View v : visibleViews){
            if(areViewsIdentical(v, view)){
                viewToReturn = v;
                break;
            }
        }

        return viewToReturn;
    }

    /**
     * Compares if the specified views are identical. This is used instead of View.compare
     * as it always returns false in cases where the View tree is refreshed.
     *
     * @param firstView the first view
     * @param secondView the second view
     * @return true if views are equal
     */
    private boolean areViewsIdentical(View firstView, View secondView){
        if(firstView.getId() != secondView.getId() || !firstView.getClass().isAssignableFrom(secondView.getClass())){
            return false;
        }

        // Same id and compatible class: recurse up the parent chains so two views
        // only match when their ancestries match as well.
        if (firstView.getParent() != null && firstView.getParent() instanceof View &&
                secondView.getParent() != null && secondView.getParent() instanceof View) {
            return areViewsIdentical((View) firstView.getParent(), (View) secondView.getParent());
        } else {
            return true;
        }
    }

    // The hidden framework class that tracks all attached windows; resolved once.
    private static Class<?> windowManager;
    static{
        try {
            String windowManagerClassName;
            // The implementation class moved in API 17 (4.2).
            if (android.os.Build.VERSION.SDK_INT >= 17) {
                windowManagerClassName = "android.view.WindowManagerGlobal";
            } else {
                windowManagerClassName = "android.view.WindowManagerImpl";
            }
            windowManager = Class.forName(windowManagerClassName);

        } catch (ClassNotFoundException e) {
            throw new RuntimeException(e);
        } catch (SecurityException e) {
            e.printStackTrace();
        }
    }

    /**
     * Returns the WindowDecorViews shown on the screen.
     *
     * @return the WindowDecorViews shown on the screen
     */
    @SuppressWarnings("unchecked")
    public View[] getWindowDecorViews()
    {
        Field viewsField;
        Field instanceField;
        try {
            // Reflectively read the window manager singleton's private list of
            // attached root views ("mViews").
            viewsField = windowManager.getDeclaredField("mViews");
            instanceField = windowManager.getDeclaredField(windowManagerString);
            viewsField.setAccessible(true);
            instanceField.setAccessible(true);
            Object instance = instanceField.get(null);
            View[] result;
            // mViews changed from View[] to ArrayList<View> in API 19 (4.4).
            if (android.os.Build.VERSION.SDK_INT >= 19) {
                result = ((ArrayList<View>) viewsField.get(instance)).toArray(new View[0]);
            } else {
                result = (View[]) viewsField.get(instance);
            }
            return result;
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }

    /**
     * Sets the window manager string (the name of the static singleton field on
     * the window-manager class, which varies by SDK level).
     */
    private void setWindowManagerString(){

        if (android.os.Build.VERSION.SDK_INT >= 17) {
            windowManagerString = "sDefaultWindowManager";

        } else if(android.os.Build.VERSION.SDK_INT >= 13) {
            windowManagerString = "sWindowManager";

        } else {
            windowManagerString = "mWindowManager";
        }
    }

}
| 5,767 |
854 |
__________________________________________________________________________________________________
sample 196 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
        """Return the sum of values of all BST nodes with value in [L, R].

        Iterative formulation: walks the tree with an explicit stack and
        prunes subtrees that the BST ordering guarantees lie outside the
        range (left subtree when val < L, right subtree when val > R).
        """
        total = 0
        pending = [root]
        while pending:
            node = pending.pop()
            if node is None:
                continue
            if node.val < L:
                # Everything in node's left subtree is also < L: skip it.
                pending.append(node.right)
            elif node.val > R:
                # Everything in node's right subtree is also > R: skip it.
                pending.append(node.left)
            else:
                total += node.val
                pending.append(node.left)
                pending.append(node.right)
        return total
__________________________________________________________________________________________________
sample 21528 kb submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
        """Return the sum of values of all nodes with value in [L, R].

        This variant visits every node (no BST pruning), adding the node's
        own value only when it falls inside the closed range.
        """
        if root is None:
            return 0
        subtotal = self.rangeSumBST(root.left, L, R) + self.rangeSumBST(root.right, L, R)
        if L <= root.val <= R:
            subtotal += root.val
        return subtotal
def stringToTreeNode(input):
    """Build a binary tree from a LeetCode level-order string like "[1,2,null,3]".

    Children are attached breadth-first; the token "null" marks an absent
    child. Returns None for an empty list such as "[]".
    """
    body = input.strip()[1:-1]
    if not body:
        return None

    tokens = [piece.strip() for piece in body.split(',')]
    root = TreeNode(int(tokens[0]))
    queue = [root]
    front = 0
    index = 1
    total = len(tokens)

    while index < total:
        # Take the next node awaiting children (IndexError on malformed
        # input with more value tokens than available slots, as before).
        node = queue[front]
        front += 1

        token = tokens[index]
        index += 1
        if token != "null":
            node.left = TreeNode(int(token))
            queue.append(node.left)

        if index >= total:
            break

        token = tokens[index]
        index += 1
        if token != "null":
            node.right = TreeNode(int(token))
            queue.append(node.right)

    return root
__________________________________________________________________________________________________
| 1,000 |
2,434 |
/*
* Copyright 2017 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.swagger2markup.extension;
import io.github.swagger2markup.OpenAPI2MarkupConverter.OpenAPIContext;
/**
 * An abstract OpenAPI extension which must be extended by other OpenAPI extensions
*/
abstract class AbstractExtension implements Extension {

    // Shared converter context; remains null until setGlobalContext() is called.
    protected OpenAPIContext globalContext;

    /**
     * Global context lazy initialization.
     *
     * @param globalContext Global context
     */
    public void setGlobalContext(OpenAPIContext globalContext) {
        this.globalContext = globalContext;
    }
}
| 326 |
521 |
#include "nsILocalFile.h"
#if 0 /* too new */
#include "nsStringGlue.h"
#else
#include "nsString.h"
#endif
#include <stdio.h>
#include "nsXPCOM.h"
#include "nsIComponentManager.h"
#include "nsIComponentRegistrar.h"
#include "nsIServiceManager.h"
#include "nsIMemory.h"
#include "nsComponentManagerUtils.h"
#include "nsCOMPtr.h"
void Passed();
void Failed(const char* explanation = nsnull);
void Inspect();
void Banner(const char* bannerString);
// Reports a failure (via Failed()) when rv carries an error code;
// successful results are silently accepted.
void VerifyResult(nsresult rv)
{
    if (NS_FAILED(rv))
    {
        Failed("rv failed");
        printf("rv = %d\n", rv);  // NOTE(review): nsresult is conventionally printed in hex
    }
}
//----------------------------------------------------------------------------
void Banner(const char* bannerString)
//----------------------------------------------------------------------------
{
    // Emit the whole banner (rule line, title, rule line) in one formatted write;
    // the bytes written to stdout are identical to three separate printf calls.
    printf("---------------------------\n%s\n---------------------------\n",
           bannerString);
}
//----------------------------------------------------------------------------
// Prints the success marker.
// Fix: the message previously lacked a trailing newline, so the next line of
// output ran onto the same line; every other reporter here ends its line.
void Passed()
//----------------------------------------------------------------------------
{
    printf("Test passed.\n");
}
//----------------------------------------------------------------------------
// Prints a failure report with an optional explanation.
// Fix: the forward declaration defaults |explanation| to nsnull, so calling
// Failed() with no argument passed a null pointer to printf's %s, which is
// undefined behavior. Substitute a placeholder when no reason is supplied.
void Failed(const char* explanation)
//----------------------------------------------------------------------------
{
    printf("ERROR : Test failed.\n");
    printf("REASON: %s.\n", explanation ? explanation : "(no reason given)");
}
//----------------------------------------------------------------------------
void Inspect()
//----------------------------------------------------------------------------
{
    // Plain string, no format specifiers needed; fputs writes the same bytes.
    fputs("^^^^^^^^^^ PLEASE INSPECT OUTPUT FOR ERRORS\n", stdout);
}
// Prints the native (platform-encoded) path of |file| to stdout.
void GetPaths(nsILocalFile* file)
{
    nsresult rv;
    nsCAutoString pathName;

    printf("Getting Path\n");
    rv = file->GetNativePath(pathName);
    VerifyResult(rv);

    printf("filepath: %s\n", pathName.get());
}
void InitTest(const char* creationPath, const char* appendPath)
{
nsILocalFile* file = nsnull;
nsresult rv = CallCreateInstance(NS_LOCAL_FILE_CONTRACTID, &file);
if (NS_FAILED(rv) || (!file))
{
printf("create nsILocalFile failed\n");
return;
}
nsCAutoString leafName;
Banner("InitWithPath");
printf("creationPath == %s\nappendPath == %s\n", creationPath, appendPath);
rv = file->InitWithNativePath(nsDependentCString(creationPath));
VerifyResult(rv);
printf("Getting Filename\n");
rv = file->GetNativeLeafName(leafName);
printf(" %s\n", leafName.get());
VerifyResult(rv);
printf("Appending %s \n", appendPath);
rv = file->AppendNative(nsDependentCString(appendPath));
VerifyResult(rv);
printf("Getting Filename\n");
rv = file->GetNativeLeafName(leafName);
printf(" %s\n", leafName.get());
VerifyResult(rv);
GetPaths(file);
printf("Check For Existence\n");
PRBool exists;
file->Exists(&exists);
if (exists)
printf("Yup!\n");
else
printf("no.\n");
}
// Creates a file-system object (file or directory, per |whatToCreate|) at
// creationPath/appendPath and verifies that it exists afterwards.
void CreationTest(const char* creationPath, const char* appendPath,
                  PRInt32 whatToCreate, PRInt32 perm)
{
    nsresult rv;
    nsCOMPtr<nsILocalFile> file =
        do_CreateInstance(NS_LOCAL_FILE_CONTRACTID, &rv);
    if (NS_FAILED(rv) || (!file))
    {
        printf("create nsILocalFile failed\n");
        return;
    }

    Banner("Creation Test");
    printf("creationPath == %s\nappendPath == %s\n", creationPath, appendPath);

    rv = file->InitWithNativePath(nsDependentCString(creationPath));
    VerifyResult(rv);

    printf("Appending %s\n", appendPath);
    // Unlike the other helpers this uses AppendRelativeNativePath, which
    // accepts multi-component relative paths (e.g. "a/b/c").
    rv = file->AppendRelativeNativePath(nsDependentCString(appendPath));
    VerifyResult(rv);

    printf("Check For Existence\n");
    PRBool exists;
    file->Exists(&exists);
    if (exists)
        printf("Yup!\n");
    else
        printf("no.\n");

    rv = file->Create(whatToCreate, perm);
    VerifyResult(rv);

    rv = file->Exists(&exists);
    VerifyResult(rv);
    if (!exists)
    {
        Failed("Did not create file system object!");
        return;
    }
}
// Creates a uniquely-named file-system object via CreateUnique() (which
// appends "-1", "-2", ... when the name is taken) and verifies it exists.
//
// Fix: the banner previously said "Creation Test" — a copy-paste from
// CreationTest() that mislabels this test's output.
void CreateUniqueTest(const char* creationPath, const char* appendPath,
                      PRInt32 whatToCreate, PRInt32 perm)
{
    nsresult rv;
    nsCOMPtr<nsILocalFile> file =
        do_CreateInstance(NS_LOCAL_FILE_CONTRACTID, &rv);
    if (NS_FAILED(rv) || (!file))
    {
        printf("create nsILocalFile failed\n");
        return;
    }

    Banner("Create Unique Test");
    printf("creationPath == %s\nappendPath == %s\n", creationPath, appendPath);

    rv = file->InitWithNativePath(nsDependentCString(creationPath));
    VerifyResult(rv);

    printf("Appending %s\n", appendPath);
    rv = file->AppendNative(nsDependentCString(appendPath));
    VerifyResult(rv);

    printf("Check For Existence\n");
    PRBool exists;
    file->Exists(&exists);
    if (exists)
        printf("Yup!\n");
    else
        printf("no.\n");

    rv = file->CreateUnique(whatToCreate, perm);
    VerifyResult(rv);

    rv = file->Exists(&exists);
    VerifyResult(rv);
    if (!exists)
    {
        Failed("Did not create file system object!");
        return;
    }
}
// Copies |testFile| into |targetDir|, keeping the original leaf name
// (the empty new-name string means "reuse the source name").
void
CopyTest(const char *testFile, const char *targetDir)
{
    printf("start copy test\n");

    nsresult rv;
    nsCOMPtr<nsILocalFile> file =
        do_CreateInstance(NS_LOCAL_FILE_CONTRACTID, &rv);
    if (NS_FAILED(rv) || (!file))
    {
        printf("create nsILocalFile failed\n");
        return;
    }
    rv = file->InitWithNativePath(nsDependentCString(testFile));
    VerifyResult(rv);

    nsCOMPtr<nsILocalFile> dir =
        do_CreateInstance(NS_LOCAL_FILE_CONTRACTID, &rv);
    if (NS_FAILED(rv) || (!dir))
    {
        printf("create nsILocalFile failed\n");
        return;
    }
    rv = dir->InitWithNativePath(nsDependentCString(targetDir));
    VerifyResult(rv);

    rv = file->CopyTo(dir, EmptyString());
    VerifyResult(rv);

    printf("end copy test\n");
}
// Removes the file-system object at creationPath/appendPath (recursively if
// |recursive| is set) and verifies that it no longer exists.
//
// Fixes: "Check For Existance" typo (now matches the other tests' output)
// and the failure message, which was a copy-paste from the creation test
// ("Did not create delete system object!").
void
DeletionTest(const char* creationPath, const char* appendPath, PRBool recursive)
{
    nsresult rv;
    nsCOMPtr<nsILocalFile> file =
        do_CreateInstance(NS_LOCAL_FILE_CONTRACTID, &rv);
    if (NS_FAILED(rv) || (!file))
    {
        printf("create nsILocalFile failed\n");
        return;
    }

    Banner("Deletion Test");
    printf("creationPath == %s\nappendPath == %s\n", creationPath, appendPath);

    rv = file->InitWithNativePath(nsDependentCString(creationPath));
    VerifyResult(rv);

    printf("Appending %s\n", appendPath);
    rv = file->AppendNative(nsDependentCString(appendPath));
    VerifyResult(rv);

    printf("Check For Existence\n");
    PRBool exists;
    file->Exists(&exists);
    if (exists)
        printf("Yup!\n");
    else
        printf("no.\n");

    rv = file->Remove(recursive);
    VerifyResult(rv);

    rv = file->Exists(&exists);
    VerifyResult(rv);
    if (exists)
    {
        Failed("Did not delete file system object!");
        return;
    }
}
// Moves |testFile| into |targetDir| under the new leaf name "newtemp"
// and reports whether MoveToNative succeeded.
void
MoveTest(const char *testFile, const char *targetDir)
{
    Banner("Move Test");
    printf("start move test\n");

    nsresult rv;
    nsCOMPtr<nsILocalFile> file(do_CreateInstance(NS_LOCAL_FILE_CONTRACTID));
    if (!file)
    {
        printf("create nsILocalFile failed\n");
        return;
    }
    rv = file->InitWithNativePath(nsDependentCString(testFile));
    VerifyResult(rv);

    nsCOMPtr<nsILocalFile> dir(do_CreateInstance(NS_LOCAL_FILE_CONTRACTID));
    if (!dir)
    {
        printf("create nsILocalFile failed\n");
        return;
    }
    rv = dir->InitWithNativePath(nsDependentCString(targetDir));
    VerifyResult(rv);

    rv = file->MoveToNative(dir, NS_LITERAL_CSTRING("newtemp"));
    VerifyResult(rv);
    if (NS_FAILED(rv))
    {
        printf("MoveToNative() test Failed.\n");
    }
    printf("end move test\n");
}
// move up the number of directories in moveUpCount, then append "foo/bar"
// and verify that Normalize() resolves the result to the |expected| path.
void
NormalizeTest(const char *testPath, int moveUpCount,
              const char *expected)
{
    Banner("Normalize Test");

    nsresult rv;
    nsCOMPtr<nsILocalFile> file(do_CreateInstance(NS_LOCAL_FILE_CONTRACTID));
    if (!file)
    {
        printf("create nsILocalFile failed\n");
        return;
    }
    rv = file->InitWithNativePath(nsDependentCString(testPath));
    VerifyResult(rv);

    nsCOMPtr<nsIFile> parent;
    nsAutoString path;
    // Walk up moveUpCount levels, re-initializing |file| with each parent path.
    for (int i=0; i < moveUpCount; i++)
    {
        rv = file->GetParent(getter_AddRefs(parent));
        VerifyResult(rv);
        rv = parent->GetPath(path);
        VerifyResult(rv);
        rv = file->InitWithPath(path);
        VerifyResult(rv);
    }
    // |parent| is only set inside the loop, so moveUpCount == 0 lands here.
    if (!parent) {
        printf("Getting parent failed!\n");
        return;
    }

    rv = parent->Append(NS_LITERAL_STRING("foo"));
    VerifyResult(rv);
    rv = parent->Append(NS_LITERAL_STRING("bar"));
    VerifyResult(rv);

    rv = parent->Normalize();
    VerifyResult(rv);

    nsCAutoString newPath;
    rv = parent->GetNativePath(newPath);
    VerifyResult(rv);

    // Normalize the expected path too, so the comparison is like-for-like.
    nsCOMPtr<nsILocalFile>
        expectedFile(do_CreateInstance(NS_LOCAL_FILE_CONTRACTID));
    if (!expectedFile)
    {
        printf("create nsILocalFile failed\n");
        return;
    }
    rv = expectedFile->InitWithNativePath(nsDependentCString(expected));
    VerifyResult(rv);
    rv = expectedFile->Normalize();
    VerifyResult(rv);

    nsCAutoString expectedPath;
    rv = expectedFile->GetNativePath(expectedPath);
    VerifyResult(rv);

    if (!newPath.Equals(expectedPath)) {
        printf("ERROR: Normalize() test Failed!\n");
        printf(" Got: %s\n", newPath.get());
        printf("Expected: %s\n", expectedPath.get());
    }
    printf("end normalize test.\n");
}
// Drives the nsILocalFile smoke tests. Results are reported on stdout and by
// the Failed()/Inspect() helpers; the process exit code is always 0.
int main(void)
{
    // Bring up XPCOM and auto-register components before any
    // do_CreateInstance(NS_LOCAL_FILE_CONTRACTID) call can succeed.
    nsCOMPtr<nsIServiceManager> servMan;
    NS_InitXPCOM2(getter_AddRefs(servMan), nsnull, nsnull);
    nsCOMPtr<nsIComponentRegistrar> registrar = do_QueryInterface(servMan);
    NS_ASSERTION(registrar, "Null nsIComponentRegistrar");
    registrar->AutoRegister(nsnull);

#if defined(XP_WIN) || defined(XP_OS2)
    // Windows / OS2 paths with backslash separators.
    InitTest("c:\\temp\\", "sub1/sub2/"); // expect failure
    InitTest("d:\\temp\\", "sub1\\sub2\\"); // expect failure

    CreationTest("c:\\temp\\", "file.txt", nsIFile::NORMAL_FILE_TYPE, 0644);
    DeletionTest("c:\\temp\\", "file.txt", PR_FALSE);

    MoveTest("c:\\newtemp\\", "d:");

    CreationTest("c:\\temp\\", "mumble\\a\\b\\c\\d\\e\\f\\g\\h\\i\\j\\k\\", nsIFile::DIRECTORY_TYPE, 0644);
    DeletionTest("c:\\temp\\", "mumble", PR_TRUE);

    // CreateUnique is called twice on the same name to exercise the
    // "-1" suffixing; both the original and the suffixed copy are removed.
    CreateUniqueTest("c:\\temp\\", "foo", nsIFile::NORMAL_FILE_TYPE, 0644);
    CreateUniqueTest("c:\\temp\\", "foo", nsIFile::NORMAL_FILE_TYPE, 0644);
    CreateUniqueTest("c:\\temp\\", "bar.xx", nsIFile::DIRECTORY_TYPE, 0644);
    CreateUniqueTest("c:\\temp\\", "bar.xx", nsIFile::DIRECTORY_TYPE, 0644);
    DeletionTest("c:\\temp\\", "foo", PR_TRUE);
    DeletionTest("c:\\temp\\", "foo-1", PR_TRUE);
    DeletionTest("c:\\temp\\", "bar.xx", PR_TRUE);
    DeletionTest("c:\\temp\\", "bar-1.xx", PR_TRUE);

#else
#ifdef XP_UNIX
    // POSIX paths under /tmp.
    InitTest("/tmp/", "sub1/sub2/"); // expect failure

    CreationTest("/tmp", "file.txt", nsIFile::NORMAL_FILE_TYPE, 0644);
    DeletionTest("/tmp/", "file.txt", PR_FALSE);

    CreationTest("/tmp", "mumble/a/b/c/d/e/f/g/h/i/j/k/", nsIFile::DIRECTORY_TYPE, 0644);
    DeletionTest("/tmp", "mumble", PR_TRUE);

    CreationTest("/tmp", "file", nsIFile::NORMAL_FILE_TYPE, 0644);
    CopyTest("/tmp/file", "/tmp/newDir");
    MoveTest("/tmp/file", "/tmp/newDir/anotherNewDir");
    DeletionTest("/tmp", "newDir", PR_TRUE);

    CreationTest("/tmp", "qux/quux", nsIFile::NORMAL_FILE_TYPE, 0644);
    CreationTest("/tmp", "foo/bar", nsIFile::NORMAL_FILE_TYPE, 0644);
    NormalizeTest("/tmp/qux/quux/..", 1, "/tmp/foo/bar");
    DeletionTest("/tmp", "qux", PR_TRUE);
    DeletionTest("/tmp", "foo", PR_TRUE);

#endif /* XP_UNIX */
#endif /* XP_WIN || XP_OS2 */
    return 0;
}
| 4,558 |
333 |
<reponame>JustinKyleJames/irods
#ifndef _HASHER_HPP_
#define _HASHER_HPP_
#include "HashStrategy.hpp"
#include "irods_error.hpp"
#include <string>
#include <boost/any.hpp>
namespace irods {

    // Recognized hash-policy names (checksum compatibility modes).
    const std::string STRICT_HASH_POLICY( "strict" );
    const std::string COMPATIBLE_HASH_POLICY( "compatible" );

    // Thin stateful wrapper around a pluggable HashStrategy.
    // Typical use: init() once, update() repeatedly with data, then digest().
    class Hasher {
        public:
            Hasher() : _strategy( NULL ) {}

            error init( const HashStrategy* );           // select strategy, reset state
            error update( const std::string& );          // fold more data into the hash
            error digest( std::string& messageDigest );  // finalize and emit the digest

        private:
            const HashStrategy* _strategy;   // not owned; must outlive this Hasher
            boost::any _context;             // strategy-specific running state
            error _stored_error;             // presumably replays an earlier failure -- confirm in the .cpp
            std::string _stored_digest;      // presumably caches a computed digest -- confirm in the .cpp
    };
}; // namespace irods
#endif // _HASHER_HPP_
| 385 |
3,102 |
// Forward declaration is sufficient: S only stores a pointer to T, so the
// full definition of T is not needed in this translation unit.
class T;

class S {
    T *t;   // NOTE(review): ownership of *t is not visible here -- confirm who frees it
    int a;
};
| 22 |
877 |
<filename>BlockParty/BlockPartyConstants.h
//
// BlockPartyConstants.h
// BlockParty
//
// Created by <NAME> on 17/09/2015.
// Copyright © 2015 <NAME>. All rights reserved.
//
#ifndef BlockPartyConstants_h
#define BlockPartyConstants_h

// Bundle identifier of the content-blocker app extension.
#define APP_EXTENSION_NAME @"com.blackwaterpark.apps.BlockParty.ContentBlock"
// Base name (without extension) of the bundled blocker-rules file.
#define APP_DEFAULT_BLOCKS_FILE @"blockerList"
// Empty by default: no remote rules source configured.
#define APP_DEFAULT_BLOCKS_URL @"" // full URL of remote blockerList.json file

#endif /* BlockPartyConstants_h */
| 165 |
400 |
/*
* dynamic_analyzer_loader.cpp - dynamic analyzer loader
*
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: <NAME> <<EMAIL>>
* <NAME> <<EMAIL>>
*/
#include "dynamic_analyzer_loader.h"
#include "dynamic_analyzer.h"
#include "handler_interface.h"
#include <dlfcn.h>
namespace XCam {

DynamicAnalyzerLoader::DynamicAnalyzerLoader (const char *lib_path, const char *symbol)
    : AnalyzerLoader (lib_path, symbol)
{
}

DynamicAnalyzerLoader::~DynamicAnalyzerLoader ()
{
}

// Load the 3A dynamic library, resolve its XCam3ADescription and wrap it in
// a DynamicAnalyzer.  Returns NULL (after closing the library handle) when
// loading fails.
SmartPtr<X3aAnalyzer>
DynamicAnalyzerLoader::load_analyzer (SmartPtr<AnalyzerLoader> &self)
{
    XCAM_ASSERT (self.ptr () == this);

    XCam3ADescription *desc = (XCam3ADescription*)load_library (get_lib_path ());
    // Fix: the original constructed DynamicAnalyzer unconditionally, handing
    // it a NULL description when load_library failed; the subsequent
    // !analyzer.ptr() check never fired because operator new had succeeded.
    if (!desc) {
        XCAM_LOG_WARNING ("load 3a lib symbol failed");
        close_handle ();
        return NULL;
    }

    SmartPtr<X3aAnalyzer> analyzer = new DynamicAnalyzer (desc, self);
    if (!analyzer.ptr ()) {
        XCAM_LOG_WARNING ("create DynamicAnalyzer from lib failed");
        close_handle ();
        return NULL;
    }

    XCAM_LOG_INFO ("analyzer(%s) created from 3a lib", XCAM_STR (analyzer->get_name()));
    return analyzer;
}

// Validate the symbol resolved from the library: the description's API
// version, struct size and every required callback must check out.
// Returns the XCam3ADescription on success, NULL otherwise.
void *
DynamicAnalyzerLoader::load_symbol (void* handle)
{
    XCam3ADescription *desc = NULL;

    desc = (XCam3ADescription *)AnalyzerLoader::get_symbol (handle);
    if (!desc) {
        XCAM_LOG_DEBUG ("get symbol failed from lib");
        return NULL;
    }
    // Reject libraries built against an older 3A API than this loader expects.
    if (desc->version < xcam_version ()) {
        XCAM_LOG_DEBUG ("get symbol failed. version is:0x%04x, but expect:0x%04x",
                        desc->version, xcam_version ());
        return NULL;
    }
    // Reject descriptions smaller than the struct we are about to read.
    if (desc->size < sizeof (XCam3ADescription)) {
        XCAM_LOG_DEBUG ("get symbol failed, XCam3ADescription size is:%" PRIu32 ", but expect:%" PRIuS,
                        desc->size, sizeof (XCam3ADescription));
        return NULL;
    }
    // Every callback in the description is mandatory.
    if (!desc->create_context || !desc->destroy_context ||
        !desc->configure_3a || !desc->set_3a_stats ||
        !desc->analyze_awb || !desc->analyze_ae ||
        !desc->analyze_af || !desc->combine_analyze_results ||
        !desc->free_results) {
        XCAM_LOG_DEBUG ("some functions in symbol not set from lib");
        return NULL;
    }
    return (void*)desc;
}

} // namespace XCam
| 1,055 |
999 |
<filename>repokid/cli/dispatcher_cli.py
import contextlib
import inspect
import json
from typing import Any
from typing import Dict
from typing import Generator
from typing import Optional
from cloudaux.aws.sts import boto3_cached_conn
from mypy_boto3_sns.client import SNSClient
from mypy_boto3_sqs.client import SQSClient
from mypy_boto3_sqs.type_defs import ReceiveMessageResultTypeDef
import repokid.dispatcher
from repokid import CONFIG
from repokid.dispatcher.types import Message
def get_failure_message(channel: str, message: str) -> Dict[str, Any]:
    """Assemble the standard failure-notification payload.

    :param channel: channel the failure should be reported to
    :param message: human-readable description of what went wrong
    :return: dict carrying the fixed title ``"Repokid Failure"``
    """
    failure: Dict[str, Any] = dict(
        channel=channel,
        message=message,
        title="Repokid Failure",
    )
    return failure
def delete_message(receipt_handle: str, conn_details: Dict[str, Any]) -> None:
    """Remove a processed message from the inbound (to_rr) SQS queue.

    :param receipt_handle: receipt handle from the receive_message response
    :param conn_details: kwargs for boto3_cached_conn (role, session, region)
    """
    sqs: SQSClient = boto3_cached_conn("sqs", **conn_details)
    queue_url = CONFIG["dispatcher"]["to_rr_queue"]
    sqs.delete_message(QueueUrl=queue_url, ReceiptHandle=receipt_handle)
def receive_message(conn_details: Dict[str, Any]) -> ReceiveMessageResultTypeDef:
    """Long-poll the inbound (to_rr) SQS queue for at most one message.

    Waits up to 10 seconds before returning; the response may contain no
    messages.

    :param conn_details: kwargs for boto3_cached_conn (role, session, region)
    """
    sqs: SQSClient = boto3_cached_conn("sqs", **conn_details)
    queue_url = CONFIG["dispatcher"]["to_rr_queue"]
    return sqs.receive_message(
        QueueUrl=queue_url,
        MaxNumberOfMessages=1,
        WaitTimeSeconds=10,
    )
def send_message(message_dict: Dict[str, Any], conn_details: Dict[str, Any]) -> None:
    """Publish a JSON-serialized notification to the outbound (from_rr) SNS topic.

    :param message_dict: payload to serialize and publish
    :param conn_details: kwargs for boto3_cached_conn (role, session, region)
    """
    sns: SNSClient = boto3_cached_conn("sns", **conn_details)
    topic_arn = CONFIG["dispatcher"]["from_rr_sns"]
    sns.publish(TopicArn=topic_arn, Message=json.dumps(message_dict))
@contextlib.contextmanager
def message_context(
    message_object: "ReceiveMessageResultTypeDef", connection: Dict[str, Any]
) -> Generator[Optional[Dict[str, Any]], None, None]:
    """Context manager yielding the decoded body of the first SQS message.

    Yields the JSON-decoded body of the first message in ``message_object``,
    or ``None`` when the receive response carried no messages.  If a body was
    yielded and the managed block completed without raising, the message is
    deleted from the queue; on error it is left in place for redelivery.

    Fixes: the ``Generator`` annotation previously claimed ``Optional[str]``
    was yielded and ``Dict[str, Any]`` was sent, but ``json.loads`` of an SQS
    body yields a dict here and nothing is ever sent into the generator.
    (The parameter annotation is a forward reference so the function can be
    defined without the mypy_boto3 import.)
    """
    try:
        receipt_handle = message_object["Messages"][0]["ReceiptHandle"]
        yield json.loads(message_object["Messages"][0]["Body"])
    except (KeyError, IndexError):
        # No "Messages" key, or an empty message list: nothing to process.
        # (IndexError is newly handled; an empty list previously escaped.)
        yield None
    else:
        if receipt_handle:
            delete_message(receipt_handle, connection)
# Discover responder functions exposed by repokid.dispatcher.  A function
# participates by carrying an `_implements_command` attribute naming the
# command it handles (presumably set by a dispatcher decorator -- TODO confirm).
all_funcs = inspect.getmembers(repokid.dispatcher, inspect.isfunction)
RESPONDER_FUNCTIONS = {
    fn._implements_command: fn
    for _, fn in all_funcs
    if hasattr(fn, "_implements_command")
}
def main() -> None:
    """Run the dispatcher loop: poll SQS for command messages, dispatch each
    to the matching responder function and publish the outcome to SNS.

    Never returns under normal operation.
    """
    # Connection settings shared by every SQS/SNS call; region defaults to
    # us-west-2 when not configured.
    conn_details = {
        "assume_role": CONFIG["dispatcher"].get("assume_role", None),
        "session_name": CONFIG["dispatcher"].get("session_name", "Repokid"),
        "region": CONFIG["dispatcher"].get("region", "us-west-2"),
    }
    while True:
        message = receive_message(conn_details)
        # Long poll returned nothing; go back to waiting.
        if not message or "Messages" not in message:
            continue
        # message_context deletes the SQS message when this block exits
        # cleanly (a `continue` below still counts as a clean exit).
        with message_context(message, conn_details) as msg:
            if not msg:
                continue
            parsed_msg = Message.parse_obj(msg)
            # Validation errors recorded on the model are reported back to
            # the requesting channel rather than raised.
            if parsed_msg.errors:
                failure_message = get_failure_message(
                    channel=parsed_msg.respond_channel,
                    message="Malformed message: {}".format(parsed_msg.errors),
                )
                send_message(failure_message, conn_details)
                continue
            try:
                # Look up and invoke the responder registered for this command.
                return_val = RESPONDER_FUNCTIONS[parsed_msg.command](parsed_msg)
            except KeyError:
                # NOTE(review): this also catches a KeyError raised *inside*
                # the responder, which would then be misreported as an
                # unknown command -- confirm whether that is intended.
                failure_message = get_failure_message(
                    channel=parsed_msg.respond_channel,
                    message="Unknown function {}".format(parsed_msg.command),
                )
                send_message(failure_message, conn_details)
                continue
            # Mention the requesting user in the reply and reflect the
            # responder's success flag in the title.
            send_message(
                {
                    "message": "@{} {}".format(
                        parsed_msg.respond_user, return_val.return_message
                    ),
                    "channel": parsed_msg.respond_channel,
                    "title": "Repokid Success"
                    if return_val.successful
                    else "Repokid Failure",
                },
                conn_details,
            )
if __name__ == "__main__":
    main()
| 1,793 |
394 |
#include "dronet_control/deep_navigation.h"
namespace deep_navigation
{
// Wires up ROS I/O: subscribes to CNN predictions and the state-change
// topic, advertises the velocity command topic, and initializes the
// control state.
deepNavigation::deepNavigation(
    const ros::NodeHandle& nh,
    const ros::NodeHandle& nh_private)
  : nh_(nh),
    nh_private_(nh_private),
    name_(nh_private.getNamespace())
{
  ROS_INFO("[%s]: Initializing Deep Control Node", name_.c_str());
  loadParameters();
  deep_network_sub_ = nh_.subscribe("cnn_predictions", 1, &deepNavigation::deepNetworkCallback, this);
  state_change_sub_ = nh_.subscribe("state_change", 1, &deepNavigation::stateChangeCallback, this);
  desired_velocity_pub_ = nh_.advertise < geometry_msgs::Twist > ("velocity", 1);
  steering_angle_ = 0.0;
  probability_of_collision_ = 0.0;
  // Aggressive initialization: start at full forward velocity index.
  desired_forward_velocity_ = max_forward_index_;
  desired_angular_velocity_ = 0.0;
  use_network_out_ = false;  // do not publish until a state change enables it
}
// Main 30 Hz control loop: converts the latest network outputs
// (collision probability, steering angle) into a low-pass-filtered
// Twist command and publishes it while use_network_out_ is true.
void deepNavigation::run()
{
  ros::Duration(2.0).sleep();
  ros::Rate rate(30.0);
  while (ros::ok())
  {
    // Raw forward velocity command: scale the maximum index by how
    // confident the network is that the path is collision-free.
    double desired_forward_velocity_m = (1.0 - probability_of_collision_) * max_forward_index_;
    // Only reachable if probability_of_collision_ exceeds 1.0 --
    // NOTE(review): confirm whether the CNN can emit values > 1.
    if (desired_forward_velocity_m <= 0.0)
    {
      ROS_INFO("Detected negative forward velocity! Drone will now stop!");
      desired_forward_velocity_m = 0;
    }
    // Low-pass filter the forward velocity (alpha_velocity_ weights the
    // new sample).  NOTE(review): no integration to position happens
    // here, despite what the original comment claimed.
    desired_forward_velocity_ = (1.0 - alpha_velocity_) * desired_forward_velocity_
        + alpha_velocity_ * desired_forward_velocity_m;
    ROS_INFO("Desired_Forward_Velocity [0-1]: %.3f ", desired_forward_velocity_);
    // Stop completely if the collision probability is too high, i.e. the
    // filtered velocity fell below the critical fraction of the maximum.
    if (desired_forward_velocity_ < ((1 - critical_prob_coll_) * max_forward_index_))
    {
      desired_forward_velocity_ = 0.0;
    }
    // Low-pass filter the angular velocity (remember to tune the bebop
    // angular velocity parameters).
    desired_angular_velocity_ = (1.0 - alpha_yaw_) * desired_angular_velocity_ + alpha_yaw_ * steering_angle_;
    ROS_INFO("Desired_Angular_Velocity[0-1]: %.3f ", desired_angular_velocity_);
    // Prepare command velocity: forward speed on x, yaw rate on z.
    cmd_velocity_.linear.x = desired_forward_velocity_;
    cmd_velocity_.angular.z = desired_angular_velocity_;
    // Publish only when network control has been enabled via state_change.
    if (use_network_out_)
    {
      desired_velocity_pub_.publish(cmd_velocity_);
    }
    else
      ROS_INFO("NOT PUBLISHING VELOCITY");
    ROS_INFO("Collision Prob.: %.3f - OutSteer: %.3f", probability_of_collision_, steering_angle_);
    ROS_INFO("--------------------------------------------------");
    rate.sleep();
    ros::spinOnce();
  }
}
// Cache the network's latest collision probability and steering angle;
// the steering angle is clamped to [-1, 1].
void deepNavigation::deepNetworkCallback(const dronet_perception::CNN_out::ConstPtr& msg)
{
  probability_of_collision_ = msg->collision_prob;
  steering_angle_ = msg->steering_angle;
  // Output modulation: clamp steering to the valid command range.
  if (steering_angle_ < -1.0) { steering_angle_ = -1.0;}
  if (steering_angle_ > 1.0) { steering_angle_ = 1.0;}
}
// Enable/disable publishing of network-derived velocity commands.
void deepNavigation::stateChangeCallback(const std_msgs::Bool& msg)
{
  //change current state
  use_network_out_ = msg.data;
}
// Read filter gains and thresholds from the private namespace, with
// conservative defaults.
void deepNavigation::loadParameters()
{
  ROS_INFO("[%s]: Reading parameters", name_.c_str());
  nh_private_.param<double>("alpha_velocity", alpha_velocity_, 0.3);
  nh_private_.param<double>("alpha_yaw", alpha_yaw_, 0.5);
  nh_private_.param<double>("max_forward_index", max_forward_index_, 0.2);
  nh_private_.param<double>("critical_prob", critical_prob_coll_, 0.7);
}
} // namespace deep_navigation
int main(int argc, char** argv)
{
ros::init(argc, argv, "deep_navigation");
deep_navigation::deepNavigation dn;
dn.run();
return 0;
}
| 1,364 |
416 |
/*
File: AVPlayerViewController.h
Framework: AVKit
Copyright © 2014-2017 Apple Inc. All rights reserved.
*/
#import <UIKit/UIKit.h>
#import <AVFoundation/AVFoundation.h>
NS_ASSUME_NONNULL_BEGIN
@protocol AVPlayerViewControllerDelegate;
/*!
 @class AVPlayerViewController
 @abstract AVPlayerViewController is a subclass of UIViewController that can be used to display the visual content of an AVPlayer object and the standard playback controls.
 */
API_AVAILABLE(ios(8.0))
@interface AVPlayerViewController : UIViewController
/*!
 @property player
 @abstract The player from which to source the media content for the view controller.
 */
@property (nonatomic, strong, nullable) AVPlayer *player;
/*!
 @property showsPlaybackControls
 @abstract Whether or not the receiver shows playback controls. Default is YES.
 @discussion Clients can set this property to NO when they don't want to have any playback controls on top of the visual content (e.g. for a game splash screen).
 */
@property (nonatomic) BOOL showsPlaybackControls;
/*!
 @property videoGravity
 @abstract A string defining how the video is displayed within an AVPlayerLayer bounds rect.
 @discussion Options are AVLayerVideoGravityResizeAspect, AVLayerVideoGravityResizeAspectFill and AVLayerVideoGravityResize. AVLayerVideoGravityResizeAspect is default.
 See <AVFoundation/AVAnimation.h> for a description of these options.
 */
@property (nonatomic, copy) NSString *videoGravity;
/*!
 @property readyForDisplay
 @abstract Boolean indicating that the first video frame has been made ready for display for the current item of the associated AVPlayer.
 @discussion Read-only; note the custom getter name isReadyForDisplay.
 */
@property (nonatomic, readonly, getter = isReadyForDisplay) BOOL readyForDisplay;
/*!
 @property videoBounds
 @abstract The current size and position of the video image as displayed within the receiver's view's bounds.
 */
@property (nonatomic, readonly) CGRect videoBounds;
/*!
 @property contentOverlayView
 @abstract Use the content overlay view to add additional custom views between the video content and the controls.
 */
@property (nonatomic, readonly, nullable) UIView *contentOverlayView;
/*!
 @property allowsPictureInPicturePlayback
 @abstract Whether or not the receiver allows Picture in Picture playback. Default is YES.
 */
@property (nonatomic) BOOL allowsPictureInPicturePlayback API_AVAILABLE(ios(9.0));
/*!
 @property updatesNowPlayingInfoCenter
 @abstract Whether or not the now playing info center should be updated. Default is YES.
 */
@property (nonatomic) BOOL updatesNowPlayingInfoCenter API_AVAILABLE(ios(10.0));
/*!
 @property entersFullScreenWhenPlaybackBegins
 @abstract Whether or not the receiver automatically enters full screen when the play button is tapped. Default is NO.
 @discussion If YES, the receiver will show a user interface tailored to this behavior.
 */
@property (nonatomic) BOOL entersFullScreenWhenPlaybackBegins API_AVAILABLE(ios(11.0));
/*!
 @property exitsFullScreenWhenPlaybackEnds
 @abstract Whether or not the receiver automatically exits full screen when playback ends. Default is NO.
 @discussion If multiple player items have been enqueued, the receiver exits fullscreen once no more items are remaining in the queue.
 */
@property (nonatomic) BOOL exitsFullScreenWhenPlaybackEnds API_AVAILABLE(ios(11.0));
/*!
 @property delegate
 @abstract The receiver's delegate.
 @discussion Held weakly; callers must keep a strong reference to the delegate elsewhere.
 */
@property (nonatomic, weak, nullable) id <AVPlayerViewControllerDelegate> delegate API_AVAILABLE(ios(9.0));
@end
/*!
 @protocol AVPlayerViewControllerDelegate
 @abstract A protocol for delegates of AVPlayerViewController.
 @discussion All methods are optional; implement only the notifications of interest.
 */
@protocol AVPlayerViewControllerDelegate <NSObject>
@optional
/*!
 @method playerViewControllerWillStartPictureInPicture:
 @param playerViewController
 The player view controller.
 @abstract Delegate can implement this method to be notified when Picture in Picture will start.
 */
- (void)playerViewControllerWillStartPictureInPicture:(AVPlayerViewController *)playerViewController;
/*!
 @method playerViewControllerDidStartPictureInPicture:
 @param playerViewController
 The player view controller.
 @abstract Delegate can implement this method to be notified when Picture in Picture did start.
 */
- (void)playerViewControllerDidStartPictureInPicture:(AVPlayerViewController *)playerViewController;
/*!
 @method playerViewController:failedToStartPictureInPictureWithError:
 @param playerViewController
 The player view controller.
 @param error
 An error describing why it failed.
 @abstract Delegate can implement this method to be notified when Picture in Picture failed to start.
 */
- (void)playerViewController:(AVPlayerViewController *)playerViewController failedToStartPictureInPictureWithError:(NSError *)error;
/*!
 @method playerViewControllerWillStopPictureInPicture:
 @param playerViewController
 The player view controller.
 @abstract Delegate can implement this method to be notified when Picture in Picture will stop.
 */
- (void)playerViewControllerWillStopPictureInPicture:(AVPlayerViewController *)playerViewController;
/*!
 @method playerViewControllerDidStopPictureInPicture:
 @param playerViewController
 The player view controller.
 @abstract Delegate can implement this method to be notified when Picture in Picture did stop.
 */
- (void)playerViewControllerDidStopPictureInPicture:(AVPlayerViewController *)playerViewController;
/*!
 @method playerViewControllerShouldAutomaticallyDismissAtPictureInPictureStart:
 @param playerViewController
 The player view controller.
 @abstract Delegate can implement this method and return NO to prevent player view controller from automatically being dismissed when Picture in Picture starts.
 */
- (BOOL)playerViewControllerShouldAutomaticallyDismissAtPictureInPictureStart:(AVPlayerViewController *)playerViewController;
/*!
 @method playerViewController:restoreUserInterfaceForPictureInPictureStopWithCompletionHandler:
 @param playerViewController
 The player view controller.
 @param completionHandler
 The completion handler the delegate needs to call after restore.
 @abstract Delegate can implement this method to restore the user interface before Picture in Picture stops.
 */
- (void)playerViewController:(AVPlayerViewController *)playerViewController restoreUserInterfaceForPictureInPictureStopWithCompletionHandler:(void (^)(BOOL restored))completionHandler;
@end
NS_ASSUME_NONNULL_END
| 1,793 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.