max_stars_count (int64, 301 to 224k) | text (string, lengths 6 to 1.05M) | token_count (int64, 3 to 727k)
---|---|---|
2,023 | """
Simplest Windows Registry I/O
"""
import win32api
import win32con
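# Usage note (editorial addition): win32api/win32con come from the pywin32
# package, and the module is written in Python 2 syntax (print statements,
# "except Exception, e"). Test() below writes under HKEY_LOCAL_MACHINE, so it
# must run on Windows with administrative privileges.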
def ReadRegistryValue(hiveKey, key, name=""):
""" Read one value from Windows registry. If 'name' is empty string, reads default value."""
data = typeId = None
try:
keyHandle = win32api.RegOpenKeyEx(hiveKey, key, 0, win32con.KEY_ALL_ACCESS)
data, typeId = win32api.RegQueryValueEx(keyHandle, name)
win32api.RegCloseKey(keyHandle)
except Exception, e:
print "ReadRegistryValue failed:", hiveKey, key, name, e
return data, typeId
def WriteRegistryValue(hiveKey, key, name, data, typeId=win32con.REG_SZ):
""" Write one value to Windows registry. If 'name' is empty string, writes default value.
Creates subkeys as necessary"""
try:
keyHandle = OpenRegistryKey(hiveKey, key)
win32api.RegSetValueEx(keyHandle, name, 0, typeId, data)
win32api.RegCloseKey(keyHandle)
except Exception, e:
print "WriteRegistryValue failed:", hiveKey, name, e
def OpenRegistryKey(hiveKey, key):
""" Opens a keyHandle for hiveKey and key, creating subkeys as necessary """
keyHandle = None
try:
curKey = ""
keyItems = key.split('\\')
for subKey in keyItems:
if curKey:
curKey = curKey + "\\" + subKey
else:
curKey = subKey
keyHandle = win32api.RegCreateKey(hiveKey, curKey)
except Exception, e:
keyHandle = None
print "OpenRegistryKey failed:", hiveKey, key, e
return keyHandle
def DeleteRegistryKey(hiveKey, key):
""" Deletes a registry key -- must be a leaf key or call fails """
try:
result = win32api.RegDeleteKey(hiveKey, key)
return result
except Exception, e:
print "DeleteRegistryKey failed:", hiveKey, key, e
return None
def TestRegistryWriteRead(hiveKey, key, name, data, typeId):
WriteRegistryValue(hiveKey, key, name, data, typeId)
outputData, outputTypeId = ReadRegistryValue(hiveKey, key, name)
status = "OK"
if (outputData != data or outputTypeId != typeId):
status = "FAILED"
print "%s -- %d %s %s -- input: %s %s output: %s %s" % \
(status, hiveKey, key, name, str(data), str(typeId), str(outputData), str(outputTypeId))
def Test():
TestRegistryWriteRead(win32con.HKEY_LOCAL_MACHINE, "Software\\AAAAA", "", "this is a default value", win32con.REG_SZ)
TestRegistryWriteRead(win32con.HKEY_LOCAL_MACHINE, "Software\\AAAAA", "Data-SZ", "this is a string", win32con.REG_SZ)
TestRegistryWriteRead(win32con.HKEY_LOCAL_MACHINE, "Software\\AAAAA", "Data-BINARY", '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09', win32con.REG_BINARY)
TestRegistryWriteRead(win32con.HKEY_LOCAL_MACHINE, "Software\\AAAAA", "Data-DWORD", 0x01234567, win32con.REG_DWORD)
DeleteRegistryKey(win32con.HKEY_LOCAL_MACHINE, "Software\\AAAAA")
if __name__ == "__main__":
Test()
| 1,217 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.j2ee.persistence.unit;
import java.net.URI;
import java.util.Enumeration;
import org.netbeans.api.project.Project;
import org.netbeans.junit.NbTestCase;
import org.netbeans.spi.project.FileOwnerQueryImplementation;
import org.openide.filesystems.FileObject;
import org.openide.filesystems.FileUtil;
import org.openide.filesystems.MIMEResolver;
import org.openide.loaders.DataLoaderPool;
import org.openide.util.Enumerations;
import org.openide.util.Lookup;
import org.openide.util.lookup.Lookups;
import org.openide.util.lookup.ProxyLookup;
/**
*
* @author <NAME>, <NAME>
*/
public abstract class PUDataObjectTestBase extends NbTestCase {
private static DummyProject project = new DummyProject();
static {
System.setProperty("org.openide.util.Lookup", Lkp.class.getName());
((Lkp)Lookup.getDefault()).setLookups(new Object[] { new PUMimeResolver(), new Pool(), new PUFOQI(project) });
assertEquals("Unable to set the default lookup!", Lkp.class, Lookup.getDefault().getClass());
assertEquals("The default MIMEResolver is not our resolver!", PUMimeResolver.class, Lookup.getDefault().lookup(MIMEResolver.class).getClass());
assertEquals("The default DataLoaderPool is not our pool!", Pool.class, Lookup.getDefault().lookup(DataLoaderPool.class).getClass());
}
public PUDataObjectTestBase(String name) {
super(name);
project.setDirectory(FileUtil.toFileObject(getDataDir()));
}
/**
* Our default lookup.
*/
public static final class Lkp extends ProxyLookup {
public Lkp() {
setLookups(new Object[0]);
}
public void setLookups(Object[] instances) {
ClassLoader l = PersistenceEditorTestBase.class.getClassLoader();
setLookups(new Lookup[] {
Lookups.fixed(instances),
Lookups.metaInfServices(l),
Lookups.singleton(l)
});
}
}
/**
* DataLoaderPool which is registered in the default lookup and loads
* PUDataLoader.
*/
public static final class Pool extends DataLoaderPool {
public Enumeration loaders() {
return Enumerations.singleton(new PUDataLoader());
}
}
/**
* MIME Resolver that associates persistence.xml with PUDataLoader.
*/
public static final class PUMimeResolver extends MIMEResolver {
public String findMIMEType(FileObject fo) {
if (fo.getName().startsWith("persistence")){
return PUDataLoader.REQUIRED_MIME;
}
return null;
}
}
/**
* Returns dummy project implementation. Needed since persistence unit needs
* to be associated with {@link Project} owner. Also see issue #74426.
*/
private static final class PUFOQI implements FileOwnerQueryImplementation {
Project dummyProject;
PUFOQI(Project dummy){
dummyProject = dummy;
}
public Project getOwner(URI file) {
return dummyProject;
}
public Project getOwner(FileObject file) {
return dummyProject;
}
}
private static class DummyProject implements Project{
private FileObject dir;
/**
* Dummy project has to have a non-null directory for some editor kits, for example XMLKit from the xml.text module
* @param dir
*/
public void setDirectory(FileObject dir){
this.dir = dir;
}
@Override
public Lookup getLookup() { return Lookup.EMPTY; }
@Override
public FileObject getProjectDirectory() { return dir; }
}
}
| 1,701 |
777 | <gh_stars>100-1000
/*
* Artificial Intelligence for Humans
* Volume 1: Fundamental Algorithms
* C/C++ Version
* http://www.aifh.org
* http://www.jeffheaton.com
*
* Code repository:
* https://github.com/jeffheaton/aifh
* Copyright 2013 by <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information on Heaton Research copyrights, licenses
* and trademarks visit:
* http://www.heatonresearch.com/copyright
*/
#include "aifh-vol1.h"
DATA_SET *DataCreate(int rowCount, int inputCount, int outputCount) {
DATA_SET *result = NULL;
int allocSize;
/* Allocate the data set */
result = (DATA_SET*)calloc(1,sizeof(DATA_SET));
result->inputCount = inputCount;
result->idealCount = outputCount;
result->recordCount = rowCount;
allocSize = rowCount*(result->inputCount+result->idealCount);
result->data = (double*)calloc(allocSize,sizeof(double));
result->cursor = result->data;
return result;
}
void DataDelete(DATA_SET *data) {
    /* release the row storage before the struct itself so it is not leaked */
    free(data->data);
    free(data);
}
double *DataGetInput(DATA_SET *data, unsigned int index)
{
int i;
i = index*(data->inputCount+data->idealCount);
return &data->data[i];
}
double *DataGetIdeal(DATA_SET *data, unsigned int index)
{
int i;
i = index*(data->inputCount+data->idealCount);
return &data->data[i+data->inputCount];
}
void DataCSVSave(FILE *fp,NORM_DATA *norm, DATA_SET *data)
{
unsigned int i,j,len;
double *input, *ideal;
NORM_DATA_ITEM *item;
/* Write the header */
if( norm!=NULL ) {
item = norm->firstItem;
j=0;
while(item!=NULL) {
/* Determine the length of the normalized column */
len=NormCalculateActualCount(norm,j,1);
if( len==1 ) {
/* Length 1, just append a normal column head */
if( j>0 ) {
fprintf(fp,",");
}
fprintf(fp,"\"%s\"",item->name);
j++;
} else {
/* Otherwise, append the correct number of columns it normalizes into */
for(i=0;i<len;i++) {
if( j>0 ) {
fprintf(fp,",");
}
fprintf(fp,"\"%s-%i\"",item->name,i);
j++;
}
}
item = item->next;
j++;
}
fprintf(fp,"\n");
}
/* Write the data */
for(i=0; i<data->recordCount; i++)
{
input = DataGetInput(data,i);
ideal = DataGetIdeal(data,i);
for(j=0; j<data->inputCount; j++)
{
if(j>0)
{
fprintf(fp,",");
}
fprintf(fp,"%f",input[j]);
}
for(j=0; j<data->idealCount; j++)
{
fprintf(fp,",");
fprintf(fp,"%f",ideal[j]);
}
fputs("\n",fp);
}
fclose(fp);
}
void DataMoveCursor(DATA_SET *data, int location) {
data->cursor = data->data + (location * (data->inputCount+data->idealCount));
}
void DataAddVar(DATA_SET *data, ...)
{
int i,total;
double d = 0.0;
va_list arguments;
va_start ( arguments, data );
total = data->inputCount + data->idealCount;
for(i=0; i<total; i++)
{
d = (double)va_arg(arguments,double);
*(data->cursor++) = d;
}
va_end( arguments );
}
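/*
 * Illustrative usage sketch (not part of the original file): shows how the
 * DATA_SET API above fits together -- create a set, position the cursor,
 * append rows with DataAddVar, and read them back. The function name
 * DataExampleUsage is hypothetical; printf is available because stdio.h
 * comes in through aifh-vol1.h, as the fprintf calls above already rely on.
 */
static void DataExampleUsage(void) {
    DATA_SET *data = DataCreate(2, 1, 1);   /* 2 rows, 1 input, 1 ideal per row */
    double *input, *ideal;

    DataMoveCursor(data, 0);                /* start writing at row 0 */
    DataAddVar(data, 0.5, 1.0);             /* row 0: input 0.5, ideal 1.0 */
    DataAddVar(data, 0.25, 0.0);            /* row 1: input 0.25, ideal 0.0 */

    input = DataGetInput(data, 1);          /* points at row 1 inputs -> 0.25 */
    ideal = DataGetIdeal(data, 1);          /* points at row 1 ideals -> 0.0  */
    printf("row 1: input=%f ideal=%f\n", input[0], ideal[0]);

    DataDelete(data);                       /* frees row storage and the struct */
}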
| 1,480 |
777 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/ash/launcher/test/launcher_application_menu_item_model_test_api.h"
LauncherApplicationMenuItemModelTestAPI::
LauncherApplicationMenuItemModelTestAPI(
LauncherApplicationMenuItemModel* menu_item_model)
: menu_item_model_(menu_item_model) {
}
LauncherApplicationMenuItemModelTestAPI::
~LauncherApplicationMenuItemModelTestAPI() {
}
int LauncherApplicationMenuItemModelTestAPI::GetNumMenuItemsEnabled() const {
return menu_item_model_->GetNumMenuItemsEnabled();
}
void LauncherApplicationMenuItemModelTestAPI::RecordMenuItemSelectedMetrics(
int command_id,
int num_menu_items_enabled) {
return menu_item_model_->RecordMenuItemSelectedMetrics(
command_id, num_menu_items_enabled);
}
| 284 |
381 | <filename>helix-core/src/test/java/org/apache/helix/integration/paticipant/TestParticipantFreeze.java
package org.apache.helix.integration.paticipant;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkTestHelper;
import org.apache.helix.common.ZkTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.ClusterManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.messaging.DefaultMessagingService;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.tools.ClusterStateVerifier;
import org.apache.helix.util.MessageUtil;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestParticipantFreeze extends ZkTestBase {
private HelixManager _manager;
private HelixDataAccessor _accessor;
private PropertyKey.Builder _keyBuilder;
private String _clusterName;
private int _numNodes;
private String _resourceName;
private String _instanceName;
private MockParticipantManager[] _participants;
// current states in participant[0]
private List<CurrentState> _originCurStates;
private String _originSession;
@BeforeClass
public void beforeClass() throws Exception {
_clusterName = "CLUSTER_" + TestHelper.getTestClassName();
_numNodes = 3;
_resourceName = "TestDB";
_participants = new MockParticipantManager[_numNodes];
TestHelper.setupCluster(_clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
_resourceName, // resource name prefix
1, // resources
1, // partitions per resource
_numNodes, // number of nodes
3, // replicas
"MasterSlave", true);
_manager = HelixManagerFactory
.getZKHelixManager(_clusterName, "Admin", InstanceType.ADMINISTRATOR, ZK_ADDR);
_manager.connect();
_accessor = _manager.getHelixDataAccessor();
_keyBuilder = _accessor.keyBuilder();
// start controller
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, _clusterName, "controller_0");
controller.syncStart();
// start participants
for (int i = 0; i < _numNodes; i++) {
String instanceName = "localhost_" + (12918 + i);
_participants[i] = new MockParticipantManager(ZK_ADDR, _clusterName, instanceName);
_participants[i].syncStart();
}
_instanceName = _participants[0].getInstanceName();
Assert.assertTrue(ClusterStateVerifier.verifyByZkCallback(
new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR, _clusterName)));
// We just need controller to rebalance the cluster once to get current states.
controller.syncStop();
_originSession = _participants[0].getSessionId();
_originCurStates =
_accessor.getChildValues(_keyBuilder.currentStates(_instanceName, _originSession), false);
}
@AfterClass
public void afterClass() {
_manager.disconnect();
Arrays.stream(_participants).forEach(ClusterManager::syncStop);
deleteCluster(_clusterName);
}
/*
* Live instance is not frozen and does not have a frozen status field
*/
@Test
public void testNormalLiveInstanceStatus() {
LiveInstance liveInstance = _accessor.getProperty(_keyBuilder.liveInstance(_instanceName));
Assert.assertEquals(liveInstance.getStatus(), LiveInstance.LiveInstanceStatus.NORMAL);
Assert.assertNull(
liveInstance.getRecord().getSimpleField(LiveInstance.LiveInstanceProperty.STATUS.name()));
}
@Test(dependsOnMethods = "testNormalLiveInstanceStatus")
public void testFreezeParticipant() throws Exception {
freezeParticipant(_participants[0]);
}
// Simulates instance is restarted and the in-memory status is gone.
// When instance comes back alive, it'll reset state model, carry over
// and set current state to init state.
@Test(dependsOnMethods = "testFreezeParticipant")
public void testRestartParticipantWhenFrozen() throws Exception {
String instanceName = _participants[1].getInstanceName();
List<CurrentState> originCurStates = _accessor
.getChildValues(_keyBuilder.currentStates(instanceName, _participants[1].getSessionId()),
false);
String oldSession = _participants[1].getSessionId();
freezeParticipant(_participants[1]);
// Restart participants[1]
_participants[1].syncStop();
_participants[1] = new MockParticipantManager(ZK_ADDR, _participants[1].getClusterName(),
instanceName);
_participants[1].syncStart();
Assert.assertTrue(TestHelper.verify(() ->
_gZkClient.exists(_keyBuilder.liveInstance(instanceName).getPath()),
TestHelper.WAIT_DURATION));
LiveInstance liveInstance = _accessor.getProperty(_keyBuilder.liveInstance(instanceName));
// New live instance ephemeral node
Assert.assertEquals(liveInstance.getEphemeralOwner(), _participants[1].getSessionId());
// Status is not frozen because controller is not running, no freeze message sent.
verifyLiveInstanceStatus(_participants[1], LiveInstance.LiveInstanceStatus.NORMAL);
// Old session current state is deleted because of current state carry-over
Assert.assertTrue(TestHelper.verify(
() -> !_gZkClient.exists(_keyBuilder.currentStates(instanceName, oldSession).getPath()),
TestHelper.WAIT_DURATION));
// Current states are set to init states (OFFLINE)
List<CurrentState> curStates = _accessor
.getChildValues(_keyBuilder.currentStates(instanceName, _participants[1].getSessionId()),
false);
Assert.assertEquals(curStates.size(), 1);
Assert.assertTrue(TestHelper.verify(() -> {
for (CurrentState cs : originCurStates) {
String stateModelDefRef = cs.getStateModelDefRef();
for (String partition : cs.getPartitionStateMap().keySet()) {
StateModelDefinition stateModelDef =
_accessor.getProperty(_keyBuilder.stateModelDef(stateModelDefRef));
String initState = stateModelDef.getInitialState();
if (!initState.equals(curStates.get(0).getPartitionStateMap().get(partition))) {
return false;
}
}
}
return true;
}, TestHelper.WAIT_DURATION));
}
// Simulates session expires but in-memory status is still kept.
// No state model reset or current state carry-over
@Test(dependsOnMethods = "testRestartParticipantWhenFrozen")
public void testHandleNewSessionWhenFrozen() throws Exception {
// there are current states for the resource
Assert.assertFalse(_originCurStates.isEmpty());
ZkTestHelper.expireSession(_participants[0].getZkClient());
String currentSession = _participants[0].getSessionId();
Assert.assertFalse(_originSession.equals(currentSession));
Assert.assertTrue(TestHelper.verify(() ->
_gZkClient.exists(_keyBuilder.liveInstance(_instanceName).getPath()),
TestHelper.WAIT_DURATION));
LiveInstance liveInstance = _accessor.getProperty(_keyBuilder.liveInstance(_instanceName));
// New live instance ephemeral node with FROZEN status
Assert.assertFalse(_originSession.equals(liveInstance.getEphemeralOwner()));
Assert.assertEquals(liveInstance.getStatus(), LiveInstance.LiveInstanceStatus.FROZEN);
// New session path does not exist since no current state carry over for the current session.
Assert.assertFalse(
_gZkClient.exists(_keyBuilder.currentStates(_instanceName, currentSession).getPath()));
// Old session CS still exist.
Assert.assertTrue(
_gZkClient.exists(_keyBuilder.currentStates(_instanceName, _originSession).getPath()));
}
@Test(dependsOnMethods = "testHandleNewSessionWhenFrozen")
public void testUnfreezeParticipant() throws Exception {
Message unfreezeMessage = MessageUtil
.createStatusChangeMessage(LiveInstance.LiveInstanceStatus.FROZEN,
LiveInstance.LiveInstanceStatus.NORMAL, _manager.getInstanceName(),
_manager.getSessionId(), _instanceName, _participants[0].getSessionId());
List<PropertyKey> keys = Collections
.singletonList(_keyBuilder.message(unfreezeMessage.getTgtName(), unfreezeMessage.getId()));
boolean[] success = _accessor.createChildren(keys, Collections.singletonList(unfreezeMessage));
Assert.assertTrue(success[0]);
// Live instance status is NORMAL, but set to null value in both memory and zk.
// After live instance status is updated, the process is completed.
verifyLiveInstanceStatus(_participants[0], LiveInstance.LiveInstanceStatus.NORMAL);
// Unfreeze message is correctly deleted
Assert.assertNull(
_accessor.getProperty(_keyBuilder.message(_instanceName, unfreezeMessage.getId())));
// current state is carried over
List<CurrentState> curStates = _accessor
.getChildValues(_keyBuilder.currentStates(_instanceName, _participants[0].getSessionId()),
false);
Assert.assertFalse(curStates.isEmpty());
// The original current states are deleted.
Assert.assertFalse(
_gZkClient.exists(_keyBuilder.currentStates(_instanceName, _originSession).getPath()));
// current states should be the same as the original current states
// with CS carry-over when unfreezing
Assert.assertTrue(verifyCurrentStates(_originCurStates, curStates));
}
private void verifyLiveInstanceStatus(MockParticipantManager participant,
LiveInstance.LiveInstanceStatus status) throws Exception {
// Verify live instance status in both memory and zk
Assert.assertTrue(TestHelper.verify(() -> {
LiveInstance.LiveInstanceStatus inMemoryLiveInstanceStatus =
((DefaultMessagingService) participant.getMessagingService()).getExecutor()
.getLiveInstanceStatus();
return inMemoryLiveInstanceStatus == status;
}, TestHelper.WAIT_DURATION));
Assert.assertTrue(TestHelper.verify(() -> {
LiveInstance liveInstance =
_accessor.getProperty(_keyBuilder.liveInstance(participant.getInstanceName()));
return liveInstance.getStatus() == status;
}, TestHelper.WAIT_DURATION));
}
private boolean verifyCurrentStates(List<CurrentState> originCurStates,
List<CurrentState> curStates) {
for (CurrentState ocs : originCurStates) {
for (CurrentState cs : curStates) {
if (cs.getId().equals(ocs.getId())
&& !cs.getPartitionStateMap().equals(ocs.getPartitionStateMap())) {
return false;
}
}
}
return true;
}
private void freezeParticipant(MockParticipantManager participant) throws Exception {
Message freezeMessage = MessageUtil
.createStatusChangeMessage(LiveInstance.LiveInstanceStatus.NORMAL,
LiveInstance.LiveInstanceStatus.FROZEN, _manager.getInstanceName(),
_manager.getSessionId(), participant.getInstanceName(), participant.getSessionId());
List<PropertyKey> keys = Collections
.singletonList(_keyBuilder.message(freezeMessage.getTgtName(), freezeMessage.getId()));
boolean[] success = _accessor.createChildren(keys, Collections.singletonList(freezeMessage));
Assert.assertTrue(success[0]);
// Live instance status is frozen in both memory and zk
verifyLiveInstanceStatus(participant, LiveInstance.LiveInstanceStatus.FROZEN);
// Freeze message is correctly deleted
Assert.assertTrue(TestHelper.verify(() -> !_gZkClient.exists(
_keyBuilder.message(participant.getInstanceName(), freezeMessage.getId()).getPath()),
TestHelper.WAIT_DURATION));
}
}
| 4,243 |
999 | <reponame>aridwiprayogo/marquez<gh_stars>100-1000
package marquez;
import static org.assertj.core.api.Assertions.assertThat;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.net.http.HttpResponse;
import java.time.ZonedDateTime;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import marquez.client.models.Dataset;
import marquez.client.models.DatasetId;
import marquez.client.models.DatasetVersion;
import marquez.client.models.DbTableMeta;
import marquez.client.models.JobMeta;
import marquez.client.models.Run;
import marquez.client.models.RunMeta;
import marquez.client.models.StreamVersion;
import marquez.common.Utils;
import marquez.db.LineageTestUtils;
import marquez.service.models.LineageEvent;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@org.junit.jupiter.api.Tag("IntegrationTests")
public class DatasetIntegrationTest extends BaseIntegrationTest {
@BeforeEach
public void setup() {
createNamespace(NAMESPACE_NAME);
createSource(DB_TABLE_SOURCE_NAME);
createSource(STREAM_SOURCE_NAME);
}
@Test
public void testApp_testTags() {
DbTableMeta DB_TABLE_META =
DbTableMeta.builder()
.physicalName(DB_TABLE_PHYSICAL_NAME)
.sourceName(DB_TABLE_SOURCE_NAME)
.fields(ImmutableList.of(newFieldWith(SENSITIVE.getName()), newField()))
.tags(ImmutableSet.of(PII.getName()))
.description(DB_TABLE_DESCRIPTION)
.build();
Dataset dataset = client.createDataset(NAMESPACE_NAME, "test-dataset-tags", DB_TABLE_META);
assertThat(dataset.getFields().get(0).getTags())
.isEqualTo(ImmutableSet.of(SENSITIVE.getName()));
assertThat(dataset.getFields().get(1).getTags()).isEmpty();
assertThat(dataset.getTags()).isEqualTo(ImmutableSet.of(PII.getName()));
DbTableMeta UPDATED_META =
DbTableMeta.builder()
.physicalName(DB_TABLE_PHYSICAL_NAME)
.sourceName(DB_TABLE_SOURCE_NAME)
.fields(
ImmutableList.of(
newFieldWith(PII.getName()),
DB_TABLE_META.getFields().get(0))) // changed fields
.tags(ImmutableSet.of(SENSITIVE.getName())) // added dataset tag
.description(DB_TABLE_DESCRIPTION)
.build();
Dataset updateDataset = client.createDataset(NAMESPACE_NAME, "test-dataset-tags", UPDATED_META);
assertThat(updateDataset.getTags())
.isEqualTo(ImmutableSet.of(SENSITIVE.getName(), PII.getName()));
assertThat(updateDataset.getFields()).isEqualTo(UPDATED_META.getFields());
Dataset getDataset = client.getDataset(NAMESPACE_NAME, "test-dataset-tags");
assertThat(getDataset.getFields()).isEqualTo(UPDATED_META.getFields());
assertThat(getDataset.getTags()).isEqualTo(ImmutableSet.of(SENSITIVE.getName(), PII.getName()));
}
@Test
public void testApp_getTableVersions() {
client.createDataset(NAMESPACE_NAME, DB_TABLE_NAME, DB_TABLE_META);
ImmutableMap<String, Object> outputFacets =
ImmutableMap.of("outputFacetKey", "outputFacetValue");
ImmutableMap<String, Object> inputFacets = ImmutableMap.of("inputFacetKey", "inputFacetValue");
final LineageEvent.DatasetFacets datasetFacets =
LineageTestUtils.newDatasetFacet(
outputFacets,
LineageEvent.SchemaField.builder()
.name("firstname")
.type("string")
.description("the first name")
.build());
datasetFacets
.getDocumentation()
.setDescription(DB_TABLE_META.getDescription().orElse("the dataset documentation"));
final LineageEvent lineageEvent =
LineageEvent.builder()
.producer("testApp_getTableVersions")
.eventType("COMPLETE")
.run(
new LineageEvent.Run(
UUID.randomUUID().toString(), LineageEvent.RunFacet.builder().build()))
.job(LineageEvent.Job.builder().namespace(NAMESPACE_NAME).name(JOB_NAME).build())
.eventTime(ZonedDateTime.now())
.inputs(Collections.emptyList())
.outputs(
Collections.singletonList(
LineageEvent.Dataset.builder()
.namespace(NAMESPACE_NAME)
.name(DB_TABLE_NAME)
.facets(datasetFacets)
.build()))
.build();
final CompletableFuture<Integer> resp =
this.sendLineage(Utils.toJson(lineageEvent))
.thenApply(HttpResponse::statusCode)
.whenComplete(
(val, error) -> {
if (error != null) {
Assertions.fail("Could not complete request");
}
});
assertThat(resp.join()).isEqualTo(201);
datasetFacets.setAdditional(inputFacets);
final LineageEvent readEvent =
LineageEvent.builder()
.producer("testApp_getTableVersions")
.eventType("COMPLETE")
.run(
new LineageEvent.Run(
UUID.randomUUID().toString(), LineageEvent.RunFacet.builder().build()))
.job(LineageEvent.Job.builder().namespace(NAMESPACE_NAME).name("aReadOnlyJob").build())
.eventTime(ZonedDateTime.now())
.inputs(
Collections.singletonList(
LineageEvent.Dataset.builder()
.namespace(NAMESPACE_NAME)
.name(DB_TABLE_NAME)
.facets(datasetFacets)
.build()))
.outputs(Collections.emptyList())
.build();
final CompletableFuture<Integer> readResp =
this.sendLineage(Utils.toJson(readEvent))
.thenApply(HttpResponse::statusCode)
.whenComplete(
(val, error) -> {
if (error != null) {
Assertions.fail("Could not complete request");
}
});
assertThat(readResp.join()).isEqualTo(201);
// update dataset facet to include input and output facets
// save the expected facets as a map for comparison
datasetFacets.setAdditional(
ImmutableMap.<String, Object>builder().putAll(inputFacets).putAll(outputFacets).build());
Map<String, Object> expectedFacetsMap =
Utils.getMapper().convertValue(datasetFacets, new TypeReference<Map<String, Object>>() {});
List<DatasetVersion> versions = client.listDatasetVersions(NAMESPACE_NAME, DB_TABLE_NAME);
assertThat(versions).hasSizeGreaterThanOrEqualTo(2);
versions.forEach(
datasetVersion -> {
assertThat(datasetVersion.getId())
.isEqualTo(new DatasetId(NAMESPACE_NAME, DB_TABLE_NAME));
assertThat(datasetVersion.getName()).isEqualTo(DB_TABLE_NAME);
assertThat(datasetVersion.getCreatedAt()).isNotNull();
assertThat(datasetVersion.getNamespace()).isEqualTo(NAMESPACE_NAME);
assertThat(datasetVersion.getVersion()).isNotNull();
assertThat(datasetVersion.getDescription()).isEqualTo(DB_TABLE_META.getDescription());
});
assertThat(versions.get(0).getFacets()).isEqualTo(expectedFacetsMap);
final DatasetVersion initialDatasetVersion =
client.getDatasetVersion(
NAMESPACE_NAME, DB_TABLE_NAME, versions.get(versions.size() - 1).getVersion());
assertThat(initialDatasetVersion.getPhysicalName()).isEqualTo(DB_TABLE_META.getPhysicalName());
assertThat(initialDatasetVersion.getSourceName()).isEqualTo(DB_TABLE_META.getSourceName());
assertThat(initialDatasetVersion.getFields()).hasSameElementsAs(DB_TABLE_META.getFields());
assertThat(initialDatasetVersion.getTags()).isEqualTo(DB_TABLE_META.getTags());
assertThat(initialDatasetVersion.getCreatedByRun()).isNotPresent();
assertThat(initialDatasetVersion.hasFacets()).isFalse();
final DatasetVersion latestDatasetVersion =
client.getDatasetVersion(NAMESPACE_NAME, DB_TABLE_NAME, versions.get(0).getVersion());
assertThat(latestDatasetVersion.getCreatedByRun()).isPresent();
assertThat(latestDatasetVersion.getCreatedByRun().get().getId())
.isEqualTo(lineageEvent.getRun().getRunId());
assertThat(latestDatasetVersion.hasFacets()).isTrue();
assertThat(latestDatasetVersion.getFacets()).isEqualTo(expectedFacetsMap);
}
@Test
public void testApp_getStreamVersion() {
client.createDataset(NAMESPACE_NAME, STREAM_NAME, STREAM_META);
List<DatasetVersion> versions = client.listDatasetVersions(NAMESPACE_NAME, STREAM_NAME);
assertThat(versions).hasSizeGreaterThan(0);
DatasetVersion datasetVersion =
client.getDatasetVersion(NAMESPACE_NAME, STREAM_NAME, versions.get(0).getVersion());
assertThat(datasetVersion).isInstanceOf(StreamVersion.class);
assertThat(datasetVersion.getId()).isEqualTo(new DatasetId(NAMESPACE_NAME, STREAM_NAME));
assertThat(datasetVersion.getName()).isEqualTo(STREAM_NAME);
assertThat(datasetVersion.getCreatedAt()).isNotNull();
assertThat(datasetVersion.getNamespace()).isEqualTo(NAMESPACE_NAME);
assertThat(datasetVersion.getVersion()).isNotNull();
assertThat(datasetVersion.getPhysicalName()).isEqualTo(STREAM_META.getPhysicalName());
assertThat(datasetVersion.getSourceName()).isEqualTo(STREAM_META.getSourceName());
assertThat(datasetVersion.getDescription()).isEqualTo(STREAM_META.getDescription());
assertThat(datasetVersion.getFields()).hasSameElementsAs(STREAM_META.getFields());
assertThat(datasetVersion.getTags()).isEqualTo(STREAM_META.getTags());
assertThat(((StreamVersion) datasetVersion).getSchemaLocation())
.isEqualTo(STREAM_META.getSchemaLocation());
assertThat(datasetVersion.getCreatedByRun()).isEqualTo(Optional.empty());
}
@Test
public void testApp_getDBTableVersionWithRun() {
DbTableMeta DB_TABLE_META =
DbTableMeta.builder()
.physicalName(DB_TABLE_PHYSICAL_NAME)
.sourceName(DB_TABLE_SOURCE_NAME)
.fields(DB_TABLE_FIELDS)
.tags(DB_TABLE_TAGS)
.description(DB_TABLE_DESCRIPTION)
.build();
client.createDataset(NAMESPACE_NAME, "table1", DB_TABLE_META);
final JobMeta jobMeta =
JobMeta.builder()
.type(JOB_TYPE)
.inputs(ImmutableSet.of())
.outputs(NAMESPACE_NAME, "table1")
.location(JOB_LOCATION)
.context(JOB_CONTEXT)
.description(JOB_DESCRIPTION)
.build();
client.createJob(NAMESPACE_NAME, JOB_NAME, jobMeta);
final RunMeta runMeta = RunMeta.builder().build();
final Run run = client.createRun(NAMESPACE_NAME, JOB_NAME, runMeta);
DbTableMeta DB_TABLE_META_WITH_RUN =
DbTableMeta.builder()
.physicalName(DB_TABLE_PHYSICAL_NAME)
.sourceName(DB_TABLE_SOURCE_NAME)
.fields(DB_TABLE_FIELDS)
.tags(DB_TABLE_TAGS)
.description(DB_TABLE_DESCRIPTION)
.runId(run.getId())
.build();
client.createDataset(NAMESPACE_NAME, "table1", DB_TABLE_META_WITH_RUN);
List<DatasetVersion> versions = client.listDatasetVersions(NAMESPACE_NAME, "table1");
assertThat(versions).hasSizeGreaterThan(1);
DatasetVersion version = versions.get(0); // most recent dataset version
assertThat(version.getCreatedByRun()).isNotEqualTo(Optional.empty());
Run createdRun = version.getCreatedByRun().get();
assertThat(createdRun.getCreatedAt()).isEqualTo(run.getCreatedAt());
assertThat(createdRun.getId()).isEqualTo(run.getId());
assertThat(createdRun.getUpdatedAt()).isEqualTo(run.getUpdatedAt());
assertThat(createdRun.getDurationMs()).isEqualTo(run.getDurationMs());
assertThat(createdRun.getState()).isEqualTo(run.getState());
assertThat(createdRun.getArgs()).isEqualTo(run.getArgs());
assertThat(createdRun.getNominalStartTime()).isEqualTo(run.getNominalStartTime());
assertThat(createdRun.getNominalEndTime()).isEqualTo(run.getNominalEndTime());
}
@Test
public void testApp_notExistsDatasetName() {
Assertions.assertThrows(
Exception.class, () -> client.getDataset(NAMESPACE_NAME, "not-existing"));
}
@Test
public void testApp_notExistsDatasetVersionName() {
Assertions.assertThrows(
Exception.class,
() ->
client.getDatasetVersion(NAMESPACE_NAME, "not-existing", UUID.randomUUID().toString()));
}
@Test
public void testApp_notExistsNamespace() {
Assertions.assertThrows(
Exception.class, () -> client.getDataset("non-existing", "not-existing"));
}
@Test
public void testApp_notExistsRun() {
DbTableMeta RUN_NOT_EXISTS =
DbTableMeta.builder()
.physicalName(DB_TABLE_PHYSICAL_NAME)
.sourceName(DB_TABLE_SOURCE_NAME)
.fields(DB_TABLE_FIELDS)
.tags(DB_TABLE_TAGS)
.description(DB_TABLE_DESCRIPTION)
.runId(UUID.randomUUID().toString())
.build();
Assertions.assertThrows(
Exception.class, () -> client.createDataset(NAMESPACE_NAME, DB_TABLE_NAME, RUN_NOT_EXISTS));
}
@Test
public void testApp_notExistsSource() {
DbTableMeta RUN_NOT_EXISTS =
DbTableMeta.builder()
.physicalName(DB_TABLE_PHYSICAL_NAME)
.sourceName("not-exists")
.fields(DB_TABLE_FIELDS)
.tags(DB_TABLE_TAGS)
.description(DB_TABLE_DESCRIPTION)
.runId(UUID.randomUUID().toString())
.build();
Assertions.assertThrows(
Exception.class, () -> client.createDataset(NAMESPACE_NAME, DB_TABLE_NAME, RUN_NOT_EXISTS));
}
@Test
public void testApp_upsertDescription() {
DbTableMeta DESCRIPTION =
DbTableMeta.builder()
.physicalName(DB_TABLE_PHYSICAL_NAME)
.sourceName(DB_TABLE_SOURCE_NAME)
.fields(DB_TABLE_FIELDS)
.tags(DB_TABLE_TAGS)
.description(DB_TABLE_DESCRIPTION)
.build();
Dataset dataset = client.createDataset(NAMESPACE_NAME, DB_TABLE_NAME, DESCRIPTION);
assertThat(dataset.getDescription()).isEqualTo(DESCRIPTION.getDescription());
DbTableMeta WO_DESCRIPTION =
DbTableMeta.builder()
.physicalName(DB_TABLE_PHYSICAL_NAME)
.sourceName(DB_TABLE_SOURCE_NAME)
.fields(DB_TABLE_FIELDS)
.tags(DB_TABLE_TAGS)
.build();
Dataset dataset2 = client.createDataset(NAMESPACE_NAME, DB_TABLE_NAME, WO_DESCRIPTION);
// Description stays
assertThat(dataset2.getDescription()).isEqualTo(DESCRIPTION.getDescription());
}
}
| 6,587 |
546 | #ifndef MSNHYOLOINFO_H
#define MSNHYOLOINFO_H
#include <vector>
#include "Msnhnet/utils/MsnhTypes.h"
#include "Msnhnet/utils/MsnhException.h"
namespace Msnhnet
{
struct YoloInfo
{
YoloInfo(const int& outHeight, const int& outWidth, const int& outChannel)
:outHeight(outHeight),outWidth(outWidth),outChannel(outChannel){}
int outHeight = 0;
int outWidth = 0;
int outChannel = 0;
int getOutputNum(){return outChannel*outWidth*outHeight;}
};
struct YoloBox
{
Box::XYWHBox xywhBox;
float conf = 0;
float bestClsConf = 0;
int bestClsIdx = 0;
float angle = 0;
float regAngle = 0;
std::vector<float> classesVal;
std::vector<float> angleSplits;
};
enum YoloType
{
YoloV3=30,
YoloV3_ANGLE,
YoloV3_GAUSSIAN,
YoloV4 = 40,
YoloV4_ANGLE,
YoloV5 = 50,
YoloV5_ANGLE,
};
inline YoloType getYoloTypeFromStr(const std::string &str)
{
if(str == "yolov3")
{
return YoloType::YoloV3;
}
else if(str == "yolov3Angle")
{
return YoloType::YoloV3_ANGLE;
}
else if(str == "yolov3Gaussian")
{
return YoloType::YoloV3_GAUSSIAN;
}
else if(str == "yolov4")
{
return YoloType::YoloV4;
}
else if(str == "yolov4Angle")
{
return YoloType::YoloV4_ANGLE;
}
else if(str == "yolov5")
{
return YoloType::YoloV5;
}
else if(str == "yolov5Angle")
{
return YoloType::YoloV5_ANGLE;
}
else
{
throw Msnhnet::Exception(1,str + " yolo type is not supported!",__FILE__,__LINE__,__FUNCTION__);
}
}
inline std::string getStrFromYoloType(const YoloType &type)
{
switch (type) {
case YoloV3:
return "yolov3";
case YoloV3_ANGLE:
return "yolov3Angle";
case YoloV3_GAUSSIAN:
return "yolov3Gaussian";
case YoloV4:
return "yolov4";
case YoloV4_ANGLE:
return "yolov4Angle";
case YoloV5:
return "yolov5";
case YoloV5_ANGLE:
return "yolov5Angle";
default:
throw Msnhnet::Exception(1,std::to_string((int)type) + " yolo type is not supported!",__FILE__,__LINE__,__FUNCTION__);
}
}
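// Illustrative sketch (not part of the original header): round-trips a yolo
// type through the two helpers above. Only names defined in this file are
// assumed; the function itself is hypothetical.
inline void yoloTypeRoundTripExample()
{
    YoloType type = getYoloTypeFromStr("yolov4Angle");   // -> YoloType::YoloV4_ANGLE
    std::string str = getStrFromYoloType(type);          // -> "yolov4Angle"
    (void)str;
}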
}
#endif
| 1,169 |
3,562 | <reponame>kaiker19/incubator-doris<filename>be/src/glibc-compatibility/FastMemcpy.c
//=====================================================================
//
// FastMemcpy.c - <EMAIL>, 2015
//
// feature:
// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc4.9)
//
//=====================================================================
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#if (defined(_WIN32) || defined(WIN32))
#include <windows.h>
#include <mmsystem.h>
#ifdef _MSC_VER
#pragma comment(lib, "winmm.lib")
#endif
#elif defined(__unix)
#include <sys/time.h>
#include <unistd.h>
#else
#error it can only be compiled under windows or unix
#endif
#include "FastMemcpy.h"
unsigned int gettime()
{
#if (defined(_WIN32) || defined(WIN32))
return timeGetTime();
#else
static struct timezone tz={ 0,0 };
struct timeval time;
gettimeofday(&time,&tz);
return (time.tv_sec * 1000 + time.tv_usec / 1000);
#endif
}
void sleepms(unsigned int millisec)
{
#if defined(_WIN32) || defined(WIN32)
Sleep(millisec);
#else
usleep(millisec * 1000);
#endif
}
void benchmark(int dstalign, int srcalign, size_t size, int times)
{
char *DATA1 = (char*)malloc(size + 64);
char *DATA2 = (char*)malloc(size + 64);
size_t LINEAR1 = ((size_t)DATA1);
size_t LINEAR2 = ((size_t)DATA2);
char *ALIGN1 = (char*)(((64 - (LINEAR1 & 63)) & 63) + LINEAR1);
char *ALIGN2 = (char*)(((64 - (LINEAR2 & 63)) & 63) + LINEAR2);
char *dst = (dstalign)? ALIGN1 : (ALIGN1 + 1);
char *src = (srcalign)? ALIGN2 : (ALIGN2 + 3);
unsigned int t1, t2;
int k;
sleepms(100);
t1 = gettime();
for (k = times; k > 0; k--) {
memcpy(dst, src, size);
}
t1 = gettime() - t1;
sleepms(100);
t2 = gettime();
for (k = times; k > 0; k--) {
memcpy_fast(dst, src, size);
}
t2 = gettime() - t2;
free(DATA1);
free(DATA2);
printf("result(dst %s, src %s): memcpy_fast=%dms memcpy=%d ms\n",
dstalign? "aligned" : "unalign",
srcalign? "aligned" : "unalign", (int)t2, (int)t1);
}
void bench(int copysize, int times)
{
printf("benchmark(size=%d bytes, times=%d):\n", copysize, times);
benchmark(1, 1, copysize, times);
benchmark(1, 0, copysize, times);
benchmark(0, 1, copysize, times);
benchmark(0, 0, copysize, times);
printf("\n");
}
void random_bench(int maxsize, int times)
{
static char A[11 * 1024 * 1024 + 2];
static char B[11 * 1024 * 1024 + 2];
static int random_offsets[0x10000];
static int random_sizes[0x8000];
unsigned int i, p1, p2;
unsigned int t1, t2;
for (i = 0; i < 0x10000; i++) { // generate random offsets
random_offsets[i] = rand() % (10 * 1024 * 1024 + 1);
}
for (i = 0; i < 0x8000; i++) { // generate random sizes
random_sizes[i] = 1 + rand() % maxsize;
}
sleepms(100);
t1 = gettime();
for (p1 = 0, p2 = 0, i = 0; i < times; i++) {
int offset1 = random_offsets[(p1++) & 0xffff];
int offset2 = random_offsets[(p1++) & 0xffff];
int size = random_sizes[(p2++) & 0x7fff];
memcpy(A + offset1, B + offset2, size);
}
t1 = gettime() - t1;
sleepms(100);
t2 = gettime();
for (p1 = 0, p2 = 0, i = 0; i < times; i++) {
int offset1 = random_offsets[(p1++) & 0xffff];
int offset2 = random_offsets[(p1++) & 0xffff];
int size = random_sizes[(p2++) & 0x7fff];
memcpy_fast(A + offset1, B + offset2, size);
}
t2 = gettime() - t2;
printf("benchmark random access:\n");
printf("memcpy_fast=%dms memcpy=%dms\n\n", (int)t2, (int)t1);
}
#ifdef _MSC_VER
#pragma comment(lib, "winmm.lib")
#endif
int main(void)
{
bench(32, 0x1000000);
bench(64, 0x1000000);
bench(512, 0x800000);
bench(1024, 0x400000);
bench(4096, 0x80000);
bench(8192, 0x40000);
bench(1024 * 1024 * 1, 0x800);
bench(1024 * 1024 * 4, 0x200);
bench(1024 * 1024 * 8, 0x100);
random_bench(2048, 8000000);
return 0;
}
/*
benchmark(size=32 bytes, times=16777216):
result(dst aligned, src aligned): memcpy_fast=78ms memcpy=260 ms
result(dst aligned, src unalign): memcpy_fast=78ms memcpy=250 ms
result(dst unalign, src aligned): memcpy_fast=78ms memcpy=266 ms
result(dst unalign, src unalign): memcpy_fast=78ms memcpy=234 ms
benchmark(size=64 bytes, times=16777216):
result(dst aligned, src aligned): memcpy_fast=109ms memcpy=281 ms
result(dst aligned, src unalign): memcpy_fast=109ms memcpy=328 ms
result(dst unalign, src aligned): memcpy_fast=109ms memcpy=343 ms
result(dst unalign, src unalign): memcpy_fast=93ms memcpy=344 ms
benchmark(size=512 bytes, times=8388608):
result(dst aligned, src aligned): memcpy_fast=125ms memcpy=218 ms
result(dst aligned, src unalign): memcpy_fast=156ms memcpy=484 ms
result(dst unalign, src aligned): memcpy_fast=172ms memcpy=546 ms
result(dst unalign, src unalign): memcpy_fast=172ms memcpy=515 ms
benchmark(size=1024 bytes, times=4194304):
result(dst aligned, src aligned): memcpy_fast=109ms memcpy=172 ms
result(dst aligned, src unalign): memcpy_fast=187ms memcpy=453 ms
result(dst unalign, src aligned): memcpy_fast=172ms memcpy=437 ms
result(dst unalign, src unalign): memcpy_fast=156ms memcpy=452 ms
benchmark(size=4096 bytes, times=524288):
result(dst aligned, src aligned): memcpy_fast=62ms memcpy=78 ms
result(dst aligned, src unalign): memcpy_fast=109ms memcpy=202 ms
result(dst unalign, src aligned): memcpy_fast=94ms memcpy=203 ms
result(dst unalign, src unalign): memcpy_fast=110ms memcpy=218 ms
benchmark(size=8192 bytes, times=262144):
result(dst aligned, src aligned): memcpy_fast=62ms memcpy=78 ms
result(dst aligned, src unalign): memcpy_fast=78ms memcpy=202 ms
result(dst unalign, src aligned): memcpy_fast=78ms memcpy=203 ms
result(dst unalign, src unalign): memcpy_fast=94ms memcpy=203 ms
benchmark(size=1048576 bytes, times=2048):
result(dst aligned, src aligned): memcpy_fast=203ms memcpy=191 ms
result(dst aligned, src unalign): memcpy_fast=219ms memcpy=281 ms
result(dst unalign, src aligned): memcpy_fast=218ms memcpy=328 ms
result(dst unalign, src unalign): memcpy_fast=218ms memcpy=312 ms
benchmark(size=4194304 bytes, times=512):
result(dst aligned, src aligned): memcpy_fast=312ms memcpy=406 ms
result(dst aligned, src unalign): memcpy_fast=296ms memcpy=421 ms
result(dst unalign, src aligned): memcpy_fast=312ms memcpy=468 ms
result(dst unalign, src unalign): memcpy_fast=297ms memcpy=452 ms
benchmark(size=8388608 bytes, times=256):
result(dst aligned, src aligned): memcpy_fast=281ms memcpy=452 ms
result(dst aligned, src unalign): memcpy_fast=280ms memcpy=468 ms
result(dst unalign, src aligned): memcpy_fast=298ms memcpy=514 ms
result(dst unalign, src unalign): memcpy_fast=344ms memcpy=472 ms
benchmark random access:
memcpy_fast=515ms memcpy=1014ms
*/
| 2,695 |
2,151 | [
{
"cmd": [],
"name": "isolated_tests",
"~followup_annotations": [
"@@@STEP_LOG_LINE@details@isolated_tests: {'base_unittests': 'ffffffffffffffffffffffffffffffffffffffff'}@@@",
"@@@STEP_LOG_END@details@@@"
]
},
{
"name": "$result",
"recipe_result": null,
"status_code": 0
}
] | 152 |
852 | #include "RecoLuminosity/LumiProducer/interface/LumiCorrectionParamRcd.h"
#include "FWCore/Framework/interface/eventsetuprecord_registration_macro.h"
EVENTSETUP_RECORD_REG(LumiCorrectionParamRcd);
| 70 |
428 | /*
* Copyright (C) 2015 The Libphonenumber Authors
* Copyright (C) 2017 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.michaelrocks.libphonenumber.android;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import io.michaelrocks.libphonenumber.android.Phonemetadata.PhoneMetadata;
/**
* Implementation of {@link MetadataSource} that reads from multiple resource files.
*/
final class MultiFileMetadataSourceImpl implements MetadataSource {
// The prefix of the binary files containing phone number metadata for different regions.
// This enables us to set up with different metadata, such as for testing.
private final String phoneNumberMetadataFilePrefix;
// The prefix of the metadata files from which alternate format data is loaded.
private final String alternateFormatsFilePrefix;
// The prefix of the metadata files from which short number data is loaded.
private final String shortNumberFilePrefix;
// The {@link MetadataManager} used to load metadata.
private final MetadataManager metadataManager;
// A mapping from a region code to the phone number metadata for that region code.
// Unlike the mappings for alternate formats and short number metadata, the phone number metadata
// is loaded from a non-statically determined file prefix; therefore this map is bound to the
// instance and not static.
private final ConcurrentHashMap<String, PhoneMetadata> geographicalRegions =
new ConcurrentHashMap<String, PhoneMetadata>();
// A mapping from a country calling code for a non-geographical entity to the phone number
// metadata for that country calling code. Examples of the country calling codes include 800
// (International Toll Free Service) and 808 (International Shared Cost Service).
// Unlike the mappings for alternate formats and short number metadata, the phone number metadata
// is loaded from a non-statically determined file prefix; therefore this map is bound to the
// instance and not static.
private final ConcurrentHashMap<Integer, PhoneMetadata> nonGeographicalRegions =
new ConcurrentHashMap<Integer, PhoneMetadata>();
MultiFileMetadataSourceImpl(String phoneNumberMetadataFilePrefix, String alternateFormatsFilePrefix,
String shortNumberFilePrefix, MetadataLoader metadataLoader) {
this.phoneNumberMetadataFilePrefix = phoneNumberMetadataFilePrefix;
this.alternateFormatsFilePrefix = alternateFormatsFilePrefix;
this.shortNumberFilePrefix = shortNumberFilePrefix;
this.metadataManager = new MetadataManager(metadataLoader);
}
// It is assumed that metadataLoader is not null. Checks should happen before passing it in here.
MultiFileMetadataSourceImpl(MetadataLoader metadataLoader) {
this(MetadataManager.MULTI_FILE_PHONE_NUMBER_METADATA_FILE_PREFIX, MetadataManager.ALTERNATE_FORMATS_FILE_PREFIX,
MetadataManager.SHORT_NUMBER_METADATA_FILE_PREFIX, metadataLoader);
}
@Override
public PhoneMetadata getMetadataForRegion(String regionCode) {
return metadataManager.getMetadataFromMultiFilePrefix(regionCode, geographicalRegions,
phoneNumberMetadataFilePrefix);
}
@Override
public PhoneMetadata getMetadataForNonGeographicalRegion(int countryCallingCode) {
if (!isNonGeographical(countryCallingCode)) {
// The given country calling code was for a geographical region.
return null;
}
return metadataManager.getMetadataFromMultiFilePrefix(countryCallingCode, nonGeographicalRegions,
phoneNumberMetadataFilePrefix);
}
@Override
public PhoneMetadata getAlternateFormatsForCountry(final int countryCallingCode) {
return metadataManager.getAlternateFormatsForCountry(countryCallingCode, alternateFormatsFilePrefix);
}
@Override
public PhoneMetadata getShortNumberMetadataForRegion(final String regionCode) {
return metadataManager.getShortNumberMetadataForRegion(regionCode, shortNumberFilePrefix);
}
// A country calling code is non-geographical if it only maps to the non-geographical region code,
// i.e. "001".
private boolean isNonGeographical(int countryCallingCode) {
List<String> regionCodes =
CountryCodeToRegionCodeMap.getCountryCodeToRegionCodeMap().get(countryCallingCode);
return (regionCodes.size() == 1
&& PhoneNumberUtil.REGION_CODE_FOR_NON_GEO_ENTITY.equals(regionCodes.get(0)));
}
}
| 1,341 |
2,643 | #ifndef MM_CHANNEL_LIMIT_POLICY
#define MM_CHANNEL_LIMIT_POLICY
typedef enum {
MM_CHANNEL_UNLIMITED,
MM_CHANNEL_LIMIT_HARD,
MM_CHANNEL_LIMIT_SOFT,
} mm_channel_limit_policy_t;
#endif /* MM_CHANNEL_LIMIT_POLICY */
| 115 |
1,473 | /*
* Copyright 2018 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.navercorp.pinpoint.plugin.activemq.client.util;
import org.apache.activemq.ActiveMQSession;
import org.apache.activemq.advisory.ProducerEvent;
import org.apache.activemq.advisory.ProducerEventSource;
import org.apache.activemq.advisory.ProducerListener;
import javax.jms.Destination;
import javax.jms.MessageProducer;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
* @author <NAME>
*/
public class MessageProducerBuilder {
private final ActiveMQSession session;
private final Destination destination;
private boolean waitTillStarted = false;
public MessageProducerBuilder(ActiveMQSession session, Destination destination) {
this.session = session;
this.destination = destination;
}
public MessageProducerBuilder waitTillStarted() {
this.waitTillStarted = true;
return this;
}
public MessageProducer build() throws Exception {
if (waitTillStarted) {
ProducerEventSource producerEventSource = new ProducerEventSource(session.getConnection(), destination);
final CountDownLatch latch = new CountDownLatch(1);
producerEventSource.setProducerListener(new ProducerListener() {
@Override
public void onProducerEvent(ProducerEvent event) {
latch.countDown();
}
});
MessageProducer producer = null;
try {
producerEventSource.start();
producer = this.session.createProducer(this.destination);
if (!latch.await(5L, TimeUnit.SECONDS)) {
throw new TimeoutException("Timed out waiting for MessageProducer start event.");
}
} finally {
producerEventSource.stop();
}
return producer;
} else {
return this.session.createProducer(this.destination);
}
}
}
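// Illustrative usage sketch (not part of the original file); assumes an
// already-started ActiveMQSession and a Destination from the surrounding test:
//
//   MessageProducer producer = new MessageProducerBuilder(session, destination)
//       .waitTillStarted()
//       .build();
//
// waitTillStarted() makes build() block (up to 5 seconds) until the broker
// reports the producer via an advisory ProducerEvent, which keeps tests from
// racing ahead of producer registration.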
| 971 |
2,816 | /*!
* mpi.h - multi-precision integers for libtorsion
* Copyright (c) 2020, <NAME> (MIT License).
* https://github.com/bcoin-org/libtorsion
*
* A from-scratch reimplementation of GMP.
*/
#ifndef _TORSION_MPI_H
#define _TORSION_MPI_H
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <torsion/common.h>
/*
* Symbol Aliases
*/
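/*
 * Each public mpn_/mpz_ entry point below is remapped onto a __torsion_-prefixed
 * symbol. A plausible reading (an assumption of this note, not stated in the
 * header itself): the prefix keeps this from-scratch reimplementation from
 * colliding with the real GMP's symbols if both end up in the same link.
 */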
#define mp_alloc_limbs __torsion_mp_alloc_limbs
#define mp_realloc_limbs __torsion_mp_realloc_limbs
#define mp_free_limbs __torsion_mp_free_limbs
#define mpn_zero __torsion_mpn_zero
#define mpn_cleanse __torsion_mpn_cleanse
#define mpn_set_1 __torsion_mpn_set_1
#define mpn_copyi __torsion_mpn_copyi
#define mpn_copyd __torsion_mpn_copyd
#define mpn_zero_p __torsion_mpn_zero_p
#define mpn_cmp __torsion_mpn_cmp
#define mpn_add_1 __torsion_mpn_add_1
#define mpn_add_n __torsion_mpn_add_n
#define mpn_add __torsion_mpn_add
#define mpn_sub_1 __torsion_mpn_sub_1
#define mpn_sub_n __torsion_mpn_sub_n
#define mpn_sub __torsion_mpn_sub
#define mpn_mul_1 __torsion_mpn_mul_1
#define mpn_addmul_1 __torsion_mpn_addmul_1
#define mpn_submul_1 __torsion_mpn_submul_1
#define mpn_mul_n __torsion_mpn_mul_n
#define mpn_mul __torsion_mpn_mul
#define mpn_sqr __torsion_mpn_sqr
#define mpn_mulshift __torsion_mpn_mulshift
#define mpn_reduce_weak __torsion_mpn_reduce_weak
#define mpn_barrett __torsion_mpn_barrett
#define mpn_reduce __torsion_mpn_reduce
#define mpn_mont __torsion_mpn_mont
#define mpn_montmul __torsion_mpn_montmul
#define mpn_montmul_var __torsion_mpn_montmul_var
#define mpn_divmod_1 __torsion_mpn_divmod_1
#define mpn_div_1 __torsion_mpn_div_1
#define mpn_mod_1 __torsion_mpn_mod_1
#define mpn_divmod __torsion_mpn_divmod
#define mpn_div __torsion_mpn_div
#define mpn_mod __torsion_mpn_mod
#define mpn_divexact_1 __torsion_mpn_divexact_1
#define mpn_divexact __torsion_mpn_divexact
#define mpn_divround_1 __torsion_mpn_divround_1
#define mpn_divround __torsion_mpn_divround
#define mpn_and_n __torsion_mpn_and_n
#define mpn_ior_n __torsion_mpn_ior_n
#define mpn_xor_n __torsion_mpn_xor_n
#define mpn_andn_n __torsion_mpn_andn_n
#define mpn_iorn_n __torsion_mpn_iorn_n
#define mpn_nand_n __torsion_mpn_nand_n
#define mpn_nior_n __torsion_mpn_nior_n
#define mpn_nxor_n __torsion_mpn_nxor_n
#define mpn_com __torsion_mpn_com
#define mpn_lshift __torsion_mpn_lshift
#define mpn_rshift __torsion_mpn_rshift
#define mpn_getbit __torsion_mpn_getbit
#define mpn_getbits __torsion_mpn_getbits
#define mpn_tstbit __torsion_mpn_tstbit
#define mpn_setbit __torsion_mpn_setbit
#define mpn_clrbit __torsion_mpn_clrbit
#define mpn_combit __torsion_mpn_combit
#define mpn_scan0 __torsion_mpn_scan0
#define mpn_scan1 __torsion_mpn_scan1
#define mpn_popcount __torsion_mpn_popcount
#define mpn_hamdist __torsion_mpn_hamdist
#define mpn_mask __torsion_mpn_mask
#define mpn_neg __torsion_mpn_neg
#define mpn_gcd __torsion_mpn_gcd
#define mpn_gcd_1 __torsion_mpn_gcd_1
#define mpn_invert __torsion_mpn_invert
#define mpn_invert_n __torsion_mpn_invert_n
#define mpn_jacobi __torsion_mpn_jacobi
#define mpn_jacobi_n __torsion_mpn_jacobi_n
#define mpn_powm __torsion_mpn_powm
#define mpn_sec_powm __torsion_mpn_sec_powm
#define mpn_strip __torsion_mpn_strip
#define mpn_odd_p __torsion_mpn_odd_p
#define mpn_even_p __torsion_mpn_even_p
#define mpn_ctz __torsion_mpn_ctz
#define mpn_bitlen __torsion_mpn_bitlen
#define mpn_bytelen __torsion_mpn_bytelen
#define mpn_sizeinbase __torsion_mpn_sizeinbase
#define mpn_select __torsion_mpn_select
#define mpn_select_zero __torsion_mpn_select_zero
#define mpn_sec_zero_p __torsion_mpn_sec_zero_p
#define mpn_sec_equal_p __torsion_mpn_sec_equal_p
#define mpn_sec_lt_p __torsion_mpn_sec_lt_p
#define mpn_sec_lte_p __torsion_mpn_sec_lte_p
#define mpn_sec_gt_p __torsion_mpn_sec_gt_p
#define mpn_sec_gte_p __torsion_mpn_sec_gte_p
#define mpn_sec_cmp __torsion_mpn_sec_cmp
#define mpn_import __torsion_mpn_import
#define mpn_export __torsion_mpn_export
#define mpn_set_str __torsion_mpn_set_str
#define mpn_get_str __torsion_mpn_get_str
#define mpn_print __torsion_mpn_print
#define mpn_random __torsion_mpn_random
#define mpz_init __torsion_mpz_init
#define mpz_init2 __torsion_mpz_init2
#define mpz_init_set __torsion_mpz_init_set
#define mpz_init_set_ui __torsion_mpz_init_set_ui
#define mpz_init_set_si __torsion_mpz_init_set_si
#define mpz_init_set_str __torsion_mpz_init_set_str
#define mpz_clear __torsion_mpz_clear
#define mpz_cleanse __torsion_mpz_cleanse
#define mpz_set __torsion_mpz_set
#define mpz_roset __torsion_mpz_roset
#define mpz_roinit_n __torsion_mpz_roinit_n
#define mpz_set_ui __torsion_mpz_set_ui
#define mpz_set_si __torsion_mpz_set_si
#define mpz_get_ui __torsion_mpz_get_ui
#define mpz_get_si __torsion_mpz_get_si
#define mpz_sgn __torsion_mpz_sgn
#define mpz_cmp __torsion_mpz_cmp
#define mpz_cmp_ui __torsion_mpz_cmp_ui
#define mpz_cmp_si __torsion_mpz_cmp_si
#define mpz_cmpabs __torsion_mpz_cmpabs
#define mpz_cmpabs_ui __torsion_mpz_cmpabs_ui
#define mpz_cmpabs_si __torsion_mpz_cmpabs_si
#define mpz_add __torsion_mpz_add
#define mpz_add_ui __torsion_mpz_add_ui
#define mpz_add_si __torsion_mpz_add_si
#define mpz_sub __torsion_mpz_sub
#define mpz_sub_ui __torsion_mpz_sub_ui
#define mpz_sub_si __torsion_mpz_sub_si
#define mpz_ui_sub __torsion_mpz_ui_sub
#define mpz_si_sub __torsion_mpz_si_sub
#define mpz_mul __torsion_mpz_mul
#define mpz_mul_ui __torsion_mpz_mul_ui
#define mpz_mul_si __torsion_mpz_mul_si
#define mpz_sqr __torsion_mpz_sqr
#define mpz_addmul __torsion_mpz_addmul
#define mpz_addmul_ui __torsion_mpz_addmul_ui
#define mpz_addmul_si __torsion_mpz_addmul_si
#define mpz_submul __torsion_mpz_submul
#define mpz_submul_ui __torsion_mpz_submul_ui
#define mpz_submul_si __torsion_mpz_submul_si
#define mpz_mulshift __torsion_mpz_mulshift
#define mpz_quorem __torsion_mpz_quorem
#define mpz_quo __torsion_mpz_quo
#define mpz_rem __torsion_mpz_rem
#define mpz_quo_ui __torsion_mpz_quo_ui
#define mpz_rem_ui __torsion_mpz_rem_ui
#define mpz_quo_si __torsion_mpz_quo_si
#define mpz_rem_si __torsion_mpz_rem_si
#define mpz_divmod __torsion_mpz_divmod
#define mpz_div __torsion_mpz_div
#define mpz_mod __torsion_mpz_mod
#define mpz_div_ui __torsion_mpz_div_ui
#define mpz_mod_ui __torsion_mpz_mod_ui
#define mpz_div_si __torsion_mpz_div_si
#define mpz_mod_si __torsion_mpz_mod_si
#define mpz_divexact __torsion_mpz_divexact
#define mpz_divexact_ui __torsion_mpz_divexact_ui
#define mpz_divexact_si __torsion_mpz_divexact_si
#define mpz_divround __torsion_mpz_divround
#define mpz_divround_ui __torsion_mpz_divround_ui
#define mpz_divround_si __torsion_mpz_divround_si
#define mpz_divisible_p __torsion_mpz_divisible_p
#define mpz_divisible_ui_p __torsion_mpz_divisible_ui_p
#define mpz_divisible_2exp_p __torsion_mpz_divisible_2exp_p
#define mpz_congruent_p __torsion_mpz_congruent_p
#define mpz_congruent_ui_p __torsion_mpz_congruent_ui_p
#define mpz_congruent_2exp_p __torsion_mpz_congruent_2exp_p
#define mpz_pow_ui __torsion_mpz_pow_ui
#define mpz_ui_pow_ui __torsion_mpz_ui_pow_ui
#define mpz_rootrem __torsion_mpz_rootrem
#define mpz_root __torsion_mpz_root
#define mpz_perfect_power_p __torsion_mpz_perfect_power_p
#define mpz_sqrtrem __torsion_mpz_sqrtrem
#define mpz_sqrt __torsion_mpz_sqrt
#define mpz_perfect_square_p __torsion_mpz_perfect_square_p
#define mpz_and __torsion_mpz_and
#define mpz_and_ui __torsion_mpz_and_ui
#define mpz_and_si __torsion_mpz_and_si
#define mpz_ior __torsion_mpz_ior
#define mpz_ior_ui __torsion_mpz_ior_ui
#define mpz_ior_si __torsion_mpz_ior_si
#define mpz_xor __torsion_mpz_xor
#define mpz_xor_ui __torsion_mpz_xor_ui
#define mpz_xor_si __torsion_mpz_xor_si
#define mpz_com __torsion_mpz_com
#define mpz_mul_2exp __torsion_mpz_mul_2exp
#define mpz_quo_2exp __torsion_mpz_quo_2exp
#define mpz_rem_2exp __torsion_mpz_rem_2exp
#define mpz_div_2exp __torsion_mpz_div_2exp
#define mpz_mod_2exp __torsion_mpz_mod_2exp
#define mpz_tstbit __torsion_mpz_tstbit
#define mpz_setbit __torsion_mpz_setbit
#define mpz_clrbit __torsion_mpz_clrbit
#define mpz_combit __torsion_mpz_combit
#define mpz_scan0 __torsion_mpz_scan0
#define mpz_scan1 __torsion_mpz_scan1
#define mpz_popcount __torsion_mpz_popcount
#define mpz_hamdist __torsion_mpz_hamdist
#define mpz_abs __torsion_mpz_abs
#define mpz_neg __torsion_mpz_neg
#define mpz_gcd __torsion_mpz_gcd
#define mpz_gcd_ui __torsion_mpz_gcd_ui
#define mpz_lcm __torsion_mpz_lcm
#define mpz_lcm_ui __torsion_mpz_lcm_ui
#define mpz_gcdext __torsion_mpz_gcdext
#define mpz_invert __torsion_mpz_invert
#define mpz_legendre __torsion_mpz_legendre
#define mpz_jacobi __torsion_mpz_jacobi
#define mpz_kronecker __torsion_mpz_kronecker
#define mpz_kronecker_ui __torsion_mpz_kronecker_ui
#define mpz_kronecker_si __torsion_mpz_kronecker_si
#define mpz_ui_kronecker __torsion_mpz_ui_kronecker
#define mpz_si_kronecker __torsion_mpz_si_kronecker
#define mpz_powm __torsion_mpz_powm
#define mpz_powm_ui __torsion_mpz_powm_ui
#define mpz_powm_sec __torsion_mpz_powm_sec
#define mpz_sqrtm __torsion_mpz_sqrtm
#define mpz_sqrtpq __torsion_mpz_sqrtpq
#define mpz_remove __torsion_mpz_remove
#define mpz_fac_ui __torsion_mpz_fac_ui
#define mpz_2fac_ui __torsion_mpz_2fac_ui
#define mpz_mfac_uiui __torsion_mpz_mfac_uiui
#define mpz_primorial_ui __torsion_mpz_primorial_ui
#define mpz_bin_ui __torsion_mpz_bin_ui
#define mpz_bin_uiui __torsion_mpz_bin_uiui
#define mpz_fib_ui __torsion_mpz_fib_ui
#define mpz_fib2_ui __torsion_mpz_fib2_ui
#define mpz_lucnum_ui __torsion_mpz_lucnum_ui
#define mpz_lucnum2_ui __torsion_mpz_lucnum2_ui
#define mpz_mr_prime_p __torsion_mpz_mr_prime_p
#define mpz_lucas_prime_p __torsion_mpz_lucas_prime_p
#define mpz_probab_prime_p __torsion_mpz_probab_prime_p
#define mpz_randprime __torsion_mpz_randprime
#define mpz_nextprime __torsion_mpz_nextprime
#define mpz_findprime __torsion_mpz_findprime
#define mpz_fits_ui_p __torsion_mpz_fits_ui_p
#define mpz_fits_si_p __torsion_mpz_fits_si_p
#define mpz_odd_p __torsion_mpz_odd_p
#define mpz_even_p __torsion_mpz_even_p
#define mpz_ctz __torsion_mpz_ctz
#define mpz_bitlen __torsion_mpz_bitlen
#define mpz_bytelen __torsion_mpz_bytelen
#define mpz_sizeinbase __torsion_mpz_sizeinbase
#define mpz_swap __torsion_mpz_swap
#define _mpz_realloc __torsion__mpz_realloc
#define mpz_realloc2 __torsion_mpz_realloc2
#define mpz_getlimbn __torsion_mpz_getlimbn
#define mpz_size __torsion_mpz_size
#define mpz_limbs_read __torsion_mpz_limbs_read
#define mpz_limbs_write __torsion_mpz_limbs_write
#define mpz_limbs_modify __torsion_mpz_limbs_modify
#define mpz_limbs_finish __torsion_mpz_limbs_finish
#define mpz_import __torsion_mpz_import
#define mpz_export __torsion_mpz_export
#define mpz_set_str __torsion_mpz_set_str
#define mpz_get_str __torsion_mpz_get_str
#define mpz_print __torsion_mpz_print
#define mpz_urandomb __torsion_mpz_urandomb
#define mpz_urandomm __torsion_mpz_urandomm
#define test_mpi_internal __torsion_test_mpi_internal
#define bench_mpi_internal __torsion_bench_mpi_internal
/*
* Types
*/
#if defined(UINTPTR_MAX) && defined(UINT64_MAX)
/* Check size of uintptr_t if available. */
# if UINTPTR_MAX == UINT64_MAX
# define MP_HAVE_64BIT
# endif
#endif
#if defined(MP_HAVE_64BIT)
typedef uint64_t mp_limb_t;
typedef int64_t mp_long_t;
# define MP_LIMB_BITS 64
# define MP_LIMB_BYTES 8
# define MP_LIMB_C UINT64_C
# define MP_LIMB_MAX UINT64_MAX
# define MP_LONG_C INT64_C
# define MP_LONG_MIN INT64_MIN
# define MP_LONG_MAX INT64_MAX
#else
typedef uint32_t mp_limb_t;
typedef int32_t mp_long_t;
# define MP_LIMB_BITS 32
# define MP_LIMB_BYTES 4
# define MP_LIMB_C UINT32_C
# define MP_LIMB_MAX UINT32_MAX
# define MP_LONG_C INT32_C
# define MP_LONG_MIN INT32_MIN
# define MP_LONG_MAX INT32_MAX
#endif
typedef long mp_size_t;
typedef long mp_bits_t;
#define MP_SIZE_C(x) x ## L
#define MP_SIZE_MIN LONG_MIN
#define MP_SIZE_MAX LONG_MAX
#define MP_BITS_C(x) x ## L
#define MP_BITS_MIN LONG_MIN
#define MP_BITS_MAX LONG_MAX
typedef mp_bits_t mp_bitcnt_t; /* compat */
#define MP_LIMB_HI (MP_LIMB_C(1) << (MP_LIMB_BITS - 1))
#define MP_MASK(bits) ((MP_LIMB_C(1) << (bits)) - 1)
#define MP_LOW_BITS (MP_LIMB_BITS / 2)
#define MP_LOW_MASK (MP_LIMB_MAX >> MP_LOW_BITS)
struct mpz_s {
mp_limb_t *limbs;
mp_size_t alloc;
mp_size_t size;
};
typedef struct mpz_s mpz_t[1];
typedef int mp_puts_f(const char *s);
typedef void mp_rng_f(void *out, size_t size, void *arg);
typedef void mp_start_f(uint64_t *start, const char *name);
typedef void mp_end_f(uint64_t *start, uint64_t ops);
/*
* Definitions
*/
#define MP_SLIDE_WIDTH 4
#define MP_SLIDE_SIZE (1 << (MP_SLIDE_WIDTH - 1))
#define MP_FIXED_WIDTH 4
#define MP_FIXED_SIZE (1 << MP_FIXED_WIDTH)
/*
* Itches
*/
#define MPN_SQR_ITCH(n) (2 * (n))
#define MPN_MULSHIFT_ITCH(n) (2 * (n))
#define MPN_REDUCE_WEAK_ITCH(n) (n)
#define MPN_BARRETT_ITCH(shift) ((shift) + 1)
#define MPN_REDUCE_ITCH(n, shift) (1 + (shift) + ((shift) - (n) + 1))
#define MPN_MONT_ITCH(n) (2 * (n) + 1)
#define MPN_MONTMUL_ITCH(n) (2 * (n))
#define MPN_GCD_ITCH(xn, yn) ((xn) + (yn))
#define MPN_GCD_1_ITCH(xn) (xn)
#define MPN_INVERT_ITCH(n) (4 * ((n) + 1))
#define MPN_JACOBI_ITCH(n) (2 * (n))
#define MPN_SLIDE_ITCH(yn, mn) ((yn) > 2 ? (MP_SLIDE_SIZE * (mn)) : 0)
#define MPN_POWM_ITCH(yn, mn) (6 * (mn) + MPN_SLIDE_ITCH(yn, mn))
#define MPN_SEC_POWM_ITCH(n) (5 * (n) + MP_FIXED_SIZE * (n) + 1)
/* Either Barrett or Montgomery precomputation. */
#define MPN_BARRETT_MONT_ITCH(shift) ((shift) + 2)
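/*
 * A hedged usage sketch (not part of the original header) of how the itch
 * macros are intended to size scratch space: the caller allocates
 * MPN_*_ITCH(n) limbs and hands that buffer to the matching routine.
 *
 *   mp_size_t n = 8;
 *   mp_limb_t x[8], z[16];               // squaring n limbs yields 2 * n limbs
 *   mp_limb_t *scratch = mp_alloc_limbs(MPN_SQR_ITCH(n));
 *
 *   mpn_sqr(z, x, n, scratch);           // assumes x[0..n-1] was filled in
 *
 *   mp_free_limbs(scratch);
 */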
/*
* Macros
*/
#define MPZ_ROINIT_N(xp, xs) {{(mp_limb_t *)(xp), 0, (xs)}}
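/*
 * A hedged sketch (not in the original header) of viewing an existing limb
 * buffer as a read-only mpz_t via MPZ_ROINIT_N; it assumes the limbs are
 * already stripped of leading zero limbs.
 *
 *   static const mp_limb_t limbs[2] = { MP_LIMB_C(1), MP_LIMB_C(2) };
 *   static const mpz_t x = MPZ_ROINIT_N(limbs, 2);
 */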
/*
* Allocation
*/
mp_limb_t *
mp_alloc_limbs(mp_size_t size);
mp_limb_t *
mp_realloc_limbs(mp_limb_t *ptr, mp_size_t size);
void
mp_free_limbs(mp_limb_t *ptr);
/*
* MPN Interface
*/
/*
* Initialization
*/
void
mpn_zero(mp_limb_t *zp, mp_size_t zn);
/*
* Uninitialization
*/
void
mpn_cleanse(mp_limb_t *zp, mp_size_t zn);
/*
* Assignment
*/
void
mpn_set_1(mp_limb_t *zp, mp_size_t zn, mp_limb_t x);
void
mpn_copyi(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn);
void
mpn_copyd(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn);
/*
* Comparison
*/
int
mpn_zero_p(const mp_limb_t *xp, mp_size_t xn);
int
mpn_cmp(const mp_limb_t *xp, const mp_limb_t *yp, mp_size_t n);
/*
* Addition
*/
mp_limb_t
mpn_add_1(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn, mp_limb_t y);
mp_limb_t
mpn_add_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n);
mp_limb_t
mpn_add(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn,
const mp_limb_t *yp, mp_size_t yn);
/*
* Subtraction
*/
mp_limb_t
mpn_sub_1(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn, mp_limb_t y);
mp_limb_t
mpn_sub_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n);
mp_limb_t
mpn_sub(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn,
const mp_limb_t *yp, mp_size_t yn);
/*
* Multiplication
*/
mp_limb_t
mpn_mul_1(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn, mp_limb_t y);
mp_limb_t
mpn_addmul_1(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn, mp_limb_t y);
mp_limb_t
mpn_submul_1(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn, mp_limb_t y);
void
mpn_mul_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n);
void
mpn_mul(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn,
const mp_limb_t *yp, mp_size_t yn);
void
mpn_sqr(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn, mp_limb_t *scratch);
/*
* Multiply + Shift
*/
mp_limb_t
mpn_mulshift(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n,
mp_bits_t bits,
mp_limb_t *scratch);
/*
* Weak Reduction
*/
int
mpn_reduce_weak(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *np,
mp_size_t n,
mp_limb_t hi,
mp_limb_t *scratch);
/*
* Barrett Reduction
*/
void
mpn_barrett(mp_limb_t *mp, const mp_limb_t *np,
mp_size_t n,
mp_size_t shift,
mp_limb_t *scratch);
void
mpn_reduce(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *mp,
const mp_limb_t *np,
mp_size_t n,
mp_size_t shift,
mp_limb_t *scratch);
/*
* Montgomery Multiplication
*/
void
mpn_mont(mp_limb_t *kp,
mp_limb_t *rp,
const mp_limb_t *mp,
mp_size_t n,
mp_limb_t *scratch);
void
mpn_montmul(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
const mp_limb_t *mp,
mp_size_t n,
mp_limb_t k,
mp_limb_t *scratch);
void
mpn_montmul_var(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
const mp_limb_t *mp,
mp_size_t n,
mp_limb_t k,
mp_limb_t *scratch);
/*
* Division
*/
mp_limb_t
mpn_divmod_1(mp_limb_t *qp, const mp_limb_t *np, mp_size_t nn, mp_limb_t d);
void
mpn_div_1(mp_limb_t *qp, const mp_limb_t *np, mp_size_t nn, mp_limb_t d);
mp_limb_t
mpn_mod_1(const mp_limb_t *np, mp_size_t nn, mp_limb_t d);
void
mpn_divmod(mp_limb_t *qp, mp_limb_t *rp,
const mp_limb_t *np, mp_size_t nn,
const mp_limb_t *dp, mp_size_t dn);
void
mpn_div(mp_limb_t *qp, const mp_limb_t *np, mp_size_t nn,
const mp_limb_t *dp, mp_size_t dn);
void
mpn_mod(mp_limb_t *rp, const mp_limb_t *np, mp_size_t nn,
const mp_limb_t *dp, mp_size_t dn);
/*
* Exact Division
*/
void
mpn_divexact_1(mp_limb_t *qp, const mp_limb_t *np, mp_size_t nn, mp_limb_t d);
void
mpn_divexact(mp_limb_t *qp, const mp_limb_t *np, mp_size_t nn,
const mp_limb_t *dp, mp_size_t dn);
/*
* Round Division
*/
void
mpn_divround_1(mp_limb_t *qp, const mp_limb_t *np, mp_size_t nn, mp_limb_t d);
void
mpn_divround(mp_limb_t *qp, const mp_limb_t *np, mp_size_t nn,
const mp_limb_t *dp, mp_size_t dn);
/*
* AND
*/
void
mpn_and_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n);
/*
* OR
*/
void
mpn_ior_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n);
/*
* XOR
*/
void
mpn_xor_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n);
/*
* AND+NOT
*/
void
mpn_andn_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n);
/*
* OR+NOT
*/
void
mpn_iorn_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n);
/*
* NOT+AND
*/
void
mpn_nand_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n);
/*
* NOT+OR
*/
void
mpn_nior_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n);
/*
* NOT+XOR
*/
void
mpn_nxor_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n);
/*
* NOT
*/
void
mpn_com(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn);
/*
* Left Shift
*/
mp_limb_t
mpn_lshift(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn, mp_bits_t bits);
/*
* Right Shift
*/
mp_limb_t
mpn_rshift(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn, mp_bits_t bits);
/*
* Bit Manipulation
*/
mp_limb_t
mpn_getbit(const mp_limb_t *xp, mp_size_t xn, mp_bits_t pos);
mp_limb_t
mpn_getbits(const mp_limb_t *xp, mp_size_t xn, mp_bits_t pos, mp_bits_t width);
int
mpn_tstbit(const mp_limb_t *xp, mp_bits_t pos);
void
mpn_setbit(mp_limb_t *zp, mp_bits_t pos);
void
mpn_clrbit(mp_limb_t *zp, mp_bits_t pos);
void
mpn_combit(mp_limb_t *zp, mp_bits_t pos);
mp_bits_t
mpn_scan0(const mp_limb_t *xp, mp_size_t xn, mp_bits_t pos);
mp_bits_t
mpn_scan1(const mp_limb_t *xp, mp_size_t xn, mp_bits_t pos);
mp_bits_t
mpn_popcount(const mp_limb_t *xp, mp_size_t xn);
mp_bits_t
mpn_hamdist(const mp_limb_t *xp, const mp_limb_t *yp, mp_size_t n);
void
mpn_mask(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn, mp_bits_t bits);
/*
* Negation
*/
mp_limb_t
mpn_neg(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn);
/*
* Number Theoretic Functions
*/
mp_size_t
mpn_gcd(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn,
const mp_limb_t *yp, mp_size_t yn,
mp_limb_t *scratch);
mp_limb_t
mpn_gcd_1(const mp_limb_t *xp, mp_size_t xn, mp_limb_t y, mp_limb_t *scratch);
int
mpn_invert(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn,
const mp_limb_t *yp, mp_size_t yn,
mp_limb_t *scratch);
int
mpn_invert_n(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n,
mp_limb_t *scratch);
int
mpn_jacobi(const mp_limb_t *xp, mp_size_t xn,
const mp_limb_t *yp, mp_size_t yn,
mp_limb_t *scratch);
int
mpn_jacobi_n(const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n,
mp_limb_t *scratch);
void
mpn_powm(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn,
const mp_limb_t *yp, mp_size_t yn,
const mp_limb_t *mp, mp_size_t mn,
mp_limb_t *scratch);
void
mpn_sec_powm(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t xn,
const mp_limb_t *yp, mp_size_t yn,
const mp_limb_t *mp, mp_size_t mn,
mp_limb_t *scratch);
/*
* Helpers
*/
mp_size_t
mpn_strip(const mp_limb_t *xp, mp_size_t xn);
int
mpn_odd_p(const mp_limb_t *xp, mp_size_t xn);
int
mpn_even_p(const mp_limb_t *xp, mp_size_t xn);
mp_bits_t
mpn_ctz(const mp_limb_t *xp, mp_size_t xn);
mp_bits_t
mpn_bitlen(const mp_limb_t *xp, mp_size_t xn);
size_t
mpn_bytelen(const mp_limb_t *xp, mp_size_t xn);
size_t
mpn_sizeinbase(const mp_limb_t *xp, mp_size_t xn, int base);
/*
* Constant Time
*/
void
mpn_select(mp_limb_t *zp, const mp_limb_t *xp,
const mp_limb_t *yp,
mp_size_t n,
int flag);
void
mpn_select_zero(mp_limb_t *zp, const mp_limb_t *xp, mp_size_t n, int flag);
int
mpn_sec_zero_p(const mp_limb_t *xp, mp_size_t xn);
int
mpn_sec_equal_p(const mp_limb_t *xp, const mp_limb_t *yp, mp_size_t n);
int
mpn_sec_lt_p(const mp_limb_t *xp, const mp_limb_t *yp, mp_size_t n);
int
mpn_sec_lte_p(const mp_limb_t *xp, const mp_limb_t *yp, mp_size_t n);
int
mpn_sec_gt_p(const mp_limb_t *xp, const mp_limb_t *yp, mp_size_t n);
int
mpn_sec_gte_p(const mp_limb_t *xp, const mp_limb_t *yp, mp_size_t n);
int
mpn_sec_cmp(const mp_limb_t *xp, const mp_limb_t *yp, mp_size_t n);
/*
* Import
*/
void
mpn_import(mp_limb_t *zp, mp_size_t zn,
const unsigned char *raw, size_t len,
int endian);
/*
* Export
*/
void
mpn_export(unsigned char *raw, size_t len,
const mp_limb_t *xp, mp_size_t xn,
int endian);
/*
* String Import
*/
int
mpn_set_str(mp_limb_t *zp, mp_size_t zn, const char *str, int base);
/*
* String Export
*/
size_t
mpn_get_str(char *str, const mp_limb_t *xp, mp_size_t xn, int base);
/*
* STDIO
*/
void
mpn_print(const mp_limb_t *xp, mp_size_t xn, int base, mp_puts_f *mp_puts);
/*
* RNG
*/
void
mpn_random(mp_limb_t *zp, mp_size_t zn, mp_rng_f *rng, void *arg);
/*
* MPZ Interface
*/
/*
* Initialization
*/
void
mpz_init(mpz_t z);
void
mpz_init2(mpz_t z, mp_bits_t bits);
void
mpz_init_set(mpz_t z, const mpz_t x);
void
mpz_init_set_ui(mpz_t z, mp_limb_t x);
void
mpz_init_set_si(mpz_t z, mp_long_t x);
int
mpz_init_set_str(mpz_t z, const char *str, int base);
/*
* Uninitialization
*/
void
mpz_clear(mpz_t z);
void
mpz_cleanse(mpz_t z);
/*
* Assignment
*/
void
mpz_set(mpz_t z, const mpz_t x);
void
mpz_roset(mpz_t z, const mpz_t x);
void
mpz_roinit_n(mpz_t z, const mp_limb_t *xp, mp_size_t xs);
void
mpz_set_ui(mpz_t z, mp_limb_t x);
void
mpz_set_si(mpz_t z, mp_long_t x);
/*
* Conversion
*/
mp_limb_t
mpz_get_ui(const mpz_t x);
mp_long_t
mpz_get_si(const mpz_t x);
/*
* Comparison
*/
int
mpz_sgn(const mpz_t x);
int
mpz_cmp(const mpz_t x, const mpz_t y);
int
mpz_cmp_ui(const mpz_t x, mp_limb_t y);
int
mpz_cmp_si(const mpz_t x, mp_long_t y);
/*
* Unsigned Comparison
*/
int
mpz_cmpabs(const mpz_t x, const mpz_t y);
int
mpz_cmpabs_ui(const mpz_t x, mp_limb_t y);
int
mpz_cmpabs_si(const mpz_t x, mp_long_t y);
/*
* Addition
*/
void
mpz_add(mpz_t z, const mpz_t x, const mpz_t y);
void
mpz_add_ui(mpz_t z, const mpz_t x, mp_limb_t y);
void
mpz_add_si(mpz_t z, const mpz_t x, mp_long_t y);
/*
* Subtraction
*/
void
mpz_sub(mpz_t z, const mpz_t x, const mpz_t y);
void
mpz_sub_ui(mpz_t z, const mpz_t x, mp_limb_t y);
void
mpz_sub_si(mpz_t z, const mpz_t x, mp_long_t y);
void
mpz_ui_sub(mpz_t z, mp_limb_t x, const mpz_t y);
void
mpz_si_sub(mpz_t z, mp_long_t x, const mpz_t y);
/*
* Multiplication
*/
void
mpz_mul(mpz_t z, const mpz_t x, const mpz_t y);
void
mpz_mul_ui(mpz_t z, const mpz_t x, mp_limb_t y);
void
mpz_mul_si(mpz_t z, const mpz_t x, mp_long_t y);
void
mpz_sqr(mpz_t z, const mpz_t x);
void
mpz_addmul(mpz_t z, const mpz_t x, const mpz_t y);
void
mpz_addmul_ui(mpz_t z, const mpz_t x, mp_limb_t y);
void
mpz_addmul_si(mpz_t z, const mpz_t x, mp_long_t y);
void
mpz_submul(mpz_t z, const mpz_t x, const mpz_t y);
void
mpz_submul_ui(mpz_t z, const mpz_t x, mp_limb_t y);
void
mpz_submul_si(mpz_t z, const mpz_t x, mp_long_t y);
/*
* Multiply + Shift
*/
void
mpz_mulshift(mpz_t z, const mpz_t x, const mpz_t y, mp_bits_t bits);
/*
* Truncation Division
*/
void
mpz_quorem(mpz_t q, mpz_t r, const mpz_t n, const mpz_t d);
void
mpz_quo(mpz_t q, const mpz_t n, const mpz_t d);
void
mpz_rem(mpz_t r, const mpz_t n, const mpz_t d);
mp_limb_t
mpz_quo_ui(mpz_t q, const mpz_t n, mp_limb_t d);
mp_limb_t
mpz_rem_ui(const mpz_t n, mp_limb_t d);
mp_long_t
mpz_quo_si(mpz_t q, const mpz_t n, mp_long_t d);
mp_long_t
mpz_rem_si(const mpz_t n, mp_long_t d);
/*
* Euclidean Division
*/
void
mpz_divmod(mpz_t q, mpz_t r, const mpz_t n, const mpz_t d);
void
mpz_div(mpz_t q, const mpz_t n, const mpz_t d);
void
mpz_mod(mpz_t r, const mpz_t n, const mpz_t d);
mp_limb_t
mpz_div_ui(mpz_t q, const mpz_t n, mp_limb_t d);
mp_limb_t
mpz_mod_ui(const mpz_t n, mp_limb_t d);
mp_long_t
mpz_div_si(mpz_t q, const mpz_t n, mp_long_t d);
mp_long_t
mpz_mod_si(const mpz_t n, mp_long_t d);
/*
* Exact Division
*/
void
mpz_divexact(mpz_t q, const mpz_t n, const mpz_t d);
void
mpz_divexact_ui(mpz_t q, const mpz_t n, mp_limb_t d);
void
mpz_divexact_si(mpz_t q, const mpz_t n, mp_long_t d);
/*
* Round Division
*/
void
mpz_divround(mpz_t q, const mpz_t n, const mpz_t d);
void
mpz_divround_ui(mpz_t q, const mpz_t n, mp_limb_t d);
void
mpz_divround_si(mpz_t q, const mpz_t n, mp_long_t d);
/*
* Divisibility
*/
int
mpz_divisible_p(const mpz_t n, const mpz_t d);
int
mpz_divisible_ui_p(const mpz_t n, mp_limb_t d);
int
mpz_divisible_2exp_p(const mpz_t n, mp_bits_t bits);
/*
* Congruence
*/
int
mpz_congruent_p(const mpz_t x, const mpz_t y, const mpz_t d);
int
mpz_congruent_ui_p(const mpz_t x, const mpz_t y, mp_limb_t d);
int
mpz_congruent_2exp_p(const mpz_t x, const mpz_t y, mp_bits_t bits);
/*
* Exponentiation
*/
void
mpz_pow_ui(mpz_t z, const mpz_t x, mp_limb_t y);
void
mpz_ui_pow_ui(mpz_t z, mp_limb_t x, mp_limb_t y);
/*
* Roots
*/
void
mpz_rootrem(mpz_t z, mpz_t r, const mpz_t x, mp_limb_t k);
int
mpz_root(mpz_t z, const mpz_t x, mp_limb_t k);
int
mpz_perfect_power_p(const mpz_t x);
void
mpz_sqrtrem(mpz_t z, mpz_t r, const mpz_t x);
void
mpz_sqrt(mpz_t z, const mpz_t x);
int
mpz_perfect_square_p(const mpz_t x);
/*
* AND
*/
void
mpz_and(mpz_t z, const mpz_t x, const mpz_t y);
mp_limb_t
mpz_and_ui(const mpz_t x, mp_limb_t y);
void
mpz_and_si(mpz_t z, const mpz_t x, mp_long_t y);
/*
* OR
*/
void
mpz_ior(mpz_t z, const mpz_t x, const mpz_t y);
void
mpz_ior_ui(mpz_t z, const mpz_t x, mp_limb_t y);
void
mpz_ior_si(mpz_t z, const mpz_t x, mp_long_t y);
/*
* XOR
*/
void
mpz_xor(mpz_t z, const mpz_t x, const mpz_t y);
void
mpz_xor_ui(mpz_t z, const mpz_t x, mp_limb_t y);
void
mpz_xor_si(mpz_t z, const mpz_t x, mp_long_t y);
/*
* NOT
*/
void
mpz_com(mpz_t z, const mpz_t x);
/*
* Left Shift
*/
void
mpz_mul_2exp(mpz_t z, const mpz_t x, mp_bits_t bits);
/*
* Unsigned Right Shift
*/
void
mpz_quo_2exp(mpz_t z, const mpz_t x, mp_bits_t bits);
void
mpz_rem_2exp(mpz_t z, const mpz_t x, mp_bits_t bits);
/*
* Right Shift
*/
void
mpz_div_2exp(mpz_t z, const mpz_t x, mp_bits_t bits);
void
mpz_mod_2exp(mpz_t z, const mpz_t x, mp_bits_t bits);
/*
* Bit Manipulation
*/
int
mpz_tstbit(const mpz_t x, mp_bits_t pos);
void
mpz_setbit(mpz_t z, mp_bits_t pos);
void
mpz_clrbit(mpz_t z, mp_bits_t pos);
void
mpz_combit(mpz_t z, mp_bits_t pos);
mp_bits_t
mpz_scan0(const mpz_t x, mp_bits_t pos);
mp_bits_t
mpz_scan1(const mpz_t x, mp_bits_t pos);
mp_bits_t
mpz_popcount(const mpz_t x);
mp_bits_t
mpz_hamdist(const mpz_t x, const mpz_t y);
/*
* Negation
*/
void
mpz_abs(mpz_t z, const mpz_t x);
void
mpz_neg(mpz_t z, const mpz_t x);
/*
* Number Theoretic Functions
*/
void
mpz_gcd(mpz_t z, const mpz_t x, const mpz_t y);
mp_limb_t
mpz_gcd_ui(mpz_t z, const mpz_t x, mp_limb_t y);
void
mpz_lcm(mpz_t z, const mpz_t x, const mpz_t y);
void
mpz_lcm_ui(mpz_t z, const mpz_t x, mp_limb_t y);
void
mpz_gcdext(mpz_t g, mpz_t s, mpz_t t, const mpz_t x, const mpz_t y);
int
mpz_invert(mpz_t z, const mpz_t x, const mpz_t y);
int
mpz_legendre(const mpz_t x, const mpz_t p);
int
mpz_jacobi(const mpz_t x, const mpz_t y);
int
mpz_kronecker(const mpz_t x, const mpz_t y);
int
mpz_kronecker_ui(const mpz_t x, mp_limb_t y);
int
mpz_kronecker_si(const mpz_t x, mp_long_t y);
int
mpz_ui_kronecker(mp_limb_t x, const mpz_t y);
int
mpz_si_kronecker(mp_long_t x, const mpz_t y);
void
mpz_powm(mpz_t z, const mpz_t x, const mpz_t y, const mpz_t m);
void
mpz_powm_ui(mpz_t z, const mpz_t x, mp_limb_t y, const mpz_t m);
void
mpz_powm_sec(mpz_t z, const mpz_t x, const mpz_t y, const mpz_t m);
int
mpz_sqrtm(mpz_t z, const mpz_t x, const mpz_t p);
int
mpz_sqrtpq(mpz_t z, const mpz_t x, const mpz_t p, const mpz_t q);
mp_bits_t
mpz_remove(mpz_t z, const mpz_t x, const mpz_t y);
void
mpz_fac_ui(mpz_t z, mp_limb_t n);
void
mpz_2fac_ui(mpz_t z, mp_limb_t n);
void
mpz_mfac_uiui(mpz_t z, mp_limb_t n, mp_limb_t m);
void
mpz_primorial_ui(mpz_t z, mp_limb_t n);
void
mpz_bin_ui(mpz_t z, const mpz_t n, mp_limb_t k);
void
mpz_bin_uiui(mpz_t z, mp_limb_t n, mp_limb_t k);
void
mpz_fib_ui(mpz_t z, mp_limb_t n);
void
mpz_fib2_ui(mpz_t z, mpz_t p, mp_limb_t n);
void
mpz_lucnum_ui(mpz_t z, mp_limb_t n);
void
mpz_lucnum2_ui(mpz_t z, mpz_t p, mp_limb_t n);
/*
* Primality Testing
*/
int
mpz_mr_prime_p(const mpz_t n, int reps, int force2, mp_rng_f *rng, void *arg);
int
mpz_lucas_prime_p(const mpz_t n, mp_limb_t limit);
int
mpz_probab_prime_p(const mpz_t x, int rounds, mp_rng_f *rng, void *arg);
void
mpz_randprime(mpz_t z, mp_bits_t bits, mp_rng_f *rng, void *arg);
void
mpz_nextprime(mpz_t z, const mpz_t x, mp_rng_f *rng, void *arg);
int
mpz_findprime(mpz_t z, const mpz_t x, mp_limb_t max, mp_rng_f *rng, void *arg);
/*
* Helpers
*/
int
mpz_fits_ui_p(const mpz_t x);
int
mpz_fits_si_p(const mpz_t x);
int
mpz_odd_p(const mpz_t x);
int
mpz_even_p(const mpz_t x);
mp_bits_t
mpz_ctz(const mpz_t x);
mp_bits_t
mpz_bitlen(const mpz_t x);
size_t
mpz_bytelen(const mpz_t x);
size_t
mpz_sizeinbase(const mpz_t x, int base);
void
mpz_swap(mpz_t x, mpz_t y);
void *
_mpz_realloc(mpz_t z, mp_size_t n);
void
mpz_realloc2(mpz_t z, mp_bits_t bits);
/*
* Limb Helpers
*/
mp_limb_t
mpz_getlimbn(const mpz_t x, mp_size_t n);
mp_size_t
mpz_size(const mpz_t x);
const mp_limb_t *
mpz_limbs_read(const mpz_t x);
mp_limb_t *
mpz_limbs_write(mpz_t z, mp_size_t n);
mp_limb_t *
mpz_limbs_modify(mpz_t z, mp_size_t n);
void
mpz_limbs_finish(mpz_t z, mp_size_t n);
/*
* Import
*/
void
mpz_import(mpz_t z, const unsigned char *raw, size_t size, int endian);
/*
* Export
*/
void
mpz_export(unsigned char *raw, const mpz_t x, size_t size, int endian);
/*
* String Import
*/
int
mpz_set_str(mpz_t z, const char *str, int base);
/*
* String Export
*/
char *
mpz_get_str(const mpz_t x, int base);
/*
* STDIO
*/
void
mpz_print(const mpz_t x, int base, mp_puts_f *mp_puts);
/*
* RNG
*/
void
mpz_urandomb(mpz_t z, mp_bits_t bits, mp_rng_f *rng, void *arg);
void
mpz_urandomm(mpz_t z, const mpz_t x, mp_rng_f *rng, void *arg);
/*
* Testing
*/
TORSION_EXTERN void
test_mpi_internal(mp_rng_f *rng, void *arg);
/*
* Benchmarks
*/
TORSION_EXTERN void
bench_mpi_internal(mp_start_f *start, mp_end_f *end, mp_rng_f *rng, void *arg);
#endif /* _TORSION_MPI_H */
from pydantic import BaseModel
from typing import Any, Optional
class Compose(BaseModel):
name: str
class ComposeWrite(Compose):
content: Optional[Any]
class ComposeRead(ComposeWrite):
path: str
import java.util.Scanner;
/*
Adapted by Sleekpanther from SquirrelCoder's initial idea from 29-Jan-17.
Uses arrays, even though they technically aren't introduced until chapter 7. But it simplifies this a lot
6.34 (Print calendar) Programming Exercise 3.21 uses Zeller's congruence to calculate
the day of the week. Simplify Listing 6.12, PrintCalendar.java, using Zeller's
algorithm to get the start day of the month.
Exercise 3.21 details
(Science: day of the week) Zeller's congruence is an algorithm developed by
Christian Zeller to calculate the day of the week. The formula is:
	h = (q + (26 * (m + 1)) / 10 + k + k / 4 + j / 4 + 5 * j) % 7
h is the day of the week (0: Saturday, 1: Sunday, 2: Monday, 3: Tuesday, 4: Wednesday, 5: Thursday, 6: Friday).
q is the day of the month.
m is the month (3: March, 4: April, ..., 12: December). January and February are counted as months 13 and 14 of the previous year.
j is the century (i.e. year/100)
k is the year of the century (i.e., year % 100).
Note that the division in the formula performs an integer division. Write a program
that prompts the user to enter a year, month, and day of the month, and
displays the name of the day of the week.
(Hint: January and February are counted as 13 and 14 in the formula, so you need
to convert the user input 1 to 13 and 2 to 14 for the month and change the year to the previous year.)
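Worked example (added for illustration, not part of the original exercise text): for 14 February 2012,
February is treated as month 14 of 2011, so q = 14, m = 14, j = 20, k = 11 and
h = (14 + (26*15)/10 + 11 + 11/4 + 20/4 + 5*20) % 7 = (14 + 39 + 11 + 2 + 5 + 100) % 7 = 171 % 7 = 3,
which maps to Tuesday.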
*/
public class PrintCalendar {
public static void main(String[] args) {
Scanner keyboard = new Scanner(System.in);
// get & validate user year
System.out.print("Enter full year (e.g., 2012): ");
int year = keyboard.nextInt();
while (!isValidYear(year)) { //validate input
System.out.println("Invalid Year!");
System.out.print("Enter full year (e.g., 2012): ");
year = keyboard.nextInt();
}
// get & validate user month
System.out.print("Enter month as number between 1 and 12: ");
int month = keyboard.nextInt();
while (!isValidMonth(month)) { //validate input
System.out.println("Invalid Month!");
System.out.print("Enter month as number between 1 and 12: ");
month = keyboard.nextInt();
}
printCalendarHeader(month, year); // print the calendar header
printFirstDay(month, year); // print the calendar first day
printCalendarItself(month, year); // print the calendar itself
}
public static boolean isValidYear(int year) {
return year > 0; //might want to check an upper bound, not sure if this formula works for HUGE numbers
}
public static boolean isValidMonth(int month) {
return month > 0 && month <= 12;
}
public static void printCalendarHeader(int month, int year) {
String[] months = {"January", "February", "March", "April", "May", "June", "July", "August", "Septemter", "October", "November", "December"};
System.out.print("\t\t"+months[month-1]+"\t"); //access the month array with a -1 offset since arrays count from 0
System.out.println(year);
System.out.println("---------------------------");
System.out.println("Sun\tMon\tTue\tWed\tThu\tFri\tSat");
}
public static void printFirstDay(int month, int year) {
int firstDay = dayOfWeek(1, month, year); //calculate the 1st day
String leadingTabs = "1"; //Holds any leading tabs to align 1st row of numbers in a calendar. This takes care of firstDay=1
//cases for firstDay between 2 & 6 (adds a "\t" at the beginning of the string each iteration)
//Loop starts from 1 since we want 1 less tab than the value of firstDay (firstDay=1 is 0 tabs, firstDay=2 is 1 tab, firstDay=3 is 2 tabs, firstDay=4 is 3 tabs, firstDay=5 is 4 tabs)
for(int i = 1; i<firstDay; i++){
leadingTabs = "\t" + leadingTabs;
}
if(firstDay == 0){ //reset it & ignore what the loop did if it's 0. THIS IS A SPECIAL CASE. We want 6 tabs
leadingTabs = "\t\t\t\t\t\t1";
}
System.out.print(leadingTabs + "\t");
}
public static void printCalendarItself(int month, int year) {
// find out the last day of that month
// whether it's 28/29/30/31 days
int lastDayOfMonth = lastDayOfMonth(month, year);
// print the calendar itself
for (int i = 2; i <= lastDayOfMonth; i++) {
int printedDay = dayOfWeek(i, month, year);
if (printedDay == 1) {
System.out.println();
}
System.out.print(i + "\t");
}
}
//Implement Zeller's Algorithm
public static int dayOfWeek(int dayOfMonth, int month, int year) {
if (month == 1 || month == 2) {
month = month + 12;
year--;
}
int q, m, j, k;
q = dayOfMonth;
m = month; //adjusted month (corrected for January & February being 13 & 14 respectively)
j = year/100; //century
k = year%100; //year of the century
		int dayOfTheWeek = (q + (26*(m+1) /10) + k + k/4 + j/4 + (5*j)) % 7; //performs integer division where appropriate (as the algorithm requires)
return dayOfTheWeek;
}
public static boolean isLeapYear(int year) {
return year % 400 == 0 || (year % 4 == 0 && year % 100 != 0);
}
public static int lastDayOfMonth(int month, int year) {
int lastDayOfMonth;
if (month == 1 || month == 3 || month == 5 || month == 7 || month == 8 || month == 10 || month == 12) {
lastDayOfMonth = 31;
} else if (month == 2) {
if (isLeapYear(year)) {
lastDayOfMonth = 29;
} else {
lastDayOfMonth = 28;
}
} else {
lastDayOfMonth = 30;
}
return lastDayOfMonth;
}
}
package org.jabref.logic.layout.format;
import org.jabref.logic.layout.LayoutFormatter;
/**
* Formatter that returns the last page from the "pages" field, if set.
*
* For instance, if the pages field is set to "345-360" or "345--360",
* this formatter will return "360".
*/
public class LastPage implements LayoutFormatter {
@Override
public String format(String s) {
if (s == null) {
return "";
}
String[] pageParts = s.split("[\\-]+");
if (pageParts.length == 2) {
return pageParts[1];
} else if (pageParts.length >= 1) {
return pageParts[0];
} else {
return "";
}
}
}
//===- ExecutionDriver.cpp - Allow execution of LLVM program --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code used to execute the program utilizing one of the
// various ways of running LLVM bitcode.
//
//===----------------------------------------------------------------------===//
#include "BugDriver.h"
#include "ToolRunner.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/SystemUtils.h"
#include "llvm/Support/raw_ostream.h"
#include <fstream>
using namespace llvm;
namespace {
// OutputType - Allow the user to specify the way code should be run, to test
// for miscompilation.
//
enum OutputType {
AutoPick, RunLLI, RunJIT, RunLLC, RunLLCIA, RunCBE, CBE_bug, LLC_Safe,
CompileCustom, Custom
};
cl::opt<double>
AbsTolerance("abs-tolerance", cl::desc("Absolute error tolerated"),
cl::init(0.0));
cl::opt<double>
RelTolerance("rel-tolerance", cl::desc("Relative error tolerated"),
cl::init(0.0));
cl::opt<OutputType>
InterpreterSel(cl::desc("Specify the \"test\" i.e. suspect back-end:"),
cl::values(clEnumValN(AutoPick, "auto", "Use best guess"),
clEnumValN(RunLLI, "run-int",
"Execute with the interpreter"),
clEnumValN(RunJIT, "run-jit", "Execute with JIT"),
clEnumValN(RunLLC, "run-llc", "Compile with LLC"),
clEnumValN(RunLLCIA, "run-llc-ia",
"Compile with LLC with integrated assembler"),
clEnumValN(RunCBE, "run-cbe", "Compile with CBE"),
clEnumValN(CBE_bug,"cbe-bug", "Find CBE bugs"),
clEnumValN(LLC_Safe, "llc-safe", "Use LLC for all"),
clEnumValN(CompileCustom, "compile-custom",
"Use -compile-command to define a command to "
"compile the bitcode. Useful to avoid linking."),
clEnumValN(Custom, "run-custom",
"Use -exec-command to define a command to execute "
"the bitcode. Useful for cross-compilation."),
clEnumValEnd),
cl::init(AutoPick));
cl::opt<OutputType>
SafeInterpreterSel(cl::desc("Specify \"safe\" i.e. known-good backend:"),
cl::values(clEnumValN(AutoPick, "safe-auto", "Use best guess"),
clEnumValN(RunLLC, "safe-run-llc", "Compile with LLC"),
clEnumValN(RunCBE, "safe-run-cbe", "Compile with CBE"),
clEnumValN(Custom, "safe-run-custom",
"Use -exec-command to define a command to execute "
"the bitcode. Useful for cross-compilation."),
clEnumValEnd),
cl::init(AutoPick));
cl::opt<std::string>
SafeInterpreterPath("safe-path",
cl::desc("Specify the path to the \"safe\" backend program"),
cl::init(""));
cl::opt<bool>
AppendProgramExitCode("append-exit-code",
cl::desc("Append the exit code to the output so it gets diff'd too"),
cl::init(false));
cl::opt<std::string>
InputFile("input", cl::init("/dev/null"),
cl::desc("Filename to pipe in as stdin (default: /dev/null)"));
cl::list<std::string>
AdditionalSOs("additional-so",
cl::desc("Additional shared objects to load "
"into executing programs"));
cl::list<std::string>
AdditionalLinkerArgs("Xlinker",
cl::desc("Additional arguments to pass to the linker"));
cl::opt<std::string>
CustomCompileCommand("compile-command", cl::init("llc"),
cl::desc("Command to compile the bitcode (use with -compile-custom) "
"(default: llc)"));
cl::opt<std::string>
CustomExecCommand("exec-command", cl::init("simulate"),
cl::desc("Command to execute the bitcode (use with -run-custom) "
"(default: simulate)"));
}
namespace llvm {
// Anything specified after the --args option are taken as arguments to the
// program being debugged.
cl::list<std::string>
InputArgv("args", cl::Positional, cl::desc("<program arguments>..."),
cl::ZeroOrMore, cl::PositionalEatsArgs);
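// An illustrative invocation (not part of this file): positional arguments
// after --args are forwarded to the program under test, e.g.
//   bugpoint crash.bc -run-llc --args input.txt --some-program-flag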
cl::opt<std::string>
OutputPrefix("output-prefix", cl::init("bugpoint"),
cl::desc("Prefix to use for outputs (default: 'bugpoint')"));
}
namespace {
cl::list<std::string>
ToolArgv("tool-args", cl::Positional, cl::desc("<tool arguments>..."),
cl::ZeroOrMore, cl::PositionalEatsArgs);
cl::list<std::string>
SafeToolArgv("safe-tool-args", cl::Positional,
cl::desc("<safe-tool arguments>..."),
cl::ZeroOrMore, cl::PositionalEatsArgs);
cl::opt<std::string>
GCCBinary("gcc", cl::init("gcc"),
cl::desc("The gcc binary to use. (default 'gcc')"));
cl::list<std::string>
GCCToolArgv("gcc-tool-args", cl::Positional,
cl::desc("<gcc-tool arguments>..."),
cl::ZeroOrMore, cl::PositionalEatsArgs);
}
//===----------------------------------------------------------------------===//
// BugDriver method implementation
//
/// initializeExecutionEnvironment - This method is used to set up the
/// environment for executing LLVM programs.
///
bool BugDriver::initializeExecutionEnvironment() {
outs() << "Initializing execution environment: ";
// Create an instance of the AbstractInterpreter interface as specified on
// the command line
SafeInterpreter = 0;
std::string Message;
switch (InterpreterSel) {
case AutoPick:
InterpreterSel = RunCBE;
Interpreter =
AbstractInterpreter::createCBE(getToolName(), Message, GCCBinary,
&ToolArgv, &GCCToolArgv);
if (!Interpreter) {
InterpreterSel = RunJIT;
Interpreter = AbstractInterpreter::createJIT(getToolName(), Message,
&ToolArgv);
}
if (!Interpreter) {
InterpreterSel = RunLLC;
Interpreter = AbstractInterpreter::createLLC(getToolName(), Message,
GCCBinary, &ToolArgv,
&GCCToolArgv);
}
if (!Interpreter) {
InterpreterSel = RunLLI;
Interpreter = AbstractInterpreter::createLLI(getToolName(), Message,
&ToolArgv);
}
if (!Interpreter) {
InterpreterSel = AutoPick;
Message = "Sorry, I can't automatically select an interpreter!\n";
}
break;
case RunLLI:
Interpreter = AbstractInterpreter::createLLI(getToolName(), Message,
&ToolArgv);
break;
case RunLLC:
case RunLLCIA:
case LLC_Safe:
Interpreter = AbstractInterpreter::createLLC(getToolName(), Message,
GCCBinary, &ToolArgv,
&GCCToolArgv,
InterpreterSel == RunLLCIA);
break;
case RunJIT:
Interpreter = AbstractInterpreter::createJIT(getToolName(), Message,
&ToolArgv);
break;
case RunCBE:
case CBE_bug:
Interpreter = AbstractInterpreter::createCBE(getToolName(), Message,
GCCBinary, &ToolArgv,
&GCCToolArgv);
break;
case CompileCustom:
Interpreter =
AbstractInterpreter::createCustomCompiler(Message, CustomCompileCommand);
break;
case Custom:
Interpreter =
AbstractInterpreter::createCustomExecutor(Message, CustomExecCommand);
break;
default:
Message = "Sorry, this back-end is not supported by bugpoint right now!\n";
break;
}
if (!Interpreter)
errs() << Message;
else // Display informational messages on stdout instead of stderr
outs() << Message;
std::string Path = SafeInterpreterPath;
if (Path.empty())
Path = getToolName();
std::vector<std::string> SafeToolArgs = SafeToolArgv;
switch (SafeInterpreterSel) {
case AutoPick:
// In "cbe-bug" mode, default to using LLC as the "safe" backend.
if (!SafeInterpreter &&
InterpreterSel == CBE_bug) {
SafeInterpreterSel = RunLLC;
SafeToolArgs.push_back("--relocation-model=pic");
SafeInterpreter = AbstractInterpreter::createLLC(Path.c_str(), Message,
GCCBinary,
&SafeToolArgs,
&GCCToolArgv);
}
// In "llc-safe" mode, default to using LLC as the "safe" backend.
if (!SafeInterpreter &&
InterpreterSel == LLC_Safe) {
SafeInterpreterSel = RunLLC;
SafeToolArgs.push_back("--relocation-model=pic");
SafeInterpreter = AbstractInterpreter::createLLC(Path.c_str(), Message,
GCCBinary,
&SafeToolArgs,
&GCCToolArgv);
}
// Pick a backend that's different from the test backend. The JIT and
// LLC backends share a lot of code, so prefer to use the CBE as the
// safe back-end when testing them.
if (!SafeInterpreter &&
InterpreterSel != RunCBE) {
SafeInterpreterSel = RunCBE;
SafeInterpreter = AbstractInterpreter::createCBE(Path.c_str(), Message,
GCCBinary,
&SafeToolArgs,
&GCCToolArgv);
}
if (!SafeInterpreter &&
InterpreterSel != RunLLC &&
InterpreterSel != RunJIT) {
SafeInterpreterSel = RunLLC;
SafeToolArgs.push_back("--relocation-model=pic");
SafeInterpreter = AbstractInterpreter::createLLC(Path.c_str(), Message,
GCCBinary,
&SafeToolArgs,
&GCCToolArgv);
}
if (!SafeInterpreter) {
SafeInterpreterSel = AutoPick;
Message = "Sorry, I can't automatically select an interpreter!\n";
}
break;
case RunLLC:
case RunLLCIA:
SafeToolArgs.push_back("--relocation-model=pic");
SafeInterpreter = AbstractInterpreter::createLLC(Path.c_str(), Message,
GCCBinary, &SafeToolArgs,
&GCCToolArgv,
SafeInterpreterSel == RunLLCIA);
break;
case RunCBE:
SafeInterpreter = AbstractInterpreter::createCBE(Path.c_str(), Message,
GCCBinary, &SafeToolArgs,
&GCCToolArgv);
break;
case Custom:
SafeInterpreter =
AbstractInterpreter::createCustomExecutor(Message, CustomExecCommand);
break;
default:
Message = "Sorry, this back-end is not supported by bugpoint as the "
"\"safe\" backend right now!\n";
break;
}
if (!SafeInterpreter) { outs() << Message << "\nExiting.\n"; exit(1); }
gcc = GCC::create(Message, GCCBinary, &GCCToolArgv);
if (!gcc) { outs() << Message << "\nExiting.\n"; exit(1); }
// If there was an error creating the selected interpreter, quit with error.
return Interpreter == 0;
}
/// compileProgram - Try to compile the specified module, returning false and
/// setting Error if an error occurs. This is used for code generation
/// crash testing.
///
void BugDriver::compileProgram(Module *M, std::string *Error) const {
// Emit the program to a bitcode file...
sys::Path BitcodeFile (OutputPrefix + "-test-program.bc");
std::string ErrMsg;
if (BitcodeFile.makeUnique(true, &ErrMsg)) {
errs() << ToolName << ": Error making unique filename: " << ErrMsg
<< "\n";
exit(1);
}
if (writeProgramToFile(BitcodeFile.str(), M)) {
errs() << ToolName << ": Error emitting bitcode to file '"
<< BitcodeFile.str() << "'!\n";
exit(1);
}
// Remove the temporary bitcode file when we are done.
FileRemover BitcodeFileRemover(BitcodeFile.str(), !SaveTemps);
// Actually compile the program!
Interpreter->compileProgram(BitcodeFile.str(), Error, Timeout, MemoryLimit);
}
/// executeProgram - This method runs "Program", capturing the output of the
/// program to a file, returning the filename of the file. A recommended
/// filename may be optionally specified.
///
std::string BugDriver::executeProgram(const Module *Program,
std::string OutputFile,
std::string BitcodeFile,
const std::string &SharedObj,
AbstractInterpreter *AI,
std::string *Error) const {
if (AI == 0) AI = Interpreter;
assert(AI && "Interpreter should have been created already!");
bool CreatedBitcode = false;
std::string ErrMsg;
if (BitcodeFile.empty()) {
// Emit the program to a bitcode file...
sys::Path uniqueFilename(OutputPrefix + "-test-program.bc");
if (uniqueFilename.makeUnique(true, &ErrMsg)) {
errs() << ToolName << ": Error making unique filename: "
<< ErrMsg << "!\n";
exit(1);
}
BitcodeFile = uniqueFilename.str();
if (writeProgramToFile(BitcodeFile, Program)) {
errs() << ToolName << ": Error emitting bitcode to file '"
<< BitcodeFile << "'!\n";
exit(1);
}
CreatedBitcode = true;
}
// Remove the temporary bitcode file when we are done.
sys::Path BitcodePath(BitcodeFile);
FileRemover BitcodeFileRemover(BitcodePath.str(),
CreatedBitcode && !SaveTemps);
if (OutputFile.empty()) OutputFile = OutputPrefix + "-execution-output";
// Check to see if this is a valid output filename...
sys::Path uniqueFile(OutputFile);
if (uniqueFile.makeUnique(true, &ErrMsg)) {
errs() << ToolName << ": Error making unique filename: "
<< ErrMsg << "\n";
exit(1);
}
OutputFile = uniqueFile.str();
// Figure out which shared objects to run, if any.
std::vector<std::string> SharedObjs(AdditionalSOs);
if (!SharedObj.empty())
SharedObjs.push_back(SharedObj);
int RetVal = AI->ExecuteProgram(BitcodeFile, InputArgv, InputFile, OutputFile,
Error, AdditionalLinkerArgs, SharedObjs,
Timeout, MemoryLimit);
if (!Error->empty())
return OutputFile;
if (RetVal == -1) {
errs() << "<timeout>";
static bool FirstTimeout = true;
if (FirstTimeout) {
outs() << "\n"
"*** Program execution timed out! This mechanism is designed to handle\n"
" programs stuck in infinite loops gracefully. The -timeout option\n"
" can be used to change the timeout threshold or disable it completely\n"
" (with -timeout=0). This message is only displayed once.\n";
FirstTimeout = false;
}
}
if (AppendProgramExitCode) {
std::ofstream outFile(OutputFile.c_str(), std::ios_base::app);
outFile << "exit " << RetVal << '\n';
outFile.close();
}
// Return the filename we captured the output to.
return OutputFile;
}
/// executeProgramSafely - Used to create reference output with the "safe"
/// backend, if reference output is not provided.
///
std::string BugDriver::executeProgramSafely(const Module *Program,
std::string OutputFile,
std::string *Error) const {
return executeProgram(Program, OutputFile, "", "", SafeInterpreter, Error);
}
std::string BugDriver::compileSharedObject(const std::string &BitcodeFile,
std::string &Error) {
assert(Interpreter && "Interpreter should have been created already!");
sys::Path OutputFile;
// Using the known-good backend.
GCC::FileType FT = SafeInterpreter->OutputCode(BitcodeFile, OutputFile,
Error);
if (!Error.empty())
return "";
std::string SharedObjectFile;
bool Failure = gcc->MakeSharedObject(OutputFile.str(), FT, SharedObjectFile,
AdditionalLinkerArgs, Error);
if (!Error.empty())
return "";
if (Failure)
exit(1);
// Remove the intermediate C file
OutputFile.eraseFromDisk();
return "./" + SharedObjectFile;
}
/// createReferenceFile - calls compileProgram and then records the output
/// into ReferenceOutputFile. Returns true if reference file created, false
/// otherwise. Note: initializeExecutionEnvironment should be called BEFORE
/// this function.
///
bool BugDriver::createReferenceFile(Module *M, const std::string &Filename) {
std::string Error;
compileProgram(Program, &Error);
if (!Error.empty())
return false;
ReferenceOutputFile = executeProgramSafely(Program, Filename, &Error);
if (!Error.empty()) {
errs() << Error;
if (Interpreter != SafeInterpreter) {
errs() << "*** There is a bug running the \"safe\" backend. Either"
<< " debug it (for example with the -run-cbe bugpoint option,"
<< " if CBE is being used as the \"safe\" backend), or fix the"
<< " error some other way.\n";
}
return false;
}
outs() << "\nReference output is: " << ReferenceOutputFile << "\n\n";
return true;
}
/// diffProgram - This method executes the specified module and diffs the
/// output against the file specified by ReferenceOutputFile. If the output
/// is different, 1 is returned. If there is a problem with the code
/// generator (e.g., llc crashes), this will set ErrMsg.
///
bool BugDriver::diffProgram(const Module *Program,
const std::string &BitcodeFile,
const std::string &SharedObject,
bool RemoveBitcode,
std::string *ErrMsg) const {
// Execute the program, generating an output file...
sys::Path Output(executeProgram(Program, "", BitcodeFile, SharedObject, 0,
ErrMsg));
if (!ErrMsg->empty())
return false;
std::string Error;
bool FilesDifferent = false;
if (int Diff = DiffFilesWithTolerance(sys::Path(ReferenceOutputFile),
sys::Path(Output.str()),
AbsTolerance, RelTolerance, &Error)) {
if (Diff == 2) {
errs() << "While diffing output: " << Error << '\n';
exit(1);
}
FilesDifferent = true;
}
else {
// Remove the generated output if there are no differences.
Output.eraseFromDisk();
}
// Remove the bitcode file if we are supposed to.
if (RemoveBitcode)
sys::Path(BitcodeFile).eraseFromDisk();
return FilesDifferent;
}
bool BugDriver::isExecutingJIT() {
return InterpreterSel == RunJIT;
}
// CWrapper/src/CWrapper.cpp
#include "Anime4KCPP.hpp"
#include "AC.h"
static std::string lastCoreError("No error");
static Anime4KCPP::ACInitializer initializer;
Anime4KCPP::Parameters getParameters(ac_parameters* c_parameters)
{
if (c_parameters == nullptr)
return Anime4KCPP::Parameters{};
return Anime4KCPP::Parameters(
c_parameters->passes,
c_parameters->pushColorCount,
c_parameters->strengthColor,
c_parameters->strengthGradient,
c_parameters->zoomFactor,
c_parameters->fastMode,
c_parameters->preprocessing,
c_parameters->postprocessing,
c_parameters->preFilters,
c_parameters->postFilters,
c_parameters->HDN,
c_parameters->HDNLevel,
c_parameters->alpha);
}
Anime4KCPP::Processor::Type getProcessorType(ac_processType type, ac_error* error)
{
switch (type)
{
case AC_CPU_Anime4K09:
return Anime4KCPP::Processor::Type::CPU_Anime4K09;
case AC_CPU_ACNet:
return Anime4KCPP::Processor::Type::CPU_ACNet;
#ifdef ENABLE_OPENCL
case AC_OpenCL_Anime4K09:
return Anime4KCPP::Processor::Type::OpenCL_Anime4K09;
case AC_OpenCL_ACNet:
return Anime4KCPP::Processor::Type::OpenCL_ACNet;
#endif
#ifdef ENABLE_CUDA
case AC_Cuda_Anime4K09:
return Anime4KCPP::Processor::Type::Cuda_Anime4K09;
case AC_Cuda_ACNet:
return Anime4KCPP::Processor::Type::Cuda_ACNet;
#endif
default:
if (error != nullptr)
*error = AC_ERROR_PORCESSOR_TYPE;
return Anime4KCPP::Processor::Type::CPU_Anime4K09;
}
}
template<std::size_t size>
void infocpy(char(&dst)[size], const char* src)
{
std::size_t i = size;
char* pdst = dst;
while (i-- && (*pdst++ = *src++));
}
extern "C"
{
ac_version acGetVersion(void)
{
ac_version ret;
infocpy(ret.coreVersion, Anime4KCPP::CoreInfo::version());
infocpy(ret.wrapperVersion,ANIME4KCPP_C_WRAPPER_VERSION);
return ret;
}
void acGetVersion2(ac_version* v)
{
infocpy(v->coreVersion,Anime4KCPP::CoreInfo::version());
infocpy(v->wrapperVersion, ANIME4KCPP_C_WRAPPER_VERSION);
}
ac_instance acGetInstance(ac_bool initGPU, ac_bool initGPUCNN, unsigned int platformID, unsigned int deviceID, ac_parameters* parameters, ac_processType type, ac_error* error)
{
if (error != nullptr)
*error = AC_OK;
#ifdef ENABLE_OPENCL
        if (initGPU == AC_TRUE && !Anime4KCPP::OpenCL::Anime4K09::isInitialized())
        {
            try
            {
                Anime4KCPP::OpenCL::Anime4K09::init(platformID, deviceID);
            }
            catch (const std::exception& err)
            {
                if (error != nullptr)
                    *error = AC_ERROR_INIT_GPU;
                lastCoreError = err.what();
                return nullptr;
            }
        }
#else
        if (initGPU == AC_TRUE)
        {
            if (error != nullptr)
                *error = AC_ERROR_OPENCL_NOT_SUPPORTED;
            return nullptr;
        }
#endif
#ifdef ENABLE_OPENCL
        if (initGPUCNN == AC_TRUE && !Anime4KCPP::OpenCL::ACNet::isInitialized())
{
try
{
Anime4KCPP::OpenCL::ACNet::init(platformID, deviceID);
}
catch (const std::exception& err)
{
if (error != nullptr)
*error = AC_ERROR_INIT_GPU;
lastCoreError = err.what();
return nullptr;
}
}
#endif
switch (type)
{
case AC_CPU_Anime4K09:
return reinterpret_cast<ac_instance>(new Anime4KCPP::CPU::Anime4K09(getParameters(parameters)));
break;
case AC_CPU_ACNet:
return reinterpret_cast<ac_instance>(new Anime4KCPP::CPU::ACNet(getParameters(parameters)));
break;
#ifdef ENABLE_OPENCL
case AC_OpenCL_Anime4K09:
return reinterpret_cast<ac_instance>(new Anime4KCPP::OpenCL::Anime4K09(getParameters(parameters)));
break;
case AC_OpenCL_ACNet:
return reinterpret_cast<ac_instance>(new Anime4KCPP::OpenCL::ACNet(getParameters(parameters)));
break;
#endif
default:
if (error != nullptr)
*error = AC_ERROR_PORCESSOR_TYPE;
return nullptr;
}
}
ac_error acInitProcessor(ac_manager_t managers, ac_managerData* managerData)
{
initializer.release(true);
if (managers & AC_Manager_OpenCL_Anime4K09)
{
#ifndef ENABLE_OPENCL
return AC_ERROR_OPENCL_NOT_SUPPORTED;
#else
if (managerData == nullptr || managerData->OpenCLAnime4K09Data == nullptr)
{
return AC_ERROR_NULL_DATA;
}
initializer.pushManager<Anime4KCPP::OpenCL::Manager<Anime4KCPP::OpenCL::Anime4K09>>
(managerData->OpenCLAnime4K09Data->pID,
managerData->OpenCLAnime4K09Data->dID,
managerData->OpenCLAnime4K09Data->OpenCLQueueNum,
static_cast<bool>(managerData->OpenCLAnime4K09Data->OpenCLParallelIO));
#endif // !ENABLE_OPENCL
}
if (managers & AC_Manager_OpenCL_ACNet)
{
#ifndef ENABLE_OPENCL
return AC_ERROR_OPENCL_NOT_SUPPORTED;
#else
if (managerData == nullptr || managerData->OpenCLACNetData == nullptr)
{
return AC_ERROR_NULL_DATA;
}
initializer.pushManager<Anime4KCPP::OpenCL::Manager<Anime4KCPP::OpenCL::ACNet>>
(managerData->OpenCLACNetData->pID,
managerData->OpenCLACNetData->dID,
static_cast<Anime4KCPP::CNNType::Value>(managerData->OpenCLACNetData->CNNType),
managerData->OpenCLACNetData->OpenCLQueueNum,
static_cast<bool>(managerData->OpenCLACNetData->OpenCLParallelIO));
#endif // !ENABLE_OPENCL
}
if (managers & AC_Manager_Cuda)
{
#ifndef ENABLE_CUDA
return AC_ERROR_CUDA_NOT_SUPPORTED;
#else
if (managerData == nullptr || managerData->CUDAData == nullptr)
{
return AC_ERROR_NULL_DATA;
}
initializer.pushManager<Anime4KCPP::Cuda::Manager>(managerData->CUDAData->dID);
#endif // !CUDA_ENABLE
}
try
{
initializer.init();
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_INIT_PROCESSOR;
}
return AC_OK;
}
void acReleaseAllProcessors(void)
{
initializer.release(true);
}
ac_instance acGetInstance2(ac_manager_t managers, ac_managerData* managerData, ac_parameters* parameters, ac_processType type, ac_error* error)
{
ac_error err = acInitProcessor(managers, managerData);
if (error != nullptr)
*error = err;
if (err != AC_OK)
return nullptr;
return acGetInstance3(parameters, type, error);
}
ac_instance acGetInstance3(ac_parameters* parameters, ac_processType type, ac_error* error)
{
return reinterpret_cast<ac_instance>(Anime4KCPP::ACCreator::create(getParameters(parameters), getProcessorType(type, error)));
}
void acFreeInstance(ac_instance instance, ac_bool releaseGPU, ac_bool releaseGPUCNN)
{
if (instance != nullptr)
delete reinterpret_cast<Anime4KCPP::AC*>(instance);
#ifdef ENABLE_OPENCL
if (releaseGPU == AC_TRUE && Anime4KCPP::OpenCL::Anime4K09::isInitialized())
Anime4KCPP::OpenCL::Anime4K09::release();
if (releaseGPUCNN == AC_TRUE && Anime4KCPP::OpenCL::ACNet::isInitialized())
Anime4KCPP::OpenCL::ACNet::release();
#endif
initializer.release(true);
}
void acFreeInstance2(ac_instance instance)
{
initializer.release(reinterpret_cast<Anime4KCPP::AC*>(instance));
initializer.release(true);
}
ac_error acInitParameters(ac_parameters* parameters)
{
if (parameters == nullptr)
return AC_ERROR_NULL_PARAMETERS;
parameters->passes = 2;
parameters->pushColorCount = 2;
parameters->strengthColor = 0.3F;
parameters->strengthGradient = 1.0F;
parameters->zoomFactor = 2.0F;
parameters->fastMode = AC_FALSE;
parameters->preprocessing = AC_FALSE;
parameters->postprocessing = AC_FALSE;
parameters->preFilters = 4;
parameters->postFilters = 40;
parameters->HDN = AC_FALSE;
return AC_OK;
}
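    // A hedged usage sketch of the C API defined in this file (illustrative
    // only, not part of the original source); "input.png"/"output.png" are
    // placeholder paths and error handling is abbreviated:
    //
    //   ac_parameters params;
    //   acInitParameters(&params);
    //   ac_error err = AC_OK;
    //   ac_instance ac = acGetInstance3(&params, AC_CPU_ACNet, &err);
    //   if (err == AC_OK
    //       && acLoadImage(ac, "input.png") == AC_OK
    //       && acProcess(ac) == AC_OK)
    //       acSaveImage(ac, "output.png");
    //   acFreeInstance2(ac);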
ac_error acLoadImage(ac_instance instance, const char* srcFile)
{
#if ENABLE_IMAGE_IO
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
try
{
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(srcFile);
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_LOAD_IMAGE;
}
return AC_OK;
#else
return AC_ERROR_IMAGE_IO_DISABLE;
#endif // ENABLE_IMAGE_IO
}
ac_error acLoadImageFromBuffer(ac_instance instance, const uint8_t* buf, size_t size)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
try
{
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(buf, size);
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_LOAD_IMAGE;
}
return AC_OK;
}
ac_error acProcess(ac_instance instance)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
try
{
reinterpret_cast<Anime4KCPP::AC*>(instance)->process();
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_GPU_PROCESS;
}
return AC_OK;
}
ac_error acShowImage(ac_instance instance, ac_bool R2B)
{
#if ENABLE_PREVIEW_GUI
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::AC*>(instance)->showImage(R2B);
return AC_OK;
#else
return AC_ERROR_PREVIEW_GUI_DISABLE;
#endif // ENABLE_PREVIEW_GUI
}
ac_error acSaveImage(ac_instance instance, const char* dstFile)
{
#if ENABLE_IMAGE_IO
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
try
{
reinterpret_cast<Anime4KCPP::AC*>(instance)->saveImage(dstFile);
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_NOT_YUV444;
}
return AC_OK;
#else
return AC_ERROR_IMAGE_IO_DISABLE;
#endif // ENABLE_IMAGE_IO
}
ac_error acSaveImageToBuffer(ac_instance instance, const char* suffix, uint8_t* buf, size_t size)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
try
{
std::vector<uint8_t> data;
reinterpret_cast<Anime4KCPP::AC*>(instance)->saveImage(suffix, data);
if (data.size() > size)
return AC_ERROR_INSUFFICIENT_BUFFER_SIZE;
std::copy(data.begin(), data.end(), buf);
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_FAILED_TO_ENCODE;
}
return AC_OK;
}
ac_error acSetParameters(ac_instance instance, ac_parameters* parameters)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::AC*>(instance)->setParameters(getParameters(parameters));
return AC_OK;
}
ac_error acInitGPU(void)
{
#ifdef ENABLE_OPENCL
try
{
if (!Anime4KCPP::OpenCL::Anime4K09::isInitialized())
Anime4KCPP::OpenCL::Anime4K09::init();
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_INIT_GPU;
}
#endif
return AC_OK;
}
void acReleaseGPU(void)
{
#ifdef ENABLE_OPENCL
if (Anime4KCPP::OpenCL::Anime4K09::isInitialized())
Anime4KCPP::OpenCL::Anime4K09::release();
#endif
}
ac_error acInitGPUCNN(void)
{
#ifdef ENABLE_OPENCL
try
{
if (!Anime4KCPP::OpenCL::ACNet::isInitialized())
Anime4KCPP::OpenCL::ACNet::init();
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_INIT_GPU;
}
#endif
return AC_OK;
}
void acReleaseGPUCNN(void)
{
#ifdef ENABLE_OPENCL
if (Anime4KCPP::OpenCL::ACNet::isInitialized())
Anime4KCPP::OpenCL::ACNet::release();
#endif
}
ac_error acInitGPU2(unsigned int managers, ac_managerData* managerData)
{
return acInitProcessor(managers, managerData);
}
void acReleaseGPU2(void)
{
acReleaseAllProcessors();
}
ac_error acLoadImageRGBPlanarB(ac_instance instance, int rows, int cols, size_t stride, uint8_t* r, uint8_t* g, uint8_t* b, ac_bool inputAsYUV444)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(rows, cols, stride, r, g, b, inputAsYUV444);
return AC_OK;
}
ac_error acLoadImageYUVPlanarB(ac_instance instance,
int rowsY, int colsY, size_t strideY, uint8_t* y,
int rowsU, int colsU, size_t strideU, uint8_t* u,
int rowsV, int colsV, size_t strideV, uint8_t* v)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(
rowsY, colsY, strideY, y,
rowsU, colsU, strideU, u,
rowsV, colsV, strideV, v);
return AC_OK;
}
ac_error acLoadImageRGBPackedB(ac_instance instance, int rows, int cols, size_t stride, uint8_t* data, ac_bool inputAsYUV444, ac_bool inputAsRGB32)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
if (inputAsRGB32 && inputAsYUV444)
return AC_ERROR_YUV444_AND_RGB32_AT_SAME_TIME;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(rows, cols, stride, data, inputAsYUV444, inputAsRGB32);
return AC_OK;
}
ac_error acLoadImageGrayscaleB(ac_instance instance, int rows, int cols, size_t stride, uint8_t* data)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(rows, cols, stride, data, false, false, true);
return AC_OK;
}
ac_error acLoadImageRGBPlanarW(ac_instance instance, int rows, int cols, size_t stride, uint16_t* r, uint16_t* g, uint16_t* b, ac_bool inputAsYUV444)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(rows, cols, stride, r, g, b, inputAsYUV444);
return AC_OK;
}
ac_error acLoadImageYUVPlanarW(ac_instance instance,
int rowsY, int colsY, size_t strideY, uint16_t* y,
int rowsU, int colsU, size_t strideU, uint16_t* u,
int rowsV, int colsV, size_t strideV, uint16_t* v)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(
rowsY, colsY, strideY, y,
rowsU, colsU, strideU, u,
rowsV, colsV, strideV, v);
return AC_OK;
}
ac_error acLoadImageRGBPackedW(ac_instance instance, int rows, int cols, size_t stride, uint16_t* data, ac_bool inputAsYUV444, ac_bool inputAsRGB32)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
if (inputAsRGB32 && inputAsYUV444)
return AC_ERROR_YUV444_AND_RGB32_AT_SAME_TIME;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(rows, cols, stride, data, inputAsYUV444, inputAsRGB32);
return AC_OK;
}
ac_error acLoadImageGrayscaleW(ac_instance instance, int rows, int cols, size_t stride, uint16_t* data)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(rows, cols, stride, data, false, false, true);
return AC_OK;
}
ac_error acLoadImageRGBPlanarF(ac_instance instance, int rows, int cols, size_t stride, float* r, float* g, float* b, ac_bool inputAsYUV444)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(rows, cols, stride, r, g, b, inputAsYUV444);
return AC_OK;
}
ac_error acLoadImageYUVPlanarF(ac_instance instance,
int rowsY, int colsY, size_t strideY, float* y,
int rowsU, int colsU, size_t strideU, float* u,
int rowsV, int colsV, size_t strideV, float* v)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(
rowsY, colsY, strideY, y,
rowsU, colsU, strideU, u,
rowsV, colsV, strideV, v);
return AC_OK;
}
ac_error acLoadImageRGBPackedF(ac_instance instance, int rows, int cols, size_t stride, float* data, ac_bool inputAsYUV444, ac_bool inputAsRGB32)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
if (inputAsRGB32 && inputAsYUV444)
return AC_ERROR_YUV444_AND_RGB32_AT_SAME_TIME;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(rows, cols, stride, data, inputAsYUV444, inputAsRGB32);
return AC_OK;
}
ac_error acLoadImageGrayscaleF(ac_instance instance, int rows, int cols, size_t stride, float* data)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::AC*>(instance)->loadImage(rows, cols, stride, data, false, false, true);
return AC_OK;
}
ac_error acSaveImageRGBPlanar(ac_instance instance,
uint8_t* r, size_t strideR,
uint8_t* g, size_t strideG,
uint8_t* b, size_t strideB)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
if (r == nullptr || g == nullptr || b == nullptr)
return AC_ERROR_SAVE_TO_NULL_POINTER;
try
{
reinterpret_cast<Anime4KCPP::AC*>(instance)->saveImage(r, strideR, g, strideG, b, strideB);
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_NOT_YUV444;
}
return AC_OK;
}
ac_error acSaveImageRGBPacked(ac_instance instance, uint8_t* data, size_t stride)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
if (data == nullptr)
return AC_ERROR_SAVE_TO_NULL_POINTER;
try
{
reinterpret_cast<Anime4KCPP::AC*>(instance)->saveImage(data, stride);
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_NOT_YUV444;
}
return AC_OK;
}
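// Note on the info/length output parameters used by the query functions below:
// a caller may first pass a null info pointer to receive the required buffer
// size (including the terminating null) through length, and then call again
// with a buffer of at least that size.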
ac_error acGetInfo(ac_instance instance, char* info, size_t* length)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
if (info == nullptr && length == nullptr)
return AC_OK;
std::string ret = reinterpret_cast<Anime4KCPP::AC*>(instance)->getInfo();
if (length != nullptr)
*length = ret.size() + 1;
if (info != nullptr)
memcpy(info, ret.c_str(), ret.size() + 1);
return AC_OK;
}
ac_error acGetFiltersInfo(ac_instance instance, char* info, size_t* length)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
if (info == nullptr && length == nullptr)
return AC_OK;
std::string ret = reinterpret_cast<Anime4KCPP::AC*>(instance)->getFiltersInfo();
if (length != nullptr)
*length = ret.size() + 1;
if (info != nullptr)
memcpy(info, ret.c_str(), ret.size() + 1);
return AC_OK;
}
ac_bool acCheckGPUSupport(unsigned int pID, unsigned int dID, char* info, size_t* length)
{
#ifndef ENABLE_OPENCL
return AC_FALSE;
#else
Anime4KCPP::OpenCL::GPUInfo ret = Anime4KCPP::OpenCL::checkGPUSupport(pID, dID);
ac_bool rst = ac_bool(ret.supported);
if (length != nullptr)
*length = ret().size() + 1;
if (info != nullptr)
memcpy(info, ret().c_str(), ret().size() + 1);
return rst;
#endif
}
ac_bool acCheckGPUSupport2(ac_GPGPU GPGPUModel, unsigned int pID, unsigned int dID, char* info, size_t* length)
{
std::string infoString;
ac_bool rst = AC_FALSE;
switch (GPGPUModel)
{
case AC_CUDA:
#ifdef ENABLE_CUDA
{
Anime4KCPP::Cuda::GPUInfo ret = Anime4KCPP::Cuda::checkGPUSupport(dID);
rst = (ac_bool)ret.supported;
infoString = ret();
}
#else
{
rst = (ac_bool)false;
infoString = "CUDA is not supported";
}
#endif
break;
case AC_OpenCL:
#ifdef ENABLE_OPENCL
{
Anime4KCPP::OpenCL::GPUInfo ret = Anime4KCPP::OpenCL::checkGPUSupport(pID, dID);
rst = (ac_bool)ret.supported;
infoString = ret();
}
#else
{
rst = (ac_bool)false;
infoString = "OpenCL is not supported";
}
#endif
break;
}
if (length != nullptr)
*length = infoString.size() + 1;
if (info != nullptr)
memcpy(info, infoString.c_str(), infoString.size() + 1);
return rst;
}
void acListGPUs(char* info, size_t* length, size_t* platforms, size_t* devices)
{
#ifdef ENABLE_OPENCL
Anime4KCPP::OpenCL::GPUList ret = Anime4KCPP::OpenCL::listGPUs();
if (length != nullptr)
*length = ret().size() + 1;
if (info != nullptr)
memcpy(info, ret().c_str(), ret().size() + 1);
if (platforms != nullptr)
*platforms = ret.platforms;
if (devices != nullptr)
for (int i : ret.devices)
*(devices++) = i;
#else
std::string ret = "OpenCL is not supported";
if (length != nullptr)
*length = ret.size() + 1;
if (info != nullptr)
memcpy(info, ret.c_str(), ret.size() + 1);
if (platforms != nullptr)
*platforms = 0;
#endif
}
ac_bool acIsInitializedGPU(void)
{
#ifdef ENABLE_OPENCL
return ac_bool(Anime4KCPP::OpenCL::Anime4K09::isInitialized());
#else
return AC_FALSE;
#endif
}
ac_bool acIsInitializedGPUCNN(void)
{
#ifdef ENABLE_OPENCL
return ac_bool(Anime4KCPP::OpenCL::ACNet::isInitialized());
#else
return AC_FALSE;
#endif
}
void acGetLastCoreErrorString(char* err, size_t* length)
{
if (length != nullptr)
*length = lastCoreError.size() + 1;
if (err != nullptr)
memcpy(err, lastCoreError.c_str(), lastCoreError.size() + 1);
}
void acBenchmark(const int pID, const int dID, double* CPUScore, double* GPUScore)
{
double _CPUScore = Anime4KCPP::benchmark<Anime4KCPP::CPU::ACNet, 1920, 1080>();
#ifdef ENABLE_OPENCL
double _OpenCLScore = Anime4KCPP::benchmark<Anime4KCPP::OpenCL::ACNet, 1920, 1080>(pID, dID, Anime4KCPP::CNNType::ACNetHDNL0);
#else
double _OpenCLScore = 0.0;
#endif
*CPUScore = _CPUScore;
*GPUScore = _OpenCLScore;
}
double acBenchmark2(ac_processType processType, const int pID, const int dID)
{
switch (processType)
{
case AC_CPU_Anime4K09:
return Anime4KCPP::benchmark<Anime4KCPP::CPU::Anime4K09, 1920, 1080>();
case AC_CPU_ACNet:
return Anime4KCPP::benchmark<Anime4KCPP::CPU::ACNet, 1920, 1080>();
#ifdef ENABLE_OPENCL
case AC_OpenCL_Anime4K09:
return Anime4KCPP::benchmark<Anime4KCPP::OpenCL::Anime4K09, 1920, 1080>(pID, dID);
case AC_OpenCL_ACNet:
return Anime4KCPP::benchmark<Anime4KCPP::OpenCL::ACNet, 1920, 1080>(pID, dID, Anime4KCPP::CNNType::ACNetHDNL0);
#endif // ENABLE_OPENCL
#ifdef ENABLE_CUDA
case AC_Cuda_Anime4K09:
return Anime4KCPP::benchmark<Anime4KCPP::Cuda::Anime4K09, 1920, 1080>(dID);
case AC_Cuda_ACNet:
return Anime4KCPP::benchmark<Anime4KCPP::Cuda::ACNet, 1920, 1080>(dID);
#endif // ENABLE_CUDA
default:
return 0.0;
}
}
ac_processType acGetProcessType(ac_instance instance, ac_error* error)
{
if (error != nullptr)
*error = AC_OK;
if (instance == nullptr)
{
if (error != nullptr)
*error = AC_ERROR_NULL_INSTANCE;
return AC_CPU_Anime4K09;
}
Anime4KCPP::Processor::Type type = reinterpret_cast<Anime4KCPP::AC*>(instance)->getProcessorType();
switch (type)
{
case Anime4KCPP::Processor::Type::CPU_Anime4K09:
return AC_CPU_Anime4K09;
case Anime4KCPP::Processor::Type::CPU_ACNet:
return AC_CPU_ACNet;
#ifdef ENABLE_OPENCL
case Anime4KCPP::Processor::Type::OpenCL_Anime4K09:
return AC_OpenCL_Anime4K09;
case Anime4KCPP::Processor::Type::OpenCL_ACNet:
return AC_OpenCL_ACNet;
#endif
#ifdef ENABLE_CUDA
case Anime4KCPP::Processor::Type::Cuda_Anime4K09:
return AC_Cuda_Anime4K09;
case Anime4KCPP::Processor::Type::Cuda_ACNet:
return AC_Cuda_ACNet;
#endif
default:
return AC_CPU_Anime4K09;
}
}
ac_error acGetProcessorInfo(ac_instance instance, char* info, size_t* length)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
if (info == nullptr && length == nullptr)
return AC_OK;
std::string ret = reinterpret_cast<Anime4KCPP::AC*>(instance)->getProcessorInfo();
if (length != nullptr)
*length = ret.size() + 1;
if (info != nullptr)
memcpy(info, ret.c_str(), ret.size() + 1);
return AC_OK;
}
ac_error acSaveImageBufferSize(ac_instance instance, size_t* dataSize, size_t dstStride)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
if (dataSize != nullptr)
reinterpret_cast<Anime4KCPP::AC*>(instance)->saveImageBufferSize(*dataSize, dstStride);
else
return AC_ERROR_NULL_DATA;
return AC_OK;
}
ac_error acSaveImageBufferSizeRGB(ac_instance instance,
size_t* rSize, size_t dstStrideR,
size_t* gSize, size_t dstStrideG,
size_t* bSize, size_t dstStrideB)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
if (rSize != nullptr && gSize != nullptr && bSize != nullptr)
reinterpret_cast<Anime4KCPP::AC*>(instance)->saveImageBufferSize(
*rSize, dstStrideR,
*gSize, dstStrideG,
*bSize, dstStrideB);
else
return AC_ERROR_NULL_DATA;
return AC_OK;
}
ac_error saveImageShape(ac_instance instance, int* cols, int* rows, int* channels)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
if (cols != nullptr && rows != nullptr && channels != nullptr)
reinterpret_cast<Anime4KCPP::AC*>(instance)->saveImageShape(*cols, *rows, *channels);
else
return AC_ERROR_NULL_DATA;
return AC_OK;
}
#ifdef ENABLE_VIDEO
ac_videoProcessor acGetVideoProcessor(ac_parameters* parameters, ac_processType type, ac_error* error)
{
return acGetVideoProcessorWithThreads(parameters, type, 0, error);
}
ac_videoProcessor acGetVideoProcessorWithThreads(ac_parameters* parameters, ac_processType type, unsigned int threads, ac_error* error)
{
if (error != nullptr)
*error = AC_OK;
return reinterpret_cast<ac_instance>(new Anime4KCPP::VideoProcessor(getParameters(parameters), getProcessorType(type, error), threads));
}
ac_videoProcessor acGetVideoProcessorFromInstance(ac_instance instance)
{
return acGetVideoProcessorFromInstanceWithThreads(instance, 0);
}
ac_videoProcessor acGetVideoProcessorFromInstanceWithThreads(ac_instance instance, unsigned int threads)
{
return reinterpret_cast<ac_instance>(new Anime4KCPP::VideoProcessor(*reinterpret_cast<Anime4KCPP::AC*>(instance), threads));
}
void acFreeVideoProcessor(ac_videoProcessor instance)
{
if (instance != nullptr)
{
delete reinterpret_cast<Anime4KCPP::VideoProcessor*>(instance);
instance = nullptr;
}
}
ac_error acLoadVideo(ac_videoProcessor instance, const char* srcFile)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
try
{
reinterpret_cast<Anime4KCPP::VideoProcessor*>(instance)->loadVideo(srcFile);
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_LOAD_VIDEO;
}
return AC_OK;
}
ac_error acProcessVideo(ac_videoProcessor instance)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
try
{
reinterpret_cast<Anime4KCPP::VideoProcessor*>(instance)->process();
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_GPU_PROCESS;
}
return AC_OK;
}
ac_error acProcessWithPrintProgress(ac_videoProcessor instance)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
try
{
auto s = std::chrono::steady_clock::now();
reinterpret_cast<Anime4KCPP::VideoProcessor*>(instance)->processWithProgress(
[&s](double progress)
{
auto e = std::chrono::steady_clock::now();
double currTime = std::chrono::duration_cast<std::chrono::milliseconds>(e - s).count() / 1000.0;
std::fprintf(stderr,
"%7.2f%% elpsed: %8.2fs remaining: %8.2fs\r",
progress * 100,
currTime,
currTime / progress - currTime);
if (progress == 1.0)
std::putc('\n', stderr);
});
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_GPU_PROCESS;
}
return AC_OK;
}
ac_error acProcessWithProgress(ac_videoProcessor instance, void (*callBack)(double))
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
try
{
reinterpret_cast<Anime4KCPP::VideoProcessor*>(instance)->processWithProgress(callBack);
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_GPU_PROCESS;
}
return AC_OK;
}
ac_error acProcessWithProgressTime(ac_videoProcessor instance, void (*callBack)(double, double))
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
try
{
time_t start = time(nullptr);
reinterpret_cast<Anime4KCPP::VideoProcessor*>(instance)->processWithProgress(
[&callBack, &start](double v)
{
callBack(v, static_cast<double>(time(nullptr) - start));
});
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_GPU_PROCESS;
}
return AC_OK;
}
ac_error acStopVideoProcess(ac_videoProcessor instance)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::VideoProcessor*>(instance)->stopVideoProcess();
return AC_OK;
}
ac_error acPauseVideoProcess(ac_videoProcessor instance)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::VideoProcessor*>(instance)->pauseVideoProcess();
return AC_OK;
}
ac_error acContinueVideoProcess(ac_videoProcessor instance)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::VideoProcessor*>(instance)->continueVideoProcess();
return AC_OK;
}
ac_error acSetSaveVideoInfo(ac_videoProcessor instance, const char* dstFile, ac_codec codec, double fps)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
try
{
reinterpret_cast<Anime4KCPP::VideoProcessor*>(instance)->setVideoSaveInfo(dstFile, Anime4KCPP::Codec(codec), fps);
}
catch (const std::exception& err)
{
lastCoreError = err.what();
return AC_ERROR_INIT_VIDEO_WRITER;
}
return AC_OK;
}
ac_error acSaveVideo(ac_videoProcessor instance)
{
if (instance == nullptr)
return AC_ERROR_NULL_INSTANCE;
reinterpret_cast<Anime4KCPP::VideoProcessor*>(instance)->saveVideo();
return AC_OK;
}
#endif
}
| 16,742 |
474 | <reponame>chenqian-dev/QNShortVideo-TuTu<filename>QNShortVideo-With-TuTu-iOS/tusdkfilterprocessormodule/tusdkfilterprocessormodule/TuSDKFramework/TuSDK.framework/Versions/A/Headers/SLGPUImageTwoInputFilter.h
#import "SLGPUImageFilter.h"
extern NSString *const LSQKGPUImageTwoInputTextureVertexShaderString;
@interface SLGPUImageTwoInputFilter : SLGPUImageFilter
{
SLGPUImageFramebuffer *secondInputFramebuffer;
GLint filterSecondTextureCoordinateAttribute;
GLint filterInputTextureUniform2;
LSQGPUImageRotationMode inputRotation2;
CMTime firstFrameTime, secondFrameTime;
BOOL hasSetFirstTexture, hasReceivedFirstFrame, hasReceivedSecondFrame, firstFrameWasVideo, secondFrameWasVideo;
BOOL firstFrameCheckDisabled, secondFrameCheckDisabled;
}
- (void)disableFirstFrameCheck;
- (void)disableSecondFrameCheck;
@end
| 283 |
434 | /*
* mkdlinkfw
*
* Copyright (C) 2018 <NAME> <<EMAIL>>
*
* This tool is based on mktplinkfw.
* Copyright (C) 2009 <NAME> <<EMAIL>>
* Copyright (C) 2008,2009 <NAME> <<EMAIL>>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#ifndef mkdlinkfw_lib_h
#define mkdlinkfw_lib_h
#define AUH_MAGIC "DLK"
#define AUH_SIZE 80
#define AUH_LVPS 0x01
#define AUH_HDR_ID 0x4842
#define AUH_HDR_VER 0x02
#define AUH_SEC_ID 0x04
#define AUH_INFO_TYPE 0x04
#define STAG_SIZE 16
#define STAG_ID 0x04
#define STAG_MAGIC 0x2B24
#define STAG_CMARK_FACTORY 0xFF
#define SCH2_SIZE 40
#define SCH2_MAGIC 0x2124
#define SCH2_VER 0x02
#define FLAT 0
#define JZ 1
#define GZIP 2
#define LZMA 3
#define RAM_ENTRY_ADDR 0x80000000
#define RAM_LOAD_ADDR 0x80000000
#define JBOOT_SIZE 0x10000
#define ALL_HEADERS_SIZE (AUH_SIZE + STAG_SIZE + SCH2_SIZE)
#define MAX_HEADER_COUNTER 10
#define TIMESTAMP_MAGIC 0x35016f00L
#define FACTORY 0
#define SYSUPGRADE 1
#define ALIGN(x, a) ({ typeof(a) __a = (a); (((x) + __a - 1) & ~(__a - 1)); })
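/* Example: ALIGN(0x1234, 0x1000) evaluates to 0x2000 -- x is rounded up to the
 * next multiple of a, which is assumed to be a power of two. */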
#define ERR(fmt, ...) do { \
fflush(0); \
fprintf(stderr, "[%s] *** error: " fmt "\n", \
progname, ## __VA_ARGS__); \
} while (0)
#define ERRS(fmt, ...) do { \
int save = errno; \
fflush(0); \
fprintf(stderr, "[%s] *** error: " fmt ": %s\n", \
progname, ## __VA_ARGS__, strerror(save)); \
} while (0)
#define DBG(fmt, ...) do { \
fprintf(stderr, "[%s] " fmt "\n", progname, ## __VA_ARGS__); \
} while (0)
struct file_info {
char *file_name; /* name of the file */
uint32_t file_size; /* length of the file */
};
uint32_t jboot_timestamp(void);
uint16_t jboot_checksum(uint16_t start_val, uint16_t *data, int size);
int get_file_stat(struct file_info *fdata);
int read_to_buf(const struct file_info *fdata, char *buf);
int pad_jffs2(char *buf, int currlen, int maxlen);
int write_fw(const char *ofname, const char *data, int len);
#endif /* mkdlinkfw_lib_h */
| 885 |
679 | <reponame>Grosskopf/openoffice<filename>main/extensions/source/abpilot/abspilot.cxx
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_extensions.hxx"
#include "abspilot.hxx"
#include "abpilot.hrc"
#include "abpresid.hrc"
#include "componentmodule.hxx"
#include <tools/debug.hxx>
#include <svtools/localresaccess.hxx>
#include "typeselectionpage.hxx"
#include "admininvokationpage.hxx"
#include "tableselectionpage.hxx"
#include <vcl/waitobj.hxx>
#include <vcl/msgbox.hxx>
#include "abpfinalpage.hxx"
#include "fieldmappingpage.hxx"
#include "fieldmappingimpl.hxx"
//.........................................................................
namespace abp
{
//.........................................................................
#define STATE_SELECT_ABTYPE 0
#define STATE_INVOKE_ADMIN_DIALOG 1
#define STATE_TABLE_SELECTION 2
#define STATE_MANUAL_FIELD_MAPPING 3
#define STATE_FINAL_CONFIRM 4
#define PATH_COMPLETE 1
#define PATH_NO_SETTINGS 2
#define PATH_NO_FIELDS 3
#define PATH_NO_SETTINGS_NO_FIELDS 4
using namespace ::svt;
using namespace ::com::sun::star::uno;
using namespace ::com::sun::star::lang;
//=====================================================================
//= OAddessBookSourcePilot
//=====================================================================
//---------------------------------------------------------------------
OAddessBookSourcePilot::OAddessBookSourcePilot(Window* _pParent, const Reference< XMultiServiceFactory >& _rxORB)
:OAddessBookSourcePilot_Base( _pParent, ModuleRes( RID_DLG_ADDRESSBOOKSOURCEPILOT ),
WZB_HELP | WZB_FINISH | WZB_CANCEL | WZB_NEXT | WZB_PREVIOUS )
,m_xORB(_rxORB)
,m_aNewDataSource(_rxORB)
,m_eNewDataSourceType( AST_INVALID )
{
SetPageSizePixel(LogicToPixel(Size(WINDOW_SIZE_X, WINDOW_SIZE_Y), MAP_APPFONT));
ShowButtonFixedLine(sal_True);
declarePath( PATH_COMPLETE,
STATE_SELECT_ABTYPE,
STATE_INVOKE_ADMIN_DIALOG,
STATE_TABLE_SELECTION,
STATE_MANUAL_FIELD_MAPPING,
STATE_FINAL_CONFIRM,
WZS_INVALID_STATE
);
declarePath( PATH_NO_SETTINGS,
STATE_SELECT_ABTYPE,
STATE_TABLE_SELECTION,
STATE_MANUAL_FIELD_MAPPING,
STATE_FINAL_CONFIRM,
WZS_INVALID_STATE
);
declarePath( PATH_NO_FIELDS,
STATE_SELECT_ABTYPE,
STATE_INVOKE_ADMIN_DIALOG,
STATE_TABLE_SELECTION,
STATE_FINAL_CONFIRM,
WZS_INVALID_STATE
);
declarePath( PATH_NO_SETTINGS_NO_FIELDS,
STATE_SELECT_ABTYPE,
STATE_TABLE_SELECTION,
STATE_FINAL_CONFIRM,
WZS_INVALID_STATE
);
m_pPrevPage->SetHelpId(HID_ABSPILOT_PREVIOUS);
m_pNextPage->SetHelpId(HID_ABSPILOT_NEXT);
m_pCancel->SetHelpId(HID_ABSPILOT_CANCEL);
m_pFinish->SetHelpId(HID_ABSPILOT_FINISH);
m_pHelp->SetUniqueId(UID_ABSPILOT_HELP);
m_pCancel->SetClickHdl( LINK( this, OAddessBookSourcePilot, OnCancelClicked) );
// some initial settings
#ifdef MACOSX
m_aSettings.eType = AST_MACAB;
#elif WITH_MOZILLA
#ifdef UNX
m_aSettings.eType = AST_MORK;
#else
m_aSettings.eType = AST_OE;
#endif
#else
m_aSettings.eType = AST_OTHER;
#endif
m_aSettings.sDataSourceName = String(ModuleRes(RID_STR_DEFAULT_NAME));
m_aSettings.bRegisterDataSource = false;
m_aSettings.bIgnoreNoTable = false;
defaultButton(WZB_NEXT);
enableButtons(WZB_FINISH, sal_False);
ActivatePage();
typeSelectionChanged( m_aSettings.eType );
}
//---------------------------------------------------------------------
String OAddessBookSourcePilot::getStateDisplayName( WizardState _nState ) const
{
sal_uInt16 nResId = 0;
switch ( _nState )
{
case STATE_SELECT_ABTYPE: nResId = STR_SELECT_ABTYPE; break;
case STATE_INVOKE_ADMIN_DIALOG: nResId = STR_INVOKE_ADMIN_DIALOG; break;
case STATE_TABLE_SELECTION: nResId = STR_TABLE_SELECTION; break;
case STATE_MANUAL_FIELD_MAPPING: nResId = STR_MANUAL_FIELD_MAPPING; break;
case STATE_FINAL_CONFIRM: nResId = STR_FINAL_CONFIRM; break;
}
DBG_ASSERT( nResId, "OAddessBookSourcePilot::getStateDisplayName: don't know this state!" );
String sDisplayName;
if ( nResId )
{
svt::OLocalResourceAccess aAccess( ModuleRes( RID_DLG_ADDRESSBOOKSOURCEPILOT ), RSC_MODALDIALOG );
sDisplayName = String( ModuleRes( nResId ) );
}
return sDisplayName;
}
//---------------------------------------------------------------------
void OAddessBookSourcePilot::implCommitAll()
{
// in real, the data source already exists in the data source context
// Thus, if the user changed the name, we have to rename the data source
if ( m_aSettings.sDataSourceName != m_aNewDataSource.getName() )
m_aNewDataSource.rename( m_aSettings.sDataSourceName );
// 1. the data source
m_aNewDataSource.store();
// 2. check if we need to register the data source
if ( m_aSettings.bRegisterDataSource )
m_aNewDataSource.registerDataSource(m_aSettings.sRegisteredDataSourceName);
// 3. write the data source / table names into the configuration
addressconfig::writeTemplateAddressSource( getORB(), m_aSettings.bRegisterDataSource ? m_aSettings.sRegisteredDataSourceName : m_aSettings.sDataSourceName, m_aSettings.sSelectedTable );
// 4. write the field mapping
fieldmapping::writeTemplateAddressFieldMapping( getORB(), m_aSettings.aFieldMapping );
}
//---------------------------------------------------------------------
void OAddessBookSourcePilot::implCleanup()
{
if ( m_aNewDataSource.isValid() )
m_aNewDataSource.remove();
}
//---------------------------------------------------------------------
IMPL_LINK( OAddessBookSourcePilot, OnCancelClicked, void*, /*NOTINTERESTEDIN*/ )
{
// do cleanups
implCleanup();
// reset the click hdl
m_pCancel->SetClickHdl( Link() );
        // simulate the click again - this time, the default handling of the button will take effect
m_pCancel->Click();
return 0L;
}
//---------------------------------------------------------------------
sal_Bool OAddessBookSourcePilot::Close()
{
implCleanup();
return OAddessBookSourcePilot_Base::Close();
}
//---------------------------------------------------------------------
sal_Bool OAddessBookSourcePilot::onFinish()
{
if ( !OAddessBookSourcePilot_Base::onFinish() )
return sal_False;
implCommitAll();
addressconfig::markPilotSuccess( getORB() );
return sal_True;
}
//---------------------------------------------------------------------
void OAddessBookSourcePilot::enterState( WizardState _nState )
{
switch ( _nState )
{
case STATE_SELECT_ABTYPE:
impl_updateRoadmap( static_cast< TypeSelectionPage* >( GetPage( STATE_SELECT_ABTYPE ) )->getSelectedType() );
break;
case STATE_FINAL_CONFIRM:
if ( !needManualFieldMapping( ) )
implDoAutoFieldMapping();
break;
case STATE_TABLE_SELECTION:
implDefaultTableName();
break;
}
OAddessBookSourcePilot_Base::enterState(_nState);
}
//---------------------------------------------------------------------
sal_Bool OAddessBookSourcePilot::prepareLeaveCurrentState( CommitPageReason _eReason )
{
if ( !OAddessBookSourcePilot_Base::prepareLeaveCurrentState( _eReason ) )
return sal_False;
if ( _eReason == eTravelBackward )
return sal_True;
sal_Bool bAllow = sal_True;
switch ( getCurrentState() )
{
case STATE_SELECT_ABTYPE:
implCreateDataSource();
if ( needAdminInvokationPage() )
break;
// no break here
case STATE_INVOKE_ADMIN_DIALOG:
if ( !connectToDataSource( sal_False ) )
{
// connecting did not succeed -> do not allow proceeding
bAllow = sal_False;
break;
}
// ........................................................
// now that we connected to the data source, check whether we need the "table selection" page
const StringBag& aTables = m_aNewDataSource.getTableNames();
if ( aTables.empty() )
{
if ( RET_YES != QueryBox( this, ModuleRes( RID_QRY_NOTABLES ) ).Execute() )
{
// cannot ask the user, or the user chose to use this data source, though there are no tables
bAllow = sal_False;
break;
}
m_aSettings.bIgnoreNoTable = true;
}
if ( aTables.size() == 1 )
// remember the one and only table we have
m_aSettings.sSelectedTable = *aTables.begin();
break;
}
impl_updateRoadmap( m_aSettings.eType );
return bAllow;
}
//---------------------------------------------------------------------
void OAddessBookSourcePilot::implDefaultTableName()
{
const StringBag& rTableNames = getDataSource().getTableNames();
if ( rTableNames.end() != rTableNames.find( getSettings().sSelectedTable ) )
// already a valid table selected
return;
const sal_Char* pGuess = NULL;
switch ( getSettings().eType )
{
case AST_MORK :
case AST_THUNDERBIRD : pGuess = "Personal Address book"; break;
case AST_LDAP : pGuess = "LDAP Directory"; break;
case AST_EVOLUTION :
case AST_EVOLUTION_GROUPWISE:
case AST_EVOLUTION_LDAP : pGuess = "Personal"; break;
default:
DBG_ERROR( "OAddessBookSourcePilot::implDefaultTableName: unhandled case!" );
return;
}
const ::rtl::OUString sGuess = ::rtl::OUString::createFromAscii( pGuess );
if ( rTableNames.end() != rTableNames.find( sGuess ) )
getSettings().sSelectedTable = sGuess;
}
//---------------------------------------------------------------------
void OAddessBookSourcePilot::implDoAutoFieldMapping()
{
DBG_ASSERT( !needManualFieldMapping( ), "OAddessBookSourcePilot::implDoAutoFieldMapping: invalid call!" );
fieldmapping::defaultMapping( getORB(), m_aSettings.aFieldMapping );
}
//---------------------------------------------------------------------
void OAddessBookSourcePilot::implCreateDataSource()
{
if (m_aNewDataSource.isValid())
{ // we already have a data source object
if ( m_aSettings.eType == m_eNewDataSourceType )
// and it already has the correct type
return;
// it has a wrong type -> remove it
m_aNewDataSource.remove();
}
ODataSourceContext aContext( getORB() );
aContext.disambiguate( m_aSettings.sDataSourceName );
switch (m_aSettings.eType)
{
case AST_MORK:
m_aNewDataSource = aContext.createNewMORK( m_aSettings.sDataSourceName );
break;
case AST_THUNDERBIRD:
m_aNewDataSource = aContext.createNewThunderbird( m_aSettings.sDataSourceName );
break;
case AST_EVOLUTION:
m_aNewDataSource = aContext.createNewEvolution( m_aSettings.sDataSourceName );
break;
case AST_EVOLUTION_GROUPWISE:
m_aNewDataSource = aContext.createNewEvolutionGroupwise( m_aSettings.sDataSourceName );
break;
case AST_EVOLUTION_LDAP:
m_aNewDataSource = aContext.createNewEvolutionLdap( m_aSettings.sDataSourceName );
break;
case AST_KAB:
m_aNewDataSource = aContext.createNewKab( m_aSettings.sDataSourceName );
break;
case AST_MACAB:
m_aNewDataSource = aContext.createNewMacab( m_aSettings.sDataSourceName );
break;
case AST_LDAP:
m_aNewDataSource = aContext.createNewLDAP( m_aSettings.sDataSourceName );
break;
case AST_OUTLOOK:
m_aNewDataSource = aContext.createNewOutlook( m_aSettings.sDataSourceName );
break;
case AST_OE:
m_aNewDataSource = aContext.createNewOE( m_aSettings.sDataSourceName );
break;
case AST_OTHER:
m_aNewDataSource = aContext.createNewDBase( m_aSettings.sDataSourceName );
break;
case AST_INVALID:
DBG_ERROR( "OAddessBookSourcePilot::implCreateDataSource: illegal data source type!" );
break;
}
m_eNewDataSourceType = m_aSettings.eType;
}
//---------------------------------------------------------------------
sal_Bool OAddessBookSourcePilot::connectToDataSource( sal_Bool _bForceReConnect )
{
DBG_ASSERT( m_aNewDataSource.isValid(), "OAddessBookSourcePilot::implConnect: invalid current data source!" );
WaitObject aWaitCursor( this );
if ( _bForceReConnect && m_aNewDataSource.isConnected( ) )
m_aNewDataSource.disconnect( );
return m_aNewDataSource.connect( this );
}
//---------------------------------------------------------------------
OWizardPage* OAddessBookSourcePilot::createPage(WizardState _nState)
{
switch (_nState)
{
case STATE_SELECT_ABTYPE:
return new TypeSelectionPage( this );
case STATE_INVOKE_ADMIN_DIALOG:
return new AdminDialogInvokationPage( this );
case STATE_TABLE_SELECTION:
return new TableSelectionPage( this );
case STATE_MANUAL_FIELD_MAPPING:
return new FieldMappingPage( this );
case STATE_FINAL_CONFIRM:
return new FinalPage( this );
default:
DBG_ERROR("OAddessBookSourcePilot::createPage: invalid state!");
return NULL;
}
}
//---------------------------------------------------------------------
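    // Enables or disables the individual roadmap states depending on the selected
    // address book type, the current connection state, and whether the currently
    // selected table exists (or missing tables are deliberately ignored).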
void OAddessBookSourcePilot::impl_updateRoadmap( AddressSourceType _eType )
{
bool bSettingsPage = needAdminInvokationPage( _eType );
bool bTablesPage = needTableSelection( _eType );
bool bFieldsPage = needManualFieldMapping( _eType );
bool bConnected = m_aNewDataSource.isConnected();
bool bCanSkipTables =
( m_aNewDataSource.hasTable( m_aSettings.sSelectedTable )
|| m_aSettings.bIgnoreNoTable
);
enableState( STATE_INVOKE_ADMIN_DIALOG, bSettingsPage );
enableState( STATE_TABLE_SELECTION,
bTablesPage && ( bConnected ? !bCanSkipTables : !bSettingsPage )
// if we do not need a settings page, we connect upon "Next" on the first page
);
enableState( STATE_MANUAL_FIELD_MAPPING,
bFieldsPage && bConnected && m_aNewDataSource.hasTable( m_aSettings.sSelectedTable )
);
enableState( STATE_FINAL_CONFIRM,
bConnected && bCanSkipTables
);
}
//---------------------------------------------------------------------
void OAddessBookSourcePilot::typeSelectionChanged( AddressSourceType _eType )
{
PathId nCurrentPathID( PATH_COMPLETE );
bool bSettingsPage = needAdminInvokationPage( _eType );
bool bFieldsPage = needManualFieldMapping( _eType );
if ( !bSettingsPage )
if ( !bFieldsPage )
nCurrentPathID = PATH_NO_SETTINGS_NO_FIELDS;
else
nCurrentPathID = PATH_NO_SETTINGS;
else
if ( !bFieldsPage )
nCurrentPathID = PATH_NO_FIELDS;
else
nCurrentPathID = PATH_COMPLETE;
activatePath( nCurrentPathID, true );
m_aNewDataSource.disconnect();
m_aSettings.bIgnoreNoTable = false;
impl_updateRoadmap( _eType );
}
//.........................................................................
} // namespace abp
//.........................................................................
| 6,548 |
1,444 | <reponame>GabrielSturtevant/mage<filename>Mage.Sets/src/mage/cards/r/Reshape.java
package mage.cards.r;
import java.util.UUID;
import mage.abilities.costs.common.SacrificeTargetCost;
import mage.abilities.effects.common.search.SearchLibraryWithLessCMCPutInPlayEffect;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.filter.common.FilterArtifactCard;
import mage.filter.common.FilterControlledArtifactPermanent;
import mage.target.common.TargetControlledPermanent;
/**
*
* @author jonubuu
*/
public final class Reshape extends CardImpl {
public Reshape(UUID ownerId, CardSetInfo setInfo) {
super(ownerId, setInfo, new CardType[]{CardType.SORCERY}, "{X}{U}{U}");
// As an additional cost to cast Reshape, sacrifice an artifact.
this.getSpellAbility().addCost(new SacrificeTargetCost(new TargetControlledPermanent(1, 1, new FilterControlledArtifactPermanent("an artifact"), false)));
// Search your library for an artifact card with converted mana cost X or less and put it onto the battlefield. Then shuffle your library.
this.getSpellAbility().addEffect(new SearchLibraryWithLessCMCPutInPlayEffect(new FilterArtifactCard()));
}
private Reshape(final Reshape card) {
super(card);
}
@Override
public Reshape copy() {
return new Reshape(this);
}
}
| 479 |
7,892 | <filename>lib-src/libsbsms/include/sbsms.h
// -*- mode: c++ -*-
#ifndef SBSMS_INCLUDE
#define SBSMS_INCLUDE
#include <stdio.h>
namespace _sbsms_ {
typedef float t_fft[2];
typedef t_fft audio;
typedef long long int SampleCountType;
typedef long long int TimeType;
typedef unsigned char TrackIndexType;
enum {
maxBands = 10,
numQualityParams = 52
};
struct SBSMSQualityParams {
int bands;
int H;
int N[maxBands];
int N0[maxBands];
int N1[maxBands];
int N2[maxBands];
int res[maxBands];
};
class SBSMSQuality {
public:
SBSMSQuality(const SBSMSQualityParams *params);
SBSMSQualityParams params;
long getFrameSize();
long getMaxPresamples();
};
extern const SBSMSQualityParams SBSMSQualityStandard;
struct SBSMSFrame {
float ratio0;
float ratio1;
audio *buf;
long size;
};
typedef long (*SBSMSResampleCB)(void *cbData, SBSMSFrame *frame);
class SBSMSInterface /* not final */ {
public:
virtual ~SBSMSInterface() {}
virtual long samples(audio *buf, long n) { return 0; }
virtual float getStretch(float t)=0;
virtual float getMeanStretch(float t0, float t1)=0;
virtual float getPitch(float t)=0;
virtual long getPresamples()=0;
virtual SampleCountType getSamplesToInput()=0;
virtual SampleCountType getSamplesToOutput()=0;
};
class SBSMSTrackPoint {
public:
virtual ~SBSMSTrackPoint() {}
virtual float getF()=0;
virtual float getM()=0;
virtual float getPhase()=0;
};
class SBSMSTrack {
public:
virtual ~SBSMSTrack() {}
virtual SBSMSTrackPoint *getSBSMSTrackPoint(const TimeType &time)=0;
virtual TrackIndexType getIndex()=0;
virtual bool isFirst(const TimeType &synthtime)=0;
virtual bool isLast(const TimeType &synthtime)=0;
};
class SBSMSRenderer {
public:
virtual ~SBSMSRenderer() {}
virtual void startFrame() {}
virtual void startTime(int c, const TimeType &time, int n) {}
virtual void render(int c, SBSMSTrack *t) {}
virtual void endTime(int c) {}
virtual void endFrame() {}
virtual void end(const SampleCountType &samples) {}
};
enum SBSMSError {
SBSMSErrorNone = 0,
SBSMSErrorInvalidRate
};
class SBSMSImp;
class SBSMS {
public:
SBSMS(int channels, SBSMSQuality *quality, bool bSynthesize);
~SBSMS();
long read(SBSMSInterface *iface, audio *buf, long n);
void addRenderer(SBSMSRenderer *renderer);
void removeRenderer(SBSMSRenderer *renderer);
long renderFrame(SBSMSInterface *iface);
long getInputFrameSize();
SBSMSError getError();
friend class SBSMSImp;
protected:
SBSMSImp *imp;
};
enum SlideType {
SlideIdentity = 0,
SlideConstant,
SlideLinearInputRate,
SlideLinearOutputRate,
SlideLinearInputStretch,
SlideLinearOutputStretch,
SlideGeometricInput,
SlideGeometricOutput
};
class SlideImp;
class Slide {
public:
Slide(SlideType slideType, float rate0 = 1.0f, float rate1 = 1.0f, const SampleCountType &n = 0);
~Slide();
float getTotalStretch();
float getStretchedTime(float t);
float getInverseStretchedTime(float t);
float getRate(float t);
float getStretch(float t);
float getMeanStretch(float t0, float t1);
float getRate();
float getStretch();
void step();
protected:
SlideImp *imp;
};
class SBSMSInterfaceSlidingImp;
class SBSMSInterfaceSliding /* not final */ : public SBSMSInterface {
public:
SBSMSInterfaceSliding(Slide *rateSlide,
Slide *pitchSlide,
bool bPitchReferenceInput,
const SampleCountType &samplesToInput,
long preSamples,
SBSMSQuality *quality);
virtual ~SBSMSInterfaceSliding();
virtual float getStretch(float t);
virtual float getMeanStretch(float t0, float t1);
virtual float getPitch(float t);
virtual long getPresamples();
virtual SampleCountType getSamplesToInput();
virtual SampleCountType getSamplesToOutput();
friend class SBSMSInterfaceSlidingImp;
protected:
SBSMSInterfaceSlidingImp *imp;
};
class ResamplerImp;
class Resampler {
public:
Resampler(SBSMSResampleCB func, void *data, SlideType slideType = SlideConstant);
~Resampler();
long read(audio *audioOut, long frames);
void reset();
long samplesInOutput();
protected:
ResamplerImp *imp;
};
}
#endif
| 1,575 |
1,355 | <filename>src/python/grid_fractional_boundary_condition_solver.cpp<gh_stars>1000+
// Copyright (c) 2018 <NAME>
//
// I am making my contributions/submissions to this project solely in my
// personal capacity and am not conveying any rights to any intellectual
// property of any third parties.
#include "grid_fractional_boundary_condition_solver.h"
#include "pybind11_utils.h"
#include <jet/grid_fractional_boundary_condition_solver2.h>
#include <jet/grid_fractional_boundary_condition_solver3.h>
namespace py = pybind11;
using namespace jet;
void addGridFractionalBoundaryConditionSolver2(py::module& m) {
py::class_<GridFractionalBoundaryConditionSolver2,
GridFractionalBoundaryConditionSolver2Ptr,
GridBoundaryConditionSolver2>(
m, "GridFractionalBoundaryConditionSolver2",
R"pbdoc(
Fractional 2-D boundary condition solver for grids.
This class constrains the velocity field by projecting the flow to the
signed-distance field representation of the collider. This implementation
should pair up with GridFractionalSinglePhasePressureSolver2 to provide
sub-grid resolutional velocity projection.
)pbdoc")
.def(py::init<>())
.def(
"constrainVelocity",
[](GridFractionalBoundaryConditionSolver2& instance,
FaceCenteredGrid2Ptr velocity, unsigned int extrapolationDepth) {
instance.constrainVelocity(velocity.get(), extrapolationDepth);
},
R"pbdoc(
Constrains the velocity field to conform the collider boundary.
Parameters
----------
- velocity : Input and output velocity grid.
- extrapolationDepth : Number of inner-collider grid cells that
velocity will get extrapolated.
)pbdoc",
py::arg("velocity"), py::arg("extrapolationDepth") = 5)
.def_property_readonly(
"colliderSdf", &GridFractionalBoundaryConditionSolver2::collider,
R"pbdoc(
Signed distance field of the collider.
)pbdoc")
.def_property_readonly(
"colliderVelocityField",
&GridFractionalBoundaryConditionSolver2::colliderVelocityField,
R"pbdoc(
Velocity field of the collider.
)pbdoc");
}
void addGridFractionalBoundaryConditionSolver3(py::module& m) {
py::class_<GridFractionalBoundaryConditionSolver3,
GridFractionalBoundaryConditionSolver3Ptr,
GridBoundaryConditionSolver3>(
m, "GridFractionalBoundaryConditionSolver3",
R"pbdoc(
Fractional 3-D boundary condition solver for grids.
This class constrains the velocity field by projecting the flow to the
signed-distance field representation of the collider. This implementation
should pair up with GridFractionalSinglePhasePressureSolver3 to provide
sub-grid resolutional velocity projection.
)pbdoc")
.def(py::init<>())
.def(
"constrainVelocity",
[](GridFractionalBoundaryConditionSolver3& instance,
FaceCenteredGrid3Ptr velocity, unsigned int extrapolationDepth) {
instance.constrainVelocity(velocity.get(), extrapolationDepth);
},
R"pbdoc(
Constrains the velocity field to conform the collider boundary.
Parameters
----------
- velocity : Input and output velocity grid.
- extrapolationDepth : Number of inner-collider grid cells that
velocity will get extrapolated.
)pbdoc",
py::arg("velocity"), py::arg("extrapolationDepth") = 5)
.def_property_readonly(
"colliderSdf", &GridFractionalBoundaryConditionSolver3::collider,
R"pbdoc(
Signed distance field of the collider.
)pbdoc")
.def_property_readonly(
"colliderVelocityField",
&GridFractionalBoundaryConditionSolver3::colliderVelocityField,
R"pbdoc(
Velocity field of the collider.
)pbdoc");
}
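// A minimal sketch of how these per-class registration helpers are typically
// composed into a module (the wrapper name below is hypothetical and not part
// of this file; the actual module definition lives elsewhere in the bindings):
void addGridFractionalBoundaryConditionSolvers(py::module& m) {
    // Register both the 2-D and 3-D solver bindings on the given module.
    addGridFractionalBoundaryConditionSolver2(m);
    addGridFractionalBoundaryConditionSolver3(m);
}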
| 1,825 |
1,155 | <gh_stars>1000+
/*
*******************************************************************************
* Copyright (C) 2003-2014, International Business Machines
* Corporation and others. All Rights Reserved.
*******************************************************************************
*
* File prscmnts.cpp
*
* Modification History:
*
* Date Name Description
* 08/22/2003 ram Creation.
*******************************************************************************
*/
// Safer use of UnicodeString.
#ifndef UNISTR_FROM_CHAR_EXPLICIT
# define UNISTR_FROM_CHAR_EXPLICIT explicit
#endif
// Less important, but still a good idea.
#ifndef UNISTR_FROM_STRING_EXPLICIT
# define UNISTR_FROM_STRING_EXPLICIT explicit
#endif
#include "unicode/regex.h"
#include "unicode/unistr.h"
#include "unicode/parseerr.h"
#include "prscmnts.h"
#include <stdio.h>
#include <stdlib.h>
U_NAMESPACE_USE
#if UCONFIG_NO_REGULAR_EXPRESSIONS==0 /* do not compile when regular expressions are not available */
#define MAX_SPLIT_STRINGS 20
const char *patternStrings[UPC_LIMIT]={
"^translate\\s*(.*)",
"^note\\s*(.*)"
};
U_CFUNC int32_t
removeText(UChar *source, int32_t srcLen,
UnicodeString patString,uint32_t options,
UnicodeString replaceText, UErrorCode *status){
if(status == NULL || U_FAILURE(*status)){
return 0;
}
UnicodeString src(source, srcLen);
RegexMatcher myMatcher(patString, src, options, *status);
if(U_FAILURE(*status)){
return 0;
}
UnicodeString dest;
dest = myMatcher.replaceAll(replaceText,*status);
return dest.extract(source, srcLen, *status);
}
U_CFUNC int32_t
trim(UChar *src, int32_t srcLen, UErrorCode *status){
srcLen = removeText(src, srcLen, UnicodeString("^[ \\r\\n]+ "), 0, UnicodeString(), status); // remove leading new lines
srcLen = removeText(src, srcLen, UnicodeString("^\\s+"), 0, UnicodeString(), status); // remove leading spaces
srcLen = removeText(src, srcLen, UnicodeString("\\s+$"), 0, UnicodeString(), status); // remvoe trailing spcaes
return srcLen;
}
U_CFUNC int32_t
removeCmtText(UChar* source, int32_t srcLen, UErrorCode* status){
srcLen = trim(source, srcLen, status);
    UnicodeString patString("^\\s*?\\*\\s*?"); // remove pattern like " * " at the beginning of the line
srcLen = removeText(source, srcLen, patString, UREGEX_MULTILINE, UnicodeString(), status);
return removeText(source, srcLen, UnicodeString("[ \\r\\n]+"), 0, UnicodeString(" "), status);// remove new lines;
}
U_CFUNC int32_t
getText(const UChar* source, int32_t srcLen,
UChar** dest, int32_t destCapacity,
UnicodeString patternString,
UErrorCode* status){
if(status == NULL || U_FAILURE(*status)){
return 0;
}
UnicodeString stringArray[MAX_SPLIT_STRINGS];
RegexPattern *pattern = RegexPattern::compile(UnicodeString("@"), 0, *status);
UnicodeString src (source,srcLen);
if (U_FAILURE(*status)) {
return 0;
}
pattern->split(src, stringArray, MAX_SPLIT_STRINGS, *status);
RegexMatcher matcher(patternString, UREGEX_DOTALL, *status);
if (U_FAILURE(*status)) {
return 0;
}
for(int32_t i=0; i<MAX_SPLIT_STRINGS; i++){
matcher.reset(stringArray[i]);
if(matcher.lookingAt(*status)){
UnicodeString out = matcher.group(1, *status);
return out.extract(*dest, destCapacity,*status);
}
}
return 0;
}
#define AT_SIGN 0x0040
U_CFUNC int32_t
getDescription( const UChar* source, int32_t srcLen,
UChar** dest, int32_t destCapacity,
UErrorCode* status){
if(status == NULL || U_FAILURE(*status)){
return 0;
}
UnicodeString stringArray[MAX_SPLIT_STRINGS];
RegexPattern *pattern = RegexPattern::compile(UnicodeString("@"), UREGEX_MULTILINE, *status);
UnicodeString src(source, srcLen);
if (U_FAILURE(*status)) {
return 0;
}
pattern->split(src, stringArray,MAX_SPLIT_STRINGS , *status);
if(stringArray[0].indexOf((UChar)AT_SIGN)==-1){
int32_t destLen = stringArray[0].extract(*dest, destCapacity, *status);
return trim(*dest, destLen, status);
}
return 0;
}
U_CFUNC int32_t
getCount(const UChar* source, int32_t srcLen,
UParseCommentsOption option, UErrorCode *status){
if(status == NULL || U_FAILURE(*status)){
return 0;
}
UnicodeString stringArray[MAX_SPLIT_STRINGS];
RegexPattern *pattern = RegexPattern::compile(UnicodeString("@"), UREGEX_MULTILINE, *status);
UnicodeString src (source, srcLen);
if (U_FAILURE(*status)) {
return 0;
}
int32_t retLen = pattern->split(src, stringArray, MAX_SPLIT_STRINGS, *status);
UnicodeString patternString(patternStrings[option]);
RegexMatcher matcher(patternString, UREGEX_DOTALL, *status);
if (U_FAILURE(*status)) {
return 0;
}
int32_t count = 0;
for(int32_t i=0; i<retLen; i++){
matcher.reset(stringArray[i]);
if(matcher.lookingAt(*status)){
count++;
}
}
if(option == UPC_TRANSLATE && count > 1){
fprintf(stderr, "Multiple @translate tags cannot be supported.\n");
exit(U_UNSUPPORTED_ERROR);
}
return count;
}
U_CFUNC int32_t
getAt(const UChar* source, int32_t srcLen,
UChar** dest, int32_t destCapacity,
int32_t index,
UParseCommentsOption option,
UErrorCode* status){
if(status == NULL || U_FAILURE(*status)){
return 0;
}
UnicodeString stringArray[MAX_SPLIT_STRINGS];
RegexPattern *pattern = RegexPattern::compile(UnicodeString("@"), UREGEX_MULTILINE, *status);
UnicodeString src (source, srcLen);
if (U_FAILURE(*status)) {
return 0;
}
int32_t retLen = pattern->split(src, stringArray, MAX_SPLIT_STRINGS, *status);
UnicodeString patternString(patternStrings[option]);
RegexMatcher matcher(patternString, UREGEX_DOTALL, *status);
if (U_FAILURE(*status)) {
return 0;
}
int32_t count = 0;
for(int32_t i=0; i<retLen; i++){
matcher.reset(stringArray[i]);
if(matcher.lookingAt(*status)){
if(count == index){
UnicodeString out = matcher.group(1, *status);
return out.extract(*dest, destCapacity,*status);
}
count++;
}
}
return 0;
}
U_CFUNC int32_t
getTranslate( const UChar* source, int32_t srcLen,
UChar** dest, int32_t destCapacity,
UErrorCode* status){
UnicodeString notePatternString("^translate\\s*?(.*)");
int32_t destLen = getText(source, srcLen, dest, destCapacity, notePatternString, status);
return trim(*dest, destLen, status);
}
U_CFUNC int32_t
getNote(const UChar* source, int32_t srcLen,
UChar** dest, int32_t destCapacity,
UErrorCode* status){
UnicodeString notePatternString("^note\\s*?(.*)");
int32_t destLen = getText(source, srcLen, dest, destCapacity, notePatternString, status);
return trim(*dest, destLen, status);
}
#endif /* UCONFIG_NO_REGULAR_EXPRESSIONS */
| 3,101 |
343 | <reponame>nzeh/syzygy<filename>syzygy/block_graph/hot_patching_metadata.h<gh_stars>100-1000
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Declares the data structures that will be injected into images transformed
// by hot patching transformations. These data structures contain the necessary
// metadata that is required to perform the hot patching of blocks at runtime.
#ifndef SYZYGY_BLOCK_GRAPH_HOT_PATCHING_METADATA_H_
#define SYZYGY_BLOCK_GRAPH_HOT_PATCHING_METADATA_H_
#include <vector>
namespace block_graph {
// Ensure there are no padding bytes because these structs are going to be
// written to the .syzyhp stream directly.
#pragma pack(push, 1)
// This data structure describes a single Block in the HotPatchingMetadata.
struct HotPatchingBlockMetadata {
// The RVA of the start of the block.
uint32_t relative_address;
// The size of the code in the block data.
uint16_t code_size;
// The size of the block data.
uint16_t block_size;
};
// This struct contains the data that will be injected into images transformed
// by hot patching transformations, it contains the necessary metadata that is
// required to perform the hot patching of blocks at runtime.
struct HotPatchingMetadataHeader {
// Version information.
uint32_t version;
// Number of HotPatchingBlockMetadata structures to follow.
uint32_t number_of_blocks;
};
#pragma pack(pop)
// The current version of the HotPatchingMetadata structure. This needs to
// be incremented any time a non-backwards compatible change is made to the
// serialization format.
const uint32_t kHotPatchingMetadataVersion = 1U;
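// A minimal illustrative helper (the function name is an assumption for the
// sake of the example, not part of the serialized format): computes the total
// number of bytes the metadata stream occupies, assuming the
// HotPatchingBlockMetadata records immediately follow the header. The
// #pragma pack(1) above guarantees that sizeof() matches the on-disk layout.
inline size_t HotPatchingMetadataStreamSize(
    const HotPatchingMetadataHeader& header) {
  return sizeof(HotPatchingMetadataHeader) +
         static_cast<size_t>(header.number_of_blocks) *
             sizeof(HotPatchingBlockMetadata);
}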
} // namespace block_graph
#endif // SYZYGY_BLOCK_GRAPH_HOT_PATCHING_METADATA_H_
| 652 |
583 | import os
import sys
import shutil
import subprocess
# Clear all incremental stuff
def clear_stuff():
if os.path.exists('objc'):
shutil.rmtree('objc')
if os.path.exists('object'):
shutil.rmtree('object')
if os.path.exists('flowc.debug'):
os.remove('flowc.debug')
if os.path.exists('flowc.bytecode'):
os.remove('flowc.bytecode')
def run_compiler(useMd5):
if useMd5:
return subprocess.check_output("flowc verbose=1 use-md5=1 test1.flow", shell=True)
else:
return subprocess.check_output("flowc verbose=1 test1.flow", shell=True)
# Check that incremental files are created and loaded
def test1(useMd5):
result1 = run_compiler(useMd5)
if not 'Saving incremental for test1_1' in result1.split('\n'):
print('FAILED 1\n' + result1)
return False
if not 'Saving incremental for test1' in result1.split('\n'):
print('FAILED 2\n' + result1)
return False
result2 = run_compiler(useMd5)
if not 'Loaded incremental for test1_1' in result2.split('\n'):
print('FAILED 3\n' + result2)
return False
if not 'Loaded incremental for test1' in result2.split('\n'):
print('FAILED 4\n' + result2)
return False
print('PASSED: Check that incremental files are created and loaded')
return True
# Change one included string
def test2(useMd5):
# Change a file
content_1 = open('test_content_1').read()
open('test_content_1', 'w').write(content_1)
result = run_compiler(useMd5)
if not "Deleting outdated incremental for test1_1, file objc/test1_1.module" in result.split('\n'):
print('FAILED 1\n' + result)
return False
if not "Deleting outdated incremental for test1, file objc/test1.module" in result.split('\n'):
print('FAILED 2\n' + result)
return False
if not 'Saving incremental for test1_1' in result.split('\n'):
print result
print('FAILED 3\n' + result)
return False
if not 'Saving incremental for test1' in result.split('\n'):
print result
print('FAILED 4\n' + result)
return False
        print('PASSED: Change one included string')
return True
# Change the other included string
def test3(useMd5):
# Change a file
content_2 = open('test_content_2').read()
open('test_content_2', 'w').write(content_2)
result = run_compiler(useMd5)
if not "Deleting outdated incremental for test1_1, file objc/test1_1.module" in result.split('\n'):
print('FAILED 1\n' + result)
return False
if not "Deleting outdated incremental for test1, file objc/test1.module" in result.split('\n'):
print('FAILED 2\n' + result)
return False
if not 'Saving incremental for test1_1' in result.split('\n'):
print result
print('FAILED 3\n' + result)
return False
if not 'Saving incremental for test1' in result.split('\n'):
print result
print('FAILED 4\n' + result)
return False
        print('PASSED: Change the other included string')
return True
# Change both included strings
def test4(useMd5):
# Change a file
content_1 = open('test_content_1').read()
content_2 = open('test_content_2').read()
open('test_content_1', 'w').write(content_1)
open('test_content_2', 'w').write(content_2)
result = run_compiler(useMd5)
if not "Deleting outdated incremental for test1_1, file objc/test1_1.module" in result.split('\n'):
print('FAILED 1\n' + result)
return False
if not "Deleting outdated incremental for test1, file objc/test1.module" in result.split('\n'):
print('FAILED 2\n' + result)
return False
if not 'Saving incremental for test1_1' in result.split('\n'):
print result
print('FAILED 3\n' + result)
return False
if not 'Saving incremental for test1' in result.split('\n'):
print result
print('FAILED 4\n' + result)
return False
        print('PASSED: Change both included strings')
return True
# Incremental file is loaded, no changes
def test5(useMd5):
result = run_compiler(useMd5)
if not 'Loaded incremental for test1_1' in result.split('\n'):
print('FAILED 1\n' + result)
return False
if not 'Loaded incremental for test1' in result.split('\n'):
print('FAILED 2\n' + result)
return False
print('PASSED: Incremental file is loaded, no changes')
return True
def runtests():
clear_stuff()
tests = [test1, test2, test3, test4, test5]
i = 1
print('Testing with no use-md5 option')
for test in tests:
sys.stdout.write('TEST ' + str(i) + ' ')
sys.stdout.flush()
if not test(False):
return
i += 1
clear_stuff()
print('Testing with use-md5=1 option')
for test in tests:
sys.stdout.write('TEST ' + str(i) + ' ')
sys.stdout.flush()
if not test(True):
return
  i += 1
clear_stuff()
def main():
runtests()
if __name__ == "__main__":
main()
| 1,766 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
package org.openoffice.netbeans.modules.office.loader;
import java.io.File;
import java.io.IOException;
import java.beans.PropertyEditor;
import java.beans.PropertyEditorSupport;
import org.openide.loaders.DataFolder;
import org.openide.loaders.DataObject;
import org.openide.loaders.DataFilter;
import org.openide.loaders.DataObjectExistsException;
import org.openide.filesystems.FileObject;
import org.openide.filesystems.FileUtil;
import org.openide.nodes.CookieSet;
import org.openide.nodes.Node;
import org.openide.nodes.PropertySupport;
import org.openide.nodes.Sheet;
import org.openide.util.HelpCtx;
import org.openoffice.idesupport.filter.*;
import org.openoffice.idesupport.zip.ParcelZipper;
import org.openoffice.netbeans.modules.office.actions.ParcelFolderCookie;
import org.openoffice.netbeans.modules.office.actions.ParcelFolderSupport;
public class ParcelFolder extends DataFolder {
public static final String LANGUAGE_ATTRIBUTE = "language";
public ParcelFolder(FileObject pf, ParcelFolderDataLoader loader)
throws DataObjectExistsException {
super(pf, loader);
CookieSet cookies = getCookieSet();
cookies.add(new ParcelFolderSupport(this));
}
public Node createNodeDelegate() {
return new ParcelFolderNode(this, new ParcelFolderFilter());
}
public class ParcelFolderNode extends DataFolder.FolderNode {
private static final String LOCATION = "location";
private static final String FILTER = "filter";
private static final String LANGUAGE = LANGUAGE_ATTRIBUTE;
private static final String CLASSPATH = "classpath";
private File location;
private FileFilter filter;
private String language;
private String classpath;
private final FileFilter DEFAULT_FILTER = BinaryOnlyFilter.getInstance();
public ParcelFolderNode(ParcelFolder pf, DataFilter dataFilter) {
super(pf.createNodeChildren(dataFilter));
location = (File)pf.getPrimaryFile().getAttribute(LOCATION);
if (location == null)
location = FileUtil.toFile(pf.getPrimaryFile());
String name = (String)pf.getPrimaryFile().getAttribute(FILTER);
if (name == null)
filter = DEFAULT_FILTER;
else {
for (int i = 0; i < availableFilters.length; i++)
if (name.equals(availableFilters[i].toString()))
filter = availableFilters[i];
}
language = (String)pf.getPrimaryFile().getAttribute(LANGUAGE);
ParcelFolderCookie cookie =
(ParcelFolderCookie)pf.getCookie(ParcelFolderCookie.class);
String s = cookie.getClasspath();
if (s != null) {
classpath = s;
}
else {
classpath = ".";
cookie.setClasspath(classpath);
}
}
public File getTargetDir() {
return location;
}
public FileFilter getFileFilter() {
return filter;
}
public String getLanguage() {
if (language == null)
language = (String)getPrimaryFile().getAttribute(LANGUAGE);
return language;
}
public Sheet createSheet() {
Sheet sheet;
Sheet.Set props;
Node.Property prop;
sheet = super.createSheet();
props = sheet.get(Sheet.PROPERTIES);
if (props == null) {
props = Sheet.createPropertiesSet();
sheet.put(props);
}
// prop = createLocationProperty();
// props.put(prop);
            prop = createFilterProperty();
            props.put(prop);
// prop = createLanguageProperty();
// props.put(prop);
prop = createClasspathProperty();
props.put(prop);
return sheet;
}
private Node.Property createLocationProperty() {
Node.Property prop =
new PropertySupport.ReadWrite(LOCATION, File.class,
"Location", "Output location of Parcel Zip File") {
public void setValue(Object obj) {
if (obj instanceof File) {
location = (File)obj;
try {
getPrimaryFile().setAttribute(LOCATION, location);
}
catch (IOException ioe) {
}
}
}
public Object getValue() {
return location;
}
};
prop.setValue("files", Boolean.FALSE);
return prop;
}
private String[] languages = {"Java", "BeanShell"};
private Node.Property createLanguageProperty() {
Node.Property prop =
new PropertySupport.ReadWrite(LANGUAGE, String.class,
"Parcel Language", "Language of scripts in this Parcel") {
public void setValue(Object obj) {
if (obj instanceof String) {
language = (String)obj;
try {
getPrimaryFile().setAttribute(LANGUAGE, language);
}
catch (IOException ioe) {
}
}
}
public Object getValue() {
if (language == null)
language = (String)getPrimaryFile().getAttribute(LANGUAGE);
return language;
}
public PropertyEditor getPropertyEditor() {
return new PropertyEditorSupport() {
public String[] getTags() {
return languages;
}
public void setAsText(String text) {
for (int i = 0; i < languages.length; i++)
if (text.equals(languages[i]))
this.setValue(languages[i]);
}
public String getAsText() {
return (String)this.getValue();
}
};
}
};
return prop;
}
private FileFilter[] availableFilters = new FileFilter[] {
BinaryOnlyFilter.getInstance(), AllFilesFilter.getInstance()};
private Node.Property createFilterProperty() {
Node.Property prop =
new PropertySupport.ReadWrite(FILTER, String.class,
"File Filter", "Files to be included in Parcel") {
public void setValue(Object obj) {
if (obj instanceof FileFilter) {
filter = (FileFilter)obj;
try {
getPrimaryFile().setAttribute(FILTER, filter.toString());
}
catch (IOException ioe) {
}
}
}
public Object getValue() {
return filter;
}
public PropertyEditor getPropertyEditor() {
return new PropertyEditorSupport() {
public String[] getTags() {
String[] tags = new String[availableFilters.length];
for (int i = 0; i < availableFilters.length; i++)
tags[i] = availableFilters[i].toString();
return tags;
}
public void setAsText(String text) {
for (int i = 0; i < availableFilters.length; i++)
if (text.equals(availableFilters[i].toString()))
this.setValue(availableFilters[i]);
}
public String getAsText() {
return this.getValue().toString();
}
};
}
};
return prop;
}
private Node.Property createClasspathProperty() {
Node.Property prop =
new PropertySupport.ReadWrite(CLASSPATH, String.class,
"Classpath", "Classpath property for scripts in this parcel") {
public void setValue(Object obj) {
if (obj instanceof String) {
classpath = (String)obj;
ParcelFolderCookie cookie = (ParcelFolderCookie)
getDataObject().getCookie(ParcelFolderCookie.class);
cookie.setClasspath(classpath);
}
}
public Object getValue() {
return classpath;
}
};
return prop;
}
}
private class ParcelFolderFilter implements DataFilter {
public boolean acceptDataObject(DataObject dobj) {
String name = dobj.getPrimaryFile().getNameExt();
if (name.equals(ParcelZipper.PARCEL_DESCRIPTOR_XML))
return false;
return true;
}
}
}
| 5,958 |
10,225 | package io.quarkus.jdbc.h2.runtime.graal;
import com.oracle.svm.core.annotate.Delete;
import com.oracle.svm.core.annotate.TargetClass;
/**
* The org.h2.engine.Session represents the "Embedded Database" in H2.
* We remove this explicitly as it pulls in various things we can't support;
* rather than address them individually it's simpler to make sure this
* Session doesn't get included by mistake: that will produce errors
* that are easier to manage.
*/
@TargetClass(className = "org.h2.engine.Session")
@Delete
public final class SessionDisable {
}
| 166 |
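The @TargetClass/@Delete pattern above removes a class from the native image entirely so that unsupported code paths fail with clear errors. For comparison, a hedged sketch of the finer-grained variant, method-level substitution, is shown below; the target class and method named here are hypothetical and only illustrate the mechanism, they are not taken from H2 or Quarkus.

package io.quarkus.jdbc.h2.runtime.graal;

import com.oracle.svm.core.annotate.Substitute;
import com.oracle.svm.core.annotate.TargetClass;

/*
 * Hypothetical example: instead of deleting a whole class, a single method can be
 * replaced so callers get an explicit error at run time in the native image.
 * "org.h2.engine.SomeEngine" and "openEmbeddedSession" are illustrative names only.
 */
@TargetClass(className = "org.h2.engine.SomeEngine")
final class SomeEngineSubstitution {

    @Substitute
    private Object openEmbeddedSession() {
        // Fail fast with a clear message rather than pulling in unsupported code paths.
        throw new UnsupportedOperationException(
                "Embedded H2 sessions are not supported in a native image");
    }
}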
665 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.isis.tooling.projectmodel.test;
import java.util.HashSet;
import java.util.Set;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.isis.tooling.projectmodel.ProjectNode;
import org.apache.isis.tooling.projectmodel.ProjectNodeFactory;
import org.apache.isis.tooling.projectmodel.ProjectVisitor;
import lombok.val;
class ProjectTreeTest extends ProjectModelTestAbstract {
@Test @Disabled("for now we are missing some build.gradle files")
void testGradle() {
val projTree = ProjectNodeFactory.gradle(projRootFolder);
val artifactKeys = new HashSet<String>();
ProjectVisitor projectVisitor = projModel -> {
artifactKeys.add(toString(projModel));
System.out.println(toString(projModel));
};
projTree.depthFirst(projectVisitor);
assertHasSomeArtifactKeys(artifactKeys);
}
@Test
void testMaven() {
val projTree = ProjectNodeFactory.maven(projRootFolder);
val artifactKeys = new HashSet<String>();
ProjectVisitor projectVisitor = projModel -> {
artifactKeys.add(toString(projModel));
System.out.println(toString(projModel));
};
projTree.depthFirst(projectVisitor);
assertHasSomeArtifactKeys(artifactKeys);
}
private static String toString(ProjectNode node) {
val artifactKey = node.getArtifactCoordinates();
val groupId = artifactKey.getGroupId();
val artifactId = artifactKey.getArtifactId();
val packaging = artifactKey.getPackaging();
return String.format("%s:%s:%s", groupId, artifactId, packaging);
}
private void assertHasSomeArtifactKeys(Set<String> artifactKeys) {
assertTrue(artifactKeys.size()>50);
assertTrue(artifactKeys.contains("org.apache.isis.core:isis-core:pom"));
assertTrue(artifactKeys.contains("org.apache.isis.core:isis-core-config:jar"));
assertTrue(artifactKeys.contains("org.apache.isis.core:isis-core-metamodel:jar"));
assertTrue(artifactKeys.contains("org.apache.isis.core:isis-core-runtime:jar"));
for(val key : artifactKeys) {
assertFalse(key.startsWith("?"), ()->"incomplete key " + key);
}
}
}
| 1,290 |
852 | <filename>Alignment/CommonAlignmentProducer/plugins/AlignmentProducerAsAnalyzer.cc
/**
* @package Alignment/CommonAlignmentProducer
* @file AlignmentProducerAsAnalyzer.cc
*
* @author <NAME> (<EMAIL>)
* @date 2015/07/16
*/
/*** Header file ***/
#include "AlignmentProducerAsAnalyzer.h"
#include "CommonTools/UtilAlgos/interface/TFileService.h"
#include "FWCore/Framework/interface/MakerMacros.h"
//------------------------------------------------------------------------------
AlignmentProducerAsAnalyzer::AlignmentProducerAsAnalyzer(const edm::ParameterSet& config)
: AlignmentProducerBase(config, consumesCollector()),
token_(produces<AlignmentToken, edm::Transition::EndProcessBlock>()) {
usesResource(TFileService::kSharedResource);
tjTkAssociationMapToken_ = consumes<TrajTrackAssociationCollection>(tjTkAssociationMapTag_);
beamSpotToken_ = consumes<reco::BeamSpot>(beamSpotTag_);
tkLasBeamToken_ = consumes<TkFittedLasBeamCollection>(tkLasBeamTag_);
tsosVectorToken_ = consumes<TsosVectorCollection>(tkLasBeamTag_);
clusterValueMapToken_ = consumes<AliClusterValueMap>(clusterValueMapTag_);
}
//------------------------------------------------------------------------------
void AlignmentProducerAsAnalyzer::beginJob() {}
//------------------------------------------------------------------------------
void AlignmentProducerAsAnalyzer::endJob() {}
//------------------------------------------------------------------------------
void AlignmentProducerAsAnalyzer::beginRun(const edm::Run& run, const edm::EventSetup& setup) {
beginRunImpl(run, setup);
}
//------------------------------------------------------------------------------
void AlignmentProducerAsAnalyzer::endRun(const edm::Run& run, const edm::EventSetup& setup) { endRunImpl(run, setup); }
//------------------------------------------------------------------------------
void AlignmentProducerAsAnalyzer::beginLuminosityBlock(const edm::LuminosityBlock& lumiBlock,
const edm::EventSetup& setup) {
beginLuminosityBlockImpl(lumiBlock, setup);
}
//------------------------------------------------------------------------------
void AlignmentProducerAsAnalyzer::endLuminosityBlock(const edm::LuminosityBlock& lumiBlock,
const edm::EventSetup& setup) {
endLuminosityBlockImpl(lumiBlock, setup);
}
void AlignmentProducerAsAnalyzer::endProcessBlockProduce(edm::ProcessBlock& processBlock) {
const AlignmentToken valueToPut{};
processBlock.emplace(token_, valueToPut);
terminateProcessing();
if (!finish()) {
edm::LogError("Alignment") << "@SUB=AlignmentProducerAsAnalyzer::endJob"
<< "Did not process any events, do not dare to store to DB.";
}
// message is used by the MillePede log parser to check the end of the job
edm::LogInfo("Alignment") << "@SUB=AlignmentProducerAsAnalyzer::endJob"
<< "Finished alignment producer job.";
}
//------------------------------------------------------------------------------
void AlignmentProducerAsAnalyzer::accumulate(edm::Event const& event, edm::EventSetup const& setup) {
processEvent(event, setup);
}
DEFINE_FWK_MODULE(AlignmentProducerAsAnalyzer);
| 1,031 |
8,315 | package com.airbnb.epoxy;
public class BasicModelWithFinalAttribute extends EpoxyModel<Object> {
@EpoxyAttribute final int value;
public BasicModelWithFinalAttribute() {
value = 0;
}
@Override
protected int getDefaultLayout() {
return 0;
}
} | 82 |
379 | /**
* Copyright 2016-2017 Sixt GmbH & Co. Autovermietung KG
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain a
* copy of the License at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.sixt.service.framework.jetty;
import com.codahale.metrics.MetricRegistry;
import com.google.common.primitives.Ints;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.protobuf.Message;
import com.sixt.service.framework.*;
import com.sixt.service.framework.metrics.GoTimer;
import com.sixt.service.framework.protobuf.ProtobufUtil;
import com.sixt.service.framework.protobuf.RpcEnvelope;
import com.sixt.service.framework.rpc.RpcCallException;
import com.sixt.service.framework.util.ReflectionUtil;
import io.opentracing.Span;
import io.opentracing.Tracer;
import io.opentracing.tag.Tags;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import javax.servlet.ServletInputStream;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.Map;
@Singleton
public class ProtobufHandler extends RpcHandler {
private static final Logger logger = LoggerFactory.getLogger(ProtobufHandler.class);
@Inject
public ProtobufHandler(MethodHandlerDictionary handlers, MetricRegistry registry,
RpcHandlerMetrics handlerMetrics, ServiceProperties serviceProperties, Tracer tracer) {
super(handlers, registry, handlerMetrics, serviceProperties, tracer);
}
@SuppressWarnings("unchecked")
public void doPost(HttpServletRequest req, HttpServletResponse resp) {
logger.debug("Handling protobuf request");
RpcEnvelope.Request rpcRequest = null;
String methodName = null;
Span span = null;
Map<String, String> headers = gatherHttpHeaders(req);
OrangeContext context = new OrangeContext(headers);
try {
MDC.put(OrangeContext.CORRELATION_ID, context.getCorrelationId());
ServletInputStream in = req.getInputStream();
rpcRequest = readRpcEnvelope(in);
methodName = rpcRequest.getServiceMethod();
span = getSpan(methodName, headers, context);
ServiceMethodHandler handler = handlers.getMethodHandler(methodName);
if (handler == null) {
incrementFailureCounter(methodName, context.getRpcOriginService(),
context.getRpcOriginMethod());
throw new IllegalArgumentException("Invalid method: " +
rpcRequest.getServiceMethod());
}
Class<? extends Message> requestClass = (Class<? extends Message>)
ReflectionUtil.findSubClassParameterType(handler, 0);
Message pbRequest = readRpcBody(in, requestClass);
GoTimer methodTimer = getMethodTimer(methodName, context.getRpcOriginService(),
context.getRpcOriginMethod());
long startTime = methodTimer.start();
Message pbResponse = invokeHandlerChain(methodName, handler, pbRequest, context);
resp.setContentType(RpcServlet.TYPE_OCTET);
sendSuccessfulResponse(resp, rpcRequest, pbResponse);
//TODO: should we check the response for errors?
methodTimer.recordSuccess(startTime);
incrementSuccessCounter(methodName, context.getRpcOriginService(),
context.getRpcOriginMethod());
} catch (RpcCallException rpcEx) {
sendErrorResponse(resp, rpcRequest, rpcEx.toString(), rpcEx.getCategory().getHttpStatus());
if (span != null) {
Tags.ERROR.set(span, true);
}
incrementFailureCounter(methodName, context.getRpcOriginService(),
context.getRpcOriginMethod());
} catch (RpcReadException ex) {
logger.warn("Bad request, cannot decode rpc message: {}", ex.toJson(req));
sendErrorResponse(resp, rpcRequest, ex.getMessage(), HttpServletResponse.SC_BAD_REQUEST);
if (span != null) {
Tags.ERROR.set(span, true);
}
incrementFailureCounter(methodName, context.getRpcOriginService(),
context.getRpcOriginMethod());
} catch (Exception ex) {
logger.warn("Uncaught exception", ex);
sendErrorResponse(resp, rpcRequest, ex.getMessage(), HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
if (span != null) {
Tags.ERROR.set(span, true);
}
incrementFailureCounter(methodName, context.getRpcOriginService(),
context.getRpcOriginMethod());
} finally {
if (span != null) {
span.finish();
}
MDC.remove(OrangeContext.CORRELATION_ID);
}
}
private void sendSuccessfulResponse(HttpServletResponse response,
RpcEnvelope.Request rpcRequest,
Message pbResponse) throws IOException {
response.setStatus(HttpServletResponse.SC_OK);
RpcEnvelope.Response rpcResponse = RpcEnvelope.Response.newBuilder().
setServiceMethod(rpcRequest.getServiceMethod()).
setSequenceNumber(rpcRequest.getSequenceNumber()).build();
byte responseHeader[] = rpcResponse.toByteArray();
byte responseBody[];
if (pbResponse == null) {
responseBody = new byte[0];
} else {
responseBody = pbResponse.toByteArray();
}
try {
ServletOutputStream out = response.getOutputStream();
out.write(Ints.toByteArray(responseHeader.length));
out.write(responseHeader);
out.write(Ints.toByteArray(responseBody.length));
out.write(responseBody);
} catch (IOException ioex) {
//there is nothing we can do, client probably went away
logger.debug("Caught IOException, assuming client disconnected");
}
}
private void sendErrorResponse(HttpServletResponse resp,
RpcEnvelope.Request rpcRequest,
String message,
int httpStatusCode) {
if (rpcRequest != null) {
try {
if (FeatureFlags.shouldExposeErrorsToHttp(serviceProps)) {
resp.setStatus(httpStatusCode);
} else {
resp.setStatus(HttpServletResponse.SC_OK);
}
if (message == null) {
message = "null";
}
RpcEnvelope.Response rpcResponse = RpcEnvelope.Response.newBuilder().
setServiceMethod(rpcRequest.getServiceMethod()).
setSequenceNumber(rpcRequest.getSequenceNumber()).
setError(message).build();
byte responseHeader[] = rpcResponse.toByteArray();
ServletOutputStream out = resp.getOutputStream();
out.write(Ints.toByteArray(responseHeader.length));
out.write(responseHeader);
out.write(Ints.toByteArray(0)); //zero-length (no) body
} catch (Exception ex) {
logger.warn("Error writing error response", ex);
}
}
}
private RpcEnvelope.Request readRpcEnvelope(ServletInputStream in) throws Exception {
byte chunkSize[] = new byte[4];
in.read(chunkSize);
int size = Ints.fromByteArray(chunkSize);
if (size <= 0 || size > ProtobufUtil.MAX_HEADER_CHUNK_SIZE) {
String message = "Invalid header chunk size: " + size;
throw new RpcReadException(chunkSize, in, message);
}
        byte headerData[] = readFully(in, size);
RpcEnvelope.Request rpcRequest = RpcEnvelope.Request.parseFrom(headerData);
return rpcRequest;
}
private Message readRpcBody(ServletInputStream in,
Class<? extends Message> requestClass) throws Exception {
byte chunkSize[] = new byte[4];
in.read(chunkSize);
int size = Ints.fromByteArray(chunkSize);
if (size == 0) {
return ProtobufUtil.newEmptyMessage(requestClass);
}
if (size > ProtobufUtil.MAX_BODY_CHUNK_SIZE) {
String message = "Invalid body chunk size: " + size;
throw new RpcReadException(chunkSize, in, message);
}
        byte bodyData[] = readFully(in, size);
Message pbRequest = ProtobufUtil.byteArrayToProtobuf(bodyData, requestClass);
return pbRequest;
}
    private byte[] readFully(ServletInputStream in, int totalSize) throws Exception {
byte[] retval = new byte[totalSize];
int bytesRead = 0;
while (bytesRead < totalSize) {
try {
int read = in.read(retval, bytesRead, totalSize - bytesRead);
if (read == -1) {
throw new RpcCallException(RpcCallException.Category.InternalServerError,
"Unable to read complete request or response");
}
bytesRead += read;
} catch (IOException e) {
throw new RpcCallException(RpcCallException.Category.InternalServerError,
"IOException reading data: " + e);
}
}
return retval;
}
}
| 4,462 |
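The readRpcEnvelope and readRpcBody methods above define the wire format the handler expects: a 4-byte big-endian length, the serialized RpcEnvelope.Request header, then another 4-byte length followed by the protobuf body. Below is a minimal client-side sketch of producing that framing; the class name is hypothetical and error handling is omitted, but it uses only the RpcEnvelope and Ints APIs already referenced by the handler.

package com.sixt.service.framework.jetty;

import com.google.common.primitives.Ints;
import com.google.protobuf.Message;
import com.sixt.service.framework.protobuf.RpcEnvelope;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

// Hypothetical helper, not part of the framework: frames a request in the format
// consumed by ProtobufHandler (length-prefixed header, then length-prefixed body).
public class RpcRequestFramer {

    public static byte[] frame(String serviceMethod, int sequenceNumber, Message body) throws IOException {
        RpcEnvelope.Request header = RpcEnvelope.Request.newBuilder()
                .setServiceMethod(serviceMethod)
                .setSequenceNumber(sequenceNumber)
                .build();
        byte[] headerBytes = header.toByteArray();
        byte[] bodyBytes = (body == null) ? new byte[0] : body.toByteArray();

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        out.write(Ints.toByteArray(headerBytes.length)); // 4-byte header length
        out.write(headerBytes);
        out.write(Ints.toByteArray(bodyBytes.length));   // 4-byte body length
        out.write(bodyBytes);
        return out.toByteArray();
    }
}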
923 | <reponame>riciche/SimpleCVReproduction
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
class StackedRNN(nn.Module):
"""Stacked RNN
"""
def __init__(self, input_size, output_size, hidden_size, number_layers):
super(StackedRNN, self).__init__()
self.rnn = nn.LSTM(input_size, hidden_size, number_layers)
self.fc = nn.Linear(hidden_size, output_size)
self.nlayers = number_layers
self.nhid = hidden_size
def init_hidden(self, bsz, volatile=False):
weight = next(self.parameters()).data
return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_(), volatile=volatile),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_(), volatile=volatile))
def forward(self, input, hidden):
output, hidden = self.rnn(input, hidden)
# output is seq_len x batch_size x hidden_size
seq_len = output.size()[0]
output = torch.stack([self.fc(output[t]) for t in range(seq_len)])
return output, hidden
| 482 |
1,056 | <gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.editor.completion;
import java.awt.Color;
import java.awt.Font;
import java.awt.Graphics;
import java.awt.event.InputEvent;
import java.awt.event.KeyEvent;
import java.util.function.Consumer;
import java.util.function.Supplier;
import javax.swing.ImageIcon;
import javax.swing.text.BadLocationException;
import javax.swing.text.JTextComponent;
import org.netbeans.api.editor.completion.Completion;
import org.netbeans.editor.BaseDocument;
import org.netbeans.spi.editor.completion.CompletionItem;
import org.netbeans.spi.editor.completion.CompletionTask;
import org.netbeans.spi.editor.completion.support.CompletionUtilities;
import org.openide.util.ImageUtilities;
/**
*
* @author <NAME>
*/
public class SimpleCompletionItem implements CompletionItem {
private final String insertText;
private final int startOffset;
private final int endOffset;
private final String iconResource;
private final String leftHtmlText;
private final String rightHtmlText;
private final int sortPriority;
private final CharSequence sortText;
private final Supplier<CompletionTask> documentationTask;
private final Supplier<CompletionTask> tooltipTask;
private final Consumer<CompletionUtilities.OnSelectContext> onSelectCallback;
private ImageIcon icon;
public SimpleCompletionItem(String insertText, int startOffset, int endOffset, String iconResource, String leftHtmlText, String rightHtmlText,
int sortPriority, CharSequence sortText, Supplier<CompletionTask> documentationTask, Supplier<CompletionTask> tooltipTask,
Consumer<CompletionUtilities.OnSelectContext> onSelectCallback) {
this.insertText = insertText;
this.startOffset = startOffset;
this.endOffset = endOffset;
this.iconResource = iconResource;
this.leftHtmlText = leftHtmlText;
this.rightHtmlText = rightHtmlText;
this.sortPriority = sortPriority;
this.sortText = sortText;
this.documentationTask = documentationTask;
this.tooltipTask = tooltipTask;
this.onSelectCallback = onSelectCallback;
}
@Override
public void defaultAction(JTextComponent component) {
if (component != null) {
Completion.get().hideDocumentation();
Completion.get().hideCompletion();
process(component, false);
}
}
@Override
public void processKeyEvent(KeyEvent evt) {
if (evt.getID() == KeyEvent.KEY_PRESSED && evt.getKeyCode() == KeyEvent.VK_ENTER && (evt.getModifiers() & InputEvent.CTRL_MASK) > 0) {
JTextComponent component = (JTextComponent)evt.getSource();
Completion.get().hideDocumentation();
Completion.get().hideCompletion();
process(component, true);
evt.consume();
}
}
@Override
public int getPreferredWidth(Graphics g, Font defaultFont) {
return CompletionUtilities.getPreferredWidth(leftHtmlText != null ? leftHtmlText : insertText, rightHtmlText, g, defaultFont);
}
@Override
public void render(Graphics g, Font defaultFont, Color defaultColor, Color backgroundColor, int width, int height, boolean selected) {
CompletionUtilities.renderHtml(getIcon(), leftHtmlText != null ? leftHtmlText : insertText, rightHtmlText, g, defaultFont, defaultColor, width, height, selected);
}
@Override
public CompletionTask createDocumentationTask() {
if (documentationTask != null) {
return documentationTask.get();
}
return null;
}
@Override
public CompletionTask createToolTipTask() {
        if (tooltipTask != null) {
            return tooltipTask.get();
        }
return null;
}
@Override
public boolean instantSubstitution(JTextComponent component) {
return false;
}
@Override
public int getSortPriority() {
return sortPriority;
}
@Override
public CharSequence getSortText() {
return sortText != null ? sortText : insertText;
}
@Override
public CharSequence getInsertPrefix() {
return insertText;
}
private void process(JTextComponent component, boolean overwrite) {
if (onSelectCallback != null) {
CompletionUtilities.OnSelectContext ctx = CompletionSupportSpiPackageAccessor.get().createOnSelectContext(component, overwrite);
onSelectCallback.accept(ctx);
} else {
final BaseDocument doc = (BaseDocument) component.getDocument();
doc.runAtomic (new Runnable() {
@Override
public void run() {
try {
if (startOffset < 0) {
if (overwrite && endOffset > component.getCaretPosition()) {
doc.remove(component.getCaretPosition(), endOffset - component.getCaretPosition());
}
doc.insertString(component.getCaretPosition(), insertText, null);
} else {
doc.remove(startOffset, (overwrite && endOffset > component.getCaretPosition() ? endOffset : component.getCaretPosition()) - startOffset);
doc.insertString(startOffset, insertText, null);
}
} catch (BadLocationException e) {
}
}
});
}
}
private ImageIcon getIcon() {
if (icon == null && iconResource != null) {
icon = ImageUtilities.loadImageIcon(iconResource, false);
}
return icon;
}
}
| 2,471 |
637 | <gh_stars>100-1000
{ "multipass": true,
"plugins":
[ "removeDoctype",
"removeXMLProcInst",
"removeComments",
"removeMetadata",
"removeEditorsNSData",
"cleanupAttrs",
"minifyStyles",
"convertStyleToAttrs",
"cleanupIDs",
"removeRasterImages" ],
"js2svg": { "pretty": true } }
| 149 |
3,301 | <reponame>okjay/Alink
package com.alibaba.alink.operator.stream.evaluation;
import org.apache.flink.types.Row;
import com.alibaba.alink.operator.stream.StreamOperator;
import com.alibaba.alink.operator.stream.source.MemSourceStreamOp;
import com.alibaba.alink.testutil.AlinkTestBase;
import org.junit.Test;
import java.util.Arrays;
public class EvalBinaryClassStreamOpTest extends AlinkTestBase {
@Test
public void testDetailBinary() throws Exception {
Row[] detailBinaryArray =
new Row[] {
Row.of("prefix1", "{\"prefix1\": 0.1, \"prefix0\": 0.9}"),
Row.of("prefix1", "{\"prefix1\": 0.8, \"prefix0\": 0.2}"),
Row.of("prefix1", "{\"prefix1\": 0.4, \"prefix0\": 0.6}"),
Row.of("prefix0", "{\"prefix1\": 0.75, \"prefix0\": 0.25}"),
Row.of("prefix0", "{\"prefix1\": 0.6, \"prefix0\": 0.4}"),
Row.of("prefix1", "{\"prefix1\": 0.65, \"prefix0\": 0.35}"),
Row.of("prefix1", "{\"prefix1\": 0.55, \"prefix0\": 0.45}"),
Row.of("prefix0", "{\"prefix1\": 0.1, \"prefix0\": 0.9}"),
Row.of("prefix0", "{\"prefix1\": 0.3, \"prefix0\": 0.7}"),
Row.of("prefix1", "{\"prefix1\": 0.25, \"prefix0\": 0.75}"),
Row.of("prefix0", "{\"prefix1\": 0.2, \"prefix0\": 0.8}"),
Row.of("prefix1", "{\"prefix1\": 0.1, \"prefix0\": 0.9}")
};
MemSourceStreamOp detailBinaryTmp = new MemSourceStreamOp(Arrays.asList(detailBinaryArray),
new String[] {"label", "detailInput"});
EvalBinaryClassStreamOp op1 = new EvalBinaryClassStreamOp()
.setLabelCol("label")
.setPositiveLabelValueString("prefix0")
.setTimeInterval(0.001)
.setPredictionDetailCol("detailInput");
detailBinaryTmp.link(op1).print();
StreamOperator.execute();
}
}
| 687 |
1,359 | <filename>src/main/java/com/kalessil/phpStorm/phpInspectionsEA/inspectors/ifs/IfReturnReturnSimplificationInspector.java
package com.kalessil.phpStorm.phpInspectionsEA.inspectors.ifs;
import com.intellij.codeInspection.LocalQuickFix;
import com.intellij.codeInspection.ProblemDescriptor;
import com.intellij.codeInspection.ProblemsHolder;
import com.intellij.openapi.project.Project;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiElementVisitor;
import com.intellij.psi.SmartPointerManager;
import com.intellij.psi.SmartPsiElementPointer;
import com.intellij.psi.tree.IElementType;
import com.jetbrains.php.lang.lexer.PhpTokenTypes;
import com.jetbrains.php.lang.psi.PhpPsiElementFactory;
import com.jetbrains.php.lang.psi.elements.*;
import com.kalessil.phpStorm.phpInspectionsEA.openApi.BasePhpElementVisitor;
import com.kalessil.phpStorm.phpInspectionsEA.openApi.BasePhpInspection;
import com.kalessil.phpStorm.phpInspectionsEA.utils.ExpressionSemanticUtil;
import com.kalessil.phpStorm.phpInspectionsEA.utils.MessagesPresentationUtil;
import com.kalessil.phpStorm.phpInspectionsEA.utils.OpenapiTypesUtil;
import com.kalessil.phpStorm.phpInspectionsEA.utils.PhpLanguageUtil;
import org.jetbrains.annotations.NotNull;
import java.util.HashMap;
import java.util.Map;
/*
* This file is part of the Php Inspections (EA Extended) package.
*
* (c) <NAME> <<EMAIL>>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
public class IfReturnReturnSimplificationInspector extends BasePhpInspection {
private static final String messagePattern = "The construct can be replaced with '%s'.";
private static final Map<IElementType, String> inversionMapping = new HashMap<>(8);
static {
inversionMapping.put(PhpTokenTypes.opIDENTICAL, "!==");
inversionMapping.put(PhpTokenTypes.opNOT_IDENTICAL, "===");
inversionMapping.put(PhpTokenTypes.opEQUAL, "!=");
inversionMapping.put(PhpTokenTypes.opNOT_EQUAL, "==");
inversionMapping.put(PhpTokenTypes.opGREATER, "<=");
inversionMapping.put(PhpTokenTypes.opGREATER_OR_EQUAL, "<");
inversionMapping.put(PhpTokenTypes.opLESS, ">=");
inversionMapping.put(PhpTokenTypes.opLESS_OR_EQUAL, ">");
}
@NotNull
@Override
public String getShortName() {
return "IfReturnReturnSimplificationInspection";
}
@NotNull
@Override
public String getDisplayName() {
return "If-return-return could be simplified";
}
@Override
@NotNull
public PsiElementVisitor buildVisitor(@NotNull final ProblemsHolder holder, boolean isOnTheFly) {
return new BasePhpElementVisitor() {
@Override
public void visitPhpIf(@NotNull If statement) {
final PsiElement cond = ExpressionSemanticUtil.getExpressionTroughParenthesis(statement.getCondition());
if (cond instanceof BinaryExpression && statement.getElseIfBranches().length == 0) {
final GroupStatement ifBody = ExpressionSemanticUtil.getGroupStatement(statement);
if (ifBody != null && ExpressionSemanticUtil.countExpressionsInGroup(ifBody) == 1) {
final PsiElement ifLast = ExpressionSemanticUtil.getLastStatement(ifBody);
if (ifLast instanceof PhpReturn) {
/* find first and second returns */
final PhpReturn first = (PhpReturn) ifLast;
PhpReturn second = null;
final Else elseBranch = statement.getElseBranch();
if (elseBranch != null) {
final GroupStatement elseBody = ExpressionSemanticUtil.getGroupStatement(elseBranch);
if (elseBody != null && ExpressionSemanticUtil.countExpressionsInGroup(elseBody) == 1) {
final PsiElement elseLast = ExpressionSemanticUtil.getLastStatement(elseBody);
if (elseLast instanceof PhpReturn) {
second = (PhpReturn) elseLast;
}
}
} else {
final PsiElement next = statement.getNextPsiSibling();
if (next instanceof PhpReturn) {
second = (PhpReturn) next;
}
}
/* if 2nd return found, check more pattern matches */
if (second != null) {
final boolean isDirect = PhpLanguageUtil.isTrue(first.getArgument()) && PhpLanguageUtil.isFalse(second.getArgument());
final boolean isReverse = !isDirect && PhpLanguageUtil.isTrue(second.getArgument()) && PhpLanguageUtil.isFalse(first.getArgument());
if (isDirect || isReverse) {
/* false-positives: if-return if-return return - code style */
if (elseBranch == null) {
final PsiElement before = statement.getPrevPsiSibling();
if (before instanceof If && !ExpressionSemanticUtil.hasAlternativeBranches((If) before)) {
final GroupStatement prevBody = ExpressionSemanticUtil.getGroupStatement(before);
if (prevBody != null && ExpressionSemanticUtil.getLastStatement(prevBody) instanceof PhpReturn) {
return;
}
}
}
/* final reporting step */
final String replacement = String.format(isReverse ? "return !(%s)" : "return %s", cond.getText());
holder.registerProblem(
statement.getFirstChild(),
MessagesPresentationUtil.prefixWithEa(String.format(messagePattern, replacement)),
new SimplifyFix(holder.getProject(), statement, elseBranch == null ? second : statement, replacement)
);
}
}
}
}
}
}
};
}
private static final class SimplifyFix implements LocalQuickFix {
private static final String title = "Use return instead";
final private SmartPsiElementPointer<PsiElement> from;
final private SmartPsiElementPointer<PsiElement> to;
final String replacement;
SimplifyFix(@NotNull Project project, @NotNull PsiElement from, @NotNull PsiElement to, @NotNull String replacement) {
super();
final SmartPointerManager factory = SmartPointerManager.getInstance(project);
this.from = factory.createSmartPsiElementPointer(from);
this.to = factory.createSmartPsiElementPointer(to);
this.replacement = replacement;
}
@NotNull
@Override
public String getName() {
return MessagesPresentationUtil.prefixWithEa(title);
}
@NotNull
@Override
public String getFamilyName() {
return getName();
}
@Override
public void applyFix(@NotNull Project project, @NotNull ProblemDescriptor descriptor) {
final PsiElement from = this.from.getElement();
final PsiElement to = this.to.getElement();
if (from != null && to != null && ! project.isDisposed()) {
PhpReturn replacement = PhpPsiElementFactory.createPhpPsiFromText(project, PhpReturn.class, this.replacement + ';');
/* Do simplifications if possible */
final PsiElement returnArgument = replacement.getArgument();
if (returnArgument instanceof UnaryExpression) {
final UnaryExpression unary = (UnaryExpression) returnArgument;
if (OpenapiTypesUtil.is(unary.getOperation(), PhpTokenTypes.opNOT)) {
final PsiElement unaryArgument = unary.getValue();
if (unaryArgument instanceof ParenthesizedExpression) {
final PsiElement argument = ExpressionSemanticUtil.getExpressionTroughParenthesis(unaryArgument);
if (argument instanceof BinaryExpression) {
final BinaryExpression binary = (BinaryExpression) argument;
final PsiElement left = binary.getLeftOperand();
final PsiElement right = binary.getRightOperand();
final IElementType operator = binary.getOperationType();
if (left != null && right != null && inversionMapping.containsKey(operator)) {
replacement = PhpPsiElementFactory.createPhpPsiFromText(
project,
PhpReturn.class,
String.format("return %s %s %s;", left.getText(), inversionMapping.get(operator), right.getText())
);
}
}
}
}
}
/* Do the replacement */
if (from == to) {
from.replace(replacement);
} else {
final PsiElement parent = from.getParent();
parent.addBefore(replacement, from);
parent.deleteChildRange(from, to);
}
}
}
}
} | 5,133 |
2,290 | package net.chrisrichardson.ftgo.restaurantservice.events;
import io.eventuate.tram.events.common.DomainEvent;
public interface RestaurantDomainEvent extends DomainEvent {
}
| 52 |
2,329 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.shenyu.plugin.base.support;
import org.springframework.http.codec.HttpMessageWriter;
import org.springframework.http.server.reactive.ServerHttpRequest;
import org.springframework.web.reactive.function.BodyInserter;
import org.springframework.web.reactive.function.client.ExchangeStrategies;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
* The type Body inserter context.
*
* @see <a href="https://github.com/spring-cloud/spring-cloud-gateway/blob/master/spring-cloud-gateway-server/src/main/java/org/springframework/cloud/gateway/support/BodyInserterContext.java">BodyInserterContext</a>
*/
public class BodyInserterContext implements BodyInserter.Context {
private final ExchangeStrategies exchangeStrategies;
/**
* Instantiates a new Body inserter context.
*/
public BodyInserterContext() {
this.exchangeStrategies = ExchangeStrategies.withDefaults();
}
@SuppressWarnings("NullableProblems")
@Override
public List<HttpMessageWriter<?>> messageWriters() {
return exchangeStrategies.messageWriters();
}
@SuppressWarnings("NullableProblems")
@Override
public Optional<ServerHttpRequest> serverRequest() {
return Optional.empty();
}
@SuppressWarnings("NullableProblems")
@Override
public Map<String, Object> hints() {
return Collections.emptyMap();
}
}
| 685 |
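A brief sketch of where a BodyInserterContext comes into play when a plugin rewrites a request body is given below. The output-message object is hypothetical (any buffering ReactiveHttpOutputMessage implementation would do), and the snippet only illustrates the shape of the insert(...) call, not ShenYu's actual plugin wiring.

package org.apache.shenyu.plugin.base.support;

import org.springframework.http.ReactiveHttpOutputMessage;
import org.springframework.web.reactive.function.BodyInserter;
import org.springframework.web.reactive.function.BodyInserters;
import reactor.core.publisher.Mono;

// Hypothetical illustration: writing a replacement body through a BodyInserter,
// using the BodyInserterContext above to supply message writers and hints.
public class BodyInserterContextExample {

    public Mono<Void> writeModifiedBody(ReactiveHttpOutputMessage cachedOutputMessage, String newBody) {
        BodyInserter<String, ReactiveHttpOutputMessage> inserter = BodyInserters.fromValue(newBody);
        return inserter.insert(cachedOutputMessage, new BodyInserterContext());
    }
}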
335 | <reponame>Safal08/Hacktoberfest-1
{
"word": "Wold",
"definitions": [
"(in Britain) a piece of high, open uncultivated land or moor."
],
"parts-of-speech": "Noun"
} | 86 |
879 | package org.zstack.header.identity.rbac;
import org.zstack.header.exception.CloudRuntimeException;
import org.zstack.header.message.APIMessage;
import org.zstack.utils.FieldUtils;
import org.zstack.utils.Utils;
import org.zstack.utils.logging.CLogger;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
public class RBACEntity {
private static final CLogger logger = Utils.getLogger(RBACEntity.class);
private String apiName;
private APIMessage apiMessage;
private List<String> additionalApisToCheck = new ArrayList<>();
public RBACEntity(APIMessage apiMessage) {
this.apiMessage = apiMessage;
apiName = apiMessage.getClass().getName();
List<RBAC.ExpendedFieldPermission> structs = RBAC.expendApiClassForPermissionCheck.get(apiMessage.getClass());
if (structs == null) {
return;
}
for (RBAC.ExpendedFieldPermission s : structs) {
Field field = FieldUtils.getField(s.fieldName, apiMessage.getClass());
if (field == null) {
throw new CloudRuntimeException(String.format("Unknown field %s of class %s", s.fieldName, apiMessage.getClass()));
}
try {
field.setAccessible(true);
Object obj = field.get(apiMessage);
if (obj == null) {
continue;
}
if (obj instanceof Collection && ((Collection) obj).isEmpty()) {
continue;
}
additionalApisToCheck.add(s.apiClass.getName());
} catch (IllegalAccessException e) {
throw new CloudRuntimeException(e);
}
}
}
public RBACEntity() {
}
public String getApiName() {
return apiName;
}
public void setApiName(String apiName) {
this.apiName = apiName;
}
public APIMessage getApiMessage() {
return apiMessage;
}
public void setApiMessage(APIMessage apiMessage) {
this.apiMessage = apiMessage;
}
public List<String> getAdditionalApisToCheck() {
return additionalApisToCheck;
}
public void setAdditionalApisToCheck(List<String> additionalApisToCheck) {
this.additionalApisToCheck = additionalApisToCheck;
}
}
| 1,016 |
3,167 | <filename>open_spiel/games/sheriff_test.cc
// Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "open_spiel/games/sheriff.h"
#include <iostream>
#include <limits>
#include "open_spiel/abseil-cpp/absl/container/flat_hash_set.h"
#include "open_spiel/algorithms/expected_returns.h"
#include "open_spiel/algorithms/get_all_states.h"
#include "open_spiel/algorithms/tabular_exploitability.h"
#include "open_spiel/policy.h"
#include "open_spiel/spiel_utils.h"
#include "open_spiel/tests/basic_tests.h"
namespace open_spiel {
namespace sheriff {
namespace {
namespace testing = open_spiel::testing;
void BasicSheriffTest() {
for (int num_rounds = 1; num_rounds <= 6; ++num_rounds) {
const std::shared_ptr<const Game> game =
LoadGame("sheriff", {{"item_penalty", GameParameter(2.0)},
{"item_value", GameParameter(1.5)},
{"sheriff_penalty", GameParameter(3.14)},
{"max_bribe", GameParameter(10)},
{"max_items", GameParameter(10)},
{"num_rounds", GameParameter(num_rounds)}});
testing::RandomSimTestWithUndo(*game, 100);
testing::NoChanceOutcomesTest(*game);
}
}
struct GameSize {
uint32_t num_sequences[2] = {0, 0}; // Layout: [Pl.0, Pl.1].
uint32_t num_infostates[2] = {0, 0}; // Layout: [Pl.0, Pl.1].
uint32_t num_terminal_states = 0;
};
GameSize ComputeGameSize(const std::shared_ptr<const Game> game) {
std::map<std::string, std::unique_ptr<open_spiel::State>> all_states =
open_spiel::algorithms::GetAllStates(
*game, /* depth_limit = */ std::numeric_limits<int>::max(),
/* include_terminals = */ true,
/* include_chance_states = */ false);
GameSize size;
// Account for empty sequence.
size.num_sequences[Player{0}] = 1;
size.num_sequences[Player{1}] = 1;
absl::flat_hash_set<std::string> infosets;
for (const auto& [_, state] : all_states) {
if (state->IsTerminal()) {
++size.num_terminal_states;
} else {
const Player player = state->CurrentPlayer();
SPIEL_CHECK_TRUE(player == Player{0} || player == Player{1});
// NOTE: there is no requirement that infostates strings be unique across
// players. So, we disambiguate the player by prepending it.
const std::string infostate_string =
absl::StrCat(player, state->InformationStateString());
if (infosets.insert(infostate_string).second) {
// The infostate string was not present in the hash set. We update the
// tally of infosets and sequences for the player.
size.num_infostates[player] += 1;
size.num_sequences[player] += state->LegalActions().size();
}
}
}
return size;
}
void TestGameSizes() {
// We expect these game sizes:
//
// +-------+-------+--------+-----------------+----------------+----------+
// | Max | Max | Num | Num sequences | Num infosets | Terminal |
// | bribe | items | rounds | pl 0 | pl 1 | pl 0 | pl 1 | states |
// +-------+-------+--------+--------+--------+-------+--------+----------+
// | 3 | 3 | 1 | 21 | 9 | 5 | 4 | 32 |
// | 3 | 5 | 2 | 223 | 73 | 55 | 36 | 384 |
// | 3 | 3 | 3 | 1173 | 585 | 293 | 292 | 2048 |
// | 3 | 5 | 4 | 14047 | 4681 | 3511 | 2340 | 24576 |
// +-------+-------+--------+--------+--------+-------+--------+----------+
// | 5 | 3 | 1 | 29 | 13 | 5 | 6 | 48 |
// | 5 | 3 | 2 | 317 | 157 | 53 | 78 | 576 |
// | 5 | 5 | 3 | 5659 | 1885 | 943 | 942 | 10368 |
// +-------+-------+--------+--------+--------+-------+--------+----------+
// To simplify the construction of game instance we introduce a lambda.
const auto ConstructInstance =
[](const uint32_t& max_bribe, const uint32_t max_items,
const uint32_t num_rounds) -> std::shared_ptr<const Game> {
return LoadGame(
"sheriff",
{{"max_bribe", GameParameter(static_cast<int>(max_bribe))},
{"max_items", GameParameter(static_cast<int>(max_items))},
{"num_rounds", GameParameter(static_cast<int>(num_rounds))}});
};
GameSize size = ComputeGameSize(ConstructInstance(3, 3, 1));
SPIEL_CHECK_EQ(size.num_sequences[Player{0}], 21);
SPIEL_CHECK_EQ(size.num_sequences[Player{1}], 9);
SPIEL_CHECK_EQ(size.num_infostates[Player{0}], 5);
SPIEL_CHECK_EQ(size.num_infostates[Player{1}], 4);
SPIEL_CHECK_EQ(size.num_terminal_states, 32);
size = ComputeGameSize(ConstructInstance(3, 5, 2));
SPIEL_CHECK_EQ(size.num_sequences[Player{0}], 223);
SPIEL_CHECK_EQ(size.num_sequences[Player{1}], 73);
SPIEL_CHECK_EQ(size.num_infostates[Player{0}], 55);
SPIEL_CHECK_EQ(size.num_infostates[Player{1}], 36);
SPIEL_CHECK_EQ(size.num_terminal_states, 384);
size = ComputeGameSize(ConstructInstance(3, 3, 3));
SPIEL_CHECK_EQ(size.num_sequences[Player{0}], 1173);
SPIEL_CHECK_EQ(size.num_sequences[Player{1}], 585);
SPIEL_CHECK_EQ(size.num_infostates[Player{0}], 293);
SPIEL_CHECK_EQ(size.num_infostates[Player{1}], 292);
SPIEL_CHECK_EQ(size.num_terminal_states, 2048);
size = ComputeGameSize(ConstructInstance(3, 5, 4));
SPIEL_CHECK_EQ(size.num_sequences[Player{0}], 14047);
SPIEL_CHECK_EQ(size.num_sequences[Player{1}], 4681);
SPIEL_CHECK_EQ(size.num_infostates[Player{0}], 3511);
SPIEL_CHECK_EQ(size.num_infostates[Player{1}], 2340);
SPIEL_CHECK_EQ(size.num_terminal_states, 24576);
size = ComputeGameSize(ConstructInstance(5, 3, 1));
SPIEL_CHECK_EQ(size.num_sequences[Player{0}], 29);
SPIEL_CHECK_EQ(size.num_sequences[Player{1}], 13);
SPIEL_CHECK_EQ(size.num_infostates[Player{0}], 5);
SPIEL_CHECK_EQ(size.num_infostates[Player{1}], 6);
SPIEL_CHECK_EQ(size.num_terminal_states, 48);
size = ComputeGameSize(ConstructInstance(5, 3, 2));
SPIEL_CHECK_EQ(size.num_sequences[Player{0}], 317);
SPIEL_CHECK_EQ(size.num_sequences[Player{1}], 157);
SPIEL_CHECK_EQ(size.num_infostates[Player{0}], 53);
SPIEL_CHECK_EQ(size.num_infostates[Player{1}], 78);
SPIEL_CHECK_EQ(size.num_terminal_states, 576);
size = ComputeGameSize(ConstructInstance(5, 5, 3));
SPIEL_CHECK_EQ(size.num_sequences[Player{0}], 5659);
SPIEL_CHECK_EQ(size.num_sequences[Player{1}], 1885);
SPIEL_CHECK_EQ(size.num_infostates[Player{0}], 943);
SPIEL_CHECK_EQ(size.num_infostates[Player{1}], 942);
SPIEL_CHECK_EQ(size.num_terminal_states, 10368);
}
} // namespace
} // namespace sheriff
} // namespace open_spiel
int main(int argc, char** argv) {
open_spiel::testing::LoadGameTest("sheriff");
open_spiel::sheriff::BasicSheriffTest();
open_spiel::sheriff::TestGameSizes();
}
| 3,099 |
679 | <filename>main/svtools/inc/svtools/textdata.hxx
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _TEXTDATA_HXX
#define _TEXTDATA_HXX
#include "svtools/svtdllapi.h"
#include <svl/brdcst.hxx>
#include <svl/smplhint.hxx>
#include <tools/string.hxx>
// For Notify when all paragraphs have been deleted...
#define TEXT_PARA_ALL 0xFFFFFFFF
class TextPaM
{
private:
sal_uLong mnPara;
sal_uInt16 mnIndex;
public:
TextPaM() { mnPara = 0, mnIndex = 0; }
TextPaM( sal_uLong nPara, sal_uInt16 nIndex ) { mnPara = nPara, mnIndex = nIndex; }
sal_uLong GetPara() const { return mnPara; }
sal_uLong& GetPara() { return mnPara; }
sal_uInt16 GetIndex() const { return mnIndex; }
sal_uInt16& GetIndex() { return mnIndex; }
inline sal_Bool operator == ( const TextPaM& rPaM ) const;
inline sal_Bool operator != ( const TextPaM& rPaM ) const;
inline sal_Bool operator < ( const TextPaM& rPaM ) const;
inline sal_Bool operator > ( const TextPaM& rPaM ) const;
};
inline sal_Bool TextPaM::operator == ( const TextPaM& rPaM ) const
{
return ( ( mnPara == rPaM.mnPara ) && ( mnIndex == rPaM.mnIndex ) ) ? sal_True : sal_False;
}
inline sal_Bool TextPaM::operator != ( const TextPaM& rPaM ) const
{
return !( *this == rPaM );
}
inline sal_Bool TextPaM::operator < ( const TextPaM& rPaM ) const
{
return ( ( mnPara < rPaM.mnPara ) ||
( ( mnPara == rPaM.mnPara ) && mnIndex < rPaM.mnIndex ) ) ? sal_True : sal_False;
}
inline sal_Bool TextPaM::operator > ( const TextPaM& rPaM ) const
{
return ( ( mnPara > rPaM.mnPara ) ||
( ( mnPara == rPaM.mnPara ) && mnIndex > rPaM.mnIndex ) ) ? sal_True : sal_False;
}
class SVT_DLLPUBLIC TextSelection
{
private:
TextPaM maStartPaM;
TextPaM maEndPaM;
public:
TextSelection();
TextSelection( const TextPaM& rPaM );
TextSelection( const TextPaM& rStart, const TextPaM& rEnd );
const TextPaM& GetStart() const { return maStartPaM; }
TextPaM& GetStart() { return maStartPaM; }
const TextPaM& GetEnd() const { return maEndPaM; }
TextPaM& GetEnd() { return maEndPaM; }
void Justify();
sal_Bool HasRange() const { return maStartPaM != maEndPaM; }
inline sal_Bool operator == ( const TextSelection& rSel ) const;
inline sal_Bool operator != ( const TextSelection& rSel ) const;
};
inline sal_Bool TextSelection::operator == ( const TextSelection& rSel ) const
{
return ( ( maStartPaM == rSel.maStartPaM ) && ( maEndPaM == rSel.maEndPaM ) );
}
inline sal_Bool TextSelection::operator != ( const TextSelection& rSel ) const
{
return !( *this == rSel );
}
#define TEXT_HINT_PARAINSERTED 1
#define TEXT_HINT_PARAREMOVED 2
#define TEXT_HINT_PARACONTENTCHANGED 3
#define TEXT_HINT_TEXTHEIGHTCHANGED 4
#define TEXT_HINT_FORMATPARA 5
#define TEXT_HINT_TEXTFORMATTED 6
#define TEXT_HINT_MODIFIED 7
#define TEXT_HINT_BLOCKNOTIFICATION_START 8
#define TEXT_HINT_BLOCKNOTIFICATION_END 9
#define TEXT_HINT_INPUT_START 10
#define TEXT_HINT_INPUT_END 11
#define TEXT_HINT_VIEWSCROLLED 100
#define TEXT_HINT_VIEWSELECTIONCHANGED 101
class SVT_DLLPUBLIC TextHint : public SfxSimpleHint
{
private:
sal_uLong mnValue;
public:
TYPEINFO();
TextHint( sal_uLong nId );
TextHint( sal_uLong nId, sal_uLong nValue );
sal_uLong GetValue() const { return mnValue; }
void SetValue( sal_uLong n ) { mnValue = n; }
};
struct TEIMEInfos
{
String aOldTextAfterStartPos;
sal_uInt16* pAttribs;
TextPaM aPos;
sal_uInt16 nLen;
sal_Bool bCursor;
sal_Bool bWasCursorOverwrite;
TEIMEInfos( const TextPaM& rPos, const String& rOldTextAfterStartPos );
~TEIMEInfos();
void CopyAttribs( const sal_uInt16* pA, sal_uInt16 nL );
void DestroyAttribs();
};
// ----------------- Wrapper for old Tools List -------------------
#ifndef INCLUDED_VECTOR
#include <vector>
#define INCLUDED_VECTOR
#endif
#ifndef INCLUDED_ALGORITHM
#include <algorithm>
#define INCLUDED_ALGORITHM
#endif
template <class T> class ToolsList : public ::std::vector< T >
{
public:
sal_uLong Count() const { return static_cast<sal_uLong>(::std::vector< T >::size()); }
sal_uLong GetPos( T pObject ) const { return ( ::std::find( this->begin(), this->end(), pObject ) ) - this->begin(); }
T GetObject( sal_uLong nIndex ) const { return (*this)[nIndex]; }
void Insert( T pObject, sal_uLong nPos ) { ::std::vector< T >::insert( this->begin()+nPos, pObject ); }
void Remove( sal_uLong nPos ) { ::std::vector< T >::erase( this->begin()+nPos ); }
};
#endif // _TEXTDATA_HXX
| 2,158 |
743 | <reponame>althink/hermes
package pl.allegro.tech.hermes.management.infrastructure.kafka.service;
import pl.allegro.tech.hermes.api.Subscription;
import pl.allegro.tech.hermes.api.Topic;
import pl.allegro.tech.hermes.management.domain.subscription.ConsumerGroupManager;
public class NoOpConsumerGroupManager implements ConsumerGroupManager {
@Override
public void createConsumerGroup(Topic topic, Subscription subscription) {
// no operation
}
}
| 151 |
30,023 | <gh_stars>1000+
{
"system_health": {
"info": {
"current_recorder_run": "Praegune k\u00e4ivitamise algusaeg",
"database_engine": "Andmebaasi mootor",
"database_version": "Andmebaasi versioon",
"estimated_db_size": "Andmebaasi hinnanguline suurus (MB)",
"oldest_recorder_run": "Vanim k\u00e4ivitamise algusaeg"
}
}
} | 212 |
419 | import numpy as np
import os.path as osp
import cv2
import torch
import torch.nn.functional as F
from pointmvsnet.utils.io import mkdir
from pointmvsnet.functions.functions import get_pixel_grids
def file_logger(data_batch, preds, step, output_dir, prefix):
step_dir = osp.join(output_dir, "{}_step{:05d}".format(prefix, step))
mkdir(step_dir)
print("start saving files in ", step_dir)
img_list = data_batch["img_list"]
batch_size, num_view, img_channel, img_height, img_width = list(img_list.size())
cam_params_list = data_batch["cam_params_list"]
for i in range(num_view):
np.savetxt(osp.join(step_dir, "img{}.txt".format(i)), img_list[0, i, 0].detach().cpu().numpy(), fmt="%.4f")
np.savetxt(osp.join(step_dir, "cam{}_extrinsic.txt".format(i)), cam_params_list[0, i, 0].detach().cpu().numpy(), fmt="%.4f")
np.savetxt(osp.join(step_dir, "cam{}_intrinsic.txt".format(i)), cam_params_list[0, i, 1].detach().cpu().numpy(), fmt="%.4f")
np.savetxt(osp.join(step_dir, "gt_depth_img.txt"), data_batch["gt_depth_img"][0, 0].detach().cpu().numpy(), fmt="%.4f")
np.savetxt(osp.join(step_dir, "coarse_depth_img.txt"), preds["coarse_depth_map"][0, 0].detach().cpu().numpy(), fmt="%.4f")
cam_extrinsic = cam_params_list[0, 0, 0, :3, :4].clone() # (3, 4)
cam_intrinsic = cam_params_list[0, 0, 1, :3, :3].clone()
world_points = preds["world_points"]
world_points = world_points[0].cpu().numpy().transpose()
save_points(osp.join(step_dir, "world_points.xyz"), world_points)
prob_map = preds["coarse_prob_map"][0][0].cpu().numpy()
coarse_points = depth2pts(preds["coarse_depth_map"], prob_map,
cam_intrinsic, cam_extrinsic, (img_height, img_width))
save_points(osp.join(step_dir, "coarse_point.xyz"), coarse_points)
gt_points = depth2pts(data_batch["gt_depth_img"], prob_map,
cam_intrinsic, cam_extrinsic, (img_height, img_width))
save_points(osp.join(step_dir, "gt_points.xyz"), gt_points)
if "flow1" in preds.keys():
flow1_points = depth2pts(preds["flow1"], prob_map,
cam_intrinsic, cam_extrinsic, (img_height, img_width))
save_points(osp.join(step_dir, "flow1_points.xyz"), flow1_points)
if "flow2" in preds.keys():
flow2_points = depth2pts(preds["flow2"], prob_map,
cam_intrinsic, cam_extrinsic, (img_height, img_width))
save_points(osp.join(step_dir, "flow2_points.xyz"), flow2_points)
print("saving finished.")
def depth2pts(depth_map, prob_map, cam_intrinsic, cam_extrinsic, img_size):
feature_map_indices_grid = get_pixel_grids(depth_map.size(2), depth_map.size(3)).to(depth_map.device) # (3, H*W)
curr_cam_intrinsic = cam_intrinsic.clone()
scale = (depth_map.size(2) + 0.0) / (img_size[0] + 0.0) * 4.0
curr_cam_intrinsic[:2, :3] *= scale
uv = torch.matmul(torch.inverse(curr_cam_intrinsic), feature_map_indices_grid)
cam_points = uv * depth_map[0].view(1, -1)
R = cam_extrinsic[:3, :3]
t = cam_extrinsic[:3, 3].unsqueeze(-1)
R_inv = torch.inverse(R)
world_points = torch.matmul(R_inv, cam_points - t).detach().cpu().numpy().transpose()
curr_prob_map = prob_map.copy()
if curr_prob_map.shape[0] != depth_map.size(2):
curr_prob_map = cv2.resize(curr_prob_map, (depth_map.size(3), depth_map.size(2)),
interpolation=cv2.INTER_LANCZOS4)
curr_prob_map = np.reshape(curr_prob_map, (-1, 1))
world_points = np.concatenate([world_points, curr_prob_map], axis=1)
return world_points
def save_points(path, points):
np.savetxt(path, points, delimiter=' ', fmt='%.4f')
| 1,743 |
615 | /* ************************************************************************
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ************************************************************************/
#ifndef _CLBLAS_FUNCTOR_XGEMM_H_
#define _CLBLAS_FUNCTOR_XGEMM_H_
//
// This file provides the declarations of all XGEMM functors and related classes.
//
//
//
#include "functor.h"
//
// Base class for all XGEMM functors (DGEMM, SGEMM, ...)
//
template <class T>
class clblasXgemmFunctor : public clblasFunctor
{
public:
// Structure used to store all XGEMM arguments
struct Args
{
clblasOrder order;
clblasTranspose transA;
clblasTranspose transB;
size_t M;
size_t N;
size_t K;
T alpha;
cl_mem A;
size_t offA;
size_t lda;
cl_mem B;
size_t offB;
size_t ldb;
T beta;
cl_mem C;
size_t offC;
size_t ldc;
cl_command_queue queue;
cl_uint numEventsInWaitList;
const cl_event * eventWaitList;
cl_event * events;
Args(clblasOrder order,
clblasTranspose transA,
clblasTranspose transB,
size_t M,
size_t N,
size_t K,
T alpha,
cl_mem A,
size_t offA,
size_t lda,
cl_mem B,
size_t offB,
size_t ldb,
T beta,
cl_mem C,
size_t offC,
size_t ldc,
cl_command_queue queue,
cl_uint numEventsInWaitList,
const cl_event *eventWaitList,
cl_event *events)
: order(order),
transA(transA),
transB(transB),
M(M),
N(N),
K(K),
alpha(alpha),
A(A),
offA(offA),
lda(lda),
B(B),
offB(offB),
ldb(ldb),
beta(beta),
C(C),
offC(offC),
ldc(ldc),
queue(queue),
numEventsInWaitList(numEventsInWaitList),
eventWaitList(eventWaitList),
events(events)
{
}
};
public:
virtual clblasStatus execute(Args &args) = 0;
} ;
// ================ SGEMM ==================
//
// Base class for all functors providing a SGEMM implementation
//
class clblasSgemmFunctor : public clblasXgemmFunctor<cl_float>
{
};
//
// Fallback functor for SGEMM using the original solver mechanism
//
class clblasSgemmFunctorFallback : public clblasSgemmFunctor
{
public: // Inherited members from clblasFunctor
virtual void retain();
virtual void release();
public: // Inherited members from clblasSgemmFunctor
virtual clblasStatus execute(Args & a);
public:
static clblasSgemmFunctorFallback * provide ();
};
// ================ DGEMM ==================
//
//
// Base class for all functors providing a DGEMM implementation
//
class clblasDgemmFunctor : public clblasXgemmFunctor<cl_double>
{
};
//
// Fallback functor for DGEMM using the original solver mechanism
//
class clblasDgemmFunctorFallback : public clblasDgemmFunctor
{
public: // Inherited members from clblasFunctor
virtual void retain();
virtual void release();
public: // Inherited members from clblasDgemmFunctor
virtual clblasStatus execute(Args & a);
public:
static clblasDgemmFunctorFallback * provide ();
};
// ================ CGEMM ==================
//
// Base class for all functors providing a CGEMM implementation
//
class clblasCgemmFunctor : public clblasXgemmFunctor<FloatComplex>
{
};
//
// Fallback functor for CGEMM using the original solver mechanism
//
class clblasCgemmFunctorFallback : public clblasCgemmFunctor
{
public: // Inherited members from clblasFunctor
virtual void retain();
virtual void release();
public: // Inherited members from clblasCgemmFunctor
virtual clblasStatus execute(Args & a);
public:
static clblasCgemmFunctorFallback * provide ();
};
// ================ ZGEMM ==================
//
// Base class for all functors providing a ZGEMM implementation
//
class clblasZgemmFunctor : public clblasXgemmFunctor<DoubleComplex>
{
};
//
// Fallback functor for ZGEMM using the original solver mechanism
//
class clblasZgemmFunctorFallback : public clblasZgemmFunctor
{
public: // Inherited members from clblasFunctor
virtual void retain();
virtual void release();
public: // Inherited members from clblasZgemmFunctor
virtual clblasStatus execute(Args & a);
public:
static clblasZgemmFunctorFallback * provide ();
};
#endif // _CLBLAS_FUNCTOR_XGEMM_H_
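//
// Hedged usage sketch (not part of the original header): how a caller might run
// SGEMM through the fallback functor. The cl_mem buffers A, B, C, the leading
// dimensions and the cl_command_queue are assumed to exist already.
//
//   clblasSgemmFunctor::Args args(clblasColumnMajor, clblasNoTrans, clblasNoTrans,
//                                 M, N, K,
//                                 1.0f,               /* alpha */
//                                 A, 0, lda,
//                                 B, 0, ldb,
//                                 0.0f,               /* beta  */
//                                 C, 0, ldc,
//                                 queue, 0, NULL, NULL);
//   clblasSgemmFunctor * f      = clblasSgemmFunctorFallback::provide();
//   clblasStatus         status = f->execute(args);
//   f->release();   // matches the retain()/release() interface declared above
//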
| 2,450 |
3,010 | import os
def main():
print("Random number test")
r = os.urandom(32)
print(f"urandom TRNG string is {r}")
main()
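# --- Hedged note / sketch (not part of the original file) --------------------
# os.urandom() draws from the operating system's CSPRNG, so "TRNG" above is an
# approximation on most platforms.  Two common ways to consume the raw bytes:
def show_bytes_usage():
    import os
    r = os.urandom(32)
    print("hex:", r.hex())                    # printable hexadecimal form
    print("int:", int.from_bytes(r, "big"))   # as a 256-bit unsigned integer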
| 53 |
374 | <gh_stars>100-1000
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* ILU solve routine
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "_hypre_utilities.hpp"
#include "par_ilu.h"
/*--------------------------------------------------------------------
* hypre_ILUSolve
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_ILUSolve( void *ilu_vdata,
hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
// HYPRE_Int i;
hypre_ParILUData *ilu_data = (hypre_ParILUData*) ilu_vdata;
#ifdef HYPRE_USING_CUDA
/* pointers to cusparse data, note that they are not NULL only when needed */
cusparseMatDescr_t matL_des = hypre_ParILUDataMatLMatrixDescription(ilu_data);
cusparseMatDescr_t matU_des = hypre_ParILUDataMatUMatrixDescription(ilu_data);
void *ilu_solve_buffer = hypre_ParILUDataILUSolveBuffer(ilu_data);//device memory
cusparseSolvePolicy_t ilu_solve_policy = hypre_ParILUDataILUSolvePolicy(ilu_data);
hypre_CSRMatrix *matALU_d = hypre_ParILUDataMatAILUDevice(ilu_data);
hypre_CSRMatrix *matBLU_d = hypre_ParILUDataMatBILUDevice(ilu_data);
//hypre_CSRMatrix *matSLU_d = hypre_ParILUDataMatSILUDevice(ilu_data);
hypre_CSRMatrix *matE_d = hypre_ParILUDataMatEDevice(ilu_data);
hypre_CSRMatrix *matF_d = hypre_ParILUDataMatFDevice(ilu_data);
csrsv2Info_t matAL_info = hypre_ParILUDataMatALILUSolveInfo(ilu_data);
csrsv2Info_t matAU_info = hypre_ParILUDataMatAUILUSolveInfo(ilu_data);
csrsv2Info_t matBL_info = hypre_ParILUDataMatBLILUSolveInfo(ilu_data);
csrsv2Info_t matBU_info = hypre_ParILUDataMatBUILUSolveInfo(ilu_data);
csrsv2Info_t matSL_info = hypre_ParILUDataMatSLILUSolveInfo(ilu_data);
csrsv2Info_t matSU_info = hypre_ParILUDataMatSUILUSolveInfo(ilu_data);
hypre_ParCSRMatrix *Aperm = hypre_ParILUDataAperm(ilu_data);
//hypre_ParCSRMatrix *R = hypre_ParILUDataR(ilu_data);
//hypre_ParCSRMatrix *P = hypre_ParILUDataP(ilu_data);
#endif
/* get matrices */
HYPRE_Int ilu_type = hypre_ParILUDataIluType(ilu_data);
HYPRE_Int *perm = hypre_ParILUDataPerm(ilu_data);
HYPRE_Int *qperm = hypre_ParILUDataQPerm(ilu_data);
hypre_ParCSRMatrix *matA = hypre_ParILUDataMatA(ilu_data);
hypre_ParCSRMatrix *matL = hypre_ParILUDataMatL(ilu_data);
HYPRE_Real *matD = hypre_ParILUDataMatD(ilu_data);
hypre_ParCSRMatrix *matU = hypre_ParILUDataMatU(ilu_data);
#ifndef HYPRE_USING_CUDA
hypre_ParCSRMatrix *matmL = hypre_ParILUDataMatLModified(ilu_data);
HYPRE_Real *matmD = hypre_ParILUDataMatDModified(ilu_data);
hypre_ParCSRMatrix *matmU = hypre_ParILUDataMatUModified(ilu_data);
#endif
hypre_ParCSRMatrix *matS = hypre_ParILUDataMatS(ilu_data);
HYPRE_Int iter, num_procs, my_id;
hypre_ParVector *F_array = hypre_ParILUDataF(ilu_data);
hypre_ParVector *U_array = hypre_ParILUDataU(ilu_data);
/* get settings */
HYPRE_Real tol = hypre_ParILUDataTol(ilu_data);
HYPRE_Int logging = hypre_ParILUDataLogging(ilu_data);
HYPRE_Int print_level = hypre_ParILUDataPrintLevel(ilu_data);
HYPRE_Int max_iter = hypre_ParILUDataMaxIter(ilu_data);
HYPRE_Real *norms = hypre_ParILUDataRelResNorms(ilu_data);
hypre_ParVector *Ftemp = hypre_ParILUDataFTemp(ilu_data);
hypre_ParVector *Utemp = hypre_ParILUDataUTemp(ilu_data);
hypre_ParVector *Xtemp = hypre_ParILUDataXTemp(ilu_data);
hypre_ParVector *Ytemp = hypre_ParILUDataYTemp(ilu_data);
HYPRE_Real *fext = hypre_ParILUDataFExt(ilu_data);
HYPRE_Real *uext = hypre_ParILUDataUExt(ilu_data);
hypre_ParVector *residual;
HYPRE_Real alpha = -1;
HYPRE_Real beta = 1;
HYPRE_Real conv_factor = 0.0;
HYPRE_Real resnorm = 1.0;
HYPRE_Real init_resnorm = 0.0;
HYPRE_Real rel_resnorm;
HYPRE_Real rhs_norm = 0.0;
HYPRE_Real old_resnorm;
HYPRE_Real ieee_check = 0.0;
HYPRE_Real operat_cmplxty = hypre_ParILUDataOperatorComplexity(ilu_data);
HYPRE_Int Solve_err_flag;
#ifdef HYPRE_USING_CUDA
HYPRE_Int test_opt;
#endif
/* problem size */
HYPRE_Int n = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
HYPRE_Int nLU = hypre_ParILUDataNLU(ilu_data);
HYPRE_Int *u_end = hypre_ParILUDataUEnd(ilu_data);
/* Schur system solve */
HYPRE_Solver schur_solver = hypre_ParILUDataSchurSolver(ilu_data);
HYPRE_Solver schur_precond = hypre_ParILUDataSchurPrecond(ilu_data);
hypre_ParVector *rhs = hypre_ParILUDataRhs(ilu_data);
hypre_ParVector *x = hypre_ParILUDataX(ilu_data);
/* begin */
HYPRE_ANNOTATE_FUNC_BEGIN;
if(logging > 1)
{
residual = hypre_ParILUDataResidual(ilu_data);
}
hypre_ParILUDataNumIterations(ilu_data) = 0;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
/*-----------------------------------------------------------------------
* Write the solver parameters
*-----------------------------------------------------------------------*/
if (my_id == 0 && print_level > 1)
{
hypre_ILUWriteSolverParams(ilu_data);
}
/*-----------------------------------------------------------------------
* Initialize the solver error flag
*-----------------------------------------------------------------------*/
Solve_err_flag = 0;
/*-----------------------------------------------------------------------
* write some initial info
*-----------------------------------------------------------------------*/
if (my_id == 0 && print_level > 1 && tol > 0.)
{
hypre_printf("\n\n ILU SOLVER SOLUTION INFO:\n");
}
/*-----------------------------------------------------------------------
* Compute initial residual and print
*-----------------------------------------------------------------------*/
if (print_level > 1 || logging > 1 || tol > 0.)
{
if ( logging > 1 )
{
hypre_ParVectorCopy(f, residual );
if (tol > 0.0)
{
hypre_ParCSRMatrixMatvec(alpha, A, u, beta, residual );
}
resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual ));
}
else
{
hypre_ParVectorCopy(f, Ftemp);
if (tol > 0.0)
{
hypre_ParCSRMatrixMatvec(alpha, A, u, beta, Ftemp);
}
resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp));
}
      /* Since it does not diminish performance, attempt to return an error flag
and notify users when they supply bad input. */
if (resnorm != 0.)
{
ieee_check = resnorm/resnorm; /* INF -> NaN conversion */
}
if (ieee_check != ieee_check)
{
/* ...INFs or NaNs in input can make ieee_check a NaN. This test
for ieee_check self-equality works on all IEEE-compliant compilers/
machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
by <NAME>, May 31, 1996. Currently (July 2002) this paper may be
found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
if (print_level > 0)
{
hypre_printf("\n\nERROR detected by Hypre ... BEGIN\n");
hypre_printf("ERROR -- hypre_ILUSolve: INFs and/or NaNs detected in input.\n");
hypre_printf("User probably placed non-numerics in supplied A, x_0, or b.\n");
hypre_printf("ERROR detected by Hypre ... END\n\n\n");
}
hypre_error(HYPRE_ERROR_GENERIC);
HYPRE_ANNOTATE_FUNC_END;
return hypre_error_flag;
}
init_resnorm = resnorm;
rhs_norm = sqrt(hypre_ParVectorInnerProd(f, f));
if (rhs_norm > HYPRE_REAL_EPSILON)
{
rel_resnorm = init_resnorm / rhs_norm;
}
else
{
/* rhs is zero, return a zero solution */
hypre_ParVectorSetConstantValues(U_array, 0.0);
if(logging > 0)
{
rel_resnorm = 0.0;
hypre_ParILUDataFinalRelResidualNorm(ilu_data) = rel_resnorm;
}
HYPRE_ANNOTATE_FUNC_END;
return hypre_error_flag;
}
}
else
{
rel_resnorm = 1.;
}
if (my_id == 0 && print_level > 1)
{
hypre_printf(" relative\n");
hypre_printf(" residual factor residual\n");
hypre_printf(" -------- ------ --------\n");
hypre_printf(" Initial %e %e\n",init_resnorm,
rel_resnorm);
}
matA = A;
U_array = u;
F_array = f;
/************** Main Solver Loop - always do 1 iteration ************/
iter = 0;
while ((rel_resnorm >= tol || iter < 1)
&& iter < max_iter)
{
/* Do one solve on LUe=r */
switch(ilu_type){
case 0: case 1:
#ifdef HYPRE_USING_CUDA
/* Apply GPU-accelerated LU solve */
hypre_ILUSolveCusparseLU(matA, matL_des, matU_des, matBL_info, matBU_info, matBLU_d, ilu_solve_policy,
ilu_solve_buffer, F_array, U_array, perm, n, Utemp, Ftemp);//BJ-cusparse
#else
hypre_ILUSolveLU(matA, F_array, U_array, perm, n, matL, matD, matU, Utemp, Ftemp); //BJ
#endif
break;
case 10: case 11:
#ifdef HYPRE_USING_CUDA
/* Apply GPU-accelerated LU solve */
hypre_ILUSolveCusparseSchurGMRES(matA, F_array, U_array, perm, nLU, matS, Utemp, Ftemp, schur_solver, schur_precond, rhs, x, u_end,
matL_des, matU_des, matBL_info, matBU_info, matSL_info, matSU_info,
matBLU_d, matE_d, matF_d, ilu_solve_policy, ilu_solve_buffer);//GMRES-cusparse
#else
hypre_ILUSolveSchurGMRES(matA, F_array, U_array, perm, perm, nLU, matL, matD, matU, matS,
Utemp, Ftemp, schur_solver, schur_precond, rhs, x, u_end); //GMRES
#endif
break;
case 20: case 21:
hypre_ILUSolveSchurNSH(matA, F_array, U_array, perm, nLU, matL, matD, matU, matS,
Utemp, Ftemp, schur_solver, rhs, x, u_end); //MR+NSH
break;
case 30: case 31:
hypre_ILUSolveLURAS(matA, F_array, U_array, perm, matL, matD, matU, Utemp, Utemp, fext, uext); //RAS
break;
case 40: case 41:
hypre_ILUSolveSchurGMRES(matA, F_array, U_array, perm, qperm, nLU, matL, matD, matU, matS,
Utemp, Ftemp, schur_solver, schur_precond, rhs, x, u_end); //GMRES
break;
case 50:
#ifdef HYPRE_USING_CUDA
test_opt = hypre_ParILUDataTestOption(ilu_data);
hypre_ILUSolveRAPGMRES(matA, F_array, U_array, perm, nLU, matS, Utemp, Ftemp, Xtemp, Ytemp, schur_solver, schur_precond, rhs, x, u_end,
matL_des, matU_des, matAL_info, matAU_info, matBL_info, matBU_info, matSL_info, matSU_info,
Aperm, matALU_d, matBLU_d, matE_d, matF_d, ilu_solve_policy, ilu_solve_buffer, test_opt);//GMRES-RAP
#else
hypre_ILUSolveRAPGMRESHOST(matA, F_array, U_array, perm, nLU, matL, matD, matU, matmL, matmD, matmU, Utemp, Ftemp, Xtemp, Ytemp,
schur_solver, schur_precond, rhs, x, u_end);//GMRES-RAP
#endif
break;
default:
#ifdef HYPRE_USING_CUDA
/* Apply GPU-accelerated LU solve */
hypre_ILUSolveCusparseLU(matA, matL_des, matU_des, matBL_info, matBU_info, matBLU_d, ilu_solve_policy,
ilu_solve_buffer, F_array, U_array, perm, n, Utemp, Ftemp);//BJ-cusparse
#else
hypre_ILUSolveLU(matA, F_array, U_array, perm, n, matL, matD, matU, Utemp, Ftemp); //BJ
#endif
break;
}
/*---------------------------------------------------------------
* Compute residual and residual norm
*----------------------------------------------------------------*/
if (print_level > 1 || logging > 1 || tol > 0.)
{
old_resnorm = resnorm;
if ( logging > 1 ) {
hypre_ParVectorCopy(F_array, residual);
hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, residual );
resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual ));
}
else {
hypre_ParVectorCopy(F_array, Ftemp);
hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, Ftemp);
resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp));
}
if (old_resnorm) conv_factor = resnorm / old_resnorm;
else conv_factor = resnorm;
if (rhs_norm > HYPRE_REAL_EPSILON)
{
rel_resnorm = resnorm / rhs_norm;
}
else
{
rel_resnorm = resnorm;
}
norms[iter] = rel_resnorm;
}
++iter;
hypre_ParILUDataNumIterations(ilu_data) = iter;
hypre_ParILUDataFinalRelResidualNorm(ilu_data) = rel_resnorm;
if (my_id == 0 && print_level > 1)
{
hypre_printf(" ILUSolve %2d %e %f %e \n", iter,
resnorm, conv_factor, rel_resnorm);
}
}
/* check convergence within max_iter */
if (iter == max_iter && tol > 0.)
{
Solve_err_flag = 1;
hypre_error(HYPRE_ERROR_CONV);
}
/*-----------------------------------------------------------------------
* Print closing statistics
* Add operator and grid complexity stats
*-----------------------------------------------------------------------*/
if (iter > 0 && init_resnorm)
{
conv_factor = pow((resnorm/init_resnorm),(1.0/(HYPRE_Real) iter));
}
else
{
conv_factor = 1.;
}
if (print_level > 1)
{
/*** compute operator and grid complexity (fill factor) here ?? ***/
if (my_id == 0)
{
if (Solve_err_flag == 1)
{
hypre_printf("\n\n==============================================");
hypre_printf("\n NOTE: Convergence tolerance was not achieved\n");
hypre_printf(" within the allowed %d iterations\n",max_iter);
hypre_printf("==============================================");
}
hypre_printf("\n\n Average Convergence Factor = %f \n",conv_factor);
hypre_printf(" operator = %f\n",operat_cmplxty);
}
}
HYPRE_ANNOTATE_FUNC_END;
return hypre_error_flag;
}
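/*--------------------------------------------------------------------------
 * Hedged illustration (not part of hypre): the convergence bookkeeping used
 * in the loop above.  The relative residual is ||r_k|| / ||b|| and the
 * average convergence factor reported at the end is
 * (||r_k|| / ||r_0||)^(1/k).
 *--------------------------------------------------------------------------*/
static HYPRE_Real
hypre_ILU_avg_conv_factor_sketch( HYPRE_Real init_resnorm,
                                  HYPRE_Real final_resnorm,
                                  HYPRE_Int  iters )
{
   if (iters <= 0 || init_resnorm <= 0.0)
   {
      return 1.0;
   }
   return pow(final_resnorm / init_resnorm, 1.0 / (HYPRE_Real) iters);
}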
/* Schur Complement solve with GMRES on schur complement
 * ParCSRMatrix S is already built in the ilu data structure, here we use S directly
* L, D and U factors only have local scope (no off-diagonal processor terms)
* so apart from the residual calculation (which uses A), the solves with the
* L and U factors are local.
* S is the global Schur complement
* schur_solver is a GMRES solver
* schur_precond is the ILU preconditioner for GMRES
 * rhs and x are helper vectors for solving the Schur system
*/
HYPRE_Int
hypre_ILUSolveSchurGMRES(hypre_ParCSRMatrix *A, hypre_ParVector *f,
hypre_ParVector *u, HYPRE_Int *perm, HYPRE_Int *qperm,
HYPRE_Int nLU, hypre_ParCSRMatrix *L,
HYPRE_Real* D, hypre_ParCSRMatrix *U,
hypre_ParCSRMatrix *S,
hypre_ParVector *ftemp, hypre_ParVector *utemp,
HYPRE_Solver schur_solver, HYPRE_Solver schur_precond,
hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end)
{
/* data objects for communication */
// MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* data objects for L and U */
hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L);
HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag);
HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag);
HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag);
hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U);
HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag);
HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag);
HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag);
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
HYPRE_Real alpha;
HYPRE_Real beta;
HYPRE_Int i, j, k1, k2, col;
/* problem size */
HYPRE_Int n = hypre_CSRMatrixNumRows(L_diag);
// HYPRE_Int m = n - nLU;
/* other data objects for computation */
// hypre_Vector *f_local;
// HYPRE_Real *f_data;
hypre_Vector *rhs_local;
HYPRE_Real *rhs_data;
hypre_Vector *x_local;
HYPRE_Real *x_data;
/* begin */
beta = 1.0;
alpha = -1.0;
/* compute residual */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* 1st need to solve LBi*xi = fi
* L solve, solve xi put in u_temp upper
*/
// f_local = hypre_ParVectorLocalVector(f);
// f_data = hypre_VectorData(f_local);
/* now update with L to solve */
for(i = 0 ; i < nLU ; i ++)
{
utemp_data[qperm[i]] = ftemp_data[perm[i]];
k1 = L_diag_i[i] ; k2 = L_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
utemp_data[qperm[i]] -= L_diag_data[j] * utemp_data[qperm[L_diag_j[j]]];
}
}
/* 2nd need to compute g'i = gi - Ei*UBi^-1*xi
* now put g'i into the f_temp lower
*/
for(i = nLU ; i < n ; i ++)
{
k1 = L_diag_i[i] ; k2 = L_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
col = L_diag_j[j];
ftemp_data[perm[i]] -= L_diag_data[j] * utemp_data[qperm[col]];
}
}
/* 3rd need to solve global Schur Complement Sy = g'
* for now only solve the local system
* solve y put in u_temp lower
    * only solve when S is not NULL
*/
if(S)
{
/*initialize solution to zero for residual equation */
hypre_ParVectorSetConstantValues(x, 0.0);
/* setup vectors for solve */
rhs_local = hypre_ParVectorLocalVector(rhs);
rhs_data = hypre_VectorData(rhs_local);
x_local = hypre_ParVectorLocalVector(x);
x_data = hypre_VectorData(x_local);
/* set rhs value */
for(i = nLU ; i < n ; i ++)
{
rhs_data[i-nLU] = ftemp_data[perm[i]];
}
/* solve */
HYPRE_GMRESSolve(schur_solver,(HYPRE_Matrix)S,(HYPRE_Vector)rhs,(HYPRE_Vector)x);
/* copy value back to original */
for(i = nLU ; i < n ; i ++)
{
utemp_data[qperm[i]] = x_data[i-nLU];
}
}
/* 4th need to compute zi = xi - LBi^-1*Fi*yi
* put zi in f_temp upper
* only do this computation when nLU < n
* U is unsorted, search is expensive when unnecessary
*/
if(nLU < n)
{
for(i = 0 ; i < nLU ; i ++)
{
ftemp_data[perm[i]] = utemp_data[qperm[i]];
k1 = u_end[i] ; k2 = U_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
col = U_diag_j[j];
ftemp_data[perm[i]] -= U_diag_data[j] * utemp_data[qperm[col]];
}
}
for(i = 0 ; i < nLU ; i ++)
{
utemp_data[qperm[i]] = ftemp_data[perm[i]];
}
}
/* 5th need to solve UBi*ui = zi */
/* put result in u_temp upper */
for(i = nLU-1 ; i >= 0 ; i --)
{
k1 = U_diag_i[i] ; k2 = u_end[i];
for(j = k1 ; j < k2 ; j ++)
{
col = U_diag_j[j];
utemp_data[qperm[i]] -= U_diag_data[j] * utemp_data[qperm[col]];
}
utemp_data[qperm[i]] *= D[i];
}
   /* done, now everything is in u_temp, update the solution */
hypre_ParVectorAxpy(beta, utemp, u);
return hypre_error_flag;
}
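/*--------------------------------------------------------------------------
 * Hedged illustration (not part of hypre): the block elimination performed
 * by hypre_ILUSolveSchurGMRES above, written out for a 2x2 system with
 * scalar "blocks".  For
 *     [ a  b ] [u1]   [f1]
 *     [ c  d ] [u2] = [f2]
 * the factorization is L = [1 0; c/a 1], U = [a b; 0 s] with the Schur
 * complement s = d - (c/a)*b, and the five solve steps in the routine
 * reduce to the lines below.  Plain scalars replace the hypre vectors.
 *--------------------------------------------------------------------------*/
static void
hypre_ILU_schur_scalar_sketch( HYPRE_Real a, HYPRE_Real b, HYPRE_Real c, HYPRE_Real d,
                               HYPRE_Real f1, HYPRE_Real f2,
                               HYPRE_Real *u1, HYPRE_Real *u2 )
{
   HYPRE_Real x, g, s, z;
   x   = f1;                 /* step 1: forward (L) solve, unit diagonal      */
   g   = f2 - (c / a) * x;   /* step 2: Schur rhs  g' = f2 - (c/a) * x        */
   s   = d - (c / a) * b;    /* Schur complement   s  = d - c a^{-1} b        */
   *u2 = g / s;              /* step 3: solve the Schur system  s * u2 = g'   */
   z   = x - b * (*u2);      /* step 4: z = x - F * u2                        */
   *u1 = z / a;              /* step 5: backward (U) solve                    */
}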
/* Newton-Schulz-Hotelling solve
 * ParCSRMatrix S is already built in the ilu data structure
* S here is the INVERSE of Schur Complement
* L, D and U factors only have local scope (no off-diagonal processor terms)
* so apart from the residual calculation (which uses A), the solves with the
* L and U factors are local.
* S is the inverse global Schur complement
 * rhs and x are helper vectors for solving the Schur system
*/
HYPRE_Int
hypre_ILUSolveSchurNSH(hypre_ParCSRMatrix *A, hypre_ParVector *f,
hypre_ParVector *u, HYPRE_Int *perm,
HYPRE_Int nLU, hypre_ParCSRMatrix *L,
HYPRE_Real* D, hypre_ParCSRMatrix *U,
hypre_ParCSRMatrix *S,
hypre_ParVector *ftemp, hypre_ParVector *utemp,
HYPRE_Solver schur_solver,
hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end)
{
/* data objects for communication */
// MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* data objects for L and U */
hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L);
HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag);
HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag);
HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag);
hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U);
HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag);
HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag);
HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag);
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
HYPRE_Real alpha;
HYPRE_Real beta;
HYPRE_Int i, j, k1, k2, col;
/* problem size */
HYPRE_Int n = hypre_CSRMatrixNumRows(L_diag);
// HYPRE_Int m = n - nLU;
/* other data objects for computation */
// hypre_Vector *f_local;
// HYPRE_Real *f_data;
hypre_Vector *rhs_local;
HYPRE_Real *rhs_data;
hypre_Vector *x_local;
HYPRE_Real *x_data;
/* begin */
beta = 1.0;
alpha = -1.0;
/* compute residual */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* 1st need to solve LBi*xi = fi
* L solve, solve xi put in u_temp upper
*/
// f_local = hypre_ParVectorLocalVector(f);
// f_data = hypre_VectorData(f_local);
/* now update with L to solve */
for(i = 0 ; i < nLU ; i ++)
{
utemp_data[perm[i]] = ftemp_data[perm[i]];
k1 = L_diag_i[i] ; k2 = L_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
utemp_data[perm[i]] -= L_diag_data[j] * utemp_data[perm[L_diag_j[j]]];
}
}
/* 2nd need to compute g'i = gi - Ei*UBi^-1*xi
* now put g'i into the f_temp lower
*/
for(i = nLU ; i < n ; i ++)
{
k1 = L_diag_i[i] ; k2 = L_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
col = L_diag_j[j];
ftemp_data[perm[i]] -= L_diag_data[j] * utemp_data[perm[col]];
}
}
/* 3rd need to solve global Schur Complement Sy = g'
* for now only solve the local system
* solve y put in u_temp lower
* only solve when S is not NULL
*/
if(S)
{
/*initialize solution to zero for residual equation */
hypre_ParVectorSetConstantValues(x, 0.0);
/* setup vectors for solve */
rhs_local = hypre_ParVectorLocalVector(rhs);
rhs_data = hypre_VectorData(rhs_local);
x_local = hypre_ParVectorLocalVector(x);
x_data = hypre_VectorData(x_local);
/* set rhs value */
for(i = nLU ; i < n ; i ++)
{
rhs_data[i-nLU] = ftemp_data[perm[i]];
}
/* Solve Schur system with approx inverse
* x = S*rhs
*/
hypre_NSHSolve(schur_solver,S,rhs,x);
/* copy value back to original */
for(i = nLU ; i < n ; i ++)
{
utemp_data[perm[i]] = x_data[i-nLU];
}
}
/* 4th need to compute zi = xi - LBi^-1*yi
* put zi in f_temp upper
* only do this computation when nLU < n
* U is unsorted, search is expensive when unnecessary
*/
if(nLU < n)
{
for(i = 0 ; i < nLU ; i ++)
{
ftemp_data[perm[i]] = utemp_data[perm[i]];
k1 = u_end[i] ; k2 = U_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
col = U_diag_j[j];
ftemp_data[perm[i]] -= U_diag_data[j] * utemp_data[perm[col]];
}
}
for(i = 0 ; i < nLU ; i ++)
{
utemp_data[perm[i]] = ftemp_data[perm[i]];
}
}
/* 5th need to solve UBi*ui = zi */
/* put result in u_temp upper */
for(i = nLU-1 ; i >= 0 ; i --)
{
k1 = U_diag_i[i] ; k2 = u_end[i];
for(j = k1 ; j < k2 ; j ++)
{
col = U_diag_j[j];
utemp_data[perm[i]] -= U_diag_data[j] * utemp_data[perm[col]];
}
utemp_data[perm[i]] *= D[i];
}
   /* done, now everything is in u_temp, update the solution */
hypre_ParVectorAxpy(beta, utemp, u);
return hypre_error_flag;
}
/* Incomplete LU solve
* L, D and U factors only have local scope (no off-diagonal processor terms)
* so apart from the residual calculation (which uses A), the solves with the
* L and U factors are local.
*/
HYPRE_Int
hypre_ILUSolveLU(hypre_ParCSRMatrix *A, hypre_ParVector *f,
hypre_ParVector *u, HYPRE_Int *perm,
HYPRE_Int nLU, hypre_ParCSRMatrix *L,
HYPRE_Real* D, hypre_ParCSRMatrix *U,
hypre_ParVector *ftemp, hypre_ParVector *utemp)
{
hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L);
HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag);
HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag);
HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag);
hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U);
HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag);
HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag);
HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag);
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
HYPRE_Real alpha;
HYPRE_Real beta;
HYPRE_Int i, j, k1, k2;
/* begin */
alpha = -1.0;
beta = 1.0;
/* Initialize Utemp to zero.
* This is necessary for correctness, when we use optimized
* vector operations in the case where sizeof(L, D or U) < sizeof(A)
*/
//hypre_ParVectorSetConstantValues( utemp, 0.);
/* compute residual */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* L solve - Forward solve */
/* copy rhs to account for diagonal of L (which is identity) */
for( i = 0; i < nLU; i++ )
{
utemp_data[perm[i]] = ftemp_data[perm[i]];
}
/* update with remaining (off-diagonal) entries of L */
for( i = 0; i < nLU; i++ )
{
k1 = L_diag_i[i] ; k2 = L_diag_i[i+1];
for(j=k1; j <k2; j++)
{
utemp_data[perm[i]] -= L_diag_data[j] * utemp_data[perm[L_diag_j[j]]];
}
}
/*-------------------- U solve - Backward substitution */
for( i = nLU-1; i >= 0; i-- )
{
/* first update with the remaining (off-diagonal) entries of U */
k1 = U_diag_i[i] ; k2 = U_diag_i[i+1];
for(j=k1; j <k2; j++)
{
utemp_data[perm[i]] -= U_diag_data[j] * utemp_data[perm[U_diag_j[j]]];
}
/* diagonal scaling (contribution from D. Note: D is stored as its inverse) */
utemp_data[perm[i]] *= D[i];
}
/* Update solution */
hypre_ParVectorAxpy(beta, utemp, u);
return hypre_error_flag;
}
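/*--------------------------------------------------------------------------
 * Hedged illustration (not part of hypre): the same forward/backward
 * substitution as hypre_ILUSolveLU, on plain CSR arrays and without the
 * permutation.  L stores only its strictly lower part (unit diagonal is
 * implicit), U stores only its strictly upper part, and D_inv holds the
 * INVERSE of the diagonal of U, matching the storage convention above.
 *--------------------------------------------------------------------------*/
static void
hypre_ILU_lu_solve_sketch( HYPRE_Int n,
                           HYPRE_Int *L_i, HYPRE_Int *L_j, HYPRE_Real *L_a,
                           HYPRE_Int *U_i, HYPRE_Int *U_j, HYPRE_Real *U_a,
                           HYPRE_Real *D_inv,
                           HYPRE_Real *f, HYPRE_Real *u )
{
   HYPRE_Int i, j;
   /* forward solve: u = L^{-1} f */
   for (i = 0; i < n; i++)
   {
      u[i] = f[i];
      for (j = L_i[i]; j < L_i[i+1]; j++)
      {
         u[i] -= L_a[j] * u[L_j[j]];
      }
   }
   /* backward solve: u = U^{-1} u, using the stored inverse diagonal */
   for (i = n - 1; i >= 0; i--)
   {
      for (j = U_i[i]; j < U_i[i+1]; j++)
      {
         u[i] -= U_a[j] * u[U_j[j]];
      }
      u[i] *= D_inv[i];
   }
}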
/* Incomplete LU solve RAS
* L, D and U factors only have local scope (no off-diagonal processor terms)
* so apart from the residual calculation (which uses A), the solves with the
* L and U factors are local.
 * fext and uext are temporary arrays for external data
*/
HYPRE_Int
hypre_ILUSolveLURAS(hypre_ParCSRMatrix *A, hypre_ParVector *f,
hypre_ParVector *u, HYPRE_Int *perm,
hypre_ParCSRMatrix *L,
HYPRE_Real* D, hypre_ParCSRMatrix *U,
hypre_ParVector *ftemp, hypre_ParVector *utemp,
HYPRE_Real *fext, HYPRE_Real *uext)
{
hypre_ParCSRCommPkg *comm_pkg;
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int num_sends, begin, end;
hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L);
HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag);
HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag);
HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag);
hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U);
HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag);
HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag);
HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag);
HYPRE_Int n = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A));
HYPRE_Int m = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
// HYPRE_Int buffer_size;
HYPRE_Int n_total = m + n;
HYPRE_Int idx;
HYPRE_Int jcol;
HYPRE_Int col;
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
HYPRE_Real alpha;
HYPRE_Real beta;
HYPRE_Int i, j, k1, k2;
/* begin */
alpha = -1.0;
beta = 1.0;
/* prepare for communication */
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
/* setup if not yet built */
if(!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Initialize Utemp to zero.
* This is necessary for correctness, when we use optimized
* vector operations in the case where sizeof(L, D or U) < sizeof(A)
*/
//hypre_ParVectorSetConstantValues( utemp, 0.);
/* compute residual */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* communication to get external data */
/* get total num of send */
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg,0);
end = hypre_ParCSRCommPkgSendMapStart(comm_pkg,num_sends);
/* copy new index into send_buf */
for(i = begin ; i < end ; i ++)
{
/* all we need is just send out data, we don't need to worry about the
* permutation of offd part, actually we don't need to worry about
* permutation at all
       * borrow uext as the send buffer.
*/
uext[i-begin] = ftemp_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
}
/* main communication */
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, uext, fext);
hypre_ParCSRCommHandleDestroy(comm_handle);
/* L solve - Forward solve */
for( i = 0 ; i < n_total ; i ++)
{
k1 = L_diag_i[i] ; k2 = L_diag_i[i+1];
if( i < n )
{
/* diag part */
utemp_data[perm[i]] = ftemp_data[perm[i]];
for(j=k1; j <k2; j++)
{
col = L_diag_j[j];
if( col < n )
{
utemp_data[perm[i]] -= L_diag_data[j] * utemp_data[perm[col]];
}
else
{
jcol = col - n;
utemp_data[perm[i]] -= L_diag_data[j] * uext[jcol];
}
}
}
else
{
/* offd part */
idx = i - n;
uext[idx] = fext[idx];
for(j=k1; j <k2; j++)
{
col = L_diag_j[j];
if(col < n)
{
uext[idx] -= L_diag_data[j] * utemp_data[perm[col]];
}
else
{
jcol = col - n;
uext[idx] -= L_diag_data[j] * uext[jcol];
}
}
}
}
/*-------------------- U solve - Backward substitution */
for( i = n_total-1; i >= 0; i-- )
{
/* first update with the remaining (off-diagonal) entries of U */
k1 = U_diag_i[i] ; k2 = U_diag_i[i+1];
if( i < n )
{
/* diag part */
for(j=k1; j <k2; j++)
{
col = U_diag_j[j];
if( col < n )
{
utemp_data[perm[i]] -= U_diag_data[j] * utemp_data[perm[col]];
}
else
{
jcol = col - n;
utemp_data[perm[i]] -= U_diag_data[j] * uext[jcol];
}
}
/* diagonal scaling (contribution from D. Note: D is stored as its inverse) */
utemp_data[perm[i]] *= D[i];
}
else
{
/* 2nd part of offd */
idx = i - n;
for(j=k1; j <k2; j++)
{
col = U_diag_j[j];
if( col < n )
{
uext[idx] -= U_diag_data[j] * utemp_data[perm[col]];
}
else
{
jcol = col - n;
uext[idx] -= U_diag_data[j] * uext[jcol];
}
}
/* diagonal scaling (contribution from D. Note: D is stored as its inverse) */
uext[idx] *= D[i];
}
}
/* Update solution */
hypre_ParVectorAxpy(beta, utemp, u);
return hypre_error_flag;
}
#ifdef HYPRE_USING_CUDA
/* Permutation function (for GPU version, can just call thrust)
* option 00: perm integer array
* option 01: rperm integer array
* option 10: perm real array
* option 11: rperm real array
* */
HYPRE_Int
hypre_ILUSeqVectorPerm(void *vectori, void *vectoro, HYPRE_Int size, HYPRE_Int *perm, HYPRE_Int option)
{
cudaDeviceSynchronize();
HYPRE_Int i;
switch(option)
{
case 00:
{
HYPRE_Int *ivectori = (HYPRE_Int *) vectori;
HYPRE_Int *ivectoro = (HYPRE_Int *) vectoro;
for(i = 0 ; i < size ; i ++)
{
ivectoro[i] = ivectori[perm[i]];
}
break;
}
case 01:
{
HYPRE_Int *ivectori = (HYPRE_Int *) vectori;
HYPRE_Int *ivectoro = (HYPRE_Int *) vectoro;
for(i = 0 ; i < size ; i ++)
{
ivectoro[perm[i]] = ivectori[i];
}
break;
}
case 10:
{
HYPRE_Real *dvectori = (HYPRE_Real *) vectori;
HYPRE_Real *dvectoro = (HYPRE_Real *) vectoro;
for(i = 0 ; i < size ; i ++)
{
dvectoro[i] = dvectori[perm[i]];
}
break;
}
case 11:
{
HYPRE_Real *dvectori = (HYPRE_Real *) vectori;
HYPRE_Real *dvectoro = (HYPRE_Real *) vectoro;
for(i = 0 ; i < size ; i ++)
{
dvectoro[perm[i]] = dvectori[i];
}
break;
}
default:
{
         hypre_printf("Unknown option in hypre_ILUSeqVectorPerm\n");
hypre_assert(1==0);
}
}
return hypre_error_flag;
}
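/*--------------------------------------------------------------------------
 * Hedged illustration (not part of hypre): the four options above in plain
 * loops.  Options x0 are a gather (out[i] = in[perm[i]]), options x1 are a
 * scatter (out[perm[i]] = in[i]); the leading digit only selects integer or
 * real data.  A gather followed by a scatter with the same perm recovers
 * the original ordering.
 *--------------------------------------------------------------------------*/
static void
hypre_ILU_perm_sketch( HYPRE_Int size, HYPRE_Int *perm,
                       HYPRE_Real *in, HYPRE_Real *gathered, HYPRE_Real *scattered )
{
   HYPRE_Int i;
   for (i = 0; i < size; i++)
   {
      gathered[i] = in[perm[i]];        /* option 10: permute into new order */
   }
   for (i = 0; i < size; i++)
   {
      scattered[perm[i]] = gathered[i]; /* option 11: reverse permutation    */
   }
   /* scattered now equals in again */
}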
/* Incomplete LU solve (GPU)
* L, D and U factors only have local scope (no off-diagonal processor terms)
* so apart from the residual calculation (which uses A), the solves with the
* L and U factors are local.
*/
HYPRE_Int
hypre_ILUSolveCusparseLU(hypre_ParCSRMatrix *A, cusparseMatDescr_t matL_des, cusparseMatDescr_t matU_des,
csrsv2Info_t matL_info, csrsv2Info_t matU_info, hypre_CSRMatrix *matLU_d,
cusparseSolvePolicy_t ilu_solve_policy, void *ilu_solve_buffer,
hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int *perm,
HYPRE_Int n, hypre_ParVector *ftemp, hypre_ParVector *utemp)
{
   /* Only solve when there is something to solve */
if(n == 0)
{
return hypre_error_flag;
}
/* ILU data */
HYPRE_Real *LU_data = hypre_CSRMatrixData(matLU_d);
HYPRE_Int *LU_i = hypre_CSRMatrixI(matLU_d);
HYPRE_Int *LU_j = hypre_CSRMatrixJ(matLU_d);
HYPRE_Int nnz = LU_i[n];
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
HYPRE_Real alpha;
HYPRE_Real beta;
//HYPRE_Int i, j, k1, k2;
HYPRE_Int isDoublePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double);
HYPRE_Int isSinglePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double) / 2;
hypre_assert(isDoublePrecision || isSinglePrecision);
/* begin */
alpha = -1.0;
beta = 1.0;
cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle());
/* Initialize Utemp to zero.
* This is necessary for correctness, when we use optimized
* vector operations in the case where sizeof(L, D or U) < sizeof(A)
*/
//hypre_ParVectorSetConstantValues( utemp, 0.);
/* compute residual */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* apply permutation */
HYPRE_THRUST_CALL(gather, perm, perm + n, ftemp_data, utemp_data);
if(isDoublePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, nnz, (hypre_double *) &beta, matL_des,
(hypre_double *) LU_data, LU_i, LU_j, matL_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, nnz, (hypre_double *) &beta, matU_des,
(hypre_double *) LU_data, LU_i, LU_j, matU_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, nnz, (float *) &beta, matL_des,
(float *) LU_data, LU_i, LU_j, matL_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, nnz, (float *) &beta, matU_des,
(float *) LU_data, LU_i, LU_j, matU_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
/* apply reverse permutation */
HYPRE_THRUST_CALL(scatter,utemp_data, utemp_data + n, perm, ftemp_data);
/* Update solution */
hypre_ParVectorAxpy(beta, ftemp, u);
return hypre_error_flag;
}
/* Schur Complement solve with GMRES on schur complement
 * ParCSRMatrix S is already built in the ilu data structure, here we use S directly
* L, D and U factors only have local scope (no off-diagonal processor terms)
* so apart from the residual calculation (which uses A), the solves with the
* L and U factors are local.
* S is the global Schur complement
* schur_solver is a GMRES solver
* schur_precond is the ILU preconditioner for GMRES
 * rhs and x are helper vectors for solving the Schur system
*/
HYPRE_Int
hypre_ILUSolveCusparseSchurGMRES(hypre_ParCSRMatrix *A, hypre_ParVector *f,
hypre_ParVector *u, HYPRE_Int *perm,
HYPRE_Int nLU, hypre_ParCSRMatrix *S,
hypre_ParVector *ftemp, hypre_ParVector *utemp,
HYPRE_Solver schur_solver, HYPRE_Solver schur_precond,
hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end,
cusparseMatDescr_t matL_des, cusparseMatDescr_t matU_des,
csrsv2Info_t matBL_info, csrsv2Info_t matBU_info, csrsv2Info_t matSL_info, csrsv2Info_t matSU_info,
hypre_CSRMatrix *matBLU_d, hypre_CSRMatrix *matE_d, hypre_CSRMatrix *matF_d,
cusparseSolvePolicy_t ilu_solve_policy, void *ilu_solve_buffer)
{
/* If we don't have S block, just do one L solve and one U solve */
if(!S)
{
/* Just call BJ cusparse and return */
return hypre_ILUSolveCusparseLU(A, matL_des, matU_des, matBL_info, matBU_info, matBLU_d, ilu_solve_policy,
ilu_solve_buffer, f, u, perm, nLU, ftemp, utemp);
}
/* data objects for communication */
// MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* data objects for temp vector */
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
hypre_Vector *rhs_local = hypre_ParVectorLocalVector(rhs);
HYPRE_Real *rhs_data = hypre_VectorData(rhs_local);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
HYPRE_Real *x_data = hypre_VectorData(x_local);
HYPRE_Real alpha;
HYPRE_Real beta;
//HYPRE_Real gamma;
//HYPRE_Int i, j, k1, k2, col;
/* problem size */
HYPRE_Int *BLU_i = NULL;
HYPRE_Int *BLU_j = NULL;
HYPRE_Real *BLU_data = NULL;
HYPRE_Int BLU_nnz = 0;
hypre_CSRMatrix *matSLU_d = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *SLU_i = hypre_CSRMatrixI(matSLU_d);
HYPRE_Int *SLU_j = hypre_CSRMatrixJ(matSLU_d);
HYPRE_Real *SLU_data = hypre_CSRMatrixData(matSLU_d);
HYPRE_Int m = hypre_CSRMatrixNumRows(matSLU_d);
HYPRE_Int n = nLU + m;
HYPRE_Int SLU_nnz = SLU_i[m];
hypre_Vector *ftemp_upper = hypre_SeqVectorCreate(nLU);
hypre_Vector *utemp_lower = hypre_SeqVectorCreate(m);
hypre_VectorOwnsData(ftemp_upper) = 0;
hypre_VectorOwnsData(utemp_lower) = 0;
hypre_VectorData(ftemp_upper) = ftemp_data;
hypre_VectorData(utemp_lower) = utemp_data + nLU;
hypre_SeqVectorInitialize(ftemp_upper);
hypre_SeqVectorInitialize(utemp_lower);
if( nLU > 0)
{
BLU_i = hypre_CSRMatrixI(matBLU_d);
BLU_j = hypre_CSRMatrixJ(matBLU_d);
BLU_data = hypre_CSRMatrixData(matBLU_d);
BLU_nnz = BLU_i[nLU];
}
/* begin */
beta = 1.0;
alpha = -1.0;
//gamma = 0.0;
HYPRE_Int isDoublePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double);
HYPRE_Int isSinglePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double) / 2;
hypre_assert(isDoublePrecision || isSinglePrecision);
cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle());
/* compute residual */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* 1st need to solve LBi*xi = fi
* L solve, solve xi put in u_temp upper
*/
/* apply permutation before we can start our solve */
HYPRE_THRUST_CALL(gather, perm, perm + n, ftemp_data, utemp_data);
if(nLU > 0)
{
/* This solve won't touch data in utemp, thus, gi is still in utemp_lower */
if(isDoublePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &beta, matL_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBL_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &beta, matL_des,
(float *) BLU_data, BLU_i, BLU_j, matBL_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
/* 2nd need to compute g'i = gi - Ei*UBi^{-1}*xi
* Ei*UBi^{-1} is exactly the matE_d here
* Now: LBi^{-1}f_i is in ftemp_upper
* gi' is in utemp_lower
*/
hypre_CSRMatrixMatvec(alpha, matE_d, ftemp_upper, beta, utemp_lower);
}
/* 3rd need to solve global Schur Complement M^{-1}Sy = M^{-1}g'
* for now only solve the local system
* solve y put in u_temp lower
    * only solve when S is not NULL
*/
/* setup vectors for solve
* rhs = M^{-1}g'
*/
if(m > 0)
{
if(isDoublePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &beta, matL_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSL_info,
(hypre_double *) utemp_data + nLU, (hypre_double *) ftemp_data + nLU, ilu_solve_policy, ilu_solve_buffer));
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &beta, matU_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSU_info,
(hypre_double *) ftemp_data + nLU, (hypre_double *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &beta, matL_des,
(float *) SLU_data, SLU_i, SLU_j, matSL_info,
(float *) utemp_data + nLU, (float *) ftemp_data + nLU, ilu_solve_policy, ilu_solve_buffer));
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &beta, matU_des,
(float *) SLU_data, SLU_i, SLU_j, matSU_info,
(float *) ftemp_data + nLU, (float *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* solve */
/* with tricky initial guess */
//hypre_Vector *tv = hypre_ParVectorLocalVector(x);
//HYPRE_Real *tz = hypre_VectorData(tv);
HYPRE_GMRESSolve(schur_solver,(HYPRE_Matrix)schur_precond,(HYPRE_Vector)rhs,(HYPRE_Vector)x);
/* 4th need to compute zi = xi - LBi^-1*yi
* put zi in f_temp upper
* only do this computation when nLU < n
* U is unsorted, search is expensive when unnecessary
*/
if(nLU > 0)
{
hypre_CSRMatrixMatvec(alpha, matF_d, x_local, beta, ftemp_upper);
/* 5th need to solve UBi*ui = zi */
/* put result in u_temp upper */
if(isDoublePrecision)
{
         /* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &beta, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
         /* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &beta, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* copy lower part solution into u_temp as well */
hypre_TMemcpy(utemp_data + nLU, x_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
/* perm back */
HYPRE_THRUST_CALL(scatter,utemp_data, utemp_data + n, perm, ftemp_data);
   /* done, now everything is in u_temp, update the solution */
hypre_ParVectorAxpy(beta, ftemp, u);
hypre_SeqVectorDestroy(ftemp_upper);
hypre_SeqVectorDestroy(utemp_lower);
return hypre_error_flag;
}
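/*--------------------------------------------------------------------------
 * Hedged note (not part of hypre): ftemp_upper and utemp_lower above are
 * views, not copies - they do not own their data and simply alias slices of
 * ftemp / utemp so that matE_d and matF_d can act on the upper and lower
 * blocks without extra memory traffic.  A minimal sketch of that pattern,
 * mirroring the exact call sequence used in the routine:
 *--------------------------------------------------------------------------*/
static hypre_Vector *
hypre_ILU_vector_view_sketch( HYPRE_Real *data, HYPRE_Int offset, HYPRE_Int length )
{
   hypre_Vector *view = hypre_SeqVectorCreate(length);
   hypre_VectorOwnsData(view) = 0;            /* never free the aliased storage        */
   hypre_VectorData(view)     = data + offset;
   hypre_SeqVectorInitialize(view);           /* data already set, as in the code above */
   return view;                               /* free the wrapper with hypre_SeqVectorDestroy */
}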
/* Schur Complement solve with GMRES on schur complement, RAP style
 * ParCSRMatrix S is already built in the ilu data structure, here we use S directly
* L, D and U factors only have local scope (no off-diagonal processor terms)
* so apart from the residual calculation (which uses A), the solves with the
* L and U factors are local.
* S is the global Schur complement
* schur_solver is a GMRES solver
* schur_precond is the ILU preconditioner for GMRES
 * rhs and x are helper vectors for solving the Schur system
*/
HYPRE_Int
hypre_ILUSolveRAPGMRES(hypre_ParCSRMatrix *A, hypre_ParVector *f,
hypre_ParVector *u, HYPRE_Int *perm,
HYPRE_Int nLU, hypre_ParCSRMatrix *S,
hypre_ParVector *ftemp, hypre_ParVector *utemp, hypre_ParVector *xtemp, hypre_ParVector *ytemp,
HYPRE_Solver schur_solver, HYPRE_Solver schur_precond,
hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end,
cusparseMatDescr_t matL_des, cusparseMatDescr_t matU_des,
csrsv2Info_t matAL_info, csrsv2Info_t matAU_info,
csrsv2Info_t matBL_info, csrsv2Info_t matBU_info,
csrsv2Info_t matSL_info, csrsv2Info_t matSU_info,
hypre_ParCSRMatrix *Aperm, hypre_CSRMatrix *matALU_d, hypre_CSRMatrix *matBLU_d, hypre_CSRMatrix *matE_d, hypre_CSRMatrix *matF_d,
cusparseSolvePolicy_t ilu_solve_policy, void *ilu_solve_buffer, HYPRE_Int test_opt)
{
/* data objects for communication */
// MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* data objects for temp vector */
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
hypre_Vector *xtemp_local = hypre_ParVectorLocalVector(xtemp);
HYPRE_Real *xtemp_data = hypre_VectorData(xtemp_local);
//hypre_Vector *ytemp_local = hypre_ParVectorLocalVector(ytemp);
//HYPRE_Real *ytemp_data = hypre_VectorData(ytemp_local);
hypre_Vector *rhs_local = hypre_ParVectorLocalVector(rhs);
HYPRE_Real *rhs_data = hypre_VectorData(rhs_local);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
HYPRE_Real *x_data = hypre_VectorData(x_local);
//HYPRE_Int i, j, k1, k2, col;
/* problem size */
HYPRE_Int *ALU_i = hypre_CSRMatrixI(matALU_d);
HYPRE_Int *ALU_j = hypre_CSRMatrixJ(matALU_d);
HYPRE_Real *ALU_data = hypre_CSRMatrixData(matALU_d);
HYPRE_Int *BLU_i = hypre_CSRMatrixI(matBLU_d);
HYPRE_Int *BLU_j = hypre_CSRMatrixJ(matBLU_d);
HYPRE_Real *BLU_data = hypre_CSRMatrixData(matBLU_d);
HYPRE_Int BLU_nnz = BLU_i[nLU];
hypre_CSRMatrix *matSLU_d = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *SLU_i = hypre_CSRMatrixI(matSLU_d);
HYPRE_Int *SLU_j = hypre_CSRMatrixJ(matSLU_d);
HYPRE_Real *SLU_data = hypre_CSRMatrixData(matSLU_d);
HYPRE_Int m = hypre_CSRMatrixNumRows(matSLU_d);
HYPRE_Int n = nLU + m;
HYPRE_Int SLU_nnz = SLU_i[m];
HYPRE_Int ALU_nnz = ALU_i[n];
hypre_Vector *ftemp_upper = hypre_SeqVectorCreate(nLU);
hypre_Vector *utemp_lower = hypre_SeqVectorCreate(m);
hypre_VectorOwnsData(ftemp_upper) = 0;
hypre_VectorOwnsData(utemp_lower) = 0;
hypre_VectorData(ftemp_upper) = ftemp_data;
hypre_VectorData(utemp_lower) = utemp_data + nLU;
hypre_SeqVectorInitialize(ftemp_upper);
hypre_SeqVectorInitialize(utemp_lower);
/* begin */
HYPRE_Real one = 1.0;
HYPRE_Real mone = -1.0;
HYPRE_Real zero = 0.0;
HYPRE_Int isDoublePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double);
HYPRE_Int isSinglePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double) / 2;
hypre_assert(isDoublePrecision || isSinglePrecision);
cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle());
switch(test_opt)
{
case 1: case 3:
{
/* E and F */
/* compute residual */
hypre_ParCSRMatrixMatvecOutOfPlace(mone, A, u, one, f, utemp);
/* apply permutation before we can start our solve
* Au=f -> (PAQ)Q'u=Pf
*/
HYPRE_THRUST_CALL(gather, perm, perm + n, utemp_data, ftemp_data);
/* A-smoothing
* x = [UA\(LA\(P*f_u))] fill to xtemp
*/
if(n > 0)
{
if(isDoublePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) ALU_data, ALU_i, ALU_j, matAL_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
            /* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) ALU_data, ALU_i, ALU_j, matAU_info,
(hypre_double *) utemp_data, (hypre_double *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (float *) &one, matL_des,
(float *) ALU_data, ALU_i, ALU_j, matAL_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
            /* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (float *) &one, matU_des,
(float *) ALU_data, ALU_i, ALU_j, matAU_info,
(float *) utemp_data, (float *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* residual, we should not touch xtemp for now
* r = R*(f-PAQx)
*/
hypre_ParCSRMatrixMatvec(mone, Aperm, xtemp, one, ftemp);
/* with R is complex */
/* copy partial data in */
hypre_TMemcpy( rhs_data, ftemp_data + nLU, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
/* solve L^{-1} */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBL_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matL_des,
(float *) BLU_data, BLU_i, BLU_j, matBL_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
/* -U^{-1}L^{-1} */
if(isDoublePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* -EU^{-1}L^{-1} */
hypre_CSRMatrixMatvec(mone, matE_d, ftemp_upper, one, rhs_local);
/* now solve S
*/
if(S)
{
/* if we have a schur complement */
hypre_ParVectorSetConstantValues(x, 0.0);
HYPRE_GMRESSolve(schur_solver,(HYPRE_Matrix)schur_precond,(HYPRE_Vector)rhs,(HYPRE_Vector)x);
/* u = xtemp + P*x */
/* -Fx */
hypre_CSRMatrixMatvec(mone, matF_d, x_local, zero, ftemp_upper);
/* -L^{-1}Fx */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBL_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matL_des,
(float *) BLU_data, BLU_i, BLU_j, matBL_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
            /* -U^{-1}L^{-1}Fx */
if(isDoublePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* now copy data to y_lower */
hypre_TMemcpy( ftemp_data + nLU, x_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
/* correction to the residual */
hypre_ParVectorAxpy(one, ftemp, xtemp);
}
else
{
/* otherwise just apply triangular solves */
if(m > 0)
{
if(isDoublePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSL_info,
(hypre_double *) rhs_data, (hypre_double *) x_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &one, matL_des,
(float *) SLU_data, SLU_i, SLU_j, matSL_info,
(float *) rhs_data, (float *) x_data, ilu_solve_policy, ilu_solve_buffer));
}
if(isDoublePrecision)
{
            /* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSU_info,
(hypre_double *) x_data, (hypre_double *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
            /* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &one, matU_des,
(float *) SLU_data, SLU_i, SLU_j, matSU_info,
(float *) x_data, (float *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* u = xtemp + P*x */
/* -Fx */
hypre_CSRMatrixMatvec(mone, matF_d, rhs_local, zero, ftemp_upper);
/* -L^{-1}Fx */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBL_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matL_des,
(float *) BLU_data, BLU_i, BLU_j, matBL_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
            /* -U^{-1}L^{-1}Fx */
if(isDoublePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* now copy data to y_lower */
hypre_TMemcpy( ftemp_data + nLU, rhs_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
hypre_ParVectorAxpy(one, ftemp, xtemp);
}
/* perm back */
HYPRE_THRUST_CALL(scatter,xtemp_data, xtemp_data + n, perm, ftemp_data);
         /* done, now everything is in u_temp, update the solution */
hypre_ParVectorAxpy(one, ftemp, u);
}
break;
case 0: case 2: default:
{
/* EU^{-1} and L^{-1}F */
/* compute residual */
hypre_ParCSRMatrixMatvecOutOfPlace(mone, A, u, one, f, ftemp);
/* apply permutation before we can start our solve
* Au=f -> (PAQ)Q'u=Pf
*/
HYPRE_THRUST_CALL(gather, perm, perm + n, ftemp_data, utemp_data);
/* A-smoothing
* x = [UA\(LA\(P*f_u))] fill to xtemp
*/
if(n > 0)
{
if(isDoublePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) ALU_data, ALU_i, ALU_j, matAL_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
            /* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) ALU_data, ALU_i, ALU_j, matAU_info,
(hypre_double *) ftemp_data, (hypre_double *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (float *) &one, matL_des,
(float *) ALU_data, ALU_i, ALU_j, matAL_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
            /* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (float *) &one, matU_des,
(float *) ALU_data, ALU_i, ALU_j, matAU_info,
(float *) ftemp_data, (float *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* residual, we should not touch xtemp for now
* r = R*(f-PAQx)
*/
hypre_ParCSRMatrixMatvec(mone, Aperm, xtemp, one, utemp);
         /* applying the restriction R is more involved here */
/* copy partial data in */
hypre_TMemcpy( rhs_data, utemp_data + nLU, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
/* solve L^{-1} */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBL_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matL_des,
(float *) BLU_data, BLU_i, BLU_j, matBL_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* -EU^{-1}L^{-1} */
hypre_CSRMatrixMatvec(mone, matE_d, ftemp_upper, one, rhs_local);
         /* now solve the Schur complement system S */
if(S)
{
/* if we have a schur complement */
hypre_ParVectorSetConstantValues(x, 0.0);
HYPRE_GMRESSolve(schur_solver,(HYPRE_Matrix)schur_precond,(HYPRE_Vector)rhs,(HYPRE_Vector)x);
/* u = xtemp + P*x */
/* -L^{-1}Fx */
hypre_CSRMatrixMatvec(mone, matF_d, x_local, zero, ftemp_upper);
            /* -U^{-1}L^{-1}Fx */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* now copy data to y_lower */
hypre_TMemcpy( utemp_data + nLU, x_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
hypre_ParVectorAxpy(one, utemp, xtemp);
}
else
{
/* otherwise just apply triangular solves */
if(m > 0)
{
if(isDoublePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSL_info,
(hypre_double *) rhs_data, (hypre_double *) x_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &one, matL_des,
(float *) SLU_data, SLU_i, SLU_j, matSL_info,
(float *) rhs_data, (float *) x_data, ilu_solve_policy, ilu_solve_buffer));
}
if(isDoublePrecision)
{
               /* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSU_info,
(hypre_double *) x_data, (hypre_double *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
               /* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &one, matU_des,
(float *) SLU_data, SLU_i, SLU_j, matSU_info,
(float *) x_data, (float *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* u = xtemp + P*x */
/* -L^{-1}Fx */
hypre_CSRMatrixMatvec(mone, matF_d, rhs_local, zero, ftemp_upper);
            /* -U^{-1}L^{-1}Fx */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* now copy data to y_lower */
hypre_TMemcpy( utemp_data + nLU, rhs_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
hypre_ParVectorAxpy(one, utemp, xtemp);
}
/* perm back */
HYPRE_THRUST_CALL(scatter,xtemp_data, xtemp_data + n, perm, ftemp_data);
         /* done, now everything is in ftemp, update the solution */
hypre_ParVectorAxpy(one, ftemp, u);
}
break;
}
return hypre_error_flag;
}
#endif
HYPRE_Int
hypre_ILUSolveRAPGMRESHOST(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int *perm,
HYPRE_Int nLU, hypre_ParCSRMatrix *L, HYPRE_Real *D, hypre_ParCSRMatrix *U,
hypre_ParCSRMatrix *mL, HYPRE_Real *mD, hypre_ParCSRMatrix *mU,
hypre_ParVector *ftemp, hypre_ParVector *utemp,
hypre_ParVector *xtemp, hypre_ParVector *ytemp,
HYPRE_Solver schur_solver, HYPRE_Solver schur_precond,
hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end)
{
//#pragma omp parallel
// printf("threads %d\n",omp_get_num_threads());
/* data objects for communication */
// MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* data objects for L and U */
hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L);
HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag);
HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag);
HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag);
hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U);
HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag);
HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag);
HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag);
hypre_CSRMatrix *mL_diag = hypre_ParCSRMatrixDiag(mL);
HYPRE_Real *mL_diag_data = hypre_CSRMatrixData(mL_diag);
HYPRE_Int *mL_diag_i = hypre_CSRMatrixI(mL_diag);
HYPRE_Int *mL_diag_j = hypre_CSRMatrixJ(mL_diag);
hypre_CSRMatrix *mU_diag = hypre_ParCSRMatrixDiag(mU);
HYPRE_Real *mU_diag_data = hypre_CSRMatrixData(mU_diag);
HYPRE_Int *mU_diag_i = hypre_CSRMatrixI(mU_diag);
HYPRE_Int *mU_diag_j = hypre_CSRMatrixJ(mU_diag);
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
hypre_Vector *xtemp_local = NULL;
HYPRE_Real *xtemp_data = NULL;
hypre_Vector *ytemp_local = NULL;
HYPRE_Real *ytemp_data = NULL;
if(xtemp)
{
/* xtemp might be null when we have no Schur complement */
xtemp_local = hypre_ParVectorLocalVector(xtemp);
xtemp_data = hypre_VectorData(xtemp_local);
ytemp_local = hypre_ParVectorLocalVector(ytemp);
ytemp_data = hypre_VectorData(ytemp_local);
}
HYPRE_Real alpha;
HYPRE_Real beta;
HYPRE_Int i, j, k1, k2, col;
/* problem size */
HYPRE_Int n = hypre_CSRMatrixNumRows(L_diag);
HYPRE_Int m = n - nLU;
/* other data objects for computation */
//hypre_Vector *f_local;
//HYPRE_Real *f_data;
hypre_Vector *rhs_local;
HYPRE_Real *rhs_data;
hypre_Vector *x_local;
HYPRE_Real *x_data;
/* begin */
beta = 1.0;
alpha = -1.0;
if(m > 0)
{
/* setup vectors for solve */
rhs_local = hypre_ParVectorLocalVector(rhs);
rhs_data = hypre_VectorData(rhs_local);
x_local = hypre_ParVectorLocalVector(x);
x_data = hypre_VectorData(x_local);
}
/* only support RAP with partial factorized W and Z */
/* compute residual */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* A-smoothing f_temp = [UA \ LA \ (f_temp[perm])] */
/* permuted L solve */
for(i = 0 ; i < n ; i ++)
{
utemp_data[i] = ftemp_data[perm[i]];
k1 = L_diag_i[i] ; k2 = L_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
col = L_diag_j[j];
utemp_data[i] -= L_diag_data[j] * utemp_data[col];
}
}
if(!xtemp)
{
/* in this case, we don't have a Schur complement */
/* U solve */
for(i = n-1 ; i >= 0 ; i --)
{
ftemp_data[perm[i]] = utemp_data[i];
k1 = U_diag_i[i] ; k2 = U_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
col = U_diag_j[j];
ftemp_data[perm[i]] -= U_diag_data[j] * ftemp_data[perm[col]];
}
ftemp_data[perm[i]] *= D[i];
}
hypre_ParVectorAxpy(beta, ftemp, u);
return hypre_error_flag;
}
/* U solve */
for(i = n-1 ; i >= 0 ; i --)
{
xtemp_data[perm[i]] = utemp_data[i];
k1 = U_diag_i[i] ; k2 = U_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
col = U_diag_j[j];
xtemp_data[perm[i]] -= U_diag_data[j] * xtemp_data[perm[col]];
}
xtemp_data[perm[i]] *= D[i];
}
/* coarse-grid correction */
/* now f_temp is the result of A-smoothing
* rhs = R*(b - Ax)
* */
// utemp = (ftemp - A*xtemp)
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, xtemp, beta, ftemp, utemp);
// R = [-L21 L\inv, I]
if( m > 0)
{
/* first is L solve */
for(i = 0 ; i < nLU ; i ++)
{
ytemp_data[i] = utemp_data[perm[i]];
k1 = mL_diag_i[i] ; k2 = mL_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
col = mL_diag_j[j];
ytemp_data[i] -= mL_diag_data[j] * ytemp_data[col];
}
}
/* apply -W * ytemp on this, and take care of the I part */
for(i = nLU ; i < n ; i ++)
{
rhs_data[i - nLU] = utemp_data[perm[i]];
k1 = mL_diag_i[i] ; k2 = u_end[i];
for(j = k1 ; j < k2 ; j ++)
{
col = mL_diag_j[j];
rhs_data[i - nLU] -= mL_diag_data[j] * ytemp_data[col];
}
}
}
/* now the rhs is ready */
hypre_SeqVectorSetConstantValues(x_local, 0.0);
HYPRE_GMRESSolve(schur_solver,(HYPRE_Matrix)schur_precond,(HYPRE_Vector)rhs,(HYPRE_Vector)x);
if(m > 0)
{
/*
for(i = 0 ; i < m ; i ++)
{
x_data[i] = rhs_data[i];
k1 = u_end[i+nLU] ; k2 = mL_diag_i[i+nLU+1];
for(j = k1 ; j < k2 ; j ++)
{
col = mL_diag_j[j];
x_data[i] -= mL_diag_data[j] * x_data[col-nLU];
}
}
for(i = m-1 ; i >= 0 ; i --)
{
rhs_data[i] = x_data[i];
k1 = mU_diag_i[i+nLU] ; k2 = mU_diag_i[i+1+nLU];
for(j = k1 ; j < k2 ; j ++)
{
col = mU_diag_j[j];
rhs_data[i] -= mU_diag_data[j] * rhs_data[col-nLU];
}
rhs_data[i] *= mD[i];
}
*/
/* after solve, update x = x + Pv
* that is, xtemp = xtemp + P*x
*/
/* first compute P*x
* P = [ -U\inv U_12 ]
* [ I ]
*/
/* matvec */
for(i = 0 ; i < nLU ; i ++)
{
ytemp_data[i] = 0.0;
k1 = u_end[i] ; k2 = mU_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
col = mU_diag_j[j];
ytemp_data[i] -= mU_diag_data[j] * x_data[col-nLU];
}
}
/* U solve */
for(i = nLU-1 ; i >= 0 ; i --)
{
ftemp_data[perm[i]] = ytemp_data[i];
k1 = mU_diag_i[i] ; k2 = u_end[i];
for(j = k1 ; j < k2 ; j ++)
{
col = mU_diag_j[j];
ftemp_data[perm[i]] -= mU_diag_data[j] * ftemp_data[perm[col]];
}
ftemp_data[perm[i]] *= mD[i];
}
/* update with I */
for(i = nLU ; i < n ; i ++)
{
ftemp_data[perm[i]] = x_data[i-nLU];
}
hypre_ParVectorAxpy(beta, ftemp, u);
}
hypre_ParVectorAxpy(beta, xtemp, u);
return hypre_error_flag;
}
/* solve functions for NSH */
/*--------------------------------------------------------------------
* hypre_NSHSolve
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_NSHSolve( void *nsh_vdata,
hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
// HYPRE_Int i;
hypre_ParNSHData *nsh_data = (hypre_ParNSHData*) nsh_vdata;
/* get matrices */
hypre_ParCSRMatrix *matA = hypre_ParNSHDataMatA(nsh_data);
hypre_ParCSRMatrix *matM = hypre_ParNSHDataMatM(nsh_data);
HYPRE_Int iter, num_procs, my_id;
hypre_ParVector *F_array = hypre_ParNSHDataF(nsh_data);
hypre_ParVector *U_array = hypre_ParNSHDataU(nsh_data);
/* get settings */
HYPRE_Real tol = hypre_ParNSHDataTol(nsh_data);
HYPRE_Int logging = hypre_ParNSHDataLogging(nsh_data);
HYPRE_Int print_level = hypre_ParNSHDataPrintLevel(nsh_data);
HYPRE_Int max_iter = hypre_ParNSHDataMaxIter(nsh_data);
HYPRE_Real *norms = hypre_ParNSHDataRelResNorms(nsh_data);
hypre_ParVector *Ftemp = hypre_ParNSHDataFTemp(nsh_data);
hypre_ParVector *Utemp = hypre_ParNSHDataUTemp(nsh_data);
hypre_ParVector *residual;
HYPRE_Real alpha = -1.0;
HYPRE_Real beta = 1.0;
HYPRE_Real conv_factor = 0.0;
HYPRE_Real resnorm = 1.0;
HYPRE_Real init_resnorm = 0.0;
HYPRE_Real rel_resnorm;
HYPRE_Real rhs_norm = 0.0;
HYPRE_Real old_resnorm;
HYPRE_Real ieee_check = 0.;
HYPRE_Real operat_cmplxty = hypre_ParNSHDataOperatorComplexity(nsh_data);
HYPRE_Int Solve_err_flag;
/* problem size */
// HYPRE_Int n = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
/* begin */
if(logging > 1)
{
residual = hypre_ParNSHDataResidual(nsh_data);
}
hypre_ParNSHDataNumIterations(nsh_data) = 0;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
/*-----------------------------------------------------------------------
* Write the solver parameters
*-----------------------------------------------------------------------*/
if (my_id == 0 && print_level > 1)
{
hypre_NSHWriteSolverParams(nsh_data);
}
/*-----------------------------------------------------------------------
* Initialize the solver error flag
*-----------------------------------------------------------------------*/
Solve_err_flag = 0;
/*-----------------------------------------------------------------------
* write some initial info
*-----------------------------------------------------------------------*/
if (my_id == 0 && print_level > 1 && tol > 0.)
{
hypre_printf("\n\n Newton–Schulz–Hotelling SOLVER SOLUTION INFO:\n");
}
/*-----------------------------------------------------------------------
* Compute initial residual and print
*-----------------------------------------------------------------------*/
if (print_level > 1 || logging > 1 || tol > 0.)
{
if ( logging > 1 )
{
hypre_ParVectorCopy(f, residual );
if (tol > 0.0)
{
hypre_ParCSRMatrixMatvec(alpha, A, u, beta, residual );
}
resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual ));
}
else
{
hypre_ParVectorCopy(f, Ftemp);
if (tol > 0.0)
{
hypre_ParCSRMatrixMatvec(alpha, A, u, beta, Ftemp);
}
resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp));
}
      /* Since it does not diminish performance, attempt to return an error flag
and notify users when they supply bad input. */
if (resnorm != 0.)
{
ieee_check = resnorm/resnorm; /* INF -> NaN conversion */
}
if (ieee_check != ieee_check)
{
/* ...INFs or NaNs in input can make ieee_check a NaN. This test
for ieee_check self-equality works on all IEEE-compliant compilers/
machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
by <NAME>, May 31, 1996. Currently (July 2002) this paper may be
found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
if (print_level > 0)
{
hypre_printf("\n\nERROR detected by Hypre ... BEGIN\n");
hypre_printf("ERROR -- hypre_NSHSolve: INFs and/or NaNs detected in input.\n");
hypre_printf("User probably placed non-numerics in supplied A, x_0, or b.\n");
hypre_printf("ERROR detected by Hypre ... END\n\n\n");
}
hypre_error(HYPRE_ERROR_GENERIC);
return hypre_error_flag;
}
init_resnorm = resnorm;
rhs_norm = sqrt(hypre_ParVectorInnerProd(f, f));
if (rhs_norm > HYPRE_REAL_EPSILON)
{
rel_resnorm = init_resnorm / rhs_norm;
}
else
{
/* rhs is zero, return a zero solution */
hypre_ParVectorSetConstantValues(U_array, 0.0);
if(logging > 0)
{
rel_resnorm = 0.0;
hypre_ParNSHDataFinalRelResidualNorm(nsh_data) = rel_resnorm;
}
return hypre_error_flag;
}
}
else
{
rel_resnorm = 1.;
}
if (my_id == 0 && print_level > 1)
{
hypre_printf(" relative\n");
hypre_printf(" residual factor residual\n");
hypre_printf(" -------- ------ --------\n");
hypre_printf(" Initial %e %e\n",init_resnorm,
rel_resnorm);
}
matA = A;
U_array = u;
F_array = f;
/************** Main Solver Loop - always do 1 iteration ************/
iter = 0;
while ((rel_resnorm >= tol || iter < 1)
&& iter < max_iter)
{
/* Do one solve on e = Mr */
hypre_NSHSolveInverse(matA, f, u, matM, Utemp, Ftemp);
/*---------------------------------------------------------------
* Compute residual and residual norm
*----------------------------------------------------------------*/
if (print_level > 1 || logging > 1 || tol > 0.)
{
old_resnorm = resnorm;
if ( logging > 1 ) {
hypre_ParVectorCopy(F_array, residual);
hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, residual );
resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual ));
}
else {
hypre_ParVectorCopy(F_array, Ftemp);
hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, Ftemp);
resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp));
}
if (old_resnorm) conv_factor = resnorm / old_resnorm;
else conv_factor = resnorm;
if (rhs_norm > HYPRE_REAL_EPSILON)
{
rel_resnorm = resnorm / rhs_norm;
}
else
{
rel_resnorm = resnorm;
}
norms[iter] = rel_resnorm;
}
++iter;
hypre_ParNSHDataNumIterations(nsh_data) = iter;
hypre_ParNSHDataFinalRelResidualNorm(nsh_data) = rel_resnorm;
if (my_id == 0 && print_level > 1)
{
hypre_printf(" NSHSolve %2d %e %f %e \n", iter,
resnorm, conv_factor, rel_resnorm);
}
}
/* check convergence within max_iter */
if (iter == max_iter && tol > 0.)
{
Solve_err_flag = 1;
hypre_error(HYPRE_ERROR_CONV);
}
/*-----------------------------------------------------------------------
* Print closing statistics
* Add operator and grid complexity stats
*-----------------------------------------------------------------------*/
if (iter > 0 && init_resnorm)
{
conv_factor = pow((resnorm/init_resnorm),(1.0/(HYPRE_Real) iter));
}
else
{
conv_factor = 1.;
}
if (print_level > 1)
{
/*** compute operator and grid complexity (fill factor) here ?? ***/
if (my_id == 0)
{
if (Solve_err_flag == 1)
{
hypre_printf("\n\n==============================================");
hypre_printf("\n NOTE: Convergence tolerance was not achieved\n");
hypre_printf(" within the allowed %d iterations\n",max_iter);
hypre_printf("==============================================");
}
hypre_printf("\n\n Average Convergence Factor = %f \n",conv_factor);
hypre_printf(" operator = %f\n",operat_cmplxty);
}
}
return hypre_error_flag;
}
/* NSH solve
* Simply a matvec on residual with approximate inverse
* A: original matrix
* f: rhs
* u: solution
* M: approximate inverse
* ftemp, utemp: working vectors
*/
HYPRE_Int
hypre_NSHSolveInverse(hypre_ParCSRMatrix *A, hypre_ParVector *f,
hypre_ParVector *u, hypre_ParCSRMatrix *M,
hypre_ParVector *ftemp, hypre_ParVector *utemp)
{
HYPRE_Real alpha;
HYPRE_Real beta;
/* begin */
alpha = -1.0;
beta = 1.0;
/* r = f-Au */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* e = Mr */
hypre_ParCSRMatrixMatvec(1.0, M, ftemp, 0.0, utemp);
/* u = u + e */
hypre_ParVectorAxpy(beta, utemp, u);
return hypre_error_flag;
}
| 53,555 |
1,738 | <filename>dev/Code/CryEngine/RenderDll/Common/RendElements/TerrainUtils/VTWrapper.h
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#ifdef LY_TERRAIN_RUNTIME
#pragma once
#include <AzCore/std/containers/queue.h>
#include <AzCore/std/containers/set.h>
#include <AzCore/std/containers/unordered_map.h>
#include <AzCore/std/containers/vector.h>
#include <AzCore/std/functional.h>
#include <Terrain/Bus/TerrainProviderBus.h>
#include "LRUCacheQueue.h"
//#define VT_VERBOSE_LOGGING
//
// VTWrapper.h defines a set of utility classes that handle texture management for Terrain::VirtualTexture (see VirtualTexture.h)
//
// Terrain::VTWrapper
// Wraps texture management for a VirtualTexture cache, where tiles are addressed by tile index.
// Tiles are indexed much as individual pixels are addressed within each mip level.
//
// Terrain::VTWrapper manages a virtual tile page table, mapping each virtual tile resident in
// the cache to its corresponding physical tile.
//
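//
// Illustrative usage sketch (not part of the original header; worldSizeX/worldSizeY,
// x, y, mipLevel and maxRequestsPerFrame below are assumed caller-provided values):
//
//     Terrain::VTWrapper::VTWrapperDesc desc;
//     desc.CalculateVirtualTextureSize(worldSizeX, worldSizeY);
//     Terrain::VTWrapper vtWrapper(desc);
//     vtWrapper.SetTileRequestCallback(
//         [](const AZStd::vector<CTexture*>& physicalTextures,
//            const Terrain::VirtualTileRequestInfo& requestInfo)
//         {
//             // render or stream the requested tile into the physical cache here
//             return true;
//         });
//     vtWrapper.RequestTile(x, y, mipLevel);      // queues the tile if it is not resident
//     vtWrapper.Update(maxRequestsPerFrame);      // services queued tile requests
//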
namespace Terrain
{
// VirtualTile: a virtual tile address
class VirtualTile
{
public:
int m_x = -1;
int m_y = -1;
int m_mipLevel = -1;
VirtualTile() = default;
bool operator==(const VirtualTile& rhs) const;
};
struct VirtualTileComparator
{
bool operator() (VirtualTile a, VirtualTile b);
};
class PhysicalTile
{
public:
int m_x = -1;
int m_y = -1;
VirtualTile m_virtualAddr;
PhysicalTile() = default;
PhysicalTile(int x, int y);
bool operator==(const PhysicalTile& rhs) const;
};
struct IndirectionTileInfo
{
CTexture* m_indirectionTilePtr = nullptr;
int m_vTileOffsetX = -1;
int m_vTileOffsetY = -1;
};
}
// TileIndex, VirtualTile, and PhysicalTile hashing
namespace AZStd
{
template <>
struct hash <Terrain::VirtualTile>
{
inline size_t operator()(const Terrain::VirtualTile& vTileAddr) const
{
AZ::u64 mip = (static_cast<AZ::u64>(vTileAddr.m_mipLevel) << 60) & 0xF000000000000000; // mip level top 4 bits
AZ::u64 tileX = static_cast<AZ::u64>(vTileAddr.m_x) & 0x000000003FFFFFFF; // x lower bits 29 to 0
            AZ::u64 tileY = (static_cast<AZ::u64>(vTileAddr.m_y) << 30) & 0x0FFFFFFFC0000000; // y higher bits 59 to 30
return std::hash<AZ::u64>{} (mip | tileX | tileY);
}
};
template <>
struct hash <Terrain::PhysicalTile>
{
inline size_t operator()(const Terrain::PhysicalTile& pTileAddr) const
{
AZ::u32 tileX = static_cast<AZ::u32>(pTileAddr.m_x) & 0x0000FFFF; // x lower bits 15 to 0
            AZ::u32 tileY = (static_cast<AZ::u32>(pTileAddr.m_y) << 16) & 0xFFFF0000; // y higher bits 31 to 16
return std::hash<AZ::u32>{} (tileX | tileY);
}
};
}
namespace Terrain
{
// Forward declaration
class IndirectionMapCache;
class VirtualTileRequestInfo
{
public:
Viewport2D m_physicalViewportDest;
VirtualTile m_virtualTileAddress;
VirtualTileRequestInfo() = default;
};
typedef AZStd::function<bool(const AZStd::vector<CTexture*>& physicalTextures, const VirtualTileRequestInfo& virtualTileRequestInfo)> TileRequestCallback;
class VTWrapper
{
public:
struct VTWrapperDesc
{
const char* m_name = nullptr;
ETEX_Format* m_formats = nullptr;
ColorF* m_clearColors = nullptr;
int m_textureCount = 1;
float m_texelsPerMeter = 1.0f;
int m_virtualTextureSizeX = 2048;
int m_virtualTextureSizeY = 2048;
int m_mipLevels = 1;
int m_tileSize = 64;
int m_tilePadding = 1;
int m_physicalTextureCacheSize = 2048;
int m_initialIndirectionTileCacheSize = 1;
void CalculateVirtualTextureSize(int worldSizeXMeters, int worldSizeYMeters);
};
VTWrapper(const VTWrapperDesc& desc);
~VTWrapper();
///////////////////////////////
// Setters/Getters
CTexture* GetPhysicalTexture(int index)
{
return static_cast<CTexture*>(m_physicalTextureCache[index].get());
}
void GetIndirectionTile(int vTileX, int vTileY, int mipLevel, IndirectionTileInfo& indirectionTileInfo);
int GetMipLevels() const
{
return m_mipLevels;
}
int GetTileSize() const
{
return m_tileSize;
}
int GetTilePadding() const
{
return m_tilePadding;
}
int GetVirtualTextureSizeX() const
{
return m_virtualTextureSizeX;
}
int GetVirtualTextureSizeY() const
{
return m_virtualTextureSizeY;
}
int GetPhysicalTextureCacheSize() const
{
return m_physicalTextureCacheSize;
}
int GetPhysicalTextureCount() const
{
return m_physicalTextureCount;
}
// Returns the size of the virtual texture represented by a single indirection tile
int GetVirtualTextureChunkSize() const;
void SetTileRequestCallback(TileRequestCallback funcPtr)
{
m_tileRequestCallback = funcPtr;
}
AZStd::size_t GetActiveRequestCount() const
{
return m_tileRequestQueue.size();
}
///////////////////////////////
// Functionality
void ClearCache();
// Perform a request lookup based on the virtual addr
// If the virtual tile exists in the cache, then the LRU cache queue is updated
// If the virtual tile does not exist in the cache, then we queue a request
bool RequestTile(int x, int y, int mipLevel);
// Remove the virtual tile from the cache if it exists
bool ClearTile(int x, int y, int mipLevel);
///////////////////////////////
// Update pump
// Process 'maxRequestsToProcess' virtual tile requests
void Update(int maxRequestsToProcess);
///////////////////////////////
// Utility
// Convert physical tile coordinates to a viewport that takes into account tile padding
Viewport2D GetPhysicalTileViewport(const PhysicalTile& physicalTile);
protected:
void ResetLRUCache();
// Virtualized texture size
int m_virtualTextureSizeX;
int m_virtualTextureSizeY;
int m_mipLevels;
// Physical cache
int m_physicalTextureCacheSize;
int m_physicalTileCountX;
int m_physicalTextureCount;
AZStd::vector<_smart_ptr<ITexture> > m_physicalTextureCache;
// Tile
int m_tileSize;
int m_tilePadding;
// Indirection map
AZStd::unique_ptr<IndirectionMapCache> m_indirectionMapCachePtr;
// Page table: mapping virtual tiles to their respective physical tile address
typedef AZStd::unordered_map<VirtualTile, PhysicalTile> VirtualTileMap;
VirtualTileMap m_virtualTileMap;
LRUCacheQueue<PhysicalTile> m_lruCacheQueue;
// VirtualTile request queue
AZStd::set<VirtualTile, VirtualTileComparator> m_tileRequestQueue;
// Request callback
TileRequestCallback m_tileRequestCallback;
};
}
#endif
| 3,266 |
848 | /*
* Copyright 2019 Xilinx Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "priorbox.hpp"
#include <utility>
#include <cmath>
namespace vitis { namespace ai { namespace medicaldetection {
PriorBox::PriorBox(const std::vector<int>& input_shape,
const std::vector<int>& feature_shapes,
const std::vector<int>& min_sizes,
const std::vector<int>& max_sizes,
const std::vector<float>& aspect_ratios,
const std::vector<int>& steps,
float offset )
{
/*
input_shape = [320, 320]
feature_shapes = [(40, 40), (20, 20), (10, 10), (5, 5)]
min_sizes = [(32,), (64,), (128,), (256,)]
max_sizes = [(64,), (128,), (256,), (315,)]
aspect_ratios = [(2.,), (2.,), (2.,), (2.,)]
steps = [(8, 8), (16, 16), (32, 32), (64, 64)]
offset=0.5
*/
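  /*
   For each feature map, four prior (w, h) pairs are generated per cell: the
   min_size square, the sqrt(min_size * max_size) square, and two rectangles
   stretched/compressed by sqrt(aspect_ratio). Every pair is replicated at each
   cell center ((w + offset)/f_w_s, (h + offset)/f_h_s) and stored both as
   (cx, cy, w, h) in prior_boxes and as (left, top, right, bottom) in
   prior_boxes_ltrb.
   */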
float f_h_s, f_w_s;
for(auto i=0u; i<feature_shapes.size()/2; i++) {
f_h_s = input_shape[0]/steps[i*2];
f_w_s = input_shape[1]/steps[i*2+1];
std::vector<std::pair<float, float>> prior_whs_ratios;
float p_w, p_h;
p_w = float(min_sizes[i])/input_shape[1];
p_h = float(min_sizes[i])/input_shape[0];
prior_whs_ratios.emplace_back(std::make_pair(p_w, p_h));
auto size = sqrt(min_sizes[i] * max_sizes[i]);
p_w = size/input_shape[1];
p_h = size/input_shape[0];
prior_whs_ratios.emplace_back(std::make_pair(p_w, p_h));
auto s_alpha = sqrt(aspect_ratios[i]);
p_w = float(min_sizes[i])/input_shape[1];
p_h = float(min_sizes[i])/input_shape[0];
prior_whs_ratios.emplace_back(std::make_pair(p_w*s_alpha, p_h/s_alpha));
prior_whs_ratios.emplace_back(std::make_pair(p_w/s_alpha, p_h*s_alpha));
for(auto h = 0; h< feature_shapes[i*2]; h++){
for(auto w = 0; w< feature_shapes[i*2+1]; w++){
auto cx = (w + offset) / f_w_s;
auto cy = (h + offset) / f_h_s;
for (auto& it: prior_whs_ratios) {
prior_boxes.emplace_back(std::vector<float>( {cx, cy, std::get<0>(it), std::get<1>(it) } ));
prior_boxes_ltrb.emplace_back(std::vector<float>( {
cx - std::get<0>(it)/2,
cy - std::get<1>(it)/2,
cx + std::get<0>(it)/2,
cy + std::get<1>(it)/2 } ));
}
} // end for w
} // end for h
} // end for i
}
}}}
| 1,298 |
14,668 | // Copyright (c) 2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Byte level differential compression algorithm used by Courgette.
#ifndef COURGETTE_SIMPLE_DELTA_H_
#define COURGETTE_SIMPLE_DELTA_H_
#include "courgette/courgette.h"
#include "courgette/streams.h"
namespace courgette {
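// GenerateSimpleDelta writes to |delta| a byte-level diff of |old| against |target|;
// ApplySimpleDelta, given the same |old| stream and that |delta|, reconstructs |target|.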
Status ApplySimpleDelta(SourceStream* old, SourceStream* delta,
SinkStream* target);
Status GenerateSimpleDelta(SourceStream* old, SourceStream* target,
SinkStream* delta);
} // namespace courgette
#endif // COURGETTE_SIMPLE_DELTA_H_
| 255 |
395 | from backpack.extensions.firstorder.gradient.convtranspose2d import GradConvTranspose2d
from backpack.extensions.firstorder.sum_grad_squared.convtranspose2d import (
SGSConvTranspose2d,
)
from .variance_base import VarianceBaseModule
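# VarianceConvTranspose2d computes the element-wise variance of the individual
# (per-sample) gradients in a mini-batch for ConvTranspose2d parameters. The variance
# is assembled by VarianceBaseModule from the summed gradient (GradConvTranspose2d)
# and the sum of squared per-sample gradients (SGSConvTranspose2d).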
class VarianceConvTranspose2d(VarianceBaseModule):
def __init__(self):
super().__init__(
params=["bias", "weight"],
grad_extension=GradConvTranspose2d(),
sgs_extension=SGSConvTranspose2d(),
)
| 201 |
410 | // Copyright(c) 2017 POLYGONTEK
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http ://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Precompiled.h"
#include "Render/ParticleSystem.h"
#include "Asset/Asset.h"
#include "Asset/Resource.h"
#include "Asset/GuidMapper.h"
BE_NAMESPACE_BEGIN
OBJECT_DECLARATION("Particle System", ParticleSystemResource, Resource)
BEGIN_EVENTS(ParticleSystemResource)
END_EVENTS
void ParticleSystemResource::RegisterProperties() {
}
ParticleSystemResource::ParticleSystemResource() {
particleSystem = nullptr;
}
ParticleSystemResource::~ParticleSystemResource() {
if (particleSystem) {
particleSystemManager.ReleaseParticleSystem(particleSystem);
}
}
ParticleSystem *ParticleSystemResource::GetParticleSystem() {
if (particleSystem) {
return particleSystem;
}
const Str particleSystemPath = resourceGuidMapper.Get(asset->GetGuid());
particleSystem = particleSystemManager.GetParticleSystem(particleSystemPath);
return particleSystem;
}
void ParticleSystemResource::Rename(const Str &newName) {
const Str particleSystemPath = resourceGuidMapper.Get(asset->GetGuid());
ParticleSystem *existingParticleSystem = particleSystemManager.FindParticleSystem(particleSystemPath);
if (existingParticleSystem) {
particleSystemManager.RenameParticleSystem(existingParticleSystem, newName);
}
}
bool ParticleSystemResource::Reload() {
const Str particleSystemPath = resourceGuidMapper.Get(asset->GetGuid());
ParticleSystem *existingParticleSystem = particleSystemManager.FindParticleSystem(particleSystemPath);
if (existingParticleSystem) {
existingParticleSystem->Reload();
return true;
}
return false;
}
bool ParticleSystemResource::Save() {
const Str particleSystemPath = resourceGuidMapper.Get(asset->GetGuid());
ParticleSystem *existingParticleSystem = particleSystemManager.FindParticleSystem(particleSystemPath);
if (existingParticleSystem) {
existingParticleSystem->Write(existingParticleSystem->GetHashName());
return true;
}
return false;
}
BE_NAMESPACE_END
| 820 |
852 | import FWCore.ParameterSet.Config as cms
process = cms.Process("SKIM")
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.4 $'),
name = cms.untracked.string('$Source: /cvs/CMSSW/CMSSW/DPGAnalysis/Skims/python/EGPDSkim_cfg.py,v $'),
annotation = cms.untracked.string('EGamma skim')
)
#
#
# This is for testing purposes.
#
#
##run143960
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/data/Run2010A/EG/RECO/v4/000/143/960/84DEE17A-44B1-DF11-B844-001D09F29849.root'
),
secondaryFileNames = cms.untracked.vstring(
'/store/data/Run2010A/EG/RAW/v1/000/143/960/C40C9318-0FB1-DF11-A974-0030487CBD0A.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
#------------------------------------------
# Load standard sequences.
#------------------------------------------
process.load('Configuration/StandardSequences/MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration/StandardSequences/GeometryIdeal_cff')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'GR10_P_V8::All'
process.load("Configuration/StandardSequences/RawToDigi_Data_cff")
process.load("Configuration/StandardSequences/Reconstruction_cff")
process.load('Configuration/EventContent/EventContent_cff')
#drop collections created on the fly
process.FEVTEventContent.outputCommands.append("drop *_MEtoEDMConverter_*_*")
process.FEVTEventContent.outputCommands.append("drop *_*_*_SKIM")
#
# Load common sequences
#
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskAlgoTrigConfig_cff')
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
#################################WZFilter############################################
process.hltFilter = cms.EDFilter("HLTHighLevel",
TriggerResultsTag = cms.InputTag("TriggerResults","","HLT"),
HLTPaths = cms.vstring(
# "HLT_Photon15_L1R",
# "HLT_Photon15_Cleaned_L1R",
# "HLT_Photon20_Cleaned_L1R",
"HLT_Ele15_LW_L1R",
"HLT_Ele15_SW_L1R",
"HLT_Ele15_SW_CaloEleId_L1R",
"HLT_Ele17_SW_CaloEleId_L1R",
"HLT_Ele17_SW_L1R",
"HLT_Ele17_SW_TightEleId_L1R",
"HLT_Ele17_SW_TightCaloEleId_SC8HE_L1R"
),
eventSetupPathsKey = cms.string(''),
andOr = cms.bool(True),
throw = cms.bool(False),
saveTags = cms.bool(False)
)
process.load("DPGAnalysis/Skims/WZinterestingEventFilter_cfi")
process.WZfilter = cms.Path(process.hltFilter*process.WZInterestingEventSelector)
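# The WZ skim keeps events that fire at least one of the single-electron HLT paths listed
# above (andOr = True; throw = False tolerates paths missing from the menu) and that also
# pass the WZInterestingEventSelector.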
# Output definition
process.outWZfilter = cms.OutputModule("PoolOutputModule",
# splitLevel = cms.untracked.int32(0),
outputCommands = process.FEVTEventContent.outputCommands,
fileName = cms.untracked.string('/tmp/azzi/EGMWZ_filter.root'),
dataset = cms.untracked.PSet(dataTier = cms.untracked.string('RAW-RECO'),
filterName = cms.untracked.string('EGMWZFilter')),
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('WZfilter')
))
#################################logerrorharvester############################################
process.load("FWCore.Modules.logErrorFilter_cfi")
from Configuration.StandardSequences.RawToDigi_Data_cff import gtEvmDigis
process.gtEvmDigis = gtEvmDigis.clone()
process.stableBeam = cms.EDFilter("HLTBeamModeFilter",
L1GtEvmReadoutRecordTag = cms.InputTag("gtEvmDigis"),
AllowedBeamMode = cms.vuint32(11),
saveTags = cms.bool(False)
)
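# Beam mode 11 corresponds to stable beams, so this path selects events recorded during
# stable beams that also produced error-level log messages (logErrorFilter).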
process.logerrorpath=cms.Path(process.gtEvmDigis+process.stableBeam+process.logErrorFilter)
process.outlogerr = cms.OutputModule("PoolOutputModule",
outputCommands = process.FEVTEventContent.outputCommands,
fileName = cms.untracked.string('/tmp/azzi/logerror_filter.root'),
dataset = cms.untracked.PSet(dataTier = cms.untracked.string('RAW-RECO'),
filterName = cms.untracked.string('Skim_logerror')),
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring("logerrorpath")
))
#======================
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.outpath = cms.EndPath(process.outlogerr+process.outWZfilter)
| 2,650 |
14,668 | <gh_stars>1000+
// Copyright 2014 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef CRASHPAD_UTIL_MISC_INITIALIZATION_INITIALIZATION_STATE_H_
#define CRASHPAD_UTIL_MISC_INITIALIZATION_INITIALIZATION_STATE_H_
#include <stdint.h>
namespace crashpad {
//! \brief Tracks whether data are initialized.
//!
//! Objects of this type track whether the data they’re guarding are
//! initialized. The three possible states are uninitialized (the initial
//! state), initializing, and valid. As the guarded data are initialized, an
//! InitializationState object will normally transition through these three
//! states. A fourth state corresponds to the destruction of objects of this
//! type, making it less likely that a use-after-free of an InitializationState
//! object will appear in the valid state.
//!
//! If the only purpose for tracking the initialization state of guarded data is
//! to DCHECK when the object is in an unexpected state, use
//! InitializationStateDcheck instead.
class InitializationState {
public:
//! \brief The object’s state.
enum State : uint8_t {
//! \brief The object has not yet been initialized.
kStateUninitialized = 0,
//! \brief The object is being initialized.
//!
  //! This state protects against attempted reinitialization of
//! partially-initialized objects whose initial initialization attempt
//! failed. This state is to be used while objects are initializing, but are
//! not yet fully initialized.
kStateInvalid,
//! \brief The object has been initialized.
kStateValid,
//! \brief The object has been destroyed.
kStateDestroyed,
};
InitializationState() : state_(kStateUninitialized) {}
InitializationState(const InitializationState&) = delete;
InitializationState& operator=(const InitializationState&) = delete;
~InitializationState() { state_ = kStateDestroyed; }
//! \brief Returns `true` if the object’s state is #kStateUninitialized and it
//! is safe to begin initializing it.
bool is_uninitialized() const { return state_ == kStateUninitialized; }
//! \brief Sets the object’s state to #kStateInvalid, marking initialization
//! as being in process.
void set_invalid() { state_ = kStateInvalid; }
//! \brief Sets the object’s state to #kStateValid, marking it initialized.
void set_valid() { state_ = kStateValid; }
  //! \brief Returns `true` if the object’s state is #kStateValid and it has
//! been fully initialized and may be used.
bool is_valid() const { return state_ == kStateValid; }
protected:
//! \brief Returns the object’s state.
//!
//! Consumers of this class should use an is_state_*() method instead.
State state() const { return state_; }
//! \brief Sets the object’s state.
//!
//! Consumers of this class should use a set_state_*() method instead.
void set_state(State state) { state_ = state; }
private:
// state_ is volatile to ensure that it’ll be set by the destructor when it
// runs. Otherwise, optimizations might prevent it from ever being set to
// kStateDestroyed, limiting this class’ ability to catch use-after-free
// errors.
volatile State state_;
};
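// Typical usage sketch (illustrative only; the guarded object and its Initialize()
// method below are assumptions, not part of this header):
//
//   bool GuardedThing::Initialize() {
//     if (!initialization_state_.is_uninitialized())
//       return false;
//     initialization_state_.set_invalid();
//     // ... initialization work that may fail goes here ...
//     initialization_state_.set_valid();
//     return true;
//   }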
} // namespace crashpad
#endif // CRASHPAD_UTIL_MISC_INITIALIZATION_INITIALIZATION_STATE_H_
| 1,126 |
672 | /*
* Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
/*
* Copyright (c) 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <netdb.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#ifdef __STDC__
#include <stdarg.h>
#else
#include <varargs.h>
#endif
#include <crt_externs.h>
#ifdef BUILDING_VARIANT
__private_extern__ int _sl_LogFile; /* fd for log */
__private_extern__ int _sl_connected; /* have done connect */
__private_extern__ int _sl_LogStat; /* status bits, set by openlog() */
__private_extern__ const char *_sl_LogTag; /* string to tag the entry with */
__private_extern__ int _sl_LogFacility; /* default facility code */
__private_extern__ int _sl_LogMask; /* mask of priorities to be logged */
#else /* !BUILDING_VARIANT */
__private_extern__ int _sl_LogFile = -1; /* fd for log */
__private_extern__ int _sl_connected = 0; /* have done connect */
__private_extern__ int _sl_LogStat = 0; /* status bits, set by openlog() */
__private_extern__ const char *_sl_LogTag = NULL; /* string to tag the entry with */
__private_extern__ int _sl_LogFacility = LOG_USER; /* default facility code */
__private_extern__ int _sl_LogMask = 0xff; /* mask of priorities to be logged */
#endif /* BUILDING_VARIANT */
/*
* syslog, vsyslog --
* print message on log file; output is intended for syslogd(8).
*/
void
#ifdef __STDC__
syslog(int pri, const char *fmt, ...)
#else
syslog(pri, fmt, va_alist)
int pri;
char *fmt;
va_dcl
#endif
{
va_list ap;
#ifdef __STDC__
va_start(ap, fmt);
#else
va_start(ap);
#endif
vsyslog(pri, fmt, ap);
va_end(ap);
}
void
vsyslog(pri, fmt, ap)
int pri;
register const char *fmt;
va_list ap;
{
register int cnt;
register char ch, *p, *t;
time_t now;
int fd, saved_errno;
#define TBUF_LEN 2048
#define FMT_LEN 1024
char *stdp, tbuf[TBUF_LEN], fmt_cpy[FMT_LEN];
int tbuf_left, fmt_left, prlen;
#define INTERNALLOG LOG_ERR|LOG_CONS|LOG_PERROR|LOG_PID
/* Check for invalid bits. */
if (pri & ~(LOG_PRIMASK|LOG_FACMASK)) {
syslog(INTERNALLOG,
"syslog: unknown facility/priority: %x", pri);
pri &= LOG_PRIMASK|LOG_FACMASK;
}
/* Check priority against setlogmask values. */
if (!(LOG_MASK(LOG_PRI(pri)) & _sl_LogMask))
return;
saved_errno = errno;
/* Set default facility if none specified. */
if ((pri & LOG_FACMASK) == 0)
pri |= _sl_LogFacility;
/* Build the message. */
/*
* Although it's tempting, we can't ignore the possibility of
* overflowing the buffer when assembling the "fixed" portion
* of the message. Strftime's "%h" directive expands to the
* locale's abbreviated month name, but if the user has the
* ability to construct to his own locale files, it may be
* arbitrarily long.
*/
(void)time(&now);
p = tbuf;
tbuf_left = TBUF_LEN;
#define DEC() \
do { \
if (prlen >= tbuf_left) \
prlen = tbuf_left - 1; \
p += prlen; \
tbuf_left -= prlen; \
} while (0)
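	/*
	 * DEC() advances p past the text just written and shrinks tbuf_left,
	 * clamping prlen first so that a truncated snprintf/strftime result can
	 * never push p beyond the end of tbuf.
	 */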
prlen = snprintf(p, tbuf_left, "<%d>", pri);
DEC();
prlen = strftime(p, tbuf_left, "%h %e %T ", localtime(&now));
DEC();
if (_sl_LogStat & LOG_PERROR)
stdp = p;
if (_sl_LogTag == NULL)
_sl_LogTag = *(*_NSGetArgv());
if (_sl_LogTag != NULL) {
prlen = snprintf(p, tbuf_left, "%s", _sl_LogTag);
DEC();
}
if (_sl_LogStat & LOG_PID) {
prlen = snprintf(p, tbuf_left, "[%d]", getpid());
DEC();
}
if (_sl_LogTag != NULL) {
if (tbuf_left > 1) {
*p++ = ':';
tbuf_left--;
}
if (tbuf_left > 1) {
*p++ = ' ';
tbuf_left--;
}
}
/*
* We wouldn't need this mess if printf handled %m, or if
* strerror() had been invented before syslog().
*/
for (t = fmt_cpy, fmt_left = FMT_LEN; (ch = *fmt); ++fmt) {
if (ch == '%' && fmt[1] == 'm') {
++fmt;
prlen = snprintf(t, fmt_left, "%s",
strerror(saved_errno));
if (prlen >= fmt_left)
prlen = fmt_left - 1;
t += prlen;
fmt_left -= prlen;
} else {
if (fmt_left > 1) {
*t++ = ch;
fmt_left--;
}
}
}
*t = '\0';
prlen = vsnprintf(p, tbuf_left, fmt_cpy, ap);
DEC();
cnt = p - tbuf;
/* Output to stderr if requested. */
if (_sl_LogStat & LOG_PERROR) {
struct iovec iov[2];
iov[0].iov_base = stdp;
iov[0].iov_len = cnt - (stdp - tbuf);
iov[1].iov_base = "\n";
iov[1].iov_len = 1;
(void)writev(STDERR_FILENO, iov, 2);
}
/* Get connected, output the message to the local logger. */
if (!_sl_connected)
openlog(_sl_LogTag, _sl_LogStat | LOG_NDELAY, 0);
if (send(_sl_LogFile, tbuf, cnt, 0) >= 0)
return;
/*
* Output the message to the console; don't worry about blocking,
* if console blocks everything will. Make sure the error reported
* is the one from the syslogd failure.
*/
if (_sl_LogStat & LOG_CONS &&
(fd = open(_PATH_CONSOLE, O_WRONLY, 0)) >= 0) {
struct iovec iov[2];
p = strchr(tbuf, '>') + 1;
iov[0].iov_base = p;
iov[0].iov_len = cnt - (p - tbuf);
iov[1].iov_base = "\r\n";
iov[1].iov_len = 2;
(void)writev(fd, iov, 2);
(void)close(fd);
}
}
#ifndef BUILDING_VARIANT
static struct sockaddr_un SyslogAddr; /* AF_UNIX address of local logger */
void
openlog(ident, logstat, logfac)
const char *ident;
int logstat, logfac;
{
if (ident != NULL)
_sl_LogTag = ident;
_sl_LogStat = logstat;
if (logfac != 0 && (logfac &~ LOG_FACMASK) == 0)
_sl_LogFacility = logfac;
if (_sl_LogFile == -1) {
SyslogAddr.sun_family = AF_UNIX;
(void)strncpy(SyslogAddr.sun_path, _PATH_LOG,
sizeof(SyslogAddr.sun_path));
if (_sl_LogStat & LOG_NDELAY) {
if ((_sl_LogFile = socket(AF_UNIX, SOCK_DGRAM, 0)) == -1)
return;
(void)fcntl(_sl_LogFile, F_SETFD, 1);
}
}
if (_sl_LogFile != -1 && !_sl_connected)
if (connect(_sl_LogFile, (struct sockaddr *)&SyslogAddr, sizeof(SyslogAddr)) == -1) {
(void)close(_sl_LogFile);
_sl_LogFile = -1;
} else
_sl_connected = 1;
}
void
closelog()
{
(void)close(_sl_LogFile);
_sl_LogFile = -1;
_sl_connected = 0;
}
/* setlogmask -- set the log mask level */
int
setlogmask(pmask)
int pmask;
{
int omask;
omask = _sl_LogMask;
if (pmask != 0)
_sl_LogMask = pmask;
return (omask);
}
#endif /* !BUILDING_VARIANT */
| 3,515 |
339 | <filename>integration/broker-tests/tests-integration/tests-amqp/src/test/java/org/wso2/mb/integration/tests/amqp/functional/RedeliveryDelayTestCase.java<gh_stars>100-1000
/*
* Copyright (c) 2018, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.mb.integration.tests.amqp.functional;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.wso2.andes.configuration.enums.AndesConfiguration;
import org.wso2.carbon.andes.stub.AndesAdminServiceBrokerManagerAdminException;
import org.wso2.carbon.authenticator.stub.LogoutAuthenticationExceptionException;
import org.wso2.carbon.automation.engine.context.TestUserMode;
import org.wso2.carbon.integration.common.utils.LoginLogoutClient;
import org.wso2.carbon.integration.common.utils.exceptions.AutomationUtilException;
import org.wso2.carbon.integration.common.utils.mgt.ServerConfigurationManager;
import org.wso2.mb.integration.common.clients.AndesClient;
import org.wso2.mb.integration.common.clients.AndesJMSConsumer;
import org.wso2.mb.integration.common.clients.AndesJMSPublisher;
import org.wso2.mb.integration.common.clients.configurations.AndesJMSConsumerClientConfiguration;
import org.wso2.mb.integration.common.clients.configurations.AndesJMSPublisherClientConfiguration;
import org.wso2.mb.integration.common.clients.exceptions.AndesClientConfigurationException;
import org.wso2.mb.integration.common.clients.exceptions.AndesClientException;
import org.wso2.mb.integration.common.clients.operations.clients.AndesAdminClient;
import org.wso2.mb.integration.common.clients.operations.utils.AndesClientConstants;
import org.wso2.mb.integration.common.clients.operations.utils.AndesClientUtils;
import org.wso2.mb.integration.common.clients.operations.utils.ExchangeType;
import org.wso2.mb.integration.common.clients.operations.utils.JMSAcknowledgeMode;
import org.wso2.mb.integration.common.utils.backend.ConfigurationEditor;
import org.wso2.mb.integration.common.utils.backend.MBIntegrationBaseTest;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.MessageConsumer;
import javax.jms.MessageListener;
import javax.jms.MessageProducer;
import javax.jms.TextMessage;
import javax.naming.NamingException;
import javax.xml.xpath.XPathExpressionException;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;
/**
* Following test cases are related to redelivery delay feature for rejected messages.
*/
public class RedeliveryDelayTestCase extends MBIntegrationBaseTest {
private Log log = LogFactory.getLog(RedeliveryDelayTestCase.class);
/**
* The default andes acknowledgement wait timeout.
*/
private String defaultAndesAckWaitTimeOut = null;
private String defaultAndesRedeliveryDelay = null;
/**
* Initializing test case.
*
* @throws XPathExpressionException
*/
@BeforeClass(alwaysRun = true)
public void init() throws XPathExpressionException, IOException, AutomationUtilException, ConfigurationException {
super.init(TestUserMode.SUPER_TENANT_USER);
// Updating the redelivery attempts to 1 to speed up the test case.
super.serverManager = new ServerConfigurationManager(automationContext);
String defaultMBConfigurationPath = ServerConfigurationManager.getCarbonHome() + File.separator + "wso2" +
File.separator + "broker" + File.separator + "conf" + File.separator +
"broker.xml";
ConfigurationEditor configurationEditor = new ConfigurationEditor(defaultMBConfigurationPath);
// Changing "maximumRedeliveryAttempts" value to "1" in broker.xml
configurationEditor.updateProperty(AndesConfiguration.TRANSPORTS_AMQP_MAXIMUM_REDELIVERY_ATTEMPTS, "1");
// Restarting server
configurationEditor.applyUpdatedConfigurationAndRestartServer(serverManager);
// Get current "AndesAckWaitTimeOut" system property.
defaultAndesAckWaitTimeOut = System.getProperty(AndesClientConstants.ANDES_ACK_WAIT_TIMEOUT_PROPERTY);
// Setting system property "AndesAckWaitTimeOut" for andes
System.setProperty(AndesClientConstants.ANDES_ACK_WAIT_TIMEOUT_PROPERTY, "0");
// Get current "AndesRedeliveryDelay" system property.
defaultAndesRedeliveryDelay = System.getProperty(AndesClientConstants.ANDES_REDELIVERY_DELAY_PROPERTY);
System.setProperty(AndesClientConstants.ANDES_REDELIVERY_DELAY_PROPERTY, "10000");
}
/**
* This test publishes 10 messages and the subscriber rejects the first message and then wait for the redelivered
* message.
* <p/>
* The redelivered message is tested against the same message content with the original message and the timestamps
* are also checked against the original message timestamp to make sure that the message was delayed.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void firstMessageInvalidOnlyQueueMessageListenerTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
long sendCount = 10;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig =
new AndesJMSConsumerClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"firstMessageInvalidOnlyQueue");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig =
new AndesJMSPublisherClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"firstMessageInvalidOnlyQueue");
publisherConfig.setNumberOfMessagesToSend(sendCount);
publisherConfig.setPrintsPerMessageCount(sendCount / 10L);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
MessageConsumer receiver = andesJMSConsumer.getReceiver();
receiver.setMessageListener(new MessageListener() {
private boolean receivedFirstMessage = false;
@Override
public void onMessage(Message message) {
try {
TextMessage textMessage = (TextMessage) message;
if (!receivedFirstMessage && "#0".equals(textMessage.getText())) {
receivedFirstMessage = true;
} else {
message.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
});
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount; i++) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
}
validateMessageContentAndDelay(receivedMessages, 0, 10, "#0");
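        // The rejected "#0" is expected to arrive again as the final (11th) message, roughly one
        // redelivery delay (10 seconds, see init()) after its original delivery.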
Assert.assertEquals(receivedMessages.size(), sendCount + 1, "Message receiving failed.");
}
/**
* This test publishes 10 messages and the subscriber rejects the first message and then wait for the redelivered
* message.
* <p/>
* The redelivered message is tested against the same message content with the original message and the timestamps
* are also checked against the original message timestamp to make sure that the message was delayed.
* Here message receive method is used instead of the message listener to receive messages.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void firstMessageInvalidOnlyQueueMessageReceiverTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
long sendCount = 10;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig =
new AndesJMSConsumerClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"firstMessageInvalidOnlyReceiverQueue");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig =
new AndesJMSPublisherClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"firstMessageInvalidOnlyReceiverQueue");
publisherConfig.setNumberOfMessagesToSend(sendCount);
publisherConfig.setPrintsPerMessageCount(sendCount / 10L);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
final MessageConsumer receiver = andesJMSConsumer.getReceiver();
Thread messageReceivingThread = new Thread() {
private boolean receivedFirstMessage = false;
public void run() {
while (receiver != null) {
try {
TextMessage textMessage = (TextMessage) receiver.receive();
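                        // Leave the first delivery of message #0 unacknowledged so that the broker redelivers it.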
if (!receivedFirstMessage && "#0".equals(textMessage.getText())) {
receivedFirstMessage = true;
} else {
textMessage.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
}
};
messageReceivingThread.start();
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount; i++) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
}
validateMessageContentAndDelay(receivedMessages, 0, 10, "#0");
Assert.assertEquals(receivedMessages.size(), sendCount + 1, "Message receiving failed.");
}
/**
     * This test publishes 10 messages and the subscriber rejects all messages and then waits for the
     * redelivered messages.
     * <p/>
     * The redelivered messages are checked for the same content as the original messages, and their timestamps are
     * compared against the original message timestamps to make sure that the redeliveries were delayed.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void allUnacknowledgeMessageListenerTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
int sendCount = 10;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig =
new AndesJMSConsumerClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"multipleUnacknowledgeQueue");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig =
new AndesJMSPublisherClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"multipleUnacknowledgeQueue");
publisherConfig.setNumberOfMessagesToSend(sendCount);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
MessageConsumer receiver = andesJMSConsumer.getReceiver();
receiver.setMessageListener(new MessageListener() {
@Override
public void onMessage(Message message) {
try {
TextMessage textMessage = (TextMessage) message;
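                    // Acknowledge only redeliveries (content already seen once); every first delivery is left unacknowledged.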
if (getMessageList(receivedMessages).contains(textMessage.getText())) {
message.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
});
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount * 2; i++) {
if (i < sendCount) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
} else {
validateMessageContentAndDelay(receivedMessages, i - sendCount, i,
"#" + Integer.toString(i - sendCount));
}
}
Assert.assertEquals(receivedMessages.size(), sendCount * 2, "Message receiving failed.");
}
/**
     * This test publishes 10 messages and the subscriber rejects all messages and then waits for the
     * redelivered messages.
     * <p/>
     * The redelivered messages are checked for the same content as the original messages, and their timestamps are
     * compared against the original message timestamps to make sure that the redeliveries were delayed.
     * Here the message receive method is used instead of a message listener to receive messages.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void allUnacknowledgeMessageReceiverTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
int sendCount = 10;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig =
new AndesJMSConsumerClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"multipleUnacknowledgeReceiverQueue");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig =
new AndesJMSPublisherClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"multipleUnacknowledgeReceiverQueue");
publisherConfig.setNumberOfMessagesToSend(sendCount);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
final MessageConsumer receiver = andesJMSConsumer.getReceiver();
Thread messageReceivingThread = new Thread() {
public void run() {
while (receiver != null) {
try {
TextMessage textMessage = (TextMessage) receiver.receive();
if (getMessageList(receivedMessages).contains(textMessage.getText())) {
textMessage.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
}
};
messageReceivingThread.start();
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount * 2; i++) {
if (i < sendCount) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
} else {
validateMessageContentAndDelay(receivedMessages, i - sendCount, i,
"#" + Integer.toString(i - sendCount));
}
}
Assert.assertEquals(receivedMessages.size(), sendCount * 2, "Message receiving failed.");
}
/**
     * This test publishes 10 messages and the subscriber rejects every third message and then waits for the
     * redelivered messages.
     * <p/>
     * The redelivered messages are checked for the same content as the original messages, and their timestamps are
     * compared against the original message timestamps to make sure that the redeliveries were delayed.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void oneByOneUnacknowledgeMessageListenerTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
long sendCount = 10;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig =
new AndesJMSConsumerClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"oneByOneUnacknowledgeQueue");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig =
new AndesJMSPublisherClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"oneByOneUnacknowledgeQueue");
publisherConfig.setNumberOfMessagesToSend(sendCount);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
MessageConsumer receiver = andesJMSConsumer.getReceiver();
receiver.setMessageListener(new MessageListener() {
@Override
public void onMessage(Message message) {
try {
TextMessage textMessage = (TextMessage) message;
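                    // Acknowledge everything except the first delivery of every third message (#0, #3, #6, #9).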
if (Integer.parseInt(textMessage.getText().split("#")[1]) % 3 != 0 ||
getMessageList(receivedMessages).contains(textMessage.getText())) {
message.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
});
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount; i++) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
}
validateMessageContentAndDelay(receivedMessages, 0, 10, "#0");
validateMessageContentAndDelay(receivedMessages, 1, 11, "#3");
validateMessageContentAndDelay(receivedMessages, 2, 12, "#6");
validateMessageContentAndDelay(receivedMessages, 3, 13, "#9");
Assert.assertEquals(receivedMessages.size(), sendCount + 4, "Message receiving failed.");
}
/**
     * This test publishes 10 messages and the subscriber rejects every third message and then waits for the
     * redelivered messages.
     * <p/>
     * The redelivered messages are checked for the same content as the original messages, and their timestamps are
     * compared against the original message timestamps to make sure that the redeliveries were delayed.
     * Here the message receive method is used instead of a message listener to receive messages.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void oneByOneUnacknowledgeMessageReceiverTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
long sendCount = 10;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig =
new AndesJMSConsumerClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"oneByOneUnacknowledgeReceiverQueue");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig =
new AndesJMSPublisherClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"oneByOneUnacknowledgeReceiverQueue");
publisherConfig.setNumberOfMessagesToSend(sendCount);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
final MessageConsumer receiver = andesJMSConsumer.getReceiver();
Thread messageReceivingThread = new Thread() {
public void run() {
while (receiver != null) {
try {
TextMessage textMessage = (TextMessage) receiver.receive();
if (Integer.parseInt(textMessage.getText().split("#")[1]) % 3 != 0 ||
getMessageList(receivedMessages).contains(textMessage.getText())) {
textMessage.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
}
};
messageReceivingThread.start();
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount; i++) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
}
validateMessageContentAndDelay(receivedMessages, 0, 10, "#0");
validateMessageContentAndDelay(receivedMessages, 1, 11, "#3");
validateMessageContentAndDelay(receivedMessages, 2, 12, "#6");
validateMessageContentAndDelay(receivedMessages, 3, 13, "#9");
Assert.assertEquals(receivedMessages.size(), sendCount + 4, "Message receiving failed.");
}
/**
     * This test publishes 10 messages and the subscriber rejects the first 4 messages and then waits for the
     * redelivered messages.
     * <p/>
     * The redelivered messages are checked for the same content as the original messages, and their timestamps are
     * compared against the original message timestamps to make sure that the redeliveries were delayed.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void firstFewUnacknowledgeMessageListenerTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
long sendCount = 10;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig =
new AndesJMSConsumerClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"firstFewUnacknowledgeQueue");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig =
new AndesJMSPublisherClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"firstFewUnacknowledgeQueue");
publisherConfig.setNumberOfMessagesToSend(sendCount);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
MessageConsumer receiver = andesJMSConsumer.getReceiver();
receiver.setMessageListener(new MessageListener() {
@Override
public void onMessage(Message message) {
try {
TextMessage textMessage = (TextMessage) message;
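                    // Acknowledge everything except the first delivery of the first four messages (#0-#3).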
if (Integer.parseInt(textMessage.getText().split("#")[1]) >= 4 ||
getMessageList(receivedMessages).contains(textMessage.getText())) {
message.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
});
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount; i++) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
}
validateMessageContentAndDelay(receivedMessages, 0, 10, "#0");
validateMessageContentAndDelay(receivedMessages, 1, 11, "#1");
validateMessageContentAndDelay(receivedMessages, 2, 12, "#2");
validateMessageContentAndDelay(receivedMessages, 3, 13, "#3");
Assert.assertEquals(receivedMessages.size(), sendCount + 4, "Message receiving failed.");
}
/**
     * This test publishes 10 messages and the subscriber rejects the first 4 messages and then waits for the
     * redelivered messages.
     * <p/>
     * The redelivered messages are checked for the same content as the original messages, and their timestamps are
     * compared against the original message timestamps to make sure that the redeliveries were delayed.
     * Here the message receive method is used instead of a message listener to receive messages.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void firstFewUnacknowledgeMessageReceiverTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
long sendCount = 10;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig =
new AndesJMSConsumerClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"firstFewUnacknowledgeReceiverQueue");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig =
new AndesJMSPublisherClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"firstFewUnacknowledgeReceiverQueue");
publisherConfig.setNumberOfMessagesToSend(sendCount);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
final MessageConsumer receiver = andesJMSConsumer.getReceiver();
Thread messageReceivingThread = new Thread() {
public void run() {
while (receiver != null) {
try {
TextMessage textMessage = (TextMessage) receiver.receive();
if (Integer.parseInt(textMessage.getText().split("#")[1]) >= 4 ||
getMessageList(receivedMessages).contains(textMessage.getText())) {
textMessage.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
}
};
messageReceivingThread.start();
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount; i++) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
}
validateMessageContentAndDelay(receivedMessages, 0, 10, "#0");
validateMessageContentAndDelay(receivedMessages, 1, 11, "#1");
validateMessageContentAndDelay(receivedMessages, 2, 12, "#2");
validateMessageContentAndDelay(receivedMessages, 3, 13, "#3");
Assert.assertEquals(receivedMessages.size(), sendCount + 4, "Message receiving failed.");
}
/**
     * This test publishes 10 messages and the subscriber rejects the 8th message and then waits for the
     * redelivered message.
     * <p/>
     * The redelivered message is checked for the same content as the original message, and its timestamp is
     * compared against the original message timestamp to make sure that the redelivery was delayed.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void unacknowledgeMiddleMessageMessageListenerTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
long sendCount = 10;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig = new AndesJMSConsumerClientConfiguration(getAMQPPort(),
ExchangeType.QUEUE, "unacknowledgeMiddleMessageQueue");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig = new AndesJMSPublisherClientConfiguration(getAMQPPort(),
ExchangeType.QUEUE, "unacknowledgeMiddleMessageQueue");
publisherConfig.setNumberOfMessagesToSend(sendCount);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
MessageConsumer receiver = andesJMSConsumer.getReceiver();
receiver.setMessageListener(new MessageListener() {
@Override
public void onMessage(Message message) {
try {
TextMessage textMessage = (TextMessage) message;
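                    // Acknowledge everything except the first delivery of message #7.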
if (!textMessage.getText().equals("#7") ||
getMessageList(receivedMessages).contains(textMessage.getText())) {
message.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
});
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount; i++) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
}
validateMessageContentAndDelay(receivedMessages, 6, 10, "#7");
Assert.assertEquals(receivedMessages.size(), sendCount + 1, "Message receiving failed.");
}
/**
     * This test publishes 10 messages and the subscriber rejects the 8th message and then waits for the
     * redelivered message.
     * <p/>
     * The redelivered message is checked for the same content as the original message, and its timestamp is
     * compared against the original message timestamp to make sure that the redelivery was delayed.
     * Here the message receive method is used instead of a message listener to receive messages.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void unacknowledgeMiddleMessageMessageReceiverTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
long sendCount = 10;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig = new AndesJMSConsumerClientConfiguration(getAMQPPort(),
ExchangeType.QUEUE, "unacknowledgeMiddleMessageReceiverQueue");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig = new AndesJMSPublisherClientConfiguration(getAMQPPort(),
ExchangeType.QUEUE, "unacknowledgeMiddleMessageReceiverQueue");
publisherConfig.setNumberOfMessagesToSend(sendCount);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
final MessageConsumer receiver = andesJMSConsumer.getReceiver();
Thread messageReceivingThread = new Thread() {
public void run() {
while (receiver != null) {
try {
TextMessage textMessage = (TextMessage) receiver.receive();
if (!textMessage.getText().equals("#7") ||
getMessageList(receivedMessages).contains(textMessage.getText())) {
textMessage.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
}
};
messageReceivingThread.start();
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount; i++) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
}
validateMessageContentAndDelay(receivedMessages, 6, 10, "#7");
Assert.assertEquals(receivedMessages.size(), sendCount + 1, "Message receiving failed.");
}
/**
     * This test publishes 1000 messages and the subscriber rejects every 100th message and then waits for the
     * redelivered messages.
     * <p/>
     * The redelivered messages are checked for the same content as the original messages, and their timestamps are
     * compared against the original message timestamps to make sure that the redeliveries were delayed.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void oneByOneUnacknowledgeMessageListenerForMultipleMessagesTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
long sendCount = 1000;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig =
new AndesJMSConsumerClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"oneByOneUnacknowledgeMessageListenerForMultiple");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig =
new AndesJMSPublisherClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"oneByOneUnacknowledgeMessageListenerForMultiple");
publisherConfig.setNumberOfMessagesToSend(sendCount);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
MessageConsumer receiver = andesJMSConsumer.getReceiver();
receiver.setMessageListener(new MessageListener() {
@Override
public void onMessage(Message message) {
try {
TextMessage textMessage = (TextMessage) message;
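                    // Acknowledge everything except the first delivery of every 100th message (#0, #100, ..., #900).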
if (Integer.parseInt(textMessage.getText().split("#")[1]) % 100 != 0 ||
getMessageList(receivedMessages).contains(textMessage.getText())) {
message.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
});
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME * 2);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount; i++) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
}
validateMessageContentAndDelay(receivedMessages, 0, 1000, "#0");
validateMessageContentAndDelay(receivedMessages, 99, 1001, "#100");
validateMessageContentAndDelay(receivedMessages, 199, 1002, "#200");
validateMessageContentAndDelay(receivedMessages, 299, 1003, "#300");
validateMessageContentAndDelay(receivedMessages, 399, 1004, "#400");
validateMessageContentAndDelay(receivedMessages, 499, 1005, "#500");
validateMessageContentAndDelay(receivedMessages, 599, 1006, "#600");
validateMessageContentAndDelay(receivedMessages, 699, 1007, "#700");
validateMessageContentAndDelay(receivedMessages, 799, 1008, "#800");
validateMessageContentAndDelay(receivedMessages, 899, 1009, "#900");
Assert.assertEquals(receivedMessages.size(), sendCount + 10, "Message receiving failed.");
}
/**
     * This test publishes 1000 messages and the subscriber rejects every 100th message and then waits for the
     * redelivered messages.
     * <p/>
     * The redelivered messages are checked for the same content as the original messages, and their timestamps are
     * compared against the original message timestamps to make sure that the redeliveries were delayed.
     * Here the message receive method is used instead of a message listener to receive messages.
*
* @throws AndesClientConfigurationException
* @throws XPathExpressionException
* @throws IOException
* @throws JMSException
* @throws AndesClientException
* @throws NamingException
*/
@Test(groups = {"wso2.mb", "queue"})
public void oneByOneUnacknowledgeMessageReceiverForMultipleMessagesTestCase()
throws AndesClientConfigurationException, XPathExpressionException, IOException, JMSException,
AndesClientException, NamingException {
long sendCount = 1000;
final List<ImmutablePair<String, Calendar>> receivedMessages = new ArrayList<>();
// Creating a consumer client configuration
AndesJMSConsumerClientConfiguration consumerConfig =
new AndesJMSConsumerClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"oneByOneUnacknowledgeMessageReceiverForMultipleQueue");
consumerConfig.setAcknowledgeMode(JMSAcknowledgeMode.PER_MESSAGE_ACKNOWLEDGE);
consumerConfig.setAsync(false);
// Creating a publisher client configuration
AndesJMSPublisherClientConfiguration publisherConfig =
new AndesJMSPublisherClientConfiguration(getAMQPPort(), ExchangeType.QUEUE,
"oneByOneUnacknowledgeMessageReceiverForMultipleQueue");
publisherConfig.setNumberOfMessagesToSend(sendCount);
// Creating clients
AndesClient consumerClient = new AndesClient(consumerConfig, true);
final AndesJMSConsumer andesJMSConsumer = consumerClient.getConsumers().get(0);
final MessageConsumer receiver = andesJMSConsumer.getReceiver();
Thread messageReceivingThread = new Thread() {
public void run() {
while (receiver != null) {
try {
TextMessage textMessage = (TextMessage) receiver.receive();
if (Integer.parseInt(textMessage.getText().split("#")[1]) % 100 != 0 ||
getMessageList(receivedMessages).contains(textMessage.getText())) {
textMessage.acknowledge();
}
receivedMessages.add(ImmutablePair.of(textMessage.getText(), Calendar.getInstance()));
andesJMSConsumer.getReceivedMessageCount().incrementAndGet();
} catch (JMSException e) {
throw new RuntimeException("Exception occurred when receiving messages.", e);
}
}
}
};
messageReceivingThread.start();
AndesClient publisherClient = new AndesClient(publisherConfig, true);
AndesJMSPublisher andesJMSPublisher = publisherClient.getPublishers().get(0);
MessageProducer sender = andesJMSPublisher.getSender();
for (int i = 0; i < sendCount; i++) {
TextMessage textMessage = andesJMSPublisher.getSession().createTextMessage("#" + Integer.toString(i));
sender.send(textMessage);
}
AndesClientUtils.waitForMessagesAndShutdown(consumerClient, AndesClientConstants.DEFAULT_RUN_TIME * 2);
log.info("Received Messages : " + getMessageList(receivedMessages));
for (int i = 0; i < sendCount; i++) {
Assert.assertEquals(receivedMessages.get(i).getLeft(), "#" + Integer.toString(i),
"Invalid messages received. #" + Integer.toString(i) + " expected.");
}
validateMessageContentAndDelay(receivedMessages, 0, 1000, "#0");
validateMessageContentAndDelay(receivedMessages, 99, 1001, "#100");
validateMessageContentAndDelay(receivedMessages, 199, 1002, "#200");
validateMessageContentAndDelay(receivedMessages, 299, 1003, "#300");
validateMessageContentAndDelay(receivedMessages, 399, 1004, "#400");
validateMessageContentAndDelay(receivedMessages, 499, 1005, "#500");
validateMessageContentAndDelay(receivedMessages, 599, 1006, "#600");
validateMessageContentAndDelay(receivedMessages, 699, 1007, "#700");
validateMessageContentAndDelay(receivedMessages, 799, 1008, "#800");
validateMessageContentAndDelay(receivedMessages, 899, 1009, "#900");
Assert.assertEquals(receivedMessages.size(), sendCount + 10, "Message receiving failed.");
}
/**
     * Restores all configurations modified by this test class.
     * The following configurations are restored:
     * 1. AndesAckWaitTimeOut system property.
     * 2. AndesRedeliveryDelay system property.
     * 3. All destinations created in this test class are deleted.
     * 4. The default broker.xml is restored and the server is restarted.
*
* @throws IOException
* @throws AutomationUtilException
*/
@AfterClass()
public void tearDown()
throws IOException, AutomationUtilException, LogoutAuthenticationExceptionException,
AndesAdminServiceBrokerManagerAdminException {
if (StringUtils.isBlank(defaultAndesAckWaitTimeOut)) {
System.clearProperty(AndesClientConstants.ANDES_ACK_WAIT_TIMEOUT_PROPERTY);
} else {
System.setProperty(AndesClientConstants.ANDES_ACK_WAIT_TIMEOUT_PROPERTY, defaultAndesAckWaitTimeOut);
}
if (StringUtils.isBlank(defaultAndesRedeliveryDelay)) {
System.clearProperty(AndesClientConstants.ANDES_REDELIVERY_DELAY_PROPERTY);
} else {
System.setProperty(AndesClientConstants.ANDES_REDELIVERY_DELAY_PROPERTY, defaultAndesRedeliveryDelay);
}
LoginLogoutClient loginLogoutClientForAdmin = new LoginLogoutClient(super.automationContext);
String sessionCookie = loginLogoutClientForAdmin.login();
AndesAdminClient andesAdminClient = new AndesAdminClient(super.backendURL, sessionCookie);
andesAdminClient.deleteQueue("firstMessageInvalidOnlyQueue");
andesAdminClient.deleteQueue("firstMessageInvalidOnlyReceiverQueue");
andesAdminClient.deleteQueue("multipleUnacknowledgeQueue");
andesAdminClient.deleteQueue("multipleUnacknowledgeReceiverQueue");
andesAdminClient.deleteQueue("oneByOneUnacknowledgeQueue");
andesAdminClient.deleteQueue("oneByOneUnacknowledgeReceiverQueue");
andesAdminClient.deleteQueue("firstFewUnacknowledgeQueue");
andesAdminClient.deleteQueue("firstFewUnacknowledgeReceiverQueue");
andesAdminClient.deleteQueue("unacknowledgeMiddleMessageQueue");
andesAdminClient.deleteQueue("unacknowledgeMiddleMessageReceiverQueue");
andesAdminClient.deleteQueue("oneByOneUnacknowledgeMessageListenerForMultiple");
andesAdminClient.deleteQueue("oneByOneUnacknowledgeMessageReceiverForMultipleQueue");
loginLogoutClientForAdmin.logout();
        // Revert to the original configuration.
super.serverManager.restoreToLastConfiguration(true);
}
/**
     * Validates the content of a redelivered message against the original message and verifies that the
     * redelivery delay has elapsed.
*
     * @param receivedMessages        The received message list.
     * @param originalMessageIndex    The index of the original message in the received message list.
     * @param redeliveredMessageIndex The index of the redelivered message in the received message list.
     * @param expectedMessageContent  The expected message content.
*/
private void validateMessageContentAndDelay(
List<ImmutablePair<String, Calendar>> receivedMessages,
int originalMessageIndex,
int redeliveredMessageIndex,
String expectedMessageContent) {
// Validate message content
String messageContent = receivedMessages.get(redeliveredMessageIndex).getLeft();
Assert.assertEquals(messageContent, expectedMessageContent, "Invalid messages received.");
// Validate delay
Calendar originalMessageCalendar = receivedMessages.get(originalMessageIndex).getRight();
log.info("Original message timestamp for " + messageContent + " : " +
originalMessageCalendar.getTimeInMillis());
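        // Add the expected minimum redelivery delay (10 seconds) to the original delivery timestamp.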
originalMessageCalendar.add(Calendar.SECOND, 10);
log.info("Minimum redelivered timestamp for " + messageContent + " : " +
originalMessageCalendar.getTimeInMillis());
Calendar redeliveredMessageCalendar = receivedMessages.get(redeliveredMessageIndex).getRight();
log.info("Timestamp of redelivered for " + messageContent + " message : " +
redeliveredMessageCalendar.getTimeInMillis());
Assert.assertTrue(originalMessageCalendar.compareTo(redeliveredMessageCalendar) <= 0,
"Message received before the redelivery delay");
}
/**
     * Collects the contents of the received messages into a list.
*
* @param receivedMessages The list of received messages as a pair of message content and time received.
* @return A list of message content.
*/
private List<String> getMessageList(List<ImmutablePair<String, Calendar>> receivedMessages) {
List<String> messages = new ArrayList<>();
for (ImmutablePair<String, Calendar> receivedMessage : receivedMessages) {
messages.add(receivedMessage.getLeft());
}
return messages;
}
}
| 23,542 |
2,111 | <filename>algorithms/src/LocalizationAndMapping/registration_localization/fast_gicp/thirdparty/pybind11/tests/test_numpy_dtypes.py<gh_stars>1000+
# -*- coding: utf-8 -*-
import re
import pytest
import env # noqa: F401
from pybind11_tests import numpy_dtypes as m
np = pytest.importorskip("numpy")
@pytest.fixture(scope="module")
def simple_dtype():
ld = np.dtype("longdouble")
return np.dtype(
{
"names": ["bool_", "uint_", "float_", "ldbl_"],
"formats": ["?", "u4", "f4", "f{}".format(ld.itemsize)],
"offsets": [0, 4, 8, (16 if ld.alignment > 4 else 12)],
}
)
@pytest.fixture(scope="module")
def packed_dtype():
return np.dtype([("bool_", "?"), ("uint_", "u4"), ("float_", "f4"), ("ldbl_", "g")])
def dt_fmt():
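    """Template for the dict-style dtype repr; callers fill in the long double size, field offset and itemsize."""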
from sys import byteorder
e = "<" if byteorder == "little" else ">"
return (
"{{'names':['bool_','uint_','float_','ldbl_'],"
" 'formats':['?','" + e + "u4','" + e + "f4','" + e + "f{}'],"
" 'offsets':[0,4,8,{}], 'itemsize':{}}}"
)
def simple_dtype_fmt():
ld = np.dtype("longdouble")
simple_ld_off = 12 + 4 * (ld.alignment > 4)
return dt_fmt().format(ld.itemsize, simple_ld_off, simple_ld_off + ld.itemsize)
def packed_dtype_fmt():
from sys import byteorder
return "[('bool_', '?'), ('uint_', '{e}u4'), ('float_', '{e}f4'), ('ldbl_', '{e}f{}')]".format(
np.dtype("longdouble").itemsize, e="<" if byteorder == "little" else ">"
)
def partial_ld_offset():
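    """Byte offset of the long double field in the partial struct, accounting for platform alignment."""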
return (
12
+ 4 * (np.dtype("uint64").alignment > 4)
+ 8
+ 8 * (np.dtype("longdouble").alignment > 8)
)
def partial_dtype_fmt():
ld = np.dtype("longdouble")
partial_ld_off = partial_ld_offset()
return dt_fmt().format(ld.itemsize, partial_ld_off, partial_ld_off + ld.itemsize)
def partial_nested_fmt():
ld = np.dtype("longdouble")
partial_nested_off = 8 + 8 * (ld.alignment > 8)
partial_ld_off = partial_ld_offset()
partial_nested_size = partial_nested_off * 2 + partial_ld_off + ld.itemsize
return "{{'names':['a'], 'formats':[{}], 'offsets':[{}], 'itemsize':{}}}".format(
partial_dtype_fmt(), partial_nested_off, partial_nested_size
)
def assert_equal(actual, expected_data, expected_dtype):
np.testing.assert_equal(actual, np.array(expected_data, dtype=expected_dtype))
def test_format_descriptors():
with pytest.raises(RuntimeError) as excinfo:
m.get_format_unbound()
assert re.match(
"^NumPy type info missing for .*UnboundStruct.*$", str(excinfo.value)
)
ld = np.dtype("longdouble")
ldbl_fmt = ("4x" if ld.alignment > 4 else "") + ld.char
ss_fmt = "^T{?:bool_:3xI:uint_:f:float_:" + ldbl_fmt + ":ldbl_:}"
dbl = np.dtype("double")
partial_fmt = (
"^T{?:bool_:3xI:uint_:f:float_:"
+ str(4 * (dbl.alignment > 4) + dbl.itemsize + 8 * (ld.alignment > 8))
+ "xg:ldbl_:}"
)
nested_extra = str(max(8, ld.alignment))
assert m.print_format_descriptors() == [
ss_fmt,
"^T{?:bool_:I:uint_:f:float_:g:ldbl_:}",
"^T{" + ss_fmt + ":a:^T{?:bool_:I:uint_:f:float_:g:ldbl_:}:b:}",
partial_fmt,
"^T{" + nested_extra + "x" + partial_fmt + ":a:" + nested_extra + "x}",
"^T{3s:a:3s:b:}",
"^T{(3)4s:a:(2)i:b:(3)B:c:1x(4, 2)f:d:}",
"^T{q:e1:B:e2:}",
"^T{Zf:cflt:Zd:cdbl:}",
]
def test_dtype(simple_dtype):
from sys import byteorder
e = "<" if byteorder == "little" else ">"
assert m.print_dtypes() == [
simple_dtype_fmt(),
packed_dtype_fmt(),
"[('a', {}), ('b', {})]".format(simple_dtype_fmt(), packed_dtype_fmt()),
partial_dtype_fmt(),
partial_nested_fmt(),
"[('a', 'S3'), ('b', 'S3')]",
(
"{{'names':['a','b','c','d'], "
+ "'formats':[('S4', (3,)),('"
+ e
+ "i4', (2,)),('u1', (3,)),('"
+ e
+ "f4', (4, 2))], "
+ "'offsets':[0,12,20,24], 'itemsize':56}}"
).format(e=e),
"[('e1', '" + e + "i8'), ('e2', 'u1')]",
"[('x', 'i1'), ('y', '" + e + "u8')]",
"[('cflt', '" + e + "c8'), ('cdbl', '" + e + "c16')]",
]
d1 = np.dtype(
{
"names": ["a", "b"],
"formats": ["int32", "float64"],
"offsets": [1, 10],
"itemsize": 20,
}
)
d2 = np.dtype([("a", "i4"), ("b", "f4")])
assert m.test_dtype_ctors() == [
np.dtype("int32"),
np.dtype("float64"),
np.dtype("bool"),
d1,
d1,
np.dtype("uint32"),
d2,
]
assert m.test_dtype_methods() == [
np.dtype("int32"),
simple_dtype,
False,
True,
np.dtype("int32").itemsize,
simple_dtype.itemsize,
]
assert m.trailing_padding_dtype() == m.buffer_to_dtype(
np.zeros(1, m.trailing_padding_dtype())
)
assert m.test_dtype_kind() == list("iiiiiuuuuuffffcccbMmO")
assert m.test_dtype_char_() == list("bhilqBHILQefdgFDG?MmO")
def test_recarray(simple_dtype, packed_dtype):
elements = [(False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)]
for func, dtype in [
(m.create_rec_simple, simple_dtype),
(m.create_rec_packed, packed_dtype),
]:
arr = func(0)
assert arr.dtype == dtype
assert_equal(arr, [], simple_dtype)
assert_equal(arr, [], packed_dtype)
arr = func(3)
assert arr.dtype == dtype
assert_equal(arr, elements, simple_dtype)
assert_equal(arr, elements, packed_dtype)
        # Show what recarrays look like in NumPy.
assert type(arr[0]) == np.void
assert type(arr[0].item()) == tuple
if dtype == simple_dtype:
assert m.print_rec_simple(arr) == [
"s:0,0,0,-0",
"s:1,1,1.5,-2.5",
"s:0,2,3,-5",
]
else:
assert m.print_rec_packed(arr) == [
"p:0,0,0,-0",
"p:1,1,1.5,-2.5",
"p:0,2,3,-5",
]
nested_dtype = np.dtype([("a", simple_dtype), ("b", packed_dtype)])
arr = m.create_rec_nested(0)
assert arr.dtype == nested_dtype
assert_equal(arr, [], nested_dtype)
arr = m.create_rec_nested(3)
assert arr.dtype == nested_dtype
assert_equal(
arr,
[
((False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5)),
((True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)),
((False, 2, 3.0, -5.0), (True, 3, 4.5, -7.5)),
],
nested_dtype,
)
assert m.print_rec_nested(arr) == [
"n:a=s:0,0,0,-0;b=p:1,1,1.5,-2.5",
"n:a=s:1,1,1.5,-2.5;b=p:0,2,3,-5",
"n:a=s:0,2,3,-5;b=p:1,3,4.5,-7.5",
]
arr = m.create_rec_partial(3)
assert str(arr.dtype) == partial_dtype_fmt()
partial_dtype = arr.dtype
assert "" not in arr.dtype.fields
assert partial_dtype.itemsize > simple_dtype.itemsize
assert_equal(arr, elements, simple_dtype)
assert_equal(arr, elements, packed_dtype)
arr = m.create_rec_partial_nested(3)
assert str(arr.dtype) == partial_nested_fmt()
assert "" not in arr.dtype.fields
assert "" not in arr.dtype.fields["a"][0].fields
assert arr.dtype.itemsize > partial_dtype.itemsize
np.testing.assert_equal(arr["a"], m.create_rec_partial(3))
def test_array_constructors():
data = np.arange(1, 7, dtype="int32")
for i in range(8):
np.testing.assert_array_equal(m.test_array_ctors(10 + i), data.reshape((3, 2)))
np.testing.assert_array_equal(m.test_array_ctors(20 + i), data.reshape((3, 2)))
for i in range(5):
np.testing.assert_array_equal(m.test_array_ctors(30 + i), data)
np.testing.assert_array_equal(m.test_array_ctors(40 + i), data)
def test_string_array():
arr = m.create_string_array(True)
assert str(arr.dtype) == "[('a', 'S3'), ('b', 'S3')]"
assert m.print_string_array(arr) == [
"a='',b=''",
"a='a',b='a'",
"a='ab',b='ab'",
"a='abc',b='abc'",
]
dtype = arr.dtype
assert arr["a"].tolist() == [b"", b"a", b"ab", b"abc"]
assert arr["b"].tolist() == [b"", b"a", b"ab", b"abc"]
arr = m.create_string_array(False)
assert dtype == arr.dtype
def test_array_array():
from sys import byteorder
e = "<" if byteorder == "little" else ">"
arr = m.create_array_array(3)
assert str(arr.dtype) == (
"{{'names':['a','b','c','d'], "
+ "'formats':[('S4', (3,)),('"
+ e
+ "i4', (2,)),('u1', (3,)),('{e}f4', (4, 2))], "
+ "'offsets':[0,12,20,24], 'itemsize':56}}"
).format(e=e)
assert m.print_array_array(arr) == [
"a={{A,B,C,D},{K,L,M,N},{U,V,W,X}},b={0,1},"
+ "c={0,1,2},d={{0,1},{10,11},{20,21},{30,31}}",
"a={{W,X,Y,Z},{G,H,I,J},{Q,R,S,T}},b={1000,1001},"
+ "c={10,11,12},d={{100,101},{110,111},{120,121},{130,131}}",
"a={{S,T,U,V},{C,D,E,F},{M,N,O,P}},b={2000,2001},"
+ "c={20,21,22},d={{200,201},{210,211},{220,221},{230,231}}",
]
assert arr["a"].tolist() == [
[b"ABCD", b"KLMN", b"UVWX"],
[b"WXYZ", b"GHIJ", b"QRST"],
[b"STUV", b"CDEF", b"MNOP"],
]
assert arr["b"].tolist() == [[0, 1], [1000, 1001], [2000, 2001]]
assert m.create_array_array(0).dtype == arr.dtype
def test_enum_array():
from sys import byteorder
e = "<" if byteorder == "little" else ">"
arr = m.create_enum_array(3)
dtype = arr.dtype
assert dtype == np.dtype([("e1", e + "i8"), ("e2", "u1")])
assert m.print_enum_array(arr) == ["e1=A,e2=X", "e1=B,e2=Y", "e1=A,e2=X"]
assert arr["e1"].tolist() == [-1, 1, -1]
assert arr["e2"].tolist() == [1, 2, 1]
assert m.create_enum_array(0).dtype == dtype
def test_complex_array():
from sys import byteorder
e = "<" if byteorder == "little" else ">"
arr = m.create_complex_array(3)
dtype = arr.dtype
assert dtype == np.dtype([("cflt", e + "c8"), ("cdbl", e + "c16")])
assert m.print_complex_array(arr) == [
"c:(0,0.25),(0.5,0.75)",
"c:(1,1.25),(1.5,1.75)",
"c:(2,2.25),(2.5,2.75)",
]
assert arr["cflt"].tolist() == [0.0 + 0.25j, 1.0 + 1.25j, 2.0 + 2.25j]
assert arr["cdbl"].tolist() == [0.5 + 0.75j, 1.5 + 1.75j, 2.5 + 2.75j]
assert m.create_complex_array(0).dtype == dtype
def test_signature(doc):
assert (
doc(m.create_rec_nested)
== "create_rec_nested(arg0: int) -> numpy.ndarray[NestedStruct]"
)
def test_scalar_conversion():
n = 3
arrays = [
m.create_rec_simple(n),
m.create_rec_packed(n),
m.create_rec_nested(n),
m.create_enum_array(n),
]
funcs = [m.f_simple, m.f_packed, m.f_nested]
for i, func in enumerate(funcs):
for j, arr in enumerate(arrays):
if i == j and i < 2:
assert [func(arr[k]) for k in range(n)] == [k * 10 for k in range(n)]
else:
with pytest.raises(TypeError) as excinfo:
func(arr[0])
assert "incompatible function arguments" in str(excinfo.value)
def test_vectorize():
n = 3
array = m.create_rec_simple(n)
values = m.f_simple_vectorized(array)
np.testing.assert_array_equal(values, [0, 10, 20])
array_2 = m.f_simple_pass_thru_vectorized(array)
np.testing.assert_array_equal(array, array_2)
def test_cls_and_dtype_conversion(simple_dtype):
s = m.SimpleStruct()
assert s.astuple() == (False, 0, 0.0, 0.0)
assert m.SimpleStruct.fromtuple(s.astuple()).astuple() == s.astuple()
s.uint_ = 2
assert m.f_simple(s) == 20
# Try as recarray of shape==(1,).
s_recarray = np.array([(False, 2, 0.0, 0.0)], dtype=simple_dtype)
# Show that this will work for vectorized case.
np.testing.assert_array_equal(m.f_simple_vectorized(s_recarray), [20])
# Show as a scalar that inherits from np.generic.
s_scalar = s_recarray[0]
assert isinstance(s_scalar, np.void)
assert m.f_simple(s_scalar) == 20
# Show that an *array* scalar (np.ndarray.shape == ()) does not convert.
# More specifically, conversion to SimpleStruct is not implicit.
s_recarray_scalar = s_recarray.reshape(())
assert isinstance(s_recarray_scalar, np.ndarray)
assert s_recarray_scalar.dtype == simple_dtype
with pytest.raises(TypeError) as excinfo:
m.f_simple(s_recarray_scalar)
assert "incompatible function arguments" in str(excinfo.value)
# Explicitly convert to m.SimpleStruct.
assert m.f_simple(m.SimpleStruct.fromtuple(s_recarray_scalar.item())) == 20
# Show that an array of dtype=object does *not* convert.
s_array_object = np.array([s])
assert s_array_object.dtype == object
with pytest.raises(TypeError) as excinfo:
m.f_simple_vectorized(s_array_object)
assert "incompatible function arguments" in str(excinfo.value)
# Explicitly convert to `np.array(..., dtype=simple_dtype)`
s_array = np.array([s.astuple()], dtype=simple_dtype)
np.testing.assert_array_equal(m.f_simple_vectorized(s_array), [20])
def test_register_dtype():
with pytest.raises(RuntimeError) as excinfo:
m.register_dtype()
assert "dtype is already registered" in str(excinfo.value)
@pytest.mark.xfail("env.PYPY")
def test_str_leak():
from sys import getrefcount
fmt = "f4"
pytest.gc_collect()
start = getrefcount(fmt)
d = m.dtype_wrapper(fmt)
assert d is np.dtype("f4")
del d
pytest.gc_collect()
assert getrefcount(fmt) == start
def test_compare_buffer_info():
assert all(m.compare_buffer_info())
| 6,949 |
860 | <gh_stars>100-1000
# Author: <NAME>
# Email: <EMAIL>
# License: MIT License
import pytest
import numpy as np
from gradient_free_optimizers import RandomAnnealingOptimizer
from .test_hill_climbing_para_init import hill_climbing_para
from ._base_para_test import _base_para_test_func
def objective_function(para):
score = -para["x1"] * para["x1"]
return score
search_space = {"x1": np.arange(-100, 101, 1)}
random_annealing_para = hill_climbing_para + [
({"annealing_rate": 0.5}),
({"annealing_rate": 0.8}),
({"annealing_rate": 0.9}),
({"annealing_rate": 1}),
({"start_temp": 1}),
({"start_temp": 2}),
({"start_temp": 0.5}),
]
pytest_wrapper = ("opt_para", random_annealing_para)
@pytest.mark.parametrize(*pytest_wrapper)
def test_random_annealing_para(opt_para):
_base_para_test_func(opt_para, RandomAnnealingOptimizer)
| 364 |
1,799 | // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/core/subgraph/subgraph_bridge_registry.h"
#include "lite/kernels/xpu/bridges/graph.h"
#include "lite/kernels/xpu/bridges/utility.h"
namespace paddle {
namespace lite {
namespace subgraph {
namespace xpu {
// FIXME: yolo_box has been updated; check the ARM kernel implementation for more details.
int YoloBoxConverter(void* ctx, OpLite* op, KernelBase* kernel) {
CHECK(ctx != nullptr);
CHECK(op != nullptr);
auto graph = static_cast<Graph*>(ctx);
auto op_info = op->op_info();
auto op_type = op_info->Type();
auto scope = op->scope();
VLOG(3) << "[XPU] Converting " + op_type + "...";
// Get input and output vars and op attributes
auto x_name = op_info->Input("X").front();
auto x = scope->FindTensor(x_name);
auto img_size_name = op_info->Input("ImgSize").front();
auto img_size = scope->FindTensor(img_size_name);
auto boxes_name = op_info->Output("Boxes").front();
auto scores_name = op_info->Output("Scores").front();
auto anchors = op_info->GetAttr<std::vector<int>>("anchors");
auto class_num = op_info->GetAttr<int>("class_num");
auto conf_thresh = op_info->GetAttr<float>("conf_thresh");
auto downsample_ratio = op_info->GetAttr<int>("downsample_ratio");
// X node
std::shared_ptr<Node> x_node = nullptr;
if (graph->Has(x_name)) {
x_node = graph->Get(x_name);
} else {
x_node = graph->Add(x_name, *x);
}
// ImgSize node
std::shared_ptr<Node> img_size_node = nullptr;
if (graph->Has(img_size_name)) {
img_size_node = graph->Get(img_size_name);
} else {
img_size_node = graph->Add(img_size_name, *img_size);
}
  // YoloBox node
auto yolo_box_data =
graph->builder_.CreateYoloBox(*x_node->data(),
*img_size_node->data(),
CvtShape<xtcl::Integer>(anchors),
class_num,
conf_thresh,
downsample_ratio);
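  // Expose the two outputs of the yolo_box op: detection boxes and per-class scores.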
graph->Add(boxes_name, graph->builder_.GetField(yolo_box_data, 0));
graph->Add(scores_name, graph->builder_.GetField(yolo_box_data, 1));
return SUCCESS;
}
} // namespace xpu
} // namespace subgraph
} // namespace lite
} // namespace paddle
REGISTER_SUBGRAPH_BRIDGE(yolo_box,
kXPU,
paddle::lite::subgraph::xpu::YoloBoxConverter);
| 1,200 |
1,467 | {
"version": "2.15.9",
"date": "2020-10-16",
"entries": [
{
"type": "feature",
"category": "AWS Organizations",
"description": "Documentation updates for AWS Organizations."
},
{
"type": "feature",
"category": "AWS Elemental MediaLive",
"description": "The AWS Elemental MediaLive APIs and SDKs now support the ability to transfer the ownership of MediaLive Link devices across AWS accounts."
},
{
"type": "feature",
"category": "AWS SDK for Java v2",
"description": "Updated service endpoint metadata."
}
]
} | 305 |
1,962 | /*
* Copyright 2019 Ververica GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.bolingcavalry.json_serde;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
/**
 * @Description: JSON deserialization helper class
 * @author: willzhao E-mail: <EMAIL>
 * @date: 2020/5/2 15:02
 */
public class JsonDeserializer<T> {
private final Class<T> recordClazz;
private final ObjectMapper jsonMapper;
public JsonDeserializer(Class<T> recordClazz) {
this.recordClazz = recordClazz;
this.jsonMapper = new ObjectMapper();
}
public T parseFromString(String line) {
try {
return jsonMapper.readValue(line, this.recordClazz);
} catch (IOException e) {
throw new IllegalArgumentException("Could not deserialize record: " + line + " as class " + recordClazz, e);
}
}
}
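// Usage sketch (the Student POJO and the JSON string below are illustrative assumptions,
// not part of this project):
//   JsonDeserializer<Student> deserializer = new JsonDeserializer<>(Student.class);
//   Student s = deserializer.parseFromString("{\"name\":\"Tom\",\"age\":18}");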
| 476 |
4,036 | import sys
import settings
import utils
import difflib
def ignore_line_ending(ch):
return difflib.IS_CHARACTER_JUNK(ch, ws=" \r\n")
def compare_files(file1, file2):
    """Return the added/removed lines between two text files, ignoring line-ending noise."""
    # Context managers ensure the file handles are closed promptly.
    with open(file1) as f1, open(file2) as f2:
        diff = difflib.ndiff(f1.readlines(), f2.readlines(),
                             None, ignore_line_ending)
    ret = ""
    for line in diff:
        if line.startswith("+") or line.startswith("-"):
            ret += line
    return ret
def compare_folders(folder1, folder2, output_file):
"""
Compares the contents of two folders and writes the differences to the output file.
"""
return_md = ""
for lang in settings.languages:
expected_files = ""
generated_output_rst = settings.generated_output_rst.format(
language=lang)
generated_output_csv = settings.generated_output_csv.format(
language=lang)
# check if files exist in both folder1 and folder 2
if not utils.check_file_exists(f"{folder1}/{generated_output_rst}"):
expected_files += f"- {generated_output_rst} doesn't exist in folder {folder1}\n"
if not utils.check_file_exists(f"{folder2}/{generated_output_rst}"):
expected_files += f"- {generated_output_rst} doesn't exist in folder {folder2}\n"
if not utils.check_file_exists(f"{folder1}/{generated_output_csv}"):
expected_files += f"- {generated_output_csv} doesn't exist in folder {folder1}\n"
if not utils.check_file_exists(f"{folder2}/{generated_output_csv}"):
expected_files += f"- {generated_output_csv} doesn't exist in folder {folder2}\n"
if expected_files != "":
print("Expected files are missing", file=sys.stderr)
return_md += f"\n### {lang}\n\n#### Expected files are missing for {lang}\n{expected_files}\n"
continue
# compare contents of files
cmp1 = compare_files(
f"{folder1}/{generated_output_rst}", f"{folder2}/{generated_output_rst}")
cmp2 = compare_files(
f"{folder1}/{generated_output_csv}", f"{folder2}/{generated_output_csv}")
if cmp1 != "" or cmp2 != "":
print("Generated file contents are not matching", file=sys.stderr)
return_md += f"\n### {lang}\n\n#### Generated file changes for {lang}\n\n"
if cmp1 != "":
return_md += f"- Changes to {generated_output_rst}:\n```diff\n{cmp1}```\n\n"
if cmp2 != "":
return_md += f"- Changes to {generated_output_csv}:\n```diff\n{cmp2}```\n\n"
with open(output_file, 'w', newline='') as out:
out.write(return_md)
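
# Hypothetical command-line entry point; the folder names and output path below are
# illustrative assumptions, not part of the original tool.
if __name__ == "__main__":
    compare_folders("expected_docs", "generated_docs", "comparison.md")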
| 1,160 |
348 | <filename>docs/data/leg-t1/082/08202124.json<gh_stars>100-1000
{"nom":"Montbeton","circ":"2ème circonscription","dpt":"Tarn-et-Garonne","inscrits":2874,"abs":1470,"votants":1404,"blancs":58,"nuls":15,"exp":1331,"res":[{"nuance":"RDG","nom":"Mme <NAME>","voix":362},{"nuance":"FN","nom":"M. <NAME>","voix":322},{"nuance":"FI","nom":"M. <NAME>","voix":202},{"nuance":"LR","nom":"<NAME>","voix":183},{"nuance":"DVG","nom":"M. <NAME>","voix":88},{"nuance":"ECO","nom":"Mme <NAME>","voix":61},{"nuance":"COM","nom":"Mme <NAME>","voix":30},{"nuance":"ECO","nom":"Mme <NAME>","voix":17},{"nuance":"EXD","nom":"M. <NAME>","voix":17},{"nuance":"DVD","nom":"M. <NAME>","voix":16},{"nuance":"DIV","nom":"Mme <NAME>","voix":15},{"nuance":"DIV","nom":"M. <NAME>","voix":12},{"nuance":"EXG","nom":"Mme <NAME>","voix":6},{"nuance":"DIV","nom":"M. <NAME>","voix":0}]} | 351 |
5,196 | /*
* Copyright 2018 The Cartographer Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cartographer/metrics/counter.h"
namespace cartographer {
namespace metrics {
namespace {
// Implementation of counter that does nothing.
class NullCounter : public Counter {
public:
void Increment() override{};
void Increment(double) override{};
};
} // namespace
Counter* Counter::Null() {
static NullCounter null_counter;
return &null_counter;
}
} // namespace metrics
} // namespace cartographer
| 278 |
361 | <reponame>NickNYU/sofa-registry
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alipay.sofa.registry.metrics;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;
import com.google.common.collect.Sets;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadPoolExecutor;
/**
*
* @author shangyu.wh
* @version $Id: ThreadMetrics.java, v 0.1 2018-11-18 15:19 shangyu.wh Exp $
*/
public class TaskMetrics {
private final MetricRegistry metrics;
private final Set<String> executorNames = Sets.newConcurrentHashSet();
private TaskMetrics() {
this.metrics = new MetricRegistry();
}
private volatile static TaskMetrics instance;
public static TaskMetrics getInstance() {
if (instance == null) {
synchronized (TaskMetrics.class) {
if (instance == null) {
instance = new TaskMetrics();
}
}
}
return instance;
}
public MetricRegistry getMetricRegistry() {
return this.metrics;
}
public void registerThreadExecutor(String executorName, ThreadPoolExecutor executor) {
executorNames.add(executorName);
metrics.register(MetricRegistry.name(executorName, "queue"),
(Gauge<Integer>) () -> executor.getQueue().size());
metrics.register(MetricRegistry.name(executorName, "current"),
(Gauge<Integer>) executor::getPoolSize);
metrics.register(MetricRegistry.name(executorName, "active"),
(Gauge<Integer>) executor::getActiveCount);
metrics.register(MetricRegistry.name(executorName, "completed"),
(Gauge<Long>) executor::getCompletedTaskCount);
metrics.register(MetricRegistry.name(executorName, "task"),
(Gauge<Long>) executor::getTaskCount);
}
public Set<String> getExecutorNames() {
return executorNames;
}
public String metricsString() {
final String SYMBOLIC = " └─ ";
StringBuilder sb = new StringBuilder();
sb.append("\n").append("ExecutorMetrics").append(" >>>>>>>>");
sb.append("\n");
for (String executorName : getExecutorNames()) {
MetricRegistry metricRegistry = getMetricRegistry();
Map<String, Gauge> map = metricRegistry
.getGauges((name, value) -> name.startsWith(executorName));
sb.append(SYMBOLIC).append(executorName);
map.forEach((key, gauge) -> {
String name = key.substring(executorName.length() + 1);
sb.append(", ").append(name).append(":").append(gauge.getValue());
});
sb.append("\n");
}
return sb.toString();
}
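    // Usage sketch (the executor name and pool below are illustrative, not part of this class):
    //   ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(4);
    //   TaskMetrics.getInstance().registerThreadExecutor("workerExecutor", executor);
    //   String report = TaskMetrics.getInstance().metricsString();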
} | 1,400 |
1,013 | /*!
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
*/
#include <gtest/gtest.h>
#include <pyclustering/cluster/rock.hpp>
#include "samples.hpp"
#include "utenv_check.hpp"
using namespace pyclustering;
using namespace pyclustering::clst;
static void
template_length_process_data(const std::shared_ptr<dataset> & p_data,
const double p_radius,
const size_t p_cluster_amount,
const double p_threshold,
const std::vector<size_t> & p_expected_cluster_length) {
rock_data output_result;
rock solver(p_radius, p_cluster_amount, p_threshold);
solver.process(*p_data, output_result);
const dataset & data = *p_data;
const cluster_sequence & actual_clusters = output_result.clusters();
ASSERT_CLUSTER_SIZES(data, actual_clusters, p_expected_cluster_length);
}
TEST(utest_rock, allocation_sample_simple_01) {
const std::vector<size_t> expected_clusters_length = { 5, 5 };
template_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 1.0, 2, 0.5, expected_clusters_length);
}
TEST(utest_rock, allocation_sample_one_allocation_simple_01) {
const std::vector<size_t> expected_clusters_length = { 10 };
template_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 5.0, 1, 0.5, expected_clusters_length);
}
TEST(utest_rock, allocation_sample_simple_02) {
const std::vector<size_t> expected_clusters_length = { 10, 5, 8 };
template_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 1.0, 3, 0.5, expected_clusters_length);
}
TEST(utest_rock, allocation_one_allocation_sample_simple_02) {
const std::vector<size_t> expected_clusters_length = { 23 };
template_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 5.0, 1, 0.5, expected_clusters_length);
}
TEST(utest_rock, allocation_sample_simple_03) {
const std::vector<size_t> expected_clusters_length = { 10, 10, 10, 30 };
template_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_03), 1.0, 4, 0.5, expected_clusters_length);
}
TEST(utest_rock, allocation_wrong_radius_sample_simple_03) {
const std::vector<size_t> expected_clusters_length = { 10, 10, 10, 30 };
template_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_03), 1.7, 4, 0.5, expected_clusters_length);
}
TEST(utest_rock, allocation_sample_simple_04) {
const std::vector<size_t> expected_clusters_length = { 15, 15, 15, 15, 15 };
template_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_04), 1.0, 5, 0.5, expected_clusters_length);
}
TEST(utest_rock, allocation_wrong_radius_sample_simple_04) {
const std::vector<size_t> expected_clusters_length = { 15, 15, 15, 15, 15 };
template_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_04), 1.5, 5, 0.5, expected_clusters_length);
}
TEST(utest_rock, allocation_sample_simple_05) {
const std::vector<size_t> expected_clusters_length = { 15, 15, 15, 15 };
template_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_05), 1.0, 4, 0.5, expected_clusters_length);
}
TEST(utest_rock, allocation_sample_simple_07) {
const std::vector<size_t> expected_clusters_length = { 10, 10 };
template_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_07), 1.0, 2, 0.5, expected_clusters_length);
}
#ifndef VALGRIND_ANALYSIS_SHOCK
TEST(utest_rock, allocation_sample_simple_08) {
const std::vector<size_t> expected_clusters_length = { 15, 30, 20, 80 };
template_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_08), 1.0, 4, 0.5, expected_clusters_length);
}
#endif
| 1,498 |
496 | <filename>prescription_generator/helper/pdf_operations.py
from fpdf import FPDF
class PDF(FPDF):
def footer(self):
# Position at 1.5 cm from bottom
self.set_y(-15)
# Arial italic 8
self.set_font('Arial', 'I', 8)
# Text color in gray
self.set_text_color(128)
# Page number
self.cell(0, 10, 'Page ' + str(self.page_no()), 0, 0, 'C')
def save_pdf(medicines):
pdf = PDF()
# Add a page
pdf.add_page()
# setting style and size of font for the pdf
pdf.set_font("Arial", size=12)
pdf.cell(
200, 10,
txt="Generated Prscription",
ln=1, align='C'
)
for medic in medicines:
if ('Medicine Name' in medicines[medic]):
# create a cell
pdf.cell(
200, 10,
ln=1, align='C',
txt=medic
)
pdf.cell(
200, 10,
ln=2,
txt="Medicine Name: " + medicines[medic]["Medicine Name"],
)
if "Instruction" in medicines[medic]:
pdf.cell(
200, 10,
ln=2,
txt="Instructions: " + medicines[medic]["Instruction"]
)
else:
pdf.cell(
200, 10,
ln=2,
txt="Instructions*: No Instructions given"
)
    # save the PDF with the name Prescription.pdf
pdf.output("Prescription.pdf")
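
# Hypothetical usage; the structure of `medicines` is inferred from the loop above.
if __name__ == "__main__":
    sample_medicines = {
        "Morning dose": {"Medicine Name": "Paracetamol 500mg", "Instruction": "Take after breakfast"},
        "Night dose": {"Medicine Name": "Cetirizine 10mg"},
    }
    save_pdf(sample_medicines)  # writes Prescription.pdf to the working directory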
| 864 |
799 | <filename>src/nicelee/ui/item/DownloadInfoPanel.java
package nicelee.ui.item;
import java.awt.Color;
import java.awt.Desktop;
import java.awt.Dimension;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.File;
import javax.swing.BorderFactory;
import javax.swing.JButton;
import javax.swing.JLabel;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import nicelee.bilibili.INeedAV;
import nicelee.bilibili.downloaders.Downloader;
import nicelee.bilibili.enums.StatusEnum;
import nicelee.bilibili.model.ClipInfo;
import nicelee.bilibili.util.CmdUtil;
import nicelee.bilibili.util.Logger;
import nicelee.bilibili.util.RepoUtil;
import nicelee.ui.Global;
import nicelee.ui.TabDownload;
public class DownloadInfoPanel extends JPanel implements ActionListener {
    String avTitle; // original AV title
    String clipTitle; // original clip title
String avid;
String cid;
int page;
int remark;
int qn;
int realqn;
    // Download-related fields
public INeedAV iNeedAV;
public String url;
public String avid_qn;
public String formattedTitle;
public boolean stopOnQueue = false;
int failCnt = 0;
public int getFailCnt() {
return failCnt;
}
public void setFailCnt(int failCnt) {
this.failCnt = failCnt;
}
long lastCntTime = 0L;
long lastCnt = 0L;
/**
*
*/
private static final long serialVersionUID = -752743062676819402L;
String path;
String fileName;
long totalSize;
long currentDown;
boolean isdownloading = true;
JButton btnRemove;
JButton btnOpen;
JButton btnOpenFolder;
JButton btnControl;
JLabel lbCurrentStatus;
JLabel lbDownFile;
JLabel lbFileName;
JLabel lbavName;
public DownloadInfoPanel(ClipInfo clip, int qn) {
this.avTitle = clip.getAvTitle();
this.clipTitle = clip.getAvTitle();
this.avid = clip.getAvId();
this.cid = Long.toString(clip.getcId());
this.page = clip.getPage();
this.remark = clip.getRemark();
this.qn = qn;
path = "D:\\bilibiliDown\\";
fileName = "timg.gif";
totalSize = 0L;
currentDown = 0L;
initUI(this);
}
void initUI(DownloadInfoPanel dp) {
// this.setOpaque(false);
this.setBorder(BorderFactory.createLineBorder(Color.red));
this.setPreferredSize(new Dimension(1100, 120));
lbFileName = new JLabel("尚未生成");
lbFileName.setPreferredSize(new Dimension(600, 45));
lbFileName.setBorder(BorderFactory.createLineBorder(Color.red));
// lbFileName.addMouseListener(new MouseListener() {
// Color lightGreen = new Color(153, 214, 92);
// Color lightRed = new Color(255, 71, 10);
// Color lightPink = new Color(255, 122, 122);
// Color lightOrange = new Color(255, 207, 61);
// int cnt = 0;
// Color[] colors = {null, lightGreen, lightRed, lightPink, lightOrange};
// @Override
// public void mouseReleased(MouseEvent e) {
// }
// @Override
// public void mousePressed(MouseEvent e) {
// cnt = (cnt + 1)%colors.length;
// dp.setBackground(colors[cnt]);
// }
// @Override
// public void mouseExited(MouseEvent e) {
// }
// @Override
// public void mouseEntered(MouseEvent e) {
// }
// @Override
// public void mouseClicked(MouseEvent e) {
// }
// });
this.add(lbFileName);
btnOpen = new MJButton("打开文件");
btnOpen.setPreferredSize(new Dimension(100, 45));
btnOpen.addActionListener(this);
this.add(btnOpen);
btnOpenFolder = new MJButton("打开文件夹");
btnOpenFolder.setPreferredSize(new Dimension(100, 45));
btnOpenFolder.addActionListener(this);
this.add(btnOpenFolder);
btnRemove = new MJButton("删除任务");
btnRemove.setPreferredSize(new Dimension(100, 45));
btnRemove.addActionListener(this);
this.add(btnRemove);
JLabel blank = new JLabel();
blank.setPreferredSize(new Dimension(100, 45));
this.add(blank);
lbavName = new JLabel(avTitle);
lbavName.setToolTipText(avTitle);
lbavName.setPreferredSize(new Dimension(500, 45));
lbavName.setBorder(BorderFactory.createLineBorder(Color.red));
this.add(lbavName);
lbCurrentStatus = new JLabel("正在下载...");
lbCurrentStatus.setPreferredSize(new Dimension(200, 45));
lbCurrentStatus.setBorder(BorderFactory.createLineBorder(Color.red));
this.add(lbCurrentStatus);
lbDownFile = new JLabel(currentDown + "/" + totalSize);
lbDownFile.setPreferredSize(new Dimension(200, 45));
lbDownFile.setBorder(BorderFactory.createLineBorder(Color.red));
this.add(lbDownFile);
this.setBackground(new Color(204, 255, 255));
btnControl = new MJButton("暂停");
btnControl.setPreferredSize(new Dimension(100, 45));
btnControl.addActionListener(this);
this.add(btnControl);
}
@Override
public void actionPerformed(ActionEvent e) {
if (e.getSource() == btnOpenFolder) {
File file = new File(lbFileName.getText());
String os = System.getProperty("os.name");
try {
if (file.exists() && os.toLowerCase().startsWith("win")) {
                // Open the folder and select the file
String cmd[] = { "explorer", "/e,/select,", file.getAbsolutePath() };
Runtime.getRuntime().exec(cmd);
} else if(file.exists()){
Desktop desktop = Desktop.getDesktop();
desktop.open(file);
} else {
Desktop desktop = Desktop.getDesktop();
desktop.open(file.getParentFile());
}
} catch (Exception e1) {
JOptionPane.showMessageDialog(null, "打开文件夹失败!", "失败", JOptionPane.INFORMATION_MESSAGE);
}
} else if (e.getSource() == btnOpen) {
File file = new File(lbFileName.getText());
try {
Desktop.getDesktop().open(file);
} catch (Exception e1) {
// e1.printStackTrace();
JOptionPane.showMessageDialog(null, "打开文件失败!", "失败", JOptionPane.INFORMATION_MESSAGE);
}
} else if (e.getSource() == btnRemove) {
// if(Global.downloadTaskList.get(this).getStatus() == 0) {
// JOptionPane.showMessageDialog(this, "当前正在文件下载中!", "警告", JOptionPane.WARNING_MESSAGE);
// }
if(TabDownload.isStopAll()) {
Logger.println("停止任务中,请误操作");
return;
}
removeTask(true);
} else if (e.getSource() == btnControl) {
if(TabDownload.isStopAll()) {
Logger.println("停止任务中,请误操作");
return;
}
StatusEnum status = iNeedAV.getDownloader().currentStatus();
if (status == StatusEnum.DOWNLOADING) {
stopTask();
} else {
setFailCnt(0);
continueTask();
}
}
}
    /**
     * Initialization work before the download starts
     */
public void initDownloadParams(INeedAV iNeedAV, String url, String avid_qn, String formattedTitle, int realqn) {
this.iNeedAV = iNeedAV;
this.avid_qn = avid_qn;
this.formattedTitle = formattedTitle;
this.url = url;
this.realqn = realqn;
this.lbavName.setText(formattedTitle);
this.lbavName.setToolTipText(formattedTitle);
this.stopOnQueue = false;
}
    /**
     * Stop the task (status checks are handled inside the method)
     */
public void stopTask() {
Downloader downloader = iNeedAV.getDownloader();
downloader.stopTask();
stopOnQueue = true;
}
    /**
     * Resume the task (status checks are handled inside the method)
     */
public void continueTask() {
stopOnQueue = false;
String record = avid_qn + "-p" + page;
Downloader downloader = iNeedAV.getDownloader();
final DownloadInfoPanel dp = this;
        // If the task is already downloading or finished, no new download is needed
StatusEnum status = downloader.currentStatus();
if (status != StatusEnum.DOWNLOADING && status != StatusEnum.SUCCESS && status != StatusEnum.PROCESSING) {
downloader.startTask();
Global.downLoadThreadPool.execute(new Runnable() {
@Override
public void run() {
if(downloader.currentStatus() == StatusEnum.NONE && dp.stopOnQueue) {
Logger.println("已经删除等待队列,无需再下载");
return;
}
if (downloader.currentStatus() == StatusEnum.STOP) {
Logger.println("已经人工停止,无需再下载");
return;
}
Logger.println("预期下载清晰度:" + qn + "实际清晰度:" + realqn);
                    // Start the download
if (downloader.download(url, avid, realqn, page)) {
                        // After a successful download, record it in the repository
if (Global.saveToRepo) {
RepoUtil.appendAndSave(record);
}
CmdUtil.convertOrAppendCmdToRenameBat(avid_qn, formattedTitle, page);
}
}
});
}
}
    /**
     * Remove the task
     */
public void removeTask(boolean deleteAll) {
        // Remove everything, or only remove tasks that have already finished
        // Status codes: 0 downloading; 1 finished; -1 error; -2 manually stopped; -3 queued
if (deleteAll || iNeedAV.getDownloader().currentStatus() == StatusEnum.SUCCESS) {
this.stopOnQueue = true;
            // Stop the download
Global.downloadTaskList.get(this).stopTask();
            // Remove the task from global tracking
Global.downloadTaskList.remove(this);
            // Remove this panel from the current page
Global.downloadTab.getJpContent().remove(this);
            // Re-fit the content pane size
Global.downloadTab.getJpContent()
.setPreferredSize(new Dimension(1100, 128 * Global.downloadTaskList.size()));
Global.downloadTab.getJpContent().updateUI();
Global.downloadTab.getJpContent().repaint();
            // Delete the partially downloaded file
File file = new File(lbFileName.getText() + ".part");
if (file.exists()) {
file.delete();
}
}
}
public JLabel getLbCurrentStatus() {
return lbCurrentStatus;
}
public void setLbCurrentStatus(JLabel lbCurrentStatus) {
this.lbCurrentStatus = lbCurrentStatus;
}
public JLabel getLbDownFile() {
return lbDownFile;
}
public void setLbDownFile(JLabel lbDownFile) {
this.lbDownFile = lbDownFile;
}
public JLabel getLbFileName() {
return lbFileName;
}
public void setLbFileName(JLabel lbFileName) {
this.lbFileName = lbFileName;
}
public JButton getBtnControl() {
return btnControl;
}
public void setBtnControl(JButton btnControl) {
this.btnControl = btnControl;
}
@Override
public int hashCode() {
return (avid + page).hashCode();
}
@Override
public boolean equals(Object obj) {
// System.out.println("DownloadInfoPanel - equals:");
if (obj instanceof DownloadInfoPanel) {
DownloadInfoPanel down = (DownloadInfoPanel) obj;
return (avid.equals(down.avid) && page == down.page);
}
return false;
}
public long getLastCntTime() {
return lastCntTime;
}
public void setLastCntTime(long lastCntTime) {
this.lastCntTime = lastCntTime;
}
public long getLastCnt() {
return lastCnt;
}
public void setLastCnt(long lastCnt) {
this.lastCnt = lastCnt;
}
public String getAvid() {
return avid;
}
public void setAvid(String avid) {
this.avid = avid;
}
}
| 4,812 |
530 | package org.carlspring.strongbox.users.service;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import org.carlspring.strongbox.config.DataServiceConfig;
import org.carlspring.strongbox.config.UsersConfig;
import org.carlspring.strongbox.domain.UserEntry;
import org.carlspring.strongbox.users.domain.SystemRole;
import org.carlspring.strongbox.users.userdetails.SpringSecurityUser;
import org.carlspring.strongbox.users.userdetails.UserDetailsMapper;
import javax.inject.Inject;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.ContextConfiguration;
import com.google.common.collect.Sets;
/**
* @author ankit.tomar
*/
@SpringBootTest
@ActiveProfiles(profiles = "test")
@ContextConfiguration(classes = { DataServiceConfig.class,
UsersConfig.class })
public class UserDetailsMapperTest
{
@Inject
private UserDetailsMapper userDetailsMapper;
@Test
public void testEncodedPasswordUserWithPasswordEncodingAlgoPrefix()
{
UserEntry user = new UserEntry();
user.setUsername("test-user");
user.setPassword("{<PASSWORD>");
user.setRoles(Sets.newHashSet(SystemRole.REPOSITORY_MANAGER.name()));
user.setEnabled(true);
SpringSecurityUser securityUser = userDetailsMapper.apply(user);
assertNotNull(securityUser);
assertEquals(securityUser.getUsername(), "test-user");
assertEquals(securityUser.getPassword(),
"{<PASSWORD>");
assertNotNull(securityUser.getRoles());
}
@Test
public void testEncodedPasswordUserWithoutPasswordEncodingAlgoPrefix()
{
UserEntry user = new UserEntry();
user.setUsername("test-user");
user.setPassword("<PASSWORD>");
user.setRoles(Sets.newHashSet(SystemRole.REPOSITORY_MANAGER.name()));
user.setEnabled(true);
SpringSecurityUser securityUser = userDetailsMapper.apply(user);
assertNotNull(securityUser);
assertEquals(securityUser.getUsername(), "test-user");
assertEquals(securityUser.getPassword(),
"{<PASSWORD>");
assertNotNull(securityUser.getRoles());
}
}
| 908 |
1,127 | <filename>tools/mo/openvino/tools/mo/middle/PoolV2ToAttributedPool.py<gh_stars>1000+
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.graph.graph import Graph, rename_nodes
from openvino.tools.mo.middle.replacement import MiddleReplacementPattern
from openvino.tools.mo.ops.pooling import Pooling
class PoolV2ToAttributedPool(MiddleReplacementPattern):
enabled = True
def find_and_replace_pattern(self, graph: Graph):
for pool_v2_node in graph.get_op_nodes(op='PoolingV2'):
pool_v2_name = pool_v2_node.soft_get('name', pool_v2_node.id)
pool_v1_node = Pooling(graph, {'window': pool_v2_node.in_port(1).data.get_value(),
'stride': pool_v2_node.in_port(2).data.get_value(),
'pad': pool_v2_node.pad,
'spatial_dims': pool_v2_node.spatial_dims,
'auto_pad': pool_v2_node.auto_pad,
'output_spatial_shape': pool_v2_node.output_spatial_shape,
'pad_spatial_shape': pool_v2_node.pad_spatial_shape,
'pool_method': pool_v2_node.pool_method,
'permute_attrs': pool_v2_node.permute_attrs,}).create_node()
rename_nodes([(pool_v2_node, pool_v2_name + '/to_be_removed'), (pool_v1_node, pool_v2_name)])
pool_v2_node.in_port(0).get_connection().set_destination(pool_v1_node.in_port(0))
pool_v2_node.out_port(0).get_connection().set_source(pool_v1_node.out_port(0))
| 931 |
761 | <reponame>NTForked/GridFluidSim3D
#ifndef DIFFUSEPARTICLE_T_H
#define DIFFUSEPARTICLE_T_H
#include "vector3_c.h"
typedef struct DiffuseParticle_t {
Vector3_t position;
Vector3_t velocity;
float lifetime;
char type;
} DiffuseParticle_t;
#endif | 109 |
363 | <reponame>roomanl/AndroidDownload
package cn.sddman.download.mvp.v;
public interface PlayerView {
void initPlayer();
void openVideo(String path);
void playPause();
void setTimeTextView(int currentPlayTimeMs, int durationTimeMs);
void updateUIPlayStation(int currentPlayTimeMs, int durationTimeMs);
boolean ismIsNeedUpdateUIProgress();
boolean ismIsTouchingSeekbar();
void controlViewToggle();
void controlViewShow();
void controlViewHide();
void setVideoTile(String name);
void userSeekPlayProgress(int currentPlayTimeMs);
}
| 190 |
998 | // Copyright 2021 Phyronnaz
#pragma once
#include "CoreMinimal.h"
#include "VoxelFastNoiseBase.h"
template<typename T>
class TVoxelFastNoise_WhiteNoise : public T
{
public:
DEFINE_VOXEL_NOISE_CLASS()
v_flt GetWhiteNoise_2D(v_flt x, v_flt y) const;
v_flt GetWhiteNoiseInt_2D(int32 x, int32 y) const;
v_flt GetWhiteNoise_3D(v_flt x, v_flt y, v_flt z) const;
v_flt GetWhiteNoiseInt_3D(int32 x, int32 y, int32 z) const;
v_flt GetWhiteNoise_4D(v_flt x, v_flt y, v_flt z, v_flt w) const;
v_flt GetWhiteNoiseInt_4D(int32 x, int32 y, int32 z, int32 w) const;
}; | 299 |
726 | # Bresenham Line Algorithm (BLA) is one of the earliest algorithms developed
# in computer graphics. It is used for drawing lines. It is an efficient method because
# it involves only integer addition, subtraction, and multiplication operations.
# These operations can be performed very rapidly so lines can be generated quickly.
# Reference: http://floppsie.comp.glam.ac.uk/Southwales/gaius/gametools/6.html
# Algorithm:
# 1. We are given the starting and ending point (x1, y1) and (x2, y2)
# 2. We compute the gradient m, using the formula: m = (y2-y1)/(x2-x1)
# 3. The equation of the straight line is y = m*x+c. So the next thing we need to find is the intercept c
# 4. Intercept can be derived using the formula c = y1 - m*x1
# 5. To get the next point, we always advance the x-coordinate by 1, and advance the y-coordinate by 1 only when the accumulated decision value becomes non-negative
# 6. We continue this cycle until we reach (x2, y2)
def lineGenerator(x1, y1, x2, y2):
    dx = x2 - x1
    dy = y2 - y1
    # Decision value: starts at 2*dy - dx and tracks how far we drift from the ideal line
    slope = 2*dy - dx
    x = x1
    y = y1
    while x < x2:
        # Print current coordinates
        print(x, y)
        # x always advances by one
        x += 1
        # 2*dy is added to the decision value on every step
        slope += 2*dy
        # When the decision value becomes non-negative, also step in y
        if slope >= 0:
            y += 1
            slope -= 2 * (x2 - x1)
# lineGenerator(3, 2, 15, 5)
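# Example run (illustrative). For the segment (3, 2) -> (15, 5) the generator prints one
# point per x step: (3, 2), (4, 3), (5, 3), (6, 3), (7, 3), (8, 4), ... Note that, as
# written, the loop stops before plotting the endpoint (15, 5) because the condition is x < x2.
if __name__ == "__main__":
    lineGenerator(3, 2, 15, 5)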
# # P1 is the point given. Initial point
# # P2 is the point to reach. Final point
# if P1[0] == P2[0] and P1[1] == P2[1]:
# return 0
# print(P1)
# #Check if the point is above or below the line.
# dx = P2[0]-P1[0]
# dy = P2[1]-P1[0]
# di = 2*dy - dx
# currX = P1[0]
# currY = P1[1]
# if di > 0:
# P1 = (currX+1, currY+1)
# else:
# P1 = (currX+1, currY)
# return lineGenerator(P1, P2)
| 704 |
2,338 | //===- TapiUniversal.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Text-based Dynamic Library Stub format.
//
//===----------------------------------------------------------------------===//
#include "llvm/Object/TapiUniversal.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/TextAPI/TextAPIReader.h"
using namespace llvm;
using namespace MachO;
using namespace object;
TapiUniversal::TapiUniversal(MemoryBufferRef Source, Error &Err)
: Binary(ID_TapiUniversal, Source) {
Expected<std::unique_ptr<InterfaceFile>> Result = TextAPIReader::get(Source);
ErrorAsOutParameter ErrAsOuParam(&Err);
if (!Result) {
Err = Result.takeError();
return;
}
ParsedFile = std::move(Result.get());
auto FlattenObjectInfo = [this](const auto &File) {
StringRef Name = File->getInstallName();
for (const Architecture Arch : File->getArchitectures())
Libraries.emplace_back(Library({Name, Arch}));
};
FlattenObjectInfo(ParsedFile);
// Get inlined documents from tapi file.
for (const std::shared_ptr<InterfaceFile> &File : ParsedFile->documents())
FlattenObjectInfo(File);
}
TapiUniversal::~TapiUniversal() = default;
Expected<std::unique_ptr<TapiFile>>
TapiUniversal::ObjectForArch::getAsObjectFile() const {
return std::unique_ptr<TapiFile>(new TapiFile(Parent->getMemoryBufferRef(),
*Parent->ParsedFile.get(),
Parent->Libraries[Index].Arch));
}
Expected<std::unique_ptr<TapiUniversal>>
TapiUniversal::create(MemoryBufferRef Source) {
Error Err = Error::success();
std::unique_ptr<TapiUniversal> Ret(new TapiUniversal(Source, Err));
if (Err)
return std::move(Err);
return std::move(Ret);
}
| 734 |
1,093 | <filename>lanzou/gui/dialogs/rename.py
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QDialog, QLabel, QGridLayout, QDialogButtonBox, QLineEdit, QTextEdit
from lanzou.gui.qss import dialog_qss_style
from lanzou.debug import SRC_DIR
class RenameDialog(QDialog):
out = pyqtSignal(object)
def __init__(self, parent=None):
super(RenameDialog, self).__init__(parent)
self.infos = []
self.min_width = 400
self.initUI()
self.update_text()
self.setStyleSheet(dialog_qss_style)
def set_values(self, infos=None):
self.infos = infos or []
        self.update_text()  # refresh the UI
def initUI(self):
self.setWindowIcon(QIcon(SRC_DIR + "desc.ico"))
self.lb_name = QLabel()
self.lb_name.setText("文件夹名:")
self.lb_name.setAlignment(Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter)
self.tx_name = QLineEdit()
self.lb_desc = QLabel()
self.tx_desc = QTextEdit()
self.lb_desc.setText("描 述:")
self.lb_desc.setAlignment(Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter)
self.buttonBox = QDialogButtonBox()
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
self.buttonBox.button(QDialogButtonBox.Ok).setText("确定")
self.buttonBox.button(QDialogButtonBox.Cancel).setText("取消")
self.grid = QGridLayout()
self.grid.setSpacing(10)
self.grid.addWidget(self.lb_name, 1, 0)
self.grid.addWidget(self.tx_name, 1, 1)
self.grid.addWidget(self.lb_desc, 2, 0)
self.grid.addWidget(self.tx_desc, 2, 1, 5, 1)
self.grid.addWidget(self.buttonBox, 7, 1, 1, 1)
self.setLayout(self.grid)
self.buttonBox.accepted.connect(self.btn_ok)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def update_text(self):
self.tx_desc.setFocus()
num = len(self.infos)
if num == 1:
self.lb_name.setVisible(True)
self.tx_name.setVisible(True)
infos = self.infos[0]
            self.buttonBox.button(QDialogButtonBox.Ok).setToolTip("")  # clear leftovers from new-folder mode
            self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)  # clear leftovers from new-folder mode
self.setWindowTitle("修改文件夹名与描述")
self.tx_name.setText(str(infos.name))
if infos.desc:
self.tx_desc.setText(str(infos.desc))
self.tx_desc.setToolTip('原描述:' + str(infos.desc))
else:
self.tx_desc.setText("")
self.tx_desc.setToolTip('')
self.tx_desc.setPlaceholderText("无")
self.min_width = len(str(infos.name)) * 8
if infos.is_file:
self.setWindowTitle("修改文件描述")
self.tx_name.setFocusPolicy(Qt.NoFocus)
self.tx_name.setReadOnly(True)
else:
self.tx_name.setFocusPolicy(Qt.StrongFocus)
self.tx_name.setReadOnly(False)
self.tx_name.setFocus()
elif num > 1:
self.lb_name.setVisible(False)
self.tx_name.setVisible(False)
self.setWindowTitle(f"批量修改{num}个文件(夹)的描述")
self.tx_desc.setText('')
self.tx_desc.setPlaceholderText("建议160字数以内。")
else:
self.setWindowTitle("新建文件夹")
self.tx_name.setText("")
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Ok).setToolTip("请先输入文件名!")
self.tx_name.textChanged.connect(self.slot_new_ok_btn)
self.tx_name.setPlaceholderText("不支持空格,如有会被自动替换成 _")
self.tx_name.setFocusPolicy(Qt.StrongFocus)
self.tx_name.setReadOnly(False)
self.tx_desc.setPlaceholderText("可选项,建议160字数以内。")
self.tx_name.setFocus()
if self.min_width < 400:
self.min_width = 400
self.resize(self.min_width, 200)
def slot_new_ok_btn(self):
"""新建文件夹槽函数"""
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)
self.buttonBox.button(QDialogButtonBox.Ok).setToolTip("")
def btn_ok(self):
new_name = self.tx_name.text()
new_des = self.tx_desc.toPlainText()
info_len = len(self.infos)
        if info_len == 0:  # create a new folder under work_id
if new_name:
self.out.emit(("new", new_name, new_des))
elif info_len == 1:
if new_name != self.infos[0].name or new_des != self.infos[0].desc:
self.infos[0].new_des = new_des
self.infos[0].new_name = new_name
self.out.emit(("change", self.infos))
else:
if new_des:
for infos in self.infos:
infos.new_des = new_des
self.out.emit(("change", self.infos))
| 2,789 |
1,382 | <gh_stars>1000+
# !/usr/bin/env python
# coding: utf-8
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
"""Test mlbox.model.classification.stacking_classifier module."""
import pytest
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from mlbox.model.classification.stacking_classifier import StackingClassifier
def test_init_stacking_classifier():
"""Test init method of StackingClassifier class."""
with pytest.raises(ValueError):
stacking_classifier = StackingClassifier(base_estimators=dict())
with pytest.raises(ValueError):
stacking_classifier = StackingClassifier(n_folds=dict())
with pytest.raises(ValueError):
stacking_classifier = StackingClassifier(copy="True")
with pytest.raises(ValueError):
stacking_classifier = StackingClassifier(drop_first="True")
with pytest.raises(ValueError):
stacking_classifier = StackingClassifier(random_state="1")
with pytest.raises(ValueError):
stacking_classifier = StackingClassifier(verbose="True")
stacking_classifier = StackingClassifier()
assert len(stacking_classifier.base_estimators) == 3
assert isinstance(stacking_classifier.level_estimator,
type(LogisticRegression()))
assert stacking_classifier.n_folds == 5
assert not stacking_classifier.copy
assert stacking_classifier.drop_first
assert stacking_classifier.random_state == 1
assert stacking_classifier.verbose
assert not stacking_classifier._StackingClassifier__fitOK
assert not stacking_classifier._StackingClassifier__fittransformOK
def test_get_params_stacking_classifier():
"""Test get_params method StackingClassifier class."""
stacking_classifier = StackingClassifier()
dict = stacking_classifier.get_params()
assert len(dict["base_estimators"]) == 3
assert isinstance(dict["level_estimator"],
type(LogisticRegression()))
assert dict["n_folds"] == 5
assert not dict["copy"]
assert dict["drop_first"]
assert dict["random_state"] == 1
assert dict["verbose"]
def test_set_params_stacking_classifier():
"""Test set_params method of StackingClassifier class."""
stacking_classifier = StackingClassifier()
stacking_classifier.set_params(n_folds=6)
assert stacking_classifier.n_folds == 6
stacking_classifier.set_params(copy=True)
assert stacking_classifier.copy
stacking_classifier.set_params(drop_first=False)
assert not stacking_classifier.drop_first
stacking_classifier.set_params(random_state=2)
assert stacking_classifier.random_state == 2
stacking_classifier.set_params(verbose=False)
assert not stacking_classifier.verbose
with pytest.warns(UserWarning) as record:
stacking_classifier.set_params(wrong_parameters=None)
assert len(record) == 1
def test_fit_transform_stacking_classifier():
"""Test fit_transform method of StackingClassifier class."""
df_train = pd.read_csv("data_for_tests/clean_train.csv")
y_train = pd.read_csv("data_for_tests/clean_target.csv", squeeze=True)
stacking_classifier = StackingClassifier()
with pytest.raises(ValueError):
stacking_classifier.fit_transform(None, y_train)
with pytest.raises(ValueError):
stacking_classifier.fit_transform(df_train, None)
stacking_classifier.fit_transform(df_train, y_train)
assert stacking_classifier._StackingClassifier__fittransformOK
def test_transform_stacking_classifier():
"""Test transform method of StackingClassifier class."""
df_train = pd.read_csv("data_for_tests/clean_train.csv")
y_train = pd.read_csv("data_for_tests/clean_target.csv", squeeze=True)
df_test = pd.read_csv("data_for_tests/clean_test.csv")
stacking_classifier = StackingClassifier()
with pytest.raises(ValueError):
stacking_classifier.transform(None)
with pytest.raises(ValueError):
stacking_classifier.transform(df_test)
stacking_classifier.fit_transform(df_train, y_train)
results = stacking_classifier.transform(df_test)
assert len(results.columns == 3)
def test_fit_stacking_classifier():
"""Test fit method of StackingClassifier class."""
df_train = pd.read_csv("data_for_tests/clean_train.csv")
y_train = pd.read_csv("data_for_tests/clean_target.csv", squeeze=True)
stacking_classifier = StackingClassifier(verbose=True)
stacking_classifier.fit(df_train, y_train)
assert stacking_classifier._StackingClassifier__fitOK
def test_predict_proba_stacking_classifier():
"""Test predict_proba method of StackingClassifier class."""
df_train = pd.read_csv("data_for_tests/clean_train.csv")
y_train = pd.read_csv("data_for_tests/clean_target.csv", squeeze=True)
df_test = pd.read_csv("data_for_tests/clean_test.csv")
stacking_classifier = StackingClassifier()
with pytest.raises(ValueError):
stacking_classifier.predict_proba(df_test)
stacking_classifier.fit(df_train, y_train)
results = stacking_classifier.predict_proba(df_test)
assert np.shape(results) == (418, 2)
def test_predict_stacking_classifier():
"""Test predict method of StackingClassifier class."""
df_train = pd.read_csv("data_for_tests/clean_train.csv")
y_train = pd.read_csv("data_for_tests/clean_target.csv", squeeze=True)
df_test = pd.read_csv("data_for_tests/clean_test.csv")
stacking_classifier = StackingClassifier()
with pytest.raises(ValueError):
stacking_classifier.predict(df_test)
stacking_classifier.fit(df_train, y_train)
results = stacking_classifier.predict(df_test)
assert np.shape(results) == (418,)
| 2,069 |
14,668 | // Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/gwp_asan/client/sampling_malloc_shims.h"
#include <algorithm>
#include <utility>
#include "base/allocator/allocator_shim.h"
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/numerics/safe_math.h"
#include "base/process/process_metrics.h"
#include "base/rand_util.h"
#include "build/build_config.h"
#include "components/crash/core/common/crash_key.h"
#include "components/gwp_asan/client/export.h"
#include "components/gwp_asan/client/guarded_page_allocator.h"
#include "components/gwp_asan/client/sampling_state.h"
#include "components/gwp_asan/common/crash_key_name.h"
#if defined(OS_APPLE)
#include <pthread.h>
#endif
namespace gwp_asan {
namespace internal {
namespace {
using base::allocator::AllocatorDispatch;
// By being implemented as a global with inline method definitions, method calls
// and member accesses are inlined and as efficient as possible in the
// performance-sensitive allocation hot-path.
//
// Note that this optimization has not been benchmarked. However, since it is
// easy to do, there is no reason to pay the extra cost.
SamplingState<MALLOC> sampling_state;
// The global allocator singleton used by the shims. Implemented as a global
// pointer instead of a function-local static to avoid initialization checks
// for every access.
GuardedPageAllocator* gpa = nullptr;
void* AllocFn(const AllocatorDispatch* self, size_t size, void* context) {
if (UNLIKELY(sampling_state.Sample()))
if (void* allocation = gpa->Allocate(size))
return allocation;
return self->next->alloc_function(self->next, size, context);
}
void* AllocUncheckedFn(const AllocatorDispatch* self,
size_t size,
void* context) {
if (UNLIKELY(sampling_state.Sample()))
if (void* allocation = gpa->Allocate(size))
return allocation;
return self->next->alloc_unchecked_function(self->next, size, context);
}
void* AllocZeroInitializedFn(const AllocatorDispatch* self,
size_t n,
size_t size,
void* context) {
if (UNLIKELY(sampling_state.Sample())) {
base::CheckedNumeric<size_t> checked_total = size;
checked_total *= n;
if (UNLIKELY(!checked_total.IsValid()))
return nullptr;
size_t total_size = checked_total.ValueOrDie();
if (void* allocation = gpa->Allocate(total_size)) {
memset(allocation, 0, total_size);
return allocation;
}
}
return self->next->alloc_zero_initialized_function(self->next, n, size,
context);
}
void* AllocAlignedFn(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context) {
if (UNLIKELY(sampling_state.Sample()))
if (void* allocation = gpa->Allocate(size, alignment))
return allocation;
return self->next->alloc_aligned_function(self->next, alignment, size,
context);
}
void* ReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
if (UNLIKELY(!address))
return AllocFn(self, size, context);
if (LIKELY(!gpa->PointerIsMine(address)))
return self->next->realloc_function(self->next, address, size, context);
if (!size) {
gpa->Deallocate(address);
return nullptr;
}
void* new_alloc = gpa->Allocate(size);
if (!new_alloc)
new_alloc = self->next->alloc_function(self->next, size, context);
if (!new_alloc)
return nullptr;
memcpy(new_alloc, address, std::min(size, gpa->GetRequestedSize(address)));
gpa->Deallocate(address);
return new_alloc;
}
void FreeFn(const AllocatorDispatch* self, void* address, void* context) {
if (UNLIKELY(gpa->PointerIsMine(address)))
return gpa->Deallocate(address);
self->next->free_function(self->next, address, context);
}
size_t GetSizeEstimateFn(const AllocatorDispatch* self,
void* address,
void* context) {
if (UNLIKELY(gpa->PointerIsMine(address)))
return gpa->GetRequestedSize(address);
return self->next->get_size_estimate_function(self->next, address, context);
}
unsigned BatchMallocFn(const AllocatorDispatch* self,
size_t size,
void** results,
unsigned num_requested,
void* context) {
// The batch_malloc() routine is esoteric and only accessible for the system
// allocator's zone, GWP-ASan interception is not provided.
return self->next->batch_malloc_function(self->next, size, results,
num_requested, context);
}
void BatchFreeFn(const AllocatorDispatch* self,
void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
// A batch_free() hook is implemented because it is imperative that we never
// call free() with a GWP-ASan allocation.
for (size_t i = 0; i < num_to_be_freed; i++) {
if (UNLIKELY(gpa->PointerIsMine(to_be_freed[i]))) {
// If this batch includes guarded allocations, call free() on all of the
// individual allocations to ensure the guarded allocations are handled
// correctly.
for (size_t j = 0; j < num_to_be_freed; j++)
FreeFn(self, to_be_freed[j], context);
return;
}
}
self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
context);
}
void FreeDefiniteSizeFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
if (UNLIKELY(gpa->PointerIsMine(address))) {
// TODO(vtsyrklevich): Perform this check in GuardedPageAllocator and report
// failed checks using the same pipeline.
CHECK_EQ(size, gpa->GetRequestedSize(address));
gpa->Deallocate(address);
return;
}
self->next->free_definite_size_function(self->next, address, size, context);
}
static void* AlignedMallocFn(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context) {
if (UNLIKELY(sampling_state.Sample()))
if (void* allocation = gpa->Allocate(size, alignment))
return allocation;
return self->next->aligned_malloc_function(self->next, size, alignment,
context);
}
static void* AlignedReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
size_t alignment,
void* context) {
if (UNLIKELY(!address))
return AlignedMallocFn(self, size, alignment, context);
if (LIKELY(!gpa->PointerIsMine(address)))
return self->next->aligned_realloc_function(self->next, address, size,
alignment, context);
if (!size) {
gpa->Deallocate(address);
return nullptr;
}
void* new_alloc = gpa->Allocate(size, alignment);
if (!new_alloc)
new_alloc = self->next->aligned_malloc_function(self->next, size, alignment,
context);
if (!new_alloc)
return nullptr;
memcpy(new_alloc, address, std::min(size, gpa->GetRequestedSize(address)));
gpa->Deallocate(address);
return new_alloc;
}
static void AlignedFreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
if (UNLIKELY(gpa->PointerIsMine(address)))
return gpa->Deallocate(address);
self->next->aligned_free_function(self->next, address, context);
}
AllocatorDispatch g_allocator_dispatch = {
&AllocFn,
&AllocUncheckedFn,
&AllocZeroInitializedFn,
&AllocAlignedFn,
&ReallocFn,
&FreeFn,
&GetSizeEstimateFn,
&BatchMallocFn,
&BatchFreeFn,
&FreeDefiniteSizeFn,
&AlignedMallocFn,
&AlignedReallocFn,
&AlignedFreeFn,
nullptr /* next */
};
} // namespace
// We expose the allocator singleton for unit tests.
GWP_ASAN_EXPORT GuardedPageAllocator& GetMallocGpaForTesting() {
return *gpa;
}
void InstallMallocHooks(size_t max_allocated_pages,
size_t num_metadata,
size_t total_pages,
size_t sampling_frequency,
GuardedPageAllocator::OutOfMemoryCallback callback) {
static crash_reporter::CrashKeyString<24> malloc_crash_key(kMallocCrashKey);
gpa = new GuardedPageAllocator();
gpa->Init(max_allocated_pages, num_metadata, total_pages, std::move(callback),
false);
malloc_crash_key.Set(gpa->GetCrashKey());
sampling_state.Init(sampling_frequency);
base::allocator::InsertAllocatorDispatch(&g_allocator_dispatch);
}
} // namespace internal
bool IsGwpAsanMallocAllocation(const void* ptr) {
return internal::gpa && internal::gpa->PointerIsMine(ptr);
}
} // namespace gwp_asan
| 4,080 |
1,343 | <filename>core/include/ThreadPool.hpp
#ifndef ANIME4KCPP_CORE_THREADPOOL_HPP
#define ANIME4KCPP_CORE_THREADPOOL_HPP
#include<algorithm>
#include<thread>
#include<mutex>
#include<condition_variable>
#include<functional>
#include<future>
#include<memory>
#include<queue>
#include<vector>
#include<cstddef>
namespace Anime4KCPP::Utils
{
class ThreadPool;
}
class Anime4KCPP::Utils::ThreadPool
{
public:
explicit ThreadPool(std::size_t maxThreadCount);
~ThreadPool();
template<typename F>
void exec(F&& f);
template<typename F, typename... Args>
auto exec(F&& f, Args&&... args);
private:
std::vector<std::thread> threads;
std::queue<std::function<void()>> tasks;
std::condition_variable cnd;
std::mutex mtx;
bool stop;
};
inline Anime4KCPP::Utils::ThreadPool::ThreadPool(std::size_t maxThreadCount)
:stop(false)
{
threads.reserve(maxThreadCount);
    for (std::size_t i = 0; i < maxThreadCount; ++i)
threads.emplace_back([this]()
{
for (;;)
{
std::unique_lock<std::mutex> lock(mtx);
cnd.wait(lock, [this]
{
return stop || !tasks.empty();
});
if (stop && tasks.empty())
return;
auto task = std::move(tasks.front());
tasks.pop();
lock.unlock();
task();
}
});
}
inline Anime4KCPP::Utils::ThreadPool::~ThreadPool()
{
{
const std::lock_guard<std::mutex> lock(mtx);
stop = true;
}
cnd.notify_all();
std::for_each(threads.begin(), threads.end(), std::mem_fn(&std::thread::join));
}
template<typename F>
inline void Anime4KCPP::Utils::ThreadPool::exec(F&& f)
{
{
const std::lock_guard<std::mutex> lock(mtx);
tasks.emplace(std::forward<F>(f));
}
cnd.notify_one();
}
template<typename F, typename ...Args>
inline auto Anime4KCPP::Utils::ThreadPool::exec(F&& f, Args && ...args)
{
auto task = std::make_shared<std::packaged_task<decltype(std::declval<F>()(std::declval<Args>()...))()>>(
std::bind(std::forward<F>(f), std::forward<Args>(args)...));
auto ret = task->get_future();
{
const std::lock_guard<std::mutex> lock(mtx);
tasks.emplace([task]() { (*task)(); });
}
cnd.notify_one();
return ret;
}
#endif // !ANIME4KCPP_CORE_THREADPOOL_HPP
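// Usage sketch (illustrative only, not part of this header):
//   Anime4KCPP::Utils::ThreadPool pool(std::thread::hardware_concurrency());
//   auto fut = pool.exec([](int a, int b) { return a + b; }, 1, 2); // returns std::future<int>
//   int sum = fut.get(); // sum == 3
//   pool.exec([] { /* fire-and-forget task */ });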
| 1,240 |
11,094 | <reponame>microsoft/ai-edu
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import numpy as np
from HelperClass.NeuralNet_1_2 import *
file_name = "../../data/ch07.npz"
def inference(net, reader):
xt_raw = np.array([5,1,7,6,5,6,2,7]).reshape(4,2)
xt = reader.NormalizePredicateData(xt_raw)
output = net.inference(xt)
r = np.argmax(output, axis=1)+1
print("output=", output)
print("r=", r)
# main program
if __name__ == '__main__':
num_category = 3
reader = DataReader_1_3(file_name)
reader.ReadData()
reader.NormalizeX()
reader.ToOneHot(num_category, base=1)
num_input = 2
params = HyperParameters_1_1(num_input, num_category, eta=0.1, max_epoch=100, batch_size=10, eps=1e-3, net_type=NetType.MultipleClassifier)
net = NeuralNet_1_2(params)
net.train(reader, checkpoint=1)
inference(net, reader)
| 394 |
1,232 | <filename>tree/Yu/543.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: <NAME>
# ****************
# Description:
# 543. Diameter of Binary Tree
# Given a binary tree, you need to compute the length of the diameter of the tree.
# The diameter of a binary tree is the length of the longest path between any two
# nodes in a tree. This path may or may not pass through the root.
# ****************
# 思路:
# 这题最重要是写一个global sum用来比对当前最大的长度,和之前保存的最大的长度
# 因为可能subtree某个长度会高于从外围root的长度
# 本人的中文视频讲解:
# https://www.youtube.com/watch?v=0VnOfu2pYTo
# ****************
# Final Solution *
# ****************
class Solution(object):
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
#Global MAX
self.max = 0
        # Returns the longest downward path from the current node and updates the global maximum
def currentMax(root):
#Edge:
if not root:
return 0
# Divide and Conquer
# 左右节点返回一个当前为止的最大值
left = currentMax(root.left)
right = currentMax(root.right)
self.max = max(self.max, left + right)
            # Return to the parent: 1 + the longer of the two child paths
return 1 + max(left, right)
currentMax(root)
return self.max
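
# Local test sketch: TreeNode is normally provided by the judge, so this minimal
# definition and the sample tree below are assumptions added only for illustration.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

if __name__ == '__main__':
    # Tree:      1
    #           / \
    #          2   3
    #         / \
    #        4   5
    root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3))
    print(Solution().diameterOfBinaryTree(root))  # expected output: 3 (path 4-2-1-3)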
| 751 |
335 | <gh_stars>100-1000
{
"word": "Knuckleballer",
"definitions": [
"A pitcher whose pitching style largely involves throwing knuckleballs."
],
"parts-of-speech": "Noun"
} | 75 |
924 | <reponame>chrisw957/lora_gateway
/*
/ _____) _ | |
( (____ _____ ____ _| |_ _____ ____| |__
\____ \| ___ | (_ _) ___ |/ ___) _ \
_____) ) ____| | | || |_| ____( (___| | | |
(______/|_____)_|_|_| \__)_____)\____)_| |_|
(C)2013 Semtech
Description: SX1272 LoRa modem registers and bits definitions
License: Revised BSD License, see LICENSE.TXT file include in the project
Maintainer: <NAME>
*/
#ifndef _LORAGW_SX1272_REGS_LORA_H
#define _LORAGW_SX1272_REGS_LORA_H
/*!
* ============================================================================
* SX1272 Internal registers Address
* ============================================================================
*/
#define SX1272_REG_LR_FIFO 0x00
// Common settings
#define SX1272_REG_LR_OPMODE 0x01
#define SX1272_REG_LR_FRFMSB 0x06
#define SX1272_REG_LR_FRFMID 0x07
#define SX1272_REG_LR_FRFLSB 0x08
// Tx settings
#define SX1272_REG_LR_PACONFIG 0x09
#define SX1272_REG_LR_PARAMP 0x0A
#define SX1272_REG_LR_OCP 0x0B
// Rx settings
#define SX1272_REG_LR_LNA 0x0C
// LoRa registers
#define SX1272_REG_LR_FIFOADDRPTR 0x0D
#define SX1272_REG_LR_FIFOTXBASEADDR 0x0E
#define SX1272_REG_LR_FIFORXBASEADDR 0x0F
#define SX1272_REG_LR_FIFORXCURRENTADDR 0x10
#define SX1272_REG_LR_IRQFLAGSMASK 0x11
#define SX1272_REG_LR_IRQFLAGS 0x12
#define SX1272_REG_LR_RXNBBYTES 0x13
#define SX1272_REG_LR_RXHEADERCNTVALUEMSB 0x14
#define SX1272_REG_LR_RXHEADERCNTVALUELSB 0x15
#define SX1272_REG_LR_RXPACKETCNTVALUEMSB 0x16
#define SX1272_REG_LR_RXPACKETCNTVALUELSB 0x17
#define SX1272_REG_LR_MODEMSTAT 0x18
#define SX1272_REG_LR_PKTSNRVALUE 0x19
#define SX1272_REG_LR_PKTRSSIVALUE 0x1A
#define SX1272_REG_LR_RSSIVALUE 0x1B
#define SX1272_REG_LR_HOPCHANNEL 0x1C
#define SX1272_REG_LR_MODEMCONFIG1 0x1D
#define SX1272_REG_LR_MODEMCONFIG2 0x1E
#define SX1272_REG_LR_SYMBTIMEOUTLSB 0x1F
#define SX1272_REG_LR_PREAMBLEMSB 0x20
#define SX1272_REG_LR_PREAMBLELSB 0x21
#define SX1272_REG_LR_PAYLOADLENGTH 0x22
#define SX1272_REG_LR_PAYLOADMAXLENGTH 0x23
#define SX1272_REG_LR_HOPPERIOD 0x24
#define SX1272_REG_LR_FIFORXBYTEADDR 0x25
#define SX1272_REG_LR_FEIMSB 0x28
#define SX1272_REG_LR_FEIMID 0x29
#define SX1272_REG_LR_FEILSB 0x2A
#define SX1272_REG_LR_RSSIWIDEBAND 0x2C
#define SX1272_REG_LR_DETECTOPTIMIZE 0x31
#define SX1272_REG_LR_INVERTIQ 0x33
#define SX1272_REG_LR_DETECTIONTHRESHOLD 0x37
#define SX1272_REG_LR_SYNCWORD 0x39
#define SX1272_REG_LR_INVERTIQ2 0x3B
// end of documented register in datasheet
// I/O settings
#define SX1272_REG_LR_DIOMAPPING1 0x40
#define SX1272_REG_LR_DIOMAPPING2 0x41
// Version
#define SX1272_REG_LR_VERSION 0x42
// Additional settings
#define SX1272_REG_LR_AGCREF 0x43
#define SX1272_REG_LR_AGCTHRESH1 0x44
#define SX1272_REG_LR_AGCTHRESH2 0x45
#define SX1272_REG_LR_AGCTHRESH3 0x46
#define SX1272_REG_LR_PLLHOP 0x4B
#define SX1272_REG_LR_TCXO 0x58
#define SX1272_REG_LR_PADAC 0x5A
#define SX1272_REG_LR_PLL 0x5C
#define SX1272_REG_LR_PLLLOWPN 0x5E
#define SX1272_REG_LR_FORMERTEMP 0x6C
#endif // _LORAGW_SX1272_REGS_LORA_H
| 2,868 |
4,585 | package com.beardedhen.androidbootstrap.api.attributes;
import android.content.Context;
import java.io.Serializable;
/**
* A Heading defines the text size and padding of its view. Bootstrap supports styles for H1-H6
* elements out of the box.
*/
public interface BootstrapHeading extends Serializable {
String KEY = "com.beardedhen.androidbootstrap.api.attributes.BootstrapHeading";
/**
* Retrieves the text size for the current BootstrapHeading.
*
* @param context the current context
* @return the text size
*/
float getTextSize(Context context);
/**
* Retrieves the vertical padding for the current BootstrapHeading
*
* @param context the current context
* @return the vertical padding
*/
float verticalPadding(Context context);
/**
* Retrieves the horizontal padding for the current BootstrapHeading
*
* @param context the current context
* @return the horizontal padding
*/
float horizontalPadding(Context context);
}
| 330 |
344 | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
// Do not edit this file. It is machine generated.
{
"less.builtin.abs": "valor absoluto de un número",
"less.builtin.acos": "arcocoseno: inversa de la función de coseno",
"less.builtin.alpha": "devuelve el canal \"alfa\" de \"@color\"",
"less.builtin.argb": "crea un #AARRGGBB",
"less.builtin.asin": "arcoseno: inversa de la función de seno",
"less.builtin.atan": "arcotangente: inversa de la función tangente",
"less.builtin.blue": "devuelve el canal \"azul\" de \"@color\"",
"less.builtin.ceil": "redondea a un entero",
"less.builtin.color": "analiza una cadena en un color",
"less.builtin.contrast": "devolver \"@darkcolor\" si \"@color1 es> 43 % luma\"; de lo contrario, devolver \"@lightcolor\", ver notas",
"less.builtin.convert": "convierte números de un tipo a otro",
"less.builtin.cos": "función de coseno",
"less.builtin.darken": "devolver \"@color\" 10 % puntos más oscuro",
"less.builtin.data-uri": "inserta un recurso y recurre a \"url()\"",
"less.builtin.desaturate": "devolver \"@color\" 10 % puntos menos saturado",
"less.builtin.e": "contenido de la cadena de escape",
"less.builtin.escape": "La URL codifica una cadena",
"less.builtin.extract": "devuelve un valor en la posición especificada en la lista",
"less.builtin.fade": "devolver \"@color\" con 50 % de transparencia",
"less.builtin.fadein": "devolver \"@color\" 10 % puntos menos transparente",
"less.builtin.fadeout": "devolver \"@color\" 10 % puntos más transparente",
"less.builtin.floor": "redondea a un entero",
"less.builtin.green": "devuelve el canal \"verde\" de \"@color\"",
"less.builtin.greyscale": "devuelve un color gris 100 % sin saturación",
"less.builtin.hsl": "crea un color",
"less.builtin.hsla": "crea un color",
"less.builtin.hsv": "crea un color",
"less.builtin.hsva": "crea un color",
"less.builtin.hsvhue": "devuelve el canal \"matiz\" de \"@color\" en el espacio de HSV",
"less.builtin.hsvsaturation": "devuelve el canal \"saturación\" de \"@color\" en el espacio de HSV",
"less.builtin.hsvvalue": "devuelve el canal \"valor\" de \"@color\" en el espacio de HSV",
"less.builtin.hue": "devuelve el canal \"matiz\" de \"@color\" en el espacio de HSL",
"less.builtin.length": "devuelve el número de elementos de una lista de valores",
"less.builtin.lighten": "devolver \"@color\" 10 % puntos más claro",
"less.builtin.lightness": "devuelve el canal \"claridad\" de \"@color\" en el espacio de HSL",
"less.builtin.luma": "devuelve el valor \"luma\" (claridad perceptual) de \"@color\"",
"less.builtin.max": "devuelve el valor inferior de uno o varios valores",
"less.builtin.min": "devuelve el valor inferior de uno o varios valores",
"less.builtin.mix": "devolver una combinación de \"@color1\" y \"@color2\"",
"less.builtin.mod": "primer argumento, módulo, segundo argumento",
"less.builtin.percentage": "convierte a un porcentaje; p. ej. 0,5 > 50 %",
"less.builtin.pi": "devuelve pi",
"less.builtin.pow": "primer argumento elevado a la potencia del segundo",
"less.builtin.red": "devuelve el canal \"rojo\" de \"@color\"",
"less.builtin.replace": "sustitución de cadenas",
"less.builtin.round": "redondea un número a un número de ubicaciones",
"less.builtin.saturate": "devolver \"@color\" 10 % puntos más saturado",
"less.builtin.saturation": "devuelve el canal \"saturación\" de \"@color\" en el espacio de HSL",
"less.builtin.sin": "función de seno",
"less.builtin.spin": "devolver \"@color\" con 10 grados más en el matiz",
"less.builtin.sqrt": "calcula la raíz cuadrada de un número",
"less.builtin.tan": "función de tangente",
"less.builtin.unit": "quitar o cambiar la unidad de una dimensión"
} | 1,489 |
677 | <gh_stars>100-1000
/*
* Copyright (c) 2020 Bitdefender
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _WINUM_CACHE_H_
#define _WINUM_CACHE_H_
#include "introcrt.h"
typedef struct _WIN_PROCESS_MODULE WIN_PROCESS_MODULE;
/// We can have up to this many exports pointing to the same RVA.
#define MAX_OFFSETS_PER_NAME 10
///
/// Describes a cached exported RVA (Relative Virtual Address).
///
typedef struct _WINUM_CACHE_EXPORT
{
RBNODE RbNode; ///< RB tree node entry.
DWORD Rva; ///< The RVA of this export.
DWORD NameHashes[MAX_OFFSETS_PER_NAME]; ///< Hashes of the names pointing to this RVA.
DWORD NameLens[MAX_OFFSETS_PER_NAME]; ///< Length of each name pointing to this RVA.
DWORD NameOffsets[MAX_OFFSETS_PER_NAME]; ///< Name RVAs pointing to this exported RVA.
DWORD NumberOfOffsets; ///< Number of symbols pointing to the exported RVA.
/// @brief The names pointing to this RVA. Each name will point inside
/// the Names structure inside #WINUM_CACHE_EXPORTS.
PCHAR Names[MAX_OFFSETS_PER_NAME];
} WINUM_CACHE_EXPORT, *PWINUM_CACHE_EXPORT;
///
/// This structure describes the exported memory related functions.
///
typedef struct _WINUM_CACHE_MEMORY_FUNCS
{
union
{
struct
{
DWORD MemcpyRva; ///< RVA of the memcpy function.
DWORD MemcpySRva; ///< RVA of the memcpys function.
DWORD MemmoveRva; ///< RVA of the memmove function.
DWORD MemmoveSRva; ///< RVA of the memmoves function.
DWORD MemsetRva; ///< RVA of the memset function.
};
DWORD FuncArray[5]; ///< Array aliasing the above exported memory functions.
};
} WINUM_CACHE_MEMORY_FUNCS, *PWINUM_CACHE_MEMORY_FUNCS;
///
/// Describes an exports cache.
///
typedef struct _WINUM_CACHE_EXPORTS
{
RBTREE Tree; ///< The RB tree containing all the exports (#WINUM_CACHE_EXPORT entries).
WINUM_CACHE_EXPORT *Array; ///< The array of #WINUM_CACHE_EXPORT entries.
/// @brief A pointer to a contiguous memory area containing all the exported names.
PCHAR Names;
DWORD StartNames; ///< First RVA pointing to the exported names.
DWORD EndNames; ///< Last RVA pointing to the exported names.
} WINUM_CACHE_EXPORTS, *PWINUM_CACHE_EXPORTS;
///
/// Describes one module cache.
///
typedef struct _WINUM_MODULE_CACHE
{
LIST_ENTRY Link; ///< Link inside the global list of module caches.
DWORD ModuleNameHash; ///< The hash on the name of the cached module.
struct
{
DWORD EatRva; ///< RVA of the exports table.
DWORD EatSize; ///< Size of the exports table.
DWORD IatRva; ///< RVA of the imports table.
DWORD IatSize; ///< Size of the imports table.
DWORD TimeDateStamp; ///< Module time & date stamp.
DWORD SizeOfImage; ///< Size of image.
} Info;
WINUM_CACHE_EXPORTS Exports; ///< The exports cache.
WINUM_CACHE_MEMORY_FUNCS MemFuncs; ///< Memory related functions RVAs.
BYTE *Headers; ///< A buffer containing the MZ/PE headers of this module.
BOOLEAN Wow64; ///< True if this module is Wow64.
BOOLEAN ExportDirRead; ///< True if the exports directory has been read.
BOOLEAN MemoryFuncsRead;///< True if the memory functions have been identified.
    /// @brief True if this cache was created for a module loaded by a statically detected process. Dirty caches
/// are NOT reused by other loaded modules, and they will be destroyed when the module is unloaded.
BOOLEAN Dirty;
} WINUM_MODULE_CACHE, *PWINUM_MODULE_CACHE;
/// @brief We will not cache more than this many exports.
#define WINUMCACHE_MAX_EXPORTS 10000u
//
// API
//
INTSTATUS
IntWinUmModCacheSetHeaders(
_In_ WIN_PROCESS_MODULE *Module,
_In_reads_bytes_(4096) BYTE *Headers
);
void
IntWinUmModCacheGet(
_In_ WIN_PROCESS_MODULE *Module
);
void
IntWinUmModCacheRelease(
_In_ WINUM_MODULE_CACHE *Cache
);
void
IntWinUmCacheUninit(
void
);
WINUM_CACHE_EXPORT *
IntWinUmModCacheExportFind(
_In_ WIN_PROCESS_MODULE *Module,
_In_ DWORD Rva,
_In_ DWORD ErrorRange
);
BOOLEAN
IntWinUmCacheIsExportDirRead(
_In_ WIN_PROCESS_MODULE *Module
);
WINUM_CACHE_EXPORT *
IntWinUmCacheGetExportFromRange(
_In_ WIN_PROCESS_MODULE *Module,
_In_ QWORD Gva,
_In_ DWORD Length
);
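//
// Illustrative usage sketch (an assumption, not part of the original header): resolving the
// nearest exported symbol for an RVA inside a user-mode module via the cache API above. The
// 0x20-byte error range and the logging macro are placeholders for illustration only.
//
//     WINUM_CACHE_EXPORT *pExport = IntWinUmModCacheExportFind(Module, Rva, 0x20);
//     if (pExport != NULL && pExport->NumberOfOffsets > 0)
//     {
//         // Names[0] points inside the contiguous Exports.Names buffer of the module cache.
//         LOG("Rva 0x%08x resolves near export '%s'\n", pExport->Rva, pExport->Names[0]);
//     }
//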
#endif // _WINUM_CACHE_H_
| 2,364 |
9,724 | <filename>tests/webgl_unmasked_vendor_webgl.c
#include <stdio.h>
#include <string.h>
#include <emscripten/html5.h>
#include <GLES2/gl2.h>
#include <webgl/webgl1_ext.h>
#include <assert.h>
int main()
{
EmscriptenWebGLContextAttributes attr;
emscripten_webgl_init_context_attributes(&attr);
attr.enableExtensionsByDefault = 0;
EMSCRIPTEN_WEBGL_CONTEXT_HANDLE ctx = emscripten_webgl_create_context("#canvas", &attr);
emscripten_webgl_make_context_current(ctx);
assert(!glGetError());
// This should gracefully return null and record a GL error.
const char *str = (const char *)glGetString(GL_UNMASKED_VENDOR_WEBGL);
printf("%s\n", str);
assert(glGetError());
assert(!glGetError()); // One error is enough
EM_BOOL success = emscripten_webgl_enable_extension(ctx, "WEBGL_debug_renderer_info");
if (!success)
{
// Browser does not have WEBGL_debug_renderer_info, skip remainder and return success.
return 0;
}
assert(!glGetError());
str = (const char *)glGetString(GL_UNMASKED_VENDOR_WEBGL);
printf("%s\n", str);
assert(strlen(str) > 3); // Should get something (dependent on hardware)
assert(!glGetError());
return 0;
}
| 442 |
2,313 | <reponame>zhuangbility111/ComputeLibrary
/*
* Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/AccessWindowStatic.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/core/utils/helpers/float_ops.h"
#include "support/Cast.h"
#include "support/StringSupport.h"
namespace arm_compute
{
namespace opencl
{
namespace kernels
{
namespace
{
using ElementsProcessed = Steps;
inline Status validate_arguments(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float beta,
bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src0);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1);
ARM_COMPUTE_RETURN_ERROR_ON_MSG((fp_mixed_precision && (src0->data_type() != DataType::F16)), "Mixed precision floating point is supported only for F16 data");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(src1->num_dimensions() > 3, "The number of dimensions for the matrix B must be <= 3");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(src1->num_dimensions() > 2 && reshape_info.reinterpret_input_as_3d(), "The src1 tensor cannot have more than 2 dimensions if src0 has to be reinterpreted as 3D");
ARM_COMPUTE_RETURN_ERROR_ON_MSG((reshape_info.reinterpret_input_as_3d() || reshape_info.depth_output_gemm3d() != 0) && (src2 != nullptr)
&& (!reshape_info.broadcast_bias()),
"Bias addition only supported with broadcast mode in case the input or dst has to be reinterpreted as 3D");
if(!is_interleaved_transposed)
{
ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(0) != src1->dimension(1));
if(src2 != nullptr && !(helpers::float_ops::is_zero(beta)))
{
const unsigned int m = reshape_info.reinterpret_input_as_3d() ? src0->dimension(1) * src0->dimension(2) : src0->dimension(1);
const unsigned int n = src1->dimension(0);
const unsigned int src2_dim0 = src2->dimension(0);
const unsigned int src2_dim1 = src2->dimension(1);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src1);
if(reshape_info.broadcast_bias())
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim1 != 1 || src2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
}
else
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim0 != n || src2_dim1 != m), "Incorrect dimension of bias matrix");
}
}
}
else
{
GEMMRHSMatrixInfo rhs_info;
GEMMLHSMatrixInfo lhs_info;
const auto m = static_cast<unsigned int>(reshape_info.m());
const auto n = static_cast<unsigned int>(reshape_info.n());
const int k = reshape_info.k();
const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
rhs_info.n0 = max_cl_vector_width / src1->element_size();
rhs_info.k0 = 1;
rhs_info.h0 = mult_transpose1xW_width;
rhs_info.interleave = false;
rhs_info.transpose = false;
lhs_info.m0 = 4;
lhs_info.k0 = 4;
lhs_info.v0 = mult_interleave4x4_height;
lhs_info.interleave = true;
lhs_info.transpose = true;
TensorShape tensor_shape0{ src0->tensor_shape() };
tensor_shape0.set(0, k);
tensor_shape0.set(1, m);
TensorShape tensor_shape1{ src1->tensor_shape() };
tensor_shape1.set(0, n);
tensor_shape1.set(1, k);
const TensorInfo tensor_info0 = src0->clone()->set_tensor_shape(tensor_shape0);
const TensorInfo tensor_info1 = src1->clone()->set_tensor_shape(tensor_shape1);
const TensorInfo tensor_info_reshaped0 = src0->clone()->set_tensor_shape(misc::shape_calculator::compute_lhs_reshaped_shape(tensor_info0, lhs_info));
const TensorInfo tensor_info_reshaped1 = src1->clone()->set_tensor_shape(misc::shape_calculator::compute_rhs_reshaped_shape(tensor_info1, rhs_info));
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src0, &tensor_info_reshaped0);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1);
if(src2 != nullptr && !(helpers::float_ops::is_zero(beta)))
{
const unsigned int src2_dim0 = src2->dimension(0);
const unsigned int src2_dim1 = src2->dimension(1);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src1);
if(reshape_info.broadcast_bias())
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim1 != 1 || src2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
}
else
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim0 != n || src2_dim1 != m), "Incorrect dimension of bias matrix");
}
}
}
if(dst->total_size() != 0)
{
const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, is_interleaved_transposed, reshape_info));
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst);
}
return Status{};
}
inline std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst,
float beta, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target,
ElementsProcessed &num_elements_processed)
{
ARM_COMPUTE_UNUSED(beta);
bool window_changed = false;
Window win{};
Window win_out{};
const DataType data_type = src0->data_type();
unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
bool reinterpret_input_as_3d = reshape_info.reinterpret_input_as_3d();
bool reinterpret_output_as_3d = (reshape_info.depth_output_gemm3d() != 0);
// In case both input and dst have to be reinterpreted as 3D tensors,
// force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
if(reinterpret_input_as_3d == reinterpret_output_as_3d)
{
reinterpret_input_as_3d = false;
reinterpret_output_as_3d = false;
}
    // dst tensor auto initialization if not yet initialized
auto_init_if_empty(*dst, src0->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, is_interleaved_transposed, reshape_info)));
TensorInfo tmp_info(*dst);
if(reinterpret_output_as_3d)
{
// Since the dst tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
// the window needs to be constructed on the 2D collapsed version of the tensor
TensorShape tmp_shape(dst->tensor_shape());
tmp_shape.collapse(2U, 1U);
tmp_info.set_tensor_shape(tmp_shape);
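        // Worked example (illustrative shapes, not from the original comments): for a dst tensor of
        // shape (N, M, D, B), collapse(2U, 1U) folds the two dimensions starting at index 1 into one,
        // giving (N, M * D, B), so the 2D GEMM window sweeps M * D rows per batch instead of a 3D volume.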
}
if(is_interleaved_transposed)
{
// reinterpret_input_as_3d is not supported if is_interleaved_transposed is set
ARM_COMPUTE_ERROR_ON(reshape_info.reinterpret_input_as_3d());
// Configure kernel window
num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
num_elems_processed_per_iteration_y = 4;
win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
if(src2 != nullptr)
{
const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
const int bias_processed_per_iteration_y = reshape_info.broadcast_bias() ? 1 : num_elems_processed_per_iteration_y;
AccessWindowStatic src2_access(src2, 0, 0,
ceil_to_multiple(src2->dimension(0), bias_processed_per_iteration_x),
ceil_to_multiple(src2->dimension(1), bias_processed_per_iteration_y));
window_changed = update_window_and_padding(win, src2_access); // window used by the execute_window_loop
}
}
else // The input tensors have not been reshaped
{
// Special case for 1xN, 2xN, 3xN and 4xN src0 tensor. num_elems_processed_per_iteration_x is set up for the default case.
num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
num_elems_processed_per_iteration_y = std::min(static_cast<int>(dst->dimension(1)), 4);
// Create kernels according to the architecture, data type and input size.
GPUTarget arch_target = get_arch_from_target(gpu_target);
if(arch_target == GPUTarget::BIFROST && data_type == DataType::F32)
{
num_elems_processed_per_iteration_x = (src1->dimension(0) <= 1000 && src0->num_dimensions() == 1) ? 2 : 4;
}
// Configure window
win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
win_out = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
AccessWindowStatic src0_access(src0, 0, 0, src0->dimension(0), src0->dimension(1));
AccessWindowStatic src1_access(src1, 0, 0, ceil_to_multiple(src1->dimension(0), num_elems_processed_per_iteration_x), src1->dimension(1));
AccessWindowStatic dst_access(dst, 0, 0,
dst->dimension(0),
dst->dimension(1));
if(src2 != nullptr)
{
const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
AccessWindowStatic src2_access(src2, 0, 0,
ceil_to_multiple(src2->dimension(0), bias_processed_per_iteration_x),
src2->dimension(1));
window_changed = update_window_and_padding(win, src0_access, src1_access, src2_access) || // window used by the execute_window_loop
update_window_and_padding(win_out, dst_access); // window used to update the padding requirements of dst tensor
}
else
{
window_changed = update_window_and_padding(win, src0_access, src1_access) || // window used by the execute_window_loop
update_window_and_padding(win_out, dst_access); // window used to update the padding requirements of dst tensor
}
}
// Collapse along the Z direction
// This collapse needs to be here in order to tune the Z dimension of LWS
Window collapsed = win;
const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(dst->num_dimensions()), 2u);
collapsed = win.collapse(win, dimension_to_collapse);
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, collapsed);
}
} // namespace
ClGemmMatrixMultiplyKernel::ClGemmMatrixMultiplyKernel()
{
_type = CLKernelType::GEMM;
}
void ClGemmMatrixMultiplyKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float alpha,
float beta,
bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
// Perform validate step
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src0, src1, src2, dst, beta,
is_interleaved_transposed, reshape_info, fp_mixed_precision));
auto padding_info = is_interleaved_transposed ? get_padding_info({ src0, src1, dst }) : get_padding_info({ src0, dst });
_reinterpret_input_as_3d = reshape_info.reinterpret_input_as_3d();
_reinterpret_output_as_3d = (reshape_info.depth_output_gemm3d() != 0);
_add_bias = src2 != nullptr;
// In case both input and dst have to be reinterpreted as 3D tensors,
// force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
if(_reinterpret_input_as_3d == _reinterpret_output_as_3d)
{
_reinterpret_input_as_3d = false;
_reinterpret_output_as_3d = false;
}
// Check if we need to slide the matrix B
const unsigned int num_dimensions_src0 = _reinterpret_input_as_3d ? src0->num_dimensions() - 1 : src0->num_dimensions();
_slide_matrix_b = (src1->num_dimensions() >= num_dimensions_src0);
const DataType data_type = src0->data_type();
// Get target architecture
GPUTarget gpu_target = get_target();
ElementsProcessed num_elements_processed{};
// Configure kernel window
auto win_config = validate_and_configure_window(src0, src1, src2, dst, beta, is_interleaved_transposed, reshape_info,
gpu_target, num_elements_processed);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
// If _reinterpret_input_as_3d = _reinterpret_output_as_3d = true, both will be turned off (false)
// in which case we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
// This means that the actual m used by the kernel is given by dst->dimension(1)
const unsigned int internal_m = _reinterpret_output_as_3d ? dst->dimension(1) * dst->dimension(2) : dst->dimension(1);
const unsigned int n = dst->dimension(0);
const unsigned int h_gemm_3d = _reinterpret_output_as_3d ? dst->dimension(1) : src0->dimension(1);
const unsigned int d_gemm_3d = _reinterpret_output_as_3d ? dst->dimension(2) : src0->dimension(2);
const unsigned int m0 = num_elements_processed.y();
const unsigned int n0 = num_elements_processed.x();
// Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
const unsigned int partial_store_m0 = internal_m % m0;
const unsigned int partial_store_n0 = n % n0;
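    // Worked example (illustrative numbers): with internal_m = 30, n = 70, m0 = 4 and n0 = 8, the
    // kernel covers 7 full row-blocks of 4 and 8 full column-blocks of 8, then stores a trailing
    // partial block of partial_store_m0 = 30 % 4 = 2 rows and partial_store_n0 = 70 % 8 = 6 columns.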
// Create build options
CLBuildOptions build_opts;
build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha));
build_opts.add_option_if(src2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta));
build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA");
build_opts.add_option_if(reshape_info.broadcast_bias(), "-DBROADCAST_BIAS");
build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(h_gemm_3d));
build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(d_gemm_3d));
build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(src1->dimension(2)));
build_opts.add_option_if(activation_info.enabled(), "-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(activation_info.activation())));
build_opts.add_option_if(activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(activation_info.a()));
build_opts.add_option_if(activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(activation_info.b()));
build_opts.add_option("-DIN1_DIM_X=" + support::cpp11::to_string(src1->dimension(0)));
const bool is_bifrost = get_arch_from_target(gpu_target) == GPUTarget::BIFROST;
std::string kernel_name;
if(is_interleaved_transposed)
{
const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m));
build_opts.add_option("-DN=" + support::cpp11::to_string(n));
build_opts.add_option("-DK=" + support::cpp11::to_string(src1->dimension(0) / (n0 * mult_transpose1xW_width)));
build_opts.add_option("-DH0=" + support::cpp11::to_string(mult_transpose1xW_width));
build_opts.add_option("-DV0=" + support::cpp11::to_string(mult_interleave4x4_height));
build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
if(is_data_type_float(data_type) && is_bifrost)
{
kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type)) + "_bifrost";
}
else
{
kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type));
if(fp_mixed_precision && data_type == DataType::F16)
{
// currently wider accumulator is only supported for fp16 kernels.
kernel_name += "_acc32";
}
}
}
else // The input tensors have not been reshaped
{
build_opts.add_option("-DN=" + support::cpp11::to_string(n));
build_opts.add_option("-DK=" + support::cpp11::to_string(src0->dimension(0)));
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
// Create kernels according to the architecture, data type and input size.
if(is_data_type_float(data_type) && is_bifrost)
{
kernel_name = "gemm_mm_floating_point";
if(src0->num_dimensions() != 1)
{
kernel_name += "_" + lower_string(string_from_data_type(data_type)) + "_bifrost";
if(fp_mixed_precision && data_type == DataType::F16)
{
// currently wider accumulator is only supported for fp16 kernels.
kernel_name += "_acc32";
}
}
else if(src1->dimension(0) <= 1000 && data_type == DataType::F32)
{
// The first kernel is optimized for the case of 1000 or less dst elements (e.g. FC8 of AlexNet and VGG-16, and
// FC1 of Inception v3). The second kernel is optimized for the case of greater than 1000 dst elements (e.g.
// FC6 and FC7 of AlexNet and VGG-16).
kernel_name += "_" + lower_string(string_from_data_type(data_type)) + "_bifrost_1000";
}
// The work-group size equal to the Bifrost quad size has been proved to be optimal for these kernels
// via exhaustive autotuning over a range of representative layer configurations.
set_lws_hint(cl::NDRange(4));
}
else // (MIDGARD and F32) or (F16)
{
kernel_name = "gemm_mm_floating_point";
}
}
// Create kernel
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Set config_id for enabling LWS tuning
_config_id = "gemm_";
_config_id += (is_interleaved_transposed ? "reshaped_" : "");
_config_id += (_add_bias ? "add_bias_" : "");
_config_id += (reshape_info.broadcast_bias() ? "broadcast_bias_" : "");
_config_id += (fp_mixed_precision ? "fp_mixed_" : "");
_config_id += (_reinterpret_input_as_3d ? "3di_" : "");
_config_id += (_reinterpret_output_as_3d ? "3do_" : "");
_config_id += lower_string(string_from_data_type(src0->data_type()));
_config_id += "_";
_config_id += support::cpp11::to_string(dst->dimension(1));
_config_id += "_";
_config_id += support::cpp11::to_string(dst->dimension(0));
_config_id += "_";
_config_id += support::cpp11::to_string(dst->dimension(2));
_config_id += "_";
_config_id += support::cpp11::to_string(dst->dimension(3));
_config_id += "_";
_config_id += (is_interleaved_transposed ? support::cpp11::to_string(src1->dimension(0)) : support::cpp11::to_string(src1->dimension(1)));
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status ClGemmMatrixMultiplyKernel::validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta,
bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
{
// Note: num_elements_processed will be set in validate_and_configure_window()
ElementsProcessed num_elements_processed{};
ARM_COMPUTE_UNUSED(alpha);
ARM_COMPUTE_UNUSED(activation_info);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src0, src1, src2, dst, beta, is_interleaved_transposed, reshape_info, fp_mixed_precision));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src0->clone().get(),
src1->clone().get(),
(src2 != nullptr) ? src2->clone().get() : nullptr,
dst->clone().get(),
beta,
is_interleaved_transposed,
reshape_info,
gpu_target,
num_elements_processed)
.first);
return Status{};
}
void ClGemmMatrixMultiplyKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
const auto src0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
const auto src1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
const auto src2 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
ARM_COMPUTE_ERROR_ON(_add_bias && src2 == nullptr);
if(src1->info()->num_dimensions() < 3)
{
// The stride_z for matrix B must be zero if we do not slice
ARM_COMPUTE_ERROR_ON(src1->info()->strides_in_bytes()[3] != 0);
}
Window slice = window.first_slice_window_3D();
Window slice_matrix_b = slice;
slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
const unsigned int num_arguments_bias = _add_bias ? num_arguments_per_2D_tensor() + 1 : 0;
if(_reinterpret_input_as_3d)
{
// Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + num_arguments_bias;
const unsigned int total_cross_plane_pad = src0->info()->padding().top + src0->info()->padding().bottom;
_kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
}
if(_reinterpret_output_as_3d)
{
// Pass bottom paddings to the kernel if the dst has to be reinterpreted as 3D tensor
const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + (_reinterpret_input_as_3d ? 1 : 0) + num_arguments_bias;
const unsigned int total_cross_plane_pad = dst->info()->padding().top + dst->info()->padding().bottom;
_kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
}
do
{
Window slice_b = slice;
// Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
// This scenario can happen when the matrix multiplication is used to perform a convolution operation
if(!_slide_matrix_b)
{
slice_b = slice_matrix_b;
}
unsigned int idx = 0;
add_2D_tensor_argument(idx, src0, slice);
add_2D_tensor_argument(idx, src1, slice_b);
if(_add_bias)
{
add_2D_tensor_argument(idx, src2, slice);
}
add_2D_tensor_argument(idx, dst, slice);
_kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src0->info()->strides_in_bytes()[2]));
_kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src1->info()->strides_in_bytes()[2]));
if(_add_bias)
{
_kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src2->info()->strides_in_bytes()[2]));
}
_kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(dst->info()->strides_in_bytes()[2]));
enqueue(queue, *this, slice, lws_hint());
}
while(window.slide_window_slice_3D(slice));
}
} // namespace kernels
} // namespace opencl
} // namespace arm_compute
| 13,071 |
348 | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2021.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: <NAME> $
// $Authors: <NAME> $
// --------------------------------------------------------------------------
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SpectrumAccessQuadMZTransforming.h>
namespace OpenMS
{
SpectrumAccessQuadMZTransforming::SpectrumAccessQuadMZTransforming(
OpenSwath::SpectrumAccessPtr sptr,
double a, double b, double c, bool ppm) :
SpectrumAccessTransforming(sptr),
a_(a),
b_(b),
c_(c),
ppm_(ppm)
{}
SpectrumAccessQuadMZTransforming::~SpectrumAccessQuadMZTransforming() {}
boost::shared_ptr<OpenSwath::ISpectrumAccess> SpectrumAccessQuadMZTransforming::lightClone() const
{
// Create a light clone of *this by initializing a new
// SpectrumAccessQuadMZTransforming with a light clone of the underlying
// SpectrumAccess object and the parameters.
return boost::shared_ptr<SpectrumAccessQuadMZTransforming>(
new SpectrumAccessQuadMZTransforming(sptr_->lightClone(), a_, b_, c_, ppm_));
}
OpenSwath::SpectrumPtr SpectrumAccessQuadMZTransforming::getSpectrumById(int id)
{
OpenSwath::SpectrumPtr s = sptr_->getSpectrumById(id);
for (size_t i = 0; i < s->getMZArray()->data.size(); i++)
{
// mz = a + b * mz + c * mz^2
double predict =
a_ +
b_ * s->getMZArray()->data[i] +
c_ * s->getMZArray()->data[i] * s->getMZArray()->data[i];
// If ppm is true, we predicted the ppm deviation, not the actual new mass
if (ppm_)
{
s->getMZArray()->data[i] = s->getMZArray()->data[i] - predict*s->getMZArray()->data[i]/1000000;
}
else
{
s->getMZArray()->data[i] = predict;
}
}
return s;
}
}
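// Worked example (illustrative numbers, not part of the original source): with a_ = 0.0, b_ = 1e-6,
// c_ = 0.0 and ppm_ = true, a peak at m/z 1000.0 yields predict = 1e-6 * 1000.0 = 0.001 (interpreted
// as a ppm deviation), so the corrected value is 1000.0 - 0.001 * 1000.0 / 1000000 = 999.999999.
// With ppm_ = false the same quadratic directly becomes the new m/z value.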
| 1,287 |
1,249 | #include <algorithm>
#include <cfloat>
#include <memory>
#include <vector>
#include <wayfire/plugin.hpp>
#include <wayfire/view.hpp>
#include <wayfire/view-access-interface.hpp>
#include <wayfire/signal-definitions.hpp>
#include <wayfire/view-transform.hpp>
#include <wayfire/parser/rule_parser.hpp>
#include <wayfire/lexer/lexer.hpp>
#include <wayfire/variant.hpp>
#include <wayfire/rule/lambda_rule.hpp>
#include <wayfire/rule/rule.hpp>
#include <wayfire/util/log.hpp>
#include "lambda-rules-registration.hpp"
#include "view-action-interface.hpp"
class wayfire_window_rules_t : public wf::plugin_interface_t
{
public:
void init() override;
void fini() override;
void apply(const std::string & signal, wf::signal_data_t *data);
private:
void setup_rules_from_config();
wf::lexer_t _lexer;
// Created rule handler.
wf::signal_connection_t _created = [=] (wf::signal_data_t *data)
{
apply("created", data);
};
// Maximized rule handler.
wf::signal_connection_t _maximized = [=] (wf::signal_data_t *data)
{
apply("maximized", data);
};
    // Unmaximized rule handler.
wf::signal_connection_t _unmaximized = [=] (wf::signal_data_t *data)
{
apply("unmaximized", data);
};
// Minimized rule handler.
wf::signal_connection_t _minimized = [=] (wf::signal_data_t *data)
{
apply("minimized", data);
};
// Fullscreened rule handler.
wf::signal_connection_t _fullscreened = [=] (wf::signal_data_t *data)
{
apply("fullscreened", data);
};
// Auto-reload on changes to config file
wf::signal_connection_t _reload_config = [=] (wf::signal_data_t*)
{
setup_rules_from_config();
};
std::vector<std::shared_ptr<wf::rule_t>> _rules;
wf::view_access_interface_t _access_interface;
wf::view_action_interface_t _action_interface;
nonstd::observer_ptr<wf::lambda_rules_registrations_t> _lambda_registrations;
};
void wayfire_window_rules_t::init()
{
// Get the lambda rules registrations.
_lambda_registrations = wf::lambda_rules_registrations_t::get_instance();
_lambda_registrations->window_rule_instances++;
setup_rules_from_config();
output->connect_signal("view-mapped", &_created);
output->connect_signal("view-tiled", &_maximized);
output->connect_signal("view-tiled", &_unmaximized);
output->connect_signal("view-minimized", &_minimized);
output->connect_signal("view-fullscreen", &_fullscreened);
wf::get_core().connect_signal("reload-config", &_reload_config);
}
void wayfire_window_rules_t::fini()
{
_lambda_registrations->window_rule_instances--;
if (_lambda_registrations->window_rule_instances == 0)
{
wf::get_core().erase_data<wf::lambda_rules_registrations_t>();
}
}
void wayfire_window_rules_t::apply(const std::string & signal,
wf::signal_data_t *data)
{
if (data == nullptr)
{
return;
}
auto view = get_signaled_view(data);
if (view == nullptr)
{
LOGE("View is null.");
return;
}
if ((signal == "maximized") && (view->tiled_edges != wf::TILED_EDGES_ALL))
{
return;
}
if ((signal == "unmaximized") && (view->tiled_edges == wf::TILED_EDGES_ALL))
{
return;
}
for (const auto & rule : _rules)
{
_access_interface.set_view(view);
_action_interface.set_view(view);
auto error = rule->apply(signal, _access_interface, _action_interface);
if (error)
{
LOGE("Window-rules: Error while executing rule on ", signal, " signal.");
}
}
auto bounds = _lambda_registrations->rules();
auto begin = std::get<0>(bounds);
auto end = std::get<1>(bounds);
while (begin != end)
{
auto registration = std::get<1>(*begin);
bool error = false;
// Assume we will use the view access interface.
_access_interface.set_view(view);
wf::access_interface_t & access_iface = _access_interface;
        // If a custom access interface is set in the registration, use this one.
if (registration->access_interface != nullptr)
{
access_iface = *registration->access_interface;
}
// Load if lambda wrapper.
if (registration->if_lambda != nullptr)
{
registration->rule_instance->setIfLambda(
[registration, signal, view] () -> bool
{
return registration->if_lambda(signal, view);
});
}
// Load else lambda wrapper.
if (registration->else_lambda)
{
registration->rule_instance->setElseLambda(
[registration, signal, view] () -> bool
{
return registration->else_lambda(signal, view);
});
}
// Run the lambda rule.
error = registration->rule_instance->apply(signal, _access_interface);
// Unload wrappers.
registration->rule_instance->setIfLambda(nullptr);
registration->rule_instance->setElseLambda(nullptr);
if (error)
{
LOGE("Window-rules: Error while executing rule on signal: ", signal,
", rule text:", registration->rule);
}
++begin;
}
}
void wayfire_window_rules_t::setup_rules_from_config()
{
_rules.clear();
// Build rule list.
auto section = wf::get_core().config.get_section("window-rules");
for (auto opt : section->get_registered_options())
{
_lexer.reset(opt->get_value_str());
auto rule = wf::rule_parser_t().parse(_lexer);
if (rule != nullptr)
{
_rules.push_back(rule);
}
}
}
DECLARE_WAYFIRE_PLUGIN(wayfire_window_rules_t);
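// Illustrative configuration sketch (an assumption, not taken from this file): each option in the
// [window-rules] section of the Wayfire config holds one rule string, for example
//
//   [window-rules]
//   rule_1 = on created if app_id is "mpv" then maximize
//   rule_2 = on created if title contains "Terminal" then move 0 0
//
// setup_rules_from_config() feeds every such value through _lexer into wf::rule_parser_t, and the
// resulting wf::rule_t objects are evaluated in apply() each time a connected signal fires.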
| 2,541 |
549 | import argparse
import os
import logging
import sys
import itertools
import torch
from torch.utils.data import DataLoader, ConcatDataset
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from vision.utils.misc import str2bool, Timer, freeze_net_layers, store_labels
from vision.ssd.ssd import MatchPrior
from vision.ssd.vgg_ssd import create_vgg_ssd
from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd
from vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite
from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite
from vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite
from vision.datasets.voc_dataset import VOCDataset
from vision.datasets.open_images import OpenImagesDataset
from vision.nn.multibox_loss import MultiboxLoss
from vision.ssd.config import vgg_ssd_config
from vision.ssd.config import mobilenetv1_ssd_config
from vision.ssd.config import squeezenet_ssd_config
from vision.ssd.data_preprocessing import TrainAugmentation, TestTransform
class Detector():
def __init__(self, verbose=1):
self.system_dict = {};
self.system_dict["verbose"] = verbose;
self.system_dict["local"] = {};
self.system_dict["dataset"] = {};
self.system_dict["dataset"]["train"] = {};
self.system_dict["dataset"]["val"] = {};
self.system_dict["dataset"]["val"]["status"] = False;
self.system_dict["params"] = {};
self.set_base_params();
def set_base_params(self):
self.system_dict["params"]["dataset_type"] = "voc";
self.system_dict["params"]["balance_data"] = False;
self.system_dict["params"]["label_file"] = None;
self.system_dict["params"]["batch_size"] = 32;
self.system_dict["params"]["num_workers"] = 4;
self.system_dict["params"]["net"] = "mb1-ssd"; #mb1-ssd, mb2-ssd-lite, vgg16-ssd
self.system_dict["params"]["freeze_base_net"] = False;
self.system_dict["params"]["freeze_net"] = False;
self.system_dict["params"]["mb2_width_mult"] = 1.0;
self.system_dict["params"]["base_net"] = None;
self.system_dict["params"]["resume"] = None;
self.system_dict["params"]["pretrained_ssd"] = None;
self.system_dict["params"]["use_cuda"] = True;
self.system_dict["params"]["lr"] = 0.001;
self.system_dict["params"]["momentum"] = 0.09
self.system_dict["params"]["weight_decay"] = 0.0005;
self.system_dict["params"]["gamma"] = 0.1;
self.system_dict["params"]["base_net_lr"] = None;
self.system_dict["params"]["extra_layers_lr"] = None;
self.system_dict["params"]["scheduler"] = "multi-step"; #cosine
self.system_dict["params"]["milestones"] = "80,100";
self.system_dict["params"]["t_max"] = 120;
self.system_dict["params"]["checkpoint_folder"] = "models/"
self.system_dict["params"]["num_epochs"] = 120;
self.system_dict["params"]["validation_epochs"] = 5;
self.system_dict["params"]["debug_steps"] = 100;
def set_train_data_params(self, img_dir, label_dir, label_file, batch_size=2, balance_data=False, num_workers=4):
self.system_dict["dataset"]["train"]["img_dir"] = img_dir;
self.system_dict["dataset"]["train"]["label_dir"] = label_dir;
self.system_dict["params"]["label_file"] = label_file;
self.system_dict["params"]["batch_size"] = batch_size;
self.system_dict["params"]["balance_data"] = balance_data;
self.system_dict["params"]["num_workers"] = num_workers;
def set_val_data_params(self, img_dir, label_dir):
self.system_dict["dataset"]["val"]["img_dir"] = img_dir;
self.system_dict["dataset"]["val"]["label_dir"] = label_dir;
self.system_dict["dataset"]["val"]["status"] = True;
def set_model_params(self, net="mb1-ssd", freeze_base_net=False,
freeze_net=False, use_gpu=True, resume=False, mb2_width_mult=1.0):
self.system_dict["params"]["net"] = net;
self.system_dict["params"]["freeze_net"] = freeze_net;
self.system_dict["params"]["freeze_base_net"] = freeze_base_net;
self.system_dict["params"]["mb2_width_mult"] = mb2_width_mult;
self.system_dict["params"]["resume"] = resume;
self.system_dict["params"]["use_cuda"] = use_gpu;
print("Downloading model");
if(net == "mb1-ssd"):
if(not os.path.isfile("mobilenet-v1-ssd-mp-0_675.pth")):
os.system("wget https://storage.googleapis.com/models-hao/mobilenet-v1-ssd-mp-0_675.pth");
self.system_dict["params"]["pretrained_ssd"] = "mobilenet-v1-ssd-mp-0_675.pth";
elif(net == "mb2-ssd-lite"):
if(not os.path.isfile("mb2-ssd-lite-mp-0_686.pth")):
os.system("wget https://storage.googleapis.com/models-hao/mb2-ssd-lite-mp-0_686.pth");
self.system_dict["params"]["pretrained_ssd"] = "mb2-ssd-lite-mp-0_686.pth";
elif(net == "vgg16-ssd"):
            if(not os.path.isfile("vgg16-ssd-mp-0_7726.pth")):
                os.system("wget https://storage.googleapis.com/models-hao/vgg16-ssd-mp-0_7726.pth");
self.system_dict["params"]["pretrained_ssd"] = "vgg16-ssd-mp-0_7726.pth";
print("Model downloaded");
def set_lr_params(self, lr=0.001, base_net_lr=None, extra_layers_lr=None,
scheduler="multi-step", milestones=None, t_max=120, gamma=0.1):
self.system_dict["params"]["lr"] = lr;
self.system_dict["params"]["base_net_lr"] = base_net_lr;
self.system_dict["params"]["extra_layers_lr"] = extra_layers_lr;
self.system_dict["params"]["scheduler"] = scheduler
self.system_dict["params"]["milestones"] = milestones;
self.system_dict["params"]["t_max"] = t_max;
self.system_dict["params"]["gamma"] = gamma;
def set_optimizer_params(self, momentum=0.09, weight_decay=0.0005):
self.system_dict["params"]["momentum"] = momentum;
self.system_dict["params"]["weight_decay"] = weight_decay;
def train(self, num_epochs=5, val_epoch_interval=2, output_folder="models_dir/", debug_steps=100):
self.system_dict["params"]["checkpoint_folder"] = output_folder
self.system_dict["params"]["num_epochs"] = num_epochs;
self.system_dict["params"]["validation_epochs"] = val_epoch_interval;
self.system_dict["params"]["debug_steps"] = debug_steps;
if(not os.path.isdir(self.system_dict["params"]["checkpoint_folder"])):
os.mkdir(self.system_dict["params"]["checkpoint_folder"]);
self.setup_and_start_training();
def setup_and_start_training(self):
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() and self.system_dict["params"]["use_cuda"] else "cpu")
if self.system_dict["params"]["use_cuda"] and torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
logging.info("Using gpu.");
else:
logging.info("Using cpu.");
timer = Timer()
logging.info(self.system_dict);
if self.system_dict["params"]["net"] == 'vgg16-ssd':
create_net = create_vgg_ssd
config = vgg_ssd_config
elif self.system_dict["params"]["net"] == 'mb1-ssd':
create_net = create_mobilenetv1_ssd
config = mobilenetv1_ssd_config
elif self.system_dict["params"]["net"] == 'mb1-ssd-lite':
create_net = create_mobilenetv1_ssd_lite
config = mobilenetv1_ssd_config
elif self.system_dict["params"]["net"] == 'sq-ssd-lite':
create_net = create_squeezenet_ssd_lite
config = squeezenet_ssd_config
elif self.system_dict["params"]["net"] == 'mb2-ssd-lite':
create_net = lambda num: create_mobilenetv2_ssd_lite(num, width_mult=self.system_dict["params"]["mb2_width_mult"])
config = mobilenetv1_ssd_config
else:
logging.fatal("The net type is wrong.")
sys.exit(1)
train_transform = TrainAugmentation(config.image_size, config.image_mean, config.image_std)
target_transform = MatchPrior(config.priors, config.center_variance,
config.size_variance, 0.5)
test_transform = TestTransform(config.image_size, config.image_mean, config.image_std)
logging.info("Prepare training datasets.")
datasets = [];
        dataset = VOCDataset(self.system_dict["dataset"]["train"]["img_dir"],
                             self.system_dict["dataset"]["train"]["label_dir"],
transform=train_transform,
target_transform=target_transform,
label_file=self.system_dict["params"]["label_file"])
label_file = self.system_dict["params"]["label_file"]
#store_labels(label_file, dataset.class_names)
num_classes = len(dataset.class_names)
datasets.append(dataset)
logging.info(f"Stored labels into file {label_file}.")
train_dataset = ConcatDataset(datasets)
logging.info("Train dataset size: {}".format(len(train_dataset)))
train_loader = DataLoader(train_dataset, self.system_dict["params"]["batch_size"],
num_workers=self.system_dict["params"]["num_workers"],
shuffle=True)
if(self.system_dict["dataset"]["val"]["status"]):
val_dataset = VOCDataset(self.system_dict["dataset"]["val"]["img_dir"],
self.system_dict["dataset"]["val"]["label_dir"],
transform=test_transform,
target_transform=target_transform,
is_test=True,
label_file=self.system_dict["params"]["label_file"])
logging.info("validation dataset size: {}".format(len(val_dataset)))
val_loader = DataLoader(val_dataset, self.system_dict["params"]["batch_size"],
num_workers=self.system_dict["params"]["num_workers"],
shuffle=False)
logging.info("Build network.")
net = create_net(num_classes)
min_loss = -10000.0
last_epoch = -1
base_net_lr = self.system_dict["params"]["base_net_lr"] if self.system_dict["params"]["base_net_lr"] is not None else self.system_dict["params"]["lr"]
extra_layers_lr = self.system_dict["params"]["extra_layers_lr"] if self.system_dict["params"]["extra_layers_lr"] is not None else self.system_dict["params"]["lr"]
if self.system_dict["params"]["freeze_base_net"]:
logging.info("Freeze base net.")
freeze_net_layers(net.base_net)
params = itertools.chain(net.source_layer_add_ons.parameters(), net.extras.parameters(),
net.regression_headers.parameters(), net.classification_headers.parameters())
params = [
{'params': itertools.chain(
net.source_layer_add_ons.parameters(),
net.extras.parameters()
), 'lr': extra_layers_lr},
{'params': itertools.chain(
net.regression_headers.parameters(),
net.classification_headers.parameters()
)}
]
elif self.system_dict["params"]["freeze_net"]:
freeze_net_layers(net.base_net)
freeze_net_layers(net.source_layer_add_ons)
freeze_net_layers(net.extras)
params = itertools.chain(net.regression_headers.parameters(), net.classification_headers.parameters())
logging.info("Freeze all the layers except prediction heads.")
else:
params = [
{'params': net.base_net.parameters(), 'lr': base_net_lr},
{'params': itertools.chain(
net.source_layer_add_ons.parameters(),
net.extras.parameters()
), 'lr': extra_layers_lr},
{'params': itertools.chain(
net.regression_headers.parameters(),
net.classification_headers.parameters()
)}
]
timer.start("Load Model")
resume = self.system_dict["params"]["resume"];
base_net = self.system_dict["params"]["base_net"]
pretrained_ssd = self.system_dict["params"]["pretrained_ssd"];
if self.system_dict["params"]["resume"]:
logging.info(f"Resume from the model {resume}")
net.load(self.system_dict["params"]["resume"])
elif self.system_dict["params"]["base_net"]:
logging.info(f"Init from base net {base_net}")
net.init_from_base_net(self.system_dict["params"]["base_net"])
elif self.system_dict["params"]["pretrained_ssd"]:
logging.info(f"Init from pretrained ssd {pretrained_ssd}")
net.init_from_pretrained_ssd(self.system_dict["params"]["pretrained_ssd"])
logging.info(f'Took {timer.end("Load Model"):.2f} seconds to load the model.')
net.to(DEVICE)
criterion = MultiboxLoss(config.priors, iou_threshold=0.5, neg_pos_ratio=3,
center_variance=0.1, size_variance=0.2, device=DEVICE)
optimizer = torch.optim.SGD(params,
lr=self.system_dict["params"]["lr"],
momentum=self.system_dict["params"]["momentum"],
weight_decay=self.system_dict["params"]["weight_decay"])
lr = self.system_dict["params"]["lr"];
logging.info(f"Learning rate: {lr}, Base net learning rate: {base_net_lr}, "
+ f"Extra Layers learning rate: {extra_layers_lr}.")
if(not self.system_dict["params"]["milestones"]):
self.system_dict["params"]["milestones"] = "";
self.system_dict["params"]["milestones"] += str(int(self.system_dict["params"]["num_epochs"]/3)) + ",";
self.system_dict["params"]["milestones"] += str(int(2*self.system_dict["params"]["num_epochs"]/3));
if self.system_dict["params"]["scheduler"] == 'multi-step':
logging.info("Uses MultiStepLR scheduler.")
milestones = [int(v.strip()) for v in self.system_dict["params"]["milestones"].split(",")]
scheduler = MultiStepLR(optimizer, milestones=milestones,
gamma=0.1, last_epoch=last_epoch)
elif self.system_dict["params"]["scheduler"] == 'cosine':
logging.info("Uses CosineAnnealingLR scheduler.")
scheduler = CosineAnnealingLR(optimizer, self.system_dict["params"]["t_max"], last_epoch=last_epoch)
logging.info(f"Start training from epoch {last_epoch + 1}.")
for epoch in range(last_epoch + 1, self.system_dict["params"]["num_epochs"]):
scheduler.step()
self.base_train(train_loader, net, criterion, optimizer,
device=DEVICE, debug_steps=self.system_dict["params"]["debug_steps"], epoch=epoch)
            net_name = self.system_dict["params"]["net"];
            if((self.system_dict["dataset"]["val"]["status"]) and (epoch % self.system_dict["params"]["validation_epochs"] == 0 or epoch == self.system_dict["params"]["num_epochs"] - 1)):
val_loss, val_regression_loss, val_classification_loss = self.base_test(val_loader, net, criterion, DEVICE)
logging.info(
f"Epoch: {epoch}, " +
f"Validation Loss: {val_loss:.4f}, " +
f"Validation Regression Loss {val_regression_loss:.4f}, " +
f"Validation Classification Loss: {val_classification_loss:.4f}"
)
net_name = self.system_dict["params"]["net"];
model_path = os.path.join(self.system_dict["params"]["checkpoint_folder"], f"{net_name}-Epoch-{epoch}-Loss-{val_loss}.pth")
net.save(model_path)
logging.info(f"Saved model {model_path}")
if(not self.system_dict["dataset"]["val"]["status"]):
model_path = os.path.join(self.system_dict["params"]["checkpoint_folder"], f"{net_name}-Epoch-{epoch}.pth")
net.save(model_path)
logging.info(f"Saved model {model_path}")
def base_train(self, loader, net, criterion, optimizer, device, debug_steps=100, epoch=-1):
net.train(True)
running_loss = 0.0
running_regression_loss = 0.0
running_classification_loss = 0.0
for i, data in enumerate(loader):
images, boxes, labels = data
images = images.to(device)
boxes = boxes.to(device)
labels = labels.to(device)
optimizer.zero_grad()
confidence, locations = net(images)
regression_loss, classification_loss = criterion(confidence, locations, labels, boxes) # TODO CHANGE BOXES
loss = regression_loss + classification_loss
loss.backward()
optimizer.step()
running_loss += loss.item()
running_regression_loss += regression_loss.item()
running_classification_loss += classification_loss.item()
if i and i % debug_steps == 0:
avg_loss = running_loss / debug_steps
avg_reg_loss = running_regression_loss / debug_steps
avg_clf_loss = running_classification_loss / debug_steps
logging.info(
f"Epoch: {epoch}, Step: {i}, " +
f"Average Loss: {avg_loss:.4f}, " +
f"Average Regression Loss {avg_reg_loss:.4f}, " +
f"Average Classification Loss: {avg_clf_loss:.4f}"
)
running_loss = 0.0
running_regression_loss = 0.0
running_classification_loss = 0.0
def base_test(self, loader, net, criterion, device):
net.eval()
running_loss = 0.0
running_regression_loss = 0.0
running_classification_loss = 0.0
num = 0
for _, data in enumerate(loader):
images, boxes, labels = data
images = images.to(device)
boxes = boxes.to(device)
labels = labels.to(device)
num += 1
with torch.no_grad():
confidence, locations = net(images)
regression_loss, classification_loss = criterion(confidence, locations, labels, boxes)
loss = regression_loss + classification_loss
running_loss += loss.item()
running_regression_loss += regression_loss.item()
running_classification_loss += classification_loss.item()
return running_loss / num, running_regression_loss / num, running_classification_loss / num
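# Illustrative usage sketch (not part of the original file): the directory names, label file and
# hyper-parameter values below are placeholders; only the call sequence mirrors the Detector API
# defined above.
#
#   gtf = Detector()
#   gtf.set_train_data_params(img_dir="VOC/train/images", label_dir="VOC/train/annotations",
#                             label_file="labels.txt", batch_size=8, num_workers=2)
#   gtf.set_val_data_params(img_dir="VOC/val/images", label_dir="VOC/val/annotations")
#   gtf.set_model_params(net="mb1-ssd", use_gpu=True)
#   gtf.set_lr_params(lr=0.001, scheduler="multi-step", milestones="40,60")
#   gtf.set_optimizer_params(momentum=0.9, weight_decay=0.0005)
#   gtf.train(num_epochs=80, val_epoch_interval=5, output_folder="trained_models/")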
| 9,779 |
550 | package play.mvc.results;
import play.exceptions.UnexpectedException;
import play.mvc.Http;
import play.mvc.Http.Request;
import play.mvc.Http.Response;
/**
* 302 Redirect
*/
public class RedirectToStatic extends Result {
String file;
public RedirectToStatic(String file) {
this.file = file;
}
public void apply(Request request, Response response) {
try {
response.status = Http.StatusCode.FOUND;
response.setHeader("Location", file);
} catch (Exception e) {
throw new UnexpectedException(e);
}
}
}
| 244 |
14,668 | <reponame>chromium/chromium
// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/raw_ptr.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h"
#include <memory>
#include "base/callback_helpers.h"
#include "base/containers/cxx20_erase.h"
#include "base/cxx17_backports.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/discardable_handle.h"
#include "gpu/command_buffer/service/decoder_client.h"
#include "gpu/command_buffer/service/gpu_fence_manager.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/multi_draw_manager.h"
#include "gpu/command_buffer/service/passthrough_discardable_manager.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "ui/gfx/geometry/rect_conversions.h"
#include "ui/gfx/overlay_plane_data.h"
#include "ui/gfx/overlay_priority_hint.h"
#include "ui/gl/ca_renderer_layer_params.h"
#include "ui/gl/dc_renderer_layer_params.h"
#include "ui/gl/gl_utils.h"
#include "ui/gl/gl_version_info.h"
namespace gpu {
namespace gles2 {
namespace {
template <typename ClientType, typename ServiceType, typename GenFunction>
error::Error GenHelper(GLsizei n,
const volatile ClientType* client_ids,
ClientServiceMap<ClientType, ServiceType>* id_map,
GenFunction gen_function) {
DCHECK(n >= 0);
std::vector<ClientType> client_ids_copy(client_ids, client_ids + n);
for (GLsizei ii = 0; ii < n; ++ii) {
if (id_map->HasClientID(client_ids_copy[ii])) {
return error::kInvalidArguments;
}
}
if (!CheckUniqueAndNonNullIds(n, client_ids_copy.data())) {
return error::kInvalidArguments;
}
std::vector<ServiceType> service_ids(n, 0);
gen_function(n, service_ids.data());
for (GLsizei ii = 0; ii < n; ++ii) {
id_map->SetIDMapping(client_ids_copy[ii], service_ids[ii]);
}
return error::kNoError;
}
template <typename ClientType, typename ServiceType, typename GenFunction>
error::Error CreateHelper(ClientType client_id,
ClientServiceMap<ClientType, ServiceType>* id_map,
GenFunction create_function) {
if (id_map->HasClientID(client_id)) {
return error::kInvalidArguments;
}
ServiceType service_id = create_function();
id_map->SetIDMapping(client_id, service_id);
return error::kNoError;
}
template <typename ClientType, typename ServiceType, typename DeleteFunction>
error::Error DeleteHelper(GLsizei n,
const volatile ClientType* client_ids,
ClientServiceMap<ClientType, ServiceType>* id_map,
DeleteFunction delete_function) {
DCHECK(n >= 0);
std::vector<ServiceType> service_ids(n, 0);
for (GLsizei ii = 0; ii < n; ++ii) {
ClientType client_id = client_ids[ii];
// Don't pass service IDs of objects with a client ID of 0. They are
    // emulated and should not be deletable.
if (client_id != 0) {
service_ids[ii] = id_map->GetServiceIDOrInvalid(client_id);
id_map->RemoveClientID(client_id);
}
}
delete_function(n, service_ids.data());
return error::kNoError;
}
template <typename ClientType, typename ServiceType, typename DeleteFunction>
error::Error DeleteHelper(ClientType client_id,
ClientServiceMap<ClientType, ServiceType>* id_map,
DeleteFunction delete_function) {
delete_function(id_map->GetServiceIDOrInvalid(client_id));
id_map->RemoveClientID(client_id);
return error::kNoError;
}
template <typename ClientType, typename ServiceType, typename GenFunction>
ServiceType GetServiceID(ClientType client_id,
ClientServiceMap<ClientType, ServiceType>* id_map,
bool create_if_missing,
GenFunction gen_function) {
ServiceType service_id = id_map->invalid_service_id();
if (id_map->GetServiceID(client_id, &service_id)) {
return service_id;
}
if (create_if_missing) {
service_id = gen_function();
id_map->SetIDMapping(client_id, service_id);
return service_id;
}
return id_map->invalid_service_id();
}
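// The Do* command implementations further below rely on the helpers above (and
// on the type-specific wrappers that follow) to translate client-visible object
// IDs into service-side (driver) IDs, optionally generating the service object
// on first use when bind_generates_resource semantics are in effect.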
GLuint GetTextureServiceID(gl::GLApi* api,
GLuint client_id,
PassthroughResources* resources,
bool create_if_missing) {
GLuint service_id = resources->texture_id_map.invalid_service_id();
if (resources->texture_id_map.GetServiceID(client_id, &service_id)) {
return service_id;
}
if (create_if_missing) {
service_id = 0;
api->glGenTexturesFn(1, &service_id);
resources->texture_id_map.SetIDMapping(client_id, service_id);
return service_id;
}
return resources->texture_id_map.invalid_service_id();
}
GLuint GetBufferServiceID(gl::GLApi* api,
GLuint client_id,
PassthroughResources* resources,
bool create_if_missing) {
return GetServiceID(client_id, &resources->buffer_id_map, create_if_missing,
[api]() {
GLuint service_id = 0;
api->glGenBuffersARBFn(1, &service_id);
return service_id;
});
}
GLuint GetRenderbufferServiceID(gl::GLApi* api,
GLuint client_id,
PassthroughResources* resources,
bool create_if_missing) {
return GetServiceID(client_id, &resources->renderbuffer_id_map,
create_if_missing, [api]() {
GLuint service_id = 0;
api->glGenRenderbuffersEXTFn(1, &service_id);
return service_id;
});
}
GLuint GetFramebufferServiceID(gl::GLApi* api,
GLuint client_id,
ClientServiceMap<GLuint, GLuint>* id_map,
bool create_if_missing) {
return GetServiceID(client_id, id_map, create_if_missing, [api]() {
GLuint service_id = 0;
api->glGenFramebuffersEXTFn(1, &service_id);
return service_id;
});
}
GLuint GetTransformFeedbackServiceID(GLuint client_id,
ClientServiceMap<GLuint, GLuint>* id_map) {
return id_map->GetServiceIDOrInvalid(client_id);
}
GLuint GetVertexArrayServiceID(GLuint client_id,
ClientServiceMap<GLuint, GLuint>* id_map) {
return id_map->GetServiceIDOrInvalid(client_id);
}
GLuint GetProgramServiceID(GLuint client_id, PassthroughResources* resources) {
return resources->program_id_map.GetServiceIDOrInvalid(client_id);
}
GLuint GetShaderServiceID(GLuint client_id, PassthroughResources* resources) {
return resources->shader_id_map.GetServiceIDOrInvalid(client_id);
}
GLuint GetQueryServiceID(GLuint client_id,
ClientServiceMap<GLuint, GLuint>* id_map) {
return id_map->GetServiceIDOrInvalid(client_id);
}
GLuint GetSamplerServiceID(GLuint client_id, PassthroughResources* resources) {
return resources->sampler_id_map.GetServiceIDOrInvalid(client_id);
}
GLsync GetSyncServiceID(GLuint client_id, PassthroughResources* resources) {
return reinterpret_cast<GLsync>(
resources->sync_id_map.GetServiceIDOrInvalid(client_id));
}
template <typename T>
void InsertValueIntoBuffer(std::vector<uint8_t>* data,
const T& value,
size_t offset) {
DCHECK_LE(offset + sizeof(T), data->size());
memcpy(data->data() + offset, &value, sizeof(T));
}
template <typename T>
void AppendValueToBuffer(std::vector<uint8_t>* data, const T& value) {
const base::CheckedNumeric<size_t> old_size = data->size();
data->resize((old_size + sizeof(T)).ValueOrDie());
memcpy(data->data() + old_size.ValueOrDie(), &value, sizeof(T));
}
void AppendStringToBuffer(std::vector<uint8_t>* data,
const char* str,
size_t len) {
const base::CheckedNumeric<size_t> old_size = data->size();
data->resize((old_size + len).ValueOrDie());
memcpy(data->data() + old_size.ValueOrDie(), str, len);
}
void AssignGLRectangle(GLint rectangle[4],
GLint x,
GLint y,
GLint width,
GLint height) {
rectangle[0] = x;
rectangle[1] = y;
rectangle[2] = width;
rectangle[3] = height;
}
// In order to minimize the amount of data copied, the command buffer client
// unpacks pixels before sending the glTex[Sub]Image[2|3]D calls. The only
// parameter it doesn't handle is the alignment. Resetting the unpack state is
// not needed when uploading from a PBO and for compressed formats which the
// client sends untouched. This class handles resetting and restoring the unpack
// state.
// TODO(<EMAIL>) it would be nicer to handle the resetting /
// restoring on the client side.
class ScopedUnpackStateButAlignmentReset {
public:
ScopedUnpackStateButAlignmentReset(gl::GLApi* api, bool enable, bool is_3d)
: api_(api) {
if (!enable) {
return;
}
api_->glGetIntegervFn(GL_UNPACK_SKIP_PIXELS, &skip_pixels_);
api_->glPixelStoreiFn(GL_UNPACK_SKIP_PIXELS, 0);
api_->glGetIntegervFn(GL_UNPACK_SKIP_ROWS, &skip_rows_);
api_->glPixelStoreiFn(GL_UNPACK_SKIP_ROWS, 0);
api_->glGetIntegervFn(GL_UNPACK_ROW_LENGTH, &row_length_);
api_->glPixelStoreiFn(GL_UNPACK_ROW_LENGTH, 0);
if (is_3d) {
api_->glGetIntegervFn(GL_UNPACK_SKIP_IMAGES, &skip_images_);
api_->glPixelStoreiFn(GL_UNPACK_SKIP_IMAGES, 0);
api_->glGetIntegervFn(GL_UNPACK_IMAGE_HEIGHT, &image_height_);
api_->glPixelStoreiFn(GL_UNPACK_IMAGE_HEIGHT, 0);
}
}
~ScopedUnpackStateButAlignmentReset() {
if (skip_pixels_ != 0) {
api_->glPixelStoreiFn(GL_UNPACK_SKIP_PIXELS, skip_pixels_);
}
if (skip_rows_ != 0) {
api_->glPixelStoreiFn(GL_UNPACK_SKIP_ROWS, skip_rows_);
}
if (skip_images_ != 0) {
api_->glPixelStoreiFn(GL_UNPACK_SKIP_IMAGES, skip_images_);
}
if (row_length_ != 0) {
api_->glPixelStoreiFn(GL_UNPACK_ROW_LENGTH, row_length_);
}
if (image_height_ != 0) {
api_->glPixelStoreiFn(GL_UNPACK_IMAGE_HEIGHT, image_height_);
}
}
private:
raw_ptr<gl::GLApi> api_;
GLint skip_pixels_ = 0;
GLint skip_rows_ = 0;
GLint skip_images_ = 0;
GLint row_length_ = 0;
GLint image_height_ = 0;
};
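// Sketch of how the scoper above is typically used (illustrative only; the
// variable names are assumptions, not a verbatim call site):
//
//   ScopedUnpackStateButAlignmentReset reset_unpack(
//       api(), /*enable=*/!upload_from_pbo, /*is_3d=*/false);
//   api()->glTexImage2DRobustANGLEFn(...);
//
// The constructor zeroes the unpack state only when enabled, and the destructor
// restores whatever it changed.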
class ScopedPackStateRowLengthReset {
public:
ScopedPackStateRowLengthReset(gl::GLApi* api, bool enable) : api_(api) {
if (!enable) {
return;
}
api_->glGetIntegervFn(GL_PACK_ROW_LENGTH, &row_length_);
api_->glPixelStoreiFn(GL_PACK_ROW_LENGTH, 0);
}
~ScopedPackStateRowLengthReset() {
if (row_length_ != 0) {
api_->glPixelStoreiFn(GL_PACK_ROW_LENGTH, row_length_);
}
}
private:
raw_ptr<gl::GLApi> api_;
GLint row_length_ = 0;
};
bool ModifyAttachmentForEmulatedFramebuffer(GLenum* attachment) {
switch (*attachment) {
case GL_BACK:
*attachment = GL_COLOR_ATTACHMENT0;
return true;
case GL_DEPTH:
*attachment = GL_DEPTH_ATTACHMENT;
return true;
case GL_STENCIL:
*attachment = GL_STENCIL_ATTACHMENT;
return true;
default:
return false;
}
}
bool ModifyAttachmentsForEmulatedFramebuffer(std::vector<GLenum>* attachments) {
for (GLenum& attachment : *attachments) {
if (!ModifyAttachmentForEmulatedFramebuffer(&attachment)) {
return false;
}
}
return true;
}
} // anonymous namespace
// Implementations of commands
error::Error GLES2DecoderPassthroughImpl::DoActiveTexture(GLenum texture) {
CheckErrorCallbackState();
api()->glActiveTextureFn(texture);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
active_texture_unit_ = static_cast<size_t>(texture) - GL_TEXTURE0;
DCHECK(active_texture_unit_ < kMaxTextureUnits);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoAttachShader(GLuint program,
GLuint shader) {
api()->glAttachShaderFn(GetProgramServiceID(program, resources_),
GetShaderServiceID(shader, resources_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindAttribLocation(
GLuint program,
GLuint index,
const char* name) {
api()->glBindAttribLocationFn(GetProgramServiceID(program, resources_), index,
name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindBuffer(GLenum target,
GLuint buffer) {
CheckErrorCallbackState();
api()->glBindBufferFn(target, GetBufferServiceID(api(), buffer, resources_,
bind_generates_resource_));
if (CheckErrorCallbackState()) {
return error::kNoError;
}
DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
bound_buffers_[target] = buffer;
if (target == GL_ELEMENT_ARRAY_BUFFER) {
bound_element_array_buffer_dirty_ = false;
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindBufferBase(GLenum target,
GLuint index,
GLuint buffer) {
CheckErrorCallbackState();
api()->glBindBufferBaseFn(
target, index,
GetBufferServiceID(api(), buffer, resources_, bind_generates_resource_));
if (CheckErrorCallbackState()) {
return error::kNoError;
}
DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
bound_buffers_[target] = buffer;
if (target == GL_ELEMENT_ARRAY_BUFFER) {
bound_element_array_buffer_dirty_ = false;
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindBufferRange(GLenum target,
GLuint index,
GLuint buffer,
GLintptr offset,
GLsizeiptr size) {
CheckErrorCallbackState();
api()->glBindBufferRangeFn(
target, index,
GetBufferServiceID(api(), buffer, resources_, bind_generates_resource_),
offset, size);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
bound_buffers_[target] = buffer;
if (target == GL_ELEMENT_ARRAY_BUFFER) {
bound_element_array_buffer_dirty_ = false;
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindFramebuffer(
GLenum target,
GLuint framebuffer) {
CheckErrorCallbackState();
api()->glBindFramebufferEXTFn(
target, GetFramebufferServiceID(api(), framebuffer, &framebuffer_id_map_,
bind_generates_resource_));
if (CheckErrorCallbackState()) {
return error::kNoError;
}
// Update tracking of the bound framebuffer
bool draw_framebuffer_changed = false;
switch (target) {
case GL_FRAMEBUFFER_EXT:
draw_framebuffer_changed = true;
bound_draw_framebuffer_ = framebuffer;
bound_read_framebuffer_ = framebuffer;
break;
case GL_DRAW_FRAMEBUFFER:
draw_framebuffer_changed = true;
bound_draw_framebuffer_ = framebuffer;
break;
case GL_READ_FRAMEBUFFER:
bound_read_framebuffer_ = framebuffer;
break;
default:
NOTREACHED();
break;
}
// Resync the surface offset if the draw framebuffer has changed to or from
// the default framebuffer
if (draw_framebuffer_changed && bound_draw_framebuffer_ != framebuffer &&
(bound_draw_framebuffer_ == 0 || framebuffer == 0)) {
ApplySurfaceDrawOffset();
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindImageTexture(GLuint unit,
GLuint texture,
GLint level,
GLboolean layered,
GLint layer,
GLenum access,
GLenum format) {
api()->glBindImageTextureEXTFn(
unit,
GetTextureServiceID(api(), texture, resources_, bind_generates_resource_),
level, layered, layer, access, format);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindRenderbuffer(
GLenum target,
GLuint renderbuffer) {
api()->glBindRenderbufferEXTFn(
target, GetRenderbufferServiceID(api(), renderbuffer, resources_,
bind_generates_resource_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindSampler(GLuint unit,
GLuint sampler) {
api()->glBindSamplerFn(unit, GetSamplerServiceID(sampler, resources_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindTexture(GLenum target,
GLuint texture) {
GLuint service_id =
GetTextureServiceID(api(), texture, resources_, bind_generates_resource_);
CheckErrorCallbackState();
api()->glBindTextureFn(target, service_id);
// Only update tracking if no error was generated in the bind call
if (CheckErrorCallbackState()) {
return error::kNoError;
}
// Track the currently bound textures
DCHECK(GLenumToTextureTarget(target) != TextureTarget::kUnkown);
scoped_refptr<TexturePassthrough> texture_passthrough;
// If there was anything bound that required an image bind / copy,
// forget it since it's no longer bound to a sampler.
RemovePendingBindingTexture(target, active_texture_unit_);
if (service_id != 0) {
// Label the texture with additional context info
const char* label = ContextTypeToLabel(feature_info_->context_type());
api()->glObjectLabelFn(GL_TEXTURE, service_id, strlen(label), label);
// Create a new texture object to track this texture
if (!resources_->texture_object_map.GetServiceID(texture,
&texture_passthrough) ||
texture_passthrough == nullptr) {
texture_passthrough = new TexturePassthrough(service_id, target);
resources_->texture_object_map.SetIDMapping(texture, texture_passthrough);
} else {
// Shouldn't be possible to get here if this texture has a different
// target than the one it was just bound to
DCHECK(texture_passthrough->target() == target);
}
DCHECK(texture_passthrough);
// If |texture_passthrough| has a bound image that requires processing
// before a draw, then keep track of it.
if (texture_passthrough->is_bind_pending()) {
textures_pending_binding_.emplace_back(target, active_texture_unit_,
texture_passthrough->AsWeakPtr());
}
}
BoundTexture* bound_texture =
&bound_textures_[static_cast<size_t>(GLenumToTextureTarget(target))]
[active_texture_unit_];
bound_texture->client_id = texture;
bound_texture->texture = std::move(texture_passthrough);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindTransformFeedback(
GLenum target,
GLuint transformfeedback) {
api()->glBindTransformFeedbackFn(
target, GetTransformFeedbackServiceID(transformfeedback,
&transform_feedback_id_map_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBlendColor(GLclampf red,
GLclampf green,
GLclampf blue,
GLclampf alpha) {
api()->glBlendColorFn(red, green, blue, alpha);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBlendEquation(GLenum mode) {
api()->glBlendEquationFn(mode);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBlendEquationiOES(GLuint buf,
GLenum mode) {
api()->glBlendEquationiOESFn(buf, mode);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBlendEquationSeparate(
GLenum modeRGB,
GLenum modeAlpha) {
api()->glBlendEquationSeparateFn(modeRGB, modeAlpha);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBlendEquationSeparateiOES(
GLuint buf,
GLenum modeRGB,
GLenum modeAlpha) {
api()->glBlendEquationSeparateiOESFn(buf, modeRGB, modeAlpha);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBlendFunc(GLenum sfactor,
GLenum dfactor) {
api()->glBlendFuncFn(sfactor, dfactor);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBlendFunciOES(GLuint buf,
GLenum sfactor,
GLenum dfactor) {
api()->glBlendFunciOESFn(buf, sfactor, dfactor);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBlendFuncSeparate(GLenum srcRGB,
GLenum dstRGB,
GLenum srcAlpha,
GLenum dstAlpha) {
api()->glBlendFuncSeparateFn(srcRGB, dstRGB, srcAlpha, dstAlpha);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBlendFuncSeparateiOES(
GLuint buf,
GLenum srcRGB,
GLenum dstRGB,
GLenum srcAlpha,
GLenum dstAlpha) {
api()->glBlendFuncSeparateiOESFn(buf, srcRGB, dstRGB, srcAlpha, dstAlpha);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBufferData(GLenum target,
GLsizeiptr size,
const void* data,
GLenum usage) {
CheckErrorCallbackState();
api()->glBufferDataFn(target, size, data, usage);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
if (target == GL_ELEMENT_ARRAY_BUFFER) {
LazilyUpdateCurrentlyBoundElementArrayBuffer();
}
// Calling buffer data on a mapped buffer will implicitly unmap it
resources_->mapped_buffer_map.erase(bound_buffers_[target]);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBufferSubData(GLenum target,
GLintptr offset,
GLsizeiptr size,
const void* data) {
api()->glBufferSubDataFn(target, offset, size, data);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCheckFramebufferStatus(
GLenum target,
uint32_t* result) {
*result = api()->glCheckFramebufferStatusEXTFn(target);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoClear(GLbitfield mask) {
api()->glClearFn(mask);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoClearBufferfi(GLenum buffer,
GLint drawbuffers,
GLfloat depth,
GLint stencil) {
api()->glClearBufferfiFn(buffer, drawbuffers, depth, stencil);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoClearBufferfv(
GLenum buffer,
GLint drawbuffers,
const volatile GLfloat* value) {
api()->glClearBufferfvFn(buffer, drawbuffers,
const_cast<const GLfloat*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoClearBufferiv(
GLenum buffer,
GLint drawbuffers,
const volatile GLint* value) {
api()->glClearBufferivFn(buffer, drawbuffers,
const_cast<const GLint*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoClearBufferuiv(
GLenum buffer,
GLint drawbuffers,
const volatile GLuint* value) {
api()->glClearBufferuivFn(buffer, drawbuffers,
const_cast<const GLuint*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoClearColor(GLclampf red,
GLclampf green,
GLclampf blue,
GLclampf alpha) {
api()->glClearColorFn(red, green, blue, alpha);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoClearDepthf(GLclampf depth) {
api()->glClearDepthfFn(depth);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoClearStencil(GLint s) {
api()->glClearStencilFn(s);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoClientWaitSync(GLuint sync,
GLbitfield flags,
GLuint64 timeout,
GLenum* result) {
// Force GL_SYNC_FLUSH_COMMANDS_BIT to avoid infinite wait.
GLbitfield modified_flags = flags | GL_SYNC_FLUSH_COMMANDS_BIT;
*result = api()->glClientWaitSyncFn(GetSyncServiceID(sync, resources_),
modified_flags, timeout);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoColorMask(GLboolean red,
GLboolean green,
GLboolean blue,
GLboolean alpha) {
api()->glColorMaskFn(red, green, blue, alpha);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoColorMaskiOES(GLuint buf,
GLboolean red,
GLboolean green,
GLboolean blue,
GLboolean alpha) {
api()->glColorMaskiOESFn(buf, red, green, blue, alpha);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCompileShader(GLuint shader) {
api()->glCompileShaderFn(GetShaderServiceID(shader, resources_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCompressedTexImage2D(
GLenum target,
GLint level,
GLenum internalformat,
GLsizei width,
GLsizei height,
GLint border,
GLsizei image_size,
GLsizei data_size,
const void* data) {
CheckErrorCallbackState();
api()->glCompressedTexImage2DRobustANGLEFn(target, level, internalformat,
width, height, border, image_size,
data_size, data);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
UpdateTextureSizeFromTarget(target);
// Texture data upload can be slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCompressedTexSubImage2D(
GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLsizei width,
GLsizei height,
GLenum format,
GLsizei image_size,
GLsizei data_size,
const void* data) {
api()->glCompressedTexSubImage2DRobustANGLEFn(target, level, xoffset, yoffset,
width, height, format,
image_size, data_size, data);
// Texture data upload can be slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCompressedTexImage3D(
GLenum target,
GLint level,
GLenum internalformat,
GLsizei width,
GLsizei height,
GLsizei depth,
GLint border,
GLsizei image_size,
GLsizei data_size,
const void* data) {
CheckErrorCallbackState();
api()->glCompressedTexImage3DRobustANGLEFn(target, level, internalformat,
width, height, depth, border,
image_size, data_size, data);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
UpdateTextureSizeFromTarget(target);
// Texture data upload can be slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCompressedTexSubImage3D(
GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLint zoffset,
GLsizei width,
GLsizei height,
GLsizei depth,
GLenum format,
GLsizei image_size,
GLsizei data_size,
const void* data) {
api()->glCompressedTexSubImage3DRobustANGLEFn(
target, level, xoffset, yoffset, zoffset, width, height, depth, format,
image_size, data_size, data);
// Texture data upload can be slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoContextVisibilityHintCHROMIUM(
GLboolean visibility) {
if (feature_info_->IsWebGLContext())
context_->SetVisibility(visibility == GL_TRUE);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCopyBufferSubData(
GLenum readtarget,
GLenum writetarget,
GLintptr readoffset,
GLintptr writeoffset,
GLsizeiptr size) {
api()->glCopyBufferSubDataFn(readtarget, writetarget, readoffset, writeoffset,
size);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCopyTexImage2D(
GLenum target,
GLint level,
GLenum internalformat,
GLint x,
GLint y,
GLsizei width,
GLsizei height,
GLint border) {
CheckErrorCallbackState();
api()->glCopyTexImage2DFn(target, level, internalformat, x, y, width, height,
border);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
UpdateTextureSizeFromTarget(target);
// Texture data copying can be slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCopyTexSubImage2D(GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLint x,
GLint y,
GLsizei width,
GLsizei height) {
api()->glCopyTexSubImage2DFn(target, level, xoffset, yoffset, x, y, width,
height);
// Texture data copying can be slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCopyTexSubImage3D(GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLint zoffset,
GLint x,
GLint y,
GLsizei width,
GLsizei height) {
api()->glCopyTexSubImage3DFn(target, level, xoffset, yoffset, zoffset, x, y,
width, height);
// Texture data copying can be slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCreateProgram(GLuint client_id) {
return CreateHelper(client_id, &resources_->program_id_map,
[this]() { return api()->glCreateProgramFn(); });
}
error::Error GLES2DecoderPassthroughImpl::DoCreateShader(GLenum type,
GLuint client_id) {
return CreateHelper(client_id, &resources_->shader_id_map,
[this, type]() { return api()->glCreateShaderFn(type); });
}
error::Error GLES2DecoderPassthroughImpl::DoCullFace(GLenum mode) {
api()->glCullFaceFn(mode);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteBuffers(
GLsizei n,
const volatile GLuint* buffers) {
// DeleteHelper requires that n is non-negative because it allocates a copy of
// the IDs
if (n < 0) {
InsertError(GL_INVALID_VALUE, "n cannot be negative.");
return error::kNoError;
}
LazilyUpdateCurrentlyBoundElementArrayBuffer();
std::vector<GLuint> service_ids(n, 0);
for (GLsizei ii = 0; ii < n; ++ii) {
GLuint client_id = buffers[ii];
// Update the bound and mapped buffer state tracking
for (auto& buffer_binding : bound_buffers_) {
if (buffer_binding.second == client_id) {
buffer_binding.second = 0;
}
resources_->mapped_buffer_map.erase(client_id);
}
service_ids[ii] =
resources_->buffer_id_map.GetServiceIDOrInvalid(client_id);
resources_->buffer_id_map.RemoveClientID(client_id);
auto is_the_deleted_buffer = [client_id](const auto& update) {
return update.first == client_id;
};
base::EraseIf(buffer_shadow_updates_, is_the_deleted_buffer);
for (PendingQuery& pending_query : pending_queries_) {
base::EraseIf(pending_query.buffer_shadow_updates, is_the_deleted_buffer);
}
}
api()->glDeleteBuffersARBFn(n, service_ids.data());
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteFramebuffers(
GLsizei n,
const volatile GLuint* framebuffers) {
// DeleteHelper requires that n is non-negative because it allocates a copy of
// the IDs
if (n < 0) {
InsertError(GL_INVALID_VALUE, "n cannot be negative.");
return error::kNoError;
}
std::vector<GLuint> framebuffers_copy(framebuffers, framebuffers + n);
  // If a bound framebuffer is deleted, its binding is reset to 0. In the case
// of an emulated default framebuffer, bind the emulated one.
for (GLuint framebuffer : framebuffers_copy) {
if (framebuffer == bound_draw_framebuffer_) {
bound_draw_framebuffer_ = 0;
if (emulated_back_buffer_) {
api()->glBindFramebufferEXTFn(
GL_DRAW_FRAMEBUFFER, emulated_back_buffer_->framebuffer_service_id);
}
// Update the surface offset if the bound draw framebuffer is deleted
ApplySurfaceDrawOffset();
}
if (framebuffer == bound_read_framebuffer_) {
bound_read_framebuffer_ = 0;
if (emulated_back_buffer_) {
api()->glBindFramebufferEXTFn(
GL_READ_FRAMEBUFFER, emulated_back_buffer_->framebuffer_service_id);
}
}
}
return DeleteHelper(n, framebuffers_copy.data(), &framebuffer_id_map_,
[this](GLsizei n, GLuint* framebuffers) {
api()->glDeleteFramebuffersEXTFn(n, framebuffers);
});
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteProgram(GLuint program) {
return DeleteHelper(
program, &resources_->program_id_map,
[this](GLuint program) { api()->glDeleteProgramFn(program); });
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteRenderbuffers(
GLsizei n,
const volatile GLuint* renderbuffers) {
// DeleteHelper requires that n is non-negative because it allocates a copy of
// the IDs
if (n < 0) {
InsertError(GL_INVALID_VALUE, "n cannot be negative.");
return error::kNoError;
}
return DeleteHelper(n, renderbuffers, &resources_->renderbuffer_id_map,
[this](GLsizei n, GLuint* renderbuffers) {
api()->glDeleteRenderbuffersEXTFn(n, renderbuffers);
});
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteSamplers(
GLsizei n,
const volatile GLuint* samplers) {
// DeleteHelper requires that n is non-negative because it allocates a copy of
// the IDs
if (n < 0) {
InsertError(GL_INVALID_VALUE, "n cannot be negative.");
return error::kNoError;
}
return DeleteHelper(n, samplers, &resources_->sampler_id_map,
[this](GLsizei n, GLuint* samplers) {
api()->glDeleteSamplersFn(n, samplers);
});
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteSync(GLuint sync) {
return DeleteHelper(sync, &resources_->sync_id_map, [this](uintptr_t sync) {
api()->glDeleteSyncFn(reinterpret_cast<GLsync>(sync));
});
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteShader(GLuint shader) {
return DeleteHelper(
shader, &resources_->shader_id_map,
[this](GLuint shader) { api()->glDeleteShaderFn(shader); });
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteTextures(
GLsizei n,
const volatile GLuint* textures) {
// DeleteHelper requires that n is non-negative because it allocates a copy of
// the IDs
if (n < 0) {
InsertError(GL_INVALID_VALUE, "n cannot be negative.");
return error::kNoError;
}
// Textures that are currently associated with a mailbox are stored in the
// texture_object_map_ and are deleted automatically when they are
// unreferenced. Only delete textures that are not in this map.
std::vector<GLuint> non_mailbox_client_ids;
for (GLsizei ii = 0; ii < n; ++ii) {
GLuint client_id = textures[ii];
scoped_refptr<TexturePassthrough> texture;
if (!resources_->texture_object_map.GetServiceID(client_id, &texture) ||
texture == nullptr) {
// Delete with DeleteHelper
non_mailbox_client_ids.push_back(client_id);
} else {
// Deleted when unreferenced
resources_->texture_id_map.RemoveClientID(client_id);
resources_->texture_object_map.RemoveClientID(client_id);
resources_->texture_shared_image_map.erase(client_id);
UpdateTextureBinding(texture->target(), client_id, nullptr);
}
// Notify the discardable manager that the texture is deleted
group_->passthrough_discardable_manager()->DeleteTexture(client_id,
group_.get());
}
return DeleteHelper(
non_mailbox_client_ids.size(), non_mailbox_client_ids.data(),
&resources_->texture_id_map, [this](GLsizei n, GLuint* textures) {
api()->glDeleteTexturesFn(n, textures);
});
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteTransformFeedbacks(
GLsizei n,
const volatile GLuint* ids) {
// DeleteHelper requires that n is non-negative because it allocates a copy of
// the IDs
if (n < 0) {
InsertError(GL_INVALID_VALUE, "n cannot be negative.");
return error::kNoError;
}
return DeleteHelper(n, ids, &transform_feedback_id_map_,
[this](GLsizei n, GLuint* transform_feedbacks) {
api()->glDeleteTransformFeedbacksFn(
n, transform_feedbacks);
});
}
error::Error GLES2DecoderPassthroughImpl::DoDepthFunc(GLenum func) {
api()->glDepthFuncFn(func);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDepthMask(GLboolean flag) {
api()->glDepthMaskFn(flag);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDepthRangef(GLclampf zNear,
GLclampf zFar) {
api()->glDepthRangefFn(zNear, zFar);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDetachShader(GLuint program,
GLuint shader) {
api()->glDetachShaderFn(GetProgramServiceID(program, resources_),
GetShaderServiceID(shader, resources_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDisable(GLenum cap) {
api()->glDisableFn(cap);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDisableVertexAttribArray(
GLuint index) {
api()->glDisableVertexAttribArrayFn(index);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDispatchCompute(
GLuint num_groups_x,
GLuint num_groups_y,
GLuint num_groups_z) {
BindPendingImagesForSamplersIfNeeded();
api()->glDispatchComputeFn(num_groups_x, num_groups_y, num_groups_z);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDispatchComputeIndirect(
GLintptr offset) {
BindPendingImagesForSamplersIfNeeded();
// TODO(<EMAIL>): Use glDispatchComputeIndirectRobustANGLEFn()
// when it's ready in ANGLE.
api()->glDispatchComputeIndirectFn(offset);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDrawArrays(GLenum mode,
GLint first,
GLsizei count) {
BindPendingImagesForSamplersIfNeeded();
api()->glDrawArraysFn(mode, first, count);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDrawArraysIndirect(
GLenum mode,
const void* offset) {
BindPendingImagesForSamplersIfNeeded();
// TODO(<EMAIL>): Use glDrawArraysIndirectRobustANGLEFn() when
// it's ready in ANGLE.
api()->glDrawArraysIndirectFn(mode, offset);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDrawElements(GLenum mode,
GLsizei count,
GLenum type,
const void* indices) {
BindPendingImagesForSamplersIfNeeded();
api()->glDrawElementsFn(mode, count, type, indices);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDrawElementsIndirect(
GLenum mode,
GLenum type,
const void* offset) {
BindPendingImagesForSamplersIfNeeded();
// TODO(<EMAIL>): Use glDrawElementsIndirectRobustANGLEFn() when
// it's ready in ANGLE.
api()->glDrawElementsIndirectFn(mode, type, offset);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoEnable(GLenum cap) {
api()->glEnableFn(cap);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoEnableVertexAttribArray(
GLuint index) {
api()->glEnableVertexAttribArrayFn(index);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoFenceSync(GLenum condition,
GLbitfield flags,
GLuint client_id) {
if (resources_->sync_id_map.HasClientID(client_id)) {
return error::kInvalidArguments;
}
CheckErrorCallbackState();
GLsync service_id = api()->glFenceSyncFn(condition, flags);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
resources_->sync_id_map.SetIDMapping(client_id,
reinterpret_cast<uintptr_t>(service_id));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoFinish() {
  // Finish can take a long time; make sure the watchdog gives it as much time
  // as possible to complete.
group_->ReportProgress();
api()->glFinishFn();
group_->ReportProgress();
error::Error error = ProcessReadPixels(true);
if (error != error::kNoError) {
return error;
}
return ProcessQueries(true);
}
error::Error GLES2DecoderPassthroughImpl::DoFlush() {
api()->glFlushFn();
error::Error error = ProcessReadPixels(false);
if (error != error::kNoError) {
return error;
}
return ProcessQueries(false);
}
error::Error GLES2DecoderPassthroughImpl::DoFlushMappedBufferRange(
GLenum target,
GLintptr offset,
GLsizeiptr size) {
if (target == GL_ELEMENT_ARRAY_BUFFER) {
LazilyUpdateCurrentlyBoundElementArrayBuffer();
}
auto bound_buffers_iter = bound_buffers_.find(target);
if (bound_buffers_iter == bound_buffers_.end() ||
bound_buffers_iter->second == 0) {
InsertError(GL_INVALID_OPERATION, "No buffer bound to this target.");
return error::kNoError;
}
GLuint client_buffer = bound_buffers_iter->second;
auto mapped_buffer_info_iter =
resources_->mapped_buffer_map.find(client_buffer);
if (mapped_buffer_info_iter == resources_->mapped_buffer_map.end()) {
InsertError(GL_INVALID_OPERATION, "Buffer is not mapped.");
return error::kNoError;
}
const MappedBuffer& map_info = mapped_buffer_info_iter->second;
if (offset < 0) {
InsertError(GL_INVALID_VALUE, "Offset cannot be negative.");
return error::kNoError;
}
if (size < 0) {
InsertError(GL_INVALID_VALUE, "Size cannot be negative.");
return error::kNoError;
}
base::CheckedNumeric<size_t> range_start(offset);
  base::CheckedNumeric<size_t> range_end = range_start + size;
if (!range_end.IsValid() || range_end.ValueOrDefault(0) > map_info.size) {
InsertError(GL_INVALID_OPERATION,
"Flush range is not within the original mapping size.");
return error::kNoError;
}
uint8_t* mem = GetSharedMemoryAs<uint8_t*>(
map_info.data_shm_id, map_info.data_shm_offset, map_info.size);
if (!mem) {
return error::kOutOfBounds;
}
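  // Copy the flushed range from the client's shared-memory view of the mapping
  // back into the service-side mapped pointer before asking the driver to flush
  // it.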
memcpy(map_info.map_ptr + offset, mem + offset, size);
api()->glFlushMappedBufferRangeFn(target, offset, size);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoFramebufferParameteri(GLenum target,
GLenum pname,
GLint param) {
api()->glFramebufferParameteriFn(target, pname, param);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoFramebufferRenderbuffer(
GLenum target,
GLenum attachment,
GLenum renderbuffertarget,
GLuint renderbuffer) {
if (IsEmulatedFramebufferBound(target)) {
InsertError(GL_INVALID_OPERATION,
"Cannot change the attachments of the default framebuffer.");
return error::kNoError;
}
api()->glFramebufferRenderbufferEXTFn(
target, attachment, renderbuffertarget,
GetRenderbufferServiceID(api(), renderbuffer, resources_, false));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoFramebufferTexture2D(
GLenum target,
GLenum attachment,
GLenum textarget,
GLuint texture,
GLint level) {
if (IsEmulatedFramebufferBound(target)) {
InsertError(GL_INVALID_OPERATION,
"Cannot change the attachments of the default framebuffer.");
return error::kNoError;
}
BindPendingImageForClientIDIfNeeded(texture);
api()->glFramebufferTexture2DEXTFn(
target, attachment, textarget,
GetTextureServiceID(api(), texture, resources_, false), level);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoFramebufferTextureLayer(
GLenum target,
GLenum attachment,
GLuint texture,
GLint level,
GLint layer) {
if (IsEmulatedFramebufferBound(target)) {
InsertError(GL_INVALID_OPERATION,
"Cannot change the attachments of the default framebuffer.");
return error::kNoError;
}
api()->glFramebufferTextureLayerFn(
target, attachment,
GetTextureServiceID(api(), texture, resources_, false), level, layer);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoFramebufferTextureMultiviewOVR(
GLenum target,
GLenum attachment,
GLuint texture,
GLint level,
GLint base_view_index,
GLsizei num_views) {
if (IsEmulatedFramebufferBound(target)) {
InsertError(GL_INVALID_OPERATION,
"Cannot change the attachments of the default framebuffer.");
return error::kNoError;
}
api()->glFramebufferTextureMultiviewOVRFn(
target, attachment,
GetTextureServiceID(api(), texture, resources_, false), level,
base_view_index, num_views);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoFrontFace(GLenum mode) {
api()->glFrontFaceFn(mode);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGenBuffers(
GLsizei n,
volatile GLuint* buffers) {
return GenHelper(n, buffers, &resources_->buffer_id_map,
[this](GLsizei n, GLuint* buffers) {
api()->glGenBuffersARBFn(n, buffers);
});
}
error::Error GLES2DecoderPassthroughImpl::DoGenerateMipmap(GLenum target) {
api()->glGenerateMipmapEXTFn(target);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGenFramebuffers(
GLsizei n,
volatile GLuint* framebuffers) {
return GenHelper(n, framebuffers, &framebuffer_id_map_,
[this](GLsizei n, GLuint* framebuffers) {
api()->glGenFramebuffersEXTFn(n, framebuffers);
});
}
error::Error GLES2DecoderPassthroughImpl::DoGenRenderbuffers(
GLsizei n,
volatile GLuint* renderbuffers) {
return GenHelper(n, renderbuffers, &resources_->renderbuffer_id_map,
[this](GLsizei n, GLuint* renderbuffers) {
api()->glGenRenderbuffersEXTFn(n, renderbuffers);
});
}
error::Error GLES2DecoderPassthroughImpl::DoGenSamplers(
GLsizei n,
volatile GLuint* samplers) {
return GenHelper(n, samplers, &resources_->sampler_id_map,
[this](GLsizei n, GLuint* samplers) {
api()->glGenSamplersFn(n, samplers);
});
}
error::Error GLES2DecoderPassthroughImpl::DoGenTextures(
GLsizei n,
volatile GLuint* textures) {
return GenHelper(n, textures, &resources_->texture_id_map,
[this](GLsizei n, GLuint* textures) {
api()->glGenTexturesFn(n, textures);
});
}
error::Error GLES2DecoderPassthroughImpl::DoGenTransformFeedbacks(
GLsizei n,
volatile GLuint* ids) {
return GenHelper(n, ids, &transform_feedback_id_map_,
[this](GLsizei n, GLuint* transform_feedbacks) {
api()->glGenTransformFeedbacksFn(n, transform_feedbacks);
});
}
error::Error GLES2DecoderPassthroughImpl::DoGetActiveAttrib(GLuint program,
GLuint index,
GLint* size,
GLenum* type,
std::string* name,
int32_t* success) {
CheckErrorCallbackState();
GLuint service_id = GetProgramServiceID(program, resources_);
GLint active_attribute_max_length = 0;
api()->glGetProgramivFn(service_id, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH,
&active_attribute_max_length);
if (CheckErrorCallbackState()) {
*success = 0;
return error::kNoError;
}
std::vector<char> name_buffer(active_attribute_max_length, 0);
GLsizei length = 0;
api()->glGetActiveAttribFn(service_id, index, name_buffer.size(), &length,
size, type, name_buffer.data());
DCHECK(length <= active_attribute_max_length);
*name = length > 0 ? std::string(name_buffer.data(), length) : std::string();
*success = CheckErrorCallbackState() ? 0 : 1;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniform(GLuint program,
GLuint index,
GLint* size,
GLenum* type,
std::string* name,
int32_t* success) {
CheckErrorCallbackState();
GLuint service_id = GetProgramServiceID(program, resources_);
GLint active_uniform_max_length = 0;
api()->glGetProgramivFn(service_id, GL_ACTIVE_UNIFORM_MAX_LENGTH,
&active_uniform_max_length);
if (CheckErrorCallbackState()) {
*success = 0;
return error::kNoError;
}
std::vector<char> name_buffer(active_uniform_max_length, 0);
GLsizei length = 0;
api()->glGetActiveUniformFn(service_id, index, name_buffer.size(), &length,
size, type, name_buffer.data());
DCHECK(length <= active_uniform_max_length);
*name = length > 0 ? std::string(name_buffer.data(), length) : std::string();
*success = CheckErrorCallbackState() ? 0 : 1;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniformBlockiv(
GLuint program,
GLuint index,
GLenum pname,
GLsizei bufSize,
GLsizei* length,
GLint* params) {
api()->glGetActiveUniformBlockivRobustANGLEFn(
GetProgramServiceID(program, resources_), index, pname, bufSize, length,
params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniformBlockName(
GLuint program,
GLuint index,
std::string* name) {
CheckErrorCallbackState();
GLuint program_service_id = GetProgramServiceID(program, resources_);
GLint max_name_length = 0;
api()->glGetProgramivFn(program_service_id,
GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH,
&max_name_length);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
std::vector<GLchar> buffer(max_name_length, 0);
GLsizei length = 0;
api()->glGetActiveUniformBlockNameFn(program_service_id, index,
max_name_length, &length, buffer.data());
DCHECK(length <= max_name_length);
*name = length > 0 ? std::string(buffer.data(), length) : std::string();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniformsiv(
GLuint program,
GLsizei count,
const GLuint* indices,
GLenum pname,
GLint* params) {
api()->glGetActiveUniformsivFn(GetProgramServiceID(program, resources_),
count, indices, pname, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetAttachedShaders(
GLuint program,
GLsizei maxcount,
GLsizei* count,
GLuint* shaders) {
api()->glGetAttachedShadersFn(GetProgramServiceID(program, resources_),
maxcount, count, shaders);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetAttribLocation(GLuint program,
const char* name,
GLint* result) {
*result = api()->glGetAttribLocationFn(
GetProgramServiceID(program, resources_), name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetBooleanv(GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLboolean* params) {
return GetNumericHelper(pname, bufsize, length, params,
[this](GLenum pname, GLsizei bufsize, GLsizei* length,
GLboolean* params) {
api()->glGetBooleanvRobustANGLEFn(pname, bufsize,
length, params);
});
}
error::Error GLES2DecoderPassthroughImpl::DoGetBooleani_v(GLenum pname,
GLuint index,
GLsizei bufsize,
GLsizei* length,
GLboolean* data) {
glGetBooleani_vRobustANGLE(pname, index, bufsize, length, data);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetBufferParameteri64v(
GLenum target,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint64* params) {
CheckErrorCallbackState();
api()->glGetBufferParameteri64vRobustANGLEFn(target, pname, bufsize, length,
params);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
PatchGetBufferResults(target, pname, bufsize, length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetBufferParameteriv(
GLenum target,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
CheckErrorCallbackState();
api()->glGetBufferParameterivRobustANGLEFn(target, pname, bufsize, length,
params);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
PatchGetBufferResults(target, pname, bufsize, length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetError(uint32_t* result) {
FlushErrors();
*result = PopError();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetFloatv(GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLfloat* params) {
return GetNumericHelper(
pname, bufsize, length, params,
[this](GLenum pname, GLsizei bufsize, GLsizei* length, GLfloat* params) {
api()->glGetFloatvRobustANGLEFn(pname, bufsize, length, params);
});
}
error::Error GLES2DecoderPassthroughImpl::DoGetFragDataLocation(
GLuint program,
const char* name,
GLint* result) {
*result = api()->glGetFragDataLocationFn(
GetProgramServiceID(program, resources_), name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetFramebufferAttachmentParameteriv(
GLenum target,
GLenum attachment,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
GLenum updated_attachment = attachment;
if (IsEmulatedFramebufferBound(target)) {
    // Update the attachment to the equivalent one in the emulated framebuffer
if (!ModifyAttachmentForEmulatedFramebuffer(&updated_attachment)) {
InsertError(GL_INVALID_OPERATION, "Invalid attachment.");
*length = 0;
return error::kNoError;
}
// Generate errors for parameter names that are only valid for non-default
// framebuffers
switch (pname) {
case GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME:
case GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL:
case GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE:
case GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER:
InsertError(GL_INVALID_ENUM, "Invalid parameter name.");
*length = 0;
return error::kNoError;
}
}
CheckErrorCallbackState();
api()->glGetFramebufferAttachmentParameterivRobustANGLEFn(
target, updated_attachment, pname, bufsize, length, params);
if (CheckErrorCallbackState()) {
DCHECK(*length == 0);
return error::kNoError;
}
// Update the results of the query, if needed
const error::Error error = PatchGetFramebufferAttachmentParameter(
target, updated_attachment, pname, *length, params);
if (error != error::kNoError) {
*length = 0;
return error;
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetInteger64v(GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint64* params) {
return GetNumericHelper(
pname, bufsize, length, params,
[this](GLenum pname, GLsizei bufsize, GLsizei* length, GLint64* params) {
api()->glGetInteger64vRobustANGLEFn(pname, bufsize, length, params);
});
}
error::Error GLES2DecoderPassthroughImpl::DoGetIntegeri_v(GLenum pname,
GLuint index,
GLsizei bufsize,
GLsizei* length,
GLint* data) {
glGetIntegeri_vRobustANGLE(pname, index, bufsize, length, data);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetInteger64i_v(GLenum pname,
GLuint index,
GLsizei bufsize,
GLsizei* length,
GLint64* data) {
glGetInteger64i_vRobustANGLE(pname, index, bufsize, length, data);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetIntegerv(GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
return GetNumericHelper(
pname, bufsize, length, params,
[this](GLenum pname, GLsizei bufsize, GLsizei* length, GLint* params) {
api()->glGetIntegervRobustANGLEFn(pname, bufsize, length, params);
});
}
error::Error GLES2DecoderPassthroughImpl::DoGetInternalformativ(GLenum target,
GLenum format,
GLenum pname,
GLsizei bufSize,
GLsizei* length,
GLint* params) {
api()->glGetInternalformativRobustANGLEFn(target, format, pname, bufSize,
length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetProgramiv(GLuint program,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
api()->glGetProgramivRobustANGLEFn(GetProgramServiceID(program, resources_),
pname, bufsize, length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetProgramInfoLog(
GLuint program,
std::string* infolog) {
CheckErrorCallbackState();
GLint info_log_len = 0;
api()->glGetProgramivFn(GetProgramServiceID(program, resources_),
GL_INFO_LOG_LENGTH, &info_log_len);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
std::vector<char> buffer(info_log_len, 0);
GLsizei length = 0;
api()->glGetProgramInfoLogFn(GetProgramServiceID(program, resources_),
info_log_len, &length, buffer.data());
DCHECK(length <= info_log_len);
*infolog = length > 0 ? std::string(buffer.data(), length) : std::string();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetProgramInterfaceiv(
GLuint program,
GLenum program_interface,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
// glGetProgramInterfaceivRobustANGLE remains to be implemented in ANGLE.
if (bufsize < 1) {
return error::kOutOfBounds;
}
*length = 1;
api()->glGetProgramInterfaceivFn(GetProgramServiceID(program, resources_),
program_interface, pname, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetProgramResourceiv(
GLuint program,
GLenum program_interface,
GLuint index,
GLsizei prop_count,
const GLenum* props,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
api()->glGetProgramResourceivFn(GetProgramServiceID(program, resources_),
program_interface, index, prop_count, props,
bufsize, length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetProgramResourceIndex(
GLuint program,
GLenum program_interface,
const char* name,
GLuint* index) {
*index = api()->glGetProgramResourceIndexFn(
GetProgramServiceID(program, resources_), program_interface, name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetProgramResourceLocation(
GLuint program,
GLenum program_interface,
const char* name,
GLint* location) {
*location = api()->glGetProgramResourceLocationFn(
GetProgramServiceID(program, resources_), program_interface, name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetProgramResourceName(
GLuint program,
GLenum program_interface,
GLuint index,
std::string* name) {
CheckErrorCallbackState();
GLuint service_id = GetProgramServiceID(program, resources_);
GLint max_name_length = 0;
api()->glGetProgramInterfaceivFn(service_id, program_interface,
GL_MAX_NAME_LENGTH, &max_name_length);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
std::vector<GLchar> buffer(max_name_length, 0);
GLsizei length = 0;
api()->glGetProgramResourceNameFn(service_id, program_interface, index,
max_name_length, &length, buffer.data());
DCHECK_LE(length, max_name_length);
*name = length > 0 ? std::string(buffer.data(), length) : std::string();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetRenderbufferParameteriv(
GLenum target,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
api()->glGetRenderbufferParameterivRobustANGLEFn(target, pname, bufsize,
length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetSamplerParameterfv(
GLuint sampler,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLfloat* params) {
api()->glGetSamplerParameterfvRobustANGLEFn(
GetSamplerServiceID(sampler, resources_), pname, bufsize, length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetSamplerParameteriv(
GLuint sampler,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
api()->glGetSamplerParameterivRobustANGLEFn(
GetSamplerServiceID(sampler, resources_), pname, bufsize, length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetShaderiv(GLuint shader,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
api()->glGetShaderivRobustANGLEFn(GetShaderServiceID(shader, resources_),
pname, bufsize, length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetShaderInfoLog(
GLuint shader,
std::string* infolog) {
CheckErrorCallbackState();
GLuint service_id = GetShaderServiceID(shader, resources_);
GLint info_log_len = 0;
api()->glGetShaderivFn(service_id, GL_INFO_LOG_LENGTH, &info_log_len);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
std::vector<char> buffer(info_log_len, 0);
GLsizei length = 0;
api()->glGetShaderInfoLogFn(service_id, info_log_len, &length, buffer.data());
DCHECK(length <= info_log_len);
*infolog = length > 0 ? std::string(buffer.data(), length) : std::string();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetShaderPrecisionFormat(
GLenum shadertype,
GLenum precisiontype,
GLint* range,
GLint* precision,
int32_t* success) {
CheckErrorCallbackState();
api()->glGetShaderPrecisionFormatFn(shadertype, precisiontype, range,
precision);
*success = CheckErrorCallbackState() ? 0 : 1;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetShaderSource(
GLuint shader,
std::string* source) {
CheckErrorCallbackState();
GLuint shader_service_id = GetShaderServiceID(shader, resources_);
GLint shader_source_length = 0;
api()->glGetShaderivFn(shader_service_id, GL_SHADER_SOURCE_LENGTH,
&shader_source_length);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
std::vector<char> buffer(shader_source_length, 0);
GLsizei length = 0;
api()->glGetShaderSourceFn(shader_service_id, shader_source_length, &length,
buffer.data());
DCHECK(length <= shader_source_length);
*source = shader_source_length > 0 ? std::string(buffer.data(), length)
: std::string();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetString(GLenum name,
uint32_t bucket_id) {
std::string extensions;
const char* str = nullptr;
switch (name) {
case GL_VERSION:
str = GetServiceVersionString(feature_info_.get());
break;
case GL_SHADING_LANGUAGE_VERSION:
str = GetServiceShadingLanguageVersionString(feature_info_.get());
break;
case GL_EXTENSIONS: {
extensions = gfx::MakeExtensionString(feature_info_->extensions());
str = extensions.c_str();
break;
}
default:
str = reinterpret_cast<const char*>(api()->glGetStringFn(name));
break;
}
Bucket* bucket = CreateBucket(bucket_id);
bucket->SetFromString(str);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetSynciv(GLuint sync,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* values) {
api()->glGetSyncivFn(GetSyncServiceID(sync, resources_), pname, bufsize,
length, values);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetTexParameterfv(GLenum target,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLfloat* params) {
api()->glGetTexParameterfvRobustANGLEFn(target, pname, bufsize, length,
params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetTexParameteriv(GLenum target,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
api()->glGetTexParameterivRobustANGLEFn(target, pname, bufsize, length,
params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetTransformFeedbackVarying(
GLuint program,
GLuint index,
GLsizei* size,
GLenum* type,
std::string* name,
int32_t* success) {
CheckErrorCallbackState();
GLuint service_id = GetProgramServiceID(program, resources_);
GLint transform_feedback_varying_max_length = 0;
api()->glGetProgramivFn(service_id, GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH,
&transform_feedback_varying_max_length);
if (CheckErrorCallbackState()) {
*success = 0;
return error::kNoError;
}
std::vector<char> name_buffer(transform_feedback_varying_max_length, 0);
GLsizei length = 0;
api()->glGetTransformFeedbackVaryingFn(service_id, index, name_buffer.size(),
&length, size, type,
name_buffer.data());
DCHECK(length <= transform_feedback_varying_max_length);
*name = length > 0 ? std::string(name_buffer.data(), length) : std::string();
*success = CheckErrorCallbackState() ? 0 : 1;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetUniformBlockIndex(
GLuint program,
const char* name,
GLint* index) {
*index = api()->glGetUniformBlockIndexFn(
GetProgramServiceID(program, resources_), name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetUniformfv(GLuint program,
GLint location,
GLsizei bufsize,
GLsizei* length,
GLfloat* params) {
// GetUniform*RobustANGLE entry points expect bufsize in bytes like the entry
// points in GL_EXT_robustness
api()->glGetUniformfvRobustANGLEFn(GetProgramServiceID(program, resources_),
location, bufsize * sizeof(*params),
length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetUniformiv(GLuint program,
GLint location,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
// GetUniform*RobustANGLE entry points expect bufsize in bytes like the entry
// points in GL_EXT_robustness
api()->glGetUniformivRobustANGLEFn(GetProgramServiceID(program, resources_),
location, bufsize * sizeof(*params),
length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetUniformuiv(GLuint program,
GLint location,
GLsizei bufsize,
GLsizei* length,
GLuint* params) {
// GetUniform*RobustANGLE entry points expect bufsize in bytes like the entry
// points in GL_EXT_robustness
api()->glGetUniformuivRobustANGLEFn(GetProgramServiceID(program, resources_),
location, bufsize * sizeof(*params),
length, params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetUniformIndices(
GLuint program,
GLsizei count,
const char* const* names,
GLsizei bufSize,
GLuint* indices) {
api()->glGetUniformIndicesFn(GetProgramServiceID(program, resources_), count,
names, indices);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetUniformLocation(
GLuint program,
const char* name,
GLint* location) {
*location = api()->glGetUniformLocationFn(
GetProgramServiceID(program, resources_), name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetVertexAttribfv(GLuint index,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLfloat* params) {
api()->glGetVertexAttribfvRobustANGLEFn(index, pname, bufsize, length,
params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetVertexAttribiv(GLuint index,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
api()->glGetVertexAttribivRobustANGLEFn(index, pname, bufsize, length,
params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetVertexAttribIiv(GLuint index,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
api()->glGetVertexAttribIivRobustANGLEFn(index, pname, bufsize, length,
params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetVertexAttribIuiv(
GLuint index,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLuint* params) {
api()->glGetVertexAttribIuivRobustANGLEFn(index, pname, bufsize, length,
params);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetVertexAttribPointerv(
GLuint index,
GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLuint* pointer) {
std::array<void*, 1> temp_pointers{{nullptr}};
GLsizei temp_length = 0;
api()->glGetVertexAttribPointervRobustANGLEFn(
index, pname, static_cast<GLsizei>(temp_pointers.size()), &temp_length,
temp_pointers.data());
DCHECK(temp_length >= 0 &&
temp_length <= static_cast<GLsizei>(temp_pointers.size()) &&
temp_length <= bufsize);
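  // The client receives the queried attribute pointers as integer values, so
  // narrow each returned pointer down to a GLuint.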
for (GLsizei ii = 0; ii < temp_length; ii++) {
pointer[ii] =
static_cast<GLuint>(reinterpret_cast<uintptr_t>(temp_pointers[ii]));
}
*length = temp_length;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoHint(GLenum target, GLenum mode) {
api()->glHintFn(target, mode);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoInvalidateFramebuffer(
GLenum target,
GLsizei count,
const volatile GLenum* attachments) {
// Validate that count is non-negative before allocating a vector
if (count < 0) {
InsertError(GL_INVALID_VALUE, "count cannot be negative.");
return error::kNoError;
}
std::vector<GLenum> attachments_copy(attachments, attachments + count);
if (IsEmulatedFramebufferBound(target)) {
    // Update the attachment to the equivalent one in the emulated framebuffer
if (!ModifyAttachmentsForEmulatedFramebuffer(&attachments_copy)) {
InsertError(GL_INVALID_OPERATION, "Invalid attachment.");
return error::kNoError;
}
}
api()->glInvalidateFramebufferFn(target, count, attachments_copy.data());
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoInvalidateSubFramebuffer(
GLenum target,
GLsizei count,
const volatile GLenum* attachments,
GLint x,
GLint y,
GLsizei width,
GLsizei height) {
// Validate that count is non-negative before allocating a vector
if (count < 0) {
InsertError(GL_INVALID_VALUE, "count cannot be negative.");
return error::kNoError;
}
std::vector<GLenum> attachments_copy(attachments, attachments + count);
if (IsEmulatedFramebufferBound(target)) {
    // Update the attachment to the equivalent one in the emulated framebuffer
if (!ModifyAttachmentsForEmulatedFramebuffer(&attachments_copy)) {
InsertError(GL_INVALID_OPERATION, "Invalid attachment.");
return error::kNoError;
}
}
api()->glInvalidateSubFramebufferFn(target, count, attachments_copy.data(), x,
y, width, height);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoIsBuffer(GLuint buffer,
uint32_t* result) {
*result =
api()->glIsBufferFn(GetBufferServiceID(api(), buffer, resources_, false));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoIsEnabled(GLenum cap,
uint32_t* result) {
*result = api()->glIsEnabledFn(cap);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoIsEnablediOES(GLenum target,
GLuint index,
uint32_t* result) {
*result = api()->glIsEnablediOESFn(target, index);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoIsFramebuffer(GLuint framebuffer,
uint32_t* result) {
*result = api()->glIsFramebufferEXTFn(
GetFramebufferServiceID(api(), framebuffer, &framebuffer_id_map_, false));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoIsProgram(GLuint program,
uint32_t* result) {
*result = api()->glIsProgramFn(GetProgramServiceID(program, resources_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoIsRenderbuffer(GLuint renderbuffer,
uint32_t* result) {
*result = api()->glIsRenderbufferEXTFn(
GetRenderbufferServiceID(api(), renderbuffer, resources_, false));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoIsSampler(GLuint sampler,
uint32_t* result) {
*result = api()->glIsSamplerFn(GetSamplerServiceID(sampler, resources_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoIsShader(GLuint shader,
uint32_t* result) {
*result = api()->glIsShaderFn(GetShaderServiceID(shader, resources_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoIsSync(GLuint sync,
uint32_t* result) {
*result = api()->glIsSyncFn(GetSyncServiceID(sync, resources_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoIsTexture(GLuint texture,
uint32_t* result) {
*result = api()->glIsTextureFn(
GetTextureServiceID(api(), texture, resources_, false));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoIsTransformFeedback(
GLuint transformfeedback,
uint32_t* result) {
*result = api()->glIsTransformFeedbackFn(GetTransformFeedbackServiceID(
transformfeedback, &transform_feedback_id_map_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoLineWidth(GLfloat width) {
api()->glLineWidthFn(width);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoLinkProgram(GLuint program) {
TRACE_EVENT0("gpu", "GLES2DecoderPassthroughImpl::DoLinkProgram");
SCOPED_UMA_HISTOGRAM_TIMER("GPU.PassthroughDoLinkProgramTime");
GLuint program_service_id = GetProgramServiceID(program, resources_);
api()->glLinkProgramFn(program_service_id);
// Program linking can be very slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
linking_program_service_id_ = program_service_id;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoMemoryBarrierEXT(
GLbitfield barriers) {
api()->glMemoryBarrierEXTFn(barriers);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoMemoryBarrierByRegion(
GLbitfield barriers) {
api()->glMemoryBarrierByRegionFn(barriers);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoMultiDrawBeginCHROMIUM(
GLsizei drawcount) {
if (drawcount < 0) {
InsertError(GL_INVALID_VALUE, "drawcount cannot be negative.");
return error::kNoError;
}
if (!multi_draw_manager_->Begin(drawcount)) {
return error::kInvalidArguments;
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoMultiDrawEndCHROMIUM() {
MultiDrawManager::ResultData result;
if (!multi_draw_manager_->End(&result)) {
return error::kInvalidArguments;
}
switch (result.draw_function) {
case MultiDrawManager::DrawFunction::DrawArrays:
api()->glMultiDrawArraysANGLEFn(result.mode, result.firsts.data(),
result.counts.data(), result.drawcount);
return error::kNoError;
case MultiDrawManager::DrawFunction::DrawArraysInstanced:
api()->glMultiDrawArraysInstancedANGLEFn(
result.mode, result.firsts.data(), result.counts.data(),
result.instance_counts.data(), result.drawcount);
return error::kNoError;
case MultiDrawManager::DrawFunction::DrawArraysInstancedBaseInstance:
api()->glMultiDrawArraysInstancedBaseInstanceANGLEFn(
result.mode, result.firsts.data(), result.counts.data(),
result.instance_counts.data(), result.baseinstances.data(),
result.drawcount);
return error::kNoError;
case MultiDrawManager::DrawFunction::DrawElements:
api()->glMultiDrawElementsANGLEFn(result.mode, result.counts.data(),
result.type, result.indices.data(),
result.drawcount);
return error::kNoError;
case MultiDrawManager::DrawFunction::DrawElementsInstanced:
api()->glMultiDrawElementsInstancedANGLEFn(
result.mode, result.counts.data(), result.type, result.indices.data(),
result.instance_counts.data(), result.drawcount);
return error::kNoError;
case MultiDrawManager::DrawFunction::
DrawElementsInstancedBaseVertexBaseInstance:
api()->glMultiDrawElementsInstancedBaseVertexBaseInstanceANGLEFn(
result.mode, result.counts.data(), result.type, result.indices.data(),
result.instance_counts.data(), result.basevertices.data(),
result.baseinstances.data(), result.drawcount);
return error::kNoError;
default:
NOTREACHED();
return error::kLostContext;
}
}
error::Error GLES2DecoderPassthroughImpl::DoPauseTransformFeedback() {
api()->glPauseTransformFeedbackFn();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoPixelStorei(GLenum pname,
GLint param) {
api()->glPixelStoreiFn(pname, param);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoPolygonOffset(GLfloat factor,
GLfloat units) {
api()->glPolygonOffsetFn(factor, units);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoReadBuffer(GLenum src) {
api()->glReadBufferFn(src);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoReadPixels(GLint x,
GLint y,
GLsizei width,
GLsizei height,
GLenum format,
GLenum type,
GLsizei bufsize,
GLsizei* length,
GLsizei* columns,
GLsizei* rows,
void* pixels,
int32_t* success) {
CheckErrorCallbackState();
ScopedPackStateRowLengthReset reset_row_length(
api(), bufsize != 0 && feature_info_->gl_version_info().is_es3);
api()->glReadPixelsRobustANGLEFn(x, y, width, height, format, type, bufsize,
length, columns, rows, pixels);
*success = CheckErrorCallbackState() ? 0 : 1;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoReadPixelsAsync(
GLint x,
GLint y,
GLsizei width,
GLsizei height,
GLenum format,
GLenum type,
GLsizei bufsize,
GLsizei* length,
GLsizei* columns,
GLsizei* rows,
uint32_t pixels_shm_id,
uint32_t pixels_shm_offset,
uint32_t result_shm_id,
uint32_t result_shm_offset) {
DCHECK(feature_info_->feature_flags().use_async_readpixels &&
bound_buffers_[GL_PIXEL_PACK_BUFFER] == 0);
CheckErrorCallbackState();
ScopedPackStateRowLengthReset reset_row_length(
api(), bufsize != 0 && feature_info_->gl_version_info().is_es3);
PendingReadPixels pending_read_pixels;
pending_read_pixels.pixels_shm_id = pixels_shm_id;
pending_read_pixels.pixels_shm_offset = pixels_shm_offset;
pending_read_pixels.result_shm_id = result_shm_id;
pending_read_pixels.result_shm_offset = result_shm_offset;
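  // Issue the readback into a newly created pixel pack buffer so the transfer
  // can complete asynchronously; the result is copied to shared memory later,
  // once the fence created below has signaled.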
api()->glGenBuffersARBFn(1, &pending_read_pixels.buffer_service_id);
api()->glBindBufferFn(GL_PIXEL_PACK_BUFFER_ARB,
pending_read_pixels.buffer_service_id);
// GL_STREAM_READ is not available until ES3.
const GLenum usage_hint = feature_info_->gl_version_info().IsAtLeastGLES(3, 0)
? GL_STREAM_READ
: GL_STATIC_DRAW;
const uint32_t bytes_per_pixel =
GLES2Util::ComputeImageGroupSize(format, type);
if (bytes_per_pixel == 0) {
InsertError(GL_INVALID_ENUM, "Invalid ReadPixels format or type.");
return error::kNoError;
}
if (width < 0 || height < 0) {
InsertError(GL_INVALID_VALUE, "Width and height cannot be negative.");
return error::kNoError;
}
if (!base::CheckMul(bytes_per_pixel, width, height)
.AssignIfValid(&pending_read_pixels.pixels_size)) {
return error::kOutOfBounds;
}
api()->glBufferDataFn(GL_PIXEL_PACK_BUFFER_ARB,
pending_read_pixels.pixels_size, nullptr, usage_hint);
  // No need to worry about ES3 pixel pack parameters, because no
  // PIXEL_PACK_BUFFER is bound and none of these settings have been sent to
  // GL.
api()->glReadPixelsFn(x, y, width, height, format, type, nullptr);
api()->glBindBufferFn(GL_PIXEL_PACK_BUFFER_ARB, 0);
// Test for errors now before creating a fence
if (CheckErrorCallbackState()) {
return error::kNoError;
}
pending_read_pixels.fence = gl::GLFence::Create();
if (CheckErrorCallbackState()) {
return error::kNoError;
}
pending_read_pixels_.push_back(std::move(pending_read_pixels));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoReleaseShaderCompiler() {
api()->glReleaseShaderCompilerFn();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoRenderbufferStorage(
GLenum target,
GLenum internalformat,
GLsizei width,
GLsizei height) {
api()->glRenderbufferStorageEXTFn(target, internalformat, width, height);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoResumeTransformFeedback() {
api()->glResumeTransformFeedbackFn();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoSampleCoverage(GLclampf value,
GLboolean invert) {
api()->glSampleCoverageFn(value, invert);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoSamplerParameterf(GLuint sampler,
GLenum pname,
GLfloat param) {
api()->glSamplerParameterfFn(GetSamplerServiceID(sampler, resources_), pname,
param);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoSamplerParameterfv(
GLuint sampler,
GLenum pname,
const volatile GLfloat* params) {
std::array<GLfloat, 1> params_copy{{params[0]}};
api()->glSamplerParameterfvRobustANGLEFn(
GetSamplerServiceID(sampler, resources_), pname,
static_cast<GLsizei>(params_copy.size()), params_copy.data());
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoSamplerParameteri(GLuint sampler,
GLenum pname,
GLint param) {
api()->glSamplerParameteriFn(GetSamplerServiceID(sampler, resources_), pname,
param);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoSamplerParameteriv(
GLuint sampler,
GLenum pname,
const volatile GLint* params) {
std::array<GLint, 1> params_copy{{params[0]}};
api()->glSamplerParameterivRobustANGLEFn(
GetSamplerServiceID(sampler, resources_), pname,
static_cast<GLsizei>(params_copy.size()), params_copy.data());
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoScissor(GLint x,
GLint y,
GLsizei width,
GLsizei height) {
CheckErrorCallbackState();
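  // Adjust the scissor rectangle by the surface's draw offset before passing
  // it to the driver.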
gfx::Vector2d scissor_offset = GetSurfaceDrawOffset();
api()->glScissorFn(x + scissor_offset.x(), y + scissor_offset.y(), width,
height);
if (CheckErrorCallbackState()) {
// Skip any state tracking updates if an error was generated
return error::kNoError;
}
AssignGLRectangle(scissor_, x, y, width, height);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoShaderBinary(GLsizei n,
const GLuint* shaders,
GLenum binaryformat,
const void* binary,
GLsizei length) {
std::vector<GLuint> service_shaders(n, 0);
for (GLsizei i = 0; i < n; i++) {
service_shaders[i] = GetShaderServiceID(shaders[i], resources_);
}
api()->glShaderBinaryFn(n, service_shaders.data(), binaryformat, binary,
length);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoShaderSource(GLuint shader,
GLsizei count,
const char** string,
const GLint* length) {
api()->glShaderSourceFn(GetShaderServiceID(shader, resources_), count, string,
length);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoStencilFunc(GLenum func,
GLint ref,
GLuint mask) {
api()->glStencilFuncFn(func, ref, mask);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoStencilFuncSeparate(GLenum face,
GLenum func,
GLint ref,
GLuint mask) {
api()->glStencilFuncSeparateFn(face, func, ref, mask);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoStencilMask(GLuint mask) {
api()->glStencilMaskFn(mask);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoStencilMaskSeparate(GLenum face,
GLuint mask) {
api()->glStencilMaskSeparateFn(face, mask);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoStencilOp(GLenum fail,
GLenum zfail,
GLenum zpass) {
api()->glStencilOpFn(fail, zfail, zpass);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoStencilOpSeparate(GLenum face,
GLenum fail,
GLenum zfail,
GLenum zpass) {
api()->glStencilOpSeparateFn(face, fail, zfail, zpass);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTexImage2D(GLenum target,
GLint level,
GLint internalformat,
GLsizei width,
GLsizei height,
GLint border,
GLenum format,
GLenum type,
GLsizei image_size,
const void* pixels) {
ScopedUnpackStateButAlignmentReset reset_unpack(
api(), image_size != 0 && feature_info_->gl_version_info().is_es3, false);
CheckErrorCallbackState();
api()->glTexImage2DRobustANGLEFn(target, level, internalformat, width, height,
border, format, type, image_size, pixels);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
UpdateTextureSizeFromTarget(target);
// Texture data upload can be slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTexImage3D(GLenum target,
GLint level,
GLint internalformat,
GLsizei width,
GLsizei height,
GLsizei depth,
GLint border,
GLenum format,
GLenum type,
GLsizei image_size,
const void* pixels) {
ScopedUnpackStateButAlignmentReset reset_unpack(
api(), image_size != 0 && feature_info_->gl_version_info().is_es3, true);
CheckErrorCallbackState();
api()->glTexImage3DRobustANGLEFn(target, level, internalformat, width, height,
depth, border, format, type, image_size,
pixels);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
UpdateTextureSizeFromTarget(target);
// Texture data upload can be slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTexParameterf(GLenum target,
GLenum pname,
GLfloat param) {
// Don't allow clients to modify the resource initialization state.
if (pname == GL_RESOURCE_INITIALIZED_ANGLE) {
InsertError(GL_INVALID_ENUM, "Invalid enum.");
return error::kNoError;
}
api()->glTexParameterfFn(target, pname, param);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTexParameterfv(
GLenum target,
GLenum pname,
const volatile GLfloat* params) {
// Don't allow clients to modify the resource initialization state.
if (pname == GL_RESOURCE_INITIALIZED_ANGLE) {
InsertError(GL_INVALID_ENUM, "Invalid enum.");
return error::kNoError;
}
std::array<GLfloat, 1> params_copy{{params[0]}};
api()->glTexParameterfvRobustANGLEFn(target, pname,
static_cast<GLsizei>(params_copy.size()),
params_copy.data());
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTexParameteri(GLenum target,
GLenum pname,
GLint param) {
// Don't allow clients to modify the resource initialization state.
if (pname == GL_RESOURCE_INITIALIZED_ANGLE) {
InsertError(GL_INVALID_ENUM, "Invalid enum.");
return error::kNoError;
}
api()->glTexParameteriFn(target, pname, param);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTexParameteriv(
GLenum target,
GLenum pname,
const volatile GLint* params) {
// Don't allow clients to modify the resource initialization state.
if (pname == GL_RESOURCE_INITIALIZED_ANGLE) {
InsertError(GL_INVALID_ENUM, "Invalid enum.");
return error::kNoError;
}
std::array<GLint, 1> params_copy{{params[0]}};
api()->glTexParameterivRobustANGLEFn(target, pname,
static_cast<GLsizei>(params_copy.size()),
params_copy.data());
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTexStorage3D(GLenum target,
GLsizei levels,
GLenum internalFormat,
GLsizei width,
GLsizei height,
GLsizei depth) {
CheckErrorCallbackState();
api()->glTexStorage3DFn(target, levels, internalFormat, width, height, depth);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
UpdateTextureSizeFromTarget(target);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTexSubImage2D(GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLsizei width,
GLsizei height,
GLenum format,
GLenum type,
GLsizei image_size,
const void* pixels) {
ScopedUnpackStateButAlignmentReset reset_unpack(
api(), image_size != 0 && feature_info_->gl_version_info().is_es3, false);
api()->glTexSubImage2DRobustANGLEFn(target, level, xoffset, yoffset, width,
height, format, type, image_size, pixels);
// Texture data upload can be slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTexSubImage3D(GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLint zoffset,
GLsizei width,
GLsizei height,
GLsizei depth,
GLenum format,
GLenum type,
GLsizei image_size,
const void* pixels) {
ScopedUnpackStateButAlignmentReset reset_unpack(
api(), image_size != 0 && feature_info_->gl_version_info().is_es3, true);
api()->glTexSubImage3DRobustANGLEFn(target, level, xoffset, yoffset, zoffset,
width, height, depth, format, type,
image_size, pixels);
// Texture data upload can be slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTransformFeedbackVaryings(
GLuint program,
GLsizei count,
const char** varyings,
GLenum buffermode) {
api()->glTransformFeedbackVaryingsFn(GetProgramServiceID(program, resources_),
count, varyings, buffermode);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform1f(GLint location,
GLfloat x) {
api()->glUniform1fFn(location, x);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform1fv(
GLint location,
GLsizei count,
const volatile GLfloat* v) {
api()->glUniform1fvFn(location, count, const_cast<const GLfloat*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform1i(GLint location, GLint x) {
api()->glUniform1iFn(location, x);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform1iv(
GLint location,
GLsizei count,
const volatile GLint* v) {
api()->glUniform1ivFn(location, count, const_cast<const GLint*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform1ui(GLint location,
GLuint x) {
api()->glUniform1uiFn(location, x);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform1uiv(
GLint location,
GLsizei count,
const volatile GLuint* v) {
api()->glUniform1uivFn(location, count, const_cast<const GLuint*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform2f(GLint location,
GLfloat x,
GLfloat y) {
api()->glUniform2fFn(location, x, y);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform2fv(
GLint location,
GLsizei count,
const volatile GLfloat* v) {
api()->glUniform2fvFn(location, count, const_cast<const GLfloat*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform2i(GLint location,
GLint x,
GLint y) {
api()->glUniform2iFn(location, x, y);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform2iv(
GLint location,
GLsizei count,
const volatile GLint* v) {
api()->glUniform2ivFn(location, count, const_cast<const GLint*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform2ui(GLint location,
GLuint x,
GLuint y) {
api()->glUniform2uiFn(location, x, y);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform2uiv(
GLint location,
GLsizei count,
const volatile GLuint* v) {
api()->glUniform2uivFn(location, count, const_cast<const GLuint*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform3f(GLint location,
GLfloat x,
GLfloat y,
GLfloat z) {
api()->glUniform3fFn(location, x, y, z);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform3fv(
GLint location,
GLsizei count,
const volatile GLfloat* v) {
api()->glUniform3fvFn(location, count, const_cast<const GLfloat*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform3i(GLint location,
GLint x,
GLint y,
GLint z) {
api()->glUniform3iFn(location, x, y, z);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform3iv(
GLint location,
GLsizei count,
const volatile GLint* v) {
api()->glUniform3ivFn(location, count, const_cast<const GLint*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform3ui(GLint location,
GLuint x,
GLuint y,
GLuint z) {
api()->glUniform3uiFn(location, x, y, z);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform3uiv(
GLint location,
GLsizei count,
const volatile GLuint* v) {
api()->glUniform3uivFn(location, count, const_cast<const GLuint*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform4f(GLint location,
GLfloat x,
GLfloat y,
GLfloat z,
GLfloat w) {
api()->glUniform4fFn(location, x, y, z, w);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform4fv(
GLint location,
GLsizei count,
const volatile GLfloat* v) {
api()->glUniform4fvFn(location, count, const_cast<const GLfloat*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform4i(GLint location,
GLint x,
GLint y,
GLint z,
GLint w) {
api()->glUniform4iFn(location, x, y, z, w);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform4iv(
GLint location,
GLsizei count,
const volatile GLint* v) {
api()->glUniform4ivFn(location, count, const_cast<const GLint*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform4ui(GLint location,
GLuint x,
GLuint y,
GLuint z,
GLuint w) {
api()->glUniform4uiFn(location, x, y, z, w);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniform4uiv(
GLint location,
GLsizei count,
const volatile GLuint* v) {
api()->glUniform4uivFn(location, count, const_cast<const GLuint*>(v));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniformBlockBinding(
GLuint program,
GLuint index,
GLuint binding) {
api()->glUniformBlockBindingFn(GetProgramServiceID(program, resources_),
index, binding);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniformMatrix2fv(
GLint location,
GLsizei count,
GLboolean transpose,
const volatile GLfloat* value) {
api()->glUniformMatrix2fvFn(location, count, transpose,
const_cast<const GLfloat*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniformMatrix2x3fv(
GLint location,
GLsizei count,
GLboolean transpose,
const volatile GLfloat* value) {
api()->glUniformMatrix2x3fvFn(location, count, transpose,
const_cast<const GLfloat*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniformMatrix2x4fv(
GLint location,
GLsizei count,
GLboolean transpose,
const volatile GLfloat* value) {
api()->glUniformMatrix2x4fvFn(location, count, transpose,
const_cast<const GLfloat*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniformMatrix3fv(
GLint location,
GLsizei count,
GLboolean transpose,
const volatile GLfloat* value) {
api()->glUniformMatrix3fvFn(location, count, transpose,
const_cast<const GLfloat*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniformMatrix3x2fv(
GLint location,
GLsizei count,
GLboolean transpose,
const volatile GLfloat* value) {
api()->glUniformMatrix3x2fvFn(location, count, transpose,
const_cast<const GLfloat*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniformMatrix3x4fv(
GLint location,
GLsizei count,
GLboolean transpose,
const volatile GLfloat* value) {
api()->glUniformMatrix3x4fvFn(location, count, transpose,
const_cast<const GLfloat*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniformMatrix4fv(
GLint location,
GLsizei count,
GLboolean transpose,
const volatile GLfloat* value) {
api()->glUniformMatrix4fvFn(location, count, transpose,
const_cast<const GLfloat*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniformMatrix4x2fv(
GLint location,
GLsizei count,
GLboolean transpose,
const volatile GLfloat* value) {
api()->glUniformMatrix4x2fvFn(location, count, transpose,
const_cast<const GLfloat*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUniformMatrix4x3fv(
GLint location,
GLsizei count,
GLboolean transpose,
const volatile GLfloat* value) {
api()->glUniformMatrix4x3fvFn(location, count, transpose,
const_cast<const GLfloat*>(value));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUseProgram(GLuint program) {
api()->glUseProgramFn(GetProgramServiceID(program, resources_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoValidateProgram(GLuint program) {
api()->glValidateProgramFn(GetProgramServiceID(program, resources_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttrib1f(GLuint indx,
GLfloat x) {
api()->glVertexAttrib1fFn(indx, x);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttrib1fv(
GLuint indx,
const volatile GLfloat* values) {
api()->glVertexAttrib1fvFn(indx, const_cast<const GLfloat*>(values));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttrib2f(GLuint indx,
GLfloat x,
GLfloat y) {
api()->glVertexAttrib2fFn(indx, x, y);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttrib2fv(
GLuint indx,
const volatile GLfloat* values) {
api()->glVertexAttrib2fvFn(indx, const_cast<const GLfloat*>(values));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttrib3f(GLuint indx,
GLfloat x,
GLfloat y,
GLfloat z) {
api()->glVertexAttrib3fFn(indx, x, y, z);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttrib3fv(
GLuint indx,
const volatile GLfloat* values) {
api()->glVertexAttrib3fvFn(indx, const_cast<const GLfloat*>(values));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttrib4f(GLuint indx,
GLfloat x,
GLfloat y,
GLfloat z,
GLfloat w) {
api()->glVertexAttrib4fFn(indx, x, y, z, w);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttrib4fv(
GLuint indx,
const volatile GLfloat* values) {
api()->glVertexAttrib4fvFn(indx, const_cast<const GLfloat*>(values));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttribI4i(GLuint indx,
GLint x,
GLint y,
GLint z,
GLint w) {
api()->glVertexAttribI4iFn(indx, x, y, z, w);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttribI4iv(
GLuint indx,
const volatile GLint* values) {
api()->glVertexAttribI4ivFn(indx, const_cast<const GLint*>(values));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttribI4ui(GLuint indx,
GLuint x,
GLuint y,
GLuint z,
GLuint w) {
api()->glVertexAttribI4uiFn(indx, x, y, z, w);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttribI4uiv(
GLuint indx,
const volatile GLuint* values) {
api()->glVertexAttribI4uivFn(indx, const_cast<const GLuint*>(values));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttribIPointer(
GLuint indx,
GLint size,
GLenum type,
GLsizei stride,
const void* ptr) {
api()->glVertexAttribIPointerFn(indx, size, type, stride, ptr);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttribPointer(
GLuint indx,
GLint size,
GLenum type,
GLboolean normalized,
GLsizei stride,
const void* ptr) {
api()->glVertexAttribPointerFn(indx, size, type, normalized, stride, ptr);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoViewport(GLint x,
GLint y,
GLsizei width,
GLsizei height) {
CheckErrorCallbackState();
gfx::Vector2d viewport_offset = GetSurfaceDrawOffset();
api()->glViewportFn(x + viewport_offset.x(), y + viewport_offset.y(), width,
height);
if (CheckErrorCallbackState()) {
// Skip any state tracking updates if an error was generated. Viewport may
// have been out of bounds.
return error::kNoError;
}
AssignGLRectangle(viewport_, x, y, width, height);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoWaitSync(GLuint sync,
GLbitfield flags,
GLuint64 timeout) {
api()->glWaitSyncFn(GetSyncServiceID(sync, resources_), flags, timeout);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBlitFramebufferCHROMIUM(
GLint srcX0,
GLint srcY0,
GLint srcX1,
GLint srcY1,
GLint dstX0,
GLint dstY0,
GLint dstX1,
GLint dstY1,
GLbitfield mask,
GLenum filter) {
DCHECK(feature_info_->feature_flags().chromium_framebuffer_multisample);
api()->glBlitFramebufferFn(srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1,
dstY1, mask, filter);
return error::kNoError;
}
error::Error
GLES2DecoderPassthroughImpl::DoRenderbufferStorageMultisampleCHROMIUM(
GLenum target,
GLsizei samples,
GLenum internalformat,
GLsizei width,
GLsizei height) {
DCHECK(feature_info_->feature_flags().chromium_framebuffer_multisample);
api()->glRenderbufferStorageMultisampleFn(target, samples, internalformat,
width, height);
return error::kNoError;
}
error::Error
GLES2DecoderPassthroughImpl::DoRenderbufferStorageMultisampleAdvancedAMD(
GLenum target,
GLsizei samples,
GLsizei storageSamples,
GLenum internalformat,
GLsizei width,
GLsizei height) {
DCHECK(feature_info_->feature_flags().amd_framebuffer_multisample_advanced);
api()->glRenderbufferStorageMultisampleAdvancedAMDFn(
target, samples, storageSamples, internalformat, width, height);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoRenderbufferStorageMultisampleEXT(
GLenum target,
GLsizei samples,
GLenum internalformat,
GLsizei width,
GLsizei height) {
DCHECK(feature_info_->feature_flags().multisampled_render_to_texture);
api()->glRenderbufferStorageMultisampleEXTFn(target, samples, internalformat,
width, height);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoFramebufferTexture2DMultisampleEXT(
GLenum target,
GLenum attachment,
GLenum textarget,
GLuint texture,
GLint level,
GLsizei samples) {
DCHECK(feature_info_->feature_flags().multisampled_render_to_texture);
if (IsEmulatedFramebufferBound(target)) {
InsertError(GL_INVALID_OPERATION,
"Cannot change the attachments of the default framebuffer.");
return error::kNoError;
}
BindPendingImageForClientIDIfNeeded(texture);
api()->glFramebufferTexture2DMultisampleEXTFn(
target, attachment, textarget,
GetTextureServiceID(api(), texture, resources_, false), level, samples);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTexStorage2DEXT(
GLenum target,
GLsizei levels,
GLenum internalFormat,
GLsizei width,
GLsizei height) {
CheckErrorCallbackState();
api()->glTexStorage2DEXTFn(target, levels, internalFormat, width, height);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
UpdateTextureSizeFromTarget(target);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTexStorage2DImageCHROMIUM(
GLenum target,
GLenum internalFormat,
GLenum bufferUsage,
GLsizei width,
GLsizei height) {
TextureTarget target_enum = GLenumToTextureTarget(target);
if (target_enum == TextureTarget::kCubeMap ||
target_enum == TextureTarget::kUnkown) {
InsertError(GL_INVALID_ENUM, "Invalid target");
return error::kNoError;
}
const BoundTexture& bound_texture =
bound_textures_[static_cast<size_t>(target_enum)][active_texture_unit_];
if (bound_texture.texture == nullptr) {
InsertError(GL_INVALID_OPERATION, "No texture bound");
return error::kNoError;
}
gfx::BufferFormat buffer_format;
if (!GetGFXBufferFormat(internalFormat, &buffer_format)) {
InsertError(GL_INVALID_ENUM, "Invalid buffer format");
return error::kNoError;
}
gfx::BufferUsage buffer_usage;
if (!GetGFXBufferUsage(bufferUsage, &buffer_usage)) {
InsertError(GL_INVALID_ENUM, "Invalid buffer usage");
return error::kNoError;
}
if (!GetContextGroup()->image_factory()) {
InsertError(GL_INVALID_OPERATION, "Cannot create GL image");
return error::kNoError;
}
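  // Allocate an anonymous GLImage through the context group's image factory,
  // bind it to the texture target, and attach it as the level 0 image.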
bool is_cleared;
scoped_refptr<gl::GLImage> image =
GetContextGroup()->image_factory()->CreateAnonymousImage(
gfx::Size(width, height), buffer_format, buffer_usage,
gpu::kNullSurfaceHandle, &is_cleared);
if (!image || !image->BindTexImage(target)) {
InsertError(GL_INVALID_OPERATION, "Failed to create or bind GL Image");
return error::kNoError;
}
bound_texture.texture->SetLevelImage(target, 0, image.get());
// Target is already validated
UpdateTextureSizeFromTarget(target);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGenQueriesEXT(
GLsizei n,
volatile GLuint* queries) {
return GenHelper(
n, queries, &query_id_map_, [this](GLsizei n, GLuint* queries) {
if (feature_info_->feature_flags().occlusion_query_boolean) {
          // glGenQueries is only loaded when GL_EXT_occlusion_query_boolean is
          // present. Without it, every query is emulated, so no service-side
          // query objects need to be generated.
api()->glGenQueriesFn(n, queries);
} else {
for (GLsizei i = 0; i < n; i++) {
queries[i] = 0;
}
}
});
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteQueriesEXT(
GLsizei n,
const volatile GLuint* queries) {
  // Validate n is non-negative before allocating a vector of size n
if (n < 0) {
InsertError(GL_INVALID_VALUE, "count cannot be negative.");
return error::kNoError;
}
std::vector<GLuint> queries_copy(queries, queries + n);
// If any of these queries are pending or active, remove them from the lists
for (GLuint query_client_id : queries_copy) {
GLuint query_service_id = 0;
if (!query_id_map_.GetServiceID(query_client_id, &query_service_id) ||
query_service_id == 0) {
continue;
}
QueryInfo query_info = query_info_map_[query_service_id];
query_info_map_.erase(query_service_id);
if (query_info.type == GL_NONE) {
// Query was never started
continue;
}
auto active_queries_iter = active_queries_.find(query_info.type);
if (active_queries_iter != active_queries_.end()) {
active_queries_.erase(active_queries_iter);
}
RemovePendingQuery(query_service_id);
}
return DeleteHelper(
queries_copy.size(), queries_copy.data(), &query_id_map_,
[this](GLsizei n, GLuint* queries) {
if (feature_info_->feature_flags().occlusion_query_boolean) {
          // glDeleteQueries is only loaded when GL_EXT_occlusion_query_boolean
          // is present. Without it, every query is emulated, so there are no
          // service-side query objects to delete.
api()->glDeleteQueriesFn(n, queries);
}
});
}
error::Error GLES2DecoderPassthroughImpl::DoQueryCounterEXT(
GLuint id,
GLenum target,
int32_t sync_shm_id,
uint32_t sync_shm_offset,
uint32_t submit_count) {
scoped_refptr<gpu::Buffer> buffer = GetSharedMemoryBuffer(sync_shm_id);
if (!buffer)
return error::kInvalidArguments;
QuerySync* sync = static_cast<QuerySync*>(
buffer->GetDataAddress(sync_shm_offset, sizeof(QuerySync)));
if (!sync)
return error::kOutOfBounds;
GLuint service_id = GetQueryServiceID(id, &query_id_map_);
if (IsEmulatedQueryTarget(target)) {
DCHECK_EQ(target,
static_cast<GLenum>(GL_COMMANDS_ISSUED_TIMESTAMP_CHROMIUM));
} else {
    // glQueryCounter is not loaded unless GL_EXT_disjoint_timer_query is
    // present.
if (!feature_info_->feature_flags().ext_disjoint_timer_query) {
InsertError(GL_INVALID_ENUM, "Invalid query target.");
return error::kNoError;
}
// Flush all previous errors
CheckErrorCallbackState();
api()->glQueryCounterFn(service_id, target);
// Check if a new error was generated
if (CheckErrorCallbackState()) {
return error::kNoError;
}
}
QueryInfo* query_info = &query_info_map_[service_id];
query_info->type = target;
  // Make sure to stop tracking this query if it was still pending a result
  // from a previous glQueryCounter or glEndQuery.
RemovePendingQuery(service_id);
PendingQuery pending_query;
pending_query.target = target;
pending_query.service_id = service_id;
pending_query.shm = std::move(buffer);
pending_query.sync = sync;
pending_query.submit_count = submit_count;
if (target == GL_COMMANDS_ISSUED_TIMESTAMP_CHROMIUM)
pending_query.commands_issued_timestamp = base::TimeTicks::Now();
pending_queries_.push_back(std::move(pending_query));
return ProcessQueries(false);
}
error::Error GLES2DecoderPassthroughImpl::DoBeginQueryEXT(
GLenum target,
GLuint id,
int32_t sync_shm_id,
uint32_t sync_shm_offset) {
GLuint service_id = GetQueryServiceID(id, &query_id_map_);
QueryInfo* query_info = &query_info_map_[service_id];
scoped_refptr<gpu::Buffer> buffer = GetSharedMemoryBuffer(sync_shm_id);
if (!buffer)
return error::kInvalidArguments;
QuerySync* sync = static_cast<QuerySync*>(
buffer->GetDataAddress(sync_shm_offset, sizeof(QuerySync)));
if (!sync)
return error::kOutOfBounds;
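  // Beginning a program completion query clears the tracked program id so that
  // only a program linked while this query is active is reported at end time.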
if (target == GL_PROGRAM_COMPLETION_QUERY_CHROMIUM) {
linking_program_service_id_ = 0u;
}
if (IsEmulatedQueryTarget(target)) {
if (active_queries_.find(target) != active_queries_.end()) {
InsertError(GL_INVALID_OPERATION, "Query already active on target.");
return error::kNoError;
}
if (id == 0) {
InsertError(GL_INVALID_OPERATION, "Query id is 0.");
return error::kNoError;
}
if (query_info->type != GL_NONE && query_info->type != target) {
InsertError(GL_INVALID_OPERATION,
"Query type does not match the target.");
return error::kNoError;
}
} else {
// glBeginQuery is not loaded unless GL_EXT_occlusion_query_boolean is
// present
if (!feature_info_->feature_flags().occlusion_query_boolean) {
InsertError(GL_INVALID_ENUM, "Invalid query target.");
return error::kNoError;
}
// Flush all previous errors
CheckErrorCallbackState();
api()->glBeginQueryFn(target, service_id);
// Check if a new error was generated
if (CheckErrorCallbackState()) {
return error::kNoError;
}
}
query_info->type = target;
// Make sure to stop tracking this query if it was still pending a result from
// a previous glEndQuery
RemovePendingQuery(service_id);
ActiveQuery query;
query.service_id = service_id;
query.shm = std::move(buffer);
query.sync = sync;
if (target == GL_COMMANDS_ISSUED_CHROMIUM)
query.command_processing_start_time = base::TimeTicks::Now();
active_queries_[target] = std::move(query);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBeginTransformFeedback(
GLenum primitivemode) {
api()->glBeginTransformFeedbackFn(primitivemode);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoEndQueryEXT(GLenum target,
uint32_t submit_count) {
if (IsEmulatedQueryTarget(target)) {
auto active_query_iter = active_queries_.find(target);
if (active_query_iter == active_queries_.end()) {
InsertError(GL_INVALID_OPERATION, "No active query on target.");
return error::kNoError;
}
if (target == GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM &&
!pending_read_pixels_.empty()) {
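      // Attach this query to the most recent pending async ReadPixels so it
      // only completes once that readback has finished.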
GLuint query_service_id = active_query_iter->second.service_id;
pending_read_pixels_.back().waiting_async_pack_queries.insert(
query_service_id);
}
} else {
// glEndQuery is not loaded unless GL_EXT_occlusion_query_boolean is present
if (!feature_info_->feature_flags().occlusion_query_boolean) {
InsertError(GL_INVALID_ENUM, "Invalid query target.");
return error::kNoError;
}
// Flush all previous errors
CheckErrorCallbackState();
api()->glEndQueryFn(target);
// Check if a new error was generated
if (CheckErrorCallbackState()) {
return error::kNoError;
}
}
DCHECK(active_queries_.find(target) != active_queries_.end());
ActiveQuery active_query = std::move(active_queries_[target]);
active_queries_.erase(target);
PendingQuery pending_query;
pending_query.target = target;
pending_query.service_id = active_query.service_id;
pending_query.shm = std::move(active_query.shm);
pending_query.sync = active_query.sync;
pending_query.submit_count = submit_count;
switch (target) {
case GL_COMMANDS_COMPLETED_CHROMIUM:
pending_query.commands_completed_fence = gl::GLFence::Create();
break;
case GL_READBACK_SHADOW_COPIES_UPDATED_CHROMIUM:
pending_query.buffer_shadow_update_fence = gl::GLFence::Create();
pending_query.buffer_shadow_updates = std::move(buffer_shadow_updates_);
buffer_shadow_updates_.clear();
break;
case GL_PROGRAM_COMPLETION_QUERY_CHROMIUM:
pending_query.program_service_id = linking_program_service_id_;
break;
case GL_COMMANDS_ISSUED_CHROMIUM:
pending_query.commands_issued_time =
active_query.active_time +
(base::TimeTicks::Now() - active_query.command_processing_start_time);
break;
default:
break;
}
pending_queries_.push_back(std::move(pending_query));
return ProcessQueries(false);
}
error::Error GLES2DecoderPassthroughImpl::DoEndTransformFeedback() {
api()->glEndTransformFeedbackFn();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoSetDisjointValueSyncCHROMIUM(
DisjointValueSync* sync) {
NOTIMPLEMENTED();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoInsertEventMarkerEXT(
GLsizei length,
const char* marker) {
api()->glInsertEventMarkerEXTFn(length, marker);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoPushGroupMarkerEXT(
GLsizei length,
const char* marker) {
api()->glPushGroupMarkerEXTFn(length, marker);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoPopGroupMarkerEXT() {
api()->glPopGroupMarkerEXTFn();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGenVertexArraysOES(
GLsizei n,
volatile GLuint* arrays) {
return GenHelper(n, arrays, &vertex_array_id_map_,
[this](GLsizei n, GLuint* arrays) {
api()->glGenVertexArraysOESFn(n, arrays);
});
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteVertexArraysOES(
GLsizei n,
const volatile GLuint* arrays) {
return DeleteHelper(n, arrays, &vertex_array_id_map_,
[this](GLsizei n, GLuint* arrays) {
api()->glDeleteVertexArraysOESFn(n, arrays);
});
}
error::Error GLES2DecoderPassthroughImpl::DoIsVertexArrayOES(GLuint array,
uint32_t* result) {
*result = api()->glIsVertexArrayOESFn(
GetVertexArrayServiceID(array, &vertex_array_id_map_));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindVertexArrayOES(GLuint array) {
api()->glBindVertexArrayOESFn(
GetVertexArrayServiceID(array, &vertex_array_id_map_));
bound_element_array_buffer_dirty_ = true;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoSwapBuffers(uint64_t swap_id,
GLbitfield flags) {
ca_layer_shared_state_ = nullptr;
if (offscreen_) {
if (offscreen_single_buffer_) {
return error::kNoError;
}
DCHECK(emulated_back_buffer_);
// Make sure the emulated front buffer is allocated and the correct size
if (emulated_front_buffer_ &&
emulated_front_buffer_->size != emulated_back_buffer_->size) {
emulated_front_buffer_->Destroy(true);
emulated_front_buffer_ = nullptr;
}
if (emulated_front_buffer_ == nullptr) {
if (!available_color_textures_.empty()) {
emulated_front_buffer_ = std::move(available_color_textures_.back());
available_color_textures_.pop_back();
} else {
emulated_front_buffer_ = std::make_unique<EmulatedColorBuffer>(
api(), emulated_default_framebuffer_format_);
emulated_front_buffer_->Resize(emulated_back_buffer_->size);
}
}
DCHECK_EQ(emulated_front_buffer_->size, emulated_back_buffer_->size);
if (emulated_default_framebuffer_format_.samples > 0) {
// Resolve the multisampled renderbuffer into the emulated_front_buffer_
emulated_back_buffer_->Blit(emulated_front_buffer_.get());
} else {
DCHECK(emulated_back_buffer_->color_texture != nullptr);
// If the offscreen buffer should be preserved, copy the old backbuffer
// into the new one
if (offscreen_target_buffer_preserved_) {
emulated_back_buffer_->Blit(emulated_front_buffer_.get());
}
// Swap the front and back buffer textures and update the framebuffer
// attachment.
std::unique_ptr<EmulatedColorBuffer> old_front_buffer =
std::move(emulated_front_buffer_);
emulated_front_buffer_ =
emulated_back_buffer_->SetColorBuffer(std::move(old_front_buffer));
}
return error::kNoError;
}
client()->OnSwapBuffers(swap_id, flags);
if (surface_->SupportsAsyncSwap()) {
TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
"gpu", "AsyncSwapBuffers",
TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", swap_id));
surface_->SwapBuffersAsync(
base::BindOnce(
&GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult,
weak_ptr_factory_.GetWeakPtr(), "SwapBuffers", swap_id),
base::DoNothing());
return error::kNoError;
} else {
return CheckSwapBuffersResult(surface_->SwapBuffers(base::DoNothing()),
"SwapBuffers");
}
}
error::Error GLES2DecoderPassthroughImpl::DoGetMaxValueInBufferCHROMIUM(
GLuint buffer_id,
GLsizei count,
GLenum type,
GLuint offset,
uint32_t* result) {
NOTIMPLEMENTED();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoEnableFeatureCHROMIUM(
const char* feature) {
NOTIMPLEMENTED();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoMapBufferRange(
GLenum target,
GLintptr offset,
GLsizeiptr size,
GLbitfield access,
void* ptr,
int32_t data_shm_id,
uint32_t data_shm_offset,
uint32_t* result) {
CheckErrorCallbackState();
GLbitfield filtered_access = access;
// Always filter out GL_MAP_UNSYNCHRONIZED_BIT to get rid of undefined
// behaviors.
filtered_access = (filtered_access & ~GL_MAP_UNSYNCHRONIZED_BIT);
if ((filtered_access & GL_MAP_INVALIDATE_BUFFER_BIT) != 0) {
// To be on the safe side, always map GL_MAP_INVALIDATE_BUFFER_BIT to
// GL_MAP_INVALIDATE_RANGE_BIT.
filtered_access = (filtered_access & ~GL_MAP_INVALIDATE_BUFFER_BIT);
filtered_access = (filtered_access | GL_MAP_INVALIDATE_RANGE_BIT);
}
if ((filtered_access & GL_MAP_INVALIDATE_RANGE_BIT) == 0) {
    // If the user intends to use this buffer without invalidating the data, we
    // also need to add GL_MAP_READ_BIT to preserve the original data when
    // copying it to shared memory.
filtered_access = (filtered_access | GL_MAP_READ_BIT);
}
void* mapped_ptr =
api()->glMapBufferRangeFn(target, offset, size, filtered_access);
if (CheckErrorCallbackState() || mapped_ptr == nullptr) {
// Had an error while mapping, don't copy any data
*result = 0;
return error::kNoError;
}
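  // When the range is not invalidated, seed the client's shared memory view
  // with the buffer's current contents.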
if ((filtered_access & GL_MAP_INVALIDATE_RANGE_BIT) == 0) {
memcpy(ptr, mapped_ptr, size);
}
// Track the mapping of this buffer so that data can be synchronized when it
// is unmapped
DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
if (target == GL_ELEMENT_ARRAY_BUFFER) {
LazilyUpdateCurrentlyBoundElementArrayBuffer();
}
GLuint client_buffer = bound_buffers_.at(target);
MappedBuffer mapped_buffer_info;
mapped_buffer_info.size = size;
mapped_buffer_info.original_access = access;
mapped_buffer_info.filtered_access = filtered_access;
mapped_buffer_info.map_ptr = static_cast<uint8_t*>(mapped_ptr);
mapped_buffer_info.data_shm_id = data_shm_id;
mapped_buffer_info.data_shm_offset = data_shm_offset;
DCHECK(resources_->mapped_buffer_map.find(client_buffer) ==
resources_->mapped_buffer_map.end());
resources_->mapped_buffer_map.insert(
std::make_pair(client_buffer, mapped_buffer_info));
*result = 1;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUnmapBuffer(GLenum target) {
if (target == GL_ELEMENT_ARRAY_BUFFER) {
LazilyUpdateCurrentlyBoundElementArrayBuffer();
}
auto bound_buffers_iter = bound_buffers_.find(target);
if (bound_buffers_iter == bound_buffers_.end()) {
InsertError(GL_INVALID_ENUM, "Invalid buffer target.");
return error::kNoError;
}
if (bound_buffers_iter->second == 0) {
InsertError(GL_INVALID_OPERATION, "No buffer bound to this target.");
return error::kNoError;
}
GLuint client_buffer = bound_buffers_iter->second;
auto mapped_buffer_info_iter =
resources_->mapped_buffer_map.find(client_buffer);
if (mapped_buffer_info_iter == resources_->mapped_buffer_map.end()) {
InsertError(GL_INVALID_OPERATION, "Buffer is not mapped.");
return error::kNoError;
}
const MappedBuffer& map_info = mapped_buffer_info_iter->second;
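  // If the buffer was mapped for writing without explicit flushes, copy the
  // client's writes from shared memory back into the driver mapping before
  // unmapping.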
if ((map_info.filtered_access & GL_MAP_WRITE_BIT) != 0 &&
(map_info.filtered_access & GL_MAP_FLUSH_EXPLICIT_BIT) == 0) {
uint8_t* mem = GetSharedMemoryAs<uint8_t*>(
map_info.data_shm_id, map_info.data_shm_offset, map_info.size);
if (!mem) {
return error::kOutOfBounds;
}
memcpy(map_info.map_ptr, mem, map_info.size);
}
api()->glUnmapBufferFn(target);
resources_->mapped_buffer_map.erase(mapped_buffer_info_iter);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoResizeCHROMIUM(
GLuint width,
GLuint height,
GLfloat scale_factor,
gfx::ColorSpace color_space,
GLboolean alpha) {
// gfx::Size uses integers, make sure width and height do not overflow
static_assert(sizeof(GLuint) >= sizeof(int), "Unexpected GLuint size.");
static const GLuint kMaxDimension =
static_cast<GLuint>(std::numeric_limits<int>::max());
gfx::Size safe_size(base::clamp(width, 1U, kMaxDimension),
base::clamp(height, 1U, kMaxDimension));
if (offscreen_) {
if (!ResizeOffscreenFramebuffer(safe_size)) {
LOG(ERROR) << "GLES2DecoderPassthroughImpl: Context lost because "
<< "ResizeOffscreenFramebuffer failed.";
return error::kLostContext;
}
} else {
if (!surface_->Resize(safe_size, scale_factor, color_space, !!alpha)) {
LOG(ERROR)
<< "GLES2DecoderPassthroughImpl: Context lost because resize failed.";
return error::kLostContext;
}
DCHECK(context_->IsCurrent(surface_.get()));
if (!context_->IsCurrent(surface_.get())) {
LOG(ERROR) << "GLES2DecoderPassthroughImpl: Context lost because context "
"no longer current after resize callback.";
return error::kLostContext;
}
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetRequestableExtensionsCHROMIUM(
const char** extensions) {
*extensions = reinterpret_cast<const char*>(
api()->glGetStringFn(GL_REQUESTABLE_EXTENSIONS_ANGLE));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoRequestExtensionCHROMIUM(
const char* extension) {
api()->glRequestExtensionANGLEFn(extension);
// Make sure there are no pending GL errors before re-initializing feature
// info
FlushErrors();
// Make sure newly enabled extensions are exposed and usable.
context_->ReinitializeDynamicBindings();
InitializeFeatureInfo(feature_info_->context_type(),
feature_info_->disallowed_features(), true);
// Support for CHROMIUM_texture_storage_image depends on the underlying
// ImageFactory's ability to create anonymous images.
gpu::ImageFactory* image_factory = group_->image_factory();
if (image_factory && image_factory->SupportsCreateAnonymousImage()) {
feature_info_->EnableCHROMIUMTextureStorageImage();
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetProgramInfoCHROMIUM(
GLuint program,
std::vector<uint8_t>* data) {
GLuint service_program = 0;
if (!resources_->program_id_map.GetServiceID(program, &service_program)) {
return error::kNoError;
}
GLint num_attributes = 0;
api()->glGetProgramivFn(service_program, GL_ACTIVE_ATTRIBUTES,
&num_attributes);
GLint num_uniforms = 0;
api()->glGetProgramivFn(service_program, GL_ACTIVE_UNIFORMS, &num_uniforms);
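  // The returned blob is a ProgramInfoHeader followed by a fixed-size
  // ProgramInput block per attribute and per uniform; variable-length data
  // (locations and names) is appended afterwards and referenced by offset.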
const base::CheckedNumeric<size_t> buffer_header_size(
sizeof(ProgramInfoHeader));
const base::CheckedNumeric<size_t> buffer_block_size(sizeof(ProgramInput));
const base::CheckedNumeric<size_t> attribute_block_size =
buffer_block_size * num_attributes;
const base::CheckedNumeric<size_t> uniform_block_size =
buffer_block_size * num_uniforms;
data->resize((buffer_header_size + attribute_block_size + uniform_block_size)
.ValueOrDie(),
0);
GLint link_status = 0;
api()->glGetProgramivFn(service_program, GL_LINK_STATUS, &link_status);
ProgramInfoHeader header;
header.link_status = link_status;
header.num_attribs = num_attributes;
header.num_uniforms = num_uniforms;
InsertValueIntoBuffer(data, header, 0);
GLint active_attribute_max_length = 0;
api()->glGetProgramivFn(service_program, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH,
&active_attribute_max_length);
std::vector<char> attrib_name_buf(active_attribute_max_length, 0);
for (GLint attrib_index = 0; attrib_index < num_attributes; attrib_index++) {
GLsizei length = 0;
GLint size = 0;
GLenum type = GL_NONE;
api()->glGetActiveAttribFn(service_program, attrib_index,
attrib_name_buf.size(), &length, &size, &type,
attrib_name_buf.data());
ProgramInput input;
input.size = size;
input.type = type;
int32_t location =
api()->glGetAttribLocationFn(service_program, attrib_name_buf.data());
input.location_offset = data->size();
AppendValueToBuffer(data, location);
input.name_offset = data->size();
input.name_length = length;
AppendStringToBuffer(data, attrib_name_buf.data(), length);
InsertValueIntoBuffer(
data, input,
(buffer_header_size + (buffer_block_size * attrib_index)).ValueOrDie());
}
GLint active_uniform_max_length = 0;
api()->glGetProgramivFn(service_program, GL_ACTIVE_UNIFORM_MAX_LENGTH,
&active_uniform_max_length);
std::vector<char> uniform_name_buf(active_uniform_max_length, 0);
for (GLint uniform_index = 0; uniform_index < num_uniforms; uniform_index++) {
GLsizei length = 0;
GLint size = 0;
GLenum type = GL_NONE;
api()->glGetActiveUniformFn(service_program, uniform_index,
uniform_name_buf.size(), &length, &size, &type,
uniform_name_buf.data());
ProgramInput input;
input.size = size;
input.type = type;
input.location_offset = data->size();
int32_t base_location =
api()->glGetUniformLocationFn(service_program, uniform_name_buf.data());
AppendValueToBuffer(data, base_location);
GLSLArrayName parsed_service_name(uniform_name_buf.data());
if (size > 1 || parsed_service_name.IsArrayName()) {
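      // Array uniforms have one location per element. The base location
      // appended above corresponds to element 0; query and append the
      // locations of the remaining "name[i]" elements here.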
for (GLint location_index = 1; location_index < size; location_index++) {
std::string array_element_name = parsed_service_name.base_name() + "[" +
base::NumberToString(location_index) +
"]";
int32_t element_location = api()->glGetUniformLocationFn(
service_program, array_element_name.c_str());
AppendValueToBuffer(data, element_location);
}
}
input.name_offset = data->size();
input.name_length = length;
AppendStringToBuffer(data, uniform_name_buf.data(), length);
InsertValueIntoBuffer(data, input,
(buffer_header_size + attribute_block_size +
(buffer_block_size * uniform_index))
.ValueOrDie());
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetUniformBlocksCHROMIUM(
GLuint program,
std::vector<uint8_t>* data) {
GLuint service_program = 0;
if (!resources_->program_id_map.GetServiceID(program, &service_program)) {
return error::kNoError;
}
GLint num_uniform_blocks = 0;
api()->glGetProgramivFn(service_program, GL_ACTIVE_UNIFORM_BLOCKS,
&num_uniform_blocks);
// Resize the data to fit the headers and info objects so that strings can be
// appended.
const base::CheckedNumeric<size_t> buffer_header_size(
sizeof(UniformBlocksHeader));
const base::CheckedNumeric<size_t> buffer_block_size(
sizeof(UniformBlockInfo));
data->resize((buffer_header_size + (num_uniform_blocks * buffer_block_size))
.ValueOrDie(),
0);
UniformBlocksHeader header;
header.num_uniform_blocks = num_uniform_blocks;
InsertValueIntoBuffer(data, header, 0);
GLint active_uniform_block_max_length = 0;
api()->glGetProgramivFn(service_program,
GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH,
&active_uniform_block_max_length);
std::vector<char> uniform_block_name_buf(active_uniform_block_max_length, 0);
for (GLint uniform_block_index = 0; uniform_block_index < num_uniform_blocks;
uniform_block_index++) {
UniformBlockInfo block_info;
GLint uniform_block_binding = 0;
api()->glGetActiveUniformBlockivFn(service_program, uniform_block_index,
GL_UNIFORM_BLOCK_BINDING,
&uniform_block_binding);
block_info.binding = uniform_block_binding;
GLint uniform_block_data_size = 0;
api()->glGetActiveUniformBlockivFn(service_program, uniform_block_index,
GL_UNIFORM_BLOCK_DATA_SIZE,
&uniform_block_data_size);
block_info.data_size = uniform_block_data_size;
GLint uniform_block_name_length = 0;
api()->glGetActiveUniformBlockNameFn(
service_program, uniform_block_index, active_uniform_block_max_length,
&uniform_block_name_length, uniform_block_name_buf.data());
DCHECK(uniform_block_name_length + 1 <= active_uniform_block_max_length);
block_info.name_offset = data->size();
block_info.name_length = uniform_block_name_length + 1;
AppendStringToBuffer(data, uniform_block_name_buf.data(),
uniform_block_name_length + 1);
GLint uniform_block_active_uniforms = 0;
api()->glGetActiveUniformBlockivFn(service_program, uniform_block_index,
GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS,
&uniform_block_active_uniforms);
block_info.active_uniforms = uniform_block_active_uniforms;
std::vector<GLint> uniform_block_indices_buf(uniform_block_active_uniforms,
0);
api()->glGetActiveUniformBlockivFn(service_program, uniform_block_index,
GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES,
uniform_block_indices_buf.data());
block_info.active_uniform_offset = data->size();
for (GLint uniform_block_uniform_index_index = 0;
uniform_block_uniform_index_index < uniform_block_active_uniforms;
uniform_block_uniform_index_index++) {
AppendValueToBuffer(
data,
static_cast<uint32_t>(
uniform_block_indices_buf[uniform_block_uniform_index_index]));
}
GLint uniform_block_referenced_by_vertex_shader = 0;
api()->glGetActiveUniformBlockivFn(
service_program, uniform_block_index,
GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER,
&uniform_block_referenced_by_vertex_shader);
block_info.referenced_by_vertex_shader =
uniform_block_referenced_by_vertex_shader;
GLint uniform_block_referenced_by_fragment_shader = 0;
api()->glGetActiveUniformBlockivFn(
service_program, uniform_block_index,
GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER,
&uniform_block_referenced_by_fragment_shader);
block_info.referenced_by_fragment_shader =
uniform_block_referenced_by_fragment_shader;
InsertValueIntoBuffer(
data, block_info,
(buffer_header_size + (buffer_block_size * uniform_block_index))
.ValueOrDie());
}
return error::kNoError;
}
error::Error
GLES2DecoderPassthroughImpl::DoGetTransformFeedbackVaryingsCHROMIUM(
GLuint program,
std::vector<uint8_t>* data) {
GLuint service_program = 0;
if (!resources_->program_id_map.GetServiceID(program, &service_program)) {
return error::kNoError;
}
GLint transform_feedback_buffer_mode = 0;
api()->glGetProgramivFn(service_program, GL_TRANSFORM_FEEDBACK_BUFFER_MODE,
&transform_feedback_buffer_mode);
GLint num_transform_feedback_varyings = 0;
api()->glGetProgramivFn(service_program, GL_TRANSFORM_FEEDBACK_VARYINGS,
&num_transform_feedback_varyings);
// Resize the data to fit the headers and info objects so that strings can be
// appended.
const base::CheckedNumeric<size_t> buffer_header_size(
sizeof(TransformFeedbackVaryingsHeader));
const base::CheckedNumeric<size_t> buffer_block_size(
sizeof(TransformFeedbackVaryingInfo));
data->resize((buffer_header_size +
(num_transform_feedback_varyings * buffer_block_size))
.ValueOrDie(),
0);
TransformFeedbackVaryingsHeader header;
header.transform_feedback_buffer_mode = transform_feedback_buffer_mode;
header.num_transform_feedback_varyings = num_transform_feedback_varyings;
InsertValueIntoBuffer(data, header, 0);
GLint max_transform_feedback_varying_length = 0;
api()->glGetProgramivFn(service_program,
GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH,
&max_transform_feedback_varying_length);
std::vector<char> transform_feedback_varying_name_buf(
max_transform_feedback_varying_length, 0);
for (GLint transform_feedback_varying_index = 0;
transform_feedback_varying_index < num_transform_feedback_varyings;
transform_feedback_varying_index++) {
GLsizei length = 0;
GLint size = 0;
GLenum type = GL_NONE;
api()->glGetTransformFeedbackVaryingFn(
service_program, transform_feedback_varying_index,
max_transform_feedback_varying_length, &length, &size, &type,
transform_feedback_varying_name_buf.data());
TransformFeedbackVaryingInfo varying_info;
varying_info.size = size;
varying_info.type = type;
DCHECK(length + 1 <= max_transform_feedback_varying_length);
    varying_info.name_offset = data->size();
varying_info.name_length = length + 1;
AppendStringToBuffer(data, transform_feedback_varying_name_buf.data(),
length + 1);
InsertValueIntoBuffer(
data, varying_info,
(buffer_header_size +
(buffer_block_size * transform_feedback_varying_index))
.ValueOrDie());
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetUniformsES3CHROMIUM(
GLuint program,
std::vector<uint8_t>* data) {
GLuint service_program = 0;
if (!resources_->program_id_map.GetServiceID(program, &service_program)) {
return error::kNoError;
}
GLint num_uniforms = 0;
api()->glGetProgramivFn(service_program, GL_ACTIVE_UNIFORMS, &num_uniforms);
UniformsES3Header header;
header.num_uniforms = num_uniforms;
AppendValueToBuffer(data, header);
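  // For each active uniform, query the ES3 interface parameters one at a time
  // and append a fixed-size UniformES3Info record.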
for (GLuint uniform_index = 0;
uniform_index < static_cast<GLuint>(num_uniforms); uniform_index++) {
UniformES3Info uniform_info;
GLint uniform_block_index = 0;
api()->glGetActiveUniformsivFn(service_program, 1, &uniform_index,
GL_UNIFORM_BLOCK_INDEX,
&uniform_block_index);
uniform_info.block_index = uniform_block_index;
GLint uniform_offset = 0;
api()->glGetActiveUniformsivFn(service_program, 1, &uniform_index,
GL_UNIFORM_OFFSET, &uniform_offset);
uniform_info.offset = uniform_offset;
GLint uniform_array_stride = 0;
api()->glGetActiveUniformsivFn(service_program, 1, &uniform_index,
GL_UNIFORM_ARRAY_STRIDE,
&uniform_array_stride);
uniform_info.array_stride = uniform_array_stride;
GLint uniform_matrix_stride = 0;
api()->glGetActiveUniformsivFn(service_program, 1, &uniform_index,
GL_UNIFORM_MATRIX_STRIDE,
&uniform_matrix_stride);
uniform_info.matrix_stride = uniform_matrix_stride;
GLint uniform_is_row_major = 0;
api()->glGetActiveUniformsivFn(service_program, 1, &uniform_index,
GL_UNIFORM_IS_ROW_MAJOR,
&uniform_is_row_major);
uniform_info.is_row_major = uniform_is_row_major;
AppendValueToBuffer(data, uniform_info);
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetTranslatedShaderSourceANGLE(
GLuint shader,
std::string* source) {
CheckErrorCallbackState();
GLuint service_id = GetShaderServiceID(shader, resources_);
GLint translated_source_length = 0;
api()->glGetShaderivFn(service_id, GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE,
&translated_source_length);
if (CheckErrorCallbackState()) {
return error::kNoError;
}
if (translated_source_length > 0) {
std::vector<char> buffer(translated_source_length, 0);
GLsizei length = 0;
api()->glGetTranslatedShaderSourceANGLEFn(
service_id, translated_source_length, &length, buffer.data());
DCHECK(length <= translated_source_length);
*source = length > 0 ? std::string(buffer.data(), length) : std::string();
} else {
*source = std::string();
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoSwapBuffersWithBoundsCHROMIUM(
uint64_t swap_id,
GLsizei count,
const volatile GLint* rects,
GLbitfield flags) {
if (count < 0) {
InsertError(GL_INVALID_VALUE, "count cannot be negative.");
return error::kNoError;
}
ca_layer_shared_state_ = nullptr;
std::vector<gfx::Rect> bounds(count);
for (GLsizei i = 0; i < count; ++i) {
bounds[i] = gfx::Rect(rects[i * 4 + 0], rects[i * 4 + 1], rects[i * 4 + 2],
rects[i * 4 + 3]);
}
client()->OnSwapBuffers(swap_id, flags);
return CheckSwapBuffersResult(
surface_->SwapBuffersWithBounds(bounds, base::DoNothing()),
"SwapBuffersWithBounds");
}
error::Error GLES2DecoderPassthroughImpl::DoPostSubBufferCHROMIUM(
uint64_t swap_id,
GLint x,
GLint y,
GLint width,
GLint height,
GLbitfield flags) {
if (!surface_->SupportsPostSubBuffer()) {
InsertError(GL_INVALID_OPERATION,
"glPostSubBufferCHROMIUM is not supported for this surface.");
return error::kNoError;
}
ca_layer_shared_state_ = nullptr;
client()->OnSwapBuffers(swap_id, flags);
if (surface_->SupportsAsyncSwap()) {
TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
"gpu", "AsyncSwapBuffers",
TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", swap_id));
surface_->PostSubBufferAsync(
x, y, width, height,
base::BindOnce(
&GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult,
weak_ptr_factory_.GetWeakPtr(), "PostSubBuffer", swap_id),
base::DoNothing());
return error::kNoError;
} else {
return CheckSwapBuffersResult(
surface_->PostSubBuffer(x, y, width, height, base::DoNothing()),
"PostSubBuffer");
}
}
error::Error GLES2DecoderPassthroughImpl::DoCopyTextureCHROMIUM(
GLuint source_id,
GLint source_level,
GLenum dest_target,
GLuint dest_id,
GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) {
gl::ScopedEnableTextureRectangleInShaderCompiler enable(
feature_info_->IsWebGLContext() ? api() : nullptr);
BindPendingImageForClientIDIfNeeded(source_id);
api()->glCopyTextureCHROMIUMFn(
GetTextureServiceID(api(), source_id, resources_, false), source_level,
dest_target, GetTextureServiceID(api(), dest_id, resources_, false),
dest_level, internalformat, dest_type, unpack_flip_y,
unpack_premultiply_alpha, unpack_unmultiply_alpha);
UpdateTextureSizeFromClientID(dest_id);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCopySubTextureCHROMIUM(
GLuint source_id,
GLint source_level,
GLenum dest_target,
GLuint dest_id,
GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
GLint y,
GLsizei width,
GLsizei height,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) {
gl::ScopedEnableTextureRectangleInShaderCompiler enable(
feature_info_->IsWebGLContext() ? api() : nullptr);
BindPendingImageForClientIDIfNeeded(source_id);
api()->glCopySubTextureCHROMIUMFn(
GetTextureServiceID(api(), source_id, resources_, false), source_level,
dest_target, GetTextureServiceID(api(), dest_id, resources_, false),
dest_level, xoffset, yoffset, x, y, width, height, unpack_flip_y,
unpack_premultiply_alpha, unpack_unmultiply_alpha);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDrawArraysInstancedANGLE(
GLenum mode,
GLint first,
GLsizei count,
GLsizei primcount) {
BindPendingImagesForSamplersIfNeeded();
api()->glDrawArraysInstancedANGLEFn(mode, first, count, primcount);
return error::kNoError;
}
error::Error
GLES2DecoderPassthroughImpl::DoDrawArraysInstancedBaseInstanceANGLE(
GLenum mode,
GLint first,
GLsizei count,
GLsizei primcount,
GLuint baseinstance) {
BindPendingImagesForSamplersIfNeeded();
api()->glDrawArraysInstancedBaseInstanceANGLEFn(mode, first, count, primcount,
baseinstance);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDrawElementsInstancedANGLE(
GLenum mode,
GLsizei count,
GLenum type,
const void* indices,
GLsizei primcount) {
BindPendingImagesForSamplersIfNeeded();
api()->glDrawElementsInstancedANGLEFn(mode, count, type, indices, primcount);
return error::kNoError;
}
error::Error
GLES2DecoderPassthroughImpl::DoDrawElementsInstancedBaseVertexBaseInstanceANGLE(
GLenum mode,
GLsizei count,
GLenum type,
const void* indices,
GLsizei primcount,
GLint basevertex,
GLuint baseinstance) {
BindPendingImagesForSamplersIfNeeded();
api()->glDrawElementsInstancedBaseVertexBaseInstanceANGLEFn(
mode, count, type, indices, primcount, basevertex, baseinstance);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoVertexAttribDivisorANGLE(
GLuint index,
GLuint divisor) {
api()->glVertexAttribDivisorANGLEFn(index, divisor);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoProduceTextureDirectCHROMIUM(
GLuint texture_client_id,
const volatile GLbyte* mailbox) {
scoped_refptr<TexturePassthrough> texture;
if (!resources_->texture_object_map.GetServiceID(texture_client_id,
&texture) ||
texture == nullptr) {
InsertError(GL_INVALID_OPERATION, "Unknown texture.");
return error::kNoError;
}
const Mailbox& mb = Mailbox::FromVolatile(
*reinterpret_cast<const volatile Mailbox*>(mailbox));
mailbox_manager_->ProduceTexture(mb, texture.get());
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCreateAndConsumeTextureINTERNAL(
GLuint texture_client_id,
const volatile GLbyte* mailbox) {
if (!texture_client_id ||
resources_->texture_id_map.HasClientID(texture_client_id)) {
return error::kInvalidArguments;
}
const Mailbox& mb = Mailbox::FromVolatile(
*reinterpret_cast<const volatile Mailbox*>(mailbox));
scoped_refptr<TexturePassthrough> texture = TexturePassthrough::CheckedCast(
group_->mailbox_manager()->ConsumeTexture(mb));
if (texture == nullptr) {
// Create texture to handle invalid mailbox (see http://crbug.com/472465 and
// http://crbug.com/851878).
DoGenTextures(1, &texture_client_id);
InsertError(GL_INVALID_OPERATION, "Invalid mailbox name.");
return error::kNoError;
}
// Update id mappings
resources_->texture_id_map.RemoveClientID(texture_client_id);
resources_->texture_id_map.SetIDMapping(texture_client_id,
texture->service_id());
resources_->texture_object_map.RemoveClientID(texture_client_id);
resources_->texture_object_map.SetIDMapping(texture_client_id, texture);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindUniformLocationCHROMIUM(
GLuint program,
GLint location,
const char* name) {
api()->glBindUniformLocationCHROMIUMFn(
GetProgramServiceID(program, resources_), location, name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindTexImage2DCHROMIUM(
GLenum target,
GLint imageId) {
return BindTexImage2DCHROMIUMImpl(target, 0, imageId);
}
error::Error
GLES2DecoderPassthroughImpl::DoBindTexImage2DWithInternalformatCHROMIUM(
GLenum target,
GLenum internalformat,
GLint imageId) {
return BindTexImage2DCHROMIUMImpl(target, internalformat, imageId);
}
error::Error GLES2DecoderPassthroughImpl::DoReleaseTexImage2DCHROMIUM(
GLenum target,
GLint imageId) {
TextureTarget target_enum = GLenumToTextureTarget(target);
if (target_enum == TextureTarget::kCubeMap ||
target_enum == TextureTarget::kUnkown) {
InsertError(GL_INVALID_ENUM, "Invalid target");
return error::kNoError;
}
const BoundTexture& bound_texture =
bound_textures_[static_cast<size_t>(target_enum)][active_texture_unit_];
if (bound_texture.texture == nullptr) {
InsertError(GL_INVALID_OPERATION, "No texture bound");
return error::kNoError;
}
gl::GLImage* image = group_->image_manager()->LookupImage(imageId);
if (image == nullptr) {
InsertError(GL_INVALID_OPERATION, "No image found with the given ID");
return error::kNoError;
}
// Only release the image if it is currently bound
if (bound_texture.texture->GetLevelImage(target, 0) == image) {
image->ReleaseTexImage(target);
bound_texture.texture->SetLevelImage(target, 0, nullptr);
}
// Target is already validated
UpdateTextureSizeFromTarget(target);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTraceBeginCHROMIUM(
const char* category_name,
const char* trace_name) {
if (!gpu_tracer_->Begin(category_name, trace_name, kTraceCHROMIUM)) {
InsertError(GL_INVALID_OPERATION, "Failed to create begin trace");
return error::kNoError;
}
debug_marker_manager_.PushGroup(trace_name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoTraceEndCHROMIUM() {
if (!gpu_tracer_->End(kTraceCHROMIUM)) {
InsertError(GL_INVALID_OPERATION, "No trace to end");
return error::kNoError;
}
debug_marker_manager_.PopGroup();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDiscardFramebufferEXT(
GLenum target,
GLsizei count,
const volatile GLenum* attachments) {
// Validate that count is non-negative before allocating a vector
if (count < 0) {
InsertError(GL_INVALID_VALUE, "count cannot be negative.");
return error::kNoError;
}
std::vector<GLenum> attachments_copy(attachments, attachments + count);
if (feature_info_->gl_version_info().is_es3) {
api()->glInvalidateFramebufferFn(target, count, attachments_copy.data());
} else {
api()->glDiscardFramebufferEXTFn(target, count, attachments_copy.data());
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoLoseContextCHROMIUM(GLenum current,
GLenum other) {
if (!ValidContextLostReason(current) || !ValidContextLostReason(other)) {
InsertError(GL_INVALID_ENUM, "invalid context loss reason.");
return error::kNoError;
}
MarkContextLost(GetContextLostReasonFromResetStatus(current));
group_->LoseContexts(GetContextLostReasonFromResetStatus(other));
reset_by_robustness_extension_ = true;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDescheduleUntilFinishedCHROMIUM() {
if (!gl::GLFence::IsSupported()) {
return error::kNoError;
}
auto fence = gl::GLFence::Create();
if (!fence) {
InsertError(GL_INVALID_OPERATION, "gl::GLFence::Create() failed.");
MarkContextLost(error::kUnknown);
group_->LoseContexts(error::kUnknown);
return error::kLostContext;
}
deschedule_until_finished_fences_.push_back(std::move(fence));
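  // The first pending fence is only recorded; descheduling happens when a
  // second fence arrives and the oldest one has not completed yet.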
if (deschedule_until_finished_fences_.size() == 1) {
return error::kNoError;
}
DCHECK_EQ(2u, deschedule_until_finished_fences_.size());
if (deschedule_until_finished_fences_[0]->HasCompleted()) {
deschedule_until_finished_fences_.erase(
deschedule_until_finished_fences_.begin());
return error::kNoError;
}
TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
"cc", "GLES2DecoderPassthroughImpl::DescheduleUntilFinished",
TRACE_ID_LOCAL(this));
client()->OnDescheduleUntilFinished();
return error::kDeferLaterCommands;
}
error::Error GLES2DecoderPassthroughImpl::DoDrawBuffersEXT(
GLsizei count,
const volatile GLenum* bufs) {
if (!feature_info_->feature_flags().ext_draw_buffers &&
!feature_info_->gl_version_info().is_es3) {
return error::kUnknownCommand;
}
// Validate that count is non-negative before allocating a vector
if (count < 0) {
InsertError(GL_INVALID_VALUE, "count cannot be negative.");
return error::kNoError;
}
std::vector<GLenum> bufs_copy(bufs, bufs + count);
api()->glDrawBuffersARBFn(count, bufs_copy.data());
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDiscardBackbufferCHROMIUM() {
NOTIMPLEMENTED();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoScheduleOverlayPlaneCHROMIUM(
GLint plane_z_order,
GLenum plane_transform,
GLuint overlay_texture_id,
GLint bounds_x,
GLint bounds_y,
GLint bounds_width,
GLint bounds_height,
GLfloat uv_x,
GLfloat uv_y,
GLfloat uv_width,
GLfloat uv_height,
bool enable_blend,
GLuint gpu_fence_id) {
scoped_refptr<TexturePassthrough> passthrough_texture;
if (!resources_->texture_object_map.GetServiceID(overlay_texture_id,
&passthrough_texture) ||
passthrough_texture == nullptr) {
InsertError(GL_INVALID_VALUE, "invalid texture id");
return error::kNoError;
}
gl::GLImage* image =
passthrough_texture->GetLevelImage(passthrough_texture->target(), 0);
if (!image) {
InsertError(GL_INVALID_VALUE, "texture has no image");
return error::kNoError;
}
gfx::OverlayTransform transform = GetGFXOverlayTransform(plane_transform);
if (transform == gfx::OVERLAY_TRANSFORM_INVALID) {
InsertError(GL_INVALID_ENUM, "invalid transform enum");
return error::kNoError;
}
std::unique_ptr<gfx::GpuFence> gpu_fence;
if (gpu_fence_id != 0) {
gpu_fence = GetGpuFenceManager()->GetGpuFence(gpu_fence_id);
if (!gpu_fence) {
InsertError(GL_INVALID_ENUM, "unknown fence");
return error::kNoError;
}
}
if (!surface_->ScheduleOverlayPlane(
image, std::move(gpu_fence),
gfx::OverlayPlaneData(
plane_z_order, transform,
gfx::Rect(bounds_x, bounds_y, bounds_width, bounds_height),
gfx::RectF(uv_x, uv_y, uv_width, uv_height), enable_blend,
/*damage_rect=*/gfx::Rect(), /*opacity=*/1.0f,
gfx::OverlayPriorityHint::kNone,
/*rounded_corners*/ gfx::RRectF(), image->color_space(),
/*hdr_metadata=*/absl::nullopt))) {
InsertError(GL_INVALID_OPERATION, "failed to schedule overlay");
return error::kNoError;
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoScheduleCALayerSharedStateCHROMIUM(
GLfloat opacity,
GLboolean is_clipped,
const GLfloat* clip_rect,
const GLfloat* rounded_corner_bounds,
GLint sorting_context_id,
const GLfloat* transform) {
if (!ca_layer_shared_state_) {
ca_layer_shared_state_ = std::make_unique<CALayerSharedState>();
}
ca_layer_shared_state_->opacity = opacity;
ca_layer_shared_state_->is_clipped = is_clipped;
ca_layer_shared_state_->clip_rect = gfx::ToEnclosingRect(
gfx::RectF(clip_rect[0], clip_rect[1], clip_rect[2], clip_rect[3]));
ca_layer_shared_state_->rounded_corner_bounds =
gfx::RRectF(rounded_corner_bounds[0], rounded_corner_bounds[1],
rounded_corner_bounds[2], rounded_corner_bounds[3],
rounded_corner_bounds[4]);
ca_layer_shared_state_->sorting_context_id = sorting_context_id;
ca_layer_shared_state_->transform =
gfx::Transform(transform[0], transform[4], transform[8], transform[12],
transform[1], transform[5], transform[9], transform[13],
transform[2], transform[6], transform[10], transform[14],
transform[3], transform[7], transform[11], transform[15]);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoScheduleCALayerCHROMIUM(
GLuint contents_texture_id,
const GLfloat* contents_rect,
GLuint background_color,
GLuint edge_aa_mask,
GLenum filter,
const GLfloat* bounds_rect) {
if (!ca_layer_shared_state_) {
InsertError(GL_INVALID_OPERATION,
"glScheduleCALayerSharedStateCHROMIUM has not been called");
return error::kNoError;
}
gl::GLImage* image = nullptr;
if (contents_texture_id) {
scoped_refptr<TexturePassthrough> passthrough_texture;
if (!resources_->texture_object_map.GetServiceID(contents_texture_id,
&passthrough_texture) ||
passthrough_texture == nullptr) {
InsertError(GL_INVALID_VALUE, "unknown texture");
return error::kNoError;
}
DCHECK(passthrough_texture);
image =
passthrough_texture->GetLevelImage(passthrough_texture->target(), 0);
if (!image) {
InsertError(GL_INVALID_VALUE, "unsupported texture format");
return error::kNoError;
}
}
ui::CARendererLayerParams params = ui::CARendererLayerParams(
ca_layer_shared_state_->is_clipped, ca_layer_shared_state_->clip_rect,
ca_layer_shared_state_->rounded_corner_bounds,
ca_layer_shared_state_->sorting_context_id,
ca_layer_shared_state_->transform, image,
gfx::RectF(contents_rect[0], contents_rect[1], contents_rect[2],
contents_rect[3]),
gfx::ToEnclosingRect(gfx::RectF(bounds_rect[0], bounds_rect[1],
bounds_rect[2], bounds_rect[3])),
background_color, edge_aa_mask, ca_layer_shared_state_->opacity, filter,
gfx::ProtectedVideoType::kClear);
if (!surface_->ScheduleCALayer(params)) {
InsertError(GL_INVALID_OPERATION, "failed to schedule CALayer");
return error::kNoError;
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoScheduleCALayerInUseQueryCHROMIUM(
GLsizei n,
const volatile GLuint* textures) {
// Validate that count is non-negative before allocating a vector
if (n < 0) {
InsertError(GL_INVALID_VALUE, "count cannot be negative.");
return error::kNoError;
}
std::vector<gl::GLSurface::CALayerInUseQuery> queries;
queries.reserve(n);
for (GLsizei i = 0; i < n; ++i) {
gl::GLImage* image = nullptr;
GLuint texture_id = textures[i];
if (texture_id) {
// If a |texture_id| is invalid (due to a client error), report that it
// is not in use. Failing the GL call can result in compositor hangs.
// https://crbug.com/1120795
scoped_refptr<TexturePassthrough> passthrough_texture;
if (resources_->texture_object_map.GetServiceID(texture_id,
&passthrough_texture)) {
if (passthrough_texture) {
image = passthrough_texture->GetLevelImage(
passthrough_texture->target(), 0);
}
}
}
gl::GLSurface::CALayerInUseQuery query;
query.image = image;
query.texture = texture_id;
queries.push_back(query);
}
surface_->ScheduleCALayerInUseQuery(std::move(queries));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoScheduleDCLayerCHROMIUM(
GLuint texture_0,
GLuint texture_1,
GLint z_order,
GLint content_x,
GLint content_y,
GLint content_width,
GLint content_height,
GLint quad_x,
GLint quad_y,
GLint quad_width,
GLint quad_height,
GLfloat transform_c1r1,
GLfloat transform_c2r1,
GLfloat transform_c1r2,
GLfloat transform_c2r2,
GLfloat transform_tx,
GLfloat transform_ty,
GLboolean is_clipped,
GLint clip_x,
GLint clip_y,
GLint clip_width,
GLint clip_height,
GLuint protected_video_type) {
if (protected_video_type >
static_cast<GLuint>(gfx::ProtectedVideoType::kMaxValue)) {
InsertError(GL_INVALID_VALUE, "invalid protected video type");
return error::kNoError;
}
if (!texture_0) {
InsertError(GL_INVALID_VALUE, "invalid texture");
return error::kNoError;
}
std::unique_ptr<ui::DCRendererLayerParams> params =
std::make_unique<ui::DCRendererLayerParams>();
GLuint texture_ids[] = {texture_0, texture_1};
size_t i = 0;
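  // Resolve the GLImage backing each provided texture. texture_1 may be zero,
  // in which case only the first image is used.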
for (GLuint texture_id : texture_ids) {
if (!texture_id)
break;
scoped_refptr<TexturePassthrough> passthrough_texture;
if (!resources_->texture_object_map.GetServiceID(texture_id,
&passthrough_texture) ||
passthrough_texture == nullptr) {
InsertError(GL_INVALID_VALUE, "unknown texture");
return error::kNoError;
}
DCHECK(passthrough_texture);
gl::GLImage* image =
passthrough_texture->GetLevelImage(passthrough_texture->target(), 0);
if (!image) {
InsertError(GL_INVALID_VALUE, "unsupported texture format");
return error::kNoError;
}
params->images[i++] = scoped_refptr<gl::GLImage>(image);
}
params->z_order = z_order;
params->content_rect =
gfx::Rect(content_x, content_y, content_width, content_height);
params->quad_rect = gfx::Rect(quad_x, quad_y, quad_width, quad_height);
params->transform =
gfx::Transform(transform_c1r1, transform_c2r1, transform_c1r2,
transform_c2r2, transform_tx, transform_ty);
if (is_clipped) {
params->clip_rect = gfx::Rect(clip_x, clip_y, clip_width, clip_height);
}
params->protected_video_type =
static_cast<gfx::ProtectedVideoType>(protected_video_type);
if (!surface_->ScheduleDCLayer(std::move(params)))
InsertError(GL_INVALID_OPERATION, "failed to schedule DCLayer");
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCommitOverlayPlanesCHROMIUM(
uint64_t swap_id,
GLbitfield flags) {
if (!surface_->SupportsCommitOverlayPlanes()) {
InsertError(GL_INVALID_OPERATION,
"glCommitOverlayPlanes not supported by surface.");
return error::kNoError;
}
ca_layer_shared_state_ = nullptr;
client()->OnSwapBuffers(swap_id, flags);
if (surface_->SupportsAsyncSwap()) {
TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
"gpu", "AsyncSwapBuffers",
TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", swap_id));
surface_->CommitOverlayPlanesAsync(
base::BindOnce(
&GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult,
weak_ptr_factory_.GetWeakPtr(), "CommitOverlayPlanes", swap_id),
base::DoNothing());
return error::kNoError;
} else {
return CheckSwapBuffersResult(
surface_->CommitOverlayPlanes(base::DoNothing()),
"CommitOverlayPlanes");
}
}
error::Error GLES2DecoderPassthroughImpl::DoSetColorSpaceMetadataCHROMIUM(
GLuint texture_id,
gfx::ColorSpace color_space) {
scoped_refptr<TexturePassthrough> passthrough_texture;
if (!resources_->texture_object_map.GetServiceID(texture_id,
&passthrough_texture) ||
passthrough_texture == nullptr) {
InsertError(GL_INVALID_VALUE, "unknown texture.");
return error::kNoError;
}
scoped_refptr<gl::GLImage> image =
passthrough_texture->GetLevelImage(passthrough_texture->target(), 0);
if (image == nullptr) {
InsertError(GL_INVALID_VALUE, "no image associated with texture.");
return error::kNoError;
}
image->SetColorSpace(color_space);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoFlushDriverCachesCHROMIUM() {
// On Adreno Android devices we need to use a workaround to force caches to
// clear.
if (feature_info_->workarounds().unbind_egl_context_to_flush_driver_caches) {
context_->ReleaseCurrent(nullptr);
context_->MakeCurrent(surface_.get());
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCoverageModulationCHROMIUM(
GLenum components) {
NOTIMPLEMENTED();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBlendBarrierKHR() {
NOTIMPLEMENTED();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindFragDataLocationIndexedEXT(
GLuint program,
GLuint colorNumber,
GLuint index,
const char* name) {
api()->glBindFragDataLocationIndexedFn(
GetProgramServiceID(program, resources_), colorNumber, index, name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoBindFragDataLocationEXT(
GLuint program,
GLuint colorNumber,
const char* name) {
api()->glBindFragDataLocationFn(GetProgramServiceID(program, resources_),
colorNumber, name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetFragDataIndexEXT(
GLuint program,
const char* name,
GLint* index) {
*index = api()->glGetFragDataIndexFn(GetProgramServiceID(program, resources_),
name);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoSetDrawRectangleCHROMIUM(
GLint x,
GLint y,
GLint width,
GLint height) {
GLint current_framebuffer = 0;
  api()->glGetIntegervFn(GL_FRAMEBUFFER_BINDING, &current_framebuffer);
if (current_framebuffer != 0) {
InsertError(GL_INVALID_OPERATION, "framebuffer must not be bound.");
return error::kNoError;
}
if (!surface_->SupportsDCLayers()) {
InsertError(GL_INVALID_OPERATION,
"surface doesn't support SetDrawRectangle.");
return error::kNoError;
}
gfx::Rect rect(x, y, width, height);
if (!surface_->SetDrawRectangle(rect)) {
InsertError(GL_INVALID_OPERATION, "SetDrawRectangle failed on surface");
// If SetDrawRectangle failed, we may not have a current context any
// more, make sure to report lost context.
MarkContextLost(error::kUnknown);
group_->LoseContexts(error::kUnknown);
return error::kLostContext;
}
ApplySurfaceDrawOffset();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoSetEnableDCLayersCHROMIUM(
GLboolean enable) {
GLint current_framebuffer = 0;
  api()->glGetIntegervFn(GL_FRAMEBUFFER_BINDING, &current_framebuffer);
if (current_framebuffer != 0) {
InsertError(GL_INVALID_OPERATION, "framebuffer must not be bound.");
return error::kNoError;
}
if (!surface_->SupportsDCLayers()) {
InsertError(GL_INVALID_OPERATION,
"surface doesn't support SetDrawRectangle.");
return error::kNoError;
}
if (!surface_->SetEnableDCLayers(!!enable)) {
InsertError(GL_INVALID_OPERATION, "SetEnableDCLayers failed on surface.");
// If SetEnableDCLayers failed, we may not have a current context any
// more, make sure to report lost context.
MarkContextLost(error::kUnknown);
group_->LoseContexts(error::kUnknown);
return error::kLostContext;
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoWindowRectanglesEXT(
GLenum mode,
GLsizei n,
const volatile GLint* box) {
std::vector<GLint> box_copy(box, box + (n * 4));
api()->glWindowRectanglesEXTFn(mode, n, box_copy.data());
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCreateGpuFenceINTERNAL(
GLuint gpu_fence_id) {
if (!feature_info_->feature_flags().chromium_gpu_fence)
return error::kUnknownCommand;
if (!GetGpuFenceManager()->CreateGpuFence(gpu_fence_id))
return error::kInvalidArguments;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoWaitGpuFenceCHROMIUM(
GLuint gpu_fence_id) {
if (!feature_info_->feature_flags().chromium_gpu_fence)
return error::kUnknownCommand;
if (!GetGpuFenceManager()->GpuFenceServerWait(gpu_fence_id))
return error::kInvalidArguments;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDestroyGpuFenceCHROMIUM(
GLuint gpu_fence_id) {
if (!feature_info_->feature_flags().chromium_gpu_fence)
return error::kUnknownCommand;
if (!GetGpuFenceManager()->RemoveGpuFence(gpu_fence_id))
return error::kInvalidArguments;
return error::kNoError;
}
error::Error
GLES2DecoderPassthroughImpl::DoSetReadbackBufferShadowAllocationINTERNAL(
GLuint buffer_id,
GLuint shm_id,
GLuint shm_offset,
GLuint size) {
BufferShadowUpdate update;
update.shm = GetSharedMemoryBuffer(shm_id);
update.shm_offset = shm_offset;
update.size = size;
GLuint buffer_service_id = 0;
if (!resources_->buffer_id_map.GetServiceID(buffer_id, &buffer_service_id)) {
InsertError(GL_INVALID_OPERATION, "Invalid buffer ID");
return error::kNoError;
}
if (!update.shm) {
return error::kInvalidArguments;
}
if (update.shm->GetRemainingSize(shm_offset) < size) {
return error::kOutOfBounds;
}
buffer_shadow_updates_.emplace(buffer_id, std::move(update));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoMaxShaderCompilerThreadsKHR(
GLuint count) {
api()->glMaxShaderCompilerThreadsKHRFn(count);
return error::kNoError;
}
error::Error
GLES2DecoderPassthroughImpl::DoInitializeDiscardableTextureCHROMIUM(
GLuint texture_id,
ServiceDiscardableHandle&& discardable_handle) {
scoped_refptr<TexturePassthrough> texture_passthrough;
if (!resources_->texture_object_map.GetServiceID(texture_id,
&texture_passthrough) ||
texture_passthrough == nullptr) {
InsertError(GL_INVALID_VALUE, "Invalid texture ID");
return error::kNoError;
}
group_->passthrough_discardable_manager()->InitializeTexture(
texture_id, group_.get(), texture_passthrough->estimated_size(),
std::move(discardable_handle));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoLockDiscardableTextureCHROMIUM(
GLuint texture_id) {
if (!group_->passthrough_discardable_manager()->LockTexture(texture_id,
group_.get())) {
InsertError(GL_INVALID_VALUE, "Texture ID not initialized");
return error::kNoError;
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUnlockDiscardableTextureCHROMIUM(
GLuint texture_id) {
TexturePassthrough* texture_to_unbind = nullptr;
if (!group_->passthrough_discardable_manager()->UnlockTexture(
texture_id, group_.get(), &texture_to_unbind)) {
InsertError(GL_INVALID_VALUE, "Texture ID not initialized");
return error::kNoError;
}
if (texture_to_unbind != nullptr) {
UpdateTextureBinding(texture_to_unbind->target(), texture_id, nullptr);
}
return error::kNoError;
}
error::Error
GLES2DecoderPassthroughImpl::DoCreateAndTexStorage2DSharedImageINTERNAL(
GLuint texture_client_id,
GLenum internalformat,
const volatile GLbyte* mailbox) {
// RGB emulation is not needed here.
if (internalformat != GL_NONE) {
InsertError(GL_INVALID_ENUM, "internal format not supported.");
return error::kNoError;
}
if (!texture_client_id ||
resources_->texture_id_map.HasClientID(texture_client_id)) {
InsertError(GL_INVALID_OPERATION, "invalid client ID");
return error::kNoError;
}
const Mailbox& mb = Mailbox::FromVolatile(
*reinterpret_cast<const volatile Mailbox*>(mailbox));
auto shared_image = group_->shared_image_representation_factory()
->ProduceGLTexturePassthrough(mb);
if (shared_image == nullptr) {
// Create texture to handle invalid mailbox (see http://crbug.com/472465 and
// http://crbug.com/851878).
DoGenTextures(1, &texture_client_id);
InsertError(GL_INVALID_OPERATION, "invalid mailbox name.");
return error::kNoError;
}
auto texture = shared_image->GetTexturePassthrough();
// Update id mappings
resources_->texture_id_map.RemoveClientID(texture_client_id);
resources_->texture_id_map.SetIDMapping(texture_client_id,
texture->service_id());
resources_->texture_object_map.RemoveClientID(texture_client_id);
resources_->texture_object_map.SetIDMapping(texture_client_id, texture);
resources_->texture_shared_image_map[texture_client_id] =
PassthroughResources::SharedImageData(std::move(shared_image));
return error::kNoError;
}
error::Error
GLES2DecoderPassthroughImpl::DoBeginSharedImageAccessDirectCHROMIUM(
GLuint client_id,
GLenum mode) {
if (mode != GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM &&
mode != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM &&
mode != GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
InsertError(GL_INVALID_ENUM, "unrecognized access mode");
return error::kNoError;
}
auto found = resources_->texture_shared_image_map.find(client_id);
if (found == resources_->texture_shared_image_map.end()) {
InsertError(GL_INVALID_OPERATION, "texture is not a shared image");
return error::kNoError;
}
if (found->second.is_being_accessed()) {
InsertError(GL_INVALID_OPERATION, "shared image is being accessed.");
return error::kNoError;
}
if (!found->second.BeginAccess(mode, api())) {
InsertError(GL_INVALID_OPERATION, "unable to begin access");
return error::kNoError;
}
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoEndSharedImageAccessDirectCHROMIUM(
GLuint client_id) {
auto found = resources_->texture_shared_image_map.find(client_id);
if (found == resources_->texture_shared_image_map.end()) {
InsertError(GL_INVALID_OPERATION, "texture is not a shared image");
return error::kNoError;
}
if (!found->second.is_being_accessed()) {
InsertError(GL_INVALID_OPERATION, "shared image is not being accessed.");
return error::kNoError;
}
found->second.EndAccess();
return error::kNoError;
}
error::Error
GLES2DecoderPassthroughImpl::DoBeginBatchReadAccessSharedImageCHROMIUM() {
DCHECK(group_->shared_image_manager());
group_->shared_image_manager()->BeginBatchReadAccess();
return error::kNoError;
}
error::Error
GLES2DecoderPassthroughImpl::DoEndBatchReadAccessSharedImageCHROMIUM() {
DCHECK(group_->shared_image_manager());
group_->shared_image_manager()->EndBatchReadAccess();
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoEnableiOES(GLenum target,
GLuint index) {
api()->glEnableiOESFn(target, index);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDisableiOES(GLenum target,
GLuint index) {
api()->glDisableiOESFn(target, index);
return error::kNoError;
}
} // namespace gles2
} // namespace gpu
| 89,743 |
818 | <filename>addons/common/tracing/decision-common/src/main/java/org/kie/kogito/tracing/decision/event/evaluate/EvaluateEvent.java
/*
* Copyright 2020 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.kogito.tracing.decision.event.evaluate;
import java.util.HashMap;
import java.util.Map;
import org.kie.dmn.api.core.DMNContext;
import org.kie.dmn.api.core.DMNResult;
import org.kie.dmn.api.core.ast.DMNNode;
import org.kie.dmn.api.core.event.AfterEvaluateAllEvent;
import org.kie.dmn.api.core.event.AfterEvaluateBKMEvent;
import org.kie.dmn.api.core.event.AfterEvaluateContextEntryEvent;
import org.kie.dmn.api.core.event.AfterEvaluateDecisionEvent;
import org.kie.dmn.api.core.event.AfterEvaluateDecisionServiceEvent;
import org.kie.dmn.api.core.event.AfterEvaluateDecisionTableEvent;
import org.kie.dmn.api.core.event.AfterInvokeBKMEvent;
import org.kie.dmn.api.core.event.BeforeEvaluateAllEvent;
import org.kie.dmn.api.core.event.BeforeEvaluateBKMEvent;
import org.kie.dmn.api.core.event.BeforeEvaluateContextEntryEvent;
import org.kie.dmn.api.core.event.BeforeEvaluateDecisionEvent;
import org.kie.dmn.api.core.event.BeforeEvaluateDecisionServiceEvent;
import org.kie.dmn.api.core.event.BeforeEvaluateDecisionTableEvent;
import org.kie.dmn.api.core.event.BeforeInvokeBKMEvent;
import org.kie.dmn.feel.runtime.FEELFunction;
import org.kie.kogito.decision.DecisionExecutionIdUtils;
import org.kie.kogito.tracing.decision.event.trace.TraceResourceId;
import static org.kie.kogito.tracing.decision.event.evaluate.EvaluateEventType.AFTER_EVALUATE_DECISION_SERVICE;
import static org.kie.kogito.tracing.decision.event.evaluate.EvaluateEventType.BEFORE_EVALUATE_DECISION_SERVICE;
public class EvaluateEvent {
private EvaluateEventType type;
private long timestamp;
private long nanoTime;
private String executionId;
private String modelNamespace;
private String modelName;
private String nodeId;
private String nodeName;
private Map<String, Object> context;
private EvaluateResult result;
private EvaluateContextEntryResult contextEntryResult;
private EvaluateDecisionTableResult decisionTableResult;
private EvaluateEvent(
EvaluateEventType type,
long timestamp,
long nanoTime,
String executionId,
String modelNamespace,
String modelName,
String nodeId,
String nodeName,
Map<String, Object> context,
EvaluateResult result,
EvaluateContextEntryResult contextEntryResult,
EvaluateDecisionTableResult decisionTableResult) {
this.type = type;
this.timestamp = timestamp;
this.nanoTime = nanoTime;
this.executionId = executionId;
this.modelNamespace = modelNamespace;
this.modelName = modelName;
this.nodeId = nodeId;
this.nodeName = nodeName;
this.context = context;
this.result = result;
this.contextEntryResult = contextEntryResult;
this.decisionTableResult = decisionTableResult;
}
public EvaluateEvent(EvaluateEventType type, long timestamp, long nanoTime, DMNResult result, String modelNamespace, String modelName) {
this(type, timestamp, nanoTime, DecisionExecutionIdUtils.get(result.getContext()), modelNamespace, modelName,
null, null, extractContext(result.getContext()), EvaluateResult.from(result), null, null);
}
public EvaluateEvent(EvaluateEventType type, long timestamp, long nanoTime, DMNResult result, DMNNode node) {
this(type, timestamp, nanoTime, DecisionExecutionIdUtils.get(result.getContext()), node.getModelNamespace(), node.getModelName(),
node.getId(), node.getName(), extractContext(result.getContext()), EvaluateResult.from(result), null, null);
}
public EvaluateEvent(EvaluateEventType type, long timestamp, long nanoTime, DMNResult result, String nodeName, EvaluateContextEntryResult contextEntryResult) {
this(type, timestamp, nanoTime, DecisionExecutionIdUtils.get(result.getContext()), null, null, null,
nodeName, extractContext(result.getContext()), EvaluateResult.from(result), contextEntryResult, null);
}
public EvaluateEvent(EvaluateEventType type, long timestamp, long nanoTime, DMNResult result, String nodeName, EvaluateDecisionTableResult decisionTableResult) {
this(type, timestamp, nanoTime, DecisionExecutionIdUtils.get(result.getContext()), null, null,
null, nodeName, extractContext(result.getContext()), EvaluateResult.from(result), null, decisionTableResult);
}
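    // No-arg constructor, typically required by serialization/deserialization
    // frameworks to instantiate the event reflectively.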
private EvaluateEvent() {
}
public EvaluateEventType getType() {
return type;
}
public long getTimestamp() {
return timestamp;
}
public long getNanoTime() {
return nanoTime;
}
public String getExecutionId() {
return executionId;
}
public String getModelNamespace() {
return modelNamespace;
}
public String getModelName() {
return modelName;
}
public String getNodeId() {
return nodeId;
}
public String getNodeName() {
return nodeName;
}
public Map<String, Object> getContext() {
return context;
}
public EvaluateResult getResult() {
return result;
}
public EvaluateContextEntryResult getContextEntryResult() {
return contextEntryResult;
}
public EvaluateDecisionTableResult getDecisionTableResult() {
return decisionTableResult;
}
public TraceResourceId toTraceResourceId(String serviceUrl) {
return getType() == BEFORE_EVALUATE_DECISION_SERVICE || getType() == AFTER_EVALUATE_DECISION_SERVICE
? new TraceResourceId(serviceUrl, getModelNamespace(), getModelName(), getNodeId(), getNodeName())
: new TraceResourceId(serviceUrl, getModelNamespace(), getModelName());
}
public static EvaluateEvent from(BeforeEvaluateAllEvent event) {
return new EvaluateEvent(EvaluateEventType.BEFORE_EVALUATE_ALL, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getModelNamespace(), event.getModelName());
}
public static EvaluateEvent from(AfterEvaluateAllEvent event) {
return new EvaluateEvent(EvaluateEventType.AFTER_EVALUATE_ALL, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getModelNamespace(), event.getModelName());
}
public static EvaluateEvent from(BeforeEvaluateBKMEvent event) {
return new EvaluateEvent(EvaluateEventType.BEFORE_EVALUATE_BKM, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getBusinessKnowledgeModel());
}
public static EvaluateEvent from(AfterEvaluateBKMEvent event) {
return new EvaluateEvent(EvaluateEventType.AFTER_EVALUATE_BKM, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getBusinessKnowledgeModel());
}
public static EvaluateEvent from(BeforeEvaluateContextEntryEvent event) {
return new EvaluateEvent(EvaluateEventType.BEFORE_EVALUATE_CONTEXT_ENTRY, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getNodeName(),
EvaluateContextEntryResult.from(event));
}
public static EvaluateEvent from(AfterEvaluateContextEntryEvent event) {
return new EvaluateEvent(EvaluateEventType.AFTER_EVALUATE_CONTEXT_ENTRY, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getNodeName(),
EvaluateContextEntryResult.from(event));
}
public static EvaluateEvent from(BeforeEvaluateDecisionEvent event) {
return new EvaluateEvent(EvaluateEventType.BEFORE_EVALUATE_DECISION, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getDecision());
}
public static EvaluateEvent from(AfterEvaluateDecisionEvent event) {
return new EvaluateEvent(EvaluateEventType.AFTER_EVALUATE_DECISION, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getDecision());
}
public static EvaluateEvent from(BeforeEvaluateDecisionServiceEvent event) {
return new EvaluateEvent(BEFORE_EVALUATE_DECISION_SERVICE, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getDecisionService());
}
public static EvaluateEvent from(AfterEvaluateDecisionServiceEvent event) {
return new EvaluateEvent(AFTER_EVALUATE_DECISION_SERVICE, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getDecisionService());
}
public static EvaluateEvent from(BeforeEvaluateDecisionTableEvent event) {
return new EvaluateEvent(EvaluateEventType.BEFORE_EVALUATE_DECISION_TABLE, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getNodeName(),
EvaluateDecisionTableResult.from(event));
}
public static EvaluateEvent from(AfterEvaluateDecisionTableEvent event) {
return new EvaluateEvent(EvaluateEventType.AFTER_EVALUATE_DECISION_TABLE, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getNodeName(),
EvaluateDecisionTableResult.from(event));
}
public static EvaluateEvent from(BeforeInvokeBKMEvent event) {
return new EvaluateEvent(EvaluateEventType.BEFORE_INVOKE_BKM, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getBusinessKnowledgeModel());
}
public static EvaluateEvent from(AfterInvokeBKMEvent event) {
return new EvaluateEvent(EvaluateEventType.AFTER_INVOKE_BKM, System.currentTimeMillis(), System.nanoTime(), event.getResult(), event.getBusinessKnowledgeModel());
}
public static Map<String, Object> extractContext(DMNContext context) {
return context.getAll().entrySet().stream()
.filter(e -> !(e.getValue() instanceof FEELFunction))
// This collect method avoids this bug (https://bugs.openjdk.java.net/browse/JDK-8148463) on variables with null value
.collect(HashMap::new, (m, v) -> m.put(v.getKey(), v.getValue()), HashMap::putAll);
}
}
| 3,804 |
5,169 | <gh_stars>1000+
{
"name": "BITLogMacro",
"version": "0.0.5",
"summary": "BITLogMacro.",
"description": "TODO: Add long description of the pod here.",
"homepage": "https://gitee.com/thekingfly/BGLogMacro",
"license": "MIT",
"authors": {
"thekingfly": "<EMAIL>"
},
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://gitee.com/thekingfly/BGLogMacro.git",
"tag": "0.0.5"
},
"source_files": "BITLogMacro/Classes/*.{h,m}",
"exclude_files": "Classes/Exclude",
"frameworks": [
"UIKit",
"Foundation"
],
"dependencies": {
"BGCocoaLumberjack": [
]
}
}
| 285 |
517 | <reponame>finesoft/java
// Copyright 2020 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ==============================================================================
//
// This class has been generated, DO NOT EDIT!
//
package org.tensorflow.op;
import org.tensorflow.Operand;
import org.tensorflow.op.linalg.BandPart;
import org.tensorflow.op.linalg.BatchCholesky;
import org.tensorflow.op.linalg.BatchCholeskyGrad;
import org.tensorflow.op.linalg.BatchMatrixBandPart;
import org.tensorflow.op.linalg.BatchMatrixDeterminant;
import org.tensorflow.op.linalg.BatchMatrixDiag;
import org.tensorflow.op.linalg.BatchMatrixDiagPart;
import org.tensorflow.op.linalg.BatchMatrixInverse;
import org.tensorflow.op.linalg.BatchMatrixSetDiag;
import org.tensorflow.op.linalg.BatchMatrixSolve;
import org.tensorflow.op.linalg.BatchMatrixSolveLs;
import org.tensorflow.op.linalg.BatchMatrixTriangularSolve;
import org.tensorflow.op.linalg.BatchSelfAdjointEig;
import org.tensorflow.op.linalg.BatchSvd;
import org.tensorflow.op.linalg.Cholesky;
import org.tensorflow.op.linalg.CholeskyGrad;
import org.tensorflow.op.linalg.ConjugateTranspose;
import org.tensorflow.op.linalg.Cross;
import org.tensorflow.op.linalg.Det;
import org.tensorflow.op.linalg.Eig;
import org.tensorflow.op.linalg.Einsum;
import org.tensorflow.op.linalg.EuclideanNorm;
import org.tensorflow.op.linalg.Inv;
import org.tensorflow.op.linalg.LoadAndRemapMatrix;
import org.tensorflow.op.linalg.LogMatrixDeterminant;
import org.tensorflow.op.linalg.Lu;
import org.tensorflow.op.linalg.MatMul;
import org.tensorflow.op.linalg.MatrixDiag;
import org.tensorflow.op.linalg.MatrixDiagPart;
import org.tensorflow.op.linalg.MatrixDiagPartV3;
import org.tensorflow.op.linalg.MatrixDiagV3;
import org.tensorflow.op.linalg.MatrixSetDiag;
import org.tensorflow.op.linalg.MatrixSolveLs;
import org.tensorflow.op.linalg.Qr;
import org.tensorflow.op.linalg.QuantizedMatMul;
import org.tensorflow.op.linalg.SelfAdjointEig;
import org.tensorflow.op.linalg.Solve;
import org.tensorflow.op.linalg.Sqrtm;
import org.tensorflow.op.linalg.Svd;
import org.tensorflow.op.linalg.TensorDiag;
import org.tensorflow.op.linalg.TensorDiagPart;
import org.tensorflow.op.linalg.Transpose;
import org.tensorflow.op.linalg.TriangularSolve;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TFloat64;
import org.tensorflow.types.TInt32;
import org.tensorflow.types.TInt64;
import org.tensorflow.types.TString;
import org.tensorflow.types.family.TNumber;
import org.tensorflow.types.family.TType;
/**
* An API for building {@code linalg} operations as {@link Op Op}s
*
* @see {@link Ops}
*/
public final class LinalgOps {
private final Scope scope;
private final Ops ops;
LinalgOps(Ops ops) {
this.scope = ops.scope();
this.ops = ops;
}
/**
* Copy a tensor setting everything outside a central band in each innermost matrix to zero.
* The {@code band} part is computed as follows:
* Assume {@code input} has {@code k} dimensions {@code [I, J, K, ..., M, N]}, then the output is a
* tensor with the same shape where
* <p>{@code band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]}.
* <p>The indicator function
   * <p>{@code in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) && (num_upper < 0 || (n-m) <= num_upper)}.
* <p>For example:
* <pre>
* # if 'input' is [[ 0, 1, 2, 3]
* # [-1, 0, 1, 2]
* # [-2, -1, 0, 1]
* # [-3, -2, -1, 0]],
*
* tf.linalg.band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]
* [-1, 0, 1, 2]
* [ 0, -1, 0, 1]
* [ 0, 0, -1, 0]],
*
* tf.linalg.band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]
* [-1, 0, 1, 0]
* [-2, -1, 0, 1]
* [ 0, -2, -1, 0]]
* </pre>
* <p>Useful special cases:
* <pre>
* tf.linalg.band_part(input, 0, -1) ==> Upper triangular part.
* tf.linalg.band_part(input, -1, 0) ==> Lower triangular part.
* tf.linalg.band_part(input, 0, 0) ==> Diagonal.
* </pre>
*
* @param <T> data type for {@code band} output
* @param input Rank {@code k} tensor.
* @param numLower 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
* lower triangle.
* @param numUpper 0-D tensor. Number of superdiagonals to keep. If negative, keep
* entire upper triangle.
* @param <T> data type for {@code MatrixBandPart} output and operands
* @param <U> data type for {@code MatrixBandPart} output and operands
* @return a new instance of BandPart
*/
public <T extends TType, U extends TNumber> BandPart<T> bandPart(Operand<T> input,
Operand<U> numLower, Operand<U> numUpper) {
return BandPart.create(scope, input, numLower, numUpper);
}
/**
* The BatchCholesky operation
*
* @param <T> data type for {@code output} output
* @param input The input value
* @param <T> data type for {@code BatchCholesky} output and operands
* @return a new instance of BatchCholesky
*/
public <T extends TNumber> BatchCholesky<T> batchCholesky(Operand<T> input) {
return BatchCholesky.create(scope, input);
}
/**
* The BatchCholeskyGrad operation
*
* @param <T> data type for {@code output} output
* @param l The l value
* @param grad The grad value
* @param <T> data type for {@code BatchCholeskyGrad} output and operands
* @return a new instance of BatchCholeskyGrad
*/
public <T extends TNumber> BatchCholeskyGrad<T> batchCholeskyGrad(Operand<T> l, Operand<T> grad) {
return BatchCholeskyGrad.create(scope, l, grad);
}
/**
* The BatchMatrixBandPart operation
*
* @param <T> data type for {@code band} output
* @param input The input value
* @param numLower The numLower value
* @param numUpper The numUpper value
* @param <T> data type for {@code BatchMatrixBandPart} output and operands
* @return a new instance of BatchMatrixBandPart
*/
public <T extends TType> BatchMatrixBandPart<T> batchMatrixBandPart(Operand<T> input,
Operand<TInt64> numLower, Operand<TInt64> numUpper) {
return BatchMatrixBandPart.create(scope, input, numLower, numUpper);
}
/**
* The BatchMatrixDeterminant operation
*
* @param <T> data type for {@code output} output
* @param input The input value
* @param <T> data type for {@code BatchMatrixDeterminant} output and operands
* @return a new instance of BatchMatrixDeterminant
*/
public <T extends TType> BatchMatrixDeterminant<T> batchMatrixDeterminant(Operand<T> input) {
return BatchMatrixDeterminant.create(scope, input);
}
/**
* The BatchMatrixDiag operation
*
* @param <T> data type for {@code output} output
* @param diagonal The diagonal value
* @param <T> data type for {@code BatchMatrixDiag} output and operands
* @return a new instance of BatchMatrixDiag
*/
public <T extends TType> BatchMatrixDiag<T> batchMatrixDiag(Operand<T> diagonal) {
return BatchMatrixDiag.create(scope, diagonal);
}
/**
* The BatchMatrixDiagPart operation
*
* @param <T> data type for {@code diagonal} output
* @param input The input value
* @param <T> data type for {@code BatchMatrixDiagPart} output and operands
* @return a new instance of BatchMatrixDiagPart
*/
public <T extends TType> BatchMatrixDiagPart<T> batchMatrixDiagPart(Operand<T> input) {
return BatchMatrixDiagPart.create(scope, input);
}
/**
* The BatchMatrixInverse operation
*
* @param <T> data type for {@code output} output
* @param input The input value
* @param options carries optional attribute values
* @param <T> data type for {@code BatchMatrixInverse} output and operands
* @return a new instance of BatchMatrixInverse
*/
public <T extends TNumber> BatchMatrixInverse<T> batchMatrixInverse(Operand<T> input,
BatchMatrixInverse.Options... options) {
return BatchMatrixInverse.create(scope, input, options);
}
/**
* The BatchMatrixSetDiag operation
*
* @param <T> data type for {@code output} output
* @param input The input value
* @param diagonal The diagonal value
* @param <T> data type for {@code BatchMatrixSetDiag} output and operands
* @return a new instance of BatchMatrixSetDiag
*/
public <T extends TType> BatchMatrixSetDiag<T> batchMatrixSetDiag(Operand<T> input,
Operand<T> diagonal) {
return BatchMatrixSetDiag.create(scope, input, diagonal);
}
/**
* The BatchMatrixSolve operation
*
* @param <T> data type for {@code output} output
* @param matrix The matrix value
* @param rhs The rhs value
* @param options carries optional attribute values
* @param <T> data type for {@code BatchMatrixSolve} output and operands
* @return a new instance of BatchMatrixSolve
*/
public <T extends TNumber> BatchMatrixSolve<T> batchMatrixSolve(Operand<T> matrix, Operand<T> rhs,
BatchMatrixSolve.Options... options) {
return BatchMatrixSolve.create(scope, matrix, rhs, options);
}
/**
* The BatchMatrixSolveLs operation
*
* @param <T> data type for {@code output} output
* @param matrix The matrix value
* @param rhs The rhs value
* @param l2Regularizer The l2Regularizer value
* @param options carries optional attribute values
* @param <T> data type for {@code BatchMatrixSolveLs} output and operands
* @return a new instance of BatchMatrixSolveLs
*/
public <T extends TNumber> BatchMatrixSolveLs<T> batchMatrixSolveLs(Operand<T> matrix,
Operand<T> rhs, Operand<TFloat64> l2Regularizer, BatchMatrixSolveLs.Options... options) {
return BatchMatrixSolveLs.create(scope, matrix, rhs, l2Regularizer, options);
}
/**
* The BatchMatrixTriangularSolve operation
*
* @param <T> data type for {@code output} output
* @param matrix The matrix value
* @param rhs The rhs value
* @param options carries optional attribute values
* @param <T> data type for {@code BatchMatrixTriangularSolve} output and operands
* @return a new instance of BatchMatrixTriangularSolve
*/
public <T extends TNumber> BatchMatrixTriangularSolve<T> batchMatrixTriangularSolve(
Operand<T> matrix, Operand<T> rhs, BatchMatrixTriangularSolve.Options... options) {
return BatchMatrixTriangularSolve.create(scope, matrix, rhs, options);
}
/**
* The BatchSelfAdjointEigV2 operation
*
* @param <T> data type for {@code e} output
* @param input The input value
* @param options carries optional attribute values
* @param <T> data type for {@code BatchSelfAdjointEigV2} output and operands
* @return a new instance of BatchSelfAdjointEig
*/
public <T extends TNumber> BatchSelfAdjointEig<T> batchSelfAdjointEig(Operand<T> input,
BatchSelfAdjointEig.Options... options) {
return BatchSelfAdjointEig.create(scope, input, options);
}
/**
* The BatchSvd operation
*
* @param <T> data type for {@code s} output
* @param input The input value
* @param options carries optional attribute values
* @param <T> data type for {@code BatchSvd} output and operands
* @return a new instance of BatchSvd
*/
public <T extends TType> BatchSvd<T> batchSvd(Operand<T> input, BatchSvd.Options... options) {
return BatchSvd.create(scope, input, options);
}
/**
* Computes the Cholesky decomposition of one or more square matrices.
* The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions
* form square matrices.
* <p>The input has to be symmetric and positive definite. Only the lower-triangular
* part of the input will be used for this operation. The upper-triangular part
* will not be read.
* <p>The output is a tensor of the same shape as the input
* containing the Cholesky decompositions for all input submatrices {@code [..., :, :]}.
* <p><strong>Note</strong>: The gradient computation on GPU is faster for large matrices but
* not for large batch dimensions when the submatrices are small. In this
* case it might be faster to use the CPU.
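* <p>For illustration, a minimal eager-mode sketch (the 2x2 values are arbitrary but symmetric
* positive definite; an {@code Ops} handle such as one from {@code Ops.create()} is assumed):
* <pre>{@code
* Ops tf = Ops.create();
* Operand<TFloat32> a = tf.constant(new float[][] {
*     {4.0f, 2.0f},
*     {2.0f, 3.0f}});
* Cholesky<TFloat32> l = tf.linalg.cholesky(a);  // lower-triangular L with a = l * transpose(l)
* }</pre>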
*
* @param <T> data type for {@code output} output
* @param input Shape is {@code [..., M, M]}.
* @param <T> data type for {@code Cholesky} output and operands
* @return a new instance of Cholesky
*/
public <T extends TType> Cholesky<T> cholesky(Operand<T> input) {
return Cholesky.create(scope, input);
}
/**
* Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
* For an explanation see "Differentiation of the Cholesky algorithm" by
* <NAME> http://arxiv.org/abs/1602.07527.
*
* @param <T> data type for {@code output} output
* @param l Output of batch Cholesky algorithm l = cholesky(A). Shape is {@code [..., M, M]}.
* Algorithm depends only on lower triangular part of the innermost matrices of
* this tensor.
* @param grad df/dl where f is some scalar function. Shape is {@code [..., M, M]}.
* Algorithm depends only on lower triangular part of the innermost matrices of
* this tensor.
* @param <T> data type for {@code CholeskyGrad} output and operands
* @return a new instance of CholeskyGrad
*/
public <T extends TNumber> CholeskyGrad<T> choleskyGrad(Operand<T> l, Operand<T> grad) {
return CholeskyGrad.create(scope, l, grad);
}
/**
* Shuffle dimensions of x according to a permutation and conjugate the result.
* The output {@code y} has the same rank as {@code x}. The shapes of {@code x} and {@code y} satisfy:
* {@code y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]}
* {@code y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])}
*
* @param <T> data type for {@code y} output
* @param x The x value
* @param perm The perm value
* @param <T> data type for {@code ConjugateTranspose} output and operands
* @return a new instance of ConjugateTranspose
*/
public <T extends TType> ConjugateTranspose<T> conjugateTranspose(Operand<T> x,
Operand<? extends TNumber> perm) {
return ConjugateTranspose.create(scope, x, perm);
}
/**
* Compute the pairwise cross product.
* {@code a} and {@code b} must be the same shape; they can either be simple 3-element vectors,
* or any shape where the innermost dimension is 3. In the latter case, each pair
* of corresponding 3-element vectors is cross-multiplied independently.
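* <p>For illustration, a minimal eager-mode sketch (values are arbitrary; an {@code Ops}
* handle such as one from {@code Ops.create()} is assumed):
* <pre>{@code
* Ops tf = Ops.create();
* Operand<TFloat32> a = tf.constant(new float[] {1.0f, 0.0f, 0.0f});
* Operand<TFloat32> b = tf.constant(new float[] {0.0f, 1.0f, 0.0f});
* Cross<TFloat32> c = tf.linalg.cross(a, b);  // [0, 0, 1]
* }</pre>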
*
* @param <T> data type for {@code product} output
* @param a A tensor containing 3-element vectors.
* @param b Another tensor, of same type and shape as {@code a}.
* @param <T> data type for {@code Cross} output and operands
* @return a new instance of Cross
*/
public <T extends TNumber> Cross<T> cross(Operand<T> a, Operand<T> b) {
return Cross.create(scope, a, b);
}
/**
* Computes the determinant of one or more square matrices.
* The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions
* form square matrices. The output is a tensor containing the determinants
* for all input submatrices {@code [..., :, :]}.
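* <p>For illustration, a minimal eager-mode sketch (values are arbitrary; an {@code Ops}
* handle such as one from {@code Ops.create()} is assumed):
* <pre>{@code
* Ops tf = Ops.create();
* Operand<TFloat32> m = tf.constant(new float[][] {{1.0f, 2.0f}, {3.0f, 4.0f}});
* Det<TFloat32> d = tf.linalg.det(m);  // scalar determinant, here 1*4 - 2*3 = -2
* }</pre>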
*
* @param <T> data type for {@code output} output
* @param input Shape is {@code [..., M, M]}.
* @param <T> data type for {@code MatrixDeterminant} output and operands
* @return a new instance of Det
*/
public <T extends TType> Det<T> det(Operand<T> input) {
return Det.create(scope, input);
}
/**
* Computes the eigen decomposition of one or more square matrices.
* Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in
* {@code input} such that {@code input[..., :, :] = v[..., :, :] * diag(e[..., :])}. The eigenvalues
* are sorted in non-decreasing order.
* <pre>
* # a is a tensor.
* # e is a tensor of eigenvalues.
* # v is a tensor of eigenvectors.
* e, v = eig(a)
* e = eig(a, compute_v=False)
* </pre>
*
* @param <U> data type for {@code e} output
* @param input {@code Tensor} input of shape {@code [N, N]}.
* @param Tout The value of the Tout attribute
* @param options carries optional attribute values
* @param <U> data type for {@code Eig} output and operands
* @return a new instance of Eig
*/
public <U extends TType> Eig<U> eig(Operand<? extends TType> input, Class<U> Tout,
Eig.Options... options) {
return Eig.create(scope, input, Tout, options);
}
/**
* Tensor contraction according to Einstein summation convention.
* Implements generalized Tensor contraction and reduction. Each input Tensor must
* have a corresponding input subscript appearing in the comma-separated left-hand
* side of the equation. The right-hand side of the equation consists of the
* output subscript. The input subscripts and the output subscript should consist
* of zero or more named axis labels and at most one ellipsis ({@code ...}).
* <p>The named axis labels may be any single character other than those having
* special meaning, namely {@code ,.->}. The behavior of this Op is undefined if it
* receives an ill-formatted equation; since the validation is done at
* graph-building time, we omit format validation checks at runtime.
* <p>Note: This Op is <em>not</em> intended to be called by the user; instead users should
* call {@code tf.einsum} directly. It is a hidden Op used by {@code tf.einsum}.
* <p>Operations are applied to the input(s) according to the following rules:
* <p>(a) Generalized Diagonals: For input dimensions corresponding to axis labels
* appearing more than once in the same input subscript, we take the
* generalized ({@code k}-dimensional) diagonal.
* For example, in the equation {@code iii->i} with input shape {@code [3, 3, 3]}, the
* generalized diagonal would consist of {@code 3} elements at indices {@code (0, 0, 0)},
* {@code (1, 1, 1)} and {@code (2, 2, 2)} to create a Tensor of shape {@code [3]}.
* <p>(b) Reduction: Axes corresponding to labels appearing only in one input
* subscript but not in the output subscript are summed over prior to Tensor
* contraction.
* For example, in the equation {@code ab,bc->b}, the axis labels {@code a} and {@code c} are
* the reduction axis labels.
* <p>(c) Batch Dimensions: Axes corresponding to labels appearing in each of the
* input subscripts and also in the output subscript make up the batch
* dimensions in Tensor contraction. Unnamed axis labels corresponding to
* ellipsis ({@code ...}) also correspond to batch dimensions.
* For example, for the equation denoting batch matrix multiplication,
* {@code bij,bjk->bik}, the axis label {@code b} corresponds to a batch dimension.
* <p>(d) Contraction: In case of binary einsum, axes corresponding to labels
* appearing in two different inputs (and not in the output) are contracted
* against each other.
* Considering the batch matrix multiplication equation again
* ({@code bij,bjk->bik}), the contracted axis label is {@code j}.
* <p>(e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis
* labels, the opposite operation of (a) is applied. For example, in the
* equation {@code i->iii}, and input shape {@code [3]}, the output of shape {@code [3, 3, 3]}
* are all zeros, except for the (generalized) diagonal which is populated
* with values from the input.
* Note: This operation is not supported by {@code np.einsum} or {@code tf.einsum}; it is
* provided to enable computing the symbolic gradient of {@code tf.einsum}.
* <p>The output subscripts must contain only labels appearing in at least one of the
* input subscripts. Furthermore, all dimensions mapping to the same axis label
* must be equal.
* <p>Any of the input and output subscripts may contain at most a single ellipsis
* ({@code ...}). These ellipsis are mapped against dimensions not corresponding to any
* named axis label. If two inputs contain ellipsis, then they are broadcasted
* according to standard NumPy broadcasting
* <a href="http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html">rules</a> .
* <p>The broadcasted dimensions are placed in the corresponding location of the
* ellipsis in the output subscript. If the broadcasted dimensions are non-empty
* and the output subscripts do not contain ellipsis, then an InvalidArgument error
* is raised.
* <p>{@literal @}compatibility(numpy)<br>
* Similar to <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html">{@code numpy.einsum}</a> .
* <p>Comparison with {@code numpy.einsum}:
* <ul>
* <li>This Op only supports unary and binary forms of {@code numpy.einsum}.</li>
* <li>This Op does not support implicit form. (i.e. equations without {@code ->}).</li>
* <li>This Op also supports repeated indices in the output subscript, which is not
* supported by {@code numpy.einsum}.
* <br>{@literal @}end_compatibility</li>
* </ul>
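* <p>For illustration, a minimal eager-mode sketch of the binary and unary forms (values are
* arbitrary; an {@code Ops} handle such as one from {@code Ops.create()} is assumed):
* <pre>{@code
* Ops tf = Ops.create();
* Operand<TFloat32> a = tf.constant(new float[][] {{1.0f, 2.0f}, {3.0f, 4.0f}});
* Operand<TFloat32> b = tf.constant(new float[][] {{5.0f, 6.0f}, {7.0f, 8.0f}});
* Einsum<TFloat32> matmul = tf.linalg.einsum(java.util.Arrays.asList(a, b), "ij,jk->ik");
* Einsum<TFloat32> trace = tf.linalg.einsum(java.util.Collections.singletonList(a), "ii->");
* }</pre>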
*
* @param <T> data type for {@code output} output
* @param inputs List of 1 or 2 Tensors.
* @param equation String describing the Einstein Summation operation; in the format of np.einsum.
* @param <T> data type for {@code Einsum} output and operands
* @return a new instance of Einsum
*/
public <T extends TType> Einsum<T> einsum(Iterable<Operand<T>> inputs, String equation) {
return Einsum.create(scope, inputs, equation);
}
/**
* Computes the euclidean norm of elements across dimensions of a tensor.
* Reduces {@code input} along the dimensions given in {@code axis}. Unless
* {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in
* {@code axis}. If {@code keep_dims} is true, the reduced dimensions are
* retained with length 1.
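* <p>For illustration, a minimal eager-mode sketch (values are arbitrary; an {@code Ops}
* handle such as one from {@code Ops.create()} is assumed):
* <pre>{@code
* Ops tf = Ops.create();
* Operand<TFloat32> x = tf.constant(new float[][] {{3.0f, 4.0f}, {6.0f, 8.0f}});
* EuclideanNorm<TFloat32> rows = tf.linalg.euclideanNorm(x, tf.constant(new int[] {1}));     // [5, 10]
* EuclideanNorm<TFloat32> all = tf.linalg.euclideanNorm(x, tf.constant(new int[] {0, 1}));   // sqrt(125)
* }</pre>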
*
* @param <T> data type for {@code output} output
* @param input The tensor to reduce.
* @param axis The dimensions to reduce. Must be in the range
* {@code [-rank(input), rank(input))}.
* @param options carries optional attribute values
* @param <T> data type for {@code EuclideanNorm} output and operands
* @return a new instance of EuclideanNorm
*/
public <T extends TType> EuclideanNorm<T> euclideanNorm(Operand<T> input,
Operand<? extends TNumber> axis, EuclideanNorm.Options... options) {
return EuclideanNorm.create(scope, input, axis, options);
}
/**
* Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).
* The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions
* form square matrices. The output is a tensor of the same shape as the input
* containing the inverse for all input submatrices {@code [..., :, :]}.
* <p>The op uses LU decomposition with partial pivoting to compute the inverses.
* <p>If a matrix is not invertible there is no guarantee what the op does. It
* may detect the condition and raise an exception or it may simply return a
* garbage result.
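* <p>For illustration, a minimal eager-mode sketch (values are arbitrary; an {@code Ops}
* handle such as one from {@code Ops.create()} is assumed):
* <pre>{@code
* Ops tf = Ops.create();
* Operand<TFloat32> m = tf.constant(new float[][] {{4.0f, 7.0f}, {2.0f, 6.0f}});
* Inv<TFloat32> mInv = tf.linalg.inv(m);  // [[0.6, -0.7], [-0.2, 0.4]]
* }</pre>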
*
* @param <T> data type for {@code output} output
* @param input Shape is {@code [..., M, M]}.
* @param options carries optional attribute values
* @param <T> data type for {@code MatrixInverse} output and operands
* @return a new instance of Inv
*/
public <T extends TType> Inv<T> inv(Operand<T> input, Inv.Options... options) {
return Inv.create(scope, input, options);
}
/**
* Loads a 2-D (matrix) {@code Tensor} with name {@code old_tensor_name} from the checkpoint
* at {@code ckpt_path} and potentially reorders its rows and columns using the
* specified remappings.
* <p>Most users should use one of the wrapper initializers (such as
* {@code tf.contrib.framework.load_and_remap_matrix_initializer}) instead of this
* function directly.
* <p>The remappings are 1-D tensors with the following properties:
* <ul>
* <li>{@code row_remapping} must have exactly {@code num_rows} entries. Row {@code i} of the output
* matrix will be initialized from the row corresponding to index
* {@code row_remapping[i]} in the old {@code Tensor} from the checkpoint.</li>
* <li>{@code col_remapping} must have either 0 entries (indicating that no column
* reordering is needed) or {@code num_cols} entries. If specified, column {@code j} of the
* output matrix will be initialized from the column corresponding to index
* {@code col_remapping[j]} in the old {@code Tensor} from the checkpoint.</li>
* <li>A value of -1 in either of the remappings signifies a "missing" entry. In that
* case, values from the {@code initializing_values} tensor will be used to fill that
* missing row or column. If {@code row_remapping} has {@code r} missing entries and
* {@code col_remapping} has {@code c} missing entries, then the following condition must be
* true:</li>
* </ul>
* <p>{@code (r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)}
* <p>The remapping tensors can be generated using the GenerateVocabRemapping op.
* <p>As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
* initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
* the value from row i, column j of the old tensor in the checkpoint, the output
* matrix will look like the following:
* <p>[[w(1, 0), w(1, 2), 0.5],
* [w(0, 0), w(0, 2), -0.5],
* [0.25, -0.25, 42]]
*
* @param ckptPath Path to the TensorFlow checkpoint (version 2, {@code TensorBundle}) from
* which the old matrix {@code Tensor} will be loaded.
* @param oldTensorName Name of the 2-D {@code Tensor} to load from checkpoint.
* @param rowRemapping An int {@code Tensor} of row remappings (generally created by
* {@code generate_vocab_remapping}). Even if no row remapping is needed, this must
* still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
* index-valued {@code Tensor} (e.g. [8, 9, 10, ...], for partitioned {@code Variables}).
* @param colRemapping An int {@code Tensor} of column remappings (generally created by
* {@code generate_vocab_remapping}). May be a size-0 {@code Tensor} if only row remapping
* is to be done (e.g. column ordering is the same).
* @param initializingValues A float {@code Tensor} containing values to fill in for cells
* in the output matrix that are not loaded from the checkpoint. Length must be
* exactly the same as the number of missing / new cells.
* @param numRows Number of rows (length of the 1st dimension) in the output matrix.
* @param numCols Number of columns (length of the 2nd dimension) in the output matrix.
* @param options carries optional attribute values
* @return a new instance of LoadAndRemapMatrix
*/
public LoadAndRemapMatrix loadAndRemapMatrix(Operand<TString> ckptPath,
Operand<TString> oldTensorName, Operand<TInt64> rowRemapping, Operand<TInt64> colRemapping,
Operand<TFloat32> initializingValues, Long numRows, Long numCols,
LoadAndRemapMatrix.Options... options) {
return LoadAndRemapMatrix.create(scope, ckptPath, oldTensorName, rowRemapping, colRemapping, initializingValues, numRows, numCols, options);
}
/**
* Computes the sign and the log of the absolute value of the determinant of
* one or more square matrices.
* <p>The input is a tensor of shape {@code [N, M, M]} whose inner-most 2 dimensions
* form square matrices. The outputs are two tensors containing the signs and
* absolute values of the log determinants for all N input submatrices
* {@code [..., :, :]} such that {@code determinant = sign*exp(log_abs_determinant)}.
* The {@code log_abs_determinant} is computed as {@code det(P)*sum(log(diag(LU)))} where {@code LU}
* is the {@code LU} decomposition of the input and {@code P} is the corresponding
* permutation matrix.
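* <p>For illustration, a minimal eager-mode sketch (values are arbitrary; an {@code Ops}
* handle such as one from {@code Ops.create()} is assumed, and the accessor names below are
* assumed to mirror the op's {@code sign} and {@code log_abs_determinant} outputs):
* <pre>{@code
* Ops tf = Ops.create();
* Operand<TFloat32> m = tf.constant(new float[][][] {{{1.0f, 2.0f}, {3.0f, 4.0f}}});  // batch of one 2x2 matrix
* LogMatrixDeterminant<TFloat32> result = tf.linalg.logMatrixDeterminant(m);
* // result.sign() is [-1] and result.logAbsDeterminant() is [log(2)] since det = -2
* }</pre>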
*
* @param <T> data type for {@code sign} output
* @param input Shape is {@code [N, M, M]}.
* @param <T> data type for {@code LogMatrixDeterminant} output and operands
* @return a new instance of LogMatrixDeterminant
*/
public <T extends TType> LogMatrixDeterminant<T> logMatrixDeterminant(Operand<T> input) {
return LogMatrixDeterminant.create(scope, input);
}
/**
* Computes the LU decomposition of one or more square matrices.
* The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions
* form square matrices.
* <p>The input has to be invertible.
* <p>The output consists of two tensors LU and P containing the LU decomposition
* of all input submatrices {@code [..., :, :]}. LU encodes the lower triangular and
* upper triangular factors.
* <p>For each input submatrix of shape {@code [M, M]}, L is a lower triangular matrix of
* shape {@code [M, M]} with unit diagonal whose entries correspond to the strictly lower
* triangular part of LU. U is an upper triangular matrix of shape {@code [M, M]} whose
* entries correspond to the upper triangular part, including the diagonal, of LU.
* <p>P represents a permutation matrix encoded as a list of indices each between {@code 0}
* and {@code M-1}, inclusive. If P_mat denotes the permutation matrix corresponding to
* P, then L, U and P satisfy P_mat * input = L * U.
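* <p>For illustration, a minimal eager-mode sketch (values are arbitrary; an {@code Ops}
* handle such as one from {@code Ops.create()} is assumed, and the accessor names below are
* assumed to mirror the op's {@code lu} and {@code p} outputs):
* <pre>{@code
* Ops tf = Ops.create();
* Operand<TFloat32> m = tf.constant(new float[][] {{4.0f, 3.0f}, {6.0f, 3.0f}});
* Lu<TFloat32, TInt32> dec = tf.linalg.lu(m);
* // dec.lu() packs the strictly lower part of L (unit diagonal) and U; dec.p() holds the permutation indices
* }</pre>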
*
* @param <T> data type for {@code lu} output
* @param <U> data type for {@code p} output
* @param input A tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions form matrices of
* size {@code [M, M]}.
* @param <T> data type for {@code Lu} output and operands
* @return a new instance of Lu, with default output types
*/
public <T extends TType> Lu<T, TInt32> lu(Operand<T> input) {
return Lu.create(scope, input);
}
/**
* Computes the LU decomposition of one or more square matrices.
* The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions
* form square matrices.
* <p>The input has to be invertible.
* <p>The output consists of two tensors LU and P containing the LU decomposition
* of all input submatrices {@code [..., :, :]}. LU encodes the lower triangular and
* upper triangular factors.
* <p>For each input submatrix of shape {@code [M, M]}, L is a lower triangular matrix of
* shape {@code [M, M]} with unit diagonal whose entries correspond to the strictly lower
* triangular part of LU. U is an upper triangular matrix of shape {@code [M, M]} whose
* entries correspond to the upper triangular part, including the diagonal, of LU.
* <p>P represents a permutation matrix encoded as a list of indices each between {@code 0}
* and {@code M-1}, inclusive. If P_mat denotes the permutation matrix corresponding to
* P, then L, U and P satisfy P_mat * input = L * U.
*
* @param <T> data type for {@code lu} output
* @param <U> data type for {@code p} output
* @param input A tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions form matrices of
* size {@code [M, M]}.
* @param outputIdxType The value of the outputIdxType attribute
* @param <T> data type for {@code Lu} output and operands
* @param <U> data type for {@code Lu} output and operands
* @return a new instance of Lu
*/
public <T extends TType, U extends TNumber> Lu<T, U> lu(Operand<T> input,
Class<U> outputIdxType) {
return Lu.create(scope, input, outputIdxType);
}
/**
* Multiply the matrix "a" by the matrix "b".
* The inputs must be two-dimensional matrices and the inner dimension of
* "a" (after being transposed if transpose_a is true) must match the
* outer dimension of "b" (after being transposed if transpose_b is
* true).
* <p><em>Note</em>: The default kernel implementation for MatMul on GPUs uses
* cublas.
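* <p>For illustration, a minimal eager-mode sketch (values are arbitrary; an {@code Ops}
* handle such as one from {@code Ops.create()} is assumed, and the generated
* {@code MatMul.transposeB} option factory is assumed for the transposed variant):
* <pre>{@code
* Ops tf = Ops.create();
* Operand<TFloat32> a = tf.constant(new float[][] {{1.0f, 2.0f}, {3.0f, 4.0f}});
* Operand<TFloat32> b = tf.constant(new float[][] {{5.0f, 6.0f}, {7.0f, 8.0f}});
* MatMul<TFloat32> ab = tf.linalg.matMul(a, b);                            // a * b
* MatMul<TFloat32> abT = tf.linalg.matMul(a, b, MatMul.transposeB(true));  // a * transpose(b)
* }</pre>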
*
* @param <T> data type for {@code product} output
* @param a The a value
* @param b The b value
* @param options carries optional attribute values
* @param <T> data type for {@code MatMul} output and operands
* @return a new instance of MatMul
*/
public <T extends TType> MatMul<T> matMul(Operand<T> a, Operand<T> b, MatMul.Options... options) {
return MatMul.create(scope, a, b, options);
}
/**
* Returns a batched diagonal tensor with given batched diagonal values.
* Returns a tensor with the contents in {@code diagonal} as {@code k[0]}-th to {@code k[1]}-th
* diagonals of a matrix, with everything else padded with {@code padding}. {@code num_rows}
* and {@code num_cols} specify the dimension of the innermost matrix of the output. If
* both are not specified, the op assumes the innermost matrix is square and infers
* its size from {@code k} and the innermost dimension of {@code diagonal}. If only one of them
* is specified, the op assumes the unspecified value is the smallest possible
* based on other criteria.
* <p>Let {@code diagonal} have {@code r} dimensions {@code [I, J, ..., L, M, N]}. The output tensor has
* rank {@code r+1} with shape {@code [I, J, ..., L, M, num_rows, num_cols]} when only one
* diagonal is given ({@code k} is an integer or {@code k[0] == k[1]}). Otherwise, it has rank
* {@code r} with shape {@code [I, J, ..., L, num_rows, num_cols]}.
* <p>The second innermost dimension of {@code diagonal} has double meaning.
* When {@code k} is scalar or {@code k[0] == k[1]}, {@code M} is part of the batch size
* [I, J, ..., M], and the output tensor is:
* <pre>
* output[i, j, ..., l, m, n]
* = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
* padding_value ; otherwise
* </pre>
* <p>Otherwise, {@code M} is treated as the number of diagonals for the matrix in the
* same batch ({@code M = k[1]-k[0]+1}), and the output tensor is:
* <pre>
* output[i, j, ..., l, m, n]
* = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
* padding_value ; otherwise
* </pre>
* <p>where {@code d = n - m}, {@code diag_index = k[1] - d}, and {@code index_in_diag = n - max(d, 0)}.
* <p>For example:
* <pre>
* # The main diagonal.
* diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)
* [5, 6, 7, 8]])
* tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)
* [0, 2, 0, 0],
* [0, 0, 3, 0],
* [0, 0, 0, 4]],
* [[5, 0, 0, 0],
* [0, 6, 0, 0],
* [0, 0, 7, 0],
* [0, 0, 0, 8]]]
*
* # A superdiagonal (per batch).
* diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)
* [4, 5, 6]])
* tf.matrix_diag(diagonal, k = 1)
* ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)
* [0, 0, 2, 0],
* [0, 0, 0, 3],
* [0, 0, 0, 0]],
* [[0, 4, 0, 0],
* [0, 0, 5, 0],
* [0, 0, 0, 6],
* [0, 0, 0, 0]]]
*
* # A band of diagonals.
* diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3)
* [4, 5, 0]],
* [[6, 7, 9],
* [9, 1, 0]]])
* tf.matrix_diag(diagonals, k = (-1, 0))
* ==> [[[1, 0, 0], # Output shape: (2, 3, 3)
* [4, 2, 0],
* [0, 5, 3]],
* [[6, 0, 0],
* [9, 7, 0],
* [0, 1, 9]]]
*
* # Rectangular matrix.
* diagonal = np.array([1, 2]) # Input shape: (2)
* tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
* ==> [[0, 0, 0, 0], # Output shape: (3, 4)
* [1, 0, 0, 0],
* [0, 2, 0, 0]]
*
* # Rectangular matrix with inferred num_cols and padding_value = 9.
* tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
* ==> [[9, 9], # Output shape: (3, 2)
* [1, 9],
* [9, 2]]
* </pre>
*
* @param <T> data type for {@code output} output
* @param diagonal Rank {@code r}, where {@code r >= 1}
* @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
* diagonal, and negative value means subdiagonals. {@code k} can be a single integer
* (for a single diagonal) or a pair of integers specifying the low and high ends
* of a matrix band. {@code k[0]} must not be larger than {@code k[1]}.
* @param numRows The number of rows of the output matrix. If it is not provided, the op assumes
* the output matrix is a square matrix and infers the matrix size from k and the
* innermost dimension of {@code diagonal}.
* @param numCols The number of columns of the output matrix. If it is not provided, the op
* assumes the output matrix is a square matrix and infers the matrix size from
* k and the innermost dimension of {@code diagonal}.
* @param paddingValue The number to fill the area outside the specified diagonal band with.
* Default is 0.
* @param <T> data type for {@code MatrixDiagV2} output and operands
* @return a new instance of MatrixDiag
*/
public <T extends TType> MatrixDiag<T> matrixDiag(Operand<T> diagonal, Operand<TInt32> k,
Operand<TInt32> numRows, Operand<TInt32> numCols, Operand<T> paddingValue) {
return MatrixDiag.create(scope, diagonal, k, numRows, numCols, paddingValue);
}
/**
* Returns the batched diagonal part of a batched tensor.
* Returns a tensor with the {@code k[0]}-th to {@code k[1]}-th diagonals of the batched
* {@code input}.
* <p>Assume {@code input} has {@code r} dimensions {@code [I, J, ..., L, M, N]}.
* Let {@code max_diag_len} be the maximum length among all diagonals to be extracted,
* {@code max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))}
* Let {@code num_diags} be the number of diagonals to extract,
* {@code num_diags = k[1] - k[0] + 1}.
* <p>If {@code num_diags == 1}, the output tensor is of rank {@code r - 1} with shape
* {@code [I, J, ..., L, max_diag_len]} and values:
* <pre>
* diagonal[i, j, ..., l, n]
* = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
* padding_value ; otherwise.
* </pre>
* <p>where {@code y = max(-k[1], 0)}, {@code x = max(k[1], 0)}.
* <p>Otherwise, the output tensor has rank {@code r} with dimensions
* {@code [I, J, ..., L, num_diags, max_diag_len]} with values:
* <pre>
* diagonal[i, j, ..., l, m, n]
* = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
* padding_value ; otherwise.
* </pre>
* <p>where {@code d = k[1] - m}, {@code y = max(-d, 0)}, and {@code x = max(d, 0)}.
* <p>The input must be at least a matrix.
* <p>For example:
* <pre>
* input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)
* [5, 6, 7, 8],
* [9, 8, 7, 6]],
* [[5, 4, 3, 2],
* [1, 2, 3, 4],
* [5, 6, 7, 8]]])
*
* # A main diagonal from each batch.
* tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)
* [5, 2, 7]]
*
* # A superdiagonal from each batch.
* tf.matrix_diag_part(input, k = 1)
* ==> [[2, 7, 6], # Output shape: (2, 3)
* [4, 3, 8]]
*
* # A tridiagonal band from each batch.
* tf.matrix_diag_part(input, k = (-1, 1))
* ==> [[[2, 7, 6], # Output shape: (2, 3, 3)
* [1, 6, 7],
* [5, 8, 0]],
* [[4, 3, 8],
* [5, 2, 7],
* [1, 6, 0]]]
*
* # Padding value = 9
* tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
* ==> [[[4, 9, 9], # Output shape: (2, 3, 3)
* [3, 8, 9],
* [2, 7, 6]],
* [[2, 9, 9],
* [3, 4, 9],
* [4, 3, 8]]]
* </pre>
*
* @param <T> data type for {@code diagonal} output
* @param input Rank {@code r} tensor where {@code r >= 2}.
* @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
* diagonal, and negative value means subdiagonals. {@code k} can be a single integer
* (for a single diagonal) or a pair of integers specifying the low and high ends
* of a matrix band. {@code k[0]} must not be larger than {@code k[1]}.
* @param paddingValue The value to fill the area outside the specified diagonal band with.
* Default is 0.
* @param <T> data type for {@code MatrixDiagPartV2} output and operands
* @return a new instance of MatrixDiagPart
*/
public <T extends TType> MatrixDiagPart<T> matrixDiagPart(Operand<T> input, Operand<TInt32> k,
Operand<T> paddingValue) {
return MatrixDiagPart.create(scope, input, k, paddingValue);
}
/**
* Returns the batched diagonal part of a batched tensor.
* Returns a tensor with the {@code k[0]}-th to {@code k[1]}-th diagonals of the batched
* {@code input}.
* <p>Assume {@code input} has {@code r} dimensions {@code [I, J, ..., L, M, N]}.
* Let {@code max_diag_len} be the maximum length among all diagonals to be extracted,
* {@code max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))}
* Let {@code num_diags} be the number of diagonals to extract,
* {@code num_diags = k[1] - k[0] + 1}.
* <p>If {@code num_diags == 1}, the output tensor is of rank {@code r - 1} with shape
* {@code [I, J, ..., L, max_diag_len]} and values:
* <pre>
* diagonal[i, j, ..., l, n]
* = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
* padding_value ; otherwise.
* </pre>
* <p>where {@code y = max(-k[1], 0)}, {@code x = max(k[1], 0)}.
* <p>Otherwise, the output tensor has rank {@code r} with dimensions
* {@code [I, J, ..., L, num_diags, max_diag_len]} with values:
* <pre>
* diagonal[i, j, ..., l, m, n]
* = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
* padding_value ; otherwise.
* </pre>
* <p>where {@code d = k[1] - m}, {@code y = max(-d, 0) - offset}, and {@code x = max(d, 0) - offset}.
* <p>{@code offset} is zero except when the alignment of the diagonal is to the right.
* <pre>
* offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
* and `d >= 0`) or
* (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
* and `d <= 0`)
* 0 ; otherwise
* </pre>
* <p>where {@code diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))}.
* <p>The input must be at least a matrix.
* <p>For example:
* <pre>
* input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)
* [5, 6, 7, 8],
* [9, 8, 7, 6]],
* [[5, 4, 3, 2],
* [1, 2, 3, 4],
* [5, 6, 7, 8]]])
*
* # A main diagonal from each batch.
* tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)
* [5, 2, 7]]
*
* # A superdiagonal from each batch.
* tf.matrix_diag_part(input, k = 1)
* ==> [[2, 7, 6], # Output shape: (2, 3)
* [4, 3, 8]]
*
* # A band from each batch.
* tf.matrix_diag_part(input, k = (-1, 2))
* ==> [[[0, 3, 8], # Output shape: (2, 4, 3)
* [2, 7, 6],
* [1, 6, 7],
* [5, 8, 0]],
* [[0, 3, 4],
* [4, 3, 8],
* [5, 2, 7],
* [1, 6, 0]]]
*
* # LEFT_RIGHT alignment.
* tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT")
* ==> [[[3, 8, 0], # Output shape: (2, 4, 3)
* [2, 7, 6],
* [1, 6, 7],
* [0, 5, 8]],
* [[3, 4, 0],
* [4, 3, 8],
* [5, 2, 7],
* [0, 1, 6]]]
*
* # max_diag_len can be shorter than the main diagonal.
* tf.matrix_diag_part(input, k = (-2, -1))
* ==> [[[5, 8],
* [9, 0]],
* [[1, 6],
* [5, 0]]]
*
* # padding_value = 9
* tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
* ==> [[[9, 9, 4], # Output shape: (2, 3, 3)
* [9, 3, 8],
* [2, 7, 6]],
* [[9, 9, 2],
* [9, 3, 4],
* [4, 3, 8]]]
*
* </pre>
*
* @param <T> data type for {@code diagonal} output
* @param input Rank {@code r} tensor where {@code r >= 2}.
* @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
* diagonal, and negative value means subdiagonals. {@code k} can be a single integer
* (for a single diagonal) or a pair of integers specifying the low and high ends
* of a matrix band. {@code k[0]} must not be larger than {@code k[1]}.
* @param paddingValue The value to fill the area outside the specified diagonal band with.
* Default is 0.
* @param options carries optional attribute values
* @param <T> data type for {@code MatrixDiagPartV3} output and operands
* @return a new instance of MatrixDiagPartV3
*/
public <T extends TType> MatrixDiagPartV3<T> matrixDiagPartV3(Operand<T> input, Operand<TInt32> k,
Operand<T> paddingValue, MatrixDiagPartV3.Options... options) {
return MatrixDiagPartV3.create(scope, input, k, paddingValue, options);
}
/**
* Returns a batched diagonal tensor with given batched diagonal values.
* Returns a tensor with the contents in {@code diagonal} as {@code k[0]}-th to {@code k[1]}-th
* diagonals of a matrix, with everything else padded with {@code padding}. {@code num_rows}
* and {@code num_cols} specify the dimension of the innermost matrix of the output. If
* both are not specified, the op assumes the innermost matrix is square and infers
* its size from {@code k} and the innermost dimension of {@code diagonal}. If only one of them
* is specified, the op assumes the unspecified value is the smallest possible
* based on other criteria.
* <p>Let {@code diagonal} have {@code r} dimensions {@code [I, J, ..., L, M, N]}. The output tensor has
* rank {@code r+1} with shape {@code [I, J, ..., L, M, num_rows, num_cols]} when only one
* diagonal is given ({@code k} is an integer or {@code k[0] == k[1]}). Otherwise, it has rank
* {@code r} with shape {@code [I, J, ..., L, num_rows, num_cols]}.
* <p>The second innermost dimension of {@code diagonal} has double meaning.
* When {@code k} is scalar or {@code k[0] == k[1]}, {@code M} is part of the batch size
* [I, J, ..., M], and the output tensor is:
* <pre>
* output[i, j, ..., l, m, n]
* = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
* padding_value ; otherwise
* </pre>
* <p>Otherwise, {@code M} is treated as the number of diagonals for the matrix in the
* same batch ({@code M = k[1]-k[0]+1}), and the output tensor is:
* <pre>
* output[i, j, ..., l, m, n]
* = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
* padding_value ; otherwise
* </pre>
* <p>where {@code d = n - m}, {@code diag_index = k[1] - d}, and
* {@code index_in_diag = n - max(d, 0) + offset}.
* <p>{@code offset} is zero except when the alignment of the diagonal is to the right.
* <pre>
* offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
* and `d >= 0`) or
* (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
* and `d <= 0`)
* 0 ; otherwise
* </pre>
* <p>where {@code diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))}.
* <p>For example:
* <pre>
* # The main diagonal.
* diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)
* [5, 6, 7, 8]])
* tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)
* [0, 2, 0, 0],
* [0, 0, 3, 0],
* [0, 0, 0, 4]],
* [[5, 0, 0, 0],
* [0, 6, 0, 0],
* [0, 0, 7, 0],
* [0, 0, 0, 8]]]
*
* # A superdiagonal (per batch).
* diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)
* [4, 5, 6]])
* tf.matrix_diag(diagonal, k = 1)
* ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)
* [0, 0, 2, 0],
* [0, 0, 0, 3],
* [0, 0, 0, 0]],
* [[0, 4, 0, 0],
* [0, 0, 5, 0],
* [0, 0, 0, 6],
* [0, 0, 0, 0]]]
*
* # A tridiagonal band (per batch).
* diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3)
* [1, 2, 3],
* [4, 5, 0]],
* [[0, 2, 3],
* [6, 7, 9],
* [9, 1, 0]]])
* tf.matrix_diag(diagonals, k = (-1, 1))
* ==> [[[1, 8, 0], # Output shape: (2, 3, 3)
* [4, 2, 9],
* [0, 5, 3]],
* [[6, 2, 0],
* [9, 7, 3],
* [0, 1, 9]]]
*
* # LEFT_RIGHT alignment.
* diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3)
* [1, 2, 3],
* [0, 4, 5]],
* [[2, 3, 0],
* [6, 7, 9],
* [0, 9, 1]]])
* tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT")
* ==> [[[1, 8, 0], # Output shape: (2, 3, 3)
* [4, 2, 9],
* [0, 5, 3]],
* [[6, 2, 0],
* [9, 7, 3],
* [0, 1, 9]]]
*
* # Rectangular matrix.
* diagonal = np.array([1, 2]) # Input shape: (2)
* tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
* ==> [[0, 0, 0, 0], # Output shape: (3, 4)
* [1, 0, 0, 0],
* [0, 2, 0, 0]]
*
* # Rectangular matrix with inferred num_cols and padding_value = 9.
* tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
* ==> [[9, 9], # Output shape: (3, 2)
* [1, 9],
* [9, 2]]
*
* </pre>
*
* @param <T> data type for {@code output} output
* @param diagonal Rank {@code r}, where {@code r >= 1}
* @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
* diagonal, and negative value means subdiagonals. {@code k} can be a single integer
* (for a single diagonal) or a pair of integers specifying the low and high ends
* of a matrix band. {@code k[0]} must not be larger than {@code k[1]}.
* @param numRows The number of rows of the output matrix. If it is not provided, the op assumes
* the output matrix is a square matrix and infers the matrix size from k and the
* innermost dimension of {@code diagonal}.
* @param numCols The number of columns of the output matrix. If it is not provided, the op
* assumes the output matrix is a square matrix and infers the matrix size from
* k and the innermost dimension of {@code diagonal}.
* @param paddingValue The number to fill the area outside the specified diagonal band with.
* Default is 0.
* @param options carries optional attribute values
* @param <T> data type for {@code MatrixDiagV3} output and operands
* @return a new instance of MatrixDiagV3
*/
public <T extends TType> MatrixDiagV3<T> matrixDiagV3(Operand<T> diagonal, Operand<TInt32> k,
Operand<TInt32> numRows, Operand<TInt32> numCols, Operand<T> paddingValue,
MatrixDiagV3.Options... options) {
return MatrixDiagV3.create(scope, diagonal, k, numRows, numCols, paddingValue, options);
}
/**
* Returns a batched matrix tensor with new batched diagonal values.
* Given {@code input} and {@code diagonal}, this operation returns a tensor with the
* same shape and values as {@code input}, except for the specified diagonals of the
* innermost matrices. These will be overwritten by the values in {@code diagonal}.
* <p>{@code input} has {@code r+1} dimensions {@code [I, J, ..., L, M, N]}. When {@code k} is scalar or
* {@code k[0] == k[1]}, {@code diagonal} has {@code r} dimensions {@code [I, J, ..., L, max_diag_len]}.
* Otherwise, it has {@code r+1} dimensions {@code [I, J, ..., L, num_diags, max_diag_len]}.
* {@code num_diags} is the number of diagonals, {@code num_diags = k[1] - k[0] + 1}.
* {@code max_diag_len} is the longest diagonal in the range {@code [k[0], k[1]]},
* {@code max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))}
* <p>The output is a tensor of rank {@code r+1} with dimensions {@code [I, J, ..., L, M, N]}.
* If {@code k} is scalar or {@code k[0] == k[1]}:
* <pre>
* output[i, j, ..., l, m, n]
* = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
* input[i, j, ..., l, m, n] ; otherwise
* </pre>
* <p>Otherwise,
* <pre>
* output[i, j, ..., l, m, n]
* = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
* input[i, j, ..., l, m, n] ; otherwise
* </pre>
* <p>where {@code d = n - m}, {@code diag_index = k[1] - d}, and
* {@code index_in_diag = n - max(d, 0) + offset}.
* <p>{@code offset} is zero except when the alignment of the diagonal is to the right.
* <pre>
* offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
* and `d >= 0`) or
* (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
* and `d <= 0`)
* 0 ; otherwise
* </pre>
* <p>where {@code diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))}.
* <p>For example:
* <pre>
* # The main diagonal.
* input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)
* [7, 7, 7, 7],
* [7, 7, 7, 7]],
* [[7, 7, 7, 7],
* [7, 7, 7, 7],
* [7, 7, 7, 7]]])
* diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)
* [4, 5, 6]])
* tf.matrix_set_diag(input, diagonal)
* ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
* [7, 2, 7, 7],
* [7, 7, 3, 7]],
* [[4, 7, 7, 7],
* [7, 5, 7, 7],
* [7, 7, 6, 7]]]
*
* # A superdiagonal (per batch).
* tf.matrix_set_diag(input, diagonal, k = 1)
* ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)
* [7, 7, 2, 7],
* [7, 7, 7, 3]],
* [[7, 4, 7, 7],
* [7, 7, 5, 7],
* [7, 7, 7, 6]]]
*
* # A band of diagonals.
* diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3)
* [6, 5, 8],
* [1, 2, 3],
* [4, 5, 0]],
* [[0, 1, 2],
* [5, 6, 4],
* [6, 1, 2],
* [3, 4, 0]]])
* tf.matrix_set_diag(input, diagonals, k = (-1, 2))
* ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)
* [4, 2, 5, 1],
* [7, 5, 3, 8]],
* [[6, 5, 1, 7],
* [3, 1, 6, 2],
* [7, 4, 2, 4]]]
*
* # LEFT_RIGHT alignment.
* diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3)
* [6, 5, 8],
* [1, 2, 3],
* [0, 4, 5]],
* [[1, 2, 0],
* [5, 6, 4],
* [6, 1, 2],
* [0, 3, 4]]])
* tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT")
* ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)
* [4, 2, 5, 1],
* [7, 5, 3, 8]],
* [[6, 5, 1, 7],
* [3, 1, 6, 2],
* [7, 4, 2, 4]]]
*
* </pre>
*
* @param <T> data type for {@code output} output
* @param input Rank {@code r+1}, where {@code r >= 1}.
* @param diagonal Rank {@code r} when {@code k} is an integer or {@code k[0] == k[1]}. Otherwise, it has rank {@code r+1}.
* {@code k >= 1}.
* @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
* diagonal, and negative value means subdiagonals. {@code k} can be a single integer
* (for a single diagonal) or a pair of integers specifying the low and high ends
* of a matrix band. {@code k[0]} must not be larger than {@code k[1]}.
* @param options carries optional attribute values
* @param <T> data type for {@code MatrixSetDiagV3} output and operands
* @return a new instance of MatrixSetDiag
*/
public <T extends TType> MatrixSetDiag<T> matrixSetDiag(Operand<T> input, Operand<T> diagonal,
Operand<TInt32> k, MatrixSetDiag.Options... options) {
return MatrixSetDiag.create(scope, input, diagonal, k, options);
}
/**
* Solves one or more linear least-squares problems.
* {@code matrix} is a tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions
* form real or complex matrices of size {@code [M, N]}. {@code Rhs} is a tensor of the same
* type as {@code matrix} and shape {@code [..., M, K]}.
* The output is a tensor shape {@code [..., N, K]} where each output matrix solves
* each of the equations
* {@code matrix[..., :, :]} * {@code output[..., :, :]} = {@code rhs[..., :, :]}
* in the least squares sense.
* <p>We use the following notation for (complex) matrix and right-hand sides
* in the batch:
* <p>{@code matrix}=\(A \in \mathbb{C}^{m \times n}\),
* {@code rhs}=\(B \in \mathbb{C}^{m \times k}\),
* {@code output}=\(X \in \mathbb{C}^{n \times k}\),
* {@code l2_regularizer}=\(\lambda \in \mathbb{R}\).
* <p>If {@code fast} is {@code True}, then the solution is computed by solving the normal
* equations using Cholesky decomposition. Specifically, if \(m \ge n\) then
* \(X = (A^H A + \lambda I)^{-1} A^H B\), which solves the least-squares
* problem \(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\).
* If \(m \lt n\) then {@code output} is computed as
* \(X = A^H (A A^H + \lambda I)^{-1} B\), which (for \(\lambda = 0\)) is the
* minimum-norm solution to the under-determined linear system, i.e.
* \(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \),
* subject to \(A Z = B\). Notice that the fast path is only numerically stable
* when \(A\) is numerically full rank and has a condition number
* \(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\) or \(\lambda\) is
* sufficiently large.
* <p>If {@code fast} is {@code False} an algorithm based on the numerically robust complete
* orthogonal decomposition is used. This computes the minimum-norm
* least-squares solution, even when \(A\) is rank deficient. This path is
* typically 6-7 times slower than the fast path. If {@code fast} is {@code False} then
* {@code l2_regularizer} is ignored.
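* <p>For illustration, a minimal eager-mode sketch of an overdetermined system (values are
* arbitrary; an {@code Ops} handle such as one from {@code Ops.create()} is assumed):
* <pre>{@code
* Ops tf = Ops.create();
* Operand<TFloat32> matrix = tf.constant(new float[][] {{1.0f, 1.0f}, {1.0f, 2.0f}, {1.0f, 3.0f}});  // shape [3, 2]
* Operand<TFloat32> rhs = tf.constant(new float[][] {{1.0f}, {2.0f}, {2.0f}});                       // shape [3, 1]
* MatrixSolveLs<TFloat32> x = tf.linalg.matrixSolveLs(matrix, rhs, tf.constant(0.0));                // shape [2, 1]
* }</pre>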
*
* @param <T> data type for {@code output} output
* @param matrix Shape is {@code [..., M, N]}.
* @param rhs Shape is {@code [..., M, K]}.
* @param l2Regularizer Scalar tensor.
* <p>{@literal @}compatibility(numpy)<br>
* Equivalent to np.linalg.lstsq
* <br>{@literal @}end_compatibility
* @param options carries optional attribute values
* @param <T> data type for {@code MatrixSolveLs} output and operands
* @return a new instance of MatrixSolveLs
*/
public <T extends TType> MatrixSolveLs<T> matrixSolveLs(Operand<T> matrix, Operand<T> rhs,
Operand<TFloat64> l2Regularizer, MatrixSolveLs.Options... options) {
return MatrixSolveLs.create(scope, matrix, rhs, l2Regularizer, options);
}
/**
* Computes the QR decompositions of one or more matrices.
* Computes the QR decomposition of each inner matrix in {@code tensor} such that
* {@code tensor[..., :, :] = q[..., :, :] * r[..., :, :]}
* <p>Currently, the gradient for the QR decomposition is well-defined only when
* the first {@code P} columns of the inner matrix are linearly independent, where
* {@code P} is the minimum of {@code M} and {@code N}, the 2 inner-most dimensions of {@code tensor}.
* <pre>
* # a is a tensor.
* # q is a tensor of orthonormal matrices.
* # r is a tensor of upper triangular matrices.
* q, r = qr(a)
* q_full, r_full = qr(a, full_matrices=True)
* </pre>
*
* @param <T> data type for {@code q} output
* @param input A tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions
* form matrices of size {@code [M, N]}. Let {@code P} be the minimum of {@code M} and {@code N}.
* @param options carries optional attribute values
* @param <T> data type for {@code Qr} output and operands
* @return a new instance of Qr
*/
public <T extends TType> Qr<T> qr(Operand<T> input, Qr.Options... options) {
return Qr.create(scope, input, options);
}
/**
* Perform a quantized matrix multiplication of {@code a} by the matrix {@code b}.
* The inputs must be two-dimensional matrices and the inner dimension of
* {@code a} (after being transposed if {@code transpose_a} is non-zero) must match the
* outer dimension of {@code b} (after being transposed if {@code transpose_b} is
* non-zero).
*
* @param <V> data type for {@code out} output
* @param a Must be a two-dimensional tensor.
* @param b Must be a two-dimensional tensor.
* @param minA The float value that the lowest quantized {@code a} value represents.
* @param maxA The float value that the highest quantized {@code a} value represents.
* @param minB The float value that the lowest quantized {@code b} value represents.
* @param maxB The float value that the highest quantized {@code b} value represents.
* @param Toutput The value of the Toutput attribute
* @param Tactivation The type of output produced by activation function
* following this operation.
* @param options carries optional attribute values
* @param <V> data type for {@code QuantizedMatMul} output and operands
* @param <W> data type for {@code QuantizedMatMul} output and operands
* @return a new instance of QuantizedMatMul
*/
public <V extends TNumber, W extends TNumber> QuantizedMatMul<V> quantizedMatMul(
Operand<? extends TNumber> a, Operand<? extends TNumber> b, Operand<TFloat32> minA,
Operand<TFloat32> maxA, Operand<TFloat32> minB, Operand<TFloat32> maxB, Class<V> Toutput,
Class<W> Tactivation, QuantizedMatMul.Options... options) {
return QuantizedMatMul.create(scope, a, b, minA, maxA, minB, maxB, Toutput, Tactivation, options);
}
/**
* Computes the eigen decomposition of one or more square self-adjoint matrices.
* Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
* {@code input} such that {@code input[..., :, :] = v[..., :, :] * diag(e[..., :])}. The eigenvalues
* are sorted in non-decreasing order.
* <pre>
* # a is a tensor.
* # e is a tensor of eigenvalues.
* # v is a tensor of eigenvectors.
* e, v = self_adjoint_eig(a)
* e = self_adjoint_eig(a, compute_v=False)
* </pre>
*
* @param <T> data type for {@code e} output
* @param input {@code Tensor} input of shape {@code [N, N]}.
* @param options carries optional attribute values
* @param <T> data type for {@code SelfAdjointEigV2} output and operands
* @return a new instance of SelfAdjointEig
*/
public <T extends TType> SelfAdjointEig<T> selfAdjointEig(Operand<T> input,
SelfAdjointEig.Options... options) {
return SelfAdjointEig.create(scope, input, options);
}
/**
* Solves systems of linear equations.
* {@code Matrix} is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions
* form square matrices. {@code Rhs} is a tensor of shape {@code [..., M, K]}. The {@code output} is
* a tensor shape {@code [..., M, K]}. If {@code adjoint} is {@code False} then each output matrix
* satisfies {@code matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]}.
* If {@code adjoint} is {@code True} then each output matrix satisfies
* {@code adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]}.
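* <p>For illustration, a minimal eager-mode sketch (values are arbitrary; an {@code Ops}
* handle such as one from {@code Ops.create()} is assumed):
* <pre>{@code
* Ops tf = Ops.create();
* Operand<TFloat32> matrix = tf.constant(new float[][] {{3.0f, 1.0f}, {1.0f, 2.0f}});
* Operand<TFloat32> rhs = tf.constant(new float[][] {{9.0f}, {8.0f}});
* Solve<TFloat32> x = tf.linalg.solve(matrix, rhs);  // solves matrix * x = rhs, giving [[2], [3]]
* }</pre>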
*
* @param <T> data type for {@code output} output
* @param matrix Shape is {@code [..., M, M]}.
* @param rhs Shape is {@code [..., M, K]}.
* @param options carries optional attribute values
* @param <T> data type for {@code MatrixSolve} output and operands
* @return a new instance of Solve
*/
public <T extends TType> Solve<T> solve(Operand<T> matrix, Operand<T> rhs,
Solve.Options... options) {
return Solve.create(scope, matrix, rhs, options);
}
/**
* Computes the matrix square root of one or more square matrices:
* matmul(sqrtm(A), sqrtm(A)) = A
* <p>The input matrix should be invertible. If the input matrix is real, it should
* have no eigenvalues which are real and negative (pairs of complex conjugate
* eigenvalues are allowed).
* <p>The matrix square root is computed by first reducing the matrix to
* quasi-triangular form with the real Schur decomposition. The square root
* of the quasi-triangular matrix is then computed directly. Details of
* the algorithm can be found in: <NAME>, "Computing real
* square roots of a real matrix", Linear Algebra Appl., 1987.
* <p>The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions
* form square matrices. The output is a tensor of the same shape as the input
* containing the matrix square root for all input submatrices {@code [..., :, :]}.
*
* @param <T> data type for {@code output} output
* @param input Shape is {@code [..., M, M]}.
* @param <T> data type for {@code MatrixSquareRoot} output and operands
* @return a new instance of Sqrtm
*/
public <T extends TType> Sqrtm<T> sqrtm(Operand<T> input) {
return Sqrtm.create(scope, input);
}
/**
* Computes the singular value decompositions of one or more matrices.
* Computes the SVD of each inner matrix in {@code input} such that
* {@code input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])}
* <pre>
* # a is a tensor containing a batch of matrices.
* # s is a tensor of singular values for each matrix.
* # u is the tensor containing the left singular vectors for each matrix.
* # v is the tensor containing the right singular vectors for each matrix.
* s, u, v = svd(a)
* s, _, _ = svd(a, compute_uv=False)
* </pre>
*
* @param <T> data type for {@code s} output
* @param input A tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions
* form matrices of size {@code [M, N]}. Let {@code P} be the minimum of {@code M} and {@code N}.
* @param options carries optional attribute values
* @param <T> data type for {@code Svd} output and operands
* @return a new instance of Svd
*/
public <T extends TType> Svd<T> svd(Operand<T> input, Svd.Options... options) {
return Svd.create(scope, input, options);
}
/**
* Returns a diagonal tensor with a given diagonal values.
* Given a {@code diagonal}, this operation returns a tensor with the {@code diagonal} and
* everything else padded with zeros. The diagonal is computed as follows:
* <p>Assume {@code diagonal} has dimensions [D1,..., Dk], then the output is a tensor of
* rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
* <p>{@code output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]} and 0 everywhere else.
* <p>For example:
* <pre>
* # 'diagonal' is [1, 2, 3, 4]
* tf.diag(diagonal) ==> [[1, 0, 0, 0]
* [0, 2, 0, 0]
* [0, 0, 3, 0]
* [0, 0, 0, 4]]
* </pre>
*
* @param <T> data type for {@code output} output
* @param diagonal Rank k tensor where k is at most 1.
* @param <T> data type for {@code Diag} output and operands
* @return a new instance of TensorDiag
*/
public <T extends TType> TensorDiag<T> tensorDiag(Operand<T> diagonal) {
return TensorDiag.create(scope, diagonal);
}
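  // Illustrative sketch (hypothetical names): building a 4x4 diagonal matrix from a
  // rank-1 tensor, matching the example in the Javadoc above.
  //
  //   Operand<TFloat32> d = tf.constant(new float[] {1f, 2f, 3f, 4f});
  //   TensorDiag<TFloat32> m = tf.linalg.tensorDiag(d);   // 4x4 with 1..4 on the diagonal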
/**
* Returns the diagonal part of the tensor.
* This operation returns a tensor with the {@code diagonal} part
* of the {@code input}. The {@code diagonal} part is computed as follows:
* <p>Assume {@code input} has dimensions {@code [D1,..., Dk, D1,..., Dk]}, then the output is a
* tensor of rank {@code k} with dimensions {@code [D1,..., Dk]} where:
* <p>{@code diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]}.
* <p>For example:
* <pre>
* # 'input' is [[1, 0, 0, 0]
* [0, 2, 0, 0]
* [0, 0, 3, 0]
* [0, 0, 0, 4]]
*
* tf.diag_part(input) ==> [1, 2, 3, 4]
* </pre>
*
* @param <T> data type for {@code diagonal} output
* @param input Rank k tensor where k is even and not zero.
* @param <T> data type for {@code DiagPart} output and operands
* @return a new instance of TensorDiagPart
*/
public <T extends TType> TensorDiagPart<T> tensorDiagPart(Operand<T> input) {
return TensorDiagPart.create(scope, input);
}
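  // Illustrative sketch (hypothetical names): extracting the diagonal back out of the
  // matrix built in the previous sketch; for such inputs tensorDiagPart() inverts
  // tensorDiag().
  //
  //   TensorDiagPart<TFloat32> back = tf.linalg.tensorDiagPart(m);   // [1, 2, 3, 4]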
/**
* Shuffle dimensions of x according to a permutation.
* The output {@code y} has the same rank as {@code x}. The shapes of {@code x} and {@code y} satisfy:
* {@code y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]}
*
* @param <T> data type for {@code y} output
* @param x The x value
* @param perm The perm value
* @param <T> data type for {@code Transpose} output and operands
* @return a new instance of Transpose
*/
public <T extends TType> Transpose<T> transpose(Operand<T> x, Operand<? extends TNumber> perm) {
return Transpose.create(scope, x, perm);
}
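  // Illustrative sketch (hypothetical names): swapping the two axes of a 2x3 matrix.
  // perm is assumed to be a rank-1 integer tensor with one entry per dimension of x.
  //
  //   Operand<TFloat32> x = tf.constant(new float[][] {{1f, 2f, 3f}, {4f, 5f, 6f}});
  //   Operand<TInt32> perm = tf.constant(new int[] {1, 0});
  //   Transpose<TFloat32> y = tf.linalg.transpose(x, perm);   // shape [3, 2]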
/**
* Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.
* {@code matrix} is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions form
* square matrices. If {@code lower} is {@code True} then the strictly upper triangular part
* of each inner-most matrix is assumed to be zero and not accessed.
* If {@code lower} is False then the strictly lower triangular part of each inner-most
* matrix is assumed to be zero and not accessed.
* {@code rhs} is a tensor of shape {@code [..., M, N]}.
   * <p>The output is a tensor of shape {@code [..., M, N]}. If {@code adjoint} is
   * {@code False} then the innermost matrices in {@code output} satisfy matrix equations
   * {@code matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]}.
   * If {@code adjoint} is {@code True} then the innermost matrices in
   * {@code output} satisfy matrix equations
   * {@code adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]}.
* <p>Note, the batch shapes for the inputs only need to broadcast.
* <p>Example:
* <pre>
*
* a = tf.constant([[3, 0, 0, 0],
* [2, 1, 0, 0],
* [1, 0, 1, 0],
* [1, 1, 1, 1]], dtype=tf.float32)
*
* b = tf.constant([[4],
* [2],
* [4],
* [2]], dtype=tf.float32)
*
* x = tf.linalg.triangular_solve(a, b, lower=True)
* x
* # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
* # array([[ 1.3333334 ],
* # [-0.66666675],
* # [ 2.6666665 ],
* # [-1.3333331 ]], dtype=float32)>
*
* # in python3 one can use `a@x`
* tf.matmul(a, x)
* # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
* # array([[4. ],
* # [2. ],
* # [4. ],
* # [1.9999999]], dtype=float32)>
* </pre>
*
* @param <T> data type for {@code output} output
* @param matrix Shape is {@code [..., M, M]}.
* @param rhs Shape is {@code [..., M, K]}.
* @param options carries optional attribute values
* @param <T> data type for {@code MatrixTriangularSolve} output and operands
* @return a new instance of TriangularSolve
*/
public <T extends TType> TriangularSolve<T> triangularSolve(Operand<T> matrix, Operand<T> rhs,
TriangularSolve.Options... options) {
return TriangularSolve.create(scope, matrix, rhs, options);
}
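  // Illustrative sketch (hypothetical names): a Java-side rendering of the Python
  // example above, assuming TriangularSolve.lower(true) is the generated option setter
  // for the lower attribute.
  //
  //   Operand<TFloat32> a = tf.constant(new float[][] {
  //       {3f, 0f, 0f, 0f}, {2f, 1f, 0f, 0f}, {1f, 0f, 1f, 0f}, {1f, 1f, 1f, 1f}});
  //   Operand<TFloat32> b = tf.constant(new float[][] {{4f}, {2f}, {4f}, {2f}});
  //   TriangularSolve<TFloat32> x =
  //       tf.linalg.triangularSolve(a, b, TriangularSolve.lower(true));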
/**
* Get the parent {@link Ops} object.
*/
public final Ops ops() {
return ops;
}
}