Unnamed: 0 (int64, 0–6.45k) | func (stringlengths 29–253k) | target (class label, 2 classes) | project (stringlengths 36–167) |
---|---|---|---|
1,238 | @Deprecated
public class ShippingServiceImpl implements ShippingService {
protected ShippingModule shippingModule;
@Override
public FulfillmentGroup calculateShippingForFulfillmentGroup(FulfillmentGroup fulfillmentGroup) throws FulfillmentPriceException {
FulfillmentGroup group = shippingModule.calculateShippingForFulfillmentGroup(fulfillmentGroup);
return group;
}
public ShippingModule getShippingModule() {
return shippingModule;
}
public void setShippingModule(ShippingModule shippingModule) {
this.shippingModule = shippingModule;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_pricing_service_ShippingServiceImpl.java |
3,257 | public class LongValuesComparatorSource extends IndexFieldData.XFieldComparatorSource {
private final IndexNumericFieldData<?> indexFieldData;
private final Object missingValue;
private final SortMode sortMode;
public LongValuesComparatorSource(IndexNumericFieldData<?> indexFieldData, @Nullable Object missingValue, SortMode sortMode) {
this.indexFieldData = indexFieldData;
this.missingValue = missingValue;
this.sortMode = sortMode;
}
@Override
public SortField.Type reducedType() {
return SortField.Type.LONG;
}
@Override
public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
assert fieldname.equals(indexFieldData.getFieldNames().indexName());
final long dMissingValue = (Long) missingObject(missingValue, reversed);
return new LongValuesComparator(indexFieldData, dMissingValue, numHits, sortMode);
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_fieldcomparator_LongValuesComparatorSource.java |
1,892 | boolean b = h1.executeTransaction(options, new TransactionalTask<Boolean>() {
public Boolean execute(TransactionalTaskContext context) throws TransactionException {
try {
final TransactionalMap<String, Integer> txMap = context.getMap("default");
assertEquals("value0", txMap.getForUpdate("var"));
latch1.countDown();
latch2.await(100, TimeUnit.SECONDS);
} catch (Exception e) {
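// exceptions (including a failed assertEquals) are swallowed here; the surrounding test synchronizes via latch1/latch2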
}
return true;
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_MapTransactionTest.java |
90 | {
@Override
protected void configure( GraphDatabaseBuilder builder )
{
builder.setConfig( GraphDatabaseSettings.cache_type, "none" );
};
}; | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_ReadTransactionLogWritingTest.java |
1,012 | public interface GiftWrapOrderItem extends DiscreteOrderItem {
public List<OrderItem> getWrappedItems();
public void setWrappedItems(List<OrderItem> wrappedItems);
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_GiftWrapOrderItem.java |
624 | @RunWith(HazelcastSerialClassRunner.class)
@Category(NightlyTest.class)
public class SplitBrainHandlerTest {
@Before
@After
public void killAllHazelcastInstances() throws IOException {
Hazelcast.shutdownAll();
}
@Test(timeout = 100000)
public void testSplitBrainMulticast() throws Exception {
splitBrain(true);
}
@Test(timeout = 100000)
public void testSplitBrainTCP() throws Exception {
splitBrain(false);
}
private void splitBrain(boolean multicast) throws Exception {
Config c1 = new Config();
c1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(multicast);
c1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(!multicast);
c1.getNetworkConfig().getJoin().getTcpIpConfig().addMember("127.0.0.1");
c1.getNetworkConfig().getInterfaces().clear();
c1.getNetworkConfig().getInterfaces().addInterface("127.0.0.1");
c1.getNetworkConfig().getInterfaces().setEnabled(true);
Config c2 = new Config();
c2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(multicast);
c2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(!multicast);
c2.getNetworkConfig().getJoin().getTcpIpConfig().addMember("127.0.0.1");
c2.getNetworkConfig().getInterfaces().clear();
c2.getNetworkConfig().getInterfaces().addInterface("127.0.0.1");
c2.getNetworkConfig().getInterfaces().setEnabled(true);
c1.getGroupConfig().setName("differentGroup");
c2.getGroupConfig().setName("sameGroup");
c1.setProperty(GroupProperties.PROP_MERGE_FIRST_RUN_DELAY_SECONDS, "5");
c1.setProperty(GroupProperties.PROP_MERGE_NEXT_RUN_DELAY_SECONDS, "3");
c2.setProperty(GroupProperties.PROP_MERGE_FIRST_RUN_DELAY_SECONDS, "5");
c2.setProperty(GroupProperties.PROP_MERGE_NEXT_RUN_DELAY_SECONDS, "3");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c1);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c2);
LifecycleCountingListener l = new LifecycleCountingListener();
h2.getLifecycleService().addLifecycleListener(l);
assertEquals(1, h1.getCluster().getMembers().size());
assertEquals(1, h2.getCluster().getMembers().size());
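// renaming c1's group at runtime lets the two one-node clusters merge on the next merge run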
c1.getGroupConfig().setName("sameGroup");
assertTrue(l.waitFor(LifecycleState.MERGED, 30));
assertEquals(1, l.getCount(LifecycleState.MERGING));
assertEquals(1, l.getCount(LifecycleState.MERGED));
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
}
private class LifecycleCountingListener implements LifecycleListener {
Map<LifecycleState, AtomicInteger> counter = new ConcurrentHashMap<LifecycleState, AtomicInteger>();
BlockingQueue<LifecycleState> eventQueue = new LinkedBlockingQueue<LifecycleState>();
LifecycleCountingListener() {
for (LifecycleEvent.LifecycleState state : LifecycleEvent.LifecycleState.values()) {
counter.put(state, new AtomicInteger(0));
}
}
public void stateChanged(LifecycleEvent event) {
counter.get(event.getState()).incrementAndGet();
eventQueue.offer(event.getState());
}
int getCount(LifecycleEvent.LifecycleState state) {
return counter.get(state).get();
}
boolean waitFor(LifecycleEvent.LifecycleState state, int seconds) {
long remainingMillis = TimeUnit.SECONDS.toMillis(seconds);
while (remainingMillis > 0) {
LifecycleEvent.LifecycleState received = null;
try {
long now = Clock.currentTimeMillis();
received = eventQueue.poll(remainingMillis, TimeUnit.MILLISECONDS);
remainingMillis -= (Clock.currentTimeMillis() - now);
} catch (InterruptedException e) {
return false;
}
if (received != null && received == state) {
return true;
}
}
return false;
}
}
@Test
public void testSplitBrain() throws InterruptedException {
Config config = new Config();
config.getGroupConfig().setName("split");
config.setProperty(GroupProperties.PROP_MERGE_FIRST_RUN_DELAY_SECONDS, "5");
config.setProperty(GroupProperties.PROP_MERGE_NEXT_RUN_DELAY_SECONDS, "5");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
final CountDownLatch latch = new CountDownLatch(1);
h3.getLifecycleService().addLifecycleListener(new LifecycleListener() {
public void stateChanged(LifecycleEvent event) {
if (event.getState() == LifecycleState.MERGED) {
latch.countDown();
}
}
});
closeConnectionBetween(h1, h3);
closeConnectionBetween(h2, h3);
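// give the members a moment to observe the dropped connections before asserting the split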
Thread.sleep(1000);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
assertEquals(1, h3.getCluster().getMembers().size());
assertTrue(latch.await(30, TimeUnit.SECONDS));
assertEquals(3, h1.getCluster().getMembers().size());
assertEquals(3, h2.getCluster().getMembers().size());
assertEquals(3, h3.getCluster().getMembers().size());
}
private void closeConnectionBetween(HazelcastInstance h1, HazelcastInstance h2) {
if (h1 == null || h2 == null) return;
final Node n1 = TestUtil.getNode(h1);
final Node n2 = TestUtil.getNode(h2);
n1.clusterService.removeAddress(n2.address);
n2.clusterService.removeAddress(n1.address);
}
@Test(timeout = 180000)
@Category(ProblematicTest.class)
public void testTcpIpSplitBrainJoinsCorrectCluster() throws Exception {
// This port selection ensures that when h3 restarts it will try to join h4 instead of joining the nodes in cluster one
Config c1 = buildConfig(false, 15702);
Config c2 = buildConfig(false, 15704);
Config c3 = buildConfig(false, 15703);
Config c4 = buildConfig(false, 15701);
List<String> clusterOneMembers = Arrays.asList("127.0.0.1:15702", "127.0.0.1:15704");
List<String> clusterTwoMembers = Arrays.asList("127.0.0.1:15703", "127.0.0.1:15701");
c1.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(clusterOneMembers);
c2.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(clusterOneMembers);
c3.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(clusterTwoMembers);
c4.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(clusterTwoMembers);
final CountDownLatch latch = new CountDownLatch(2);
c3.addListenerConfig(new ListenerConfig(new LifecycleListener() {
public void stateChanged(final LifecycleEvent event) {
if (event.getState() == LifecycleState.MERGED) {
latch.countDown();
}
}
}));
c4.addListenerConfig(new ListenerConfig(new LifecycleListener() {
public void stateChanged(final LifecycleEvent event) {
if (event.getState() == LifecycleState.MERGED) {
latch.countDown();
}
}
}));
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c1);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c2);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(c3);
HazelcastInstance h4 = Hazelcast.newHazelcastInstance(c4);
// We should have two clusters of two
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
assertEquals(2, h3.getCluster().getMembers().size());
assertEquals(2, h4.getCluster().getMembers().size());
List<String> allMembers = Arrays.asList("127.0.0.1:15701", "127.0.0.1:15704", "127.0.0.1:15703",
"127.0.0.1:15702");
/*
* This simulates restoring a network connection between h3 and the
* other cluster. But it only make h3 aware of the other cluster so for
* h4 to restart it will have to be notified by h3.
*/
h3.getConfig().getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
h4.getConfig().getNetworkConfig().getJoin().getTcpIpConfig().clear().setMembers(Collections.<String> emptyList());
assertTrue(latch.await(60, TimeUnit.SECONDS));
// Both nodes from cluster two should have joined cluster one
assertEquals(4, h1.getCluster().getMembers().size());
assertEquals(4, h2.getCluster().getMembers().size());
assertEquals(4, h3.getCluster().getMembers().size());
assertEquals(4, h4.getCluster().getMembers().size());
}
@Test(timeout = 180000)
public void testTcpIpSplitBrainStillWorksWhenTargetDisappears() throws Exception {
// The ports are ordered like this so h3 will always attempt to merge with h1
Config c1 = buildConfig(false, 25701);
Config c2 = buildConfig(false, 25704);
Config c3 = buildConfig(false, 25703);
List<String> clusterOneMembers = Arrays.asList("127.0.0.1:25701");
List<String> clusterTwoMembers = Arrays.asList("127.0.0.1:25704");
List<String> clusterThreeMembers = Arrays.asList("127.0.0.1:25703");
c1.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(clusterOneMembers);
c2.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(clusterTwoMembers);
c3.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(clusterThreeMembers);
final HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c1);
final HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c2);
final CountDownLatch latch = new CountDownLatch(1);
c3.addListenerConfig(new ListenerConfig(new LifecycleListener() {
public void stateChanged(final LifecycleEvent event) {
if (event.getState() == LifecycleState.MERGING) {
h1.shutdown();
} else if (event.getState() == LifecycleState.MERGED) {
latch.countDown();
}
}
}));
final HazelcastInstance h3 = Hazelcast.newHazelcastInstance(c3);
// We should have three clusters of one
assertEquals(1, h1.getCluster().getMembers().size());
assertEquals(1, h2.getCluster().getMembers().size());
assertEquals(1, h3.getCluster().getMembers().size());
List<String> allMembers = Arrays.asList("127.0.0.1:25701", "127.0.0.1:25704", "127.0.0.1:25703");
h3.getConfig().getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
assertTrue(latch.await(60, TimeUnit.SECONDS));
// h3 should have merged with h2 after its original merge target (h1) was shut down
assertFalse(h1.getLifecycleService().isRunning());
assertEquals(2, h2.getCluster().getMembers().size());
assertEquals(2, h3.getCluster().getMembers().size());
}
/**
* Test for issue #247
*/
@Test
public void testMultiJoinsIssue247() throws Exception {
Config c1 = buildConfig(false, 15701).setProperty(GroupProperties.PROP_WAIT_SECONDS_BEFORE_JOIN, "0");
Config c2 = buildConfig(false, 15702).setProperty(GroupProperties.PROP_WAIT_SECONDS_BEFORE_JOIN, "0");
Config c3 = buildConfig(false, 15703).setProperty(GroupProperties.PROP_WAIT_SECONDS_BEFORE_JOIN, "0");
Config c4 = buildConfig(false, 15704).setProperty(GroupProperties.PROP_WAIT_SECONDS_BEFORE_JOIN, "0");
c1.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(Arrays.asList("127.0.0.1:15701"));
c2.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(Arrays.asList("127.0.0.1:15702"));
c3.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(Arrays.asList("127.0.0.1:15703"));
c4.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(Arrays.asList("127.0.0.1:15701, 127.0.0.1:15702, 127.0.0.1:15703, 127.0.0.1:15704"));
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c1);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c2);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(c3);
// First three nodes are up. All should be in separate clusters.
assertEquals(1, h1.getCluster().getMembers().size());
assertEquals(1, h2.getCluster().getMembers().size());
assertEquals(1, h3.getCluster().getMembers().size());
HazelcastInstance h4 = Hazelcast.newHazelcastInstance(c4);
// Fourth node is up. Should join one of the other three clusters.
int numNodesWithTwoMembers = 0;
if (h1.getCluster().getMembers().size() == 2) {
numNodesWithTwoMembers++;
}
if (h2.getCluster().getMembers().size() == 2) {
numNodesWithTwoMembers++;
}
if (h3.getCluster().getMembers().size() == 2) {
numNodesWithTwoMembers++;
}
if (h4.getCluster().getMembers().size() == 2) {
numNodesWithTwoMembers++;
}
Member h4Member = h4.getCluster().getLocalMember();
int numNodesThatKnowAboutH4 = 0;
if (h1.getCluster().getMembers().contains(h4Member)) {
numNodesThatKnowAboutH4++;
}
if (h2.getCluster().getMembers().contains(h4Member)) {
numNodesThatKnowAboutH4++;
}
if (h3.getCluster().getMembers().contains(h4Member)) {
numNodesThatKnowAboutH4++;
}
if (h4.getCluster().getMembers().contains(h4Member)) {
numNodesThatKnowAboutH4++;
}
/*
* At this point h4 should have joined a single node out of the other
* three. There should be two clusters of one and one cluster of two. h4
* should only be in one cluster.
*
*/
assertEquals(2, h4.getCluster().getMembers().size());
assertEquals(2, numNodesWithTwoMembers);
assertEquals(2, numNodesThatKnowAboutH4);
}
private static Config buildConfig(boolean multicastEnabled, int port) {
Config c = new Config();
c.getGroupConfig().setName("group").setPassword("pass");
c.setProperty(GroupProperties.PROP_MERGE_FIRST_RUN_DELAY_SECONDS, "10");
c.setProperty(GroupProperties.PROP_MERGE_NEXT_RUN_DELAY_SECONDS, "5");
final NetworkConfig networkConfig = c.getNetworkConfig();
networkConfig.setPort(port).setPortAutoIncrement(false);
networkConfig.getJoin().getMulticastConfig().setEnabled(multicastEnabled);
networkConfig.getJoin().getTcpIpConfig().setEnabled(!multicastEnabled);
return c;
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_cluster_SplitBrainHandlerTest.java |
1,781 | public static class InternalPolygonBuilder extends BasePolygonBuilder<InternalPolygonBuilder> {
private final MultiPolygonBuilder collection;
private InternalPolygonBuilder(MultiPolygonBuilder collection) {
super();
this.collection = collection;
this.shell = new Ring<InternalPolygonBuilder>(this);
}
@Override
public MultiPolygonBuilder close() {
super.close();
return collection;
}
} | 0true
| src_main_java_org_elasticsearch_common_geo_builders_MultiPolygonBuilder.java |
595 | public class IndicesSegmentsRequest extends BroadcastOperationRequest<IndicesSegmentsRequest> {
public IndicesSegmentsRequest() {
this(Strings.EMPTY_ARRAY);
}
public IndicesSegmentsRequest(String... indices) {
super(indices);
indicesOptions(IndicesOptions.fromOptions(false, false, true, false));
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_segments_IndicesSegmentsRequest.java |
1,337 | public class ClusterStateUpdateResponse {
private final boolean acknowledged;
public ClusterStateUpdateResponse(boolean acknowledged) {
this.acknowledged = acknowledged;
}
/**
* Whether the cluster state update was acknowledged or not
*/
public boolean isAcknowledged() {
return acknowledged;
}
} | 0true
| src_main_java_org_elasticsearch_cluster_ack_ClusterStateUpdateResponse.java |
310 | new Thread() {
public void run() {
map.lock(key);
lockedLatch.countDown();
}
}.start(); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java |
1,096 | @SuppressWarnings("serial")
public class AboutDialog extends JDialog {
private static ImageIcon mctLogoIcon = new ImageIcon(ClassLoader.getSystemResource("images/mctlogo.png"));
public AboutDialog(JFrame frame) {
super(frame);
setDefaultCloseOperation(DISPOSE_ON_CLOSE);
Image image = mctLogoIcon.getImage().getScaledInstance(320, 80, Image.SCALE_SMOOTH);
JLabel label = new JLabel(new ImageIcon(image));
JPanel labelPanel = new JPanel();
labelPanel.setBackground(Color.white);
labelPanel.add(label, BorderLayout.CENTER);
labelPanel.setBorder(new EmptyBorder(5,5,5,5));
Container contentPane = getContentPane();
contentPane.setLayout(new BorderLayout());
contentPane.add(labelPanel, BorderLayout.NORTH);
// Modified the AboutDialog to add the version and build numbers to the screen - Joe...
JTextArea license = new JTextArea(100, 100);
license.setText("Mission Control Technologies, Copyright (c) 2009-2012, United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All rights reserved.\n\nMission Control Technologies is a collaborative environment developed at NASA Ames Research Center. The MCT platform is licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this application except in compliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0.\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\nMCT includes source code licensed under additional open source licenses. See About MCT Licenses or the MCT Open Source Licenses file included with this distribution for additional information.");
license.setLineWrap(true);
license.setWrapStyleWord(true);
license.setEditable(false);
JPanel licensePanel = new JPanel(new GridLayout(0, 1));
licensePanel.add(license);
licensePanel.setBackground(Color.white);
licensePanel.setBorder(BorderFactory.createEmptyBorder(20,40, 20, 40));
contentPane.add(licensePanel, BorderLayout.CENTER);
JPanel panel = new JPanel();
panel.setBackground(Color.white);
JButton close = new JButton("Close");
close.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
AboutDialog.this.setVisible(false);
}
});
panel.add(close);
contentPane.add(panel, BorderLayout.SOUTH);
setBackground(Color.WHITE);
setSize(400, 600);
setResizable(false);
setLocationRelativeTo(frame);
setTitle("About MCT");
}
public static String getBuildNumber() {
String buildnumber = "Not Found";
try {
Properties p = new Properties();
p.load(ClassLoader.getSystemResourceAsStream("properties/version.properties"));
buildnumber = p.getProperty("build.number");
} catch (Exception e) {
// if not found, just ignore any exceptions - it's not critical...
}
return buildnumber;
}
} | 1no label
| platform_src_main_java_gov_nasa_arc_mct_gui_dialogs_AboutDialog.java |
18 | public interface BiFun<A,B,T> { T apply(A a, B b); } | 0true
| src_main_java_jsr166e_CompletableFuture.java |
3,668 | public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
IdFieldMapper.Builder builder = id();
parseField(builder, builder.name, node, parserContext);
for (Map.Entry<String, Object> entry : node.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals("path")) {
builder.path(fieldNode.toString());
}
}
return builder;
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_internal_IdFieldMapper.java |
450 | public class PropertyType {
public static class AdminPresentation {
public static final String FRIENDLYNAME = "friendlyName";
public static final String SECURITYLEVEL = "securityLevel";
public static final String ORDER = "order";
public static final String GRIDORDER = "gridOrder";
public static final String VISIBILITY = "visibility";
public static final String FIELDTYPE = "fieldType";
public static final String GROUP = "group";
public static final String GROUPORDER = "groupOrder";
public static final String GROUPCOLLAPSED = "groupCollapsed";
public static final String TAB = "tab";
public static final String TABORDER = "tabOrder";
public static final String LARGEENTRY = "largeEntry";
public static final String PROMINENT = "prominent";
public static final String COLUMNWIDTH = "columnWidth";
public static final String BROADLEAFENUMERATION = "broadleafEnumeration";
public static final String REQUIREDOVERRIDE = "requiredOverride";
public static final String EXCLUDED = "excluded";
public static final String TOOLTIP = "tooltip";
public static final String HELPTEXT = "helpText";
public static final String HINT = "hint";
public static final String SHOWIFPROPERTY = "showIfProperty";
public static final String CURRENCYCODEFIELD = "currencyCodeField";
public static final String RULEIDENTIFIER = "ruleIdentifier";
public static final String READONLY = "readOnly";
public static final String VALIDATIONCONFIGURATIONS = "validationConfigurations";
}
public static class AdminPresentationToOneLookup {
public static final String LOOKUPDISPLAYPROPERTY = "lookupDisplayProperty";
public static final String USESERVERSIDEINSPECTIONCACHE = "useServerSideInspectionCache";
public static final String LOOKUPTYPE = "lookupType";
public static final String CUSTOMCRITERIA = "customCriteria";
public static final String FORCEPOPULATECHILDPROPERTIES = "forcePopulateChildProperties";
}
public static class AdminPresentationDataDrivenEnumeration {
public static final String OPTIONLISTENTITY = "optionListEntity";
public static final String OPTIONVALUEFIELDNAME = "optionValueFieldName";
public static final String OPTIONDISPLAYFIELDNAME = "optionDisplayFieldName";
public static final String OPTIONCANEDITVALUES = "optionCanEditValues";
public static final String OPTIONFILTERPARAMS = "optionFilterParams";
}
public static class AdminPresentationAdornedTargetCollection {
public static final String FRIENDLYNAME = "friendlyName";
public static final String SECURITYLEVEL = "securityLevel";
public static final String EXCLUDED = "excluded";
public static final String SHOWIFPROPERTY = "showIfProperty";
public static final String READONLY = "readOnly";
public static final String USESERVERSIDEINSPECTIONCACHE = "useServerSideInspectionCache";
public static final String PARENTOBJECTPROPERTY = "parentObjectProperty";
public static final String PARENTOBJECTIDPROPERTY = "parentObjectIdProperty";
public static final String TARGETOBJECTPROPERTY = "targetObjectProperty";
public static final String MAINTAINEDADORNEDTARGETFIELDS = "maintainedAdornedTargetFields";
public static final String GRIDVISIBLEFIELDS = "gridVisibleFields";
public static final String TARGETOBJECTIDPROPERTY = "targetObjectIdProperty";
public static final String JOINENTITYCLASS = "joinEntityClass";
public static final String SORTPROPERTY = "sortProperty";
public static final String SORTASCENDING = "sortAscending";
public static final String IGNOREADORNEDPROPERTIES = "ignoreAdornedProperties";
public static final String ORDER = "order";
public static final String TAB = "tab";
public static final String TABORDER = "tabOrder";
public static final String CUSTOMCRITERIA = "customCriteria";
public static final String CURRENCYCODEFIELD = "currencyCodeField";
public static final String OPERATIONTYPES = "operationTypes";
}
public static class AdminPresentationCollection {
public static final String FRIENDLYNAME = "friendlyName";
public static final String SECURITYLEVEL = "securityLevel";
public static final String EXCLUDED = "excluded";
public static final String READONLY = "readOnly";
public static final String USESERVERSIDEINSPECTIONCACHE = "useServerSideInspectionCache";
public static final String ADDTYPE = "addType";
public static final String MANYTOFIELD = "manyToField";
public static final String ORDER = "order";
public static final String TAB = "tab";
public static final String TABORDER = "tabOrder";
public static final String CUSTOMCRITERIA = "customCriteria";
public static final String OPERATIONTYPES = "operationTypes";
public static final String SHOWIFPROPERTY = "showIfProperty";
public static final String CURRENCYCODEFIELD = "currencyCodeField";
}
public static class AdminPresentationMap {
public static final String FRIENDLYNAME = "friendlyName";
public static final String SECURITYLEVEL = "securityLevel";
public static final String EXCLUDED = "excluded";
public static final String READONLY = "readOnly";
public static final String USESERVERSIDEINSPECTIONCACHE = "useServerSideInspectionCache";
public static final String ORDER = "order";
public static final String TAB = "tab";
public static final String TABORDER = "tabOrder";
public static final String KEYCLASS = "keyClass";
public static final String MAPKEYVALUEPROPERTY = "mapKeyValueProperty";
public static final String KEYPROPERTYFRIENDLYNAME = "keyPropertyFriendlyName";
public static final String VALUECLASS = "valueClass";
public static final String DELETEENTITYUPONREMOVE = "deleteEntityUponRemove";
public static final String VALUEPROPERTYFRIENDLYNAME = "valuePropertyFriendlyName";
public static final String ISSIMPLEVALUE = "isSimpleValue";
public static final String MEDIAFIELD = "mediaField";
public static final String KEYS = "keys";
public static final String FORCEFREEFORMKEYS = "forceFreeFormKeys";
public static final String MANYTOFIELD = "manyToField";
public static final String MAPKEYOPTIONENTITYCLASS = "mapKeyOptionEntityClass";
public static final String MAPKEYOPTIONENTITYDISPLAYFIELD = "mapKeyOptionEntityDisplayField";
public static final String MAPKEYOPTIONENTITYVALUEFIELD = "mapKeyOptionEntityValueField";
public static final String CUSTOMCRITERIA = "customCriteria";
public static final String OPERATIONTYPES = "operationTypes";
public static final String SHOWIFPROPERTY = "showIfProperty";
public static final String CURRENCYCODEFIELD = "currencyCodeField";
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_presentation_override_PropertyType.java |
2,574 | clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", Priority.URGENT, new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
final int prevMinimumMasterNode = ZenDiscovery.this.electMaster.minimumMasterNodes();
ZenDiscovery.this.electMaster.minimumMasterNodes(minimumMasterNodes);
// check if we have enough master nodes, if not, we need to move into joining the cluster again
if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) {
return rejoin(currentState, "not enough master nodes on change of minimum_master_nodes from [" + prevMinimumMasterNode + "] to [" + minimumMasterNodes + "]");
}
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
sendInitialStateEventIfNeeded();
}
}); | 1no label
| src_main_java_org_elasticsearch_discovery_zen_ZenDiscovery.java |
899 | threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
int shardIndex = -1;
for (final ShardIterator shardIt : shardsIts) {
shardIndex++;
final ShardRouting shard = shardIt.firstOrNull();
if (shard != null) {
if (shard.currentNodeId().equals(nodes.localNodeId())) {
performFirstPhase(shardIndex, shardIt);
}
}
}
}
}); | 1no label
| src_main_java_org_elasticsearch_action_search_type_TransportSearchTypeAction.java |
3,435 | public static class Translog {
private long startTime = 0;
private long time;
private volatile int currentTranslogOperations = 0;
public long startTime() {
return this.startTime;
}
public void startTime(long startTime) {
this.startTime = startTime;
}
public long time() {
return this.time;
}
public void time(long time) {
this.time = time;
}
public void addTranslogOperations(int count) {
this.currentTranslogOperations += count;
}
public int currentTranslogOperations() {
return this.currentTranslogOperations;
}
} | 0true
| src_main_java_org_elasticsearch_index_gateway_RecoveryStatus.java |
1,536 | public class OrderMapReduce {
public static final String CLASS = Tokens.makeNamespace(OrderMapReduce.class) + ".class";
public static final String KEY = Tokens.makeNamespace(OrderMapReduce.class) + ".key";
public static final String TYPE = Tokens.makeNamespace(OrderMapReduce.class) + ".type";
public static final String ELEMENT_KEY = Tokens.makeNamespace(OrderMapReduce.class) + ".elementKey";
public enum Counters {
VERTICES_PROCESSED,
OUT_EDGES_PROCESSED
}
public static Configuration createConfiguration(final Class<? extends Element> klass,
final String key,
final Class<? extends WritableComparable> type,
final String elementKey) {
final Configuration configuration = new EmptyConfiguration();
configuration.setClass(OrderMapReduce.CLASS, klass, Element.class);
configuration.set(OrderMapReduce.KEY, key);
configuration.setClass(OrderMapReduce.TYPE, type, WritableComparable.class);
configuration.set(OrderMapReduce.ELEMENT_KEY, elementKey);
return configuration;
}
public static Class<? extends WritableComparator> createComparator(final TransformPipe.Order order, final Class<? extends WritableComparable> comparable) {
Class<? extends WritableComparator> comparatorClass = null;
if (comparable.equals(LongWritable.class))
comparatorClass = order.equals(TransformPipe.Order.INCR) ? LongWritable.Comparator.class : LongWritable.DecreasingComparator.class;
else if (comparable.equals(IntWritable.class))
comparatorClass = order.equals(TransformPipe.Order.INCR) ? IntWritable.Comparator.class : WritableComparators.DecreasingIntComparator.class;
else if (comparable.equals(FloatWritable.class))
comparatorClass = order.equals(TransformPipe.Order.INCR) ? FloatWritable.Comparator.class : WritableComparators.DecreasingFloatComparator.class;
else if (comparable.equals(DoubleWritable.class))
comparatorClass = order.equals(TransformPipe.Order.INCR) ? DoubleWritable.Comparator.class : WritableComparators.DecreasingDoubleComparator.class;
else if (comparable.equals(Text.class))
comparatorClass = order.equals(TransformPipe.Order.INCR) ? Text.Comparator.class : WritableComparators.DecreasingTextComparator.class;
return comparatorClass;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, WritableComparable, Text> {
private String key;
private boolean isVertex;
private WritableHandler handler;
private String elementKey;
private SafeMapperOutputs outputs;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
this.key = context.getConfiguration().get(KEY);
this.handler = new WritableHandler(context.getConfiguration().getClass(TYPE, Text.class, WritableComparable.class));
this.elementKey = context.getConfiguration().get(ELEMENT_KEY);
this.outputs = new SafeMapperOutputs(context);
}
private Text text = new Text();
private WritableComparable writable;
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, WritableComparable, Text>.Context context) throws IOException, InterruptedException {
if (this.isVertex) {
if (value.hasPaths()) {
this.text.set(ElementPicker.getPropertyAsString(value, this.elementKey));
final Object temp = ElementPicker.getProperty(value, this.key);
if (this.key.equals(Tokens._COUNT)) {
this.writable = this.handler.set(temp);
context.write(this.writable, this.text);
} else if (temp instanceof Number) {
this.writable = this.handler.set(multiplyPathCount((Number) temp, value.pathCount()));
context.write(this.writable, this.text);
} else {
this.writable = this.handler.set(temp);
for (int i = 0; i < value.pathCount(); i++) {
context.write(this.writable, this.text);
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
}
} else {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
this.text.set(ElementPicker.getPropertyAsString(edge, this.elementKey));
final Object temp = ElementPicker.getProperty(edge, this.key);
if (this.key.equals(Tokens._COUNT)) {
this.writable = this.handler.set(temp);
context.write(this.writable, this.text);
} else if (temp instanceof Number) {
this.writable = this.handler.set(multiplyPathCount((Number) temp, edge.pathCount()));
context.write(this.writable, this.text);
} else {
this.writable = this.handler.set(temp);
for (int i = 0; i < edge.pathCount(); i++) {
context.write(this.writable, this.text);
}
}
edgesProcessed++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
}
this.outputs.write(Tokens.GRAPH, NullWritable.get(), value);
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusVertex, WritableComparable, Text>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
public static class Reduce extends Reducer<WritableComparable, Text, Text, WritableComparable> {
private SafeReducerOutputs outputs;
@Override
public void setup(final Reducer<WritableComparable, Text, Text, WritableComparable>.Context context) throws IOException, InterruptedException {
this.outputs = new SafeReducerOutputs(context);
}
@Override
public void reduce(final WritableComparable key, final Iterable<Text> values, final Reducer<WritableComparable, Text, Text, WritableComparable>.Context context) throws IOException, InterruptedException {
for (final Text value : values) {
this.outputs.write(Tokens.SIDEEFFECT, value, key);
}
}
@Override
public void cleanup(final Reducer<WritableComparable, Text, Text, WritableComparable>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
private static Number multiplyPathCount(final Number value, final Long pathCount) {
if (value instanceof Long)
return (Long) value * pathCount;
else if (value instanceof Integer)
return (Integer) value * pathCount;
else if (value instanceof Double)
return (Double) value * pathCount;
else if (value instanceof Float)
return (Float) value * pathCount;
else
return value.doubleValue() * pathCount;
}
} | 1no label
| titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_OrderMapReduce.java |
3,226 | public class PagedBytesStringFieldDataTests extends AbstractStringFieldDataTests {
@Override
protected FieldDataType getFieldDataType() {
return new FieldDataType("string", ImmutableSettings.builder().put("format", "paged_bytes"));
}
} | 0true
| src_test_java_org_elasticsearch_index_fielddata_PagedBytesStringFieldDataTests.java |
2,192 | public class MoreLikeThisQueryTests extends ElasticsearchTestCase {
@Test
public void testSimple() throws Exception {
Directory dir = new RAMDirectory();
IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
indexWriter.commit();
Document document = new Document();
document.add(new TextField("_id", "1", Field.Store.YES));
document.add(new TextField("text", "lucene", Field.Store.YES));
indexWriter.addDocument(document);
document = new Document();
document.add(new TextField("_id", "2", Field.Store.YES));
document.add(new TextField("text", "lucene release", Field.Store.YES));
indexWriter.addDocument(document);
IndexReader reader = DirectoryReader.open(indexWriter, true);
IndexSearcher searcher = new IndexSearcher(reader);
MoreLikeThisQuery mltQuery = new MoreLikeThisQuery("lucene", new String[]{"text"}, Lucene.STANDARD_ANALYZER);
mltQuery.setLikeText("lucene");
mltQuery.setMinTermFrequency(1);
mltQuery.setMinDocFreq(1);
long count = Lucene.count(searcher, mltQuery);
assertThat(count, equalTo(2l));
reader.close();
indexWriter.close();
}
} | 0true
| src_test_java_org_elasticsearch_common_lucene_search_MoreLikeThisQueryTests.java |
123 | {
@Override
public boolean matchesSafely( LogEntry.Start entry )
{
return entry != null && entry.getIdentifier() == identifier && entry.getMasterId() == masterId
&& entry.getLocalId() == localId;
}
@Override
public void describeTo( Description description )
{
description.appendText( "Start[" + identifier + ",xid=<Any Xid>,master=" + masterId + ",me=" + localId
+ ",time=<Any Date>]" );
}
}; | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_LogMatchers.java |
574 | private static final class ODocumentWrapper {
private final ODocument document;
private ODocumentWrapper(ODocument document) {
this.document = document;
}
@Override
public int hashCode() {
int hashCode = document.getIdentity().hashCode();
for (Object field : document.fieldValues())
hashCode = 31 * hashCode + field.hashCode();
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (obj == null)
return false;
if (obj == document)
return true;
if (obj.getClass() != document.getClass())
return false;
final ODocument anotherDocument = (ODocument) obj;
if (!document.getIdentity().equals(anotherDocument.getIdentity()))
return false;
final String[] fieldNames = document.fieldNames();
final String[] anotherFieldNames = anotherDocument.fieldNames();
if (fieldNames.length != anotherFieldNames.length)
return false;
for (final String fieldName : fieldNames) {
final Object fieldValue = document.field(fieldName);
final Object anotherFieldValue = anotherDocument.field(fieldName);
if (fieldValue == null && anotherFieldValue != null)
return false;
if (fieldValue != null && !fieldValue.equals(anotherFieldValue))
return false;
}
return true;
}
@Override
public String toString() {
return document.toString();
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_ODocumentFieldsHashSet.java |
1,340 | Future future = executorService.submit(new Callable<String>() {
@Override
public String call() {
try {
return "success";
} finally {
latch1.countDown();
}
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_executor_ExecutorServiceTest.java |
287 | public class FailedNodeException extends ElasticsearchException {
private final String nodeId;
public FailedNodeException(String nodeId, String msg, Throwable cause) {
super(msg, cause);
this.nodeId = nodeId;
}
public String nodeId() {
return this.nodeId;
}
} | 0true
| src_main_java_org_elasticsearch_action_FailedNodeException.java |
2,054 | public final class ScopeBinding implements Element {
private final Object source;
private final Class<? extends Annotation> annotationType;
private final Scope scope;
ScopeBinding(Object source, Class<? extends Annotation> annotationType, Scope scope) {
this.source = checkNotNull(source, "source");
this.annotationType = checkNotNull(annotationType, "annotationType");
this.scope = checkNotNull(scope, "scope");
}
public Object getSource() {
return source;
}
public Class<? extends Annotation> getAnnotationType() {
return annotationType;
}
public Scope getScope() {
return scope;
}
public <T> T acceptVisitor(ElementVisitor<T> visitor) {
return visitor.visit(this);
}
public void applyTo(Binder binder) {
binder.withSource(getSource()).bindScope(annotationType, scope);
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_spi_ScopeBinding.java |
1,502 | new NoneGatewayAllocator(), new ShardsAllocator() {
@Override
public boolean rebalance(RoutingAllocation allocation) {
return false;
}
@Override
public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return false;
}
@Override
public void applyStartedShards(StartedRerouteAllocation allocation) {
}
@Override
public void applyFailedShards(FailedRerouteAllocation allocation) {
}
/*
* This allocator tries to rebuild this scenario, where a rebalance is
* triggered solely by the primary overload on node [1] and a shard
* is rebalanced to node 0:
routing_nodes:
-----node_id[0][V]
--------[test][0], node[0], [R], s[STARTED]
--------[test][4], node[0], [R], s[STARTED]
-----node_id[1][V]
--------[test][0], node[1], [P], s[STARTED]
--------[test][1], node[1], [P], s[STARTED]
--------[test][3], node[1], [R], s[STARTED]
-----node_id[2][V]
--------[test][1], node[2], [R], s[STARTED]
--------[test][2], node[2], [R], s[STARTED]
--------[test][4], node[2], [P], s[STARTED]
-----node_id[3][V]
--------[test][2], node[3], [P], s[STARTED]
--------[test][3], node[3], [P], s[STARTED]
---- unassigned
*/
@Override
public boolean allocateUnassigned(RoutingAllocation allocation) {
RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned();
boolean changed = !unassigned.isEmpty();
for (MutableShardRouting sr : unassigned) {
switch (sr.id()) {
case 0:
if (sr.primary()) {
allocation.routingNodes().assign(sr, "node1");
} else {
allocation.routingNodes().assign(sr, "node0");
}
break;
case 1:
if (sr.primary()) {
allocation.routingNodes().assign(sr, "node1");
} else {
allocation.routingNodes().assign(sr, "node2");
}
break;
case 2:
if (sr.primary()) {
allocation.routingNodes().assign(sr, "node3");
} else {
allocation.routingNodes().assign(sr, "node2");
}
break;
case 3:
if (sr.primary()) {
allocation.routingNodes().assign(sr, "node3");
} else {
allocation.routingNodes().assign(sr, "node1");
}
break;
case 4:
if (sr.primary()) {
allocation.routingNodes().assign(sr, "node2");
} else {
allocation.routingNodes().assign(sr, "node0");
}
break;
}
}
unassigned.clear();
return changed;
}
}), ClusterInfoService.EMPTY); | 0true
| src_test_java_org_elasticsearch_cluster_routing_allocation_BalanceConfigurationTests.java |
193 | public class TxManagerTest
{
@Test
public void settingTmNotOkShouldAttachCauseToSubsequentErrors() throws Exception
{
// Given
XaDataSourceManager mockXaManager = mock( XaDataSourceManager.class );
File txLogDir = TargetDirectory.forTest( fs.get(), getClass() ).cleanDirectory( "log" );
KernelHealth kernelHealth = new KernelHealth( panicGenerator, logging );
TxManager txm = new TxManager( txLogDir, mockXaManager, DEV_NULL, fs.get(), null, null,
kernelHealth, monitors );
txm.doRecovery(); // Make the txm move to an ok state
String msg = "These kinds of throwables, breaking our transaction managers, are why we can't have nice things.";
// When
txm.setTmNotOk( new Throwable( msg ) );
// Then
try
{
txm.begin();
fail( "Should have thrown SystemException." );
}
catch ( SystemException topLevelException )
{
assertThat( "TM should forward a cause.", topLevelException.getCause(), is( Throwable.class ) );
assertThat( "Cause should be the original cause", topLevelException.getCause().getMessage(), is( msg ) );
}
}
@Test
public void shouldNotSetTmNotOKForFailureInCommitted() throws Throwable
{
/*
* I.e. when the commit has been done and the TxIdGenerator#committed method is called and fails,
* it should not put the TM in not OK state. However that exception should still be propagated to
* the user.
*/
// GIVEN
File directory = TargetDirectory.forTest( fs.get(), getClass() ).cleanDirectory( "dir" );
TransactionStateFactory stateFactory = new TransactionStateFactory( logging );
TxIdGenerator txIdGenerator = mock( TxIdGenerator.class );
doThrow( RuntimeException.class ).when( txIdGenerator )
.committed( any( XaDataSource.class ), anyInt(), anyLong(), any( Integer.class ) );
stateFactory.setDependencies( mock( LockManager.class ),
mock( NodeManager.class ), mock( RemoteTxHook.class ), txIdGenerator );
XaDataSourceManager xaDataSourceManager = life.add( new XaDataSourceManager( DEV_NULL ) );
KernelHealth kernelHealth = new KernelHealth( panicGenerator, logging );
AbstractTransactionManager txManager = life.add( new TxManager( directory, xaDataSourceManager,
logging.getMessagesLog( TxManager.class ), fs.get(), stateFactory, xidFactory, kernelHealth, monitors ) );
XaFactory xaFactory = new XaFactory( new Config(), txIdGenerator, txManager, fs.get(), monitors,
logging, ALWAYS_VALID, NO_PRUNING, kernelHealth );
DummyXaDataSource dataSource = new DummyXaDataSource( UTF8.encode( "0xDDDDDE" ), "dummy", xaFactory,
stateFactory, new File( directory, "log" ) );
xaDataSourceManager.registerDataSource( dataSource );
life.start();
txManager.doRecovery();
// WHEN
txManager.begin();
dataSource.getXaConnection().enlistResource( txManager.getTransaction() );
txManager.commit();
// THEN tx manager should still work here
assertThat( logging.toString(), containsString( "Commit notification failed" ) );
doNothing().when( txIdGenerator )
.committed( any( XaDataSource.class ), anyInt(), anyLong(), any( Integer.class ) );
txManager.begin();
txManager.rollback();
// and of course kernel should be healthy
kernelHealth.assertHealthy( AssertionError.class );
}
@Rule
public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
private final KernelPanicEventGenerator panicGenerator = new KernelPanicEventGenerator(
new KernelEventHandlers(StringLogger.DEV_NULL) );
private final Monitors monitors = new Monitors();
private final Logging logging = new BufferingLogging();
private final Factory<byte[]> xidFactory = new Factory<byte[]>()
{
private final AtomicInteger id = new AtomicInteger();
@Override
public byte[] newInstance()
{
return ("test" + id.incrementAndGet()).getBytes();
}
};
private final LifeSupport life = new LifeSupport();
@After
public void after()
{
life.shutdown();
}
} | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TxManagerTest.java |
3,389 | public class PagedBytesEstimator implements PerValueEstimator {
private final AtomicReaderContext context;
private final MemoryCircuitBreaker breaker;
private long estimatedBytes;
PagedBytesEstimator(AtomicReaderContext context, MemoryCircuitBreaker breaker) {
this.breaker = breaker;
this.context = context;
}
/**
* @return the number of bytes for the term based on the length and ordinal overhead
*/
public long bytesPerValue(BytesRef term) {
long bytes = term.length;
// 64 bytes for miscellaneous overhead
bytes += 64;
// Seems to be about a 1.5x compression per term/ord, plus 1 for some wiggle room
bytes = (long) ((double) bytes / 1.5) + 1;
return bytes;
}
/**
* @return the estimate for loading the entire term set into field data, or 0 if unavailable
*/
public long estimateStringFieldData() {
try {
AtomicReader reader = context.reader();
Terms terms = reader.terms(getFieldNames().indexName());
Fields fields = reader.fields();
final Terms fieldTerms = fields.terms(getFieldNames().indexName());
if (fieldTerms instanceof BlockTreeTermsReader.FieldReader) {
final BlockTreeTermsReader.Stats stats = ((BlockTreeTermsReader.FieldReader) fieldTerms).computeStats();
long totalTermBytes = stats.totalTermBytes;
if (logger.isTraceEnabled()) {
logger.trace("totalTermBytes: {}, terms.size(): {}, terms.getSumDocFreq(): {}",
totalTermBytes, terms.size(), terms.getSumDocFreq());
}
long totalBytes = totalTermBytes + (2 * terms.size()) + (4 * terms.getSumDocFreq());
return totalBytes;
}
} catch (Exception e) {
logger.warn("Unable to estimate memory overhead", e);
}
return 0;
}
/**
* Determine whether the BlockTreeTermsReader.FieldReader can be used
* for estimating the field data, adding the estimate to the circuit
* breaker if it can, otherwise wrapping the terms in a
* RamAccountingTermsEnum to be estimated on a per-term basis.
*
* @param terms terms to be estimated
* @return A possibly wrapped TermsEnum for the terms
* @throws IOException
*/
public TermsEnum beforeLoad(Terms terms) throws IOException {
final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat(
FilterSettingFields.ACCEPTABLE_TRANSIENT_OVERHEAD_RATIO,
OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
AtomicReader reader = context.reader();
// Check if one of the following is present:
// - The OrdinalsBuilder overhead has been tweaked away from the default
// - A field data filter is present
// - A regex filter is present
if (acceptableTransientOverheadRatio != OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO ||
fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MIN, 0d) != 0d ||
fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MAX, 0d) != 0d ||
fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MIN_SEGMENT_SIZE, 0d) != 0d ||
fieldDataType.getSettings().get(FilterSettingFields.REGEX_PATTERN) != null) {
if (logger.isTraceEnabled()) {
logger.trace("Filter exists, can't circuit break normally, using RamAccountingTermsEnum");
}
return new RamAccountingTermsEnum(filter(terms, reader), breaker, this);
} else {
estimatedBytes = this.estimateStringFieldData();
// If we weren't able to estimate, wrap in the RamAccountingTermsEnum
if (estimatedBytes == 0) {
return new RamAccountingTermsEnum(filter(terms, reader), breaker, this);
}
breaker.addEstimateBytesAndMaybeBreak(estimatedBytes);
return filter(terms, reader);
}
}
/**
* Adjust the circuit breaker now that terms have been loaded, getting
* the actual used either from the parameter (if estimation worked for
* the entire set), or from the TermsEnum if it has been wrapped in a
* RamAccountingTermsEnum.
*
* @param termsEnum terms that were loaded
* @param actualUsed actual field data memory usage
*/
public void afterLoad(TermsEnum termsEnum, long actualUsed) {
if (termsEnum instanceof RamAccountingTermsEnum) {
estimatedBytes = ((RamAccountingTermsEnum) termsEnum).getTotalBytes();
}
breaker.addWithoutBreaking(-(estimatedBytes - actualUsed));
}
/**
* Adjust the breaker when no terms were actually loaded, but the field
* data takes up space regardless. For instance, when ordinals are
* used.
* @param actualUsed bytes actually used
*/
public void adjustForNoTerms(long actualUsed) {
breaker.addWithoutBreaking(actualUsed);
}
} | 1no label
| src_main_java_org_elasticsearch_index_fielddata_plain_PagedBytesIndexFieldData.java |
1,080 | @Service("blMergeCartService")
public class MergeCartServiceImpl implements MergeCartService {
@Resource(name = "blOrderService")
protected OrderService orderService;
@Resource(name = "blOrderItemService")
protected OrderItemService orderItemService;
@Resource(name = "blFulfillmentGroupService")
protected FulfillmentGroupService fulfillmentGroupService;
@Resource(name = "blMergeCartServiceExtensionManager")
protected MergeCartServiceExtensionManager extensionManager;
@Override
public MergeCartResponse mergeCart(Customer customer, Order anonymousCart)
throws PricingException, RemoveFromCartException {
return mergeCart(customer, anonymousCart, true);
}
@Override
public ReconstructCartResponse reconstructCart(Customer customer) throws PricingException, RemoveFromCartException {
return reconstructCart(customer, true);
}
@Override
public MergeCartResponse mergeCart(Customer customer, Order anonymousCart, boolean priceOrder)
throws PricingException, RemoveFromCartException {
MergeCartResponse mergeCartResponse = new MergeCartResponse();
mergeCartResponse.setMerged(false); // We no longer merge items, only transition cart states
// We need to make sure that the old, saved customer cart is reconstructed with availability concerns in mind
ReconstructCartResponse reconstructCartResponse = reconstructCart(customer, false);
mergeCartResponse.setRemovedItems(reconstructCartResponse.getRemovedItems());
Order customerCart = reconstructCartResponse.getOrder();
if (anonymousCart != null && customerCart != null && anonymousCart.equals(customerCart)) {
// The carts are the same, use either ensuring it's owned by the current customer
setNewCartOwnership(anonymousCart, customer);
mergeCartResponse.setOrder(anonymousCart);
} else if (anonymousCart == null || anonymousCart.getOrderItems().size() == 0) {
// The anonymous cart is of no use, use the customer cart
mergeCartResponse.setOrder(customerCart);
// The anonymous cart is owned by a different customer, so there is no chance for a single customer to have
// multiple IN_PROCESS carts. We can go ahead and clean up this empty cart anyway since it's empty
if (anonymousCart != null) {
orderService.cancelOrder(anonymousCart);
}
} else if (customerCart == null || customerCart.getOrderItems().size() == 0) {
// Delete the saved customer order since it is completely empty anyway. We do not want 2 IN_PROCESS orders
// hanging around
if (customerCart != null) {
orderService.cancelOrder(customerCart);
}
// The customer cart is of no use, use the anonymous cart
setNewCartOwnership(anonymousCart, customer);
mergeCartResponse.setOrder(anonymousCart);
} else {
// Both carts have some items. The anonymous cart will always be the more recent one by definition
// Save off the old customer cart and use the anonymous cart
setSavedCartAttributes(customerCart);
orderService.save(customerCart, false);
setNewCartOwnership(anonymousCart, customer);
mergeCartResponse.setOrder(anonymousCart);
}
if (mergeCartResponse.getOrder() != null) {
Order savedCart = orderService.save(mergeCartResponse.getOrder(), priceOrder);
mergeCartResponse.setOrder(savedCart);
}
return mergeCartResponse;
}
@Override
public ReconstructCartResponse reconstructCart(Customer customer, boolean priceOrder)
throws PricingException, RemoveFromCartException {
ReconstructCartResponse reconstructCartResponse = new ReconstructCartResponse();
Order customerCart = orderService.findCartForCustomer(customer);
if (customerCart != null) {
List<OrderItem> itemsToRemove = new ArrayList<OrderItem>();
for (OrderItem orderItem : customerCart.getOrderItems()) {
if (orderItem instanceof DiscreteOrderItem) {
DiscreteOrderItem doi = (DiscreteOrderItem) orderItem;
if (!checkActive(doi) || !checkInventory(doi) || !checkOtherValidity(orderItem)) {
itemsToRemove.add(orderItem);
}
} else if (orderItem instanceof BundleOrderItem) {
BundleOrderItem bundleOrderItem = (BundleOrderItem) orderItem;
for (DiscreteOrderItem doi : bundleOrderItem.getDiscreteOrderItems()) {
if (!checkActive(doi) || !checkInventory(doi) || !checkOtherValidity(orderItem)) {
itemsToRemove.add(doi.getBundleOrderItem());
}
}
}
}
//Remove any giftwrap items who have one or more wrapped item members that have been removed
for (OrderItem orderItem : customerCart.getOrderItems()) {
if (orderItem instanceof GiftWrapOrderItem) {
for (OrderItem wrappedItem : ((GiftWrapOrderItem) orderItem).getWrappedItems()) {
if (itemsToRemove.contains(wrappedItem)) {
itemsToRemove.add(orderItem);
break;
}
}
}
}
for (OrderItem item : itemsToRemove) {
orderService.removeItem(customerCart.getId(), item.getId(), false);
}
reconstructCartResponse.setRemovedItems(itemsToRemove);
customerCart = orderService.save(customerCart, priceOrder);
}
reconstructCartResponse.setOrder(customerCart);
return reconstructCartResponse;
}
protected void setSavedCartAttributes(Order cart) {
SimpleDateFormat sdf = new SimpleDateFormat("MMM dd, ''yy");
Date cartLastUpdated = cart.getAuditable().getDateUpdated();
cart.setName("Previously saved cart - " + sdf.format(cartLastUpdated));
cart.setStatus(OrderStatus.NAMED);
}
protected void setNewCartOwnership(Order cart, Customer customer) {
cart.setCustomer(customer);
// copy the customer's email to this order, overriding any previously set email
if (cart != null && StringUtils.isNotBlank(customer.getEmailAddress())) {
cart.setEmailAddress(customer.getEmailAddress());
}
extensionManager.getProxy().setNewCartOwnership(cart, customer);
}
/**
* @param orderItem
* @return whether or not the discrete order item's sku is active
*/
protected boolean checkActive(DiscreteOrderItem orderItem) {
return orderItem.getSku().isActive(orderItem.getProduct(), orderItem.getCategory());
}
/**
* By default, Broadleaf does not provide an inventory check. This is set up as an extension point if your
* application needs it.
*
* @param orderItem
* @return whether or not the item is in stock
*/
protected boolean checkInventory(DiscreteOrderItem orderItem) {
return true;
}
/**
* By default, Broadleaf does not provide additional validity checks. This is set up as an extension point if your
* application needs it.
*
* @param orderItem
* @return whether or not the orderItem is valid
*/
protected boolean checkOtherValidity(OrderItem orderItem) {
return true;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_MergeCartServiceImpl.java |
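A minimal sketch of how the service above might be invoked when a customer logs in. This is illustrative only: the injected bean, the variable names, and the pricing flag are assumptions, not code from this project.
// Illustrative sketch -- "mergeCartService" is an assumed injected MergeCartService bean.
MergeCartResponse response = mergeCartService.mergeCart(customer, anonymousCart, true);
Order activeCart = response.getOrder(); // the cart the customer should continue shopping with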
3,279 | public final class OrdinalsBuilder implements Closeable {
/**
* Default acceptable overhead ratio. {@link OrdinalsBuilder} memory usage is mostly transient so it is likely a better trade-off to
* trade memory for speed in order to resize less often.
*/
public static final float DEFAULT_ACCEPTABLE_OVERHEAD_RATIO = PackedInts.FAST;
/**
* The following structure is used to store ordinals. The idea is to store ords on levels of increasing sizes. Level 0 stores
* 1 value and 1 pointer to level 1. Level 1 stores 2 values and 1 pointer to level 2, ..., Level n stores 2**n values and
* 1 pointer to level n+1. If at some point an ordinal or a pointer has 0 as a value, this means that there are no remaining
* values. On the first level, ordinals.get(docId) is the first ordinal for docId or 0 if the document has no ordinals. On
* subsequent levels, the first 2^level slots are reserved and all have 0 as a value.
* <pre>
* Example for an index of 3 docs (O=ordinal, P = pointer)
* Level 0:
* ordinals [1] [4] [2]
* nextLevelSlices 2 0 1
* Level 1:
* ordinals [0 0] [2 0] [3 4]
* nextLevelSlices 0 0 1
* Level 2:
* ordinals [0 0 0 0] [5 0 0 0]
* nextLevelSlices 0 0
* </pre>
* On level 0, all documents have an ordinal: 0 has 1, 1 has 4 and 2 has 2 as their first ordinal, which means that we need to read
* nextLevelSlices to get the index of their ordinals on the next level. The entry for document 1 is 0, meaning that we have
* already read all its ordinals. On the contrary, 0 and 2 have more ordinals, which are stored at indices 2 and 1. Let's continue
* with document 2: it has 2 more ordinals on level 1: 3 and 4 and its next level index is 1 meaning that there are remaining
* ordinals on the next level. On level 2 at index 1, we can read [5 0 0 0] meaning that 5 is an ordinal as well, but the
* fact that it is followed by zeros means that there are no more ordinals. In the end, document 2 has 2, 3, 4 and 5 as ordinals.
* <p/>
* In addition to these structures, there is another array which stores the current position (level + slice + offset in the slice)
* in order to be able to append data in constant time.
*/
private static class OrdinalsStore {
private static final int PAGE_SIZE = 1 << 12;
/**
* Number of slots at <code>level</code>
*/
private static int numSlots(int level) {
return 1 << level;
}
private static int slotsMask(int level) {
return numSlots(level) - 1;
}
/**
* Encode the position for the given level and offset. The idea is to encode the level using unary coding in the lower bits and
* then the offset in the higher bits.
*/
private static long position(int level, long offset) {
assert level >= 1;
return (1 << (level - 1)) | (offset << level);
}
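// Worked example (illustrative): position(3, 5) = (1 << 2) | (5 << 3) = 0b101100 = 44;
// decoding gives level(44) = 1 + numberOfTrailingZeros(44) = 3 and offset(44, 3) = 44 >>> 3 = 5.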
/**
* Decode the level from an encoded position.
*/
private static int level(long position) {
return 1 + Long.numberOfTrailingZeros(position);
}
/**
* Decode the offset from the position.
*/
private static long offset(long position, int level) {
return position >>> level;
}
/**
* Get the ID of the slice given an offset.
*/
private static long sliceID(int level, long offset) {
return offset >>> level;
}
/**
* Compute the first offset of the given slice.
*/
private static long startOffset(int level, long slice) {
return slice << level;
}
/**
* Compute the number of ordinals stored for a document given its current position.
*/
private static int numOrdinals(int level, long offset) {
return (1 << level) + (int) (offset & slotsMask(level));
}
// Current position
private PagedGrowableWriter positions;
// First level (0) of ordinals and pointers to the next level
private final GrowableWriter firstOrdinals;
private PagedGrowableWriter firstNextLevelSlices;
// Ordinals and pointers for other levels, starting at 1
private final PagedGrowableWriter[] ordinals;
private final PagedGrowableWriter[] nextLevelSlices;
private final int[] sizes;
private final int startBitsPerValue;
private final float acceptableOverheadRatio;
OrdinalsStore(int maxDoc, int startBitsPerValue, float acceptableOverheadRatio) {
this.startBitsPerValue = startBitsPerValue;
this.acceptableOverheadRatio = acceptableOverheadRatio;
positions = new PagedGrowableWriter(maxDoc, PAGE_SIZE, startBitsPerValue, acceptableOverheadRatio);
firstOrdinals = new GrowableWriter(startBitsPerValue, maxDoc, acceptableOverheadRatio);
// over-allocate so we never need to worry about the array sizes; 24 entries allow storing several million ordinals per doc...
ordinals = new PagedGrowableWriter[24];
nextLevelSlices = new PagedGrowableWriter[24];
sizes = new int[24];
Arrays.fill(sizes, 1); // reserve the 1st slice on every level
}
/**
* Allocate a new slice and return its ID.
*/
private long newSlice(int level) {
final long newSlice = sizes[level]++;
// Lazily allocate ordinals
if (ordinals[level] == null) {
ordinals[level] = new PagedGrowableWriter(8L * numSlots(level), PAGE_SIZE, startBitsPerValue, acceptableOverheadRatio);
} else {
ordinals[level] = ordinals[level].grow(sizes[level] * numSlots(level));
if (nextLevelSlices[level] != null) {
nextLevelSlices[level] = nextLevelSlices[level].grow(sizes[level]);
}
}
return newSlice;
}
public int addOrdinal(int docID, long ordinal) {
final long position = positions.get(docID);
if (position == 0L) { // on the first level
// 0 or 1 ordinal
if (firstOrdinals.get(docID) == 0L) {
firstOrdinals.set(docID, ordinal);
return 1;
} else {
final long newSlice = newSlice(1);
if (firstNextLevelSlices == null) {
firstNextLevelSlices = new PagedGrowableWriter(firstOrdinals.size(), PAGE_SIZE, 3, acceptableOverheadRatio);
}
firstNextLevelSlices.set(docID, newSlice);
final long offset = startOffset(1, newSlice);
ordinals[1].set(offset, ordinal);
positions.set(docID, position(1, offset)); // current position is on the 1st level and not allocated yet
return 2;
}
} else {
int level = level(position);
long offset = offset(position, level);
assert offset != 0L;
if (((offset + 1) & slotsMask(level)) == 0L) {
// reached the end of the slice, allocate a new one on the next level
final long newSlice = newSlice(level + 1);
if (nextLevelSlices[level] == null) {
nextLevelSlices[level] = new PagedGrowableWriter(sizes[level], PAGE_SIZE, 1, acceptableOverheadRatio);
}
nextLevelSlices[level].set(sliceID(level, offset), newSlice);
++level;
offset = startOffset(level, newSlice);
assert (offset & slotsMask(level)) == 0L;
} else {
// just go to the next slot
++offset;
}
ordinals[level].set(offset, ordinal);
final long newPosition = position(level, offset);
positions.set(docID, newPosition);
return numOrdinals(level, offset);
}
}
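// Illustrative walk-through: a doc's 1st ordinal lands in firstOrdinals; the 2nd allocates a 2-slot
// slice on level 1 (the 3rd fills it); the 4th allocates a 4-slot slice on level 2, and so on.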
public void appendOrdinals(int docID, LongsRef ords) {
// First level
final long firstOrd = firstOrdinals.get(docID);
if (firstOrd == 0L) {
return;
}
ords.longs = ArrayUtil.grow(ords.longs, ords.offset + ords.length + 1);
ords.longs[ords.offset + ords.length++] = firstOrd;
if (firstNextLevelSlices == null) {
return;
}
long sliceID = firstNextLevelSlices.get(docID);
if (sliceID == 0L) {
return;
}
// Other levels
for (int level = 1; ; ++level) {
final int numSlots = numSlots(level);
ords.longs = ArrayUtil.grow(ords.longs, ords.offset + ords.length + numSlots);
final long offset = startOffset(level, sliceID);
for (int j = 0; j < numSlots; ++j) {
final long ord = ordinals[level].get(offset + j);
if (ord == 0L) {
return;
}
ords.longs[ords.offset + ords.length++] = ord;
}
if (nextLevelSlices[level] == null) {
return;
}
sliceID = nextLevelSlices[level].get(sliceID);
if (sliceID == 0L) {
return;
}
}
}
}
private final int maxDoc;
private long currentOrd = 0;
private int numDocsWithValue = 0;
private int numMultiValuedDocs = 0;
private int totalNumOrds = 0;
private OrdinalsStore ordinals;
private final LongsRef spare;
public OrdinalsBuilder(long numTerms, int maxDoc, float acceptableOverheadRatio) throws IOException {
this.maxDoc = maxDoc;
int startBitsPerValue = 8;
if (numTerms >= 0) {
startBitsPerValue = PackedInts.bitsRequired(numTerms);
}
ordinals = new OrdinalsStore(maxDoc, startBitsPerValue, acceptableOverheadRatio);
spare = new LongsRef();
}
public OrdinalsBuilder(int maxDoc, float acceptableOverheadRatio) throws IOException {
this(-1, maxDoc, acceptableOverheadRatio);
}
public OrdinalsBuilder(int maxDoc) throws IOException {
this(maxDoc, DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
}
/**
* Returns a shared {@link LongsRef} instance for the given doc ID holding all ordinals associated with it.
*/
public LongsRef docOrds(int docID) {
spare.offset = spare.length = 0;
ordinals.appendOrdinals(docID, spare);
return spare;
}
/**
* Returns a {@link PackedInts.Reader} instance mapping every doc ID to its first ordinal if it exists and 0 otherwise.
*/
public PackedInts.Reader getFirstOrdinals() {
return ordinals.firstOrdinals;
}
/**
* Advances the {@link OrdinalsBuilder} to the next ordinal and
* returns the current ordinal.
*/
public long nextOrdinal() {
return ++currentOrd;
}
/**
* Returns the current ordinal or <tt>0</tt> if this builder has not been advanced via
* {@link #nextOrdinal()}.
*/
public long currentOrdinal() {
return currentOrd;
}
/**
* Associates the given document id with the current ordinal.
*/
public OrdinalsBuilder addDoc(int doc) {
totalNumOrds++;
final int numValues = ordinals.addOrdinal(doc, currentOrd);
if (numValues == 1) {
++numDocsWithValue;
} else if (numValues == 2) {
++numMultiValuedDocs;
}
return this;
}
/**
* Returns <code>true</code> iff this builder contains a document ID that is associated with more than one ordinal; otherwise <code>false</code>.
*/
public boolean isMultiValued() {
return numMultiValuedDocs > 0;
}
/**
* Returns the number of distinct document IDs with one or more values.
*/
public int getNumDocsWithValue() {
return numDocsWithValue;
}
/**
* Returns the number of distinct document IDs associated with exactly one value.
*/
public int getNumSingleValuedDocs() {
return numDocsWithValue - numMultiValuedDocs;
}
/**
* Returns the number of distinct document IDs associated with two or more values.
*/
public int getNumMultiValuesDocs() {
return numMultiValuedDocs;
}
/**
* Returns the number of document ID to ordinal pairs in this builder.
*/
public int getTotalNumOrds() {
return totalNumOrds;
}
/**
* Returns the number of distinct ordinals in this builder.
*/
public long getNumOrds() {
return currentOrd;
}
/**
* Builds a {@link FixedBitSet} in which a document's bit is set iff the document has one or more ordinals associated with it.
* If every document has an ordinal associated with it, this method returns <code>null</code>.
*/
public FixedBitSet buildDocsWithValuesSet() {
if (numDocsWithValue == maxDoc) {
return null;
}
final FixedBitSet bitSet = new FixedBitSet(maxDoc);
for (int docID = 0; docID < maxDoc; ++docID) {
if (ordinals.firstOrdinals.get(docID) != 0) {
bitSet.set(docID);
}
}
return bitSet;
}
/**
* Builds an {@link Ordinals} instance from the builders current state.
*/
public Ordinals build(Settings settings) {
final float acceptableOverheadRatio = settings.getAsFloat("acceptable_overhead_ratio", PackedInts.FASTEST);
if (numMultiValuedDocs > 0 || MultiOrdinals.significantlySmallerThanSinglePackedOrdinals(maxDoc, numDocsWithValue, getNumOrds(), acceptableOverheadRatio)) {
// MultiOrdinals can be smaller than SinglePackedOrdinals for sparse fields
return new MultiOrdinals(this, acceptableOverheadRatio);
} else {
return new SinglePackedOrdinals(this, acceptableOverheadRatio);
}
}
/**
* Returns the maximum document ID this builder can associate with an ordinal
*/
public int maxDoc() {
return maxDoc;
}
/**
* A {@link TermsEnum} that iterates only full precision prefix coded 64 bit values.
*
* @see #buildFromTerms(TermsEnum, Bits)
*/
public static TermsEnum wrapNumeric64Bit(TermsEnum termsEnum) {
return new FilteredTermsEnum(termsEnum, false) {
@Override
protected AcceptStatus accept(BytesRef term) throws IOException {
// we stop accepting terms once we moved across the prefix codec terms - redundant values!
return NumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
}
};
}
/**
* A {@link TermsEnum} that iterates only full precision prefix coded 32 bit values.
*
* @see #buildFromTerms(TermsEnum, Bits)
*/
public static TermsEnum wrapNumeric32Bit(TermsEnum termsEnum) {
return new FilteredTermsEnum(termsEnum, false) {
@Override
protected AcceptStatus accept(BytesRef term) throws IOException {
// we stop accepting terms once we moved across the prefix codec terms - redundant values!
return NumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
}
};
}
/**
* This method iterates all terms in the given {@link TermsEnum} and
* associates each term's ordinal with the term's documents. The caller must
* exhaust the returned {@link BytesRefIterator}, which returns all values
* where the first returned value is associated with the ordinal <tt>1</tt>,
* etc.
* <p>
* If the {@link TermsEnum} contains prefix coded numerical values the terms
* enum should be wrapped with either {@link #wrapNumeric32Bit(TermsEnum)}
* or {@link #wrapNumeric64Bit(TermsEnum)} depending on its precision. If
* the {@link TermsEnum} is not wrapped the returned
* {@link BytesRefIterator} will contain partial precision terms rather than
* only full-precision terms.
* </p>
*/
public BytesRefIterator buildFromTerms(final TermsEnum termsEnum) throws IOException {
return new BytesRefIterator() {
private DocsEnum docsEnum = null;
@Override
public BytesRef next() throws IOException {
BytesRef ref;
if ((ref = termsEnum.next()) != null) {
docsEnum = termsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
nextOrdinal();
int docId;
while ((docId = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
addDoc(docId);
}
}
return ref;
}
@Override
public Comparator<BytesRef> getComparator() {
return termsEnum.getComparator();
}
};
}
/**
* Closes this builder and releases all resources.
*/
@Override
public void close() throws IOException {
ordinals = null;
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_ordinals_OrdinalsBuilder.java |
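A minimal usage sketch for the builder above, assuming a Lucene 4.x AtomicReader named "reader" and a field named "tag" (both illustrative assumptions); exception handling is elided.
// Illustrative sketch -- the reader, field name, and settings are assumptions.
Terms terms = reader.terms("tag");
OrdinalsBuilder builder = new OrdinalsBuilder(terms.size(), reader.maxDoc(), OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
try {
BytesRefIterator iter = builder.buildFromTerms(terms.iterator(null));
while (iter.next() != null) {
// the first term returned maps to ordinal 1, the second to ordinal 2, ...
}
Ordinals ordinals = builder.build(ImmutableSettings.settingsBuilder().build());
} finally {
builder.close();
}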
247 | @Category(SerialTests.class)
public class AstyanaxLogTest extends KCVSLogTest {
@BeforeClass
public static void startCassandra() {
CassandraStorageSetup.startCleanEmbedded();
}
@Override
public KeyColumnValueStoreManager openStorageManager() throws BackendException {
return new AstyanaxStoreManager(CassandraStorageSetup.getAstyanaxConfiguration(getClass().getSimpleName()));
}
} | 0true
| titan-cassandra_src_test_java_com_thinkaurelius_titan_diskstorage_cassandra_astyanax_AstyanaxLogTest.java |
908 | public abstract class AbstractBaseProcessor implements BaseProcessor {
private static final Log LOG = LogFactory.getLog(AbstractBaseProcessor.class);
private static final Map EXPRESSION_CACHE = new LRUMap(1000);
@Resource(name = "blOfferTimeZoneProcessor")
protected OfferTimeZoneProcessor offerTimeZoneProcessor;
protected CandidatePromotionItems couldOfferApplyToOrderItems(Offer offer, List<PromotableOrderItem> promotableOrderItems) {
CandidatePromotionItems candidates = new CandidatePromotionItems();
if (offer.getQualifyingItemCriteria() == null || offer.getQualifyingItemCriteria().size() == 0) {
candidates.setMatchedQualifier(true);
} else {
for (OfferItemCriteria criteria : offer.getQualifyingItemCriteria()) {
checkForItemRequirements(candidates, criteria, promotableOrderItems, true);
if (!candidates.isMatchedQualifier()) {
break;
}
}
}
if (offer.getType().equals(OfferType.ORDER_ITEM) && offer.getTargetItemCriteria() != null) {
for (OfferItemCriteria criteria : offer.getTargetItemCriteria()) {
checkForItemRequirements(candidates, criteria, promotableOrderItems, false);
if (!candidates.isMatchedTarget()) {
break;
}
}
}
if (candidates.isMatchedQualifier()) {
if (! meetsItemQualifierSubtotal(offer, candidates)) {
candidates.setMatchedQualifier(false);
}
}
return candidates;
}
private boolean isEmpty(Collection<? extends Object> collection) {
return (collection == null || collection.size() == 0);
}
private boolean hasPositiveValue(Money money) {
return (money != null && money.greaterThan(Money.ZERO));
}
protected boolean meetsItemQualifierSubtotal(Offer offer, CandidatePromotionItems candidateItem) {
Money qualifyingSubtotal = offer.getQualifyingItemSubTotal();
if (! hasPositiveValue(qualifyingSubtotal)) {
if (LOG.isTraceEnabled()) {
LOG.trace("Offer " + offer.getName() + " does not have an item subtotal requirement.");
}
return true;
}
if (isEmpty(offer.getQualifyingItemCriteria())) {
if (OfferType.ORDER_ITEM.equals(offer.getType())) {
if (LOG.isWarnEnabled()) {
LOG.warn("Offer " + offer.getName() + " has a subtotal item requirement but no item qualification criteria.");
}
return false;
} else {
// Checking if targets meet subtotal for item offer with no item criteria.
Money accumulatedTotal = null;
for (PromotableOrderItem orderItem : candidateItem.getAllCandidateTargets()) {
Money itemPrice = orderItem.getCurrentBasePrice().multiply(orderItem.getQuantity());
accumulatedTotal = accumulatedTotal == null ? itemPrice : accumulatedTotal.add(itemPrice);
if (accumulatedTotal.greaterThan(qualifyingSubtotal)) {
if (LOG.isTraceEnabled()) {
LOG.trace("Offer " + offer.getName() + " meets qualifying item subtotal.");
}
return true;
}
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("Offer " + offer.getName() + " does not meet qualifying item subtotal.");
}
} else {
if (candidateItem.getCandidateQualifiersMap() != null) {
Money accumulatedTotal = null;
Set<PromotableOrderItem> usedItems = new HashSet<PromotableOrderItem>();
for (OfferItemCriteria criteria : candidateItem.getCandidateQualifiersMap().keySet()) {
List<PromotableOrderItem> promotableItems = candidateItem.getCandidateQualifiersMap().get(criteria);
if (promotableItems != null) {
for (PromotableOrderItem item : promotableItems) {
if (!usedItems.contains(item)) {
usedItems.add(item);
Money itemPrice = item.getCurrentBasePrice().multiply(item.getQuantity());
accumulatedTotal = accumulatedTotal == null ? itemPrice : accumulatedTotal.add(itemPrice);
if (accumulatedTotal.greaterThan(qualifyingSubtotal)) {
if (LOG.isTraceEnabled()) {
LOG.trace("Offer " + offer.getName() + " meets the item subtotal requirement.");
}
return true;
}
}
}
}
}
}
}
if (LOG.isTraceEnabled()) {
LOG.trace("Offer " + offer.getName() + " does not meet the item subtotal qualifications.");
}
return false;
}
protected void checkForItemRequirements(CandidatePromotionItems candidates, OfferItemCriteria criteria, List<PromotableOrderItem> promotableOrderItems, boolean isQualifier) {
boolean matchFound = false;
int criteriaQuantity = criteria.getQuantity();
if (criteriaQuantity > 0) {
// If matches are found, add the candidate items to a list and store it with the itemCriteria
// for this promotion.
for (PromotableOrderItem item : promotableOrderItems) {
if (couldOrderItemMeetOfferRequirement(criteria, item)) {
if (isQualifier) {
candidates.addQualifier(criteria, item);
} else {
candidates.addTarget(criteria, item);
}
matchFound = true;
}
}
}
if (isQualifier) {
candidates.setMatchedQualifier(matchFound);
} else {
candidates.setMatchedTarget(matchFound);
}
}
protected boolean couldOrderItemMeetOfferRequirement(OfferItemCriteria criteria, PromotableOrderItem orderItem) {
boolean appliesToItem = false;
if (criteria.getMatchRule() != null && criteria.getMatchRule().trim().length() != 0) {
HashMap<String, Object> vars = new HashMap<String, Object>();
orderItem.updateRuleVariables(vars);
Boolean expressionOutcome = executeExpression(criteria.getMatchRule(), vars);
if (expressionOutcome != null && expressionOutcome) {
appliesToItem = true;
}
} else {
appliesToItem = true;
}
return appliesToItem;
}
/**
* Executes the given MVEL expression against the supplied variables to determine whether a rule
* matches. Compiled expressions are cached so repeated evaluations skip re-parsing.
*
* @param expression
* @param vars
* @return a Boolean object containing the result of executing the MVEL expression
*/
public Boolean executeExpression(String expression, Map<String, Object> vars) {
try {
Serializable exp;
synchronized (EXPRESSION_CACHE) {
exp = (Serializable) EXPRESSION_CACHE.get(expression);
if (exp == null) {
ParserContext context = new ParserContext();
context.addImport("OfferType", OfferType.class);
context.addImport("FulfillmentType", FulfillmentType.class);
context.addImport("MVEL", MVEL.class);
context.addImport("MvelHelper", MvelHelper.class);
exp = MVEL.compileExpression(expression, context);
EXPRESSION_CACHE.put(expression, exp);
}
}
Object test = MVEL.executeExpression(exp, vars);
return (Boolean) test;
} catch (Exception e) {
//Unable to execute the MVEL expression for some reason
//Return false, but notify about the bad expression through logs
LOG.warn("Unable to parse and/or execute an mvel expression. Reporting to the logs and returning false " +
"for the match expression:" + expression, e);
return false;
}
}
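// Illustrative call (the rule text below is an assumption, not taken from this codebase):
// vars.put("customer", customer);
// Boolean matched = executeExpression("customer.emailAddress != null", vars);
// Because compiled expressions are cached, repeated evaluations of the same rule skip re-parsing.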
/**
* We were not able to meet all of the ItemCriteria for a promotion, but some of the items were
* marked as qualifiers or targets. This method removes those items from being used as targets or
* qualifiers so they are eligible for other promotions.
* @param priceDetails
*/
protected void clearAllNonFinalizedQuantities(List<PromotableOrderItemPriceDetail> priceDetails) {
for (PromotableOrderItemPriceDetail priceDetail : priceDetails) {
priceDetail.clearAllNonFinalizedQuantities();
}
}
/**
* Updates the finalQuantities for the PromotionDiscounts and PromotionQualifiers.
* Called after we have confirmed enough qualifiers and targets for the promotion.
* @param priceDetails
*/
protected void finalizeQuantities(List<PromotableOrderItemPriceDetail> priceDetails) {
for (PromotableOrderItemPriceDetail priceDetail : priceDetails) {
priceDetail.finalizeQuantities();
}
}
/**
* Checks to see if the discountQty matches the detailQty. If not, splits the
* priceDetail.
*
* @param priceDetails
*/
protected void splitDetailsIfNecessary(List<PromotableOrderItemPriceDetail> priceDetails) {
for (PromotableOrderItemPriceDetail priceDetail : priceDetails) {
PromotableOrderItemPriceDetail splitDetail = priceDetail.splitIfNecessary();
if (splitDetail != null) {
priceDetail.getPromotableOrderItem().getPromotableOrderItemPriceDetails().add(splitDetail);
}
}
}
@Override
public List<Offer> filterOffers(List<Offer> offers, Customer customer) {
List<Offer> filteredOffers = new ArrayList<Offer>();
if (offers != null && !offers.isEmpty()) {
filteredOffers = removeOutOfDateOffers(offers);
filteredOffers = removeTimePeriodOffers(filteredOffers);
filteredOffers = removeInvalidRequestOffers(filteredOffers);
filteredOffers = removeInvalidCustomerOffers(filteredOffers, customer);
}
return filteredOffers;
}
protected List<Offer> removeInvalidRequestOffers(List<Offer> offers) {
RequestDTO requestDTO = null;
if (BroadleafRequestContext.getBroadleafRequestContext() != null) {
requestDTO = BroadleafRequestContext.getBroadleafRequestContext().getRequestDTO();
}
List<Offer> offersToRemove = new ArrayList<Offer>();
for (Offer offer : offers) {
if (!couldOfferApplyToRequestDTO(offer, requestDTO)) {
offersToRemove.add(offer);
}
}
// remove all offers in the offersToRemove list from original offers list
for (Offer offer : offersToRemove) {
offers.remove(offer);
}
return offers;
}
protected boolean couldOfferApplyToRequestDTO(Offer offer, RequestDTO requestDTO) {
boolean appliesToRequestRule = false;
String rule = null;
OfferRule requestRule = offer.getOfferMatchRules().get(OfferRuleType.REQUEST.getType());
if (requestRule != null) {
rule = requestRule.getMatchRule();
}
if (rule != null) {
HashMap<String, Object> vars = new HashMap<String, Object>();
vars.put("request", requestDTO);
Boolean expressionOutcome = executeExpression(rule, vars);
if (expressionOutcome != null && expressionOutcome) {
appliesToRequestRule = true;
}
} else {
appliesToRequestRule = true;
}
return appliesToRequestRule;
}
/**
* Removes all offers that do not fall within the time zone and time period of the offer.
* If an offer's time rule does not match the current time in the offer's time zone,
* that offer will be removed.
*
* @param offers
* @return List of Offers within the time zone and time period of the offer
*/
protected List<Offer> removeTimePeriodOffers(List<Offer> offers) {
List<Offer> offersToRemove = new ArrayList<Offer>();
for (Offer offer : offers) {
if (!couldOfferApplyToTimePeriod(offer)) {
offersToRemove.add(offer);
}
}
// remove all offers in the offersToRemove list from original offers list
for (Offer offer : offersToRemove) {
offers.remove(offer);
}
return offers;
}
protected boolean couldOfferApplyToTimePeriod(Offer offer) {
boolean appliesToTimePeriod = false;
String rule = null;
OfferRule timeRule = offer.getOfferMatchRules().get(OfferRuleType.TIME.getType());
if (timeRule != null) {
rule = timeRule.getMatchRule();
}
if (rule != null) {
TimeZone timeZone = getOfferTimeZoneProcessor().getTimeZone(offer);
TimeDTO timeDto = new TimeDTO(SystemTime.asCalendar(timeZone));
HashMap<String, Object> vars = new HashMap<String, Object>();
vars.put("time", timeDto);
Boolean expressionOutcome = executeExpression(rule, vars);
if (expressionOutcome != null && expressionOutcome) {
appliesToTimePeriod = true;
}
} else {
appliesToTimePeriod = true;
}
return appliesToTimePeriod;
}
/**
* Removes all out-of-date offers. If an offer does not have a start date, or the start
* date is in the future, that offer will be removed. Offers without a start date should
* not be processed. If the offer has an end date that has already passed, that offer
* will be removed. Offers without an end date will be processed if the start date
* is prior to the transaction date.
*
* @param offers
* @return List of Offers with valid dates
*/
protected List<Offer> removeOutOfDateOffers(List<Offer> offers){
List<Offer> offersToRemove = new ArrayList<Offer>();
for (Offer offer : offers) {
TimeZone timeZone = getOfferTimeZoneProcessor().getTimeZone(offer);
Calendar current = timeZone == null ? SystemTime.asCalendar() : SystemTime.asCalendar(timeZone);
Calendar start = null;
if (offer.getStartDate() != null) {
LocalDateTime startDate = new LocalDateTime(offer.getStartDate());
start = timeZone == null ? new GregorianCalendar() : new GregorianCalendar(timeZone);
start.set(Calendar.YEAR, startDate.getYear());
start.set(Calendar.MONTH, startDate.getMonthOfYear() - 1);
start.set(Calendar.DAY_OF_MONTH, startDate.getDayOfMonth());
start.set(Calendar.HOUR_OF_DAY, startDate.getHourOfDay());
start.set(Calendar.MINUTE, startDate.getMinuteOfHour());
start.set(Calendar.SECOND, startDate.getSecondOfMinute());
start.get(Calendar.HOUR_OF_DAY); // forces the Calendar to recompute its fields after the set() calls; do not delete
start.get(Calendar.MINUTE);
if (LOG.isTraceEnabled()) {
LOG.debug("Offer: " + offer.getName() + " timeZone:" + timeZone + " startTime:" + start.getTime() + " currentTime:" + current.getTime());
}
}
Calendar end = null;
if (offer.getEndDate() != null) {
LocalDateTime endDate = new LocalDateTime(offer.getEndDate());
end = timeZone == null ? new GregorianCalendar() : new GregorianCalendar(timeZone);
end.set(Calendar.YEAR, endDate.getYear());
end.set(Calendar.MONTH, endDate.getMonthOfYear() - 1);
end.set(Calendar.DAY_OF_MONTH, endDate.getDayOfMonth());
end.set(Calendar.HOUR_OF_DAY, endDate.getHourOfDay());
end.set(Calendar.MINUTE, endDate.getMinuteOfHour());
end.set(Calendar.SECOND, endDate.getSecondOfMinute());
end.get(Calendar.HOUR_OF_DAY); // forces the Calendar to recompute its fields after the set() calls; do not delete
end.get(Calendar.MINUTE);
if (LOG.isTraceEnabled()) {
LOG.debug("Offer: " + offer.getName() + " endTime:" + start.getTime());
}
}
if ((offer.getStartDate() == null) || (start.after(current))) {
offersToRemove.add(offer);
} else if (offer.getEndDate() != null && end.before(current)) {
offersToRemove.add(offer);
}
}
// remove all offers in the offersToRemove list from original offers list
for (Offer offer : offersToRemove) {
offers.remove(offer);
}
return offers;
}
/**
* Private method that takes in a list of Offers and removes all Offers from the list that
* does not apply to this customer.
*
* @param offers
* @param customer
* @return List of Offers that apply to this customer
*/
protected List<Offer> removeInvalidCustomerOffers(List<Offer> offers, Customer customer){
List<Offer> offersToRemove = new ArrayList<Offer>();
for (Offer offer : offers) {
if (!couldOfferApplyToCustomer(offer, customer)) {
offersToRemove.add(offer);
}
}
// remove all offers in the offersToRemove list from original offers list
for (Offer offer : offersToRemove) {
offers.remove(offer);
}
return offers;
}
/**
* Executes the appliesToCustomerRules in the Offer to determine if this Offer
* can be applied to the Customer.
*
* @param offer
* @param customer
* @return true if offer can be applied, otherwise false
*/
protected boolean couldOfferApplyToCustomer(Offer offer, Customer customer) {
boolean appliesToCustomer = false;
String rule = null;
if (!StringUtils.isEmpty(offer.getAppliesToCustomerRules())) {
rule = offer.getAppliesToCustomerRules();
} else {
OfferRule customerRule = offer.getOfferMatchRules().get(OfferRuleType.CUSTOMER.getType());
if (customerRule != null) {
rule = customerRule.getMatchRule();
}
}
if (rule != null) {
HashMap<String, Object> vars = new HashMap<String, Object>();
vars.put("customer", customer);
Boolean expressionOutcome = executeExpression(rule, vars);
if (expressionOutcome != null && expressionOutcome) {
appliesToCustomer = true;
}
} else {
appliesToCustomer = true;
}
return appliesToCustomer;
}
public OfferTimeZoneProcessor getOfferTimeZoneProcessor() {
return offerTimeZoneProcessor;
}
public void setOfferTimeZoneProcessor(OfferTimeZoneProcessor offerTimeZoneProcessor) {
this.offerTimeZoneProcessor = offerTimeZoneProcessor;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_processor_AbstractBaseProcessor.java |
1,070 | public class TransportUpdateAction extends TransportInstanceSingleOperationAction<UpdateRequest, UpdateResponse> {
private final TransportDeleteAction deleteAction;
private final TransportIndexAction indexAction;
private final AutoCreateIndex autoCreateIndex;
private final TransportCreateIndexAction createIndexAction;
private final UpdateHelper updateHelper;
@Inject
public TransportUpdateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
TransportIndexAction indexAction, TransportDeleteAction deleteAction, TransportCreateIndexAction createIndexAction,
UpdateHelper updateHelper) {
super(settings, threadPool, clusterService, transportService);
this.indexAction = indexAction;
this.deleteAction = deleteAction;
this.createIndexAction = createIndexAction;
this.updateHelper = updateHelper;
this.autoCreateIndex = new AutoCreateIndex(settings);
}
@Override
protected String transportAction() {
return UpdateAction.NAME;
}
@Override
protected String executor() {
return ThreadPool.Names.INDEX;
}
@Override
protected UpdateRequest newRequest() {
return new UpdateRequest();
}
@Override
protected UpdateResponse newResponse() {
return new UpdateResponse();
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, UpdateRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, UpdateRequest request) {
return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index());
}
@Override
protected boolean retryOnFailure(Throwable e) {
return TransportActions.isShardNotAvailableException(e);
}
@Override
protected boolean resolveRequest(ClusterState state, UpdateRequest request, ActionListener<UpdateResponse> listener) {
MetaData metaData = clusterService.state().metaData();
String aliasOrIndex = request.index();
request.routing(metaData.resolveIndexRouting(request.routing(), aliasOrIndex));
request.index(metaData.concreteIndex(request.index()));
// Fail fast on the node that received the request, rather than failing when translating on the index or delete request.
if (request.routing() == null && state.getMetaData().routingRequired(request.index(), request.type())) {
throw new RoutingMissingException(request.index(), request.type(), request.id());
}
return true;
}
@Override
protected void doExecute(final UpdateRequest request, final ActionListener<UpdateResponse> listener) {
// if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API
if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) {
request.beforeLocalFork(); // we fork on another thread...
createIndexAction.execute(new CreateIndexRequest(request.index()).cause("auto(update api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
@Override
public void onResponse(CreateIndexResponse result) {
innerExecute(request, listener);
}
@Override
public void onFailure(Throwable e) {
if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
// we have the index, do it
try {
innerExecute(request, listener);
} catch (Throwable e1) {
listener.onFailure(e1);
}
} else {
listener.onFailure(e);
}
}
});
} else {
innerExecute(request, listener);
}
}
private void innerExecute(final UpdateRequest request, final ActionListener<UpdateResponse> listener) {
super.doExecute(request, listener);
}
@Override
protected ShardIterator shards(ClusterState clusterState, UpdateRequest request) throws ElasticsearchException {
if (request.shardId() != -1) {
return clusterState.routingTable().index(request.index()).shard(request.shardId()).primaryShardIt();
}
ShardIterator shardIterator = clusterService.operationRouting()
.indexShards(clusterService.state(), request.index(), request.type(), request.id(), request.routing());
ShardRouting shard;
while ((shard = shardIterator.nextOrNull()) != null) {
if (shard.primary()) {
return new PlainShardIterator(shardIterator.shardId(), ImmutableList.of(shard));
}
}
return new PlainShardIterator(shardIterator.shardId(), ImmutableList.<ShardRouting>of());
}
@Override
protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener) throws ElasticsearchException {
shardOperation(request, listener, 0);
}
protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener, final int retryCount) throws ElasticsearchException {
final UpdateHelper.Result result = updateHelper.prepare(request);
switch (result.operation()) {
case UPSERT:
IndexRequest upsertRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice, it's already done in the index request
final BytesReference upsertSourceBytes = upsertRequest.source();
indexAction.execute(upsertRequest, new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse response) {
UpdateResponse update = new UpdateResponse(response.getIndex(), response.getType(), response.getId(), response.getVersion(), response.isCreated());
if (request.fields() != null && request.fields().length > 0) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true);
update.setGetResult(updateHelper.extractGetResult(request, response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
} else {
update.setGetResult(null);
}
listener.onResponse(update);
}
@Override
public void onFailure(Throwable e) {
e = ExceptionsHelper.unwrapCause(e);
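// on a version conflict (or a concurrent create, for upserts), re-run the whole update while retries remain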
if (e instanceof VersionConflictEngineException || e instanceof DocumentAlreadyExistsException) {
if (retryCount < request.retryOnConflict()) {
threadPool.executor(executor()).execute(new Runnable() {
@Override
public void run() {
shardOperation(request, listener, retryCount + 1);
}
});
return;
}
}
listener.onFailure(e);
}
});
break;
case INDEX:
IndexRequest indexRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice, it's already done in the index request
final BytesReference indexSourceBytes = indexRequest.source();
indexAction.execute(indexRequest, new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse response) {
UpdateResponse update = new UpdateResponse(response.getIndex(), response.getType(), response.getId(), response.getVersion(), response.isCreated());
update.setGetResult(updateHelper.extractGetResult(request, response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes));
listener.onResponse(update);
}
@Override
public void onFailure(Throwable e) {
e = ExceptionsHelper.unwrapCause(e);
if (e instanceof VersionConflictEngineException) {
if (retryCount < request.retryOnConflict()) {
threadPool.executor(executor()).execute(new Runnable() {
@Override
public void run() {
shardOperation(request, listener, retryCount + 1);
}
});
return;
}
}
listener.onFailure(e);
}
});
break;
case DELETE:
DeleteRequest deleteRequest = result.action();
deleteAction.execute(deleteRequest, new ActionListener<DeleteResponse>() {
@Override
public void onResponse(DeleteResponse response) {
UpdateResponse update = new UpdateResponse(response.getIndex(), response.getType(), response.getId(), response.getVersion(), false);
update.setGetResult(updateHelper.extractGetResult(request, response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null));
listener.onResponse(update);
}
@Override
public void onFailure(Throwable e) {
e = ExceptionsHelper.unwrapCause(e);
if (e instanceof VersionConflictEngineException) {
if (retryCount < request.retryOnConflict()) {
threadPool.executor(executor()).execute(new Runnable() {
@Override
public void run() {
shardOperation(request, listener, retryCount + 1);
}
});
return;
}
}
listener.onFailure(e);
}
});
break;
case NONE:
UpdateResponse update = result.action();
listener.onResponse(update);
break;
default:
throw new ElasticsearchIllegalStateException("Illegal operation " + result.operation());
}
}
} | 1no label
| src_main_java_org_elasticsearch_action_update_TransportUpdateAction.java |
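For context, a client-side request that exercises the retry path above could look like the following sketch ("client", the index/type/id, and the document are illustrative assumptions):
// Illustrative sketch -- "client" is an assumed org.elasticsearch.client.Client instance.
UpdateResponse response = client.prepareUpdate("orders", "order", "1")
.setDoc("{\"status\":\"shipped\"}")
.setRetryOnConflict(3) // re-run the update up to 3 times on version conflicts
.get();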
2,387 | public class MemberStateImpl implements MemberState {
public static final int DEFAULT_PARTITION_COUNT = 271;
Address address = new Address();
Map<String, Long> runtimeProps = new HashMap<String, Long>();
Map<String, LocalMapStatsImpl> mapStats = new HashMap<String, LocalMapStatsImpl>();
Map<String, LocalMultiMapStatsImpl> multiMapStats = new HashMap<String, LocalMultiMapStatsImpl>();
Map<String, LocalQueueStatsImpl> queueStats = new HashMap<String, LocalQueueStatsImpl>();
Map<String, LocalTopicStatsImpl> topicStats = new HashMap<String, LocalTopicStatsImpl>();
Map<String, LocalExecutorStatsImpl> executorStats = new HashMap<String, LocalExecutorStatsImpl>();
List<Integer> partitions = new ArrayList<Integer>(DEFAULT_PARTITION_COUNT);
@Override
public void writeData(ObjectDataOutput out) throws IOException {
address.writeData(out);
out.writeInt(mapStats.size());
for (Map.Entry<String, LocalMapStatsImpl> entry : mapStats.entrySet()) {
out.writeUTF(entry.getKey());
entry.getValue().writeData(out);
}
out.writeInt(multiMapStats.size());
for (Map.Entry<String, LocalMultiMapStatsImpl> entry : multiMapStats.entrySet()) {
out.writeUTF(entry.getKey());
entry.getValue().writeData(out);
}
out.writeInt(queueStats.size());
for (Map.Entry<String, LocalQueueStatsImpl> entry : queueStats.entrySet()) {
out.writeUTF(entry.getKey());
entry.getValue().writeData(out);
}
out.writeInt(topicStats.size());
for (Map.Entry<String, LocalTopicStatsImpl> entry : topicStats.entrySet()) {
out.writeUTF(entry.getKey());
entry.getValue().writeData(out);
}
out.writeInt(executorStats.size());
for (Map.Entry<String, LocalExecutorStatsImpl> entry : executorStats.entrySet()) {
out.writeUTF(entry.getKey());
entry.getValue().writeData(out);
}
out.writeInt(runtimeProps.size());
for (Map.Entry<String, Long> entry : runtimeProps.entrySet()) {
out.writeUTF(entry.getKey());
out.writeLong(entry.getValue());
}
out.writeInt(partitions.size());
for (Integer lsPartition : partitions) {
out.writeInt(lsPartition);
}
}
@Override
public void readData(ObjectDataInput in) throws IOException {
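// NOTE: the read order below must mirror writeData above exactly, field for field.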
address.readData(in);
for (int i = in.readInt(); i > 0; i--) {
String name = in.readUTF();
LocalMapStatsImpl impl = new LocalMapStatsImpl();
impl.readData(in);
mapStats.put(name, impl);
}
for (int i = in.readInt(); i > 0; i--) {
String name = in.readUTF();
LocalMultiMapStatsImpl impl = new LocalMultiMapStatsImpl();
impl.readData(in);
multiMapStats.put(name, impl);
}
for (int i = in.readInt(); i > 0; i--) {
String name = in.readUTF();
LocalQueueStatsImpl impl = new LocalQueueStatsImpl();
impl.readData(in);
queueStats.put(name, impl);
}
for (int i = in.readInt(); i > 0; i--) {
String name = in.readUTF();
LocalTopicStatsImpl impl = new LocalTopicStatsImpl();
impl.readData(in);
topicStats.put(name, impl);
}
for (int i = in.readInt(); i > 0; i--) {
String name = in.readUTF();
LocalExecutorStatsImpl impl = new LocalExecutorStatsImpl();
impl.readData(in);
executorStats.put(name, impl);
}
for (int i = in.readInt(); i > 0; i--) {
String name = in.readUTF();
runtimeProps.put(name, in.readLong());
}
for (int i = in.readInt(); i > 0; i--) {
partitions.add(in.readInt());
}
}
public void clearPartitions() {
partitions.clear();
}
public void addPartition(int partitionId) {
partitions.add(partitionId);
}
@Override
public List<Integer> getPartitions() {
return partitions;
}
@Override
public Map<String, Long> getRuntimeProps() {
return runtimeProps;
}
public void setRuntimeProps(Map<String, Long> runtimeProps) {
this.runtimeProps = runtimeProps;
}
@Override
public LocalMapStats getLocalMapStats(String mapName) {
return mapStats.get(mapName);
}
@Override
public LocalMultiMapStats getLocalMultiMapStats(String mapName) {
return multiMapStats.get(mapName);
}
@Override
public LocalQueueStats getLocalQueueStats(String queueName) {
return queueStats.get(queueName);
}
@Override
public LocalTopicStats getLocalTopicStats(String topicName) {
return topicStats.get(topicName);
}
@Override
public LocalExecutorStats getLocalExecutorStats(String executorName) {
return executorStats.get(executorName);
}
@Override
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
public void putLocalMapStats(String name, LocalMapStatsImpl localMapStats) {
mapStats.put(name, localMapStats);
}
public void putLocalMultiMapStats(String name, LocalMultiMapStatsImpl localMultiMapStats) {
multiMapStats.put(name, localMultiMapStats);
}
public void putLocalQueueStats(String name, LocalQueueStatsImpl localQueueStats) {
queueStats.put(name, localQueueStats);
}
public void putLocalTopicStats(String name, LocalTopicStatsImpl localTopicStats) {
topicStats.put(name, localTopicStats);
}
public void putLocalExecutorStats(String name, LocalExecutorStatsImpl localExecutorStats) {
executorStats.put(name, localExecutorStats);
}
@Override
public int hashCode() {
int result = address != null ? address.hashCode() : 0;
result = 31 * result + (mapStats != null ? mapStats.hashCode() : 0);
result = 31 * result + (multiMapStats != null ? multiMapStats.hashCode() : 0);
result = 31 * result + (queueStats != null ? queueStats.hashCode() : 0);
result = 31 * result + (topicStats != null ? topicStats.hashCode() : 0);
result = 31 * result + (executorStats != null ? executorStats.hashCode() : 0);
result = 31 * result + (partitions != null ? partitions.hashCode() : 0);
return result;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
MemberStateImpl that = (MemberStateImpl) o;
if (address != null ? !address.equals(that.address) : that.address != null) {
return false;
}
if (executorStats != null ? !executorStats.equals(that.executorStats) : that.executorStats != null) {
return false;
}
if (mapStats != null ? !mapStats.equals(that.mapStats) : that.mapStats != null) {
return false;
}
if (multiMapStats != null ? !multiMapStats.equals(that.multiMapStats) : that.multiMapStats != null) {
return false;
}
if (partitions != null ? !partitions.equals(that.partitions) : that.partitions != null) {
return false;
}
if (queueStats != null ? !queueStats.equals(that.queueStats) : that.queueStats != null) {
return false;
}
if (runtimeProps != null ? !runtimeProps.equals(that.runtimeProps) : that.runtimeProps != null) {
return false;
}
if (topicStats != null ? !topicStats.equals(that.topicStats) : that.topicStats != null) {
return false;
}
return true;
}
@Override
public String toString() {
return "MemberStateImpl{"
+ "address=" + address
+ ", runtimeProps=" + runtimeProps
+ ", mapStats=" + mapStats
+ ", multiMapStats=" + multiMapStats
+ ", queueStats=" + queueStats
+ ", topicStats=" + topicStats
+ ", executorStats=" + executorStats
+ ", partitions=" + partitions
+ '}';
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_monitor_impl_MemberStateImpl.java |
781 | public class CollectionTxnRemoveOperation extends CollectionBackupAwareOperation {
private long itemId;
private transient CollectionItem item;
public CollectionTxnRemoveOperation() {
}
public CollectionTxnRemoveOperation(String name, long itemId) {
super(name);
this.itemId = itemId;
}
@Override
public boolean shouldBackup() {
return true;
}
@Override
public Operation getBackupOperation() {
return new CollectionTxnRemoveBackupOperation(name, itemId);
}
@Override
public int getId() {
return CollectionDataSerializerHook.COLLECTION_TXN_REMOVE;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
item = getOrCreateContainer().commitRemove(itemId);
}
@Override
public void afterRun() throws Exception {
if (item != null) {
publishEvent(ItemEventType.REMOVED, (Data) item.getValue());
}
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeLong(itemId);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
itemId = in.readLong();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_collection_txn_CollectionTxnRemoveOperation.java |
153 | assertTrueEventually(new AssertTask() {
@Override
public void run() {
assertEquals(1, listener.events.size());
MembershipEvent event = listener.events.get(0);
assertEquals(MembershipEvent.MEMBER_ADDED, event.getEventType());
assertEquals(server2.getCluster().getLocalMember(), event.getMember());
assertEquals(getMembers(server1, server2), event.getMembers());
}
}); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_MembershipListenerTest.java |
1,210 | SOFT_THREAD_LOCAL {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
return threadLocal(softFactory(dequeFactory(c, limit)));
}
}, | 0true
| src_main_java_org_elasticsearch_cache_recycler_CacheRecycler.java |
1,513 | public class NodeVersionAllocationDeciderTests extends ElasticsearchAllocationTestCase {
private final ESLogger logger = Loggers.getLogger(NodeVersionAllocationDeciderTests.class);
@Test
public void testDoNotAllocateFromPrimary() {
AllocationService strategy = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(2))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
assertThat(routingTable.index("test").shards().size(), equalTo(5));
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
}
logger.info("start two nodes and fully start the shards");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(2));
}
logger.info("start all the primary shards, replicas will start initializing");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
}
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
}
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3", getPreviousVersion())))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
}
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
}
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(2));
}
}
@Test
public void testRandom() {
AllocationService service = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build());
logger.info("Building initial routing table");
MetaData.Builder builder = MetaData.builder();
RoutingTable.Builder rtBuilder = RoutingTable.builder();
int numIndices = between(1, 20);
for (int i = 0; i < numIndices; i++) {
builder.put(IndexMetaData.builder("test_" + i).numberOfShards(between(1, 5)).numberOfReplicas(between(0, 2)));
}
MetaData metaData = builder.build();
for (int i = 0; i < numIndices; i++) {
rtBuilder.addAsNew(metaData.index("test_" + i));
}
RoutingTable routingTable = rtBuilder.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(routingTable.allShards().size()));
List<DiscoveryNode> nodes = new ArrayList<DiscoveryNode>();
int nodeIdx = 0;
int iters = atLeast(10);
for (int i = 0; i < iters; i++) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
int numNodes = between(1, 20);
if (nodes.size() > numNodes) {
Collections.shuffle(nodes, getRandom());
nodes = nodes.subList(0, numNodes);
} else {
for (int j = nodes.size(); j < numNodes; j++) {
if (frequently()) {
nodes.add(newNode("node" + (nodeIdx++), randomBoolean() ? getPreviousVersion() : Version.CURRENT));
} else {
nodes.add(newNode("node" + (nodeIdx++), randomVersion()));
}
}
}
for (DiscoveryNode node : nodes) {
nodesBuilder.put(node);
}
clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
clusterState = stabilize(clusterState, service);
}
}
@Test
public void testRollingRestart() {
AllocationService service = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(2))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
assertThat(routingTable.index("test").shards().size(), equalTo(5));
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
}
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("old0", getPreviousVersion()))
.put(newNode("old1", getPreviousVersion()))
.put(newNode("old2", getPreviousVersion()))).build();
clusterState = stabilize(clusterState, service);
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("old0", getPreviousVersion()))
.put(newNode("old1", getPreviousVersion()))
.put(newNode("new0"))).build();
clusterState = stabilize(clusterState, service);
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node0", getPreviousVersion()))
.put(newNode("new1"))
.put(newNode("new0"))).build();
clusterState = stabilize(clusterState, service);
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("new2"))
.put(newNode("new1"))
.put(newNode("new0"))).build();
clusterState = stabilize(clusterState, service);
routingTable = clusterState.routingTable();
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), notNullValue());
assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), notNullValue());
assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), notNullValue());
}
}
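// Helper: repeatedly reroutes and applies started shards until the routing table reaches a fixed point, asserting version-aware recovery at each step.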
private ClusterState stabilize(ClusterState clusterState, AllocationService service) {
logger.trace("RoutingNodes: {}", clusterState.routingNodes().prettyPrint());
RoutingTable routingTable = service.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
RoutingNodes routingNodes = clusterState.routingNodes();
assertRecoveryNodeVersions(routingNodes);
logger.info("complete rebalancing");
RoutingTable prev = routingTable;
boolean stable = false;
for (int i = 0; i < 1000; i++) { // at most 1000 iterations - this should be enough for all tests
logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint());
routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
if (stable = (routingTable == prev)) { // intentional assignment: reroute returns the identical instance when nothing changed
break;
}
assertRecoveryNodeVersions(routingNodes);
prev = routingTable;
}
logger.info("stabilized success [{}]", stable);
assertThat(stable, is(true));
return clusterState;
}
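// Verifies that relocations and replica recoveries only flow from older (or equal) nodes to newer nodes.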
private void assertRecoveryNodeVersions(RoutingNodes routingNodes) {
logger.trace("RoutingNodes: {}", routingNodes.prettyPrint());
List<MutableShardRouting> mutableShardRoutings = routingNodes.shardsWithState(ShardRoutingState.RELOCATING);
for (MutableShardRouting r : mutableShardRoutings) {
String toId = r.relocatingNodeId();
String fromId = r.currentNodeId();
assertThat(fromId, notNullValue());
assertThat(toId, notNullValue());
logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
}
mutableShardRoutings = routingNodes.shardsWithState(ShardRoutingState.INITIALIZING);
for (MutableShardRouting r : mutableShardRoutings) {
if (r.initializing() && r.relocatingNodeId() == null && !r.primary()) {
MutableShardRouting primary = routingNodes.activePrimary(r);
assertThat(primary, notNullValue());
String fromId = primary.currentNodeId();
String toId = r.currentNodeId();
logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
}
}
}
} | 0true
| src_test_java_org_elasticsearch_cluster_routing_allocation_NodeVersionAllocationDeciderTests.java |
430 | public class TransportClusterStateAction extends TransportMasterNodeReadOperationAction<ClusterStateRequest, ClusterStateResponse> {
private final ClusterName clusterName;
@Inject
public TransportClusterStateAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
ClusterName clusterName) {
super(settings, transportService, clusterService, threadPool);
this.clusterName = clusterName;
}
@Override
protected String executor() {
// very lightweight operation in memory, no need to fork to a thread
return ThreadPool.Names.SAME;
}
@Override
protected String transportAction() {
return ClusterStateAction.NAME;
}
@Override
protected ClusterStateRequest newRequest() {
return new ClusterStateRequest();
}
@Override
protected ClusterStateResponse newResponse() {
return new ClusterStateResponse();
}
@Override
protected void masterOperation(final ClusterStateRequest request, final ClusterState state, ActionListener<ClusterStateResponse> listener) throws ElasticsearchException {
ClusterState currentState = clusterService.state();
logger.trace("Serving cluster state request using version {}", currentState.version());
ClusterState.Builder builder = ClusterState.builder();
builder.version(currentState.version());
if (request.nodes()) {
builder.nodes(currentState.nodes());
}
if (request.routingTable()) {
if (request.indices().length > 0) {
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
for (String filteredIndex : request.indices()) {
if (currentState.routingTable().getIndicesRouting().containsKey(filteredIndex)) {
routingTableBuilder.add(currentState.routingTable().getIndicesRouting().get(filteredIndex));
}
}
builder.routingTable(routingTableBuilder);
} else {
builder.routingTable(currentState.routingTable());
}
builder.allocationExplanation(currentState.allocationExplanation());
}
if (request.blocks()) {
builder.blocks(currentState.blocks());
}
if (request.metaData()) {
MetaData.Builder mdBuilder;
if (request.indices().length == 0 && request.indexTemplates().length == 0) {
mdBuilder = MetaData.builder(currentState.metaData());
} else {
mdBuilder = MetaData.builder();
}
if (request.indices().length > 0) {
String[] indices = currentState.metaData().concreteIndicesIgnoreMissing(request.indices());
for (String filteredIndex : indices) {
IndexMetaData indexMetaData = currentState.metaData().index(filteredIndex);
if (indexMetaData != null) {
mdBuilder.put(indexMetaData, false);
}
}
}
if (request.indexTemplates().length > 0) {
for (String templateName : request.indexTemplates()) {
IndexTemplateMetaData indexTemplateMetaData = currentState.metaData().templates().get(templateName);
if (indexTemplateMetaData != null) {
mdBuilder.put(indexTemplateMetaData);
}
}
}
builder.metaData(mdBuilder);
}
listener.onResponse(new ClusterStateResponse(clusterName, builder.build()));
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_state_TransportClusterStateAction.java |
205 | Callable<Object> response = new Callable<Object>() {
public Object call() throws Exception {
final OClusterPosition result;
try {
OStorageRemoteThreadLocal.INSTANCE.get().sessionId = sessionId;
beginResponse(network);
result = network.readClusterPosition();
if (network.getSrvProtocolVersion() >= 11)
network.readVersion();
} finally {
endResponse(network);
OStorageRemoteThreadLocal.INSTANCE.get().sessionId = -1;
}
iCallback.call(iRid, result);
return null;
}
}; | 0true
| client_src_main_java_com_orientechnologies_orient_client_remote_OStorageRemote.java |
1,696 | runnable = new Runnable() { public void run() { map.replace("key", "oldValue", null); } }; | 0true
| hazelcast_src_test_java_com_hazelcast_map_BasicMapTest.java |
1,558 | @Retention(RetentionPolicy.RUNTIME)
public @interface ManagedAnnotation {
String value();
boolean operation() default false;
} | 0true
| hazelcast_src_main_java_com_hazelcast_jmx_ManagedAnnotation.java |
1,509 | @RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class ManagedContextInstanceAwareTest extends HazelcastTestSupport {
@Test
public void test(){
Config config = new Config();
ManagedContextImpl managedContext = new ManagedContextImpl();
config.setManagedContext(managedContext);
HazelcastInstance hz = createHazelcastInstance(config);
assertNotNull("hazelcastInstance should have been set",managedContext.hz);
}
private class ManagedContextImpl implements ManagedContext, HazelcastInstanceAware {
private HazelcastInstance hz;
@Override
public void setHazelcastInstance(HazelcastInstance hazelcastInstance) {
this.hz= hazelcastInstance;
}
@Override
public Object initialize(Object obj) {
return null;
}
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_instance_ManagedContextInstanceAwareTest.java |
3,753 | private static class LocalCacheEntry {
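// State flags for a locally cached value: dirty (modified locally), pending reload, or removed.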
volatile boolean dirty;
volatile boolean reload;
boolean removed;
private Object value;
} | 1no label
| hazelcast-wm_src_main_java_com_hazelcast_web_WebFilter.java |
1,472 | public class OSQLFunctionInE extends OSQLFunctionMove {
public static final String NAME = "inE";
public OSQLFunctionInE() {
super(NAME, 0, 1);
}
@Override
protected Object move(final OrientBaseGraph graph, final OIdentifiable iRecord, final String[] iLabels) {
return v2e(graph, iRecord, Direction.IN, iLabels);
}
} | 1no label
| graphdb_src_main_java_com_orientechnologies_orient_graph_sql_functions_OSQLFunctionInE.java |
286 | list.setLabelProvider(new StorageLabelProvider() {
@Override
public String getText(Object element) {
for (IEditorPart part: EditorUtil.getDirtyEditors()) {
if (getFile(part.getEditorInput())==element) {
return "*" + super.getText(element);
}
}
return super.getText(element);
}
}); | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_RecentFilesPopup.java |
144 | public static class Order {
public static final int Rules = 1000;
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentImpl.java |
2,300 | return new Recycler.V<T>() {
@Override
public boolean release() throws ElasticsearchException {
synchronized (lock) {
return delegate.release();
}
}
@Override
public T v() {
return delegate.v();
}
@Override
public boolean isRecycled() {
return delegate.isRecycled();
}
}; | 0true
| src_main_java_org_elasticsearch_common_recycler_Recyclers.java |
1,348 | completableFuture.andThen(new ExecutionCallback() {
@Override
public void onResponse(Object response) {
reference2.set(response);
latch2.countDown();
}
@Override
public void onFailure(Throwable t) {
reference2.set(t);
latch2.countDown();
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_executor_ExecutorServiceTest.java |
1,441 | public static class Entry {
private final State state;
private final SnapshotId snapshotId;
private final ImmutableMap<ShardId, ShardRestoreStatus> shards;
private final ImmutableList<String> indices;
/**
* Creates new restore metadata
*
* @param snapshotId snapshot id
* @param state current state of the restore process
* @param indices list of indices being restored
* @param shards list of shards being restored and their current restore status
*/
public Entry(SnapshotId snapshotId, State state, ImmutableList<String> indices, ImmutableMap<ShardId, ShardRestoreStatus> shards) {
this.snapshotId = snapshotId;
this.state = state;
this.indices = indices;
if (shards == null) {
this.shards = ImmutableMap.of();
} else {
this.shards = shards;
}
}
/**
* Returns snapshot id
*
* @return snapshot id
*/
public SnapshotId snapshotId() {
return this.snapshotId;
}
/**
* Returns the list of shards being restored and their restore status
*
* @return list of shards
*/
public ImmutableMap<ShardId, ShardRestoreStatus> shards() {
return this.shards;
}
/**
* Returns current restore state
*
* @return restore state
*/
public State state() {
return state;
}
/**
* Returns list of indices
*
* @return list of indices
*/
public ImmutableList<String> indices() {
return indices;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Entry entry = (Entry) o;
if (!indices.equals(entry.indices)) return false;
if (!snapshotId.equals(entry.snapshotId)) return false;
if (!shards.equals(entry.shards)) return false;
if (state != entry.state) return false;
return true;
}
@Override
public int hashCode() {
int result = state.hashCode();
result = 31 * result + snapshotId.hashCode();
result = 31 * result + shards.hashCode();
result = 31 * result + indices.hashCode();
return result;
}
} | 0true
| src_main_java_org_elasticsearch_cluster_metadata_RestoreMetaData.java |
318 | public class NodesHotThreadsRequest extends NodesOperationRequest<NodesHotThreadsRequest> {
int threads = 3;
String type = "cpu";
TimeValue interval = new TimeValue(500, TimeUnit.MILLISECONDS);
int snapshots = 10;
/**
* Get hot threads from the nodes whose ids are specified. If none are passed, hot
* threads for all nodes are returned.
*/
public NodesHotThreadsRequest(String... nodesIds) {
super(nodesIds);
}
public int threads() {
return this.threads;
}
public NodesHotThreadsRequest threads(int threads) {
this.threads = threads;
return this;
}
public NodesHotThreadsRequest type(String type) {
this.type = type;
return this;
}
public String type() {
return this.type;
}
public NodesHotThreadsRequest interval(TimeValue interval) {
this.interval = interval;
return this;
}
public TimeValue interval() {
return this.interval;
}
public int snapshots() {
return this.snapshots;
}
public NodesHotThreadsRequest snapshots(int snapshots) {
this.snapshots = snapshots;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
threads = in.readInt();
type = in.readString();
interval = TimeValue.readTimeValue(in);
snapshots = in.readInt();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeInt(threads);
out.writeString(type);
interval.writeTo(out);
out.writeInt(snapshots);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_node_hotthreads_NodesHotThreadsRequest.java |
853 | private class TransportHandler extends BaseTransportRequestHandler<SearchScrollRequest> {
@Override
public SearchScrollRequest newInstance() {
return new SearchScrollRequest();
}
@Override
public void messageReceived(SearchScrollRequest request, final TransportChannel channel) throws Exception {
// no need for a threaded listener
request.listenerThreaded(false);
execute(request, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse result) {
try {
channel.sendResponse(result);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send response for search", e1);
}
}
});
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
} | 0true
| src_main_java_org_elasticsearch_action_search_TransportSearchScrollAction.java |
68 | private static final class ByteArrayKey
{
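// Wraps a byte[] to provide content-based equals/hashCode for use as a map key.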
private final byte[] bytes;
private ByteArrayKey( byte[] bytes )
{
this.bytes = bytes;
}
@Override
public int hashCode()
{
return Arrays.hashCode( bytes );
}
@Override
public boolean equals( Object obj )
{
return obj instanceof ByteArrayKey && Arrays.equals( bytes, ((ByteArrayKey)obj).bytes );
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_TxLog.java |
49 | public interface QueryDescription {
/**
* Returns a string representation of the entire query
* @return
*/
@Override
public String toString();
/**
* Returns how many individual queries are combined into this query, i.e., how many
* queries will be executed in one batch.
*
* @return
*/
public int getNoCombinedQueries();
/**
* Returns the number of sub-queries this query is comprised of. Each sub-query represents one OR clause, i.e.,
* the union of each sub-query's result is the overall result.
*
* @return
*/
public int getNoSubQueries();
/**
* Returns a list of all sub-queries that comprise this query
* @return
*/
public List<? extends SubQuery> getSubQueries();
/**
* Represents one sub-query of this query. Each sub-query represents one OR clause.
*/
public interface SubQuery {
/**
* Whether this query is fitted, i.e. whether the returned results must be filtered in-memory.
* @return
*/
public boolean isFitted();
/**
* Whether this query respects the sort order of parent query or requires sorting in-memory.
* @return
*/
public boolean isSorted();
}
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_core_QueryDescription.java |
92 | public class ReadOnlyTxManager extends AbstractTransactionManager
implements Lifecycle
{
private ThreadLocalWithSize<ReadOnlyTransactionImpl> txThreadMap;
private int eventIdentifierCounter = 0;
private XaDataSourceManager xaDsManager = null;
private final StringLogger logger;
private final Factory<byte[]> xidGlobalIdFactory;
public ReadOnlyTxManager( XaDataSourceManager xaDsManagerToUse, Factory<byte[]> xidGlobalIdFactory,
StringLogger logger )
{
xaDsManager = xaDsManagerToUse;
this.xidGlobalIdFactory = xidGlobalIdFactory;
this.logger = logger;
}
synchronized int getNextEventIdentifier()
{
return eventIdentifierCounter++;
}
@Override
public void init()
{
}
@Override
public void start()
{
txThreadMap = new ThreadLocalWithSize<>();
}
@Override
public void stop()
{
}
@Override
public void shutdown()
{
}
@Override
public void begin() throws NotSupportedException
{
if ( txThreadMap.get() != null )
{
throw new NotSupportedException(
"Nested transactions not supported" );
}
txThreadMap.set( new ReadOnlyTransactionImpl( xidGlobalIdFactory.newInstance(), this, logger ) );
}
@Override
public void commit() throws RollbackException, HeuristicMixedException,
IllegalStateException
{
ReadOnlyTransactionImpl tx = txThreadMap.get();
if ( tx == null )
{
throw new IllegalStateException( "Not in transaction" );
}
if ( tx.getStatus() != Status.STATUS_ACTIVE
&& tx.getStatus() != Status.STATUS_MARKED_ROLLBACK )
{
throw new IllegalStateException( "Tx status is: "
+ getTxStatusAsString( tx.getStatus() ) );
}
tx.doBeforeCompletion();
if ( tx.getStatus() == Status.STATUS_ACTIVE )
{
commit( tx );
}
else if ( tx.getStatus() == Status.STATUS_MARKED_ROLLBACK )
{
rollbackCommit( tx );
}
else
{
throw new IllegalStateException( "Tx status is: "
+ getTxStatusAsString( tx.getStatus() ) );
}
}
private void commit( ReadOnlyTransactionImpl tx )
{
if ( tx.getResourceCount() == 0 )
{
tx.setStatus( Status.STATUS_COMMITTED );
}
tx.doAfterCompletion();
txThreadMap.remove();
tx.setStatus( Status.STATUS_NO_TRANSACTION );
}
private void rollbackCommit( ReadOnlyTransactionImpl tx )
throws HeuristicMixedException, RollbackException
{
try
{
tx.doRollback();
}
catch ( XAException e )
{
logger.error( "Unable to rollback marked transaction. "
+ "Some resources may be commited others not. "
+ "Neo4j kernel should be SHUTDOWN for "
+ "resource maintance and transaction recovery ---->", e );
throw Exceptions.withCause(
new HeuristicMixedException( "Unable to rollback " + " ---> error code for rollback: "
+ e.errorCode ), e );
}
tx.doAfterCompletion();
txThreadMap.remove();
tx.setStatus( Status.STATUS_NO_TRANSACTION );
throw new RollbackException(
"Failed to commit, transaction rolled back" );
}
@Override
public void rollback() throws IllegalStateException, SystemException
{
ReadOnlyTransactionImpl tx = txThreadMap.get();
if ( tx == null )
{
throw new IllegalStateException( "Not in transaction" );
}
if ( tx.getStatus() == Status.STATUS_ACTIVE ||
tx.getStatus() == Status.STATUS_MARKED_ROLLBACK ||
tx.getStatus() == Status.STATUS_PREPARING )
{
tx.doBeforeCompletion();
try
{
tx.doRollback();
}
catch ( XAException e )
{
logger.error("Unable to rollback marked or active transaction. "
+ "Some resources may be commited others not. "
+ "Neo4j kernel should be SHUTDOWN for "
+ "resource maintance and transaction recovery ---->", e );
throw Exceptions.withCause( new SystemException( "Unable to rollback "
+ " ---> error code for rollback: " + e.errorCode ), e );
}
tx.doAfterCompletion();
txThreadMap.remove();
tx.setStatus( Status.STATUS_NO_TRANSACTION );
}
else
{
throw new IllegalStateException( "Tx status is: "
+ getTxStatusAsString( tx.getStatus() ) );
}
}
@Override
public int getStatus()
{
ReadOnlyTransactionImpl tx = txThreadMap.get();
if ( tx != null )
{
return tx.getStatus();
}
return Status.STATUS_NO_TRANSACTION;
}
@Override
public Transaction getTransaction()
{
return txThreadMap.get();
}
@Override
public void resume( Transaction tx ) throws IllegalStateException
{
if ( txThreadMap.get() != null )
{
throw new IllegalStateException( "Transaction already associated" );
}
if ( tx != null )
{
ReadOnlyTransactionImpl txImpl = (ReadOnlyTransactionImpl) tx;
if ( txImpl.getStatus() != Status.STATUS_NO_TRANSACTION )
{
txImpl.markAsActive();
txThreadMap.set( txImpl );
}
}
}
@Override
public Transaction suspend()
{
ReadOnlyTransactionImpl tx = txThreadMap.get();
txThreadMap.remove();
if ( tx != null )
{
tx.markAsSuspended();
}
return tx;
}
@Override
public void setRollbackOnly() throws IllegalStateException
{
ReadOnlyTransactionImpl tx = txThreadMap.get();
if ( tx == null )
{
throw new IllegalStateException( "Not in transaction" );
}
tx.setRollbackOnly();
}
@Override
public void setTransactionTimeout( int seconds )
{
}
byte[] getBranchId( XAResource xaRes )
{
if ( xaRes instanceof XaResource )
{
byte branchId[] = ((XaResource) xaRes).getBranchId();
if ( branchId != null )
{
return branchId;
}
}
return xaDsManager.getBranchId( xaRes );
}
String getTxStatusAsString( int status )
{
switch ( status )
{
case Status.STATUS_ACTIVE:
return "STATUS_ACTIVE";
case Status.STATUS_NO_TRANSACTION:
return "STATUS_NO_TRANSACTION";
case Status.STATUS_PREPARING:
return "STATUS_PREPARING";
case Status.STATUS_PREPARED:
return "STATUS_PREPARED";
case Status.STATUS_COMMITTING:
return "STATUS_COMMITING";
case Status.STATUS_COMMITTED:
return "STATUS_COMMITED";
case Status.STATUS_ROLLING_BACK:
return "STATUS_ROLLING_BACK";
case Status.STATUS_ROLLEDBACK:
return "STATUS_ROLLEDBACK";
case Status.STATUS_UNKNOWN:
return "STATUS_UNKNOWN";
case Status.STATUS_MARKED_ROLLBACK:
return "STATUS_MARKED_ROLLBACK";
default:
return "STATUS_UNKNOWN(" + status + ")";
}
}
@Override
public int getEventIdentifier()
{
TransactionImpl tx = (TransactionImpl) getTransaction();
if ( tx != null )
{
return tx.getEventIdentifier();
}
return -1;
}
@Override
public void doRecovery() throws Throwable
{
}
@Override
public TransactionState getTransactionState()
{
return TransactionState.NO_STATE;
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_ReadOnlyTxManager.java |
2,106 | public class HandlesStreamsTests extends ElasticsearchTestCase {
@Test
public void testSharedStringHandles() throws Exception {
String test1 = "test1";
String test2 = "test2";
String test3 = "test3";
String test4 = "test4";
String test5 = "test5";
String test6 = "test6";
BytesStreamOutput bout = new BytesStreamOutput();
HandlesStreamOutput out = new HandlesStreamOutput(bout);
out.writeString(test1);
out.writeString(test1);
out.writeString(test2);
out.writeString(test3);
out.writeSharedString(test4);
out.writeSharedString(test4);
out.writeSharedString(test5);
out.writeSharedString(test6);
BytesStreamInput bin = new BytesStreamInput(bout.bytes());
HandlesStreamInput in = new HandlesStreamInput(bin);
String s1 = in.readString();
String s2 = in.readString();
String s3 = in.readString();
String s4 = in.readString();
String s5 = in.readSharedString();
String s6 = in.readSharedString();
String s7 = in.readSharedString();
String s8 = in.readSharedString();
assertThat(s1, equalTo(test1));
assertThat(s2, equalTo(test1));
assertThat(s3, equalTo(test2));
assertThat(s4, equalTo(test3));
assertThat(s5, equalTo(test4));
assertThat(s6, equalTo(test4));
assertThat(s7, equalTo(test5));
assertThat(s8, equalTo(test6));
assertThat(s1, not(sameInstance(s2)));
assertThat(s5, sameInstance(s6));
}
} | 0true
| src_test_java_org_elasticsearch_common_io_streams_HandlesStreamsTests.java |
352 | static class NodeShutdownRequest extends TransportRequest {
boolean exit;
NodeShutdownRequest() {
}
NodeShutdownRequest(NodesShutdownRequest request) {
super(request);
this.exit = request.exit();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
exit = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(exit);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_node_shutdown_TransportNodesShutdownAction.java |
1,511 | public static class Combiner extends Reducer<LongWritable, Holder, LongWritable, Holder> {
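// Combiner: merges the holder tagged 'v' (vertex data) and any holders tagged 'e' (edges) emitted for the same vertex id ahead of the reduce phase.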
private Direction direction;
private Configuration faunusConf;
private static final Logger log =
LoggerFactory.getLogger(Combiner.class);
@Override
public void setup(final Reducer.Context context) throws IOException, InterruptedException {
faunusConf = ModifiableHadoopConfiguration.of(DEFAULT_COMPAT.getContextConfiguration(context));
if (!faunusConf.has(LINK_DIRECTION)) {
Iterator<Entry<String, String>> it = context.getConfiguration().iterator();
log.error("Broken configuration missing {}", LINK_DIRECTION);
log.error("---- Start config dump ----");
while (it.hasNext()) {
Entry<String,String> ent = it.next();
log.error("k:{} -> v:{}", ent.getKey(), ent.getValue());
}
log.error("---- End config dump ----");
throw new NullPointerException();
}
direction = faunusConf.get(LINK_DIRECTION).opposite();
}
private final Holder<FaunusVertex> holder = new Holder<FaunusVertex>();
@Override
public void reduce(final LongWritable key, final Iterable<Holder> values, final Reducer<LongWritable, Holder, LongWritable, Holder>.Context context) throws IOException, InterruptedException {
long edgesCreated = 0;
final FaunusVertex vertex = new FaunusVertex(faunusConf, key.get());
char outTag = 'x';
for (final Holder holder : values) {
final char tag = holder.getTag();
if (tag == 'v') {
vertex.addAll((FaunusVertex) holder.get());
outTag = 'v';
} else if (tag == 'e') {
vertex.addEdge(direction, (StandardFaunusEdge) holder.get());
edgesCreated++;
} else {
vertex.addEdges(Direction.BOTH, (FaunusVertex) holder.get());
}
}
context.write(key, holder.set(outTag, vertex));
if (direction.equals(OUT)) {
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_CREATED, edgesCreated);
} else {
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_CREATED, edgesCreated);
}
}
} | 1no label
| titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_sideeffect_LinkMapReduce.java |
1,272 | public class FaunusSerializer {
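// Handles Writable (de)serialization of Faunus vertices, edges, and properties, honoring the configured trackState/trackPaths flags.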
// This is volatile to support double-checked locking
private static volatile Serializer standardSerializer;
private final FaunusSchemaManager types;
private final boolean trackState;
private final boolean trackPaths;
private final Configuration configuration;
private static final Logger log =
LoggerFactory.getLogger(FaunusSerializer.class);
public FaunusSerializer(final Configuration configuration) {
Preconditions.checkNotNull(configuration);
this.types = FaunusSchemaManager.getTypeManager(configuration);
this.configuration = configuration;
this.trackState = configuration.get(TitanHadoopConfiguration.PIPELINE_TRACK_STATE);
this.trackPaths = configuration.get(TitanHadoopConfiguration.PIPELINE_TRACK_PATHS);
}
public void writeVertex(final FaunusVertex vertex, final DataOutput out) throws IOException {
//Need to write the id up front for the comparator
WritableUtils.writeVLong(out, vertex.id);
Schema schema = new Schema();
vertex.updateSchema(schema);
schema.writeSchema(out);
writePathElement(vertex, schema, out);
writeEdges(vertex, vertex.inAdjacency, out, Direction.IN, schema);
FaunusVertexLabel vl = (FaunusVertexLabel)vertex.getVertexLabel();
out.writeUTF(vl.isDefault()?"":vl.getName());
}
public void readVertex(final FaunusVertex vertex, final DataInput in) throws IOException {
WritableUtils.readVLong(in);
Schema schema = readSchema(in);
readPathElement(vertex, schema, in);
vertex.inAdjacency = readEdges(vertex, in, Direction.IN, schema);
String labelName = in.readUTF();
vertex.setVertexLabel(StringUtils.isBlank(labelName)?FaunusVertexLabel.DEFAULT_VERTEXLABEL:
types.getVertexLabel(labelName));
}
public void writeEdge(final StandardFaunusEdge edge, final DataOutput out) throws IOException {
writePathElement(edge, out);
WritableUtils.writeVLong(out, edge.inVertex);
WritableUtils.writeVLong(out, edge.outVertex);
writeFaunusType(edge.getType(), out);
}
public void readEdge(final StandardFaunusEdge edge, final DataInput in) throws IOException {
readPathElement(edge, in);
edge.inVertex = WritableUtils.readVLong(in);
edge.outVertex = WritableUtils.readVLong(in);
edge.setLabel((FaunusEdgeLabel)readFaunusType(in));
}
public void writeProperty(final StandardFaunusProperty property, final DataOutput out) throws IOException {
writePathElement(property, out);
WritableUtils.writeVLong(out, property.vertexid);
serializeObject(out,property.getValue());
writeFaunusType(property.getType(), out);
}
public void readProperty(final StandardFaunusProperty property, final DataInput in) throws IOException {
readPathElement(property, in);
property.vertexid = WritableUtils.readVLong(in);
property.value = deserializeObject(in);
property.setKey((FaunusPropertyKey)readFaunusType(in));
}
private void readPathElement(final FaunusPathElement element, final DataInput in) throws IOException {
readPathElement(element, null, in);
}
private void writePathElement(final FaunusPathElement element, final DataOutput out) throws IOException {
writePathElement(element, null, out);
}
private void readPathElement(final FaunusPathElement element, Schema schema, final DataInput in) throws IOException {
readElement(element, schema, in);
if (trackPaths) {
List<List<MicroElement>> paths = readElementPaths(in);
element.tracker = new FaunusPathElement.Tracker(paths,
(element instanceof FaunusVertex) ? new FaunusVertex.MicroVertex(element.id) : new StandardFaunusEdge.MicroEdge(element.id));
log.trace("readPathElement element={} paths={}", element, paths);
} else {
element.pathCounter = WritableUtils.readVLong(in);
element.tracker = FaunusPathElement.DEFAULT_TRACK;
}
}
private void writePathElement(final FaunusPathElement element, final Schema schema, final DataOutput out) throws IOException {
writeElement(element, schema, out);
if (trackPaths)
writeElementPaths(element.tracker.paths, out);
else
WritableUtils.writeVLong(out, element.pathCounter);
}
private void readElement(final FaunusElement element, Schema schema, final DataInput in) throws IOException {
element.id = WritableUtils.readVLong(in);
if (trackState) element.setLifeCycle(in.readByte());
element.outAdjacency = readEdges(element,in,Direction.OUT,schema);
}
private void writeElement(final FaunusElement element, final Schema schema, final DataOutput out) throws IOException {
Preconditions.checkArgument(trackState || !element.isRemoved());
WritableUtils.writeVLong(out, element.id);
if (trackState) out.writeByte(element.getLifeCycle());
writeEdges(element, element.outAdjacency, out, Direction.OUT, schema);
}
private void serializeObject(final DataOutput out, Object value) throws IOException {
final com.thinkaurelius.titan.graphdb.database.serialize.DataOutput o = getStandardSerializer().getDataOutput(40);
o.writeClassAndObject(value);
final StaticBuffer buffer = o.getStaticBuffer();
WritableUtils.writeVInt(out, buffer.length());
out.write(buffer.as(StaticBuffer.ARRAY_FACTORY));
}
private Object deserializeObject(final DataInput in) throws IOException {
int byteLength = WritableUtils.readVInt(in);
byte[] bytes = new byte[byteLength];
in.readFully(bytes);
final ReadBuffer buffer = new ReadArrayBuffer(bytes);
return getStandardSerializer().readClassAndObject(buffer);
}
/**
* Return the StandardSerializer singleton shared between all instances of FaunusSerializer.
*
* If it has not yet been initialized, then the singleton is created using the maximum
* Kryo buffer size configured in the calling FaunusSerializer.
*
* @return
*/
private Serializer getStandardSerializer() {
if (null == standardSerializer) { // N.B. standardSerializer is volatile
synchronized (FaunusSerializer.class) {
if (null == standardSerializer) {
int maxOutputBufSize = configuration.get(KRYO_MAX_OUTPUT_SIZE);
standardSerializer = new StandardSerializer(true, maxOutputBufSize);
}
}
}
// TODO consider checking whether actual output buffer size matches config, create new StandardSerializer if mismatched? Might not be worth it
return standardSerializer;
}
private <T extends FaunusRelation> Iterable<T> filterDeletedRelations(Iterable<T> elements) {
if (trackState) return elements;
else return Iterables.filter(elements, new Predicate<T>() {
@Override
public boolean apply(@Nullable T element) {
return !element.isRemoved();
}
});
}
private SetMultimap<FaunusRelationType, FaunusRelation> readEdges(final FaunusElement element, final DataInput in, final Direction direction, final Schema schema) throws IOException {
final SetMultimap<FaunusRelationType, FaunusRelation> adjacency = HashMultimap.create();
int numTypes = WritableUtils.readVInt(in);
for (int i = 0; i < numTypes; i++) {
FaunusRelationType type;
if (schema == null) type = readFaunusType(in);
else type = schema.getType(WritableUtils.readVLong(in));
final int size = WritableUtils.readVInt(in);
for (int j = 0; j < size; j++) {
FaunusRelation relation;
if (element instanceof FaunusVertex) {
if (type.isEdgeLabel()) {
final StandardFaunusEdge edge = new StandardFaunusEdge(configuration);
edge.setLabel((FaunusEdgeLabel)type);
readPathElement(edge, schema, in);
long otherId = WritableUtils.readVLong(in);
switch (direction) {
case IN:
edge.inVertex = element.getLongId();
edge.outVertex = otherId;
break;
case OUT:
edge.outVertex = element.getLongId();
edge.inVertex = otherId;
break;
default:
throw ExceptionFactory.bothIsNotSupported();
}
relation = edge;
log.trace("readEdges edge={} paths={}", edge, edge.tracker.paths);
} else {
assert type.isPropertyKey() && direction==Direction.OUT;
final StandardFaunusProperty property = new StandardFaunusProperty(configuration);
property.setKey((FaunusPropertyKey) type);
readPathElement(property, schema, in);
property.value = deserializeObject(in);
relation = property;
}
} else {
byte lifecycle = trackState?in.readByte():-1;
if (type.isEdgeLabel()) {
relation = new SimpleFaunusEdge((FaunusEdgeLabel)type,new FaunusVertex(configuration,WritableUtils.readVLong(in)));
} else {
assert type.isPropertyKey() && direction==Direction.OUT;
relation = new SimpleFaunusProperty((FaunusPropertyKey)type,deserializeObject(in));
}
if (trackState) relation.setLifeCycle(lifecycle);
}
adjacency.put(type, relation);
}
}
if (adjacency.isEmpty()) return FaunusElement.EMPTY_ADJACENCY;
return adjacency;
}
private void writeEdges(final FaunusElement element, final SetMultimap<FaunusRelationType, FaunusRelation> edges, final DataOutput out, final Direction direction, final Schema schema) throws IOException {
Map<FaunusRelationType, Integer> counts = Maps.newHashMap();
int typeCount = 0;
for (FaunusRelationType type : edges.keySet()) {
int count = IterablesUtil.size(filterDeletedRelations(edges.get(type)));
counts.put(type, count);
if (count > 0) typeCount++;
}
WritableUtils.writeVInt(out, typeCount);
for (FaunusRelationType type : edges.keySet()) {
if (counts.get(type) == 0) continue;
if (schema == null) writeFaunusType(type, out);
else WritableUtils.writeVLong(out, schema.getTypeId(type));
WritableUtils.writeVInt(out, counts.get(type));
Iterable<FaunusRelation> subset = filterDeletedRelations(edges.get(type));
for (final FaunusRelation rel : subset) {
if (element instanceof FaunusVertex) {
assert rel instanceof StandardFaunusRelation;
writePathElement((StandardFaunusRelation)rel,schema,out);
} else {
assert rel instanceof SimpleFaunusRelation;
if (trackState) out.writeByte(((SimpleFaunusRelation)rel).getLifeCycle());
}
if (rel.isEdge()) {
WritableUtils.writeVLong(out, ((FaunusEdge)rel).getVertexId(direction.opposite()));
} else {
serializeObject(out,((FaunusProperty)rel).getValue());
}
}
}
}
private void writeElementPaths(final List<List<MicroElement>> paths, final DataOutput out) throws IOException {
if (null == paths) {
WritableUtils.writeVInt(out, 0);
} else {
WritableUtils.writeVInt(out, paths.size());
for (final List<MicroElement> path : paths) {
WritableUtils.writeVInt(out, path.size());
for (MicroElement element : path) {
if (element instanceof FaunusVertex.MicroVertex)
out.writeChar('v');
else
out.writeChar('e');
WritableUtils.writeVLong(out, element.getId());
}
}
}
}
private List<List<MicroElement>> readElementPaths(final DataInput in) throws IOException {
int pathsSize = WritableUtils.readVInt(in);
if (pathsSize == 0)
return new ArrayList<List<MicroElement>>();
else {
final List<List<MicroElement>> paths = new ArrayList<List<MicroElement>>(pathsSize);
for (int i = 0; i < pathsSize; i++) {
int pathSize = WritableUtils.readVInt(in);
final List<MicroElement> path = new ArrayList<MicroElement>(pathSize);
for (int j = 0; j < pathSize; j++) {
char type = in.readChar();
if (type == 'v')
path.add(new FaunusVertex.MicroVertex(WritableUtils.readVLong(in)));
else
path.add(new StandardFaunusEdge.MicroEdge(WritableUtils.readVLong(in)));
}
paths.add(path);
}
return paths;
}
}
private void writeFaunusType(final FaunusRelationType type, final DataOutput out) throws IOException {
out.writeByte(type.isPropertyKey()?0:1);
out.writeUTF(type.getName());
}
private FaunusRelationType readFaunusType(final DataInput in) throws IOException {
int type = in.readByte();
String typeName = in.readUTF();
assert type==0 || type==1;
if (type==0) return types.getOrCreatePropertyKey(typeName);
else return types.getOrCreateEdgeLabel(typeName);
}
class Schema {
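// Per-record type dictionary: maps relation types to compact long ids so each type name is written only once per element.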
private final BiMap<FaunusRelationType, Long> localTypes;
private long count = 1;
private Schema() {
this(8);
}
private Schema(int size) {
this.localTypes = HashBiMap.create(size);
}
void add(String type) {
this.add(types.getRelationType(type));
}
void add(FaunusRelationType type) {
if (!localTypes.containsKey(type)) localTypes.put(type, count++);
}
void addAll(Iterable<FaunusRelationType> types) {
for (FaunusRelationType type : types) add(type);
}
long getTypeId(FaunusRelationType type) {
Long id = localTypes.get(type);
Preconditions.checkArgument(id != null, "Type is not part of the schema: " + type);
return id;
}
FaunusRelationType getType(long id) {
FaunusRelationType type = localTypes.inverse().get(id);
Preconditions.checkArgument(type != null, "Type is not part of the schema: " + id);
return type;
}
private void add(FaunusRelationType type, long index) {
Preconditions.checkArgument(!localTypes.containsValue(index));
localTypes.put(type, index);
count = index + 1;
}
private void writeSchema(final DataOutput out) throws IOException {
WritableUtils.writeVInt(out, localTypes.size());
for (Map.Entry<FaunusRelationType, Long> entry : localTypes.entrySet()) {
writeFaunusType(entry.getKey(), out);
WritableUtils.writeVLong(out, entry.getValue());
}
}
}
private Schema readSchema(final DataInput in) throws IOException {
int size = WritableUtils.readVInt(in);
Schema schema = new Schema(size);
for (int i = 0; i < size; i++) {
schema.add(readFaunusType(in), WritableUtils.readVLong(in));
}
return schema;
}
static {
WritableComparator.define(FaunusPathElement.class, new Comparator());
}
public static class Comparator extends WritableComparator {
public Comparator() {
super(FaunusPathElement.class);
}
@Override
public int compare(final byte[] element1, final int start1, final int length1, final byte[] element2, final int start2, final int length2) {
try {
return Long.valueOf(readVLong(element1, start1)).compareTo(readVLong(element2, start2));
} catch (IOException e) {
return -1;
}
}
@Override
public int compare(final WritableComparable a, final WritableComparable b) {
if (a instanceof FaunusElement && b instanceof FaunusElement)
return ((Long) (((FaunusElement) a).getLongId())).compareTo(((FaunusElement) b).getLongId());
else
return super.compare(a, b);
}
}
//################################################
// Serialization for vanilla Blueprints
//################################################
/**
* All graph element identifiers must be of the long data type. Implementations of this
* interface makes it possible to control the conversion of the identifier in the
* VertexToHadoopBinary utility class.
*
* @author Stephen Mallette (http://stephen.genoprime.com)
*/
// public static interface ElementIdHandler {
// long convertIdentifier(final Element element);
// }
//
// public void writeVertex(final Vertex vertex, final ElementIdHandler elementIdHandler, final DataOutput out) throws IOException {
// Schema schema = new Schema();
// //Convert properties and update schema
// Multimap<HadoopType, FaunusProperty> properties = getProperties(vertex);
// for (HadoopType type : properties.keySet()) schema.add(type);
// for (Edge edge : vertex.getEdges(Direction.BOTH)) {
// schema.add(edge.getLabel());
// for (String key : edge.getPropertyKeys()) schema.add(key);
// }
//
// WritableUtils.writeVLong(out, elementIdHandler.convertIdentifier(vertex));
// schema.writeSchema(out);
// WritableUtils.writeVLong(out, elementIdHandler.convertIdentifier(vertex));
// if (trackState) out.writeByte(ElementState.NEW.getByteValue());
// writeProperties(properties, schema, out);
// out.writeBoolean(false);
// WritableUtils.writeVLong(out, 0);
// writeEdges(vertex, Direction.IN, elementIdHandler, schema, out);
// writeEdges(vertex, Direction.OUT, elementIdHandler, schema, out);
//
// }
//
// private Multimap<HadoopType, FaunusProperty> getProperties(Element element) {
// Multimap<HadoopType, FaunusProperty> properties = HashMultimap.create();
// for (String key : element.getPropertyKeys()) {
// HadoopType type = types.get(key);
// properties.put(type, new FaunusProperty(type, element.getProperty(key)));
// }
// return properties;
// }
//
// private void writeEdges(final Vertex vertex, final Direction direction, final ElementIdHandler elementIdHandler,
// final Schema schema, final DataOutput out) throws IOException {
// final Multiset<String> labelCount = HashMultiset.create();
// for (final Edge edge : vertex.getEdges(direction)) {
// labelCount.add(edge.getLabel());
// }
// WritableUtils.writeVInt(out, labelCount.elementSet().size());
// for (String label : labelCount.elementSet()) {
// HadoopType type = types.get(label);
// WritableUtils.writeVLong(out, schema.getTypeId(type));
// WritableUtils.writeVInt(out, labelCount.count(label));
// for (final Edge edge : vertex.getEdges(direction, label)) {
// WritableUtils.writeVLong(out, elementIdHandler.convertIdentifier(edge));
// if (trackState) out.writeByte(ElementState.NEW.getByteValue());
// writeProperties(getProperties(edge), schema, out);
// out.writeBoolean(false);
// WritableUtils.writeVLong(out, 0);
// WritableUtils.writeVLong(out, elementIdHandler.convertIdentifier(edge.getVertex(direction.opposite())));
// }
// }
// }
} | 1no label
| titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_FaunusSerializer.java |
76 | public interface AttributeHandler<V> {
/**
* Verifies the given (not-null) attribute value is valid.
* Throws an {@link IllegalArgumentException} if the value is invalid,
* otherwise simply returns.
*
* @param value to verify
*/
public void verifyAttribute(V value);
/**
* Converts the given (not-null) value to the expected datatype V.
* The given object will NOT be of type V.
* Throws an {@link IllegalArgumentException} if it cannot be converted.
*
* @param value to convert
* @return converted to expected datatype
*/
public V convert(Object value);
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_AttributeHandler.java |
3,178 | static final class Fields {
static final XContentBuilderString FIELDDATA = new XContentBuilderString("fielddata");
static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions");
static final XContentBuilderString FIELDS = new XContentBuilderString("fields");
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_FieldDataStats.java |
375 | public class HBaseStoreTest extends KeyColumnValueStoreTest {
@BeforeClass
public static void startHBase() throws IOException, BackendException {
HBaseStorageSetup.startHBase();
}
@AfterClass
public static void stopHBase() {
// Workaround for https://issues.apache.org/jira/browse/HBASE-10312
if (VersionInfo.getVersion().startsWith("0.96"))
HBaseStorageSetup.killIfRunning();
}
public KeyColumnValueStoreManager openStorageManager() throws BackendException {
WriteConfiguration config = HBaseStorageSetup.getHBaseGraphConfiguration();
return new HBaseStoreManager(new BasicConfiguration(GraphDatabaseConfiguration.ROOT_NS,config, BasicConfiguration.Restriction.NONE));
}
@Test
public void testGetKeysWithKeyRange() throws Exception {
super.testGetKeysWithKeyRange();
}
} | 0true
| titan-hbase-parent_titan-hbase-core_src_test_java_com_thinkaurelius_titan_diskstorage_hbase_HBaseStoreTest.java |
1,426 | class HazelcastInstanceLoader implements IHazelcastInstanceLoader {
private final static ILogger logger = Logger.getLogger(HazelcastInstanceFactory.class);
private final Properties props = new Properties();
private String instanceName = null;
private HazelcastInstance instance;
private Config config = null;
public void configure(Properties props) {
this.props.putAll(props);
}
public HazelcastInstance loadInstance() throws CacheException {
if (instance != null && instance.getLifecycleService().isRunning()) {
logger.warning("Current HazelcastInstance is already loaded and running! " +
"Returning current instance...");
return instance;
}
String configResourcePath = null;
instanceName = CacheEnvironment.getInstanceName(props);
configResourcePath = CacheEnvironment.getConfigFilePath(props);
if (!isEmpty(configResourcePath)) {
try {
config = ConfigLoader.load(configResourcePath);
} catch (IOException e) {
logger.warning("IOException: " + e.getMessage());
}
if (config == null) {
throw new CacheException("Could not find configuration file: " + configResourcePath);
}
}
if (instanceName != null) {
instance = Hazelcast.getHazelcastInstanceByName(instanceName);
if (instance == null) {
try {
createOrGetInstance();
} catch (DuplicateInstanceNameException ignored) {
instance = Hazelcast.getHazelcastInstanceByName(instanceName);
}
}
} else {
createOrGetInstance();
}
return instance;
}
private void createOrGetInstance() throws DuplicateInstanceNameException {
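// Falls back to the default XML config when none was loaded, then starts a new instance under the configured name (may throw if the name is taken).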
if (config == null) {
config = new XmlConfigBuilder().build();
}
config.setInstanceName(instanceName);
instance = Hazelcast.newHazelcastInstance(config);
}
public void unloadInstance() throws CacheException {
if (instance == null) {
return;
}
final boolean shutDown = CacheEnvironment.shutdownOnStop(props, (instanceName == null));
if (!shutDown) {
logger.warning(CacheEnvironment.SHUTDOWN_ON_STOP + " property is set to 'false'. " +
"Leaving current HazelcastInstance active! (Warning: Do not disable Hazelcast "
+ GroupProperties.PROP_SHUTDOWNHOOK_ENABLED + " property!)");
return;
}
try {
instance.getLifecycleService().shutdown();
instance = null;
} catch (Exception e) {
throw new CacheException(e);
}
}
private static boolean isEmpty(String s) {
return s == null || s.trim().length() == 0;
}
} | 0true
| hazelcast-hibernate_hazelcast-hibernate4_src_main_java_com_hazelcast_hibernate_instance_HazelcastInstanceLoader.java |
2,815 | public enum AnalyzerScope {
INDEX,
INDICES,
GLOBAL
} | 0true
| src_main_java_org_elasticsearch_index_analysis_AnalyzerScope.java |
1,245 | private static final class LastMMapEntrySearchInfo {
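// Holds the mmap entry index found for the most recently requested file position.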
private final int foundMmapIndex;
private final long requestedPosition;
private LastMMapEntrySearchInfo(int foundMmapIndex, long requestedPosition) {
this.foundMmapIndex = foundMmapIndex;
this.requestedPosition = requestedPosition;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_storage_fs_OMMapManagerNew.java |
457 | executor.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < operations; i++) {
map1.put("foo-" + i, "bar");
}
}
}, 60, EntryEventType.ADDED, operations, 0.75, map1, map2); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_replicatedmap_ClientReplicatedMapTest.java |
643 | public class DeleteIndexTemplateAction extends IndicesAction<DeleteIndexTemplateRequest, DeleteIndexTemplateResponse, DeleteIndexTemplateRequestBuilder> {
public static final DeleteIndexTemplateAction INSTANCE = new DeleteIndexTemplateAction();
public static final String NAME = "indices/template/delete";
private DeleteIndexTemplateAction() {
super(NAME);
}
@Override
public DeleteIndexTemplateResponse newResponse() {
return new DeleteIndexTemplateResponse();
}
@Override
public DeleteIndexTemplateRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new DeleteIndexTemplateRequestBuilder(client);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_template_delete_DeleteIndexTemplateAction.java |
776 | public class CollectionRollbackOperation extends CollectionBackupAwareOperation {
private long itemId;
private boolean removeOperation;
public CollectionRollbackOperation() {
}
public CollectionRollbackOperation(String name, long itemId, boolean removeOperation) {
super(name);
this.itemId = itemId;
this.removeOperation = removeOperation;
}
@Override
public boolean shouldBackup() {
return true;
}
@Override
public Operation getBackupOperation() {
return new CollectionRollbackBackupOperation(name, itemId, removeOperation);
}
@Override
public int getId() {
return CollectionDataSerializerHook.COLLECTION_ROLLBACK;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
if (removeOperation) {
getOrCreateContainer().rollbackRemove(itemId);
} else {
getOrCreateContainer().rollbackAdd(itemId);
}
}
@Override
public void afterRun() throws Exception {
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeLong(itemId);
out.writeBoolean(removeOperation);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
itemId = in.readLong();
removeOperation = in.readBoolean();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_collection_txn_CollectionRollbackOperation.java |
124 | class FindInvocationVisitor extends Visitor {
Node node;
Tree.InvocationExpression result;
Tree.InvocationExpression current;
TypedDeclaration parameter;
FindInvocationVisitor(Node node) {
this.node=node;
}
@Override
public void visit(Tree.ListedArgument that) {
Expression e = that.getExpression();
if (e!=null && node==e.getTerm()) {
result=current;
Parameter p = that.getParameter();
if (p!=null) {
parameter=p.getModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.SpreadArgument that) {
Expression e = that.getExpression();
if (e!=null && node==e.getTerm()) {
result=current;
Parameter p = that.getParameter();
if (p!=null) {
parameter = p.getModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.NamedArgument that) {
if (node==that) {
result=current;
Parameter p = that.getParameter();
if (p!=null) {
parameter = p.getModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.Return that) {
Expression e = that.getExpression();
if (e!=null && node==e.getTerm()) {
//result=current;
parameter = (TypedDeclaration) that.getDeclaration();
}
super.visit(that);
}
@Override
public void visit(Tree.AssignOp that) {
if (node==that.getRightTerm()) {
//result=current;
Term lt = that.getLeftTerm();
if (lt instanceof Tree.BaseMemberExpression) {
Declaration d = ((Tree.BaseMemberExpression) lt).getDeclaration();
if (d instanceof TypedDeclaration) {
parameter = (TypedDeclaration) d;
}
}
}
super.visit(that);
}
@Override
public void visit(Tree.SpecifierStatement that) {
Expression e = that.getSpecifierExpression().getExpression();
if (e!=null && node==e.getTerm()) {
//result=current;
Term bme = that.getBaseMemberExpression();
if (bme instanceof Tree.BaseMemberExpression) {
Declaration d =
((Tree.BaseMemberExpression) bme).getDeclaration();
if (d instanceof TypedDeclaration) {
parameter = (TypedDeclaration) d;
}
}
}
super.visit(that);
}
@Override
public void visit(Tree.AttributeDeclaration that) {
Tree.SpecifierOrInitializerExpression sie =
that.getSpecifierOrInitializerExpression();
if (sie!=null) {
Expression e = sie.getExpression();
if (e!=null && node==e.getTerm()) {
//result=current;
parameter = that.getDeclarationModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.MethodDeclaration that) {
Tree.SpecifierOrInitializerExpression sie =
that.getSpecifierExpression();
if (sie!=null) {
Expression e = sie.getExpression();
if (e!=null && node==e.getTerm()) {
//result=current;
parameter = that.getDeclarationModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.InitializerParameter that) {
Tree.SpecifierExpression se = that.getSpecifierExpression();
if (se!=null) {
Tree.Expression e = se.getExpression();
if (e!=null && node==e.getTerm()) {
//result=current;
parameter = that.getParameterModel().getModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.InvocationExpression that) {
Tree.InvocationExpression oc=current;
current = that;
super.visit(that);
current=oc;
}
@Override
public void visit(Tree.BaseMemberExpression that) {
if (that == node) {
result = current;
}
super.visit(that);
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_FindInvocationVisitor.java |
1,495 | @SuppressWarnings("serial")
public class OObjectNotManagedException extends RuntimeException {
public OObjectNotManagedException() {
super();
}
public OObjectNotManagedException(String message, Throwable cause) {
super(message, cause);
}
public OObjectNotManagedException(String message) {
super(message);
}
public OObjectNotManagedException(Throwable cause) {
super(cause);
}
} | 0true
| object_src_main_java_com_orientechnologies_orient_object_db_OObjectNotManagedException.java |
194 | @Test
public class JNADirectMemoryTest {
public void testLong() {
final Random rnd = new Random();
ODirectMemory directMemory = new OJNADirectMemory();
long value = rnd.nextLong();
long pointer = directMemory.allocate(OLongSerializer.LONG_SIZE);
directMemory.setLong(pointer, value);
Assert.assertEquals(directMemory.getLong(pointer), value);
directMemory.free(pointer);
}
public void testInt() {
final Random rnd = new Random();
ODirectMemory directMemory = new OJNADirectMemory();
int value = rnd.nextInt();
long pointer = directMemory.allocate(OIntegerSerializer.INT_SIZE);
directMemory.setInt(pointer, value);
Assert.assertEquals(directMemory.getInt(pointer), value);
directMemory.free(pointer);
}
public void testChar() {
final Random rnd = new Random();
ODirectMemory directMemory = new OJNADirectMemory();
char value = (char) rnd.nextInt();
long pointer = directMemory.allocate(OCharSerializer.CHAR_SIZE);
directMemory.setChar(pointer, value);
Assert.assertEquals(directMemory.getChar(pointer), value);
directMemory.free(pointer);
}
public void testByte() {
final Random rnd = new Random();
ODirectMemory directMemory = new OJNADirectMemory();
byte[] value = new byte[1];
rnd.nextBytes(value);
long pointer = directMemory.allocate(1);
directMemory.setByte(pointer, value[0]);
Assert.assertEquals(directMemory.getByte(pointer), value[0]);
directMemory.free(pointer);
}
public void testAllocateBytes() {
final Random rnd = new Random();
ODirectMemory directMemory = new OJNADirectMemory();
byte[] value = new byte[256];
rnd.nextBytes(value);
long pointer = directMemory.allocate(value);
Assert.assertEquals(directMemory.get(pointer, value.length), value);
directMemory.free(pointer);
}
public void testBytesWithoutOffset() {
final Random rnd = new Random();
ODirectMemory directMemory = new OJNADirectMemory();
byte[] value = new byte[256];
rnd.nextBytes(value);
long pointer = directMemory.allocate(value.length);
directMemory.set(pointer, value, 0, value.length);
Assert.assertEquals(directMemory.get(pointer, value.length), value);
Assert.assertEquals(directMemory.get(pointer, value.length / 2), Arrays.copyOf(value, value.length / 2));
byte[] result = new byte[value.length];
directMemory.get(pointer, result, value.length / 2, value.length / 2);
byte[] expectedResult = new byte[value.length];
System.arraycopy(value, 0, expectedResult, expectedResult.length / 2, expectedResult.length / 2);
Assert.assertEquals(result, expectedResult);
directMemory.free(pointer);
}
public void testBytesWithOffset() {
final Random rnd = new Random();
ODirectMemory directMemory = new OJNADirectMemory();
byte[] value = new byte[256];
rnd.nextBytes(value);
long pointer = directMemory.allocate(value.length);
directMemory.set(pointer, value, value.length / 2, value.length / 2);
Assert.assertEquals(directMemory.get(pointer, value.length / 2), Arrays.copyOfRange(value, value.length / 2, value.length));
directMemory.free(pointer);
}
public void testCopyData() {
final Random rnd = new Random();
ODirectMemory directMemory = new OJNADirectMemory();
byte[] value = new byte[256];
rnd.nextBytes(value);
long pointer = directMemory.allocate(value.length);
directMemory.set(pointer, value, 0, value.length);
directMemory.moveData(pointer, pointer + value.length / 2, value.length / 2);
System.arraycopy(value, 0, value, value.length / 2, value.length / 2);
Assert.assertEquals(value, directMemory.get(pointer, value.length));
directMemory.free(pointer);
}
public void testCopyDataOverlap() {
final Random rnd = new Random();
ODirectMemory directMemory = new OJNADirectMemory();
byte[] value = new byte[256];
rnd.nextBytes(value);
long pointer = directMemory.allocate(value.length);
directMemory.set(pointer, value, 0, value.length);
directMemory.moveData(pointer, pointer + 1, value.length / 3);
System.arraycopy(value, 0, value, 1, value.length / 3);
Assert.assertEquals(value, directMemory.get(pointer, value.length));
directMemory.free(pointer);
}
public void testCopyDataOverlapInterval() {
final Random rnd = new Random();
ODirectMemory directMemory = new OJNADirectMemory();
byte[] value = new byte[256];
rnd.nextBytes(value);
long pointer = directMemory.allocate(value.length);
directMemory.set(pointer, value, 0, value.length);
directMemory.moveData(pointer + 2, pointer + 5, value.length / 3);
System.arraycopy(value, 2, value, 5, value.length / 3);
Assert.assertEquals(value, directMemory.get(pointer, value.length));
directMemory.free(pointer);
}
} | 0true
| nativeos_src_test_java_com_orientechnologies_nio_JNADirectMemoryTest.java |
577 | public interface IndexValuesResultListener {
boolean addResult(OIdentifiable value);
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_OIndex.java |
1,776 | public class LineStringBuilder extends BaseLineStringBuilder<LineStringBuilder> {
public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(FIELD_TYPE, TYPE.shapename);
builder.field(FIELD_COORDINATES);
coordinatesToXcontent(builder, false);
builder.endObject();
return builder;
}
@Override
public GeoShapeType type() {
return TYPE;
}
} | 0true
| src_main_java_org_elasticsearch_common_geo_builders_LineStringBuilder.java |
2,522 | public abstract class AbstractXContentGenerator implements XContentGenerator {
@Override
public void writeStringField(String fieldName, String value) throws IOException {
writeFieldName(fieldName);
writeString(value);
}
@Override
public void writeBooleanField(String fieldName, boolean value) throws IOException {
writeFieldName(fieldName);
writeBoolean(value);
}
@Override
public void writeNullField(String fieldName) throws IOException {
writeFieldName(fieldName);
writeNull();
}
@Override
public void writeNumberField(String fieldName, int value) throws IOException {
writeFieldName(fieldName);
writeNumber(value);
}
@Override
public void writeNumberField(String fieldName, long value) throws IOException {
writeFieldName(fieldName);
writeNumber(value);
}
@Override
public void writeNumberField(String fieldName, double value) throws IOException {
writeFieldName(fieldName);
writeNumber(value);
}
@Override
public void writeNumberField(String fieldName, float value) throws IOException {
writeFieldName(fieldName);
writeNumber(value);
}
@Override
public void writeBinaryField(String fieldName, byte[] data) throws IOException {
writeFieldName(fieldName);
writeBinary(data);
}
@Override
public void writeArrayFieldStart(String fieldName) throws IOException {
writeFieldName(fieldName);
writeStartArray();
}
@Override
public void writeObjectFieldStart(String fieldName) throws IOException {
writeFieldName(fieldName);
writeStartObject();
}
} | 0true
| src_main_java_org_elasticsearch_common_xcontent_support_AbstractXContentGenerator.java |
1,130 | public class NativePayloadSumNoRecordScoreScript extends AbstractSearchScript {
public static final String NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE = "native_payload_sum_no_record_script_score";
String field = null;
String[] terms = null;
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativePayloadSumNoRecordScoreScript(params);
}
}
private NativePayloadSumNoRecordScoreScript(Map<String, Object> params) {
params.entrySet();
terms = new String[params.size()];
field = params.keySet().iterator().next();
Object o = params.get(field);
ArrayList<String> arrayList = (ArrayList<String>) o;
terms = arrayList.toArray(new String[arrayList.size()]);
}
@Override
public Object run() {
float score = 0;
IndexField indexField = indexLookup().get(field);
for (int i = 0; i < terms.length; i++) {
IndexFieldTerm indexFieldTerm = indexField.get(terms[i], IndexLookup.FLAG_PAYLOADS);
for (TermPosition pos : indexFieldTerm) {
score += pos.payloadAsFloat(0);
}
}
return score;
}
} | 0true
| src_test_java_org_elasticsearch_benchmark_scripts_score_script_NativePayloadSumNoRecordScoreScript.java |
168 | class TypeProposal implements ICompletionProposal,
ICompletionProposalExtension2 {
private final ProducedType type;
private final int offset;
private final String text;
private final Tree.CompilationUnit rootNode;
private Point selection;
private TypeProposal(int offset, ProducedType type,
String text, Tree.CompilationUnit rootNode) {
this.type = type;
this.offset = offset;
this.rootNode = rootNode;
this.text = text;
}
@Override
public void apply(IDocument document) {
try {
final DocumentChange change =
new DocumentChange("Specify Type", document);
change.setEdit(new MultiTextEdit());
HashSet<Declaration> decs =
new HashSet<Declaration>();
if (type!=null) {
importType(decs, type, rootNode);
}
int il = applyImports(change, decs, rootNode, document);
change.addEdit(new ReplaceEdit(offset,
getCurrentLength(document), text));
change.perform(new NullProgressMonitor());
selection = new Point(offset+il, text.length());
}
catch (Exception e) {
e.printStackTrace();
}
}
private int getCurrentLength(IDocument document)
throws BadLocationException {
int length = 0;
for (int i=offset;
i<document.getLength();
i++) {
if (Character.isWhitespace(document.getChar(i))) {
break;
}
length++;
}
return length;
}
@Override
public Point getSelection(IDocument document) {
return selection;
}
@Override
public void apply(ITextViewer viewer, char trigger, int stateMask,
int offset) {
apply(viewer.getDocument());
}
@Override
public void selected(ITextViewer viewer, boolean smartToggle) {}
@Override
public void unselected(ITextViewer viewer) {}
@Override
public boolean validate(IDocument document, int offset,
DocumentEvent event) {
try {
String prefix = document.get(this.offset,
offset-this.offset);
return text.startsWith(prefix);
}
catch (BadLocationException e) {
return false;
}
}
@Override
public String getAdditionalProposalInfo() {
return null;
}
@Override
public String getDisplayString() {
return text;
}
@Override
public Image getImage() {
if (type==null) {
return getDecoratedImage(CEYLON_LITERAL, 0, false);
}
else {
return getImageForDeclaration(type.getDeclaration());
}
}
@Override
public IContextInformation getContextInformation() {
return null;
}
static ProposalPosition getTypeProposals(IDocument document,
int offset, int length, ProducedType infType,
Tree.CompilationUnit rootNode, String kind) {
TypeDeclaration td = infType.getDeclaration();
List<TypeDeclaration> supertypes = isTypeUnknown(infType) ?
Collections.<TypeDeclaration>emptyList() :
td.getSupertypeDeclarations();
int size = supertypes.size();
if (kind!=null) size++;
if (td instanceof UnionType ||
td instanceof IntersectionType) {
size++;
}
ICompletionProposal[] proposals =
new ICompletionProposal[size];
int i=0;
if (kind!=null) {
proposals[i++] =
new TypeProposal(offset, null, kind, rootNode);
}
if (td instanceof UnionType ||
td instanceof IntersectionType) {
String typename =
infType.getProducedTypeName(rootNode.getUnit());
proposals[i++] =
new TypeProposal(offset, infType, typename, rootNode);
}
for (int j=supertypes.size()-1; j>=0; j--) {
ProducedType type =
infType.getSupertype(supertypes.get(j));
String typename =
type.getProducedTypeName(rootNode.getUnit());
proposals[i++] =
new TypeProposal(offset, type, typename, rootNode);
}
return new ProposalPosition(document, offset, length,
0, proposals);
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_TypeProposal.java |
1,644 | public abstract class OAbstractDistributedQueryExecutor extends OQueryExecutor {
protected final ExecutorService distributedQueryExecutors;
protected final ServerInstance serverInstance;
protected final AtomicInteger failedNodes = new AtomicInteger(0);
protected OAbstractDistributedQueryExecutor(OCommandRequestText iCommand, OStorageEmbedded wrapped, ServerInstance serverInstance) {
super(iCommand, wrapped);
this.serverInstance = serverInstance;
final int cl = Runtime.getRuntime().availableProcessors() * 4;
distributedQueryExecutors = new ThreadPoolExecutor(0, cl, 0L, TimeUnit.MILLISECONDS, new ArrayBlockingQueue<Runnable>(cl),
new ThreadFactory() {
private final AtomicInteger i = new AtomicInteger(0);
@Override
public Thread newThread(Runnable r) {
final Thread t = new Thread(Thread.currentThread().getThreadGroup(), r, "DistributedQueryExecutor-"
+ i.getAndIncrement());
t.setDaemon(true);
t.setPriority(Thread.NORM_PRIORITY);
return t;
}
});
}
/**
* Run given command on all nodes.
*
* @param iDistributedCommand
* command to execute
   * @return the number of nodes that are running the command
*/
protected int runCommandOnAllNodes(final OCommandRequestText iDistributedCommand) {
final List<ODHTNode> nodes = serverInstance.getDHTNodes();
final int nodesNumber = nodes.size();
final List<Future> tasks = new ArrayList<Future>(nodesNumber);
for (final ODHTNode node : nodes) {
tasks.add(distributedQueryExecutors.submit(new Runnable() {
@Override
public void run() {
try {
Object result = node.command(wrapped.getName(), iDistributedCommand, false);
if (result != null && !node.isLocal()) {
// generally we need a thread-local database for record deserializing, but not here:
// a select result set will be routed through OHazelcastResultListener, so it will never reach this block
// other commands return primitive types, so a thread-local database instance is not required for deserializing
result = OCommandResultSerializationHelper.readFromStream((byte[]) result);
}
addResult(result);
} catch (IOException e) {
OLogManager.instance().error(this, "Error deserializing result from node " + node.getNodeId(), e);
}
}
}));
}
for (final Future task : tasks) {
try {
task.get();
} catch (Exception e) {
failedNodes.incrementAndGet();
// OLogManager.instance().error(this, "Query execution failed on one of the nodes", e);
}
}
return nodesNumber;
}
/**
   * Determines how to handle the returned result
*
* @param result
*/
protected abstract void addResult(Object result);
} | 0true
| distributed_src_main_java_com_orientechnologies_orient_server_hazelcast_oldsharding_OAbstractDistributedQueryExecutor.java |
1,128 | public interface OSQLMethod extends Comparable<OSQLMethod> {
/**
* @return method name
*/
String getName();
/**
   * Returns a convenient SQL String representation of the method.
* <p>
* Example :
*
* <pre>
* field.myMethod( param1, param2, [optionalParam3])
* </pre>
*
* This text will be used in exception messages.
*
* @return String , never null.
*/
public String getSyntax();
/**
   * @return minimum number of arguments required by this method
*/
int getMinParams();
/**
   * @return maximum number of arguments required by this method
*/
int getMaxParams();
/**
* Process a record.
*
* @param iCurrentRecord
* : current record
* @param iContext
* execution context
* @param ioResult
* : field value
* @param iMethodParams
* : function parameters, number is ensured to be within minParams and maxParams.
* @return evaluation result
*/
Object execute(final OIdentifiable iCurrentRecord, OCommandContext iContext, Object ioResult, Object[] iMethodParams)
throws ParseException;
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_sql_method_OSQLMethod.java |
3,753 | static final class Fields {
static final XContentBuilderString MERGES = new XContentBuilderString("merges");
static final XContentBuilderString CURRENT = new XContentBuilderString("current");
static final XContentBuilderString CURRENT_DOCS = new XContentBuilderString("current_docs");
static final XContentBuilderString CURRENT_SIZE = new XContentBuilderString("current_size");
static final XContentBuilderString CURRENT_SIZE_IN_BYTES = new XContentBuilderString("current_size_in_bytes");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString TOTAL_TIME = new XContentBuilderString("total_time");
static final XContentBuilderString TOTAL_TIME_IN_MILLIS = new XContentBuilderString("total_time_in_millis");
static final XContentBuilderString TOTAL_DOCS = new XContentBuilderString("total_docs");
static final XContentBuilderString TOTAL_SIZE = new XContentBuilderString("total_size");
static final XContentBuilderString TOTAL_SIZE_IN_BYTES = new XContentBuilderString("total_size_in_bytes");
} | 0true
| src_main_java_org_elasticsearch_index_merge_MergeStats.java |
2,011 | private static class ValueSetterEntryProcessor extends AbstractEntryProcessor<String, String> {
private final String value;
ValueSetterEntryProcessor(String value) {
this.value = value;
}
public Object process(Map.Entry entry) {
entry.setValue(value);
return null;
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_map_mapstore_MapStoreTest.java |
3,705 | public class VersionFieldMapper extends AbstractFieldMapper<Long> implements InternalMapper, RootMapper {
public static final String NAME = "_version";
public static final String CONTENT_TYPE = "_version";
public static class Defaults {
public static final String NAME = VersionFieldMapper.NAME;
public static final float BOOST = 1.0f;
public static final FieldType FIELD_TYPE = NumericDocValuesField.TYPE;
}
public static class Builder extends Mapper.Builder<Builder, VersionFieldMapper> {
DocValuesFormatProvider docValuesFormat;
public Builder() {
super(Defaults.NAME);
}
@Override
public VersionFieldMapper build(BuilderContext context) {
return new VersionFieldMapper(docValuesFormat);
}
public Builder docValuesFormat(DocValuesFormatProvider docValuesFormat) {
this.docValuesFormat = docValuesFormat;
return this;
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
Builder builder = version();
for (Map.Entry<String, Object> entry : node.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals(DOC_VALUES_FORMAT)) {
String docValuesFormatName = fieldNode.toString();
builder.docValuesFormat(parserContext.docValuesFormatService().get(docValuesFormatName));
}
}
return builder;
}
}
private final ThreadLocal<Field> fieldCache = new ThreadLocal<Field>() {
@Override
protected Field initialValue() {
return new NumericDocValuesField(NAME, -1L);
}
};
public VersionFieldMapper() {
this(null);
}
VersionFieldMapper(DocValuesFormatProvider docValuesFormat) {
super(new Names(NAME, NAME, NAME, NAME), Defaults.BOOST, Defaults.FIELD_TYPE, null, null, null, null, docValuesFormat, null, null, null, ImmutableSettings.EMPTY);
}
@Override
protected String defaultDocValuesFormat() {
return "disk";
}
@Override
public void preParse(ParseContext context) throws IOException {
super.parse(context);
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
// see UidFieldMapper.parseCreateField
final Field version = fieldCache.get();
context.version(version);
fields.add(version);
}
@Override
public void parse(ParseContext context) throws IOException {
// _version added in preparse
}
@Override
public Long value(Object value) {
if (value == null || (value instanceof Long)) {
return (Long) value;
} else {
return Long.parseLong(value.toString());
}
}
@Override
public void postParse(ParseContext context) throws IOException {
// In the case of nested docs, let's fill nested docs with version=0 so that Lucene doesn't write a Bitset for documents
// that don't have the field
for (int i = 1; i < context.docs().size(); i++) {
final Document doc = context.docs().get(i);
doc.add(new NumericDocValuesField(NAME, 0L));
}
}
@Override
public void validate(ParseContext context) throws MapperParsingException {
}
@Override
public boolean includeInObject() {
return false;
}
@Override
public FieldType defaultFieldType() {
return Defaults.FIELD_TYPE;
}
@Override
public FieldDataType defaultFieldDataType() {
return new FieldDataType("long");
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
if (!includeDefaults && (docValuesFormat == null || docValuesFormat.name().equals(defaultDocValuesFormat()))) {
return builder;
}
builder.startObject(CONTENT_TYPE);
if (docValuesFormat != null) {
if (includeDefaults || !docValuesFormat.name().equals(defaultDocValuesFormat())) {
builder.field(DOC_VALUES_FORMAT, docValuesFormat.name());
}
} else {
String format = defaultDocValuesFormat();
if (format == null) {
format = DocValuesFormatService.DEFAULT_FORMAT;
}
builder.field(DOC_VALUES_FORMAT, format);
}
builder.endObject();
return builder;
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
if (mergeContext.mergeFlags().simulate()) {
return;
}
AbstractFieldMapper<?> fieldMergeWith = (AbstractFieldMapper<?>) mergeWith;
if (fieldMergeWith.docValuesFormatProvider() != null) {
this.docValuesFormat = fieldMergeWith.docValuesFormatProvider();
}
}
@Override
public void close() {
fieldCache.remove();
}
@Override
public boolean hasDocValues() {
return true;
}
} | 1no label
| src_main_java_org_elasticsearch_index_mapper_internal_VersionFieldMapper.java |
70 | public interface OSharedContainer {
public boolean existsResource(final String iName);
public <T> T removeResource(final String iName);
public <T> T getResource(final String iName, final Callable<T> iCallback);
} | 0true
| commons_src_main_java_com_orientechnologies_common_concur_resource_OSharedContainer.java |
3,793 | public class PercolateStats implements Streamable, ToXContent {
private long percolateCount;
private long percolateTimeInMillis;
private long current;
private long memorySizeInBytes;
private long numQueries;
/**
     * No-op constructor for serialization purposes.
*/
public PercolateStats() {
}
PercolateStats(long percolateCount, long percolateTimeInMillis, long current, long memorySizeInBytes, long numQueries) {
this.percolateCount = percolateCount;
this.percolateTimeInMillis = percolateTimeInMillis;
this.current = current;
this.memorySizeInBytes = memorySizeInBytes;
this.numQueries = numQueries;
}
/**
     * @return The number of times the percolate API has been invoked.
*/
public long getCount() {
return percolateCount;
}
/**
     * @return The total amount of time spent in the percolate API
*/
public long getTimeInMillis() {
return percolateTimeInMillis;
}
/**
     * @return The total amount of time spent in the percolate API
*/
public TimeValue getTime() {
return new TimeValue(getTimeInMillis());
}
/**
     * @return The total number of active percolate API invocations.
*/
public long getCurrent() {
return current;
}
/**
* @return The total number of loaded percolate queries.
*/
public long getNumQueries() {
return numQueries;
}
/**
* @return The total size the loaded queries take in memory.
*/
public long getMemorySizeInBytes() {
return memorySizeInBytes;
}
/**
* @return The total size the loaded queries take in memory.
*/
public ByteSizeValue getMemorySize() {
return new ByteSizeValue(memorySizeInBytes);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.PERCOLATE);
builder.field(Fields.TOTAL, percolateCount);
builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, percolateTimeInMillis);
builder.field(Fields.CURRENT, current);
builder.field(Fields.MEMORY_SIZE_IN_BYTES, memorySizeInBytes);
builder.field(Fields.MEMORY_SIZE, getMemorySize());
builder.field(Fields.QUERIES, getNumQueries());
builder.endObject();
return builder;
}
public void add(PercolateStats percolate) {
if (percolate == null) {
return;
}
percolateCount += percolate.getCount();
percolateTimeInMillis += percolate.getTimeInMillis();
current += percolate.getCurrent();
memorySizeInBytes += percolate.getMemorySizeInBytes();
numQueries += percolate.getNumQueries();
}
static final class Fields {
static final XContentBuilderString PERCOLATE = new XContentBuilderString("percolate");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString TIME = new XContentBuilderString("getTime");
static final XContentBuilderString TIME_IN_MILLIS = new XContentBuilderString("time_in_millis");
static final XContentBuilderString CURRENT = new XContentBuilderString("current");
static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
static final XContentBuilderString QUERIES = new XContentBuilderString("queries");
}
public static PercolateStats readPercolateStats(StreamInput in) throws IOException {
PercolateStats stats = new PercolateStats();
stats.readFrom(in);
return stats;
}
@Override
public void readFrom(StreamInput in) throws IOException {
percolateCount = in.readVLong();
percolateTimeInMillis = in.readVLong();
current = in.readVLong();
memorySizeInBytes = in.readVLong();
numQueries = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(percolateCount);
out.writeVLong(percolateTimeInMillis);
out.writeVLong(current);
out.writeVLong(memorySizeInBytes);
out.writeVLong(numQueries);
}
} | 1no label
| src_main_java_org_elasticsearch_index_percolator_stats_PercolateStats.java |
109 | (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}}); | 0true
| src_main_java_jsr166e_CountedCompleter.java |
170 | {
@Override
public boolean matchesSafely( Iterable<LogEntry> item )
{
Iterator<LogEntry> actualEntries = item.iterator();
for ( Matcher<? extends LogEntry> matcher : matchers )
{
if ( actualEntries.hasNext() )
{
LogEntry next = actualEntries.next();
if ( !matcher.matches( next ) )
{
// Wrong!
return false;
}
}
else
{
// Too few actual entries!
return false;
}
}
if ( actualEntries.hasNext() )
{
// Too many actual entries!
return false;
}
// All good in the hood :)
return true;
}
@Override
public void describeTo( Description description )
{
for ( Matcher<? extends LogEntry> matcher : matchers )
{
description.appendDescriptionOf( matcher ).appendText( ",\n" );
}
}
}; | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_LogMatchers.java |
2,523 | public abstract class AbstractXContentParser implements XContentParser {
//Currently this is not a setting that can be changed and is a policy
    // that relates to how parsing of things like "boost" is done across
    // the whole of Elasticsearch (e.g. whether the String "1.0" is a valid float).
// The idea behind keeping it as a constant is that we can track
// references to this policy decision throughout the codebase and find
// and change any code that needs to apply an alternative policy.
public static final boolean DEFAULT_NUMBER_COEERCE_POLICY = true;
private static void checkCoerceString(boolean coeerce, Class<? extends Number> clazz) {
if (!coeerce) {
            //Need to throw an IllegalArgumentException, as the current catch logic in
            //NumberFieldMapper.parseCreateField relies on this for "malformed" value detection
throw new IllegalArgumentException(clazz.getSimpleName() + " value passed as String");
}
}
// The 3rd party parsers we rely on are known to silently truncate fractions: see
// http://fasterxml.github.io/jackson-core/javadoc/2.3.0/com/fasterxml/jackson/core/JsonParser.html#getShortValue()
// If this behaviour is flagged as undesirable and any truncation occurs
    // then this method is called to trigger the "malformed" handling logic
void ensureNumberConversion(boolean coerce, long result, Class<? extends Number> clazz) throws IOException {
if (!coerce) {
double fullVal = doDoubleValue();
if (result != fullVal) {
                // Need to throw an IllegalArgumentException, as the current catch
                // logic in NumberFieldMapper.parseCreateField relies on this
                // for "malformed" value detection
throw new IllegalArgumentException(fullVal + " cannot be converted to " + clazz.getSimpleName() + " without data loss");
}
}
}
@Override
public boolean isBooleanValue() throws IOException {
switch (currentToken()) {
case VALUE_BOOLEAN:
return true;
case VALUE_NUMBER:
NumberType numberType = numberType();
return numberType == NumberType.LONG || numberType == NumberType.INT;
case VALUE_STRING:
return Booleans.isBoolean(textCharacters(), textOffset(), textLength());
default:
return false;
}
}
@Override
public boolean booleanValue() throws IOException {
Token token = currentToken();
if (token == Token.VALUE_NUMBER) {
return intValue() != 0;
} else if (token == Token.VALUE_STRING) {
return Booleans.parseBoolean(textCharacters(), textOffset(), textLength(), false /* irrelevant */);
}
return doBooleanValue();
}
protected abstract boolean doBooleanValue() throws IOException;
@Override
public short shortValue() throws IOException {
return shortValue(DEFAULT_NUMBER_COEERCE_POLICY);
}
@Override
public short shortValue(boolean coerce) throws IOException {
Token token = currentToken();
if (token == Token.VALUE_STRING) {
checkCoerceString(coerce, Short.class);
return Short.parseShort(text());
}
short result = doShortValue();
ensureNumberConversion(coerce, result, Short.class);
return result;
}
protected abstract short doShortValue() throws IOException;
@Override
public int intValue() throws IOException {
return intValue(DEFAULT_NUMBER_COEERCE_POLICY);
}
@Override
public int intValue(boolean coerce) throws IOException {
Token token = currentToken();
if (token == Token.VALUE_STRING) {
checkCoerceString(coerce, Integer.class);
return Integer.parseInt(text());
}
int result = doIntValue();
ensureNumberConversion(coerce, result, Integer.class);
return result;
}
protected abstract int doIntValue() throws IOException;
@Override
public long longValue() throws IOException {
return longValue(DEFAULT_NUMBER_COEERCE_POLICY);
}
@Override
public long longValue(boolean coerce) throws IOException {
Token token = currentToken();
if (token == Token.VALUE_STRING) {
checkCoerceString(coerce, Long.class);
return Long.parseLong(text());
}
long result = doLongValue();
ensureNumberConversion(coerce, result, Long.class);
return result;
}
protected abstract long doLongValue() throws IOException;
@Override
public float floatValue() throws IOException {
return floatValue(DEFAULT_NUMBER_COEERCE_POLICY);
}
@Override
public float floatValue(boolean coerce) throws IOException {
Token token = currentToken();
if (token == Token.VALUE_STRING) {
checkCoerceString(coerce, Float.class);
return Float.parseFloat(text());
}
return doFloatValue();
}
protected abstract float doFloatValue() throws IOException;
@Override
public double doubleValue() throws IOException {
return doubleValue(DEFAULT_NUMBER_COEERCE_POLICY);
}
@Override
public double doubleValue(boolean coerce) throws IOException {
Token token = currentToken();
if (token == Token.VALUE_STRING) {
checkCoerceString(coerce, Double.class);
return Double.parseDouble(text());
}
return doDoubleValue();
}
protected abstract double doDoubleValue() throws IOException;
@Override
public String textOrNull() throws IOException {
if (currentToken() == Token.VALUE_NULL) {
return null;
}
return text();
}
@Override
public BytesRef bytesOrNull() throws IOException {
if (currentToken() == Token.VALUE_NULL) {
return null;
}
return bytes();
}
@Override
public Map<String, Object> map() throws IOException {
return readMap(this);
}
@Override
public Map<String, Object> mapOrdered() throws IOException {
return readOrderedMap(this);
}
@Override
public Map<String, Object> mapAndClose() throws IOException {
try {
return map();
} finally {
close();
}
}
@Override
public Map<String, Object> mapOrderedAndClose() throws IOException {
try {
return mapOrdered();
} finally {
close();
}
}
static interface MapFactory {
Map<String, Object> newMap();
}
static final MapFactory SIMPLE_MAP_FACTORY = new MapFactory() {
@Override
public Map<String, Object> newMap() {
return new HashMap<String, Object>();
}
};
static final MapFactory ORDERED_MAP_FACTORY = new MapFactory() {
@Override
public Map<String, Object> newMap() {
return new LinkedHashMap<String, Object>();
}
};
static Map<String, Object> readMap(XContentParser parser) throws IOException {
return readMap(parser, SIMPLE_MAP_FACTORY);
}
static Map<String, Object> readOrderedMap(XContentParser parser) throws IOException {
return readMap(parser, ORDERED_MAP_FACTORY);
}
static Map<String, Object> readMap(XContentParser parser, MapFactory mapFactory) throws IOException {
Map<String, Object> map = mapFactory.newMap();
XContentParser.Token t = parser.currentToken();
if (t == null) {
t = parser.nextToken();
}
if (t == XContentParser.Token.START_OBJECT) {
t = parser.nextToken();
}
for (; t == XContentParser.Token.FIELD_NAME; t = parser.nextToken()) {
// Must point to field name
String fieldName = parser.currentName();
// And then the value...
t = parser.nextToken();
Object value = readValue(parser, mapFactory, t);
map.put(fieldName, value);
}
return map;
}
private static List<Object> readList(XContentParser parser, MapFactory mapFactory, XContentParser.Token t) throws IOException {
ArrayList<Object> list = new ArrayList<Object>();
while ((t = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
list.add(readValue(parser, mapFactory, t));
}
return list;
}
private static Object readValue(XContentParser parser, MapFactory mapFactory, XContentParser.Token t) throws IOException {
if (t == XContentParser.Token.VALUE_NULL) {
return null;
} else if (t == XContentParser.Token.VALUE_STRING) {
return parser.text();
} else if (t == XContentParser.Token.VALUE_NUMBER) {
XContentParser.NumberType numberType = parser.numberType();
if (numberType == XContentParser.NumberType.INT) {
return parser.intValue();
} else if (numberType == XContentParser.NumberType.LONG) {
return parser.longValue();
} else if (numberType == XContentParser.NumberType.FLOAT) {
return parser.floatValue();
} else if (numberType == XContentParser.NumberType.DOUBLE) {
return parser.doubleValue();
}
} else if (t == XContentParser.Token.VALUE_BOOLEAN) {
return parser.booleanValue();
} else if (t == XContentParser.Token.START_OBJECT) {
return readMap(parser, mapFactory);
} else if (t == XContentParser.Token.START_ARRAY) {
return readList(parser, mapFactory, t);
} else if (t == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
return parser.binaryValue();
}
return null;
}
} | 0true
| src_main_java_org_elasticsearch_common_xcontent_support_AbstractXContentParser.java |
5,402 | public abstract class FieldDataSource {
public static class MetaData {
public static final MetaData UNKNOWN = new MetaData();
public enum Uniqueness {
UNIQUE,
NOT_UNIQUE,
UNKNOWN;
public boolean unique() {
return this == UNIQUE;
}
}
private long maxAtomicUniqueValuesCount = -1;
private boolean multiValued = true;
private Uniqueness uniqueness = Uniqueness.UNKNOWN;
private MetaData() {}
private MetaData(MetaData other) {
this.maxAtomicUniqueValuesCount = other.maxAtomicUniqueValuesCount;
this.multiValued = other.multiValued;
this.uniqueness = other.uniqueness;
}
private MetaData(long maxAtomicUniqueValuesCount, boolean multiValued, Uniqueness uniqueness) {
this.maxAtomicUniqueValuesCount = maxAtomicUniqueValuesCount;
this.multiValued = multiValued;
this.uniqueness = uniqueness;
}
public long maxAtomicUniqueValuesCount() {
return maxAtomicUniqueValuesCount;
}
public boolean multiValued() {
return multiValued;
}
public Uniqueness uniqueness() {
return uniqueness;
}
public static MetaData load(IndexFieldData indexFieldData, SearchContext context) {
MetaData metaData = new MetaData();
metaData.uniqueness = Uniqueness.UNIQUE;
for (AtomicReaderContext readerContext : context.searcher().getTopReaderContext().leaves()) {
AtomicFieldData fieldData = indexFieldData.load(readerContext);
metaData.multiValued |= fieldData.isMultiValued();
metaData.maxAtomicUniqueValuesCount = Math.max(metaData.maxAtomicUniqueValuesCount, fieldData.getNumberUniqueValues());
}
return metaData;
}
public static Builder builder() {
return new Builder();
}
public static Builder builder(MetaData other) {
return new Builder(other);
}
public static class Builder {
private final MetaData metaData;
private Builder() {
metaData = new MetaData();
}
private Builder(MetaData metaData) {
this.metaData = new MetaData(metaData);
}
public Builder maxAtomicUniqueValuesCount(long maxAtomicUniqueValuesCount) {
metaData.maxAtomicUniqueValuesCount = maxAtomicUniqueValuesCount;
return this;
}
public Builder multiValued(boolean multiValued) {
metaData.multiValued = multiValued;
return this;
}
public Builder uniqueness(Uniqueness uniqueness) {
metaData.uniqueness = uniqueness;
return this;
}
public MetaData build() {
return metaData;
}
}
}
/**
* Get the current {@link BytesValues}.
*/
public abstract BytesValues bytesValues();
/**
* Ask the underlying data source to provide pre-computed hashes, optional operation.
*/
public void setNeedsHashes(boolean needsHashes) {}
public abstract MetaData metaData();
public static abstract class Bytes extends FieldDataSource {
public static abstract class WithOrdinals extends Bytes {
public abstract BytesValues.WithOrdinals bytesValues();
public static class FieldData extends WithOrdinals implements ReaderContextAware {
protected boolean needsHashes;
protected final IndexFieldData.WithOrdinals<?> indexFieldData;
protected final MetaData metaData;
protected AtomicFieldData.WithOrdinals<?> atomicFieldData;
private BytesValues.WithOrdinals bytesValues;
public FieldData(IndexFieldData.WithOrdinals<?> indexFieldData, MetaData metaData) {
this.indexFieldData = indexFieldData;
this.metaData = metaData;
needsHashes = false;
}
@Override
public MetaData metaData() {
return metaData;
}
public final void setNeedsHashes(boolean needsHashes) {
this.needsHashes = needsHashes;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
atomicFieldData = indexFieldData.load(reader);
if (bytesValues != null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
}
@Override
public BytesValues.WithOrdinals bytesValues() {
if (bytesValues == null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
return bytesValues;
}
}
}
public static class FieldData extends Bytes implements ReaderContextAware {
protected boolean needsHashes;
protected final IndexFieldData<?> indexFieldData;
protected final MetaData metaData;
protected AtomicFieldData<?> atomicFieldData;
private BytesValues bytesValues;
public FieldData(IndexFieldData<?> indexFieldData, MetaData metaData) {
this.indexFieldData = indexFieldData;
this.metaData = metaData;
needsHashes = false;
}
@Override
public MetaData metaData() {
return metaData;
}
public final void setNeedsHashes(boolean needsHashes) {
this.needsHashes = needsHashes;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
atomicFieldData = indexFieldData.load(reader);
if (bytesValues != null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
}
@Override
public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
if (bytesValues == null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
return bytesValues;
}
}
public static class Script extends Bytes {
private final ScriptBytesValues values;
public Script(SearchScript script) {
values = new ScriptBytesValues(script);
}
@Override
public MetaData metaData() {
return MetaData.UNKNOWN;
}
@Override
public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
return values;
}
}
public static class SortedAndUnique extends Bytes implements ReaderContextAware {
private final FieldDataSource delegate;
private final MetaData metaData;
private BytesValues bytesValues;
public SortedAndUnique(FieldDataSource delegate) {
this.delegate = delegate;
this.metaData = MetaData.builder(delegate.metaData()).uniqueness(MetaData.Uniqueness.UNIQUE).build();
}
@Override
public MetaData metaData() {
return metaData;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
bytesValues = null; // order may change per-segment -> reset
}
@Override
public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
if (bytesValues == null) {
bytesValues = delegate.bytesValues();
if (bytesValues.isMultiValued() &&
(!delegate.metaData().uniqueness.unique() || bytesValues.getOrder() != Order.BYTES)) {
bytesValues = new SortedUniqueBytesValues(bytesValues);
}
}
return bytesValues;
}
static class SortedUniqueBytesValues extends FilterBytesValues {
final BytesRef spare;
int[] sortedIds;
final BytesRefHash bytes;
int numUniqueValues;
int pos = Integer.MAX_VALUE;
public SortedUniqueBytesValues(BytesValues delegate) {
super(delegate);
bytes = new BytesRefHash();
spare = new BytesRef();
}
@Override
public int setDocument(int docId) {
final int numValues = super.setDocument(docId);
if (numValues == 0) {
sortedIds = null;
return 0;
}
bytes.clear();
bytes.reinit();
for (int i = 0; i < numValues; ++i) {
bytes.add(super.nextValue(), super.currentValueHash());
}
numUniqueValues = bytes.size();
sortedIds = bytes.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
pos = 0;
return numUniqueValues;
}
@Override
public BytesRef nextValue() {
bytes.get(sortedIds[pos++], spare);
return spare;
}
@Override
public int currentValueHash() {
return spare.hashCode();
}
@Override
public Order getOrder() {
return Order.BYTES;
}
}
}
}
public static abstract class Numeric extends FieldDataSource {
/** Whether the underlying data is floating-point or not. */
public abstract boolean isFloatingPoint();
/** Get the current {@link LongValues}. */
public abstract LongValues longValues();
/** Get the current {@link DoubleValues}. */
public abstract DoubleValues doubleValues();
public static class WithScript extends Numeric {
private final LongValues longValues;
private final DoubleValues doubleValues;
private final FieldDataSource.WithScript.BytesValues bytesValues;
public WithScript(Numeric delegate, SearchScript script) {
this.longValues = new LongValues(delegate, script);
this.doubleValues = new DoubleValues(delegate, script);
this.bytesValues = new FieldDataSource.WithScript.BytesValues(delegate, script);
}
@Override
public boolean isFloatingPoint() {
return true; // even if the underlying source produces longs, scripts can change them to doubles
}
@Override
public BytesValues bytesValues() {
return bytesValues;
}
@Override
public LongValues longValues() {
return longValues;
}
@Override
public DoubleValues doubleValues() {
return doubleValues;
}
@Override
public MetaData metaData() {
return MetaData.UNKNOWN;
}
static class LongValues extends org.elasticsearch.index.fielddata.LongValues {
private final Numeric source;
private final SearchScript script;
public LongValues(Numeric source, SearchScript script) {
super(true);
this.source = source;
this.script = script;
}
@Override
public int setDocument(int docId) {
return source.longValues().setDocument(docId);
}
@Override
public long nextValue() {
script.setNextVar("_value", source.longValues().nextValue());
return script.runAsLong();
}
}
static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues {
private final Numeric source;
private final SearchScript script;
public DoubleValues(Numeric source, SearchScript script) {
super(true);
this.source = source;
this.script = script;
}
@Override
public int setDocument(int docId) {
return source.doubleValues().setDocument(docId);
}
@Override
public double nextValue() {
script.setNextVar("_value", source.doubleValues().nextValue());
return script.runAsDouble();
}
}
}
public static class FieldData extends Numeric implements ReaderContextAware {
protected boolean needsHashes;
protected final IndexNumericFieldData<?> indexFieldData;
protected final MetaData metaData;
protected AtomicNumericFieldData atomicFieldData;
private BytesValues bytesValues;
private LongValues longValues;
private DoubleValues doubleValues;
public FieldData(IndexNumericFieldData<?> indexFieldData, MetaData metaData) {
this.indexFieldData = indexFieldData;
this.metaData = metaData;
needsHashes = false;
}
@Override
public MetaData metaData() {
return metaData;
}
@Override
public boolean isFloatingPoint() {
return indexFieldData.getNumericType().isFloatingPoint();
}
@Override
public final void setNeedsHashes(boolean needsHashes) {
this.needsHashes = needsHashes;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
atomicFieldData = indexFieldData.load(reader);
if (bytesValues != null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
if (longValues != null) {
longValues = atomicFieldData.getLongValues();
}
if (doubleValues != null) {
doubleValues = atomicFieldData.getDoubleValues();
}
}
@Override
public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
if (bytesValues == null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
return bytesValues;
}
@Override
public org.elasticsearch.index.fielddata.LongValues longValues() {
if (longValues == null) {
longValues = atomicFieldData.getLongValues();
}
assert longValues.getOrder() == Order.NUMERIC;
return longValues;
}
@Override
public org.elasticsearch.index.fielddata.DoubleValues doubleValues() {
if (doubleValues == null) {
doubleValues = atomicFieldData.getDoubleValues();
}
assert doubleValues.getOrder() == Order.NUMERIC;
return doubleValues;
}
}
public static class Script extends Numeric {
private final ScriptValueType scriptValueType;
private final ScriptDoubleValues doubleValues;
private final ScriptLongValues longValues;
private final ScriptBytesValues bytesValues;
public Script(SearchScript script, ScriptValueType scriptValueType) {
this.scriptValueType = scriptValueType;
longValues = new ScriptLongValues(script);
doubleValues = new ScriptDoubleValues(script);
bytesValues = new ScriptBytesValues(script);
}
@Override
public MetaData metaData() {
return MetaData.UNKNOWN;
}
@Override
public boolean isFloatingPoint() {
return scriptValueType != null ? scriptValueType.isFloatingPoint() : true;
}
@Override
public LongValues longValues() {
return longValues;
}
@Override
public DoubleValues doubleValues() {
return doubleValues;
}
@Override
public BytesValues bytesValues() {
return bytesValues;
}
}
public static class SortedAndUnique extends Numeric implements ReaderContextAware {
private final Numeric delegate;
private final MetaData metaData;
private LongValues longValues;
private DoubleValues doubleValues;
private BytesValues bytesValues;
public SortedAndUnique(Numeric delegate) {
this.delegate = delegate;
this.metaData = MetaData.builder(delegate.metaData()).uniqueness(MetaData.Uniqueness.UNIQUE).build();
}
@Override
public MetaData metaData() {
return metaData;
}
@Override
public boolean isFloatingPoint() {
return delegate.isFloatingPoint();
}
@Override
public void setNextReader(AtomicReaderContext reader) {
longValues = null; // order may change per-segment -> reset
doubleValues = null;
bytesValues = null;
}
@Override
public org.elasticsearch.index.fielddata.LongValues longValues() {
if (longValues == null) {
longValues = delegate.longValues();
if (longValues.isMultiValued() &&
(!delegate.metaData().uniqueness.unique() || longValues.getOrder() != Order.NUMERIC)) {
longValues = new SortedUniqueLongValues(longValues);
}
}
return longValues;
}
@Override
public org.elasticsearch.index.fielddata.DoubleValues doubleValues() {
if (doubleValues == null) {
doubleValues = delegate.doubleValues();
if (doubleValues.isMultiValued() &&
(!delegate.metaData().uniqueness.unique() || doubleValues.getOrder() != Order.NUMERIC)) {
doubleValues = new SortedUniqueDoubleValues(doubleValues);
}
}
return doubleValues;
}
@Override
public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
if (bytesValues == null) {
bytesValues = delegate.bytesValues();
if (bytesValues.isMultiValued() &&
(!delegate.metaData().uniqueness.unique() || bytesValues.getOrder() != Order.BYTES)) {
bytesValues = new SortedUniqueBytesValues(bytesValues);
}
}
return bytesValues;
}
private static class SortedUniqueLongValues extends FilterLongValues {
int numUniqueValues;
long[] array = new long[2];
int pos = Integer.MAX_VALUE;
final InPlaceMergeSorter sorter = new InPlaceMergeSorter() {
@Override
protected void swap(int i, int j) {
final long tmp = array[i];
array[i] = array[j];
array[j] = tmp;
}
@Override
protected int compare(int i, int j) {
final long l1 = array[i];
final long l2 = array[j];
return Longs.compare(l1, l2);
}
};
protected SortedUniqueLongValues(LongValues delegate) {
super(delegate);
}
@Override
public int setDocument(int docId) {
final int numValues = super.setDocument(docId);
array = ArrayUtil.grow(array, numValues);
for (int i = 0; i < numValues; ++i) {
array[i] = super.nextValue();
}
pos = 0;
return numUniqueValues = CollectionUtils.sortAndDedup(array, numValues);
}
@Override
public long nextValue() {
assert pos < numUniqueValues;
return array[pos++];
}
@Override
public Order getOrder() {
return Order.NUMERIC;
}
}
private static class SortedUniqueDoubleValues extends FilterDoubleValues {
int numUniqueValues;
double[] array = new double[2];
int pos = Integer.MAX_VALUE;
final InPlaceMergeSorter sorter = new InPlaceMergeSorter() {
@Override
protected void swap(int i, int j) {
final double tmp = array[i];
array[i] = array[j];
array[j] = tmp;
}
@Override
protected int compare(int i, int j) {
return Double.compare(array[i], array[j]);
}
};
SortedUniqueDoubleValues(DoubleValues delegate) {
super(delegate);
}
@Override
public int setDocument(int docId) {
final int numValues = super.setDocument(docId);
array = ArrayUtil.grow(array, numValues);
for (int i = 0; i < numValues; ++i) {
array[i] = super.nextValue();
}
pos = 0;
return numUniqueValues = CollectionUtils.sortAndDedup(array, numValues);
}
@Override
public double nextValue() {
assert pos < numUniqueValues;
return array[pos++];
}
@Override
public Order getOrder() {
return Order.NUMERIC;
}
}
}
}
// No need to implement ReaderContextAware here, the delegate already takes care of updating data structures
public static class WithScript extends Bytes {
private final BytesValues bytesValues;
public WithScript(FieldDataSource delegate, SearchScript script) {
this.bytesValues = new BytesValues(delegate, script);
}
@Override
public MetaData metaData() {
return MetaData.UNKNOWN;
}
@Override
public BytesValues bytesValues() {
return bytesValues;
}
static class BytesValues extends org.elasticsearch.index.fielddata.BytesValues {
private final FieldDataSource source;
private final SearchScript script;
private final BytesRef scratch;
public BytesValues(FieldDataSource source, SearchScript script) {
super(true);
this.source = source;
this.script = script;
scratch = new BytesRef();
}
@Override
public int setDocument(int docId) {
return source.bytesValues().setDocument(docId);
}
@Override
public BytesRef nextValue() {
BytesRef value = source.bytesValues().nextValue();
script.setNextVar("_value", value.utf8ToString());
scratch.copyChars(script.run().toString());
return scratch;
}
}
}
public static class GeoPoint extends FieldDataSource implements ReaderContextAware {
protected boolean needsHashes;
protected final IndexGeoPointFieldData<?> indexFieldData;
private final MetaData metaData;
protected AtomicGeoPointFieldData<?> atomicFieldData;
private BytesValues bytesValues;
private GeoPointValues geoPointValues;
public GeoPoint(IndexGeoPointFieldData<?> indexFieldData, MetaData metaData) {
this.indexFieldData = indexFieldData;
this.metaData = metaData;
needsHashes = false;
}
@Override
public MetaData metaData() {
return metaData;
}
@Override
public final void setNeedsHashes(boolean needsHashes) {
this.needsHashes = needsHashes;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
atomicFieldData = indexFieldData.load(reader);
if (bytesValues != null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
if (geoPointValues != null) {
geoPointValues = atomicFieldData.getGeoPointValues();
}
}
@Override
public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
if (bytesValues == null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
return bytesValues;
}
public org.elasticsearch.index.fielddata.GeoPointValues geoPointValues() {
if (geoPointValues == null) {
geoPointValues = atomicFieldData.getGeoPointValues();
}
return geoPointValues;
}
}
} | 1no label
| src_main_java_org_elasticsearch_search_aggregations_support_FieldDataSource.java |
1,401 | public interface AccessDelegate<T extends HazelcastRegion> {
/**
* Get the wrapped cache region
*
* @return The underlying region
*/
T getHazelcastRegion();
/**
* Attempt to retrieve an object from the cache. Mainly used in attempting
* to resolve entities/collections from the second level cache.
*
* @param key The key of the item to be retrieved.
* @param txTimestamp a timestamp prior to the transaction start time
* @return the cached object or <tt>null</tt>
* @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
Object get(Object key, long txTimestamp) throws CacheException;
/**
* Called after an item has been inserted (before the transaction completes),
* instead of calling evict().
* This method is used by "synchronous" concurrency strategies.
*
* @param key The item key
* @param value The item
* @param version The item's version value
     * @return Were the contents of the cache actually changed by this operation?
     * @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
boolean insert(Object key, Object value, Object version) throws CacheException;
/**
* Called after an item has been inserted (after the transaction completes),
* instead of calling release().
* This method is used by "asynchronous" concurrency strategies.
*
* @param key The item key
* @param value The item
* @param version The item's version value
     * @return Were the contents of the cache actually changed by this operation?
     * @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
boolean afterInsert(Object key, Object value, Object version) throws CacheException;
/**
* Called after an item has been updated (before the transaction completes),
* instead of calling evict(). This method is used by "synchronous" concurrency
* strategies.
*
* @param key The item key
* @param value The item
* @param currentVersion The item's current version value
* @param previousVersion The item's previous version value
     * @return Were the contents of the cache actually changed by this operation?
     * @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
boolean update(Object key, Object value, Object currentVersion, Object previousVersion) throws CacheException;
/**
* Called after an item has been updated (after the transaction completes),
* instead of calling release(). This method is used by "asynchronous"
* concurrency strategies.
*
* @param key The item key
* @param value The item
* @param currentVersion The item's current version value
* @param previousVersion The item's previous version value
* @param lock The lock previously obtained from {@link #lockItem}
     * @return Were the contents of the cache actually changed by this operation?
     * @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
boolean afterUpdate(Object key, Object value, Object currentVersion, Object previousVersion, SoftLock lock)
throws CacheException;
/**
* Attempt to cache an object, after loading from the database.
*
* @param key The item key
* @param value The item
* @param txTimestamp a timestamp prior to the transaction start time
* @param version the item version number
* @return <tt>true</tt> if the object was successfully cached
* @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
boolean putFromLoad(Object key, Object value, long txTimestamp, Object version) throws CacheException;
/**
* Attempt to cache an object, after loading from the database, explicitly
* specifying the minimalPut behavior.
*
* @param key The item key
* @param value The item
* @param txTimestamp a timestamp prior to the transaction start time
* @param version the item version number
* @param minimalPutOverride Explicit minimalPut flag
* @return <tt>true</tt> if the object was successfully cached
* @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
boolean putFromLoad(Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride)
throws CacheException;
/**
* Called after an item has become stale (before the transaction completes).
* This method is used by "synchronous" concurrency strategies.
*
* @param key The key of the item to remove
* @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
void remove(Object key) throws CacheException;
/**
* Called to evict data from the entire region
*
* @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
void removeAll() throws CacheException;
/**
* Forcibly evict an item from the cache immediately without regard for transaction
* isolation.
*
* @param key The key of the item to remove
* @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
void evict(Object key) throws CacheException;
/**
* Forcibly evict all items from the cache immediately without regard for transaction
* isolation.
*
* @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
void evictAll() throws CacheException;
/**
* We are going to attempt to update/delete the keyed object. This
* method is used by "asynchronous" concurrency strategies.
* <p/>
* The returned object must be passed back to release(), to release the
* lock. Concurrency strategies which do not support client-visible
* locks may silently return null.
*
* @param key The key of the item to lock
* @param version The item's current version value
* @return A representation of our lock on the item; or null.
* @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
SoftLock lockItem(Object key, Object version) throws CacheException;
/**
* Lock the entire region
*
* @return A representation of our lock on the item; or null.
* @throws org.hibernate.cache.CacheException
     *          Propagated from underlying {@link org.hibernate.cache.Region}
*/
SoftLock lockRegion() throws CacheException;
/**
* Called when we have finished the attempted update/delete (which may or
* may not have been successful), after transaction completion. This method
* is used by "asynchronous" concurrency strategies.
*
* @param key The item key
* @param lock The lock previously obtained from {@link #lockItem}
* @throws org.hibernate.cache.CacheException
* Propagated from underlying {@link org.hibernate.cache.Region}
*/
void unlockItem(Object key, SoftLock lock) throws CacheException;
/**
* Called after we have finished the attempted invalidation of the entire
* region
*
* @param lock The lock previously obtained from {@link #lockRegion}
* @throws org.hibernate.cache.CacheException
* Propagated from underlying {@link org.hibernate.cache.Region}
*/
void unlockRegion(SoftLock lock) throws CacheException;
} | 0true
| hazelcast-hibernate_hazelcast-hibernate3_src_main_java_com_hazelcast_hibernate_access_AccessDelegate.java |
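The javadoc above sketches a two-phase protocol for the "asynchronous" concurrency strategies: take a SoftLock before the database write, release it after transaction completion, and repopulate the cache with putFromLoad on read misses. A minimal caller-side sketch follows; delegate, key, and the version/timestamp values are hypothetical placeholders rather than part of this interface.

SoftLock lock = delegate.lockItem(key, currentVersion); // before the DB update/delete
try {
    // ... perform the database update or delete here ...
} finally {
    delegate.unlockItem(key, lock); // after transaction completion
}

// On a read miss, attempt to populate the cache from the database:
boolean cached = delegate.putFromLoad(key, value, txTimestamp, version);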
1,742 | map.addEntryListener(new EntryListener<Integer, Integer>() {
@Override
public void entryAdded(EntryEvent<Integer, Integer> event) {
addCount.incrementAndGet();
latch.countDown();
}
@Override
public void entryRemoved(EntryEvent<Integer, Integer> event) {
removeCount.incrementAndGet();
latch.countDown();
}
@Override
public void entryUpdated(EntryEvent<Integer, Integer> event) {
updateCount.incrementAndGet();
latch.countDown();
}
@Override
public void entryEvicted(EntryEvent<Integer, Integer> event) {
}
}, true); | 0true
| hazelcast_src_test_java_com_hazelcast_map_EntryProcessorTest.java |
2,210 | return new DocIdSet() {
@Override
public DocIdSetIterator iterator() {
return null;
}
@Override
public boolean isCacheable() {
return true;
}
}; | 0true
| src_test_java_org_elasticsearch_common_lucene_search_XBooleanFilterLuceneTests.java |
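Returning null from iterator() is the Lucene convention for an empty DocIdSet, so consumers must null-check before iterating. A defensive consumption sketch, assuming a docIdSet reference in scope; in real Lucene code iterator() and nextDoc() may also throw IOException, elided here.

// A null iterator means the set matches no documents.
DocIdSetIterator it = docIdSet.iterator();
if (it != null) {
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
        // ... process the matching doc id ...
    }
}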
3,457 | public class GetField implements Streamable, Iterable<Object> {
private String name;
private List<Object> values;
private GetField() {
}
public GetField(String name, List<Object> values) {
this.name = name;
this.values = values;
}
public String getName() {
return name;
}
public Object getValue() {
if (values != null && !values.isEmpty()) {
return values.get(0);
}
return null;
}
public List<Object> getValues() {
return values;
}
public boolean isMetadataField() {
return MapperService.isMetadataField(name);
}
@Override
public Iterator<Object> iterator() {
return values.iterator();
}
public static GetField readGetField(StreamInput in) throws IOException {
GetField result = new GetField();
result.readFrom(in);
return result;
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
int size = in.readVInt();
values = new ArrayList<Object>(size);
for (int i = 0; i < size; i++) {
values.add(in.readGenericValue());
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeVInt(values.size());
for (Object obj : values) {
out.writeGenericValue(obj);
}
}
} | 0true
| src_main_java_org_elasticsearch_index_get_GetField.java |
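GetField follows the codebase's Streamable convention: writeTo and readFrom must mirror each other exactly (the name, then a vInt count, then each value written generically). A hedged round-trip sketch; BytesStreamOutput appears elsewhere in this codebase, but the exact BytesStreamInput constructor is an assumption, so adapt the StreamInput construction to the version at hand.

// Hedged round-trip; the BytesStreamInput(byte[], boolean) constructor is assumed.
GetField original = new GetField("user", Arrays.<Object>asList("kimchy"));
BytesStreamOutput out = new BytesStreamOutput();
original.writeTo(out);
StreamInput in = new BytesStreamInput(out.bytes().toBytes(), false);
GetField copy = GetField.readGetField(in);
assert copy.getName().equals(original.getName());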
569 | public class OpenIndexClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<OpenIndexClusterStateUpdateRequest> {
OpenIndexClusterStateUpdateRequest() {
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_open_OpenIndexClusterStateUpdateRequest.java |
3,114 | public class RefreshFailedEngineException extends EngineException {
public RefreshFailedEngineException(ShardId shardId, Throwable t) {
super(shardId, "Refresh failed", t);
}
} | 0true
| src_main_java_org_elasticsearch_index_engine_RefreshFailedEngineException.java |
3,006 | public class ShardIdCache extends AbstractIndexShardComponent {
final CounterMetric totalMetric = new CounterMetric();
@Inject
public ShardIdCache(ShardId shardId, @IndexSettings Settings indexSettings) {
super(shardId, indexSettings);
}
public IdCacheStats stats() {
return new IdCacheStats(totalMetric.count());
}
public void onCached(long sizeInBytes) {
totalMetric.inc(sizeInBytes);
}
public void onRemoval(long sizeInBytes) {
totalMetric.dec(sizeInBytes);
}
} | 0true
| src_main_java_org_elasticsearch_index_cache_id_ShardIdCache.java |
59 | {
@Override
public void bytesWritten( long numberOfBytes )
{
bytesWritten.addAndGet( numberOfBytes );
}
@Override
public void bytesRead( long numberOfBytes )
{
}
}, TxLog.class.getName() ); | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestTxLogMonitoring.java |
937 | Thread t = new Thread(new Runnable() {
public void run() {
try {
lock.tryLock(60, TimeUnit.SECONDS);
} catch (InterruptedException ignored) {
latch.countDown();
}
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_concurrent_lock_LockTest.java |
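The test above relies on tryLock(timeout, unit) being interruptible while waiting. Outside of tests, the usual pattern pairs a successful timed acquire with unlock in a finally block; a sketch assuming a Hazelcast ILock, where the lock name and the hazelcastInstance reference are placeholders and InterruptedException handling is left to the caller.

ILock lock = hazelcastInstance.getLock("myLock"); // "myLock" is a placeholder name
if (lock.tryLock(5, TimeUnit.SECONDS)) { // may throw InterruptedException
    try {
        // ... work guarded by the distributed lock ...
    } finally {
        lock.unlock();
    }
} else {
    // timed out waiting for the lock; fall back or retry
}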
1,376 | public abstract class CustomerEndpoint extends BaseEndpoint {
@Resource(name="blCustomerService")
protected CustomerService customerService;
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_endpoint_customer_CustomerEndpoint.java |
164 | (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}}); | 0true
| src_main_java_jsr166y_CountedCompleter.java |
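The privileged action above scans Unsafe's declared fields rather than hard-coding a name, which keeps it working on VMs that rename the singleton. On stock HotSpot the same result is commonly obtained by reading the theUnsafe field directly; a sketch of that shorter form follows, where the field name is exactly the assumption the scan avoids, and the checked reflection exceptions are elided.

// Direct form; assumes the singleton field is named "theUnsafe" (true on HotSpot).
java.lang.reflect.Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
sun.misc.Unsafe unsafe = (sun.misc.Unsafe) f.get(null); // static field, so null receiver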
1,234 | public interface AdminClient {
/**
* A client allowing to perform actions/operations against the cluster.
*/
ClusterAdminClient cluster();
/**
* A client allowing to perform actions/operations against the indices.
*/
IndicesAdminClient indices();
} | 0true
| src_main_java_org_elasticsearch_client_AdminClient.java |
3,610 | public class StringFieldMapper extends AbstractFieldMapper<String> implements AllFieldMapper.IncludeInAll {
public static final String CONTENT_TYPE = "string";
public static class Defaults extends AbstractFieldMapper.Defaults {
public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
static {
FIELD_TYPE.freeze();
}
// NOTE, when adding defaults here, make sure you add them in the builder
public static final String NULL_VALUE = null;
public static final int POSITION_OFFSET_GAP = 0;
public static final int IGNORE_ABOVE = -1;
}
public static class Builder extends AbstractFieldMapper.Builder<Builder, StringFieldMapper> {
protected String nullValue = Defaults.NULL_VALUE;
protected int positionOffsetGap = Defaults.POSITION_OFFSET_GAP;
protected NamedAnalyzer searchQuotedAnalyzer;
protected int ignoreAbove = Defaults.IGNORE_ABOVE;
public Builder(String name) {
super(name, new FieldType(Defaults.FIELD_TYPE));
builder = this;
}
public Builder nullValue(String nullValue) {
this.nullValue = nullValue;
return this;
}
@Override
public Builder searchAnalyzer(NamedAnalyzer searchAnalyzer) {
super.searchAnalyzer(searchAnalyzer);
if (searchQuotedAnalyzer == null) {
searchQuotedAnalyzer = searchAnalyzer;
}
return this;
}
public Builder positionOffsetGap(int positionOffsetGap) {
this.positionOffsetGap = positionOffsetGap;
return this;
}
public Builder searchQuotedAnalyzer(NamedAnalyzer analyzer) {
this.searchQuotedAnalyzer = analyzer;
return builder;
}
public Builder ignoreAbove(int ignoreAbove) {
this.ignoreAbove = ignoreAbove;
return this;
}
@Override
public StringFieldMapper build(BuilderContext context) {
if (positionOffsetGap > 0) {
indexAnalyzer = new NamedAnalyzer(indexAnalyzer, positionOffsetGap);
searchAnalyzer = new NamedAnalyzer(searchAnalyzer, positionOffsetGap);
searchQuotedAnalyzer = new NamedAnalyzer(searchQuotedAnalyzer, positionOffsetGap);
}
// if the field is not analyzed, then by default, we should omit norms and have docs only
// index options, as probably what the user really wants
// if they are set explicitly, we will use those values
// we also change the values on the default field type so that toXContent emits what
// differs from the defaults
FieldType defaultFieldType = new FieldType(Defaults.FIELD_TYPE);
if (fieldType.indexed() && !fieldType.tokenized()) {
defaultFieldType.setOmitNorms(true);
defaultFieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
if (!omitNormsSet && boost == Defaults.BOOST) {
fieldType.setOmitNorms(true);
}
if (!indexOptionsSet) {
fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
}
}
defaultFieldType.freeze();
StringFieldMapper fieldMapper = new StringFieldMapper(buildNames(context),
boost, fieldType, defaultFieldType, docValues, nullValue, indexAnalyzer, searchAnalyzer, searchQuotedAnalyzer,
positionOffsetGap, ignoreAbove, postingsProvider, docValuesProvider, similarity, normsLoading,
fieldDataSettings, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
fieldMapper.includeInAll(includeInAll);
return fieldMapper;
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
StringFieldMapper.Builder builder = stringField(name);
parseField(builder, name, node, parserContext);
for (Map.Entry<String, Object> entry : node.entrySet()) {
String propName = Strings.toUnderscoreCase(entry.getKey());
Object propNode = entry.getValue();
if (propName.equals("null_value")) {
builder.nullValue(propNode.toString());
} else if (propName.equals("search_quote_analyzer")) {
NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
builder.searchQuotedAnalyzer(analyzer);
} else if (propName.equals("position_offset_gap")) {
builder.positionOffsetGap(XContentMapValues.nodeIntegerValue(propNode, -1));
// we need to update to actual analyzers if they are not set in this case...
// so we can inject the position offset gap...
if (builder.indexAnalyzer == null) {
builder.indexAnalyzer = parserContext.analysisService().defaultIndexAnalyzer();
}
if (builder.searchAnalyzer == null) {
builder.searchAnalyzer = parserContext.analysisService().defaultSearchAnalyzer();
}
if (builder.searchQuotedAnalyzer == null) {
builder.searchQuotedAnalyzer = parserContext.analysisService().defaultSearchQuoteAnalyzer();
}
} else if (propName.equals("ignore_above")) {
builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1));
} else {
parseMultiField(builder, name, node, parserContext, propName, propNode);
}
}
return builder;
}
}
private String nullValue;
private Boolean includeInAll;
private int positionOffsetGap;
private NamedAnalyzer searchQuotedAnalyzer;
private int ignoreAbove;
private final FieldType defaultFieldType;
protected StringFieldMapper(Names names, float boost, FieldType fieldType, FieldType defaultFieldType, Boolean docValues,
String nullValue, NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer,
NamedAnalyzer searchQuotedAnalyzer, int positionOffsetGap, int ignoreAbove,
PostingsFormatProvider postingsFormat, DocValuesFormatProvider docValuesFormat,
SimilarityProvider similarity, Loading normsLoading, @Nullable Settings fieldDataSettings,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(names, boost, fieldType, docValues, indexAnalyzer, searchAnalyzer, postingsFormat, docValuesFormat,
similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
if (fieldType.tokenized() && fieldType.indexed() && hasDocValues()) {
throw new MapperParsingException("Field [" + names.fullName() + "] cannot be analyzed and have doc values");
}
this.defaultFieldType = defaultFieldType;
this.nullValue = nullValue;
this.positionOffsetGap = positionOffsetGap;
this.searchQuotedAnalyzer = searchQuotedAnalyzer != null ? searchQuotedAnalyzer : this.searchAnalyzer;
this.ignoreAbove = ignoreAbove;
}
@Override
public FieldType defaultFieldType() {
return defaultFieldType;
}
@Override
public FieldDataType defaultFieldDataType() {
return new FieldDataType("string");
}
@Override
public void includeInAll(Boolean includeInAll) {
if (includeInAll != null) {
this.includeInAll = includeInAll;
}
}
@Override
public void includeInAllIfNotSet(Boolean includeInAll) {
if (includeInAll != null && this.includeInAll == null) {
this.includeInAll = includeInAll;
}
}
@Override
public void unsetIncludeInAll() {
includeInAll = null;
}
@Override
public String value(Object value) {
if (value == null) {
return null;
}
return value.toString();
}
@Override
protected boolean customBoost() {
return true;
}
public int getPositionOffsetGap() {
return this.positionOffsetGap;
}
@Override
public Analyzer searchQuoteAnalyzer() {
return this.searchQuotedAnalyzer;
}
@Override
public Filter nullValueFilter() {
if (nullValue == null) {
return null;
}
return termFilter(nullValue, null);
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
ValueAndBoost valueAndBoost = parseCreateFieldForString(context, nullValue, boost);
if (valueAndBoost.value() == null) {
return;
}
if (ignoreAbove > 0 && valueAndBoost.value().length() > ignoreAbove) {
return;
}
if (context.includeInAll(includeInAll, this)) {
context.allEntries().addText(names.fullName(), valueAndBoost.value(), valueAndBoost.boost());
}
if (fieldType.indexed() || fieldType.stored()) {
Field field = new StringField(names.indexName(), valueAndBoost.value(), fieldType);
field.setBoost(valueAndBoost.boost());
fields.add(field);
}
if (hasDocValues()) {
fields.add(new SortedSetDocValuesField(names.indexName(), new BytesRef(valueAndBoost.value())));
}
if (fields.isEmpty()) {
context.ignoredValue(names.indexName(), valueAndBoost.value());
}
}
/**
* Parse a field as though it were a string.
* @param context parse context used during parsing
* @param nullValue value to use for null
* @param defaultBoost default boost value returned unless overwritten in the field
* @return the parsed field and the boost either parsed or defaulted
* @throws IOException if thrown while parsing
*/
public static ValueAndBoost parseCreateFieldForString(ParseContext context, String nullValue, float defaultBoost) throws IOException {
if (context.externalValueSet()) {
return new ValueAndBoost((String) context.externalValue(), defaultBoost);
}
XContentParser parser = context.parser();
if (parser.currentToken() == XContentParser.Token.VALUE_NULL) {
return new ValueAndBoost(nullValue, defaultBoost);
}
if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
XContentParser.Token token;
String currentFieldName = null;
String value = nullValue;
float boost = defaultBoost;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else {
if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
value = parser.textOrNull();
} else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else {
throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
}
}
}
return new ValueAndBoost(value, boost);
}
return new ValueAndBoost(parser.textOrNull(), defaultBoost);
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
super.merge(mergeWith, mergeContext);
if (!this.getClass().equals(mergeWith.getClass())) {
return;
}
if (!mergeContext.mergeFlags().simulate()) {
this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll;
this.nullValue = ((StringFieldMapper) mergeWith).nullValue;
this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;
}
}
@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
super.doXContentBody(builder, includeDefaults, params);
if (includeDefaults || nullValue != null) {
builder.field("null_value", nullValue);
}
if (includeInAll != null) {
builder.field("include_in_all", includeInAll);
} else if (includeDefaults) {
builder.field("include_in_all", false);
}
if (includeDefaults || positionOffsetGap != Defaults.POSITION_OFFSET_GAP) {
builder.field("position_offset_gap", positionOffsetGap);
}
if (searchQuotedAnalyzer != null && searchAnalyzer != searchQuotedAnalyzer) {
builder.field("search_quote_analyzer", searchQuotedAnalyzer.name());
} else if (includeDefaults) {
if (searchQuotedAnalyzer == null) {
builder.field("search_quote_analyzer", "default");
} else {
builder.field("search_quote_analyzer", searchQuotedAnalyzer.name());
}
}
if (includeDefaults || ignoreAbove != Defaults.IGNORE_ABOVE) {
builder.field("ignore_above", ignoreAbove);
}
}
/** Extension of {@link Field} supporting reuse of a cached TokenStream for not-tokenized values. */
static class StringField extends Field {
public StringField(String name, String value, FieldType fieldType) {
super(name, fieldType);
fieldsData = value;
}
@Override
public TokenStream tokenStream(Analyzer analyzer) throws IOException {
if (!fieldType().indexed()) {
return null;
}
// Only use the cached TokenStream if the value is indexed and not-tokenized
if (fieldType().tokenized()) {
return super.tokenStream(analyzer);
}
return NOT_ANALYZED_TOKENSTREAM.get().setValue((String) fieldsData);
}
}
private static final ThreadLocal<StringTokenStream> NOT_ANALYZED_TOKENSTREAM = new ThreadLocal<StringTokenStream>() {
@Override
protected StringTokenStream initialValue() {
return new StringTokenStream();
}
};
// Copied from Field.java
static final class StringTokenStream extends TokenStream {
private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class);
private boolean used = false;
private String value = null;
/**
* Creates a new TokenStream that returns a String as a single token.
* <p>Warning: Does not initialize the value, you must call
* {@link #setValue(String)} afterwards!
*/
StringTokenStream() {
}
/** Sets the string value. */
StringTokenStream setValue(String value) {
this.value = value;
return this;
}
@Override
public boolean incrementToken() {
if (used) {
return false;
}
clearAttributes();
termAttribute.append(value);
offsetAttribute.setOffset(0, value.length());
used = true;
return true;
}
@Override
public void end() {
final int finalOffset = value.length();
offsetAttribute.setOffset(finalOffset, finalOffset);
value = null;
}
@Override
public void reset() {
used = false;
}
@Override
public void close() {
value = null;
}
}
/**
* Parsed value and boost to be returned from {@link #parseCreateFieldForString}.
*/
public static class ValueAndBoost {
private final String value;
private final float boost;
public ValueAndBoost(String value, float boost) {
this.value = value;
this.boost = boost;
}
/**
* Value of string field.
* @return value of string field
*/
public String value() {
return value;
}
/**
* Boost either parsed from the document or defaulted.
* @return boost either parsed from the document or defaulted
*/
public float boost() {
return boost;
}
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_core_StringFieldMapper.java |
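The TypeParser above accepts null_value, position_offset_gap, ignore_above, and search_quote_analyzer alongside the common string settings. A mapping sketch built with XContentBuilder, as used elsewhere in this codebase; the field name "title" and analyzer name "my_quote_analyzer" are hypothetical.

XContentBuilder mapping = XContentFactory.jsonBuilder()
        .startObject()
            .startObject("properties")
                .startObject("title")
                    .field("type", "string")
                    .field("null_value", "n/a")        // substituted when the source value is null
                    .field("position_offset_gap", 100) // gap between multi-valued entries
                    .field("ignore_above", 256)        // skip values longer than this
                    .field("search_quote_analyzer", "my_quote_analyzer")
                .endObject()
            .endObject()
        .endObject();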