Unnamed: 0 (int64, 0–6.45k) | func (stringlengths 37–143k) | target (class label, 2 classes) | project (stringlengths 33–157)
---|---|---|---
231 |
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface OId {
}
| 0 (true)
|
core_src_main_java_com_orientechnologies_orient_core_annotation_OId.java
|
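Aside on the row above: OId is a marker annotation, retained at runtime and restricted to fields, so a mapper can locate an entity's record-id field reflectively. A minimal sketch of how such a marker is typically consumed; the Person class, its id field, and the scanner are illustrative, not part of the dataset:

import java.lang.reflect.Field;

class Person {
    @OId
    private Object id; // hypothetical entity field carrying the record id
}

class OIdScanner {
    // Returns the first field annotated with @OId, or null if none exists.
    static Field findIdField(Class<?> type) {
        for (Field field : type.getDeclaredFields()) {
            if (field.isAnnotationPresent(OId.class)) {
                return field;
            }
        }
        return null;
    }
}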
3,228 |
public abstract class ScriptDocValues {
public static final ScriptDocValues EMPTY = new Empty();
public static final Strings EMPTY_STRINGS = new Strings(BytesValues.EMPTY);
protected int docId;
protected boolean listLoaded = false;
public void setNextDocId(int docId) {
this.docId = docId;
this.listLoaded = false;
}
public abstract boolean isEmpty();
public abstract List<?> getValues();
public static class Empty extends ScriptDocValues {
@Override
public void setNextDocId(int docId) {
}
@Override
public boolean isEmpty() {
return true;
}
@Override
public List<?> getValues() {
return Collections.emptyList();
}
}
public final static class Strings extends ScriptDocValues {
private final BytesValues values;
private final CharsRef spare = new CharsRef();
private SlicedObjectList<String> list;
public Strings(BytesValues values) {
this.values = values;
list = new SlicedObjectList<String>(values.isMultiValued() ? new String[10] : new String[1]) {
@Override
public void grow(int newLength) {
assert offset == 0; // NOTE: senseless if offset != 0
if (values.length >= newLength) {
return;
}
final String[] current = values;
values = new String[ArrayUtil.oversize(newLength, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
System.arraycopy(current, 0, values, 0, current.length);
}
};
}
@Override
public boolean isEmpty() {
return values.setDocument(docId) == 0;
}
public BytesValues getInternalValues() {
return this.values;
}
public BytesRef getBytesValue() {
int numValues = values.setDocument(docId);
if (numValues == 0) {
return null;
}
return values.nextValue();
}
public String getValue() {
String value = null;
if (values.setDocument(docId) > 0) {
UnicodeUtil.UTF8toUTF16(values.nextValue(), spare);
value = spare.toString();
}
return value;
}
public List<String> getValues() {
if (!listLoaded) {
final int numValues = values.setDocument(docId);
list.offset = 0;
list.grow(numValues);
list.length = numValues;
for (int i = 0; i < numValues; i++) {
BytesRef next = values.nextValue();
UnicodeUtil.UTF8toUTF16(next, spare);
list.values[i] = spare.toString();
}
listLoaded = true;
}
return list;
}
}
public static class Longs extends ScriptDocValues {
private final LongValues values;
private final MutableDateTime date = new MutableDateTime(0, DateTimeZone.UTC);
private final SlicedLongList list;
public Longs(LongValues values) {
this.values = values;
this.list = new SlicedLongList(values.isMultiValued() ? 10 : 1);
}
public LongValues getInternalValues() {
return this.values;
}
@Override
public boolean isEmpty() {
return values.setDocument(docId) == 0;
}
public long getValue() {
int numValues = values.setDocument(docId);
if (numValues == 0) {
return 0L;
}
return values.nextValue();
}
public List<Long> getValues() {
if (!listLoaded) {
final int numValues = values.setDocument(docId);
list.offset = 0;
list.grow(numValues);
list.length = numValues;
for (int i = 0; i < numValues; i++) {
list.values[i] = values.nextValue();
}
listLoaded = true;
}
return list;
}
public MutableDateTime getDate() {
date.setMillis(getValue());
return date;
}
}
public static class Doubles extends ScriptDocValues {
private final DoubleValues values;
private final SlicedDoubleList list;
public Doubles(DoubleValues values) {
this.values = values;
this.list = new SlicedDoubleList(values.isMultiValued() ? 10 : 1);
}
public DoubleValues getInternalValues() {
return this.values;
}
@Override
public boolean isEmpty() {
return values.setDocument(docId) == 0;
}
public double getValue() {
int numValues = values.setDocument(docId);
if (numValues == 0) {
return 0d;
}
return values.nextValue();
}
public List<Double> getValues() {
if (!listLoaded) {
int numValues = values.setDocument(docId);
list.offset = 0;
list.grow(numValues);
list.length = numValues;
for (int i = 0; i < numValues; i++) {
list.values[i] = values.nextValue();
}
listLoaded = true;
}
return list;
}
}
public static class GeoPoints extends ScriptDocValues {
private final GeoPointValues values;
private final SlicedObjectList<GeoPoint> list;
public GeoPoints(GeoPointValues values) {
this.values = values;
list = new SlicedObjectList<GeoPoint>(values.isMultiValued() ? new GeoPoint[10] : new GeoPoint[1]) {
@Override
public void grow(int newLength) {
assert offset == 0; // NOTE: senseless if offset != 0
if (values.length >= newLength) {
return;
}
final GeoPoint[] current = values;
values = new GeoPoint[ArrayUtil.oversize(newLength, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
System.arraycopy(current, 0, values, 0, current.length);
}
};
}
@Override
public boolean isEmpty() {
return values.setDocument(docId) == 0;
}
public GeoPoint getValue() {
int numValues = values.setDocument(docId);
if (numValues == 0) {
return null;
}
return values.nextValue();
}
public double getLat() {
return getValue().lat();
}
public double[] getLats() {
List<GeoPoint> points = getValues();
double[] lats = new double[points.size()];
for (int i = 0; i < points.size(); i++) {
lats[i] = points.get(i).lat();
}
return lats;
}
public double[] getLons() {
List<GeoPoint> points = getValues();
double[] lons = new double[points.size()];
for (int i = 0; i < points.size(); i++) {
lons[i] = points.get(i).lon();
}
return lons;
}
public double getLon() {
return getValue().lon();
}
public List<GeoPoint> getValues() {
if (!listLoaded) {
int numValues = values.setDocument(docId);
list.offset = 0;
list.grow(numValues);
list.length = numValues;
for (int i = 0; i < numValues; i++) {
GeoPoint next = values.nextValue();
GeoPoint point = list.values[i];
if (point == null) {
point = list.values[i] = new GeoPoint();
}
point.reset(next.lat(), next.lon());
list.values[i] = point;
}
listLoaded = true;
}
return list;
}
public double factorDistance(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
}
public double factorDistanceWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
}
public double factorDistance02(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT) + 1;
}
public double factorDistance13(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT) + 2;
}
public double arcDistance(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
}
public double arcDistanceWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
}
public double arcDistanceInKm(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
}
public double arcDistanceInKmWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
}
public double arcDistanceInMiles(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
}
public double arcDistanceInMilesWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
}
public double distance(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
}
public double distanceWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
}
public double distanceInKm(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
}
public double distanceInKmWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
}
public double distanceInMiles(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
}
public double distanceInMilesWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
}
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_index_fielddata_ScriptDocValues.java
|
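The ScriptDocValues class above is built around one pattern: per-document lazy caching, where setNextDocId invalidates a cached list (listLoaded = false) and getValues() materializes it at most once per document. A self-contained sketch of that pattern, independent of the Elasticsearch-internal LongValues/BytesValues types; all names below are illustrative:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Illustrative stand-in for a per-document values source such as LongValues.
interface DocLongSource {
    int setDocument(int docId); // positions on a doc, returns its value count
    long nextValue();           // returns the next value for the current doc
}

class LazyDocValues {
    private final DocLongSource source;
    private final List<Long> cache = new ArrayList<Long>();
    private int docId;
    private boolean listLoaded;

    LazyDocValues(DocLongSource source) {
        this.source = source;
    }

    // Mirrors ScriptDocValues.setNextDocId: moving to a new doc invalidates the cache.
    void setNextDocId(int docId) {
        this.docId = docId;
        this.listLoaded = false;
    }

    // Mirrors getValues(): materializes the values at most once per document.
    List<Long> getValues() {
        if (!listLoaded) {
            cache.clear();
            int numValues = source.setDocument(docId);
            for (int i = 0; i < numValues; i++) {
                cache.add(source.nextValue());
            }
            listLoaded = true;
        }
        return Collections.unmodifiableList(cache);
    }
}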
1,772 |
public abstract class BasePolygonBuilder<E extends BasePolygonBuilder<E>> extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.POLYGON;
// Linear ring defining the shell of the polygon
protected Ring<E> shell;
// List of linear rings defining the holes of the polygon
protected final ArrayList<BaseLineStringBuilder<?>> holes = new ArrayList<BaseLineStringBuilder<?>>();
@SuppressWarnings("unchecked")
private E thisRef() {
return (E)this;
}
public E point(double longitude, double latitude) {
shell.point(longitude, latitude);
return thisRef();
}
/**
* Add a point to the shell of the polygon
* @param coordinate coordinate of the new point
* @return this
*/
public E point(Coordinate coordinate) {
shell.point(coordinate);
return thisRef();
}
/**
* Add an array of points to the shell of the polygon
* @param coordinates coordinates of the new points to add
* @return this
*/
public E points(Coordinate...coordinates) {
shell.points(coordinates);
return thisRef();
}
/**
* Add a new hole to the polygon
* @param hole linear ring defining the hole
* @return this
*/
public E hole(BaseLineStringBuilder<?> hole) {
holes.add(hole);
return thisRef();
}
/**
* Build a new hole in the polygon
* @return the ring defining the new hole; {@link Ring#close()} returns to this builder
*/
public Ring<E> hole() {
Ring<E> hole = new Ring<E>(thisRef());
this.holes.add(hole);
return hole;
}
/**
* Close the shell of the polygon
* @return parent
*/
public ShapeBuilder close() {
return shell.close();
}
/**
* The coordinates set up by the builder will be assembled into a polygon. The result consists of
* a set of polygons. Each of these components holds a list of linestrings defining the polygon: the
* first set of coordinates is used as the shell of the polygon; the others define holes
* within the polygon.
* This method also wraps the polygons at the dateline. As a consequence the result may
* contain more polygons and fewer holes than were defined in the builder itself.
*
* @return coordinates of the polygon
*/
public Coordinate[][][] coordinates() {
int numEdges = shell.points.size()-1; // Last point is repeated
for (int i = 0; i < holes.size(); i++) {
numEdges += holes.get(i).points.size()-1;
}
Edge[] edges = new Edge[numEdges];
Edge[] holeComponents = new Edge[holes.size()];
int offset = createEdges(0, true, shell, edges, 0);
for (int i = 0; i < holes.size(); i++) {
int length = createEdges(i+1, false, this.holes.get(i), edges, offset);
holeComponents[i] = edges[offset];
offset += length;
}
int numHoles = holeComponents.length;
numHoles = merge(edges, 0, intersections(+DATELINE, edges), holeComponents, numHoles);
numHoles = merge(edges, 0, intersections(-DATELINE, edges), holeComponents, numHoles);
return compose(edges, holeComponents, numHoles);
}
@Override
public Shape build() {
Geometry geometry = buildGeometry(FACTORY, wrapdateline);
return new JtsGeometry(geometry, SPATIAL_CONTEXT, !wrapdateline);
}
protected XContentBuilder coordinatesArray(XContentBuilder builder, Params params) throws IOException {
shell.coordinatesToXcontent(builder, true);
for(BaseLineStringBuilder<?> hole : holes) {
hole.coordinatesToXcontent(builder, true);
}
return builder;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(FIELD_TYPE, TYPE.shapename);
builder.startArray(FIELD_COORDINATES);
coordinatesArray(builder, params);
builder.endArray();
builder.endObject();
return builder;
}
public Geometry buildGeometry(GeometryFactory factory, boolean fixDateline) {
if(fixDateline) {
Coordinate[][][] polygons = coordinates();
return polygons.length == 1
? polygon(factory, polygons[0])
: multipolygon(factory, polygons);
} else {
return toPolygon(factory);
}
}
public Polygon toPolygon() {
return toPolygon(FACTORY);
}
protected Polygon toPolygon(GeometryFactory factory) {
final LinearRing shell = linearRing(factory, this.shell.points);
final LinearRing[] holes = new LinearRing[this.holes.size()];
Iterator<BaseLineStringBuilder<?>> iterator = this.holes.iterator();
for (int i = 0; iterator.hasNext(); i++) {
holes[i] = linearRing(factory, iterator.next().points);
}
return factory.createPolygon(shell, holes);
}
protected static LinearRing linearRing(GeometryFactory factory, ArrayList<Coordinate> coordinates) {
return factory.createLinearRing(coordinates.toArray(new Coordinate[coordinates.size()]));
}
@Override
public GeoShapeType type() {
return TYPE;
}
protected static Polygon polygon(GeometryFactory factory, Coordinate[][] polygon) {
LinearRing shell = factory.createLinearRing(polygon[0]);
LinearRing[] holes;
if(polygon.length > 1) {
holes = new LinearRing[polygon.length-1];
for (int i = 0; i < holes.length; i++) {
holes[i] = factory.createLinearRing(polygon[i+1]);
}
} else {
holes = null;
}
return factory.createPolygon(shell, holes);
}
/**
* Create a MultiPolygon from a set of coordinates. Each primary array contains a polygon which
* in turn contains an array of linestrings. These linestrings are represented as an array of
* coordinates. The first linestring is the shell of the polygon; the others define holes
* within the polygon.
*
* @param factory {@link GeometryFactory} to use
* @param polygons definition of polygons
* @return a new Multipolygon
*/
protected static MultiPolygon multipolygon(GeometryFactory factory, Coordinate[][][] polygons) {
Polygon[] polygonSet = new Polygon[polygons.length];
for (int i = 0; i < polygonSet.length; i++) {
polygonSet[i] = polygon(factory, polygons[i]);
}
return factory.createMultiPolygon(polygonSet);
}
/**
* This method sets the component id of all edges in a ring to a given id and shifts the
* coordinates of this component according to the dateline
*
* @param edge An arbitrary edge of the component
* @param id id to apply to the component
* @param edges a list of edges to which all edges of the component will be added (could be <code>null</code>)
* @return number of edges that belong to this component
*/
private static int component(final Edge edge, final int id, final ArrayList<Edge> edges) {
// find a coordinate that is not part of the dateline
Edge any = edge;
while(any.coordinate.x == +DATELINE || any.coordinate.x == -DATELINE) {
if((any = any.next) == edge) {
break;
}
}
double shift = any.coordinate.x > DATELINE ? DATELINE : (any.coordinate.x < -DATELINE ? -DATELINE : 0);
if (debugEnabled()) {
LOGGER.debug("shift: {[]}", shift);
}
// run along the border of the component, collect the
// edges, shift them according to the dateline and
// update the component id
int length = 0;
Edge current = edge;
do {
current.coordinate = shift(current.coordinate, shift);
current.component = id;
if(edges != null) {
edges.add(current);
}
length++;
} while((current = current.next) != edge);
return length;
}
/**
* Compute all coordinates of a component
* @param component an arbitrary edge of the component
* @param coordinates Array of coordinates to write the result to
* @return the coordinates parameter
*/
private static Coordinate[] coordinates(Edge component, Coordinate[] coordinates) {
for (int i = 0; i < coordinates.length; i++) {
coordinates[i] = (component = component.next).coordinate;
}
return coordinates;
}
private static Coordinate[][][] buildCoordinates(ArrayList<ArrayList<Coordinate[]>> components) {
Coordinate[][][] result = new Coordinate[components.size()][][];
for (int i = 0; i < result.length; i++) {
ArrayList<Coordinate[]> component = components.get(i);
result[i] = component.toArray(new Coordinate[component.size()][]);
}
if(debugEnabled()) {
for (int i = 0; i < result.length; i++) {
LOGGER.debug("Component {[]}:", i);
for (int j = 0; j < result[i].length; j++) {
LOGGER.debug("\t" + Arrays.toString(result[i][j]));
}
}
}
return result;
}
private static final Coordinate[][] EMPTY = new Coordinate[0][];
private static Coordinate[][] holes(Edge[] holes, int numHoles) {
if (numHoles == 0) {
return EMPTY;
}
final Coordinate[][] points = new Coordinate[numHoles][];
for (int i = 0; i < numHoles; i++) {
int length = component(holes[i], -(i+1), null); // mark as visited by inverting the sign
points[i] = coordinates(holes[i], new Coordinate[length+1]);
}
return points;
}
private static Edge[] edges(Edge[] edges, int numHoles, ArrayList<ArrayList<Coordinate[]>> components) {
ArrayList<Edge> mainEdges = new ArrayList<Edge>(edges.length);
for (int i = 0; i < edges.length; i++) {
if (edges[i].component >= 0) {
int length = component(edges[i], -(components.size()+numHoles+1), mainEdges);
ArrayList<Coordinate[]> component = new ArrayList<Coordinate[]>();
component.add(coordinates(edges[i], new Coordinate[length+1]));
components.add(component);
}
}
return mainEdges.toArray(new Edge[mainEdges.size()]);
}
private static Coordinate[][][] compose(Edge[] edges, Edge[] holes, int numHoles) {
final ArrayList<ArrayList<Coordinate[]>> components = new ArrayList<ArrayList<Coordinate[]>>();
assign(holes, holes(holes, numHoles), numHoles, edges(edges, numHoles, components), components);
return buildCoordinates(components);
}
private static void assign(Edge[] holes, Coordinate[][] points, int numHoles, Edge[] edges, ArrayList<ArrayList<Coordinate[]>> components) {
// Assign each hole to its enclosing component.
// To find the component a hole belongs to, all intersections of the
// polygon edges with a vertical line through an arbitrary point of the
// hole are calculated. The polygon edge closest to this point belongs
// to the polygon that encloses the hole.
if (debugEnabled()) {
LOGGER.debug("Holes: " + Arrays.toString(holes));
}
for (int i = 0; i < numHoles; i++) {
final Edge current = holes[i];
final int intersections = intersections(current.coordinate.x, edges);
final int pos = Arrays.binarySearch(edges, 0, intersections, current, INTERSECTION_ORDER);
assert pos < 0 : "illegal state: two edges cross the datum at the same position";
final int index = -(pos+2);
final int component = -edges[index].component - numHoles - 1;
if(debugEnabled()) {
LOGGER.debug("\tposition ("+index+") of edge "+current+": " + edges[index]);
LOGGER.debug("\tComponent: " + component);
LOGGER.debug("\tHole intersections ("+current.coordinate.x+"): " + Arrays.toString(edges));
}
components.get(component).add(points[i]);
}
}
private static int merge(Edge[] intersections, int offset, int length, Edge[] holes, int numHoles) {
// Intersections appear pairwise. On the first edge the interior
// of the polygon is entered. On the second edge the outer face
// is entered. Other kinds of intersections are discarded by the
// intersection function.
for (int i = 0; i < length; i += 2) {
Edge e1 = intersections[offset + i + 0];
Edge e2 = intersections[offset + i + 1];
// If two segments are connected, a hole may need to be deleted.
// Since edges of components appear pairwise we need to check
// only the second edge (the first edge is either a polygon edge or
// already handled)
if (e2.component > 0) {
//TODO: Check if we could save the set null step
numHoles--;
holes[e2.component-1] = holes[numHoles];
holes[numHoles] = null;
}
connect(e1, e2);
}
return numHoles;
}
private static void connect(Edge in, Edge out) {
assert in != null && out != null;
assert in != out;
// Connect two edges by inserting a point at the
// dateline intersection and adding two edges
// between these points, one per direction.
if(in.intersect != in.next.coordinate) {
// NOTE: the order of the object creation is crucial here! Don't change it!
// first edge has no point on dateline
Edge e1 = new Edge(in.intersect, in.next);
if(out.intersect != out.next.coordinate) {
// second edge has no point on dateline
Edge e2 = new Edge(out.intersect, out.next);
in.next = new Edge(in.intersect, e2, in.intersect);
} else {
// second edge intersects with dateline
in.next = new Edge(in.intersect, out.next, in.intersect);
}
out.next = new Edge(out.intersect, e1, out.intersect);
} else {
// first edge intersects with dateline
Edge e2 = new Edge(out.intersect, in.next, out.intersect);
if(out.intersect != out.next.coordinate) {
// second edge has no point on dateline
Edge e1 = new Edge(out.intersect, out.next);
in.next = new Edge(in.intersect, e1, in.intersect);
} else {
// second edge intersects with dateline
in.next = new Edge(in.intersect, out.next, in.intersect);
}
out.next = e2;
}
}
private static int createEdges(int component, boolean direction, BaseLineStringBuilder<?> line, Edge[] edges, int offset) {
Coordinate[] points = line.coordinates(false); // last point is repeated
Edge.ring(component, direction, points, 0, edges, offset, points.length-1);
return points.length-1;
}
public static class Ring<P extends ShapeBuilder> extends BaseLineStringBuilder<Ring<P>> {
private final P parent;
protected Ring(P parent) {
this(parent, new ArrayList<Coordinate>());
}
protected Ring(P parent, ArrayList<Coordinate> points) {
super(points);
this.parent = parent;
}
public P close() {
Coordinate start = points.get(0);
Coordinate end = points.get(points.size()-1);
if(start.x != end.x || start.y != end.y) {
points.add(start);
}
return parent;
}
@Override
public GeoShapeType type() {
return null;
}
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_common_geo_builders_BasePolygonBuilder.java
|
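For context, the builder above is meant to be used fluently: add shell points, optionally open holes, close them back to the parent, then build. A sketch under the assumption that a concrete PolygonBuilder subclass is obtained via ShapeBuilder.newPolygon(); the factory call and imports reflect the Elasticsearch 1.x sources this row comes from, and are an assumption here:

import org.elasticsearch.common.geo.builders.PolygonBuilder;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import com.spatial4j.core.shape.Shape;

public class PolygonExample {
    public static void main(String[] args) {
        PolygonBuilder polygon = ShapeBuilder.newPolygon() // assumed factory for the concrete subclass
                .point(-45, 30)   // shell points as (longitude, latitude)
                .point(45, 30)
                .point(45, -30)
                .point(-45, -30)
                .point(-45, 30);  // repeat the first point to close the shell ring
        polygon.hole()            // open a linear ring describing a hole
                .point(-10, 10)
                .point(10, 10)
                .point(10, -10)
                .point(-10, -10)
                .close();         // Ring.close() repeats the start point if needed and returns the parent
        Shape shape = polygon.build(); // assembles the geometry, splitting at the dateline when required
    }
}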
1,542 |
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, Text> {
private boolean isVertex;
private final Text textWritable = new Text();
private SafeMapperOutputs outputs;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
this.outputs = new SafeMapperOutputs(context);
if (!context.getConfiguration().getBoolean(Tokens.TITAN_HADOOP_PIPELINE_TRACK_PATHS, false))
throw new IllegalStateException(PathMap.class.getSimpleName() + " requires that paths be enabled");
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, Text>.Context context) throws IOException, InterruptedException {
if (this.isVertex && value.hasPaths()) {
for (final List<FaunusPathElement.MicroElement> path : value.getPaths()) {
this.textWritable.set(path.toString());
this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.textWritable);
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
} else {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
for (final List<FaunusPathElement.MicroElement> path : edge.getPaths()) {
this.textWritable.set(path.toString());
this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.textWritable);
}
edgesProcessed++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
}
this.outputs.write(Tokens.GRAPH, NullWritable.get(), value);
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusVertex, NullWritable, Text>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
| 1 (no label)
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_PathMap.java
|
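The PathMap row above follows the standard Hadoop Mapper lifecycle (setup, map, cleanup) with counters and side-effect outputs; the Titan-specific types (FaunusVertex, SafeMapperOutputs) are internal, so here is a minimal lifecycle sketch with plain Hadoop types. The class and counter names are illustrative:

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class LineCountMapper extends Mapper<LongWritable, Text, NullWritable, Text> {
    @Override
    protected void setup(Context context) {
        // read configuration here, as PathMap does with context.getConfiguration()
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        context.write(NullWritable.get(), value); // pass the record through
        context.getCounter("demo", "LINES_PROCESSED").increment(1L); // mirrors incrementContextCounter
    }

    @Override
    protected void cleanup(Context context) {
        // close side outputs here, as PathMap does with SafeMapperOutputs.close()
    }
}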
2,151 |
public class TransactionalMapProxy extends TransactionalMapProxySupport implements TransactionalMap {
private final Map<Object, TxnValueWrapper> txMap = new HashMap<Object, TxnValueWrapper>();
public TransactionalMapProxy(String name, MapService mapService, NodeEngine nodeEngine, TransactionSupport transaction) {
super(name, mapService, nodeEngine, transaction);
}
public boolean containsKey(Object key) {
checkTransactionState();
return txMap.containsKey(key) || containsKeyInternal(getService().toData(key, partitionStrategy));
}
public int size() {
checkTransactionState();
int currentSize = sizeInternal();
for (TxnValueWrapper wrapper : txMap.values()) {
if (wrapper.type == TxnValueWrapper.Type.NEW) {
currentSize++;
} else if (wrapper.type == TxnValueWrapper.Type.REMOVED) {
currentSize--;
}
}
return currentSize;
}
public boolean isEmpty() {
checkTransactionState();
return size() == 0;
}
public Object get(Object key) {
checkTransactionState();
TxnValueWrapper currentValue = txMap.get(key);
if (currentValue != null) {
return checkIfRemoved(currentValue);
}
return getService().toObject(getInternal(getService().toData(key, partitionStrategy)));
}
public Object getForUpdate(Object key) {
checkTransactionState();
TxnValueWrapper currentValue = txMap.get(key);
if (currentValue != null) {
return checkIfRemoved(currentValue);
}
Data dataKey = getService().toData(key, partitionStrategy);
return getService().toObject(getForUpdateInternal(dataKey));
}
private Object checkIfRemoved(TxnValueWrapper wrapper) {
checkTransactionState();
return wrapper == null || wrapper.type == TxnValueWrapper.Type.REMOVED ? null : wrapper.value;
}
public Object put(Object key, Object value) {
checkTransactionState();
MapService service = getService();
final Object valueBeforeTxn = service.toObject(putInternal(service.toData(key, partitionStrategy),
service.toData(value)));
TxnValueWrapper currentValue = txMap.get(key);
if (value != null) {
TxnValueWrapper wrapper = valueBeforeTxn == null ?
new TxnValueWrapper(value, TxnValueWrapper.Type.NEW) :
new TxnValueWrapper(value, TxnValueWrapper.Type.UPDATED);
txMap.put(key, wrapper);
}
return currentValue == null ? valueBeforeTxn : checkIfRemoved(currentValue);
}
public Object put(Object key, Object value, long ttl, TimeUnit timeUnit) {
checkTransactionState();
MapService service = getService();
final Object valueBeforeTxn = service.toObject(putInternal(service.toData(key, partitionStrategy),
service.toData(value), ttl, timeUnit));
TxnValueWrapper currentValue = txMap.get(key);
if (value != null) {
TxnValueWrapper wrapper = valueBeforeTxn == null ?
new TxnValueWrapper(value, TxnValueWrapper.Type.NEW) :
new TxnValueWrapper(value, TxnValueWrapper.Type.UPDATED);
txMap.put(key, wrapper);
}
return currentValue == null ? valueBeforeTxn : checkIfRemoved(currentValue);
}
public void set(Object key, Object value) {
checkTransactionState();
MapService service = getService();
final Data dataBeforeTxn = putInternal(service.toData(key, partitionStrategy), service.toData(value));
if (value != null) {
TxnValueWrapper wrapper = dataBeforeTxn == null ? new TxnValueWrapper(value, TxnValueWrapper.Type.NEW) : new TxnValueWrapper(value, TxnValueWrapper.Type.UPDATED);
txMap.put(key, wrapper);
}
}
public Object putIfAbsent(Object key, Object value) {
checkTransactionState();
TxnValueWrapper wrapper = txMap.get(key);
boolean haveTxnPast = wrapper != null;
MapService service = getService();
if (haveTxnPast) {
if (wrapper.type != TxnValueWrapper.Type.REMOVED) {
return wrapper.value;
}
putInternal(service.toData(key, partitionStrategy), service.toData(value));
txMap.put(key, new TxnValueWrapper(value, TxnValueWrapper.Type.NEW));
return null;
} else {
Data oldValue = putIfAbsentInternal(service.toData(key, partitionStrategy), service.toData(value));
if (oldValue == null) {
txMap.put(key, new TxnValueWrapper(value, TxnValueWrapper.Type.NEW));
}
return service.toObject(oldValue);
}
}
public Object replace(Object key, Object value) {
checkTransactionState();
TxnValueWrapper wrapper = txMap.get(key);
boolean haveTxnPast = wrapper != null;
MapService service = getService();
if (haveTxnPast) {
if (wrapper.type == TxnValueWrapper.Type.REMOVED) {
return null;
}
putInternal(service.toData(key, partitionStrategy), service.toData(value));
txMap.put(key, new TxnValueWrapper(value, TxnValueWrapper.Type.UPDATED));
return wrapper.value;
} else {
Data oldValue = replaceInternal(service.toData(key, partitionStrategy), service.toData(value));
if (oldValue != null) {
txMap.put(key, new TxnValueWrapper(value, TxnValueWrapper.Type.UPDATED));
}
return service.toObject(oldValue);
}
}
public boolean replace(Object key, Object oldValue, Object newValue) {
checkTransactionState();
TxnValueWrapper wrapper = txMap.get(key);
boolean haveTxnPast = wrapper != null;
MapService service = getService();
if (haveTxnPast) {
if (!wrapper.value.equals(oldValue)) {
return false;
}
putInternal(service.toData(key, partitionStrategy), service.toData(newValue));
txMap.put(key, new TxnValueWrapper(newValue, TxnValueWrapper.Type.UPDATED));
return true;
} else {
boolean success = replaceIfSameInternal(service.toData(key), service.toData(oldValue), service.toData(newValue));
if (success) {
txMap.put(key, new TxnValueWrapper(newValue, TxnValueWrapper.Type.UPDATED));
}
return success;
}
}
public boolean remove(Object key, Object value) {
checkTransactionState();
TxnValueWrapper wrapper = txMap.get(key);
MapService service = getService();
if (wrapper != null && !service.compare(name, wrapper.value, value)) {
return false;
}
boolean removed = removeIfSameInternal(service.toData(key, partitionStrategy), value);
if (removed) {
txMap.put(key, new TxnValueWrapper(value, TxnValueWrapper.Type.REMOVED));
}
return removed;
}
public Object remove(Object key) {
checkTransactionState();
MapService service = getService();
final Object valueBeforeTxn = service.toObject(removeInternal(service.toData(key, partitionStrategy)));
TxnValueWrapper wrapper = null;
if (valueBeforeTxn != null || txMap.containsKey(key)) {
wrapper = txMap.put(key, new TxnValueWrapper(valueBeforeTxn, TxnValueWrapper.Type.REMOVED));
}
return wrapper == null ? valueBeforeTxn : checkIfRemoved(wrapper);
}
public void delete(Object key) {
checkTransactionState();
MapService service = getService();
Data data = removeInternal(service.toData(key, partitionStrategy));
if (data != null || txMap.containsKey(key)) {
txMap.put(key, new TxnValueWrapper(service.toObject(data), TxnValueWrapper.Type.REMOVED));
}
}
public Set<Object> keySet() {
checkTransactionState();
final Set<Data> keySet = keySetInternal();
final Set<Object> keys = new HashSet<Object>(keySet.size());
final MapService service = getService();
// convert Data to Object
for (final Data data : keySet) {
keys.add(service.toObject(data));
}
for (final Map.Entry<Object, TxnValueWrapper> entry : txMap.entrySet()) {
if (TxnValueWrapper.Type.NEW.equals(entry.getValue().type)) {
keys.add(entry.getKey());
} else if (TxnValueWrapper.Type.REMOVED.equals(entry.getValue().type)) {
keys.remove(entry.getKey());
}
}
return keys;
}
public Set keySet(Predicate predicate) {
checkTransactionState();
if (predicate == null) {
throw new NullPointerException("Predicate should not be null!");
}
if (predicate instanceof PagingPredicate) {
throw new NullPointerException("Paging is not supported for Transactional queries!");
}
final MapService service = getService();
final QueryResultSet queryResultSet = (QueryResultSet) queryInternal(predicate, IterationType.KEY, false);
final Set<Object> keySet = new HashSet<Object>(queryResultSet); //todo: Can't we just use the original set?
for (final Map.Entry<Object, TxnValueWrapper> entry : txMap.entrySet()) {
if (!TxnValueWrapper.Type.REMOVED.equals(entry.getValue().type)) {
final Object value = entry.getValue().value instanceof Data ?
service.toObject(entry.getValue().value) : entry.getValue().value;
final QueryEntry queryEntry = new QueryEntry(null, service.toData(entry.getKey()), entry.getKey(), value);
// apply predicate on txMap.
if (predicate.apply(queryEntry)) {
keySet.add(entry.getKey());
}
} else {
// meanwhile remove keys that were removed inside the transaction.
keySet.remove(entry.getKey());
}
}
return keySet;
}
public Collection<Object> values() {
checkTransactionState();
final Collection<Data> dataSet = valuesInternal();
final Collection<Object> values = new ArrayList<Object>(dataSet.size());
for (final Data data : dataSet) {
values.add(getService().toObject(data));
}
for (TxnValueWrapper wrapper : txMap.values()) {
if (TxnValueWrapper.Type.NEW.equals(wrapper.type)) {
values.add(wrapper.value);
} else if (TxnValueWrapper.Type.REMOVED.equals(wrapper.type)) {
values.remove(wrapper.value);
}
}
return values;
}
public Collection values(Predicate predicate) {
checkTransactionState();
if (predicate == null) {
throw new NullPointerException("Predicate can not be null!");
}
if (predicate instanceof PagingPredicate) {
throw new IllegalArgumentException("Paging is not supported for Transactional queries");
}
final MapService service = getService();
final QueryResultSet queryResultSet = (QueryResultSet) queryInternal(predicate, IterationType.ENTRY, false);
final Set<Object> valueSet = new HashSet<Object>(); //todo: Can't we just use the original set?
final Set<Object> keyWontBeIncluded = new HashSet<Object>();
// delete updated or removed elements from the result set
for (final Map.Entry<Object, TxnValueWrapper> entry : txMap.entrySet()) {
final boolean isRemoved = TxnValueWrapper.Type.REMOVED.equals(entry.getValue().type);
final boolean isUpdated = TxnValueWrapper.Type.UPDATED.equals(entry.getValue().type);
if (isRemoved) {
keyWontBeIncluded.add(entry.getKey());
} else {
if (isUpdated){
keyWontBeIncluded.add(entry.getKey());
}
final Object entryValue = entry.getValue().value;
final Object objectValue = entryValue instanceof Data ?
service.toObject(entryValue) : entryValue;
final QueryEntry queryEntry = new QueryEntry(null, service.toData(entry.getKey()), entry.getKey(), objectValue);
// apply predicate on txMap.
if (predicate.apply(queryEntry)) {
valueSet.add(entryValue);
}
}
}
final Iterator<Map.Entry> iterator = queryResultSet.rawIterator();
while (iterator.hasNext()){
final Map.Entry entry = iterator.next();
if (keyWontBeIncluded.contains(entry.getKey())){
continue;
}
valueSet.add(entry.getValue());
}
return valueSet;
}
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("TransactionalMap");
sb.append("{name='").append(name).append('\'');
sb.append('}');
return sb.toString();
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_map_tx_TransactionalMapProxy.java
|
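The TransactionalMapProxy above is not instantiated directly; it backs the TransactionalMap handle obtained from a transaction context. A usage sketch with the public Hazelcast 3.x API (map name and values are illustrative):

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.TransactionalMap;
import com.hazelcast.transaction.TransactionContext;

public class TxnMapExample {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        TransactionContext context = hz.newTransactionContext();
        context.beginTransaction();
        try {
            TransactionalMap<String, Integer> counters = context.getMap("counters");
            Integer previous = counters.put("visits", 1); // buffered as a TxnValueWrapper until commit
            if (previous != null) {
                counters.set("visits", previous + 1);
            }
            context.commitTransaction();
        } catch (Exception e) {
            context.rollbackTransaction();
        }
        hz.shutdown();
    }
}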
125 |
public interface OProfilerMBean extends OService {
public enum METRIC_TYPE {
CHRONO, COUNTER, STAT, SIZE, ENABLED, TEXT
}
public void updateCounter(String iStatName, String iDescription, long iPlus);
public void updateCounter(String iStatName, String iDescription, long iPlus, String iDictionary);
public long getCounter(String iStatName);
public String dump();
public String dumpCounters();
public OProfilerEntry getChrono(String string);
public long startChrono();
public long stopChrono(String iName, String iDescription, long iStartTime);
public long stopChrono(String iName, String iDescription, long iStartTime, String iDictionary);
public String dumpChronos();
public String[] getCountersAsString();
public String[] getChronosAsString();
public Date getLastReset();
public boolean isRecording();
public boolean startRecording();
public boolean stopRecording();
public void unregisterHookValue(String string);
public void configure(String string);
public void setAutoDump(int iNewValue);
public String metadataToJSON();
public Map<String, OPair<String, METRIC_TYPE>> getMetadata();
public void registerHookValue(final String iName, final String iDescription, final METRIC_TYPE iType,
final OProfilerHookValue iHookValue);
public void registerHookValue(final String iName, final String iDescription, final METRIC_TYPE iType,
final OProfilerHookValue iHookValue, final String iMetadataName);
public String getSystemMetric(String iMetricName);
public String getProcessMetric(String iName);
public String getDatabaseMetric(String databaseName, String iName);
public String toJSON(String command, final String iPar1);
public void resetRealtime(final String iText);
}
| 0 (true)
|
commons_src_main_java_com_orientechnologies_common_profiler_OProfilerMBean.java
|
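The profiler interface above is consumed mostly through its chrono pair: startChrono() captures a start timestamp and stopChrono(...) records the elapsed time under a metric name. A sketch using only methods declared in the interface; the metric name and the timed work are illustrative:

class ProfiledWork {
    // Times a unit of work with the startChrono/stopChrono pair declared above.
    static void timedSection(OProfilerMBean profiler) {
        final long startTime = profiler.startChrono();
        try {
            Thread.sleep(10); // stand-in for the work being measured
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            profiler.stopChrono("process.demo.section", "Time spent in the measured section", startTime);
        }
    }
}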
2,889 |
public final class Predicates {
//we don't want instances.
private Predicates() {
}
public static Predicate instanceOf(final Class klass) {
return new InstanceOfPredicate(klass);
}
private static Comparable readAttribute(Map.Entry entry, String attribute) {
QueryableEntry queryableEntry = (QueryableEntry) entry;
Comparable value = queryableEntry.getAttribute(attribute);
if (value == null) {
return IndexImpl.NULL;
}
return value;
}
public static Predicate and(Predicate x, Predicate y) {
return new AndPredicate(x, y);
}
public static Predicate not(Predicate predicate) {
return new NotPredicate(predicate);
}
public static Predicate or(Predicate x, Predicate y) {
return new OrPredicate(x, y);
}
public static Predicate notEqual(String attribute, Comparable y) {
return new NotEqualPredicate(attribute, y);
}
public static Predicate equal(String attribute, Comparable y) {
return new EqualPredicate(attribute, y);
}
public static Predicate like(String attribute, String pattern) {
return new LikePredicate(attribute, pattern);
}
public static Predicate ilike(String attribute, String pattern) {
return new ILikePredicate(attribute, pattern);
}
public static Predicate regex(String attribute, String pattern) {
return new RegexPredicate(attribute, pattern);
}
public static Predicate greaterThan(String x, Comparable y) {
return new GreaterLessPredicate(x, y, false, false);
}
public static Predicate greaterEqual(String x, Comparable y) {
return new GreaterLessPredicate(x, y, true, false);
}
public static Predicate lessThan(String x, Comparable y) {
return new GreaterLessPredicate(x, y, false, true);
}
public static Predicate lessEqual(String x, Comparable y) {
return new GreaterLessPredicate(x, y, true, true);
}
public static Predicate between(String attribute, Comparable from, Comparable to) {
return new BetweenPredicate(attribute, from, to);
}
public static Predicate in(String attribute, Comparable... values) {
return new InPredicate(attribute, values);
}
public static class BetweenPredicate extends AbstractPredicate {
private Comparable to;
private Comparable from;
public BetweenPredicate() {
}
public BetweenPredicate(String first, Comparable from, Comparable to) {
super(first);
this.from = from;
this.to = to;
}
@Override
public boolean apply(Map.Entry entry) {
Comparable entryValue = readAttribute(entry);
if (entryValue == null) {
return false;
}
Comparable fromConvertedValue = convert(entry, entryValue, from);
Comparable toConvertedValue = convert(entry, entryValue, to);
if (fromConvertedValue == null || toConvertedValue == null) {
return false;
}
return entryValue.compareTo(fromConvertedValue) >= 0 && entryValue.compareTo(toConvertedValue) <= 0;
}
@Override
public Set<QueryableEntry> filter(QueryContext queryContext) {
Index index = getIndex(queryContext);
return index.getSubRecordsBetween(from, to);
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
super.writeData(out);
out.writeObject(to);
out.writeObject(from);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
super.readData(in);
to = in.readObject();
from = in.readObject();
}
@Override
public String toString() {
return attribute + " BETWEEN " + from + " AND " + to;
}
}
public static class NotPredicate implements Predicate, DataSerializable {
private Predicate predicate;
public NotPredicate(Predicate predicate) {
this.predicate = predicate;
}
public NotPredicate() {
}
@Override
public boolean apply(Map.Entry mapEntry) {
return !predicate.apply(mapEntry);
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeObject(predicate);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
predicate = in.readObject();
}
@Override
public String toString() {
return "NOT(" + predicate + ")";
}
}
public static class InPredicate extends AbstractPredicate {
private Comparable[] values;
private volatile Set<Comparable> convertedInValues;
public InPredicate() {
}
public InPredicate(String attribute, Comparable... values) {
super(attribute);
this.values = values;
}
@Override
public boolean apply(Map.Entry entry) {
Comparable entryValue = readAttribute(entry);
Set<Comparable> set = convertedInValues;
if (set == null) {
set = new HashSet<Comparable>(values.length);
for (Comparable value : values) {
set.add(convert(entry, entryValue, value));
}
convertedInValues = set;
}
return entryValue != null && set.contains(entryValue);
}
@Override
public Set<QueryableEntry> filter(QueryContext queryContext) {
Index index = getIndex(queryContext);
if (index != null) {
return index.getRecords(values);
} else {
return null;
}
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
super.writeData(out);
out.writeInt(values.length);
for (Object value : values) {
out.writeObject(value);
}
}
@Override
public void readData(ObjectDataInput in) throws IOException {
super.readData(in);
int len = in.readInt();
values = new Comparable[len];
for (int i = 0; i < len; i++) {
values[i] = in.readObject();
}
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append(attribute);
sb.append(" IN (");
for (int i = 0; i < values.length; i++) {
if (i > 0) {
sb.append(",");
}
sb.append(values[i]);
}
sb.append(")");
return sb.toString();
}
}
public static class RegexPredicate implements Predicate, DataSerializable {
private String attribute;
private String regex;
private volatile Pattern pattern;
public RegexPredicate() {
}
public RegexPredicate(String attribute, String regex) {
this.attribute = attribute;
this.regex = regex;
}
@Override
public boolean apply(Map.Entry entry) {
Comparable attribute = readAttribute(entry, this.attribute);
String firstVal = attribute == IndexImpl.NULL ? null : (String) attribute;
if (firstVal == null) {
return (regex == null);
} else if (regex == null) {
return false;
} else {
if (pattern == null) {
pattern = Pattern.compile(regex);
}
Matcher m = pattern.matcher(firstVal);
return m.matches();
}
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(attribute);
out.writeUTF(regex);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
attribute = in.readUTF();
regex = in.readUTF();
}
@Override
public String toString() {
return attribute + " REGEX '" + regex + "'";
}
}
public static class LikePredicate implements Predicate, DataSerializable {
protected String attribute;
protected String second;
private volatile Pattern pattern;
public LikePredicate() {
}
public LikePredicate(String attribute, String second) {
this.attribute = attribute;
this.second = second;
}
@Override
public boolean apply(Map.Entry entry) {
Comparable attribute = readAttribute(entry, this.attribute);
String firstVal = attribute == IndexImpl.NULL ? null : (String) attribute;
if (firstVal == null) {
return (second == null);
} else if (second == null) {
return false;
} else {
if (pattern == null) {
// we quote the input string, then rewrite the % and _ wildcards;
// the resulting regex pattern looks like: \QSOME_STRING\E.*\QSOME_OTHER_STRING\E
final String quoted = Pattern.quote(second);
String regex = quoted
//unescaped % becomes "match anything"
.replaceAll("(?<!\\\\)[%]", "\\\\E.*\\\\Q")
//unescaped _ becomes "match any single character"
.replaceAll("(?<!\\\\)[_]", "\\\\E.\\\\Q")
//escaped \% becomes a literal %
.replaceAll("\\\\%", "%")
//escaped \_ becomes a literal _
.replaceAll("\\\\_", "_");
int flags = getFlags();
pattern = Pattern.compile(regex, flags);
}
Matcher m = pattern.matcher(firstVal);
return m.matches();
}
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(attribute);
out.writeUTF(second);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
attribute = in.readUTF();
second = in.readUTF();
}
protected int getFlags() {
//no flags
return 0;
}
@Override
public String toString() {
StringBuffer builder = new StringBuffer(attribute)
.append(" LIKE '")
.append(second)
.append("'");
return builder.toString();
}
}
public static class ILikePredicate extends LikePredicate {
public ILikePredicate() {
}
public ILikePredicate(String attribute, String second) {
super(attribute, second);
}
@Override
public String toString() {
StringBuffer builder = new StringBuffer(attribute)
.append(" ILIKE '")
.append(second)
.append("'");
return builder.toString();
}
@Override
protected int getFlags() {
return Pattern.CASE_INSENSITIVE;
}
}
public static class AndPredicate implements IndexAwarePredicate, DataSerializable {
protected Predicate[] predicates;
public AndPredicate() {
}
public AndPredicate(Predicate... predicates) {
this.predicates = predicates;
}
@Override
public Set<QueryableEntry> filter(QueryContext queryContext) {
Set<QueryableEntry> smallestIndexedResult = null;
List<Set<QueryableEntry>> otherIndexedResults = new LinkedList<Set<QueryableEntry>>();
List<Predicate> lsNoIndexPredicates = null;
for (Predicate predicate : predicates) {
boolean indexed = false;
if (predicate instanceof IndexAwarePredicate) {
IndexAwarePredicate iap = (IndexAwarePredicate) predicate;
if (iap.isIndexed(queryContext)) {
indexed = true;
Set<QueryableEntry> s = iap.filter(queryContext);
if (smallestIndexedResult == null) {
smallestIndexedResult = s;
} else if (s.size() < smallestIndexedResult.size()) {
otherIndexedResults.add(smallestIndexedResult);
smallestIndexedResult = s;
} else {
otherIndexedResults.add(s);
}
}
}
// predicates that are not backed by an index are collected exactly once below
if (!indexed) {
if (lsNoIndexPredicates == null) {
lsNoIndexPredicates = new LinkedList<Predicate>();
}
lsNoIndexPredicates.add(predicate);
}
}
if (smallestIndexedResult == null) {
return null;
}
return new AndResultSet(smallestIndexedResult, otherIndexedResults, lsNoIndexPredicates);
}
@Override
public boolean isIndexed(QueryContext queryContext) {
for (Predicate predicate : predicates) {
if (predicate instanceof IndexAwarePredicate) {
IndexAwarePredicate iap = (IndexAwarePredicate) predicate;
if (iap.isIndexed(queryContext)) {
return true;
}
}
}
return false;
}
@Override
public boolean apply(Map.Entry mapEntry) {
for (Predicate predicate : predicates) {
if (!predicate.apply(mapEntry)) {
return false;
}
}
return true;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("(");
int size = predicates.length;
for (int i = 0; i < size; i++) {
if (i > 0) {
sb.append(" AND ");
}
sb.append(predicates[i]);
}
sb.append(")");
return sb.toString();
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeInt(predicates.length);
for (Predicate predicate : predicates) {
out.writeObject(predicate);
}
}
@Override
public void readData(ObjectDataInput in) throws IOException {
int size = in.readInt();
predicates = new Predicate[size];
for (int i = 0; i < size; i++) {
predicates[i] = in.readObject();
}
}
}
public static class OrPredicate implements IndexAwarePredicate, DataSerializable {
private Predicate[] predicates;
public OrPredicate() {
}
public OrPredicate(Predicate... predicates) {
this.predicates = predicates;
}
@Override
public Set<QueryableEntry> filter(QueryContext queryContext) {
List<Set<QueryableEntry>> indexedResults = new LinkedList<Set<QueryableEntry>>();
for (Predicate predicate : predicates) {
if (predicate instanceof IndexAwarePredicate) {
IndexAwarePredicate iap = (IndexAwarePredicate) predicate;
if (iap.isIndexed(queryContext)) {
Set<QueryableEntry> s = iap.filter(queryContext);
if (s != null) {
indexedResults.add(s);
}
} else {
return null;
}
}
}
return indexedResults.isEmpty() ? null : new OrResultSet(indexedResults);
}
@Override
public boolean isIndexed(QueryContext queryContext) {
for (Predicate predicate : predicates) {
if (predicate instanceof IndexAwarePredicate) {
IndexAwarePredicate iap = (IndexAwarePredicate) predicate;
if (!iap.isIndexed(queryContext)) {
return false;
}
} else {
return false;
}
}
return true;
}
@Override
public boolean apply(Map.Entry mapEntry) {
for (Predicate predicate : predicates) {
if (predicate.apply(mapEntry)) {
return true;
}
}
return false;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeInt(predicates.length);
for (Predicate predicate : predicates) {
out.writeObject(predicate);
}
}
@Override
public void readData(ObjectDataInput in) throws IOException {
int size = in.readInt();
predicates = new Predicate[size];
for (int i = 0; i < size; i++) {
predicates[i] = in.readObject();
}
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("(");
int size = predicates.length;
for (int i = 0; i < size; i++) {
if (i > 0) {
sb.append(" OR ");
}
sb.append(predicates[i]);
}
sb.append(")");
return sb.toString();
}
}
public static class GreaterLessPredicate extends EqualPredicate {
boolean equal;
boolean less;
public GreaterLessPredicate() {
}
public GreaterLessPredicate(String attribute, Comparable value, boolean equal, boolean less) {
super(attribute, value);
this.equal = equal;
this.less = less;
}
@Override
public boolean apply(Map.Entry mapEntry) {
final Comparable entryValue = readAttribute(mapEntry);
final Comparable attributeValue = convert(mapEntry, entryValue, value);
final int result = entryValue.compareTo(attributeValue);
return equal && result == 0 || (less ? (result < 0) : (result > 0));
}
@Override
public Set<QueryableEntry> filter(QueryContext queryContext) {
Index index = getIndex(queryContext);
final ComparisonType comparisonType;
if (less) {
comparisonType = equal ? ComparisonType.LESSER_EQUAL : ComparisonType.LESSER;
} else {
comparisonType = equal ? ComparisonType.GREATER_EQUAL : ComparisonType.GREATER;
}
return index.getSubRecords(comparisonType, value);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
super.readData(in);
equal = in.readBoolean();
less = in.readBoolean();
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
super.writeData(out);
out.writeBoolean(equal);
out.writeBoolean(less);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append(attribute);
sb.append(less ? "<" : ">");
if (equal) {
sb.append("=");
}
sb.append(value);
return sb.toString();
}
}
public static class NotEqualPredicate extends EqualPredicate {
public NotEqualPredicate() {
}
public NotEqualPredicate(String attribute, Comparable value) {
super(attribute, value);
}
@Override
public boolean apply(Map.Entry entry) {
return !super.apply(entry);
}
@Override
public Set<QueryableEntry> filter(QueryContext queryContext) {
Index index = getIndex(queryContext);
if (index != null) {
return index.getSubRecords(ComparisonType.NOT_EQUAL, value);
} else {
return null;
}
}
@Override
public String toString() {
return attribute + " != " + value;
}
}
public static class EqualPredicate extends AbstractPredicate {
protected Comparable value;
public EqualPredicate() {
}
public EqualPredicate(String attribute, Comparable value) {
super(attribute);
this.value = value;
}
@Override
public Set<QueryableEntry> filter(QueryContext queryContext) {
Index index = getIndex(queryContext);
return index.getRecords(value);
}
@Override
public boolean apply(Map.Entry mapEntry) {
Comparable entryValue = readAttribute(mapEntry);
if (entryValue == null) {
return value == null || value == IndexImpl.NULL;
}
value = convert(mapEntry, entryValue, value);
return entryValue.equals(value);
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
super.writeData(out);
out.writeObject(value);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
super.readData(in);
value = in.readObject();
}
@Override
public String toString() {
return attribute + "=" + value;
}
}
public abstract static class AbstractPredicate implements IndexAwarePredicate, DataSerializable {
protected String attribute;
private transient volatile AttributeType attributeType;
protected AbstractPredicate() {
}
protected AbstractPredicate(String attribute) {
this.attribute = attribute;
}
protected Comparable convert(Map.Entry mapEntry, Comparable entryValue, Comparable attributeValue) {
if (attributeValue == null) {
return null;
}
if (attributeValue instanceof IndexImpl.NullObject) {
return IndexImpl.NULL;
}
AttributeType type = attributeType;
if (type == null) {
QueryableEntry queryableEntry = (QueryableEntry) mapEntry;
type = queryableEntry.getAttributeType(attribute);
attributeType = type;
}
if (type == AttributeType.ENUM) {
// if attribute type is enum, convert given attribute to enum string
return type.getConverter().convert(attributeValue);
} else {
// if given attribute value is already in expected type then there's no need for conversion.
if (entryValue != null && entryValue.getClass().isAssignableFrom(attributeValue.getClass())) {
return attributeValue;
} else if (type != null) {
return type.getConverter().convert(attributeValue);
} else {
throw new QueryException("Unknown attribute type: " + attributeValue.getClass());
}
}
}
@Override
public boolean isIndexed(QueryContext queryContext) {
return getIndex(queryContext) != null;
}
protected Index getIndex(QueryContext queryContext) {
return queryContext.getIndex(attribute);
}
protected Comparable readAttribute(Map.Entry entry) {
QueryableEntry queryableEntry = (QueryableEntry) entry;
Comparable val = queryableEntry.getAttribute(attribute);
if (val != null && val.getClass().isEnum()) {
val = val.toString();
}
return val;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(attribute);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
attribute = in.readUTF();
}
}
private static class InstanceOfPredicate implements Predicate, DataSerializable {
private Class klass;
public InstanceOfPredicate(Class klass) {
this.klass = klass;
}
@Override
public boolean apply(Map.Entry mapEntry) {
Object value = mapEntry.getValue();
if (value == null) {
return false;
}
return klass.isAssignableFrom(value.getClass());
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(klass.getName());
}
@Override
public void readData(ObjectDataInput in) throws IOException {
String klassName = in.readUTF();
try {
klass = in.getClassLoader().loadClass(klassName);
} catch (ClassNotFoundException e) {
throw new HazelcastSerializationException("Failed to load class: " + klassName, e);
}
}
@Override
public String toString() {
return " instanceOf (" + klass.getName() + ")";
}
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_query_Predicates.java
|
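The factory methods at the top of the Predicates row compose into the query trees that filter() and apply() then evaluate. A short usage sketch against a Hazelcast IMap; the map name, attributes, and Employee value type are illustrative, while IMap.values(Predicate) is the standard query entry point:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.query.Predicate;
import com.hazelcast.query.Predicates;

import java.io.Serializable;
import java.util.Collection;

public class PredicateExample {
    static class Employee implements Serializable {
        public boolean active;
        public int age;
        public String name;
    }

    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IMap<String, Employee> employees = hz.getMap("employees");
        // (active = true AND age BETWEEN 18 AND 65) OR name LIKE 'Jo%'
        Predicate query = Predicates.or(
                Predicates.and(
                        Predicates.equal("active", true),
                        Predicates.between("age", 18, 65)),
                Predicates.like("name", "Jo%"));
        Collection<Employee> matches = employees.values(query);
        System.out.println("matched: " + matches.size());
        hz.shutdown();
    }
}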
5,075 |
class Reaper implements Runnable {
@Override
public void run() {
long time = threadPool.estimatedTimeInMillis();
for (SearchContext context : activeContexts.values()) {
if (context.lastAccessTime() == -1) { // it's being processed or timeout is disabled
continue;
}
if ((time - context.lastAccessTime() > context.keepAlive())) {
freeContext(context);
}
}
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_search_SearchService.java
|
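The Reaper row above is a compact example of time-based idle eviction: a periodic task walks the live contexts and frees any whose last access is older than its keep-alive, skipping entries flagged as in-flight (lastAccessTime == -1). A self-contained sketch of the same pattern; all names below are illustrative:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class IdleReaper {
    static class Context {
        volatile long lastAccessTime = System.currentTimeMillis(); // -1 while in use or timeout disabled
        final long keepAliveMillis = 5_000;
    }

    private final Map<Long, Context> activeContexts = new ConcurrentHashMap<Long, Context>();
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    void start() {
        scheduler.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                long now = System.currentTimeMillis();
                for (Map.Entry<Long, Context> entry : activeContexts.entrySet()) {
                    Context context = entry.getValue();
                    if (context.lastAccessTime == -1) {
                        continue; // being processed, or timeout disabled
                    }
                    if (now - context.lastAccessTime > context.keepAliveMillis) {
                        activeContexts.remove(entry.getKey()); // free the idle context
                    }
                }
            }
        }, 1, 1, TimeUnit.SECONDS);
    }
}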
874 |
public class TransportSearchQueryThenFetchAction extends TransportSearchTypeAction {
@Inject
public TransportSearchQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
super(settings, threadPool, clusterService, searchService, searchPhaseController);
}
@Override
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
new AsyncAction(searchRequest, listener).start();
}
private class AsyncAction extends BaseAsyncAction<QuerySearchResult> {
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
super(request, listener);
fetchResults = new AtomicArray<FetchSearchResult>(firstResults.length());
docIdsToLoad = new AtomicArray<IntArrayList>(firstResults.length());
}
@Override
protected String firstPhaseName() {
return "query";
}
@Override
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<QuerySearchResult> listener) {
searchService.sendExecuteQuery(node, request, listener);
}
@Override
protected void moveToSecondPhase() {
sortedShardList = searchPhaseController.sortDocs(firstResults);
searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
if (docIdsToLoad.asList().isEmpty()) {
finishHim();
return;
}
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
int localOperations = 0;
for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
QuerySearchResult queryResult = firstResults.get(entry.index);
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
localOperations++;
} else {
FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
}
if (localOperations > 0) {
if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
QuerySearchResult queryResult = firstResults.get(entry.index);
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
}
}
});
} else {
boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
final QuerySearchResult queryResult = firstResults.get(entry.index);
final DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
final FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
try {
if (localAsync) {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
});
} else {
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
} catch (Throwable t) {
onFetchFailure(t, fetchSearchRequest, entry.index, queryResult.shardTarget(), counter);
}
}
}
}
}
}
void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final FetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener<FetchSearchResult>() {
@Override
public void onResult(FetchSearchResult result) {
result.shardTarget(shardTarget);
fetchResults.set(shardIndex, result);
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
@Override
public void onFailure(Throwable t) {
onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
}
});
}
void onFetchFailure(Throwable t, FetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
}
this.addShardFailure(shardIndex, shardTarget, t);
successulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
void finishHim() {
try {
innerFinishHim();
} catch (Throwable e) {
ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", e, buildShardFailures());
if (logger.isDebugEnabled()) {
logger.debug("failed to reduce search", failure);
}
listener.onFailure(failure);
} finally {
releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
}
}
void innerFinishHim() throws Exception {
InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, fetchResults);
String scrollId = null;
if (request.scroll() != null) {
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
}
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_search_type_TransportSearchQueryThenFetchAction.java
|
4,497 |
public class StartRecoveryRequest extends TransportRequest {
private static final AtomicLong recoveryIdGenerator = new AtomicLong();
private long recoveryId;
private ShardId shardId;
private DiscoveryNode sourceNode;
private DiscoveryNode targetNode;
private boolean markAsRelocated;
private Map<String, StoreFileMetaData> existingFiles;
StartRecoveryRequest() {
}
/**
* Start recovery request.
*
* @param shardId
* @param sourceNode The node to recover from
* @param targetNode The node to recover to
* @param markAsRelocated
* @param existingFiles
*/
public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, boolean markAsRelocated, Map<String, StoreFileMetaData> existingFiles) {
this.recoveryId = recoveryIdGenerator.incrementAndGet();
this.shardId = shardId;
this.sourceNode = sourceNode;
this.targetNode = targetNode;
this.markAsRelocated = markAsRelocated;
this.existingFiles = existingFiles;
}
public long recoveryId() {
return this.recoveryId;
}
public ShardId shardId() {
return shardId;
}
public DiscoveryNode sourceNode() {
return sourceNode;
}
public DiscoveryNode targetNode() {
return targetNode;
}
public boolean markAsRelocated() {
return markAsRelocated;
}
public Map<String, StoreFileMetaData> existingFiles() {
return existingFiles;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
recoveryId = in.readLong();
shardId = ShardId.readShardId(in);
sourceNode = DiscoveryNode.readNode(in);
targetNode = DiscoveryNode.readNode(in);
markAsRelocated = in.readBoolean();
int size = in.readVInt();
existingFiles = Maps.newHashMapWithExpectedSize(size);
for (int i = 0; i < size; i++) {
StoreFileMetaData md = StoreFileMetaData.readStoreFileMetaData(in);
existingFiles.put(md.name(), md);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeLong(recoveryId);
shardId.writeTo(out);
sourceNode.writeTo(out);
targetNode.writeTo(out);
out.writeBoolean(markAsRelocated);
out.writeVInt(existingFiles.size());
for (StoreFileMetaData md : existingFiles.values()) {
md.writeTo(out);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_recovery_StartRecoveryRequest.java
|
230 |
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface ODocumentInstance {
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_annotation_ODocumentInstance.java
|
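A minimal sketch of how a runtime-retained, field-targeted marker like ODocumentInstance is applied. That OrientDB's object mapper uses it to inject the backing ODocument into the annotated field is an assumption stated here, not shown above:

import com.orientechnologies.orient.core.annotation.ODocumentInstance;
import com.orientechnologies.orient.core.record.impl.ODocument;

public class Account {
    // populated by the framework (assumed), never set by user code
    @ODocumentInstance
    private ODocument document;

    public ODocument getDocument() {
        return document;
    }
}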
1,653 |
public class PersistencePackageRequest {
protected Type type;
protected String ceilingEntityClassname;
protected String configKey;
protected AdornedTargetList adornedList;
protected MapStructure mapStructure;
protected Entity entity;
protected ForeignKey foreignKey;
protected Integer startIndex;
protected Integer maxIndex;
protected Map<String, PersistencePackageRequest> subRequests = new LinkedHashMap<String, PersistencePackageRequest>();
protected boolean validateUnsubmittedProperties = true;
protected OperationTypes operationTypesOverride = null;
// These properties are accessed via getters and setters that operate on arrays.
// We back them with a list so that we can have the convenience .add methods
protected List<ForeignKey> additionalForeignKeys = new ArrayList<ForeignKey>();
protected List<String> customCriteria = new ArrayList<String>();
protected List<FilterAndSortCriteria> filterAndSortCriteria = new ArrayList<FilterAndSortCriteria>();
public enum Type {
STANDARD,
ADORNED,
MAP
}
/* ******************* */
/* STATIC INITIALIZERS */
/* ******************* */
public static PersistencePackageRequest standard() {
return new PersistencePackageRequest(Type.STANDARD);
}
public static PersistencePackageRequest adorned() {
return new PersistencePackageRequest(Type.ADORNED);
}
public static PersistencePackageRequest map() {
return new PersistencePackageRequest(Type.MAP);
}
/**
     * Creates a semi-populated PersistencePackageRequest based on the specified FieldMetadata. This initializer
* will copy over persistence perspective items from the metadata as well as set the appropriate OperationTypes
* as specified in the annotation/xml configuration for the field.
*
* @param md
* @return the newly created PersistencePackageRequest
*/
public static PersistencePackageRequest fromMetadata(FieldMetadata md) {
final PersistencePackageRequest request = new PersistencePackageRequest();
md.accept(new MetadataVisitor() {
@Override
public void visit(BasicFieldMetadata fmd) {
request.setType(Type.STANDARD);
request.setCeilingEntityClassname(fmd.getForeignKeyClass());
}
@Override
public void visit(BasicCollectionMetadata fmd) {
ForeignKey foreignKey = (ForeignKey) fmd.getPersistencePerspective()
.getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.FOREIGNKEY);
request.setType(Type.STANDARD);
request.setCeilingEntityClassname(fmd.getCollectionCeilingEntity());
request.setOperationTypesOverride(fmd.getPersistencePerspective().getOperationTypes());
request.setForeignKey(foreignKey);
}
@Override
public void visit(AdornedTargetCollectionMetadata fmd) {
AdornedTargetList adornedList = (AdornedTargetList) fmd.getPersistencePerspective()
.getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.ADORNEDTARGETLIST);
request.setType(Type.ADORNED);
request.setCeilingEntityClassname(fmd.getCollectionCeilingEntity());
request.setOperationTypesOverride(fmd.getPersistencePerspective().getOperationTypes());
request.setAdornedList(adornedList);
}
@Override
public void visit(MapMetadata fmd) {
MapStructure mapStructure = (MapStructure) fmd.getPersistencePerspective()
.getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.MAPSTRUCTURE);
ForeignKey foreignKey = (ForeignKey) fmd.getPersistencePerspective().
getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.FOREIGNKEY);
request.setType(Type.MAP);
request.setCeilingEntityClassname(foreignKey.getForeignKeyClass());
request.setOperationTypesOverride(fmd.getPersistencePerspective().getOperationTypes());
request.setMapStructure(mapStructure);
request.setForeignKey(foreignKey);
}
});
if (md instanceof CollectionMetadata) {
request.setCustomCriteria(((CollectionMetadata) md).getCustomCriteria());
}
return request;
}
/* ************ */
/* CONSTRUCTORS */
/* ************ */
public PersistencePackageRequest() {
}
public PersistencePackageRequest(Type type) {
this.type = type;
}
/* ************ */
/* WITH METHODS */
/* ************ */
public PersistencePackageRequest withType(Type type) {
setType(type);
return this;
}
public PersistencePackageRequest withCeilingEntityClassname(String className) {
setCeilingEntityClassname(className);
return this;
}
public PersistencePackageRequest withForeignKey(ForeignKey foreignKey) {
setForeignKey(foreignKey);
return this;
}
public PersistencePackageRequest withConfigKey(String configKey) {
setConfigKey(configKey);
return this;
}
public PersistencePackageRequest withFilterAndSortCriteria(FilterAndSortCriteria[] filterAndSortCriteria) {
if (ArrayUtils.isNotEmpty(filterAndSortCriteria)) {
setFilterAndSortCriteria(filterAndSortCriteria);
}
return this;
}
public PersistencePackageRequest withAdornedList(AdornedTargetList adornedList) {
setAdornedList(adornedList);
return this;
}
public PersistencePackageRequest withMapStructure(MapStructure mapStructure) {
setMapStructure(mapStructure);
return this;
}
public PersistencePackageRequest withCustomCriteria(String[] customCriteria) {
if (ArrayUtils.isNotEmpty(customCriteria)) {
setCustomCriteria(customCriteria);
}
return this;
}
public PersistencePackageRequest withEntity(Entity entity) {
setEntity(entity);
return this;
}
public PersistencePackageRequest withStartIndex(Integer startIndex) {
setStartIndex(startIndex);
return this;
}
public PersistencePackageRequest withMaxIndex(Integer maxIndex) {
setMaxIndex(maxIndex);
return this;
}
/* *********** */
/* ADD METHODS */
/* *********** */
public PersistencePackageRequest addAdditionalForeignKey(ForeignKey foreignKey) {
additionalForeignKeys.add(foreignKey);
return this;
}
public PersistencePackageRequest addSubRequest(String infoPropertyName, PersistencePackageRequest subRequest) {
subRequests.put(infoPropertyName, subRequest);
return this;
}
public PersistencePackageRequest addCustomCriteria(String customCriteria) {
if (StringUtils.isNotBlank(customCriteria)) {
this.customCriteria.add(customCriteria);
}
return this;
}
public PersistencePackageRequest addFilterAndSortCriteria(FilterAndSortCriteria filterAndSortCriteria) {
this.filterAndSortCriteria.add(filterAndSortCriteria);
return this;
}
public PersistencePackageRequest addFilterAndSortCriteria(FilterAndSortCriteria[] filterAndSortCriteria) {
if (filterAndSortCriteria != null) {
this.filterAndSortCriteria.addAll(Arrays.asList(filterAndSortCriteria));
}
return this;
}
public PersistencePackageRequest addFilterAndSortCriteria(List<FilterAndSortCriteria> filterAndSortCriteria) {
this.filterAndSortCriteria.addAll(filterAndSortCriteria);
return this;
}
/* ************************ */
/* CUSTOM GETTERS / SETTERS */
/* ************************ */
public String[] getCustomCriteria() {
String[] arr = new String[this.customCriteria.size()];
arr = this.customCriteria.toArray(arr);
return arr;
}
public ForeignKey[] getAdditionalForeignKeys() {
ForeignKey[] arr = new ForeignKey[this.additionalForeignKeys.size()];
arr = this.additionalForeignKeys.toArray(arr);
return arr;
}
    public void setAdditionalForeignKeys(ForeignKey[] additionalForeignKeys) {
        // copy into a mutable list; Arrays.asList alone is fixed-size and would
        // break the add* convenience methods above
        this.additionalForeignKeys = new ArrayList<ForeignKey>(Arrays.asList(additionalForeignKeys));
    }
    public void setCustomCriteria(String[] customCriteria) {
        this.customCriteria = new ArrayList<String>(Arrays.asList(customCriteria));
    }
public FilterAndSortCriteria[] getFilterAndSortCriteria() {
FilterAndSortCriteria[] arr = new FilterAndSortCriteria[this.filterAndSortCriteria.size()];
arr = this.filterAndSortCriteria.toArray(arr);
return arr;
}
public void setFilterAndSortCriteria(FilterAndSortCriteria[] filterAndSortCriteria) {
this.filterAndSortCriteria.addAll(Arrays.asList(filterAndSortCriteria));
}
/* ************************** */
/* STANDARD GETTERS / SETTERS */
/* ************************** */
public ForeignKey getForeignKey() {
return foreignKey;
}
public void setForeignKey(ForeignKey foreignKey) {
this.foreignKey = foreignKey;
}
public Type getType() {
return type;
}
public void setType(Type type) {
this.type = type;
}
public String getCeilingEntityClassname() {
return ceilingEntityClassname;
}
public void setCeilingEntityClassname(String ceilingEntityClassname) {
this.ceilingEntityClassname = ceilingEntityClassname;
}
public String getConfigKey() {
return configKey;
}
public void setConfigKey(String configKey) {
this.configKey = configKey;
}
public AdornedTargetList getAdornedList() {
return adornedList;
}
public void setAdornedList(AdornedTargetList adornedList) {
this.adornedList = adornedList;
}
public MapStructure getMapStructure() {
return mapStructure;
}
public void setMapStructure(MapStructure mapStructure) {
this.mapStructure = mapStructure;
}
public Entity getEntity() {
return entity;
}
public void setEntity(Entity entity) {
this.entity = entity;
}
public OperationTypes getOperationTypesOverride() {
return operationTypesOverride;
}
public void setOperationTypesOverride(OperationTypes operationTypesOverride) {
this.operationTypesOverride = operationTypesOverride;
}
public Integer getStartIndex() {
return startIndex;
}
public void setStartIndex(Integer startIndex) {
this.startIndex = startIndex;
}
public Integer getMaxIndex() {
return maxIndex;
}
public void setMaxIndex(Integer maxIndex) {
this.maxIndex = maxIndex;
}
public Map<String, PersistencePackageRequest> getSubRequests() {
return subRequests;
}
public void setSubRequests(Map<String, PersistencePackageRequest> subRequests) {
this.subRequests = subRequests;
}
public boolean isValidateUnsubmittedProperties() {
return validateUnsubmittedProperties;
}
public void setValidateUnsubmittedProperties(boolean validateUnsubmittedProperties) {
this.validateUnsubmittedProperties = validateUnsubmittedProperties;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_domain_PersistencePackageRequest.java
|
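Every with*/add* method above returns this, so requests compose fluently. A sketch using only methods shown above; the entity class name and criteria key are illustrative:

import org.broadleafcommerce.openadmin.server.domain.PersistencePackageRequest;

public class RequestSketch {
    public static void main(String[] args) {
        PersistencePackageRequest request = PersistencePackageRequest.standard()
                .withCeilingEntityClassname("org.example.catalog.Sku") // hypothetical entity
                .withStartIndex(0)
                .withMaxIndex(50)
                .addCustomCriteria("excludeArchived");                 // hypothetical criteria key
        System.out.println(request.getCeilingEntityClassname());
    }
}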
3,256 |
abstract class LongValuesComparatorBase<T extends Number> extends NumberComparatorBase<T> {
protected final IndexNumericFieldData<?> indexFieldData;
protected final long missingValue;
protected long bottom;
protected LongValues readerValues;
protected final SortMode sortMode;
public LongValuesComparatorBase(IndexNumericFieldData<?> indexFieldData, long missingValue, SortMode sortMode) {
this.indexFieldData = indexFieldData;
this.missingValue = missingValue;
this.sortMode = sortMode;
}
@Override
public final int compareBottom(int doc) throws IOException {
long v2 = sortMode.getRelevantValue(readerValues, doc, missingValue);
return compare(bottom, v2);
}
@Override
public final int compareDocToValue(int doc, T valueObj) throws IOException {
final long value = valueObj.longValue();
long docValue = sortMode.getRelevantValue(readerValues, doc, missingValue);
return compare(docValue, value);
}
static final int compare(long left, long right) {
if (left > right) {
return 1;
} else if (left < right) {
return -1;
} else {
return 0;
}
}
@Override
public final FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException {
readerValues = indexFieldData.load(context).getLongValues();
return this;
}
@Override
public int compareBottomMissing() {
return compare(bottom, missingValue);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_fielddata_fieldcomparator_LongValuesComparatorBase.java
|
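The static compare above branches instead of returning left - right because long subtraction can overflow and flip the sign. A quick demonstration of the hazard:

public class OverflowSketch {
    public static void main(String[] args) {
        long left = Long.MAX_VALUE, right = -1L;
        System.out.println(left - right);  // overflows to Long.MIN_VALUE: wrong sign
        System.out.println(left > right ? 1 : left < right ? -1 : 0); // 1: correct
    }
}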
509 |
public class DeleteIndexRequest extends MasterNodeOperationRequest<DeleteIndexRequest> {
private String[] indices;
// Delete index should work by default on both open and closed indices.
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true);
private TimeValue timeout = AcknowledgedRequest.DEFAULT_ACK_TIMEOUT;
DeleteIndexRequest() {
}
/**
* Constructs a new delete index request for the specified index.
*/
public DeleteIndexRequest(String index) {
this.indices = new String[]{index};
}
public DeleteIndexRequest(String... indices) {
this.indices = indices;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public DeleteIndexRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (indices == null || indices.length == 0) {
validationException = addValidationError("index / indices is missing", validationException);
}
return validationException;
}
public DeleteIndexRequest indices(String... indices) {
this.indices = indices;
return this;
}
/**
     * The indices to delete.
*/
String[] indices() {
return indices;
}
/**
* Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults
* to <tt>10s</tt>.
*/
public TimeValue timeout() {
return timeout;
}
/**
* Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults
* to <tt>10s</tt>.
*/
public DeleteIndexRequest timeout(TimeValue timeout) {
this.timeout = timeout;
return this;
}
/**
* Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults
* to <tt>10s</tt>.
*/
public DeleteIndexRequest timeout(String timeout) {
return timeout(TimeValue.parseTimeValue(timeout, null));
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
timeout = readTimeValue(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(indices);
indicesOptions.writeIndicesOptions(out);
timeout.writeTo(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_delete_DeleteIndexRequest.java
|
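A short sketch building a request with the API above; the index names are illustrative, and the meaning of the fromOptions flags (ignoreUnavailable, allowNoIndices, expandOpen, expandClosed) is an assumption consistent with the default shown in the class:

import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.support.IndicesOptions;

public class DeleteIndexSketch {
    public static void main(String[] args) {
        DeleteIndexRequest request = new DeleteIndexRequest("logs-2014-01", "logs-2014-02")
                .timeout("30s"); // parsed via TimeValue.parseTimeValue
        // fully lenient options, including wildcard expansion to open and closed indices
        request.indicesOptions(IndicesOptions.fromOptions(true, true, true, true));
        System.out.println(request.validate() == null); // true: indices are present
    }
}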
144 |
@Test
public class IntegerSerializerTest {
private static final int FIELD_SIZE = 4;
private static final Integer OBJECT = 1;
private OIntegerSerializer integerSerializer;
byte[] stream = new byte[FIELD_SIZE];
@BeforeClass
public void beforeClass() {
integerSerializer = new OIntegerSerializer();
}
public void testFieldSize() {
Assert.assertEquals(integerSerializer.getObjectSize(null), FIELD_SIZE);
}
public void testSerialize() {
integerSerializer.serialize(OBJECT, stream, 0);
Assert.assertEquals(integerSerializer.deserialize(stream, 0), OBJECT);
}
public void testSerializeNative() {
integerSerializer.serializeNative(OBJECT, stream, 0);
Assert.assertEquals(integerSerializer.deserializeNative(stream, 0), OBJECT);
}
public void testNativeDirectMemoryCompatibility() {
integerSerializer.serializeNative(OBJECT, stream, 0);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(stream);
try {
Assert.assertEquals(integerSerializer.deserializeFromDirectMemory(pointer, 0), OBJECT);
} finally {
pointer.free();
}
}
}
| 0true
|
commons_src_test_java_com_orientechnologies_common_serialization_types_IntegerSerializerTest.java
|
875 |
public class KryoSerializer {
public static final int DEFAULT_MAX_OUTPUT_SIZE = 10 * 1024 * 1024; // 10 MB in bytes
public static final int KRYO_ID_OFFSET = 50;
private final boolean registerRequired;
private final ThreadLocal<Kryo> kryos;
private final Map<Integer,TypeRegistration> registrations;
private final int maxOutputSize;
private static final StaticBuffer.Factory<Input> INPUT_FACTORY = new StaticBuffer.Factory<Input>() {
@Override
public Input get(byte[] array, int offset, int limit) {
//Needs to copy array - otherwise we see BufferUnderflow exceptions from concurrent access
//See https://github.com/EsotericSoftware/kryo#threading
return new Input(Arrays.copyOfRange(array,offset,limit));
}
};
public KryoSerializer(final List<Class> defaultRegistrations) {
this(defaultRegistrations, false);
}
public KryoSerializer(final List<Class> defaultRegistrations, boolean registrationRequired) {
this(defaultRegistrations, registrationRequired, DEFAULT_MAX_OUTPUT_SIZE);
}
public KryoSerializer(final List<Class> defaultRegistrations, boolean registrationRequired, int maxOutputSize) {
this.maxOutputSize = maxOutputSize;
this.registerRequired = registrationRequired;
this.registrations = new HashMap<Integer,TypeRegistration>();
for (Class clazz : defaultRegistrations) {
// Preconditions.checkArgument(isValidClass(clazz),"Class does not have a default constructor: %s",clazz.getName());
objectVerificationCache.put(clazz,Boolean.TRUE);
}
kryos = new ThreadLocal<Kryo>() {
public Kryo initialValue() {
Kryo k = new Kryo();
k.setRegistrationRequired(registerRequired);
k.register(Class.class,new DefaultSerializers.ClassSerializer());
for (int i=0;i<defaultRegistrations.size();i++) {
Class clazz = defaultRegistrations.get(i);
k.register(clazz, KRYO_ID_OFFSET + i);
}
return k;
}
};
}
Kryo getKryo() {
return kryos.get();
}
public Object readClassAndObject(ReadBuffer buffer) {
Input i = buffer.asRelative(INPUT_FACTORY);
int startPos = i.position();
Object value = getKryo().readClassAndObject(i);
buffer.movePositionTo(buffer.getPosition()+i.position()-startPos);
return value;
}
// public <T> T readObject(ReadBuffer buffer, Class<T> type) {
// Input i = buffer.asRelative(INPUT_FACTORY);
// int startPos = i.position();
// T value = getKryo().readObjectOrNull(i, type);
// buffer.movePositionTo(buffer.getPosition()+i.position()-startPos);
// return value;
// }
public <T> T readObjectNotNull(ReadBuffer buffer, Class<T> type) {
Input i = buffer.asRelative(INPUT_FACTORY);
int startPos = i.position();
T value = getKryo().readObject(i, type);
buffer.movePositionTo(buffer.getPosition()+i.position()-startPos);
return value;
}
private Output getOutput(Object object) {
return new Output(128,maxOutputSize);
}
private void writeOutput(WriteBuffer out, Output output) {
byte[] array = output.getBuffer();
int limit = output.position();
for (int i=0;i<limit;i++) out.putByte(array[i]);
}
// public void writeObject(WriteBuffer out, Object object, Class<?> type) {
// Preconditions.checkArgument(isValidObject(object), "Cannot de-/serialize object: %s", object);
// Output output = getOutput(object);
// getKryo().writeObjectOrNull(output, object, type);
// writeOutput(out,output);
// }
public void writeObjectNotNull(WriteBuffer out, Object object) {
Preconditions.checkNotNull(object);
Preconditions.checkArgument(isValidObject(object), "Cannot de-/serialize object: %s", object);
Output output = getOutput(object);
getKryo().writeObject(output, object);
writeOutput(out,output);
}
public void writeClassAndObject(WriteBuffer out, Object object) {
Preconditions.checkArgument(isValidObject(object), "Cannot de-/serialize object: %s", object);
Output output = getOutput(object);
getKryo().writeClassAndObject(output, object);
writeOutput(out,output);
}
private final Cache<Class<?>,Boolean> objectVerificationCache = CacheBuilder.newBuilder()
.maximumSize(10000).concurrencyLevel(4).initialCapacity(32).build();
final boolean isValidObject(final Object o) {
if (o==null) return true;
Boolean status = objectVerificationCache.getIfPresent(o.getClass());
if (status==null) {
Kryo kryo = getKryo();
if (!(kryo.getSerializer(o.getClass()) instanceof FieldSerializer)) status=Boolean.TRUE;
else if (!isValidClass(o.getClass())) status=Boolean.FALSE;
else {
try {
Output out = new Output(128, maxOutputSize);
kryo.writeClassAndObject(out,o);
Input in = new Input(out.getBuffer(),0,out.position());
Object ocopy = kryo.readClassAndObject(in);
status=(o.equals(ocopy)?Boolean.TRUE:Boolean.FALSE);
} catch (Throwable e) {
status=Boolean.FALSE;
}
}
objectVerificationCache.put(o.getClass(),status);
}
return status;
}
public static final boolean isValidClass(Class<?> type) {
if (type.isPrimitive()) return true;
else if (Enum.class.isAssignableFrom(type)) return true;
else if (type.isArray()) {
return isValidClass(type.getComponentType());
} else {
for (Constructor c : type.getDeclaredConstructors()) {
if (c.getParameterTypes().length==0) return true;
}
return false;
}
}
private static class TypeRegistration {
final Class type;
final com.esotericsoftware.kryo.Serializer serializer;
TypeRegistration(Class type, com.esotericsoftware.kryo.Serializer serializer) {
this.type=type;
this.serializer=serializer;
}
}
}
| 1no label
|
titan-core_src_main_java_com_thinkaurelius_titan_graphdb_database_serialize_kryo_KryoSerializer.java
|
8 |
private class OutgoingMessageHolder implements MessageHolder
{
private Deque<Message<? extends MessageType>> outgoingMessages = new ArrayDeque<Message<? extends MessageType>>();
@Override
public synchronized void offer( Message<? extends MessageType> message )
{
outgoingMessages.addFirst( message );
}
public synchronized Message<? extends MessageType> nextOutgoingMessage()
{
return outgoingMessages.pollFirst();
}
}
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_StateMachines.java
|
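Note the design: offer() adds at the front and nextOutgoingMessage() also polls from the front, so the holder behaves as a stack rather than a queue. The same ArrayDeque pattern in isolation:

import java.util.ArrayDeque;
import java.util.Deque;

public class LifoSketch {
    public static void main(String[] args) {
        Deque<String> outgoing = new ArrayDeque<String>();
        outgoing.addFirst("first offered");
        outgoing.addFirst("second offered");
        System.out.println(outgoing.pollFirst()); // second offered (most recent wins)
        System.out.println(outgoing.pollFirst()); // first offered
        System.out.println(outgoing.pollFirst()); // null once empty
    }
}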
1,270 |
@SuppressWarnings("unchecked")
public class InternalTransportClusterAdminClient extends AbstractClusterAdminClient implements InternalClusterAdminClient {
private final TransportClientNodesService nodesService;
private final ThreadPool threadPool;
private final ImmutableMap<ClusterAction, TransportActionNodeProxy> actions;
@Inject
public InternalTransportClusterAdminClient(Settings settings, TransportClientNodesService nodesService, ThreadPool threadPool, TransportService transportService,
Map<String, GenericAction> actions) {
this.nodesService = nodesService;
this.threadPool = threadPool;
MapBuilder<ClusterAction, TransportActionNodeProxy> actionsBuilder = new MapBuilder<ClusterAction, TransportActionNodeProxy>();
for (GenericAction action : actions.values()) {
if (action instanceof ClusterAction) {
actionsBuilder.put((ClusterAction) action, new TransportActionNodeProxy(settings, action, transportService));
}
}
this.actions = actionsBuilder.immutableMap();
}
@Override
public ThreadPool threadPool() {
return this.threadPool;
}
@SuppressWarnings("unchecked")
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(final ClusterAction<Request, Response, RequestBuilder> action, final Request request) {
final TransportActionNodeProxy<Request, Response> proxy = actions.get(action);
return nodesService.execute(new TransportClientNodesService.NodeCallback<ActionFuture<Response>>() {
@Override
public ActionFuture<Response> doWithNode(DiscoveryNode node) throws ElasticsearchException {
return proxy.execute(node, request);
}
});
}
@SuppressWarnings("unchecked")
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final ClusterAction<Request, Response, RequestBuilder> action, final Request request, final ActionListener<Response> listener) {
final TransportActionNodeProxy<Request, Response> proxy = actions.get(action);
nodesService.execute(new TransportClientNodesService.NodeListenerCallback<Response>() {
@Override
public void doWithNode(DiscoveryNode node, ActionListener<Response> listener) throws ElasticsearchException {
proxy.execute(node, request, listener);
}
}, listener);
}
}
| 1no label
|
src_main_java_org_elasticsearch_client_transport_support_InternalTransportClusterAdminClient.java
|
516 |
public class IndicesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder<IndicesExistsRequest, IndicesExistsResponse, IndicesExistsRequestBuilder> {
public IndicesExistsRequestBuilder(IndicesAdminClient indicesClient, String... indices) {
super((InternalIndicesAdminClient) indicesClient, new IndicesExistsRequest(indices));
}
public IndicesExistsRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
     * Specifies what types of requested indices to ignore and how to handle wildcard indices expressions.
*
* For example indices that don't exist.
*/
public IndicesExistsRequestBuilder setIndicesOptions(IndicesOptions options) {
request.indicesOptions(options);
return this;
}
@Override
protected void doExecute(ActionListener<IndicesExistsResponse> listener) {
((IndicesAdminClient) client).exists(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_exists_indices_IndicesExistsRequestBuilder.java
|
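A hedged usage sketch through the admin client; that prepareExists constructs this builder and that the response exposes isExists() are assumptions consistent with the Elasticsearch client API:

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;

public class ExistsSketch {
    // 'client' construction omitted; any connected Client works
    static boolean indexExists(Client client) {
        return client.admin().indices()
                .prepareExists("my_index")   // assumed factory for this builder
                .setIndicesOptions(IndicesOptions.fromOptions(true, true, true, false))
                .execute().actionGet()
                .isExists();
    }
}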
31 |
final class NestedLiteralCompletionProposal implements ICompletionProposal,
ICompletionProposalExtension2 {
private final int loc;
private final int index;
private final String value;
NestedLiteralCompletionProposal(String value, int loc,
int index) {
this.value = value;
this.loc = loc;
this.index = index;
}
public String getAdditionalProposalInfo() {
return null;
}
@Override
public void apply(IDocument document) {
//the following awfulness is necessary because the
//insertion point may have changed (and even its
//text may have changed, since the proposal was
//instantiated).
try {
IRegion li = document.getLineInformationOfOffset(loc);
int endOfLine = li.getOffset() + li.getLength();
int startOfArgs = getFirstPosition();
int offset = findCharCount(index, document,
loc+startOfArgs, endOfLine,
",;", "", true)+1;
if (offset>0 && document.getChar(offset)==' ') {
offset++;
}
int nextOffset = findCharCount(index+1, document,
loc+startOfArgs, endOfLine,
",;", "", true);
int middleOffset = findCharCount(1, document,
offset, nextOffset,
"=", "", true)+1;
if (middleOffset>0 &&
document.getChar(middleOffset)=='>') {
middleOffset++;
}
while (middleOffset>0 &&
document.getChar(middleOffset)==' ') {
middleOffset++;
}
if (middleOffset>offset &&
middleOffset<nextOffset) {
offset = middleOffset;
}
String str = value;
if (nextOffset==-1) {
nextOffset = offset;
}
if (document.getChar(nextOffset)=='}') {
str += " ";
}
document.replace(offset, nextOffset-offset, str);
}
catch (BadLocationException e) {
e.printStackTrace();
}
//adding imports drops us out of linked mode :(
/*try {
DocumentChange tc = new DocumentChange("imports", document);
tc.setEdit(new MultiTextEdit());
HashSet<Declaration> decs = new HashSet<Declaration>();
CompilationUnit cu = cpc.getRootNode();
importDeclaration(decs, d, cu);
if (d instanceof Functional) {
List<ParameterList> pls = ((Functional) d).getParameterLists();
if (!pls.isEmpty()) {
for (Parameter p: pls.get(0).getParameters()) {
MethodOrValue pm = p.getModel();
if (pm instanceof Method) {
for (ParameterList ppl: ((Method) pm).getParameterLists()) {
for (Parameter pp: ppl.getParameters()) {
importSignatureTypes(pp.getModel(), cu, decs);
}
}
}
}
}
}
applyImports(tc, decs, cu, document);
tc.perform(new NullProgressMonitor());
}
catch (Exception e) {
e.printStackTrace();
}*/
}
@Override
public Point getSelection(IDocument document) {
return null;
}
@Override
public String getDisplayString() {
return value;
}
@Override
public Image getImage() {
return getDecoratedImage(CEYLON_LITERAL, 0, false);
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public void apply(ITextViewer viewer, char trigger,
int stateMask, int offset) {
apply(viewer.getDocument());
}
@Override
public void selected(ITextViewer viewer, boolean smartToggle) {}
@Override
public void unselected(ITextViewer viewer) {}
@Override
public boolean validate(IDocument document, int currentOffset,
DocumentEvent event) {
if (event==null) {
return true;
}
else {
try {
IRegion li = document.getLineInformationOfOffset(loc);
int endOfLine = li.getOffset() + li.getLength();
int startOfArgs = getFirstPosition();
int offset = findCharCount(index, document,
loc+startOfArgs, endOfLine,
",;", "", true)+1;
String content = document.get(offset, currentOffset - offset);
int eq = content.indexOf("=");
if (eq>0) {
content = content.substring(eq+1);
}
String filter = content.trim().toLowerCase();
if (value.toLowerCase().startsWith(filter)) {
return true;
}
}
catch (BadLocationException e) {
// ignore concurrently modified document
}
return false;
}
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_InvocationCompletionProposal.java
|
1,532 |
public class EdgesVerticesMap {
public static final String DIRECTION = Tokens.makeNamespace(EdgesVerticesMap.class) + ".direction";
public enum Counters {
IN_EDGES_PROCESSED,
OUT_EDGES_PROCESSED
}
public static Configuration createConfiguration(final Direction direction) {
final Configuration configuration = new EmptyConfiguration();
configuration.set(DIRECTION, direction.name());
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private Direction direction;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.direction = Direction.valueOf(context.getConfiguration().get(DIRECTION));
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
if (this.direction.equals(IN) || this.direction.equals(BOTH)) {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(IN)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
value.getPaths(edge, true);
edgesProcessed++;
edge.clearPaths();
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_PROCESSED, edgesProcessed);
} else {
for (final Edge e : value.getEdges(IN)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
edge.clearPaths();
}
}
}
if (this.direction.equals(OUT) || this.direction.equals(BOTH)) {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
value.getPaths(edge, true);
edgesProcessed++;
edge.clearPaths();
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
} else {
for (final Edge e : value.getEdges(OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
edge.clearPaths();
}
}
}
context.write(NullWritable.get(), value);
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_EdgesVerticesMap.java
|
2,933 |
public class MultiResultSet extends AbstractSet<QueryableEntry> {
private Set<Object> index;
private final List<ConcurrentMap<Data, QueryableEntry>> resultSets
= new ArrayList<ConcurrentMap<Data, QueryableEntry>>();
public MultiResultSet() {
}
public void addResultSet(ConcurrentMap<Data, QueryableEntry> resultSet) {
resultSets.add(resultSet);
}
@Override
public boolean contains(Object o) {
QueryableEntry entry = (QueryableEntry) o;
if (index != null) {
return checkFromIndex(entry);
} else {
//todo: what is the point of this condition? Is it some kind of optimization?
if (resultSets.size() > 3) {
index = new HashSet<Object>();
for (ConcurrentMap<Data, QueryableEntry> result : resultSets) {
for (QueryableEntry queryableEntry : result.values()) {
index.add(queryableEntry.getIndexKey());
}
}
return checkFromIndex(entry);
} else {
for (ConcurrentMap<Data, QueryableEntry> resultSet : resultSets) {
if (resultSet.containsKey(entry.getIndexKey())) {
return true;
}
}
return false;
}
}
}
private boolean checkFromIndex(QueryableEntry entry) {
return index.contains(entry.getIndexKey());
}
@Override
public Iterator<QueryableEntry> iterator() {
return new It();
}
class It implements Iterator<QueryableEntry> {
int currentIndex;
Iterator<QueryableEntry> currentIterator;
@Override
public boolean hasNext() {
if (resultSets.size() == 0) {
return false;
}
if (currentIterator != null && currentIterator.hasNext()) {
return true;
}
while (currentIndex < resultSets.size()) {
currentIterator = resultSets.get(currentIndex++).values().iterator();
if (currentIterator.hasNext()) {
return true;
}
}
return false;
}
@Override
public QueryableEntry next() {
if (resultSets.size() == 0) {
return null;
}
return currentIterator.next();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
@Override
public boolean add(QueryableEntry obj) {
throw new UnsupportedOperationException();
}
@Override
public int size() {
int size = 0;
for (ConcurrentMap<Data, QueryableEntry> resultSet : resultSets) {
size += resultSet.size();
}
return size;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_query_impl_MultiResultSet.java
|
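contains() above pays a one-time index build once more than three result sets are involved, trading memory for single-lookup membership checks instead of probing each set. The same trade-off in miniature with plain collections:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class UnionContainsSketch {
    public static void main(String[] args) {
        List<Map<String, Object>> resultSets = new ArrayList<Map<String, Object>>();
        for (int i = 0; i < 5; i++) {
            Map<String, Object> m = new HashMap<String, Object>();
            m.put("key" + i, i);
            resultSets.add(m);
        }
        // many sets: build one hash index up front...
        Set<String> index = new HashSet<String>();
        for (Map<String, Object> m : resultSets) {
            index.addAll(m.keySet());
        }
        // ...then answer membership with a single lookup
        System.out.println(index.contains("key3")); // true
    }
}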
36 |
public class TitanGraphQueryTestSuite extends GraphQueryTestSuite {
public TitanGraphQueryTestSuite(final GraphTest graphTest) {
super(graphTest);
}
@Override
public void testGraphQueryForVertices() {
TitanGraph g = (TitanGraph) graphTest.generateGraph();
if (g.getRelationType("age") == null) {
TitanManagement mgmt = g.getManagementSystem();
mgmt.makePropertyKey("age").dataType(Integer.class).cardinality(Cardinality.SINGLE).make();
mgmt.commit();
}
g.shutdown();
super.testGraphQueryForVertices();
}
@Override
public void testGraphQueryForEdges() {
TitanGraph g = (TitanGraph) graphTest.generateGraph();
if (g.getRelationType("weight") == null) {
TitanManagement mgmt = g.getManagementSystem();
mgmt.makePropertyKey("weight").dataType(Double.class).cardinality(Cardinality.SINGLE).make();
mgmt.commit();
}
g.shutdown();
super.testGraphQueryForEdges();
}
}
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_blueprints_TitanGraphQueryTestSuite.java
|
730 |
public class TransportShardDeleteAction extends TransportShardReplicationOperationAction<ShardDeleteRequest, ShardDeleteRequest, ShardDeleteResponse> {
@Inject
public TransportShardDeleteAction(Settings settings, TransportService transportService,
ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool,
ShardStateAction shardStateAction) {
super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction);
}
@Override
protected boolean checkWriteConsistency() {
return true;
}
@Override
protected ShardDeleteRequest newRequestInstance() {
return new ShardDeleteRequest();
}
@Override
protected ShardDeleteRequest newReplicaRequestInstance() {
return new ShardDeleteRequest();
}
@Override
protected ShardDeleteResponse newResponseInstance() {
return new ShardDeleteResponse();
}
@Override
protected String transportAction() {
return "indices/index/b_shard/delete";
}
@Override
protected String executor() {
return ThreadPool.Names.INDEX;
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, ShardDeleteRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, ShardDeleteRequest request) {
return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index());
}
@Override
protected PrimaryResponse<ShardDeleteResponse, ShardDeleteRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
ShardDeleteRequest request = shardRequest.request;
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version())
.origin(Engine.Operation.Origin.PRIMARY);
indexShard.delete(delete);
// update the version to happen on the replicas
request.version(delete.version());
if (request.refresh()) {
try {
indexShard.refresh(new Engine.Refresh("refresh_flag_delete").force(false));
} catch (Exception e) {
// ignore
}
}
ShardDeleteResponse response = new ShardDeleteResponse(delete.version(), delete.found());
return new PrimaryResponse<ShardDeleteResponse, ShardDeleteRequest>(shardRequest.request, response, null);
}
@Override
protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
ShardDeleteRequest request = shardRequest.request;
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version())
.origin(Engine.Operation.Origin.REPLICA);
indexShard.delete(delete);
if (request.refresh()) {
try {
indexShard.refresh(new Engine.Refresh("refresh_flag_delete").force(false));
} catch (Exception e) {
// ignore
}
}
}
@Override
protected ShardIterator shards(ClusterState clusterState, ShardDeleteRequest request) {
GroupShardsIterator group = clusterService.operationRouting().broadcastDeleteShards(clusterService.state(), request.index());
for (ShardIterator shardIt : group) {
if (shardIt.shardId().id() == request.shardId()) {
return shardIt;
}
}
throw new ElasticsearchIllegalStateException("No shards iterator found for shard [" + request.shardId() + "]");
}
}
| 0true
|
src_main_java_org_elasticsearch_action_delete_index_TransportShardDeleteAction.java
|
2,155 |
public class AndDocIdSet extends DocIdSet {
private final DocIdSet[] sets;
public AndDocIdSet(DocIdSet[] sets) {
this.sets = sets;
}
@Override
public boolean isCacheable() {
for (DocIdSet set : sets) {
if (!set.isCacheable()) {
return false;
}
}
return true;
}
@Override
public Bits bits() throws IOException {
Bits[] bits = new Bits[sets.length];
for (int i = 0; i < sets.length; i++) {
bits[i] = sets[i].bits();
if (bits[i] == null) {
return null;
}
}
return new AndBits(bits);
}
@Override
public DocIdSetIterator iterator() throws IOException {
        // we try to be smart here: if we can iterate through docsets quickly, prefer to iterate
        // over them as much as possible before falling back to "bits"-based ones to check
List<DocIdSet> iterators = new ArrayList<DocIdSet>(sets.length);
List<Bits> bits = new ArrayList<Bits>(sets.length);
for (DocIdSet set : sets) {
if (DocIdSets.isFastIterator(set)) {
iterators.add(set);
} else {
Bits bit = set.bits();
if (bit != null) {
bits.add(bit);
} else {
iterators.add(set);
}
}
}
if (bits.isEmpty()) {
return new IteratorBasedIterator(iterators.toArray(new DocIdSet[iterators.size()]));
}
if (iterators.isEmpty()) {
return new BitsDocIdSetIterator(new AndBits(bits.toArray(new Bits[bits.size()])));
}
// combination of both..., first iterating over the "fast" ones, and then checking on the more
// expensive ones
return new BitsDocIdSetIterator.FilteredIterator(
new IteratorBasedIterator(iterators.toArray(new DocIdSet[iterators.size()])),
new AndBits(bits.toArray(new Bits[bits.size()]))
);
}
static class AndBits implements Bits {
private final Bits[] bits;
AndBits(Bits[] bits) {
this.bits = bits;
}
@Override
public boolean get(int index) {
for (Bits bit : bits) {
if (!bit.get(index)) {
return false;
}
}
return true;
}
@Override
public int length() {
return bits[0].length();
}
}
static class IteratorBasedIterator extends DocIdSetIterator {
int lastReturn = -1;
private DocIdSetIterator[] iterators = null;
private final long cost;
IteratorBasedIterator(DocIdSet[] sets) throws IOException {
iterators = new DocIdSetIterator[sets.length];
int j = 0;
long cost = Integer.MAX_VALUE;
for (DocIdSet set : sets) {
if (set == null) {
lastReturn = DocIdSetIterator.NO_MORE_DOCS; // non matching
break;
} else {
DocIdSetIterator dcit = set.iterator();
if (dcit == null) {
lastReturn = DocIdSetIterator.NO_MORE_DOCS; // non matching
break;
}
iterators[j++] = dcit;
cost = Math.min(cost, dcit.cost());
}
}
this.cost = cost;
if (lastReturn != DocIdSetIterator.NO_MORE_DOCS) {
lastReturn = (iterators.length > 0 ? -1 : DocIdSetIterator.NO_MORE_DOCS);
}
}
@Override
public final int docID() {
return lastReturn;
}
@Override
public final int nextDoc() throws IOException {
if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;
DocIdSetIterator dcit = iterators[0];
int target = dcit.nextDoc();
int size = iterators.length;
int skip = 0;
int i = 1;
while (i < size) {
if (i != skip) {
dcit = iterators[i];
int docid = dcit.advance(target);
if (docid > target) {
target = docid;
if (i != 0) {
skip = i;
i = 0;
continue;
} else
skip = 0;
}
}
i++;
}
return (lastReturn = target);
}
@Override
public final int advance(int target) throws IOException {
if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;
DocIdSetIterator dcit = iterators[0];
target = dcit.advance(target);
int size = iterators.length;
int skip = 0;
int i = 1;
while (i < size) {
if (i != skip) {
dcit = iterators[i];
int docid = dcit.advance(target);
if (docid > target) {
target = docid;
if (i != 0) {
skip = i;
i = 0;
continue;
} else {
skip = 0;
}
}
}
i++;
}
return (lastReturn = target);
}
@Override
public long cost() {
return cost;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_lucene_docset_AndDocIdSet.java
|
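A sketch of the intersection semantics, assuming Lucene 4.x (where FixedBitSet extends DocIdSet) and the AndDocIdSet class above on the classpath:

import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.FixedBitSet;

public class AndDocIdSetSketch {
    public static void main(String[] args) throws Exception {
        FixedBitSet a = new FixedBitSet(8);
        a.set(1); a.set(3); a.set(5);
        FixedBitSet b = new FixedBitSet(8);
        b.set(3); b.set(5); b.set(7);
        DocIdSetIterator it = new AndDocIdSet(new DocIdSet[]{a, b}).iterator();
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
            System.out.println(doc); // prints 3, then 5: docs present in both sets
        }
    }
}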
269 |
public interface OCommandOutputListener {
public void onMessage(String iText);
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_OCommandOutputListener.java
|
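The single-method interface above is a plain callback; a minimal implementation that forwards messages to stdout:

import com.orientechnologies.orient.core.command.OCommandOutputListener;

public class ListenerSketch {
    public static void main(String[] args) {
        OCommandOutputListener listener = new OCommandOutputListener() {
            @Override
            public void onMessage(String iText) {
                System.out.println(iText);
            }
        };
        listener.onMessage("backup started"); // illustrative message only
    }
}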
5,760 |
public class FetchPhase implements SearchPhase {
private final FetchSubPhase[] fetchSubPhases;
@Inject
public FetchPhase(HighlightPhase highlightPhase, ScriptFieldsFetchSubPhase scriptFieldsPhase, PartialFieldsFetchSubPhase partialFieldsPhase,
MatchedQueriesFetchSubPhase matchedQueriesPhase, ExplainFetchSubPhase explainPhase, VersionFetchSubPhase versionPhase,
FetchSourceSubPhase fetchSourceSubPhase, FieldDataFieldsFetchSubPhase fieldDataFieldsFetchSubPhase) {
this.fetchSubPhases = new FetchSubPhase[]{scriptFieldsPhase, partialFieldsPhase, matchedQueriesPhase, explainPhase, highlightPhase,
fetchSourceSubPhase, versionPhase, fieldDataFieldsFetchSubPhase};
}
@Override
public Map<String, ? extends SearchParseElement> parseElements() {
ImmutableMap.Builder<String, SearchParseElement> parseElements = ImmutableMap.builder();
parseElements.put("fields", new FieldsParseElement());
for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
parseElements.putAll(fetchSubPhase.parseElements());
}
return parseElements.build();
}
@Override
public void preProcess(SearchContext context) {
}
public void execute(SearchContext context) {
FieldsVisitor fieldsVisitor;
List<String> extractFieldNames = null;
if (!context.hasFieldNames()) {
if (context.hasPartialFields()) {
// partial fields need the source, so fetch it
fieldsVisitor = new UidAndSourceFieldsVisitor();
} else {
// no fields specified, default to return source if no explicit indication
if (!context.hasScriptFields() && !context.hasFetchSourceContext()) {
context.fetchSourceContext(new FetchSourceContext(true));
}
fieldsVisitor = context.sourceRequested() ? new UidAndSourceFieldsVisitor() : new JustUidFieldsVisitor();
}
} else if (context.fieldNames().isEmpty()) {
if (context.sourceRequested()) {
fieldsVisitor = new UidAndSourceFieldsVisitor();
} else {
fieldsVisitor = new JustUidFieldsVisitor();
}
} else {
boolean loadAllStored = false;
Set<String> fieldNames = null;
for (String fieldName : context.fieldNames()) {
if (fieldName.equals("*")) {
loadAllStored = true;
continue;
}
if (fieldName.equals(SourceFieldMapper.NAME)) {
if (context.hasFetchSourceContext()) {
context.fetchSourceContext().fetchSource(true);
} else {
context.fetchSourceContext(new FetchSourceContext(true));
}
continue;
}
FieldMappers x = context.smartNameFieldMappers(fieldName);
if (x == null) {
                    // Only fail if we know it is an object field; missing paths / fields shouldn't fail.
if (context.smartNameObjectMapper(fieldName) != null) {
throw new ElasticsearchIllegalArgumentException("field [" + fieldName + "] isn't a leaf field");
}
} else if (x.mapper().fieldType().stored()) {
if (fieldNames == null) {
fieldNames = new HashSet<String>();
}
fieldNames.add(x.mapper().names().indexName());
} else {
if (extractFieldNames == null) {
extractFieldNames = newArrayList();
}
extractFieldNames.add(fieldName);
}
}
if (loadAllStored) {
fieldsVisitor = new AllFieldsVisitor(); // load everything, including _source
} else if (fieldNames != null) {
boolean loadSource = extractFieldNames != null || context.sourceRequested();
fieldsVisitor = new CustomFieldsVisitor(fieldNames, loadSource);
} else if (extractFieldNames != null || context.sourceRequested()) {
fieldsVisitor = new UidAndSourceFieldsVisitor();
} else {
fieldsVisitor = new JustUidFieldsVisitor();
}
}
InternalSearchHit[] hits = new InternalSearchHit[context.docIdsToLoadSize()];
FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
for (int index = 0; index < context.docIdsToLoadSize(); index++) {
int docId = context.docIdsToLoad()[context.docIdsToLoadFrom() + index];
loadStoredFields(context, fieldsVisitor, docId);
fieldsVisitor.postProcess(context.mapperService());
Map<String, SearchHitField> searchFields = null;
if (!fieldsVisitor.fields().isEmpty()) {
searchFields = new HashMap<String, SearchHitField>(fieldsVisitor.fields().size());
for (Map.Entry<String, List<Object>> entry : fieldsVisitor.fields().entrySet()) {
searchFields.put(entry.getKey(), new InternalSearchHitField(entry.getKey(), entry.getValue()));
}
}
DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type());
Text typeText;
if (documentMapper == null) {
typeText = new StringAndBytesText(fieldsVisitor.uid().type());
} else {
typeText = documentMapper.typeText();
}
InternalSearchHit searchHit = new InternalSearchHit(docId, fieldsVisitor.uid().id(), typeText, searchFields);
hits[index] = searchHit;
int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves());
AtomicReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
int subDoc = docId - subReaderContext.docBase;
// go over and extract fields that are not mapped / stored
context.lookup().setNextReader(subReaderContext);
context.lookup().setNextDocId(subDoc);
if (fieldsVisitor.source() != null) {
context.lookup().source().setNextSource(fieldsVisitor.source());
}
if (extractFieldNames != null) {
for (String extractFieldName : extractFieldNames) {
List<Object> values = context.lookup().source().extractRawValues(extractFieldName);
if (!values.isEmpty()) {
if (searchHit.fieldsOrNull() == null) {
searchHit.fields(new HashMap<String, SearchHitField>(2));
}
SearchHitField hitField = searchHit.fields().get(extractFieldName);
if (hitField == null) {
hitField = new InternalSearchHitField(extractFieldName, new ArrayList<Object>(2));
searchHit.fields().put(extractFieldName, hitField);
}
for (Object value : values) {
hitField.values().add(value);
}
}
}
}
hitContext.reset(searchHit, subReaderContext, subDoc, context.searcher().getIndexReader(), docId, fieldsVisitor);
for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
if (fetchSubPhase.hitExecutionNeeded(context)) {
fetchSubPhase.hitExecute(context, hitContext);
}
}
}
for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
if (fetchSubPhase.hitsExecutionNeeded(context)) {
fetchSubPhase.hitsExecute(context, hits);
}
}
context.fetchResult().hits(new InternalSearchHits(hits, context.queryResult().topDocs().totalHits, context.queryResult().topDocs().getMaxScore()));
}
private void loadStoredFields(SearchContext context, FieldsVisitor fieldVisitor, int docId) {
fieldVisitor.reset();
try {
context.searcher().doc(docId, fieldVisitor);
} catch (IOException e) {
throw new FetchPhaseExecutionException(context, "Failed to fetch doc id [" + docId + "]", e);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_fetch_FetchPhase.java
|
2,236 |
static class PRNG {
private static final long multiplier = 0x5DEECE66DL;
private static final long addend = 0xBL;
private static final long mask = (1L << 48) - 1;
final long originalSeed;
long seed;
PRNG(long seed) {
this.originalSeed = seed;
this.seed = (seed ^ multiplier) & mask;
}
public float random(int doc) {
if (doc == 0) {
doc = 0xCAFEBAB;
}
long rand = doc;
rand |= rand << 32;
rand ^= rand;
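            // note: x ^ x == 0 for any x, so this line always clears rand to 0 and the
            // per-document value computed above never reaches nextFloat(); the result
            // depends only on the evolving seed.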
return nextFloat(rand);
}
public float nextFloat(long rand) {
seed = (seed * multiplier + addend) & mask;
rand ^= seed;
double result = rand / (double)(1L << 54);
return (float) result;
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_lucene_search_function_RandomScoreFunction.java
|
378 |
@RunWith(HazelcastParallelClassRunner.class)
@Category(NightlyTest.class)
public class ClientMultiMapListenerStressTest {
static final int MAX_SECONDS = 60 * 10;
static final String MAP_NAME = randomString();
static final int NUMBER_OF_CLIENTS = 8;
static final int THREADS_PER_CLIENT = 8;
static HazelcastInstance server;
@BeforeClass
public static void init() {
server = Hazelcast.newHazelcastInstance();
}
@AfterClass
public static void destroy() {
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
}
@Category(ProblematicTest.class)
@Test
public void listenerAddStressTest() throws InterruptedException {
final PutItemsThread[] putThreads = new PutItemsThread[NUMBER_OF_CLIENTS * THREADS_PER_CLIENT];
        int idx = 0;
        for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
            HazelcastInstance client = HazelcastClient.newHazelcastClient();
            for (int j = 0; j < THREADS_PER_CLIENT; j++) {
                PutItemsThread t = new PutItemsThread(client);
                putThreads[idx++] = t;
            }
        }
        for (int i = 0; i < putThreads.length; i++) {
            putThreads[i].start();
        }
        MultiMap mm = server.getMultiMap(MAP_NAME);
        assertJoinable(MAX_SECONDS, putThreads);
assertEquals(PutItemsThread.MAX_ITEMS * putThreads.length, mm.size());
assertTrueEventually(new AssertTask() {
public void run() throws Exception {
                for (int i = 0; i < putThreads.length; i++) {
putThreads[i].assertResult(PutItemsThread.MAX_ITEMS * putThreads.length);
}
}
});
}
public class PutItemsThread extends Thread{
public static final int MAX_ITEMS = 1000;
public final MyEntryListener listener = new MyEntryListener();
public HazelcastInstance hzInstance;
public MultiMap mm;
public String id;
public PutItemsThread(HazelcastInstance hzInstance){
this.id = randomString();
this.hzInstance = hzInstance;
this.mm = hzInstance.getMultiMap(MAP_NAME);
mm.addEntryListener(listener, true);
}
        public void run() {
            for (int i = 0; i < MAX_ITEMS; i++) {
                mm.put(id + i, id + i);
            }
        }
        public void assertResult(int target) {
            System.out.println("listener " + id + " add events received " + listener.add.get());
            assertEquals(target, listener.add.get());
        }
}
static class MyEntryListener implements EntryListener {
public AtomicInteger add = new AtomicInteger(0);
public void entryAdded(EntryEvent event) {
add.incrementAndGet();
}
public void entryRemoved(EntryEvent event) {
}
public void entryUpdated(EntryEvent event) {
}
public void entryEvicted(EntryEvent event) {
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_multimap_ClientMultiMapListenerStressTest.java
|
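The stress test above registers one entry listener per client thread. A minimal single-node sketch of the same registration, assuming the Hazelcast 3.x API used in the test (EntryAdapter is the convenience base class for EntryListener):
    HazelcastInstance hz = Hazelcast.newHazelcastInstance();
    MultiMap<String, String> mm = hz.getMultiMap("demo");
    final AtomicInteger adds = new AtomicInteger();
    mm.addEntryListener(new EntryAdapter<String, String>() {
        @Override
        public void entryAdded(EntryEvent<String, String> event) {
            adds.incrementAndGet(); // one event per put, delivered to every registered listener
        }
    }, true); // true = include values in the events
    mm.put("k", "v");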
240 |
public class ModuleConfigurationType implements BroadleafEnumerationType, Serializable {
private static final long serialVersionUID = 1L;
private static final Map<String, ModuleConfigurationType> TYPES = new LinkedHashMap<String, ModuleConfigurationType>();
public static final ModuleConfigurationType FULFILLMENT_PRICING = new ModuleConfigurationType("FULFILLMENT_PRICING", "Fulfillment Pricing Module");
public static final ModuleConfigurationType TAX_CALCULATION = new ModuleConfigurationType("TAX_CALCULATION", "Tax Calculation Module");
public static final ModuleConfigurationType ADDRESS_VERIFICATION = new ModuleConfigurationType("ADDRESS_VERIFICATION", "Address Verification Module");
public static final ModuleConfigurationType PAYMENT_PROCESSOR = new ModuleConfigurationType("PAYMENT_PROCESSOR", "Payment Processor Module");
public static final ModuleConfigurationType CDN_PROVIDER = new ModuleConfigurationType("CDN_PROVIDER", "Content Delivery Network Module");
public static ModuleConfigurationType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public ModuleConfigurationType() {
//do nothing
}
public ModuleConfigurationType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
@Override
public String getType() {
return type;
}
@Override
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
ModuleConfigurationType other = (ModuleConfigurationType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_config_service_type_ModuleConfigurationType.java
|
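ModuleConfigurationType follows Broadleaf's extensible-enumeration pattern: constructing an instance self-registers it in the static TYPES map, so modules can add members at class-load time. A usage sketch (FRAUD_CHECK is a hypothetical addition):
    ModuleConfigurationType tax = ModuleConfigurationType.getInstance("TAX_CALCULATION");
    // A third-party module extends the "enum" simply by constructing a new member once:
    ModuleConfigurationType fraud = new ModuleConfigurationType("FRAUD_CHECK", "Fraud Check Module");
    assert ModuleConfigurationType.getInstance("FRAUD_CHECK") == fraud;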
662 |
class ShardValidateQueryResponse extends BroadcastShardOperationResponse {
private boolean valid;
private String explanation;
private String error;
ShardValidateQueryResponse() {
}
public ShardValidateQueryResponse(String index, int shardId, boolean valid, String explanation, String error) {
super(index, shardId);
this.valid = valid;
this.explanation = explanation;
this.error = error;
}
public boolean isValid() {
return this.valid;
}
public String getExplanation() {
return explanation;
}
public String getError() {
return error;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
valid = in.readBoolean();
explanation = in.readOptionalString();
error = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(valid);
out.writeOptionalString(explanation);
out.writeOptionalString(error);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_validate_query_ShardValidateQueryResponse.java
|
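readFrom and writeTo above must touch the fields in the same order, since the stream carries no field names. A round-trip sketch of that invariant using plain java.io streams (not the Elasticsearch stream API):
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeBoolean(true);   // valid
    out.writeUTF("matched");  // explanation
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    boolean valid = in.readBoolean(); // reads must mirror the writes exactly
    String explanation = in.readUTF();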
698 |
public class BulkRequestTests extends ElasticsearchTestCase {
@Test
public void testSimpleBulk1() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json");
// translate Windows line endings (\r\n) to standard ones (\n)
if (Constants.WINDOWS) {
bulkAction = Strings.replace(bulkAction, "\r\n", "\n");
}
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
assertThat(bulkRequest.numberOfActions(), equalTo(3));
assertThat(((IndexRequest) bulkRequest.requests().get(0)).source().toBytes(), equalTo(new BytesArray("{ \"field1\" : \"value1\" }").toBytes()));
assertThat(bulkRequest.requests().get(1), instanceOf(DeleteRequest.class));
assertThat(((IndexRequest) bulkRequest.requests().get(2)).source().toBytes(), equalTo(new BytesArray("{ \"field1\" : \"value3\" }").toBytes()));
}
@Test
public void testSimpleBulk2() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk2.json");
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
assertThat(bulkRequest.numberOfActions(), equalTo(3));
}
@Test
public void testSimpleBulk3() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk3.json");
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
assertThat(bulkRequest.numberOfActions(), equalTo(3));
}
@Test
public void testSimpleBulk4() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk4.json");
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);
assertThat(bulkRequest.numberOfActions(), equalTo(4));
assertThat(((UpdateRequest) bulkRequest.requests().get(0)).id(), equalTo("1"));
assertThat(((UpdateRequest) bulkRequest.requests().get(0)).retryOnConflict(), equalTo(2));
assertThat(((UpdateRequest) bulkRequest.requests().get(0)).doc().source().toUtf8(), equalTo("{\"field\":\"value\"}"));
assertThat(((UpdateRequest) bulkRequest.requests().get(1)).id(), equalTo("0"));
assertThat(((UpdateRequest) bulkRequest.requests().get(1)).type(), equalTo("type1"));
assertThat(((UpdateRequest) bulkRequest.requests().get(1)).index(), equalTo("index1"));
assertThat(((UpdateRequest) bulkRequest.requests().get(1)).script(), equalTo("counter += param1"));
assertThat(((UpdateRequest) bulkRequest.requests().get(1)).scriptLang(), equalTo("js"));
assertThat(((UpdateRequest) bulkRequest.requests().get(1)).scriptParams().size(), equalTo(1));
assertThat(((Integer) ((UpdateRequest) bulkRequest.requests().get(1)).scriptParams().get("param1")), equalTo(1));
assertThat(((UpdateRequest) bulkRequest.requests().get(1)).upsertRequest().source().toUtf8(), equalTo("{\"counter\":1}"));
}
@Test
public void testBulkAllowExplicitIndex() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json");
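// With allowExplicitIndex set to false, a bulk action that names its own index must be rejected.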
try {
new BulkRequest().add(new BytesArray(bulkAction.getBytes(Charsets.UTF_8)), true, null, null, false);
fail();
} catch (Exception e) {
}
bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk5.json");
new BulkRequest().add(new BytesArray(bulkAction.getBytes(Charsets.UTF_8)), true, "test", null, false);
}
}
| 0true
|
src_test_java_org_elasticsearch_action_bulk_BulkRequestTests.java
|
161 |
@Service("blStructuredContentService")
public class StructuredContentServiceImpl extends AbstractContentService implements StructuredContentService {
protected static final Log LOG = LogFactory.getLog(StructuredContentServiceImpl.class);
protected static String AND = " && ";
@Resource(name="blStructuredContentDao")
protected StructuredContentDao structuredContentDao;
@Resource(name="blSandBoxItemDao")
protected SandBoxItemDao sandBoxItemDao;
@Resource(name="blSandBoxDao")
protected SandBoxDao sandBoxDao;
@Resource(name="blStaticAssetService")
protected StaticAssetService staticAssetService;
@Resource(name="blLocaleService")
protected LocaleService localeService;
@Resource(name="blContentRuleProcessors")
protected List<StructuredContentRuleProcessor> contentRuleProcessors;
@Value("${automatically.approve.structured.content}")
protected boolean automaticallyApproveAndPromoteStructuredContent=true;
protected Cache structuredContentCache;
protected List<ArchivedStructuredContentPublisher> archivedStructuredContentListeners;
@Override
public StructuredContent findStructuredContentById(Long contentId) {
return structuredContentDao.findStructuredContentById(contentId);
}
@Override
public StructuredContentType findStructuredContentTypeById(Long id) {
return structuredContentDao.findStructuredContentTypeById(id);
}
@Override
public StructuredContentType findStructuredContentTypeByName(String name) {
return structuredContentDao.findStructuredContentTypeByName(name);
}
@Override
public List<StructuredContentType> retrieveAllStructuredContentTypes() {
return structuredContentDao.retrieveAllStructuredContentTypes();
}
@Override
public Map<String, StructuredContentField> findFieldsByContentId(Long contentId) {
StructuredContent sc = findStructuredContentById(contentId);
return structuredContentDao.readFieldsForStructuredContentItem(sc);
}
@Override
public List<StructuredContent> findContentItems(SandBox sandbox, Criteria c) {
return findItems(sandbox, c, StructuredContent.class, StructuredContentImpl.class, "originalItemId");
}
@Override
public List<StructuredContent> findAllContentItems() {
return structuredContentDao.findAllContentItems();
}
@Override
public Long countContentItems(SandBox sandbox, Criteria c) {
return countItems(sandbox, c, StructuredContentImpl.class, "originalItemId");
}
@Override
public StructuredContent addStructuredContent(StructuredContent content, SandBox destinationSandbox) {
if (automaticallyApproveAndPromoteStructuredContent) {
if (destinationSandbox != null && destinationSandbox.getSite() != null) {
destinationSandbox = destinationSandbox.getSite().getProductionSandbox();
} else {
// Null means production for single-site installations.
destinationSandbox = null;
}
}
content.setSandbox(destinationSandbox);
content.setArchivedFlag(false);
content.setDeletedFlag(false);
StructuredContent sc = structuredContentDao.addOrUpdateContentItem(content);
if (! isProductionSandBox(destinationSandbox)) {
sandBoxItemDao.addSandBoxItem(destinationSandbox.getId(), SandBoxOperationType.ADD, SandBoxItemType.STRUCTURED_CONTENT, sc.getContentName(), sc.getId(), null);
}
return sc;
}
@Override
public StructuredContent updateStructuredContent(StructuredContent content, SandBox destSandbox) {
if (content.getLockedFlag()) {
throw new IllegalArgumentException("Unable to update a locked record");
}
if (automaticallyApproveAndPromoteStructuredContent) {
if (destSandbox != null && destSandbox.getSite() != null) {
destSandbox = destSandbox.getSite().getProductionSandbox();
} else {
// Null means production for single-site installations.
destSandbox = null;
}
}
if (checkForSandboxMatch(content.getSandbox(), destSandbox)) {
if (content.getDeletedFlag()) {
SandBoxItem item = sandBoxItemDao.retrieveBySandboxAndTemporaryItemId(content.getSandbox()==null?null:content.getSandbox().getId(), SandBoxItemType.STRUCTURED_CONTENT, content.getId());
if (content.getOriginalItemId() == null && item != null) {
// This content item was added in this sandbox and now needs to be deleted.
content.setArchivedFlag(true);
item.setArchivedFlag(true);
} else if (item != null) {
// This content item was being updated but now is being deleted - so change the
// sandbox operation type to deleted
item.setSandBoxOperationType(SandBoxOperationType.DELETE);
sandBoxItemDao.updateSandBoxItem(item);
} else if (automaticallyApproveAndPromoteStructuredContent) {
content.setArchivedFlag(true);
}
}
return structuredContentDao.addOrUpdateContentItem(content);
} else if (checkForProductionSandbox(content.getSandbox())) {
// The passed in content is an existing content item whose values were updated
// Instead, we want to create a clone of this item for the destSandbox
// Create the clone
StructuredContent clonedContent = content.cloneEntity();
clonedContent.setOriginalItemId(content.getId());
clonedContent.setSandbox(destSandbox);
// Detach the old item so it doesn't get updated
structuredContentDao.detach(content);
// Update the new item
StructuredContent returnContent = structuredContentDao.addOrUpdateContentItem(clonedContent);
// Lookup the previous item so that we can update its locked status
StructuredContent prod = findStructuredContentById(content.getId());
prod.setLockedFlag(true);
prod = structuredContentDao.addOrUpdateContentItem(prod);
SandBoxOperationType type = SandBoxOperationType.UPDATE;
if (clonedContent.getDeletedFlag()) {
type = SandBoxOperationType.DELETE;
}
sandBoxItemDao.addSandBoxItem(destSandbox.getId(), type, SandBoxItemType.STRUCTURED_CONTENT, returnContent.getContentName(), returnContent.getId(), returnContent.getOriginalItemId());
return returnContent;
} else {
// This should happen via a promote, revert, or reject in the sandbox service
throw new IllegalArgumentException("Update called when promote or reject was expected.");
}
}
/**
* Saves the given <b>type</b> and returns the merged instance
*/
@Override
public StructuredContentType saveStructuredContentType(StructuredContentType type) {
return structuredContentDao.saveStructuredContentType(type);
}
protected boolean checkForSandboxMatch(SandBox src, SandBox dest) {
if (src != null) {
if (dest != null) {
return src.getId().equals(dest.getId());
}
}
return (src == null && dest == null);
}
protected boolean checkForProductionSandbox(SandBox dest) {
boolean productionSandbox = false;
if (dest == null) {
productionSandbox = true;
} else {
if (dest.getSite() != null && dest.getSite().getProductionSandbox() != null && dest.getSite().getProductionSandbox().getId() != null) {
productionSandbox = dest.getSite().getProductionSandbox().getId().equals(dest.getId());
}
}
return productionSandbox;
}
@Override
public void deleteStructuredContent(StructuredContent content, SandBox destinationSandbox) {
content.setDeletedFlag(true);
updateStructuredContent(content, destinationSandbox);
}
protected String buildRuleExpression(StructuredContent sc) {
StringBuffer ruleExpression = null;
Map<String, StructuredContentRule> ruleMap = sc.getStructuredContentMatchRules();
if (ruleMap != null) {
for (String ruleKey : ruleMap.keySet()) {
if (ruleMap.get(ruleKey).getMatchRule() == null) continue;
if (ruleExpression == null) {
ruleExpression = new StringBuffer(ruleMap.get(ruleKey).getMatchRule());
} else {
ruleExpression.append(AND);
ruleExpression.append(ruleMap.get(ruleKey).getMatchRule());
}
}
}
if (ruleExpression != null) {
return ruleExpression.toString();
} else {
return null;
}
}
protected List<ItemCriteriaDTO> buildItemCriteriaDTOList(StructuredContent sc) {
List<ItemCriteriaDTO> itemCriteriaDTOList = new ArrayList<ItemCriteriaDTO>();
for(StructuredContentItemCriteria criteria : sc.getQualifyingItemCriteria()) {
ItemCriteriaDTO criteriaDTO = new ItemCriteriaDTO();
criteriaDTO.setMatchRule(criteria.getMatchRule());
criteriaDTO.setQty(criteria.getQuantity());
itemCriteriaDTOList.add(criteriaDTO);
}
return itemCriteriaDTOList;
}
protected void buildFieldValues(StructuredContent sc, StructuredContentDTO scDTO, boolean secure) {
String envPrefix = staticAssetService.getStaticAssetEnvironmentUrlPrefix();
if (envPrefix != null && secure) {
envPrefix = staticAssetService.getStaticAssetEnvironmentSecureUrlPrefix();
}
String cmsPrefix = staticAssetService.getStaticAssetUrlPrefix();
for (String fieldKey : sc.getStructuredContentFields().keySet()) {
StructuredContentField scf = sc.getStructuredContentFields().get(fieldKey);
String originalValue = scf.getValue();
if (StringUtils.isNotBlank(envPrefix) && StringUtils.isNotBlank(originalValue) && StringUtils.isNotBlank(cmsPrefix) && originalValue.contains(cmsPrefix)) {
if (originalValue.startsWith("/")) {
originalValue = originalValue.substring(1);
}
String fldValue = originalValue.replaceAll(cmsPrefix, envPrefix+cmsPrefix);
scDTO.getValues().put(fieldKey, fldValue);
} else {
scDTO.getValues().put(fieldKey, originalValue);
}
}
}
/**
* Converts a list of structured content items to a list of structured content DTOs.<br>
* Internally calls buildStructuredContentDTO(...).
*
* @param structuredContentList the items to convert (may be null)
* @param secure whether asset URLs should use the secure environment prefix
* @return the converted DTO list, empty if the input was null
*/
protected List<StructuredContentDTO> buildStructuredContentDTOList(List<StructuredContent> structuredContentList, boolean secure) {
List<StructuredContentDTO> dtoList = new ArrayList<StructuredContentDTO>();
if (structuredContentList != null) {
for(StructuredContent sc : structuredContentList) {
dtoList.add(buildStructuredContentDTO(sc, secure));
}
}
return dtoList;
}
/**
* Converts a StructuredContent into a StructuredContentDTO. If the item contains fields with
* Broadleaf CMS urls, the urls are converted to utilize the configured environment url prefix.
* @param sc the content item to convert
* @param secure whether asset URLs should use the secure environment prefix
* @return the converted DTO
*/
protected StructuredContentDTO buildStructuredContentDTO(StructuredContent sc, boolean secure) {
StructuredContentDTO scDTO = new StructuredContentDTO();
scDTO.setContentName(sc.getContentName());
scDTO.setContentType(sc.getStructuredContentType().getName());
scDTO.setId(sc.getId());
scDTO.setPriority(sc.getPriority());
if (sc.getLocale() != null) {
scDTO.setLocaleCode(sc.getLocale().getLocaleCode());
}
if (sc.getSandbox() != null) {
scDTO.setSandboxId(sc.getSandbox().getId());
}
scDTO.setRuleExpression(buildRuleExpression(sc));
buildFieldValues(sc, scDTO, secure);
if (sc.getQualifyingItemCriteria() != null && sc.getQualifyingItemCriteria().size() > 0) {
scDTO.setItemCriteriaDTOList(buildItemCriteriaDTOList(sc));
}
return scDTO;
}
protected List<StructuredContentDTO> mergeContent(List<StructuredContentDTO> productionList, List<StructuredContent> sandboxList, boolean secure) {
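// Sandbox items shadow their production originals; deleted or offline sandbox items drop out of the result.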
if (sandboxList == null || sandboxList.size() == 0) {
return productionList;
}
Map<Long,StructuredContentDTO> scMap = new LinkedHashMap<Long,StructuredContentDTO>();
if (productionList != null) {
for(StructuredContentDTO sc : productionList) {
scMap.put(sc.getId(), sc);
}
}
for(StructuredContent sc : sandboxList) {
if (sc.getOriginalItemId() != null) {
scMap.remove(sc.getOriginalItemId());
}
if (! sc.getDeletedFlag() && ! sc.getOfflineFlag()) {
StructuredContentDTO convertedItem = buildStructuredContentDTO(sc, secure);
scMap.put(sc.getId(), convertedItem);
}
}
ArrayList<StructuredContentDTO> returnList = new ArrayList<StructuredContentDTO>(scMap.values());
if (returnList.size() > 1) {
Collections.sort(returnList, new BeanComparator("priority"));
}
return returnList;
}
protected List<StructuredContentDTO> evaluateAndPriortizeContent(List<StructuredContentDTO> structuredContentList, int count, Map<String, Object> ruleDTOs) {
// some optimization for single item lists which don't require prioritization
if (structuredContentList.size() == 1) {
if (processContentRules(structuredContentList.get(0), ruleDTOs)) {
return structuredContentList;
} else {
return new ArrayList<StructuredContentDTO>();
}
}
Iterator<StructuredContentDTO> structuredContentIterator = structuredContentList.iterator();
List<StructuredContentDTO> returnList = new ArrayList<StructuredContentDTO>();
List<StructuredContentDTO> tmpList = new ArrayList<StructuredContentDTO>();
Integer lastPriority = Integer.MIN_VALUE;
while (structuredContentIterator.hasNext()) {
StructuredContentDTO sc = structuredContentIterator.next();
if (! lastPriority.equals(sc.getPriority())) {
// If we've moved to another priority, then shuffle all of the items
// with the previous priority and add them to the return list.
if (tmpList.size() > 1) {
Collections.shuffle(tmpList);
}
returnList.addAll(tmpList);
tmpList.clear();
// If we've added enough items to satisfy the count, then return the
// list.
if (returnList.size() == count) {
return returnList;
} else if (returnList.size() > count) {
return returnList.subList(0, count);
} else {
if (processContentRules(sc, ruleDTOs)) {
tmpList.add(sc);
}
}
} else {
if (processContentRules(sc, ruleDTOs)) {
tmpList.add(sc);
}
}
lastPriority = sc.getPriority();
}
if (tmpList.size() > 1) {
Collections.shuffle(tmpList);
}
returnList.addAll(tmpList);
if (returnList.size() > count) {
return returnList.subList(0, count);
}
return returnList;
}
protected boolean processContentRules(StructuredContentDTO sc, Map<String, Object> ruleDTOs) {
if (contentRuleProcessors != null) {
for (StructuredContentRuleProcessor processor : contentRuleProcessors) {
boolean matchFound = processor.checkForMatch(sc, ruleDTOs);
if (! matchFound) {
return false;
}
}
}
return true;
}
@Override
public List<StructuredContentDTO> lookupStructuredContentItemsByType(SandBox sandBox, StructuredContentType contentType, Locale locale, Integer count, Map<String, Object> ruleDTOs, boolean secure) {
List<StructuredContent> sandBoxContentList = null;
Locale languageOnlyLocale = findLanguageOnlyLocale(locale);
String cacheKey = buildTypeKey(getProductionSandBox(sandBox), locale, contentType.getName());
cacheKey = cacheKey+"-"+secure;
List<StructuredContentDTO> productionContentDTOList = getStructuredContentListFromCache(cacheKey);
if (productionContentDTOList == null) {
List<StructuredContent> productionContentList = structuredContentDao.findActiveStructuredContentByType(getProductionSandBox(sandBox), contentType, locale, languageOnlyLocale);
productionContentDTOList = buildStructuredContentDTOList(productionContentList, secure);
if (productionContentDTOList != null) {
addStructuredContentListToCache(cacheKey, productionContentDTOList);
}
}
final List<StructuredContentDTO> contentList;
if (! isProductionSandBox(sandBox)) {
sandBoxContentList = structuredContentDao.findActiveStructuredContentByType(sandBox, contentType, locale, languageOnlyLocale);
contentList = mergeContent(productionContentDTOList, sandBoxContentList, secure);
} else {
contentList = productionContentDTOList;
}
return evaluateAndPriortizeContent(contentList, count, ruleDTOs);
}
@Override
public List<StructuredContentDTO> lookupStructuredContentItemsByName(SandBox sandBox, StructuredContentType contentType, String contentName, org.broadleafcommerce.common.locale.domain.Locale locale, Integer count, Map<String, Object> ruleDTOs, boolean secure) {
List<StructuredContent> sandBoxContentList = null;
Locale languageOnlyLocale = findLanguageOnlyLocale(locale);
String cacheKey = buildNameKey(getProductionSandBox(sandBox), locale, contentType.getName(), contentName);
cacheKey = cacheKey+"-"+secure;
List<StructuredContentDTO> productionContentDTOList = getStructuredContentListFromCache(cacheKey);
if (productionContentDTOList == null) {
List<StructuredContent> productionContentList = structuredContentDao.findActiveStructuredContentByNameAndType(getProductionSandBox(sandBox), contentType, contentName, locale, languageOnlyLocale);
productionContentDTOList = buildStructuredContentDTOList(productionContentList, secure);
if (productionContentDTOList != null) {
addStructuredContentListToCache(cacheKey, productionContentDTOList);
}
}
final List<StructuredContentDTO> contentList;
if (! isProductionSandBox(sandBox)) {
sandBoxContentList = structuredContentDao.findActiveStructuredContentByNameAndType(sandBox, contentType, contentName, locale, languageOnlyLocale);
contentList = mergeContent(productionContentDTOList, sandBoxContentList, secure);
} else {
contentList = productionContentDTOList;
}
return evaluateAndPriortizeContent(contentList, count, ruleDTOs);
}
@Override
public List<StructuredContentDTO> lookupStructuredContentItemsByName(SandBox sandBox, String contentName, org.broadleafcommerce.common.locale.domain.Locale locale, Integer count, Map<String, Object> ruleDTOs, boolean secure) {
List<StructuredContent> sandBoxContentList = null;
Locale languageOnlyLocale = findLanguageOnlyLocale(locale);
String cacheKey = buildNameKey(getProductionSandBox(sandBox), locale, "any", contentName);
cacheKey = cacheKey+"-"+secure;
List<StructuredContentDTO> productionContentDTOList = getStructuredContentListFromCache(cacheKey);
if (productionContentDTOList == null) {
List<StructuredContent> productionContentList = structuredContentDao.findActiveStructuredContentByName(getProductionSandBox(sandBox), contentName, locale, languageOnlyLocale);
productionContentDTOList = buildStructuredContentDTOList(productionContentList, secure);
if (productionContentDTOList != null) {
addStructuredContentListToCache(cacheKey, productionContentDTOList);
}
}
final List<StructuredContentDTO> contentList;
if (! isProductionSandBox(sandBox)) {
sandBoxContentList = structuredContentDao.findActiveStructuredContentByName(sandBox, contentName, locale, languageOnlyLocale);
contentList = mergeContent(productionContentDTOList, sandBoxContentList, secure);
} else {
contentList = productionContentDTOList;
}
return evaluateAndPriortizeContent(contentList, count, ruleDTOs);
}
protected SandBox getProductionSandBox(SandBox currentSandBox) {
SandBox productionSandBox = null;
if (currentSandBox == null || SandBoxType.PRODUCTION.equals(currentSandBox.getSandBoxType())) {
productionSandBox = currentSandBox;
} else if (currentSandBox.getSite() != null) {
productionSandBox = currentSandBox.getSite().getProductionSandbox();
}
return productionSandBox;
}
protected boolean isProductionSandBox(SandBox dest) {
if (dest == null) {
return true;
} else {
return SandBoxType.PRODUCTION.equals(dest.getSandBoxType());
}
}
protected void productionItemArchived(StructuredContent sc) {
// Immediately remove the content from this VM.
removeStructuredContentFromCache(sc);
if (archivedStructuredContentListeners != null) {
for (ArchivedStructuredContentPublisher listener : archivedStructuredContentListeners) {
listener.processStructuredContentArchive(sc, buildTypeKey(sc), buildNameKey(sc));
}
}
}
@Override
public void itemPromoted(SandBoxItem sandBoxItem, SandBox destinationSandBox) {
if (! SandBoxItemType.STRUCTURED_CONTENT.equals(sandBoxItem.getSandBoxItemType())) {
return;
}
StructuredContent sc = structuredContentDao.findStructuredContentById(sandBoxItem.getTemporaryItemId());
if (sc == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Structured Content Item not found " + sandBoxItem.getTemporaryItemId());
}
// Nothing to promote; return before the null item is dereferenced below.
return;
} else {
boolean productionSandBox = isProductionSandBox(destinationSandBox);
if (productionSandBox) {
sc.setLockedFlag(false);
} else {
sc.setLockedFlag(true);
}
if (productionSandBox && sc.getOriginalItemId() != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Structured content promoted to production. " + sc.getId() + ". Archiving original item " + sc.getOriginalItemId());
}
StructuredContent originalSC = structuredContentDao.findStructuredContentById(sc.getOriginalItemId());
originalSC.setArchivedFlag(Boolean.TRUE);
structuredContentDao.addOrUpdateContentItem(originalSC);
productionItemArchived(originalSC);
if (sc.getDeletedFlag()) {
// if this deleted content is being pushed to production, set it as archived.
sc.setArchivedFlag(true);
}
// We are archiving the old content and making this the new "production content item", so
// null out the original item id before saving.
sc.setOriginalItemId(null);
}
}
if (sc.getOriginalSandBox() == null) {
sc.setOriginalSandBox(sc.getSandbox());
}
sc.setSandbox(destinationSandBox);
structuredContentDao.addOrUpdateContentItem(sc);
}
@Override
public void itemRejected(SandBoxItem sandBoxItem, SandBox destinationSandBox) {
if (! SandBoxItemType.STRUCTURED_CONTENT.equals(sandBoxItem.getSandBoxItemType())) {
return;
}
StructuredContent sc = structuredContentDao.findStructuredContentById(sandBoxItem.getTemporaryItemId());
if (sc != null) {
sc.setSandbox(destinationSandBox);
sc.setOriginalSandBox(null);
sc.setLockedFlag(false);
structuredContentDao.addOrUpdateContentItem(sc);
}
}
@Override
public void itemReverted(SandBoxItem sandBoxItem) {
if (! SandBoxItemType.STRUCTURED_CONTENT.equals(sandBoxItem.getSandBoxItemType())) {
return;
}
StructuredContent sc = structuredContentDao.findStructuredContentById(sandBoxItem.getTemporaryItemId());
if (sc != null) {
if (sandBoxItem.getOriginalItemId() != null) {
sc.setArchivedFlag(Boolean.TRUE);
sc.setLockedFlag(Boolean.FALSE);
structuredContentDao.addOrUpdateContentItem(sc);
StructuredContent originalSc = structuredContentDao.findStructuredContentById(sandBoxItem.getOriginalItemId());
originalSc.setLockedFlag(false);
structuredContentDao.addOrUpdateContentItem(originalSc);
}
}
}
public List<StructuredContentRuleProcessor> getContentRuleProcessors() {
return contentRuleProcessors;
}
public void setContentRuleProcessors(List<StructuredContentRuleProcessor> contentRuleProcessors) {
this.contentRuleProcessors = contentRuleProcessors;
}
protected Cache getStructuredContentCache() {
if (structuredContentCache == null) {
structuredContentCache = CacheManager.getInstance().getCache("cmsStructuredContentCache");
}
return structuredContentCache;
}
protected String buildNameKey(StructuredContent sc) {
return buildNameKey(sc.getSandbox(), sc.getLocale(), sc.getStructuredContentType().getName(), sc.getContentName());
}
protected String buildTypeKey(StructuredContent sc) {
return buildTypeKey(sc.getSandbox(), sc.getLocale(), sc.getStructuredContentType().getName());
}
protected String buildNameKey(SandBox currentSandbox, Locale locale, String contentType, String contentName) {
StringBuffer key = new StringBuffer(contentType).append("-").append(contentName);
if (locale != null) {
key.append("-").append(locale.getLocaleCode());
}
if (currentSandbox != null) {
key.append("-").append(currentSandbox.getId());
}
return key.toString();
}
protected String buildTypeKey(SandBox currentSandbox, Locale locale, String contentType) {
StringBuffer key = new StringBuffer(contentType);
if (locale != null) {
key.append("-").append(locale.getLocaleCode());
}
if (currentSandbox != null) {
key.append("-").append(currentSandbox.getId());
}
return key.toString();
}
protected void addStructuredContentListToCache(String key, List<StructuredContentDTO> scDTOList) {
getStructuredContentCache().put(new Element(key, scDTOList));
}
protected List<StructuredContentDTO> getStructuredContentListFromCache(String key) {
Element scElement = getStructuredContentCache().get(key);
if (scElement != null) {
return (List<StructuredContentDTO>) scElement.getValue();
}
return null;
}
/**
* Call to evict an item from the cache.
* @param sc
*/
public void removeStructuredContentFromCache(StructuredContent sc) {
// Remove secure and non-secure instances of the content item.
// Typically the item will be in one or the other if at all.
removeItemFromCache(buildNameKey(sc), buildTypeKey(sc));
}
/**
* Call to evict both secure and non-secure SC items matching
* the passed in keys.
*
* @param nameKey the name-based cache key
* @param typeKey the type-based cache key
*/
@Override
public void removeItemFromCache(String nameKey, String typeKey) {
// Remove secure and non-secure instances of the structured content.
// Typically the structured content will be in one or the other if at all.
getStructuredContentCache().remove(nameKey+"-"+true);
getStructuredContentCache().remove(nameKey+"-"+false);
getStructuredContentCache().remove(typeKey+"-"+true);
getStructuredContentCache().remove(typeKey+"-"+false);
}
public List<ArchivedStructuredContentPublisher> getArchivedStructuredContentListeners() {
return archivedStructuredContentListeners;
}
public void setArchivedStructuredContentListeners(List<ArchivedStructuredContentPublisher> archivedStructuredContentListeners) {
this.archivedStructuredContentListeners = archivedStructuredContentListeners;
}
@Override
public boolean isAutomaticallyApproveAndPromoteStructuredContent() {
return automaticallyApproveAndPromoteStructuredContent;
}
@Override
public void setAutomaticallyApproveAndPromoteStructuredContent(boolean automaticallyApproveAndPromoteStructuredContent) {
this.automaticallyApproveAndPromoteStructuredContent = automaticallyApproveAndPromoteStructuredContent;
}
protected Locale findLanguageOnlyLocale(Locale locale) {
if (locale != null ) {
Locale languageOnlyLocale = localeService.findLocaleByCode(LocaleUtil.findLanguageCode(locale));
if (languageOnlyLocale != null) {
return languageOnlyLocale;
}
}
return locale;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_service_StructuredContentServiceImpl.java
|
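evaluateAndPriortizeContent above shuffles items within each priority band so that equal-priority content rotates fairly across requests. A standalone sketch of that group-by-priority-then-shuffle idea, using a hypothetical Item type with a priority() accessor:
    List<Item> result = new ArrayList<Item>();
    List<Item> band = new ArrayList<Item>();
    Integer last = null;
    for (Item it : items) { // items are assumed pre-sorted by priority, as in the service
        if (last != null && !last.equals(it.priority())) {
            Collections.shuffle(band); // randomize within the finished band
            result.addAll(band);
            band.clear();
        }
        band.add(it);
        last = it.priority();
    }
    Collections.shuffle(band); // flush the final band
    result.addAll(band);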
1,152 |
public class OSQLMethodRemove extends OAbstractSQLMethod {
public static final String NAME = "remove";
public OSQLMethodRemove() {
super(NAME, 1, -1);
}
@Override
public Object execute(final OIdentifiable iCurrentRecord, final OCommandContext iContext, Object ioResult, Object[] iMethodParams) {
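// Arguments of the form "$name" are resolved as command-context variables before removal.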
if (iMethodParams != null && iMethodParams.length > 0 && iMethodParams[0] != null)
iMethodParams = OMultiValue.array(iMethodParams, Object.class, new OCallable<Object, Object>() {
@Override
public Object call(final Object iArgument) {
if (iArgument instanceof String && ((String) iArgument).startsWith("$"))
return iContext.getVariable((String) iArgument);
return iArgument;
}
});
for (Object o : iMethodParams) {
ioResult = OMultiValue.remove(ioResult, o, false);
}
return ioResult;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodRemove.java
|
367 |
public static class TestReducerFactory
implements ReducerFactory<String, Integer, Integer> {
public TestReducerFactory() {
}
@Override
public Reducer<String, Integer, Integer> newReducer(String key) {
return new TestReducer();
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java
|
348 |
transportService.sendRequest(node, NodeShutdownRequestHandler.ACTION, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
logger.trace("[cluster_shutdown]: received shutdown response from [{}]", node);
latch.countDown();
}
@Override
public void handleException(TransportException exp) {
logger.warn("[cluster_shutdown]: received failed shutdown response from [{}]", exp, node);
latch.countDown();
}
});
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_shutdown_TransportNodesShutdownAction.java
|
1,091 |
public final class ODefaultSQLFunctionFactory implements OSQLFunctionFactory {
private static final Map<String, Object> FUNCTIONS = new HashMap<String, Object>();
static {
// MISC FUNCTIONS
FUNCTIONS.put(OSQLFunctionCoalesce.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionCoalesce());
FUNCTIONS.put(OSQLFunctionIf.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionIf());
FUNCTIONS.put(OSQLFunctionIfNull.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionIfNull());
FUNCTIONS.put(OSQLFunctionFormat.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionFormat());
FUNCTIONS.put(OSQLFunctionDate.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionDate.class);
FUNCTIONS.put(OSQLFunctionSysdate.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionSysdate.class);
FUNCTIONS.put(OSQLFunctionCount.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionCount.class);
FUNCTIONS.put(OSQLFunctionDocument.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionDocument.class);
FUNCTIONS.put(OSQLFunctionDistinct.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionDistinct.class);
FUNCTIONS.put(OSQLFunctionUnion.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionUnion.class);
FUNCTIONS.put(OSQLFunctionIntersect.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionIntersect.class);
FUNCTIONS.put(OSQLFunctionDifference.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionDifference.class);
FUNCTIONS.put(OSQLFunctionFirst.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionFirst.class);
FUNCTIONS.put(OSQLFunctionLast.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionLast.class);
FUNCTIONS.put(OSQLFunctionList.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionList.class);
FUNCTIONS.put(OSQLFunctionSet.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionSet.class);
FUNCTIONS.put(OSQLFunctionMap.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionMap.class);
FUNCTIONS.put(OSQLFunctionEncode.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionEncode());
FUNCTIONS.put(OSQLFunctionDecode.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionDecode());
// MATH FUNCTIONS
FUNCTIONS.put(OSQLFunctionMin.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionMin.class);
FUNCTIONS.put(OSQLFunctionMax.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionMax.class);
FUNCTIONS.put(OSQLFunctionSum.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionSum.class);
FUNCTIONS.put(OSQLFunctionAverage.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionAverage.class);
FUNCTIONS.put(OSQLFunctionEval.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionEval.class);
// GEO FUNCTIONS
FUNCTIONS.put(OSQLFunctionDistance.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionDistance());
}
@Override
public Set<String> getFunctionNames() {
return FUNCTIONS.keySet();
}
@Override
public boolean hasFunction(final String name) {
return FUNCTIONS.containsKey(name);
}
@Override
public OSQLFunction createFunction(final String name) {
final Object obj = FUNCTIONS.get(name);
if (obj == null)
throw new OCommandExecutionException("Unknowned function name :" + name);
if (obj instanceof OSQLFunction)
return (OSQLFunction) obj;
else {
// it's a class
final Class<?> clazz = (Class<?>) obj;
try {
return (OSQLFunction) clazz.newInstance();
} catch (Exception e) {
throw new OCommandExecutionException("Error in creation of function " + name
+ "(). Probably there is not an empty constructor or the constructor generates errors", e);
}
}
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_ODefaultSQLFunctionFactory.java
|
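The factory above mixes two registration styles: stateless functions are stored as shared singleton instances, stateful ones as Class objects instantiated per lookup. A minimal sketch of that pattern with a hypothetical Fn interface:
    interface Fn { Object eval(Object... args); }
    class CountFn implements Fn { private int n; public Object eval(Object... args) { return ++n; } }
    Map<String, Object> registry = new HashMap<String, Object>();
    registry.put("COALESCE", new Fn() { // stateless: one shared instance
        public Object eval(Object... args) { return args.length > 0 ? args[0] : null; }
    });
    registry.put("COUNT", CountFn.class); // stateful: fresh instance per lookup
    static Fn create(Map<String, Object> registry, String name) throws Exception {
        Object entry = registry.get(name);
        // singletons are returned as-is; classes get a new instance per call
        return entry instanceof Fn ? (Fn) entry : (Fn) ((Class<?>) entry).newInstance();
    }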
3,367 |
public final class BasicOperationScheduler {
public static final int TERMINATION_TIMEOUT_SECONDS = 3;
private final ILogger logger;
private final Node node;
private final ExecutionService executionService;
private final BasicOperationProcessor processor;
//the generic workqueues are shared between all generic operation threads, so that work can be stolen
//and a task gets processed as quickly as possible.
private final BlockingQueue genericWorkQueue = new LinkedBlockingQueue();
private final ConcurrentLinkedQueue genericPriorityWorkQueue = new ConcurrentLinkedQueue();
//all operations for specific partitions will be executed on these threads, e.g. map.put(key,value).
final OperationThread[] partitionOperationThreads;
//all operations that are not specific for a partition will be executed here, e.g heartbeat or map.size
final OperationThread[] genericOperationThreads;
//The genericOperationRandom is used when a generic operation is scheduled, and a generic OperationThread
//needs to be selected.
//todo:
//We could have a look at ThreadLocalRandom, but it requires Java 7, so some kind of reflection
//could do the trick to use something less painful.
private final Random genericOperationRandom = new Random();
private final ResponseThread responseThread;
private volatile boolean shutdown;
//The trigger is used when a priority message is sent and offered to the operation-thread priority queue.
//To wake up the operation thread, a priorityTaskTrigger is also sent to its regular blocking queue.
private final Runnable priorityTaskTrigger = new Runnable() {
@Override
public void run() {
}
@Override
public String toString() {
return "TriggerTask";
}
};
public BasicOperationScheduler(Node node,
ExecutionService executionService,
BasicOperationProcessor processor) {
this.executionService = executionService;
this.logger = node.getLogger(BasicOperationScheduler.class);
this.node = node;
this.processor = processor;
this.genericOperationThreads = new OperationThread[getGenericOperationThreadCount()];
initOperationThreads(genericOperationThreads, new GenericOperationThreadFactory());
this.partitionOperationThreads = new OperationThread[getPartitionOperationThreadCount()];
initOperationThreads(partitionOperationThreads, new PartitionOperationThreadFactory());
this.responseThread = new ResponseThread();
responseThread.start();
logger.info("Starting with " + genericOperationThreads.length + " generic operation threads and "
+ partitionOperationThreads.length + " partition operation threads.");
}
private static void initOperationThreads(OperationThread[] operationThreads, ThreadFactory threadFactory) {
for (int threadId = 0; threadId < operationThreads.length; threadId++) {
OperationThread operationThread = (OperationThread) threadFactory.newThread(null);
operationThreads[threadId] = operationThread;
operationThread.start();
}
}
private int getGenericOperationThreadCount() {
int threadCount = node.getGroupProperties().GENERIC_OPERATION_THREAD_COUNT.getInteger();
if (threadCount <= 0) {
int coreSize = Runtime.getRuntime().availableProcessors();
threadCount = coreSize * 2;
}
return threadCount;
}
private int getPartitionOperationThreadCount() {
int threadCount = node.getGroupProperties().PARTITION_OPERATION_THREAD_COUNT.getInteger();
if (threadCount <= 0) {
int coreSize = Runtime.getRuntime().availableProcessors();
threadCount = coreSize * 2;
}
return threadCount;
}
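// A partition id of -1 marks an operation that is not bound to any particular partition.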
int getPartitionIdForExecution(Operation op) {
return op instanceof PartitionAwareOperation ? op.getPartitionId() : -1;
}
boolean isAllowedToRunInCurrentThread(Operation op) {
return isAllowedToRunInCurrentThread(getPartitionIdForExecution(op));
}
boolean isInvocationAllowedFromCurrentThread(Operation op) {
return isInvocationAllowedFromCurrentThread(getPartitionIdForExecution(op));
}
boolean isAllowedToRunInCurrentThread(int partitionId) {
//todo: do we want to allow non partition specific tasks to be run on a partitionSpecific operation thread?
if (partitionId < 0) {
return true;
}
Thread currentThread = Thread.currentThread();
//we are only allowed to execute partition aware actions on an OperationThread.
if (!(currentThread instanceof OperationThread)) {
return false;
}
OperationThread operationThread = (OperationThread) currentThread;
//if the operationThread is a not a partition specific operation thread, then we are not allowed to execute
//partition specific operations on it.
if (!operationThread.isPartitionSpecific) {
return false;
}
//so it is an partition operation thread, now we need to make sure that this operation thread is allowed
//to execute operations for this particular partitionId.
int threadId = operationThread.threadId;
return toPartitionThreadIndex(partitionId) == threadId;
}
boolean isInvocationAllowedFromCurrentThread(int partitionId) {
Thread currentThread = Thread.currentThread();
if (currentThread instanceof OperationThread) {
if (partitionId > -1) {
//todo: we need to check for isPartitionSpecific
int threadId = ((OperationThread) currentThread).threadId;
return toPartitionThreadIndex(partitionId) == threadId;
}
return true;
}
return true;
}
public int getOperationExecutorQueueSize() {
int size = 0;
for (OperationThread t : partitionOperationThreads) {
size += t.workQueue.size();
}
size += genericWorkQueue.size();
return size;
}
public int getPriorityOperationExecutorQueueSize() {
int size = 0;
for (OperationThread t : partitionOperationThreads) {
size += t.priorityWorkQueue.size();
}
size += genericPriorityWorkQueue.size();
return size;
}
public int getResponseQueueSize() {
return responseThread.workQueue.size();
}
public void execute(Operation op) {
String executorName = op.getExecutorName();
if (executorName == null) {
int partitionId = getPartitionIdForExecution(op);
boolean hasPriority = op.isUrgent();
execute(op, partitionId, hasPriority);
} else {
executeOnExternalExecutor(op, executorName);
}
}
private void executeOnExternalExecutor(Operation op, String executorName) {
ExecutorService executor = executionService.getExecutor(executorName);
if (executor == null) {
throw new IllegalStateException("Could not found executor with name: " + executorName);
}
if (op instanceof PartitionAware) {
throw new IllegalStateException("PartitionAwareOperation " + op + " can't be executed on a " +
"custom executor with name: " + executorName);
}
if (op instanceof UrgentSystemOperation) {
throw new IllegalStateException("UrgentSystemOperation " + op + " can't be executed on a custom " +
"executor with name: " + executorName);
}
executor.execute(new LocalOperationProcessor(op));
}
public void execute(Packet packet) {
try {
if (packet.isHeaderSet(Packet.HEADER_RESPONSE)) {
//it is a response packet.
responseThread.workQueue.add(packet);
} else {
//it must be an operation packet
int partitionId = packet.getPartitionId();
boolean hasPriority = packet.isUrgent();
execute(packet, partitionId, hasPriority);
}
} catch (RejectedExecutionException e) {
if (node.nodeEngine.isActive()) {
throw e;
}
}
}
private void execute(Object task, int partitionId, boolean priority) {
if (task == null) {
throw new NullPointerException();
}
BlockingQueue workQueue;
Queue priorityWorkQueue;
if (partitionId < 0) {
workQueue = genericWorkQueue;
priorityWorkQueue = genericPriorityWorkQueue;
} else {
OperationThread partitionOperationThread = partitionOperationThreads[toPartitionThreadIndex(partitionId)];
workQueue = partitionOperationThread.workQueue;
priorityWorkQueue = partitionOperationThread.priorityWorkQueue;
}
if (priority) {
offerWork(priorityWorkQueue, task);
offerWork(workQueue, priorityTaskTrigger);
} else {
offerWork(workQueue, task);
}
}
private void offerWork(Queue queue, Object task) {
//in 3.3 we are going to apply backpressure on overload and then we are going to do something
//with the return values of the offer methods.
//Currently the queues are all unbounded, so this can't happen anyway.
boolean offer = queue.offer(task);
if (!offer) {
logger.severe("Failed to offer " + task + " to BasicOperationScheduler due to overload");
}
}
private int toPartitionThreadIndex(int partitionId) {
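// Deterministic mapping: all operations for a given partition always land on the same thread.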
return partitionId % partitionOperationThreads.length;
}
public void shutdown() {
shutdown = true;
interruptAll(partitionOperationThreads);
interruptAll(genericOperationThreads);
awaitTermination(partitionOperationThreads);
awaitTermination(genericOperationThreads);
}
private static void interruptAll(OperationThread[] operationThreads) {
for (OperationThread thread : operationThreads) {
thread.interrupt();
}
}
private static void awaitTermination(OperationThread[] operationThreads) {
for (OperationThread thread : operationThreads) {
try {
thread.awaitTermination(TERMINATION_TIMEOUT_SECONDS, TimeUnit.SECONDS);
} catch (InterruptedException ignored) {
Thread.currentThread().interrupt();
}
}
}
@Override
public String toString() {
return "BasicOperationScheduler{"
+ "node=" + node.getThisAddress()
+ '}';
}
private class GenericOperationThreadFactory implements ThreadFactory {
private int threadId;
@Override
public OperationThread newThread(Runnable ignore) {
String threadName = node.getThreadPoolNamePrefix("generic-operation") + threadId;
OperationThread thread = new OperationThread(threadName, false, threadId, genericWorkQueue,
genericPriorityWorkQueue);
threadId++;
return thread;
}
}
private class PartitionOperationThreadFactory implements ThreadFactory {
private int threadId;
@Override
public Thread newThread(Runnable ignore) {
String threadName = node.getThreadPoolNamePrefix("partition-operation") + threadId;
//each partition operation thread, has its own workqueues because operations are partition specific and can't
//be executed by other threads.
LinkedBlockingQueue workQueue = new LinkedBlockingQueue();
ConcurrentLinkedQueue priorityWorkQueue = new ConcurrentLinkedQueue();
OperationThread thread = new OperationThread(threadName, true, threadId, workQueue, priorityWorkQueue);
threadId++;
return thread;
}
}
final class OperationThread extends Thread {
private final int threadId;
private final boolean isPartitionSpecific;
private final BlockingQueue workQueue;
private final Queue priorityWorkQueue;
public OperationThread(String name, boolean isPartitionSpecific,
int threadId, BlockingQueue workQueue, Queue priorityWorkQueue) {
super(node.threadGroup, name);
setContextClassLoader(node.getConfigClassLoader());
this.isPartitionSpecific = isPartitionSpecific;
this.workQueue = workQueue;
this.priorityWorkQueue = priorityWorkQueue;
this.threadId = threadId;
}
@Override
public void run() {
try {
doRun();
} catch (OutOfMemoryError e) {
onOutOfMemory(e);
} catch (Throwable t) {
logger.severe(t);
}
}
private void doRun() {
for (; ; ) {
Object task;
try {
task = workQueue.take();
} catch (InterruptedException e) {
if (shutdown) {
return;
}
continue;
}
if (shutdown) {
return;
}
processPriorityMessages();
process(task);
}
}
private void process(Object task) {
try {
processor.process(task);
} catch (Exception e) {
logger.severe("Failed to process task: " + task + " on partitionThread:" + getName());
}
}
private void processPriorityMessages() {
for (; ; ) {
Object task = priorityWorkQueue.poll();
if (task == null) {
return;
}
process(task);
}
}
public void awaitTermination(int timeout, TimeUnit unit) throws InterruptedException {
join(unit.toMillis(timeout));
}
}
private class ResponseThread extends Thread {
private final BlockingQueue<Packet> workQueue = new LinkedBlockingQueue<Packet>();
public ResponseThread() {
super(node.threadGroup, node.getThreadNamePrefix("response"));
setContextClassLoader(node.getConfigClassLoader());
}
public void run() {
try {
doRun();
} catch (OutOfMemoryError e) {
onOutOfMemory(e);
} catch (Throwable t) {
logger.severe(t);
}
}
private void doRun() {
for (; ; ) {
Object task;
try {
task = workQueue.take();
} catch (InterruptedException e) {
if (shutdown) {
return;
}
continue;
}
if (shutdown) {
return;
}
process(task);
}
}
private void process(Object task) {
try {
processor.process(task);
} catch (Exception e) {
logger.severe("Failed to process task: " + task + " on partitionThread:" + getName());
}
}
}
/**
* Process the operation that has been send locally to this OperationService.
*/
private class LocalOperationProcessor implements Runnable {
private final Operation op;
private LocalOperationProcessor(Operation op) {
this.op = op;
}
@Override
public void run() {
processor.process(op);
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_spi_impl_BasicOperationScheduler.java
|
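The priorityTaskTrigger above is a no-op marker: urgent tasks go into a non-blocking priority queue, and the marker is offered to the blocking work queue purely to wake the worker so it drains priorities first. A standalone sketch of that pattern (urgentTask is a hypothetical Runnable):
    BlockingQueue<Runnable> work = new LinkedBlockingQueue<Runnable>();
    Queue<Runnable> priority = new ConcurrentLinkedQueue<Runnable>();
    Runnable trigger = new Runnable() { public void run() { } }; // wake-up only, does nothing itself
    // producer side:
    priority.offer(urgentTask);
    work.offer(trigger);
    // worker side:
    Runnable task = work.take(); // may throw InterruptedException
    Runnable p;
    while ((p = priority.poll()) != null) {
        p.run(); // drain urgent work before the regular task
    }
    task.run();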
23 |
{
@Override
public boolean matchesSafely( LogEntry.Done done )
{
return done != null && done.getIdentifier() == identifier;
}
@Override
public void describeTo( Description description )
{
description.appendText( String.format( "Done[%d]", identifier ) );
}
};
| 1no label
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_LogMatchers.java
|
4,655 |
public class PercolateContext extends SearchContext {
public boolean limit;
public int size;
public boolean doSort;
public byte percolatorTypeId;
private boolean trackScores;
private final PercolateShardRequest request;
private final SearchShardTarget searchShardTarget;
private final IndexService indexService;
private final IndexFieldDataService fieldDataService;
private final IndexShard indexShard;
private final CacheRecycler cacheRecycler;
private final PageCacheRecycler pageCacheRecycler;
private final ConcurrentMap<HashedBytesRef, Query> percolateQueries;
private String[] types;
private Engine.Searcher docEngineSearcher;
private Engine.Searcher engineSearcher;
private ContextIndexSearcher searcher;
private SearchContextHighlight highlight;
private SearchLookup searchLookup;
private ParsedQuery parsedQuery;
private Query query;
private boolean queryRewritten;
private Query percolateQuery;
private FetchSubPhase.HitContext hitContext;
private SearchContextFacets facets;
private SearchContextAggregations aggregations;
private QuerySearchResult querySearchResult;
private Sort sort;
public PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, IndexShard indexShard, IndexService indexService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler) {
this.request = request;
this.indexShard = indexShard;
this.indexService = indexService;
this.fieldDataService = indexService.fieldData();
this.searchShardTarget = searchShardTarget;
this.percolateQueries = indexShard.percolateRegistry().percolateQueries();
this.types = new String[]{request.documentType()};
this.cacheRecycler = cacheRecycler;
this.pageCacheRecycler = pageCacheRecycler;
this.querySearchResult = new QuerySearchResult(0, searchShardTarget);
this.engineSearcher = indexShard.acquireSearcher("percolate");
this.searcher = new ContextIndexSearcher(this, engineSearcher);
}
public void initialize(final MemoryIndex memoryIndex, ParsedDocument parsedDocument) {
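// Wrap the single-segment in-memory index of the percolated document in an Engine.Searcher facade.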
final IndexSearcher docSearcher = memoryIndex.createSearcher();
final IndexReader topLevelReader = docSearcher.getIndexReader();
AtomicReaderContext readerContext = topLevelReader.leaves().get(0);
docEngineSearcher = new Engine.Searcher() {
@Override
public String source() {
return "percolate";
}
@Override
public IndexReader reader() {
return topLevelReader;
}
@Override
public IndexSearcher searcher() {
return docSearcher;
}
@Override
public boolean release() throws ElasticsearchException {
try {
docSearcher.getIndexReader().close();
memoryIndex.reset();
} catch (IOException e) {
throw new ElasticsearchException("failed to close percolator in-memory index", e);
}
return true;
}
};
lookup().setNextReader(readerContext);
lookup().setNextDocId(0);
lookup().source().setNextSource(parsedDocument.source());
Map<String, SearchHitField> fields = new HashMap<String, SearchHitField>();
for (IndexableField field : parsedDocument.rootDoc().getFields()) {
fields.put(field.name(), new InternalSearchHitField(field.name(), ImmutableList.of()));
}
hitContext = new FetchSubPhase.HitContext();
hitContext.reset(new InternalSearchHit(0, "unknown", new StringText(request.documentType()), fields), readerContext, 0, topLevelReader, 0, new JustSourceFieldsVisitor());
}
public IndexSearcher docSearcher() {
return docEngineSearcher.searcher();
}
public IndexShard indexShard() {
return indexShard;
}
public IndexService indexService() {
return indexService;
}
public ConcurrentMap<HashedBytesRef, Query> percolateQueries() {
return percolateQueries;
}
public Query percolateQuery() {
return percolateQuery;
}
public void percolateQuery(Query percolateQuery) {
this.percolateQuery = percolateQuery;
}
public FetchSubPhase.HitContext hitContext() {
return hitContext;
}
@Override
public SearchContextHighlight highlight() {
return highlight;
}
@Override
public void highlight(SearchContextHighlight highlight) {
this.highlight = highlight;
}
@Override
public SearchShardTarget shardTarget() {
return searchShardTarget;
}
@Override
public SearchLookup lookup() {
if (searchLookup == null) {
searchLookup = new SearchLookup(mapperService(), fieldData(), types);
}
return searchLookup;
}
@Override
public boolean release() throws ElasticsearchException {
try {
if (docEngineSearcher != null) {
IndexReader indexReader = docEngineSearcher.reader();
fieldDataService.clear(indexReader);
indexService.cache().clear(indexReader);
return docEngineSearcher.release();
} else {
return false;
}
} finally {
engineSearcher.release();
}
}
@Override
public MapperService mapperService() {
return indexService.mapperService();
}
@Override
public SearchContext parsedQuery(ParsedQuery query) {
this.parsedQuery = query;
this.query = query.query();
this.queryRewritten = false;
return this;
}
@Override
public ParsedQuery parsedQuery() {
return parsedQuery;
}
@Override
public Query query() {
return query;
}
@Override
public boolean queryRewritten() {
return queryRewritten;
}
@Override
public SearchContext updateRewriteQuery(Query rewriteQuery) {
queryRewritten = true;
query = rewriteQuery;
return this;
}
@Override
public String[] types() {
return types;
}
public void types(String[] types) {
this.types = types;
searchLookup = new SearchLookup(mapperService(), fieldData(), types);
}
@Override
public IndexFieldDataService fieldData() {
return fieldDataService;
}
@Override
public SearchContextAggregations aggregations() {
return aggregations;
}
@Override
public SearchContext aggregations(SearchContextAggregations aggregations) {
this.aggregations = aggregations;
return this;
}
@Override
public SearchContextFacets facets() {
return facets;
}
@Override
public SearchContext facets(SearchContextFacets facets) {
this.facets = facets;
return this;
}
// Unused:
@Override
public boolean clearAndRelease() {
throw new UnsupportedOperationException();
}
@Override
public void preProcess() {
throw new UnsupportedOperationException();
}
@Override
public Filter searchFilter(String[] types) {
throw new UnsupportedOperationException();
}
@Override
public long id() {
throw new UnsupportedOperationException();
}
@Override
public String source() {
throw new UnsupportedOperationException();
}
@Override
public ShardSearchRequest request() {
throw new UnsupportedOperationException();
}
@Override
public SearchType searchType() {
throw new UnsupportedOperationException();
}
@Override
public SearchContext searchType(SearchType searchType) {
throw new UnsupportedOperationException();
}
@Override
public int numberOfShards() {
throw new UnsupportedOperationException();
}
@Override
public boolean hasTypes() {
throw new UnsupportedOperationException();
}
@Override
public float queryBoost() {
throw new UnsupportedOperationException();
}
@Override
public SearchContext queryBoost(float queryBoost) {
throw new UnsupportedOperationException();
}
@Override
public long nowInMillis() {
throw new UnsupportedOperationException();
}
@Override
public Scroll scroll() {
throw new UnsupportedOperationException();
}
@Override
public SearchContext scroll(Scroll scroll) {
throw new UnsupportedOperationException();
}
@Override
public SuggestionSearchContext suggest() {
throw new UnsupportedOperationException();
}
@Override
public void suggest(SuggestionSearchContext suggest) {
throw new UnsupportedOperationException();
}
@Override
public List<RescoreSearchContext> rescore() {
throw new UnsupportedOperationException();
}
@Override
public void addRescore(RescoreSearchContext rescore) {
throw new UnsupportedOperationException();
}
@Override
public boolean hasFieldDataFields() {
throw new UnsupportedOperationException();
}
@Override
public FieldDataFieldsContext fieldDataFields() {
throw new UnsupportedOperationException();
}
@Override
public boolean hasScriptFields() {
throw new UnsupportedOperationException();
}
@Override
public ScriptFieldsContext scriptFields() {
throw new UnsupportedOperationException();
}
@Override
public boolean hasPartialFields() {
throw new UnsupportedOperationException();
}
@Override
public PartialFieldsContext partialFields() {
throw new UnsupportedOperationException();
}
@Override
public boolean sourceRequested() {
throw new UnsupportedOperationException();
}
@Override
public boolean hasFetchSourceContext() {
throw new UnsupportedOperationException();
}
@Override
public FetchSourceContext fetchSourceContext() {
throw new UnsupportedOperationException();
}
@Override
public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) {
throw new UnsupportedOperationException();
}
@Override
public ContextIndexSearcher searcher() {
return searcher;
}
@Override
public AnalysisService analysisService() {
throw new UnsupportedOperationException();
}
@Override
public IndexQueryParserService queryParserService() {
throw new UnsupportedOperationException();
}
@Override
public SimilarityService similarityService() {
throw new UnsupportedOperationException();
}
@Override
public ScriptService scriptService() {
throw new UnsupportedOperationException();
}
@Override
public CacheRecycler cacheRecycler() {
return cacheRecycler;
}
@Override
public PageCacheRecycler pageCacheRecycler() {
return pageCacheRecycler;
}
@Override
public FilterCache filterCache() {
throw new UnsupportedOperationException();
}
@Override
public DocSetCache docSetCache() {
throw new UnsupportedOperationException();
}
@Override
public IdCache idCache() {
throw new UnsupportedOperationException();
}
@Override
public long timeoutInMillis() {
return -1;
}
@Override
public void timeoutInMillis(long timeoutInMillis) {
throw new UnsupportedOperationException();
}
@Override
public SearchContext minimumScore(float minimumScore) {
throw new UnsupportedOperationException();
}
@Override
public Float minimumScore() {
return null;
}
@Override
public SearchContext sort(Sort sort) {
this.sort = sort;
return this;
}
@Override
public Sort sort() {
return sort;
}
@Override
public SearchContext trackScores(boolean trackScores) {
this.trackScores = trackScores;
return this;
}
@Override
public boolean trackScores() {
return trackScores;
}
@Override
public SearchContext parsedPostFilter(ParsedFilter postFilter) {
throw new UnsupportedOperationException();
}
@Override
public ParsedFilter parsedPostFilter() {
return null;
}
@Override
public Filter aliasFilter() {
throw new UnsupportedOperationException();
}
@Override
public int from() {
throw new UnsupportedOperationException();
}
@Override
public SearchContext from(int from) {
throw new UnsupportedOperationException();
}
@Override
public int size() {
throw new UnsupportedOperationException();
}
@Override
public SearchContext size(int size) {
throw new UnsupportedOperationException();
}
@Override
public boolean hasFieldNames() {
throw new UnsupportedOperationException();
}
@Override
public List<String> fieldNames() {
throw new UnsupportedOperationException();
}
@Override
public void emptyFieldNames() {
throw new UnsupportedOperationException();
}
@Override
public boolean explain() {
throw new UnsupportedOperationException();
}
@Override
public void explain(boolean explain) {
throw new UnsupportedOperationException();
}
@Override
public List<String> groupStats() {
throw new UnsupportedOperationException();
}
@Override
public void groupStats(List<String> groupStats) {
throw new UnsupportedOperationException();
}
@Override
public boolean version() {
throw new UnsupportedOperationException();
}
@Override
public void version(boolean version) {
throw new UnsupportedOperationException();
}
@Override
public int[] docIdsToLoad() {
throw new UnsupportedOperationException();
}
@Override
public int docIdsToLoadFrom() {
throw new UnsupportedOperationException();
}
@Override
public int docIdsToLoadSize() {
throw new UnsupportedOperationException();
}
@Override
public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int docsIdsToLoadSize) {
throw new UnsupportedOperationException();
}
@Override
public void accessed(long accessTime) {
throw new UnsupportedOperationException();
}
@Override
public long lastAccessTime() {
throw new UnsupportedOperationException();
}
@Override
public long keepAlive() {
throw new UnsupportedOperationException();
}
@Override
public void keepAlive(long keepAlive) {
throw new UnsupportedOperationException();
}
@Override
public DfsSearchResult dfsResult() {
throw new UnsupportedOperationException();
}
@Override
public QuerySearchResult queryResult() {
return querySearchResult;
}
@Override
public FetchSearchResult fetchResult() {
throw new UnsupportedOperationException();
}
@Override
public void addReleasable(Releasable releasable) {
throw new UnsupportedOperationException();
}
@Override
public void clearReleasables() {
throw new UnsupportedOperationException();
}
@Override
public ScanContext scanContext() {
throw new UnsupportedOperationException();
}
@Override
public MapperService.SmartNameFieldMappers smartFieldMappers(String name) {
throw new UnsupportedOperationException();
}
@Override
public FieldMappers smartNameFieldMappers(String name) {
throw new UnsupportedOperationException();
}
@Override
public FieldMapper smartNameFieldMapper(String name) {
return mapperService().smartNameFieldMapper(name, types);
}
@Override
public MapperService.SmartNameObjectMapper smartNameObjectMapper(String name) {
throw new UnsupportedOperationException();
}
}
| 1no label
|
src_main_java_org_elasticsearch_percolator_PercolateContext.java
|
3,705 |
public class VersionFieldMapper extends AbstractFieldMapper<Long> implements InternalMapper, RootMapper {
public static final String NAME = "_version";
public static final String CONTENT_TYPE = "_version";
public static class Defaults {
public static final String NAME = VersionFieldMapper.NAME;
public static final float BOOST = 1.0f;
public static final FieldType FIELD_TYPE = NumericDocValuesField.TYPE;
}
public static class Builder extends Mapper.Builder<Builder, VersionFieldMapper> {
DocValuesFormatProvider docValuesFormat;
public Builder() {
super(Defaults.NAME);
}
@Override
public VersionFieldMapper build(BuilderContext context) {
return new VersionFieldMapper(docValuesFormat);
}
public Builder docValuesFormat(DocValuesFormatProvider docValuesFormat) {
this.docValuesFormat = docValuesFormat;
return this;
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
Builder builder = version();
for (Map.Entry<String, Object> entry : node.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals(DOC_VALUES_FORMAT)) {
String docValuesFormatName = fieldNode.toString();
builder.docValuesFormat(parserContext.docValuesFormatService().get(docValuesFormatName));
}
}
return builder;
}
}
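    // reuse a single NumericDocValuesField per thread; parseCreateField overwrites its value for
    // every document, which avoids allocating a new field instance on each parse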
private final ThreadLocal<Field> fieldCache = new ThreadLocal<Field>() {
@Override
protected Field initialValue() {
return new NumericDocValuesField(NAME, -1L);
}
};
public VersionFieldMapper() {
this(null);
}
VersionFieldMapper(DocValuesFormatProvider docValuesFormat) {
super(new Names(NAME, NAME, NAME, NAME), Defaults.BOOST, Defaults.FIELD_TYPE, null, null, null, null, docValuesFormat, null, null, null, ImmutableSettings.EMPTY);
}
@Override
protected String defaultDocValuesFormat() {
return "disk";
}
@Override
public void preParse(ParseContext context) throws IOException {
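        // _version is added in the pre-parse phase so the field is present before the rest of the document is parsed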
super.parse(context);
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
// see UidFieldMapper.parseCreateField
final Field version = fieldCache.get();
context.version(version);
fields.add(version);
}
@Override
public void parse(ParseContext context) throws IOException {
// _version added in preparse
}
@Override
public Long value(Object value) {
if (value == null || (value instanceof Long)) {
return (Long) value;
} else {
return Long.parseLong(value.toString());
}
}
@Override
public void postParse(ParseContext context) throws IOException {
// In the case of nested docs, let's fill nested docs with version=0 so that Lucene doesn't write a Bitset for documents
// that don't have the field
for (int i = 1; i < context.docs().size(); i++) {
final Document doc = context.docs().get(i);
doc.add(new NumericDocValuesField(NAME, 0L));
}
}
@Override
public void validate(ParseContext context) throws MapperParsingException {
}
@Override
public boolean includeInObject() {
return false;
}
@Override
public FieldType defaultFieldType() {
return Defaults.FIELD_TYPE;
}
@Override
public FieldDataType defaultFieldDataType() {
return new FieldDataType("long");
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
if (!includeDefaults && (docValuesFormat == null || docValuesFormat.name().equals(defaultDocValuesFormat()))) {
return builder;
}
builder.startObject(CONTENT_TYPE);
if (docValuesFormat != null) {
if (includeDefaults || !docValuesFormat.name().equals(defaultDocValuesFormat())) {
builder.field(DOC_VALUES_FORMAT, docValuesFormat.name());
}
} else {
String format = defaultDocValuesFormat();
if (format == null) {
format = DocValuesFormatService.DEFAULT_FORMAT;
}
builder.field(DOC_VALUES_FORMAT, format);
}
builder.endObject();
return builder;
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
if (mergeContext.mergeFlags().simulate()) {
return;
}
AbstractFieldMapper<?> fieldMergeWith = (AbstractFieldMapper<?>) mergeWith;
if (fieldMergeWith.docValuesFormatProvider() != null) {
this.docValuesFormat = fieldMergeWith.docValuesFormatProvider();
}
}
@Override
public void close() {
fieldCache.remove();
}
@Override
public boolean hasDocValues() {
return true;
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_internal_VersionFieldMapper.java
|
164 |
private class CallbackImpl implements Callback<Object> {
private final ClientEndpoint endpoint;
public CallbackImpl(ClientEndpoint endpoint) {
this.endpoint = endpoint;
}
@Override
public void notify(Object object) {
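            // filter the raw result for the client, send it back under the original call id, then run the post-response hook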
endpoint.sendResponse(filter(object), getCallId());
afterResponse();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_client_PartitionClientRequest.java
|
3,701 |
public class UidFieldMapper extends AbstractFieldMapper<Uid> implements InternalMapper, RootMapper {
public static final String NAME = "_uid";
public static final String CONTENT_TYPE = "_uid";
public static class Defaults extends AbstractFieldMapper.Defaults {
public static final String NAME = UidFieldMapper.NAME;
public static final String INDEX_NAME = UidFieldMapper.NAME;
public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
public static final FieldType NESTED_FIELD_TYPE;
static {
FIELD_TYPE.setIndexed(true);
FIELD_TYPE.setTokenized(false);
FIELD_TYPE.setStored(true);
FIELD_TYPE.setOmitNorms(true);
FIELD_TYPE.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);
FIELD_TYPE.freeze();
NESTED_FIELD_TYPE = new FieldType(FIELD_TYPE);
NESTED_FIELD_TYPE.setStored(false);
NESTED_FIELD_TYPE.freeze();
}
}
public static class Builder extends AbstractFieldMapper.Builder<Builder, UidFieldMapper> {
public Builder() {
super(Defaults.NAME, Defaults.FIELD_TYPE);
indexName = Defaults.INDEX_NAME;
}
@Override
public UidFieldMapper build(BuilderContext context) {
return new UidFieldMapper(name, indexName, docValues, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
Builder builder = uid();
parseField(builder, builder.name, node, parserContext);
return builder;
}
}
public UidFieldMapper() {
this(Defaults.NAME);
}
protected UidFieldMapper(String name) {
this(name, name, null, null, null, null, ImmutableSettings.EMPTY);
}
protected UidFieldMapper(String name, String indexName, Boolean docValues, PostingsFormatProvider postingsFormat, DocValuesFormatProvider docValuesFormat, @Nullable Settings fieldDataSettings, Settings indexSettings) {
super(new Names(name, indexName, indexName, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), docValues,
Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, postingsFormat, docValuesFormat, null, null, fieldDataSettings, indexSettings);
}
@Override
public FieldType defaultFieldType() {
return Defaults.FIELD_TYPE;
}
@Override
public FieldDataType defaultFieldDataType() {
return new FieldDataType("string");
}
@Override
protected String defaultPostingFormat() {
return "default";
}
@Override
public void preParse(ParseContext context) throws IOException {
// if we have the id provided, fill it, and parse now
if (context.sourceToParse().id() != null) {
context.id(context.sourceToParse().id());
super.parse(context);
}
}
@Override
public void postParse(ParseContext context) throws IOException {
if (context.id() == null && !context.sourceToParse().flyweight()) {
throw new MapperParsingException("No id found while parsing the content source");
}
// if we did not have the id as part of the sourceToParse, then we need to parse it here
// it would have been filled in the _id parse phase
if (context.sourceToParse().id() == null) {
super.parse(context);
                // since we did not have the uid in the pre-parse phase, it was not added automatically to the
                // nested docs as they were created, so we need to make sure we add it to all the nested docs...
if (context.docs().size() > 1) {
final IndexableField uidField = context.rootDoc().getField(UidFieldMapper.NAME);
assert uidField != null;
// we need to go over the docs and add it...
for (int i = 1; i < context.docs().size(); i++) {
final Document doc = context.docs().get(i);
doc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), Defaults.NESTED_FIELD_TYPE));
}
}
}
}
@Override
public void parse(ParseContext context) throws IOException {
// nothing to do here, we either do it in post parse, or in pre parse.
}
@Override
public void validate(ParseContext context) throws MapperParsingException {
}
@Override
public boolean includeInObject() {
return false;
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
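        // build the uid ("type#id") once, expose it on the context so postParse can copy it to nested docs, and index it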
Field uid = new Field(NAME, Uid.createUid(context.stringBuilder(), context.type(), context.id()), Defaults.FIELD_TYPE);
context.uid(uid);
fields.add(uid);
if (hasDocValues()) {
fields.add(new BinaryDocValuesField(NAME, new BytesRef(uid.stringValue())));
}
}
@Override
public Uid value(Object value) {
if (value == null) {
return null;
}
return Uid.createUid(value.toString());
}
public Term term(String type, String id) {
return term(Uid.createUid(type, id));
}
public Term term(String uid) {
return names().createIndexNameTerm(uid);
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
// if defaults, don't output
if (!includeDefaults && customFieldDataSettings == null
&& (postingsFormat == null || postingsFormat.name().equals(defaultPostingFormat()))
&& (docValuesFormat == null || docValuesFormat.name().equals(defaultDocValuesFormat()))) {
return builder;
}
builder.startObject(CONTENT_TYPE);
if (postingsFormat != null) {
if (includeDefaults || !postingsFormat.name().equals(defaultPostingFormat())) {
builder.field("postings_format", postingsFormat.name());
}
} else if (includeDefaults) {
String format = defaultPostingFormat();
if (format == null) {
format = PostingsFormatService.DEFAULT_FORMAT;
}
builder.field("postings_format", format);
}
if (docValuesFormat != null) {
if (includeDefaults || !docValuesFormat.name().equals(defaultDocValuesFormat())) {
builder.field(DOC_VALUES_FORMAT, docValuesFormat.name());
}
} else if (includeDefaults) {
String format = defaultDocValuesFormat();
if (format == null) {
format = DocValuesFormatService.DEFAULT_FORMAT;
}
builder.field(DOC_VALUES_FORMAT, format);
}
if (customFieldDataSettings != null) {
builder.field("fielddata", (Map) customFieldDataSettings.getAsMap());
} else if (includeDefaults) {
builder.field("fielddata", (Map) fieldDataType.getSettings().getAsMap());
}
builder.endObject();
return builder;
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
AbstractFieldMapper<?> fieldMergeWith = (AbstractFieldMapper<?>) mergeWith;
// do nothing here, no merging, but also no exception
if (!mergeContext.mergeFlags().simulate()) {
// apply changeable values
if (fieldMergeWith.postingsFormatProvider() != null) {
this.postingsFormat = fieldMergeWith.postingsFormatProvider();
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_internal_UidFieldMapper.java
|
1,498 |
public class AllocationService extends AbstractComponent {
private final AllocationDeciders allocationDeciders;
private final ClusterInfoService clusterInfoService;
private final ShardsAllocators shardsAllocators;
@Inject
public AllocationService(Settings settings, AllocationDeciders allocationDeciders, ShardsAllocators shardsAllocators, ClusterInfoService clusterInfoService) {
super(settings);
this.allocationDeciders = allocationDeciders;
this.shardsAllocators = shardsAllocators;
this.clusterInfoService = clusterInfoService;
}
/**
     * Applies the started shards. Note, the same shard can be passed several times; duplicate applications are ignored.
* <p/>
* <p>If the same instance of the routing table is returned, then no change has been made.</p>
*/
public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List<? extends ShardRouting> startedShards) {
return applyStartedShards(clusterState, startedShards, true);
}
public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List<? extends ShardRouting> startedShards, boolean withReroute) {
RoutingNodes routingNodes = clusterState.routingNodes();
        // shuffle the unassigned shards, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo());
boolean changed = applyStartedShards(routingNodes, startedShards);
if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
}
shardsAllocators.applyStartedShards(allocation);
if (withReroute) {
reroute(allocation);
}
return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
}
public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
return applyFailedShards(clusterState, ImmutableList.of(failedShard));
}
/**
     * Applies the failed shards. Note, the same shard can be passed several times; duplicate applications are ignored.
* <p/>
* <p>If the same instance of the routing table is returned, then no change has been made.</p>
*/
public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, List<ShardRouting> failedShards) {
RoutingNodes routingNodes = clusterState.routingNodes();
        // shuffle the unassigned shards, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), failedShards, clusterInfoService.getClusterInfo());
boolean changed = false;
for (ShardRouting failedShard : failedShards) {
changed |= applyFailedShard(allocation, failedShard, true);
}
if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
}
shardsAllocators.applyFailedShards(allocation);
reroute(allocation);
return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
}
public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands) {
return reroute(clusterState, commands, false);
}
public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean debug) throws ElasticsearchException {
RoutingNodes routingNodes = clusterState.routingNodes();
// we don't shuffle the unassigned shards here, to try and get as close as possible to
// a consistent result of the effect the commands have on the routing
// this allows systems to dry run the commands, see the resulting cluster state, and act on it
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo());
allocation.debugDecision(debug);
// we ignore disable allocation, because commands are explicit
allocation.ignoreDisable(true);
commands.execute(allocation);
// we revert the ignore disable flag, since when rerouting, we want the original setting to take place
allocation.ignoreDisable(false);
// the assumption is that commands will move / act on shards (or fail through exceptions)
// so, there will always be shard "movements", so no need to check on reroute
reroute(allocation);
return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
}
/**
* Reroutes the routing table based on the live nodes.
* <p/>
* <p>If the same instance of the routing table is returned, then no change has been made.
*/
public RoutingAllocation.Result reroute(ClusterState clusterState) {
return reroute(clusterState, false);
}
/**
* Reroutes the routing table based on the live nodes.
* <p/>
* <p>If the same instance of the routing table is returned, then no change has been made.
*/
public RoutingAllocation.Result reroute(ClusterState clusterState, boolean debug) {
RoutingNodes routingNodes = clusterState.routingNodes();
        // shuffle the unassigned shards, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo());
allocation.debugDecision(debug);
if (!reroute(allocation)) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
}
return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
}
/**
     * Only handles reroute, but *without* any reassignment of unassigned shards or rebalancing. Does
     * make sure to handle removed nodes, but only moves their shards to UNASSIGNED; it does not
     * reassign them.
*/
public RoutingAllocation.Result rerouteWithNoReassign(ClusterState clusterState) {
return rerouteWithNoReassign(clusterState, false);
}
/**
     * Only handles reroute, but *without* any reassignment of unassigned shards or rebalancing. Does
     * make sure to handle removed nodes, but only moves their shards to UNASSIGNED; it does not
     * reassign them.
*/
public RoutingAllocation.Result rerouteWithNoReassign(ClusterState clusterState, boolean debug) {
RoutingNodes routingNodes = clusterState.routingNodes();
        // shuffle the unassigned shards, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo());
allocation.debugDecision(debug);
boolean changed = false;
// first, clear from the shards any node id they used to belong to that is now dead
changed |= deassociateDeadNodes(allocation);
        // make sure any newly joined data nodes are known to the routing nodes
applyNewNodes(allocation);
// elect primaries *before* allocating unassigned, so backups of primaries that failed
// will be moved to primary state and not wait for primaries to be allocated and recovered (*from gateway*)
changed |= electPrimariesAndUnassignDanglingReplicas(allocation);
if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
}
return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
}
private boolean reroute(RoutingAllocation allocation) {
boolean changed = false;
// first, clear from the shards any node id they used to belong to that is now dead
changed |= deassociateDeadNodes(allocation);
        // make sure any newly joined data nodes are known to the routing nodes
applyNewNodes(allocation);
// elect primaries *before* allocating unassigned, so backups of primaries that failed
// will be moved to primary state and not wait for primaries to be allocated and recovered (*from gateway*)
changed |= electPrimariesAndUnassignDanglingReplicas(allocation);
// now allocate all the unassigned to available nodes
if (allocation.routingNodes().hasUnassigned()) {
changed |= shardsAllocators.allocateUnassigned(allocation);
// elect primaries again, in case this is needed with unassigned allocation
changed |= electPrimariesAndUnassignDanglingReplicas(allocation);
}
// move shards that no longer can be allocated
changed |= moveShards(allocation);
// rebalance
changed |= shardsAllocators.rebalance(allocation);
assert RoutingNodes.assertShardStats(allocation.routingNodes());
return changed;
}
private boolean moveShards(RoutingAllocation allocation) {
boolean changed = false;
// create a copy of the shards interleaving between nodes, and check if they can remain
List<MutableShardRouting> shards = new ArrayList<MutableShardRouting>();
int index = 0;
boolean found = true;
final RoutingNodes routingNodes = allocation.routingNodes();
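        // round-robin over the nodes, taking the shard at position `index` from each node in turn,
        // so candidate moves are spread evenly across the nodes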
while (found) {
found = false;
for (RoutingNode routingNode : routingNodes) {
if (index >= routingNode.size()) {
continue;
}
found = true;
shards.add(routingNode.get(index));
}
index++;
}
for (int i = 0; i < shards.size(); i++) {
MutableShardRouting shardRouting = shards.get(i);
// we can only move started shards...
if (!shardRouting.started()) {
continue;
}
final RoutingNode routingNode = routingNodes.node(shardRouting.currentNodeId());
Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
boolean moved = shardsAllocators.move(shardRouting, routingNode, allocation);
if (!moved) {
logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
} else {
assert RoutingNodes.assertShardStats(allocation.routingNodes());
changed = true;
}
}
}
return changed;
}
private boolean electPrimariesAndUnassignDanglingReplicas(RoutingAllocation allocation) {
boolean changed = false;
RoutingNodes routingNodes = allocation.routingNodes();
if (!routingNodes.hasUnassignedPrimaries()) {
// move out if we don't have unassigned primaries
return changed;
}
for (MutableShardRouting shardEntry : routingNodes.unassigned()) {
if (shardEntry.primary()) {
MutableShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry);
if (candidate != null) {
routingNodes.swapPrimaryFlag(shardEntry, candidate);
if (candidate.relocatingNodeId() != null) {
changed = true;
                        // it's also relocating, make sure to move the other routing to primary
RoutingNode node = routingNodes.node(candidate.relocatingNodeId());
if (node != null) {
for (MutableShardRouting shardRouting : node) {
if (shardRouting.shardId().equals(candidate.shardId()) && !shardRouting.primary()) {
routingNodes.swapPrimaryFlag(shardRouting);
break;
}
}
}
}
}
}
}
// go over and remove dangling replicas that are initializing, but we couldn't elect primary ones...
List<ShardRouting> shardsToFail = null;
if (routingNodes.hasUnassignedPrimaries()) {
for (MutableShardRouting shardEntry : routingNodes.unassigned()) {
if (shardEntry.primary()) {
for(MutableShardRouting routing : routingNodes.assignedShards(shardEntry)) {
if (!routing.primary()) {
changed = true;
if (shardsToFail == null) {
shardsToFail = new ArrayList<ShardRouting>();
}
shardsToFail.add(routing);
}
}
}
}
if (shardsToFail != null) {
for (ShardRouting shardToFail : shardsToFail) {
applyFailedShard(allocation, shardToFail, false);
}
}
}
return changed;
}
/**
     * Adds any new (previously unknown) data nodes to the routing nodes.
*/
private void applyNewNodes(RoutingAllocation allocation) {
final RoutingNodes routingNodes = allocation.routingNodes();
for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().dataNodes().values()) {
DiscoveryNode node = cursor.value;
if (!routingNodes.isKnown(node)) {
routingNodes.addNode(node);
}
}
}
private boolean deassociateDeadNodes(RoutingAllocation allocation) {
boolean changed = false;
for (RoutingNodes.RoutingNodesIterator it = allocation.routingNodes().nodes(); it.hasNext(); ) {
RoutingNode node = it.next();
if (allocation.nodes().dataNodes().containsKey(node.nodeId())) {
                // it's a live node, continue
continue;
}
changed = true;
// now, go over all the shards routing on the node, and fail them
for (MutableShardRouting shardRouting : node.copyShards()) {
applyFailedShard(allocation, shardRouting, false);
}
            // it's a dead node; remove it. Note, it's important to remove it *after* we apply the failed shards,
            // since applyFailedShard relies on the fact that the RoutingNode still exists in the list of nodes
it.remove();
}
return changed;
}
private boolean applyStartedShards(RoutingNodes routingNodes, Iterable<? extends ShardRouting> startedShardEntries) {
boolean dirty = false;
        // this method might be called several times with the same shard, ignore duplicates
for (ShardRouting startedShard : startedShardEntries) {
assert startedShard.state() == INITIALIZING;
            // retrieve the relocating node id before marking the shard as started, since starting it clears the relocation state
String relocatingNodeId = null;
RoutingNodes.RoutingNodeIterator currentRoutingNode = routingNodes.routingNodeIter(startedShard.currentNodeId());
if (currentRoutingNode != null) {
for (MutableShardRouting shard : currentRoutingNode) {
if (shard.shardId().equals(startedShard.shardId())) {
relocatingNodeId = shard.relocatingNodeId();
if (!shard.started()) {
dirty = true;
routingNodes.started(shard);
}
break;
}
}
}
// startedShard is the current state of the shard (post relocation for example)
// this means that after relocation, the state will be started and the currentNodeId will be
// the node we relocated to
if (relocatingNodeId == null) {
continue;
}
RoutingNodes.RoutingNodeIterator sourceRoutingNode = routingNodes.routingNodeIter(relocatingNodeId);
if (sourceRoutingNode != null) {
while (sourceRoutingNode.hasNext()) {
MutableShardRouting shard = sourceRoutingNode.next();
if (shard.shardId().equals(startedShard.shardId())) {
if (shard.relocating()) {
dirty = true;
sourceRoutingNode.remove();
break;
}
}
}
}
}
return dirty;
}
/**
* Applies the relevant logic to handle a failed shard. Returns <tt>true</tt> if changes happened that
* require relocation.
*/
private boolean applyFailedShard(RoutingAllocation allocation, ShardRouting failedShard, boolean addToIgnoreList) {
// create a copy of the failed shard, since we assume we can change possible references to it without
// changing the state of failed shard
failedShard = new ImmutableShardRouting(failedShard);
IndexRoutingTable indexRoutingTable = allocation.routingTable().index(failedShard.index());
if (indexRoutingTable == null) {
return false;
}
RoutingNodes routingNodes = allocation.routingNodes();
if (failedShard.relocatingNodeId() != null) {
            // the shard is relocating, either initializing (recovering from another node) or relocating (moving to another node)
if (failedShard.state() == INITIALIZING) {
// the shard is initializing and recovering from another node
boolean dirty = false;
// first, we need to cancel the current node that is being initialized
RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.currentNodeId());
if (initializingNode != null) {
while(initializingNode.hasNext()) {
MutableShardRouting shardRouting = initializingNode.next();
if (shardRouting.equals(failedShard)) {
dirty = true;
initializingNode.remove();
if (addToIgnoreList) {
// make sure we ignore this shard on the relevant node
allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
}
break;
}
}
}
if (dirty) {
// now, find the node that we are relocating *from*, and cancel its relocation
RoutingNode relocatingFromNode = routingNodes.node(failedShard.relocatingNodeId());
if (relocatingFromNode != null) {
for (MutableShardRouting shardRouting : relocatingFromNode) {
if (shardRouting.shardId().equals(failedShard.shardId()) && shardRouting.relocating()) {
dirty = true;
routingNodes.cancelRelocation(shardRouting);
break;
}
}
}
}
return dirty;
} else if (failedShard.state() == RELOCATING) {
boolean dirty = false;
                // the shard is relocating, meaning it's the source node the shard is relocating from
// first, we need to cancel the current relocation from the current node
// now, find the node that we are recovering from, cancel the relocation, remove it from the node
// and add it to the unassigned shards list...
RoutingNodes.RoutingNodeIterator relocatingFromNode = routingNodes.routingNodeIter(failedShard.currentNodeId());
if (relocatingFromNode != null) {
while(relocatingFromNode.hasNext()) {
MutableShardRouting shardRouting = relocatingFromNode.next();
if (shardRouting.equals(failedShard)) {
dirty = true;
relocatingFromNode.remove();
if (addToIgnoreList) {
// make sure we ignore this shard on the relevant node
allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
}
routingNodes.unassigned().add(new MutableShardRouting(failedShard.index(), failedShard.id(),
null, failedShard.primary(), ShardRoutingState.UNASSIGNED, failedShard.version() + 1));
break;
}
}
}
if (dirty) {
                // next, we need to find the target initializing shard (the one recovering from this node) and remove it...
RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.relocatingNodeId());
if (initializingNode != null) {
while (initializingNode.hasNext()) {
MutableShardRouting shardRouting = initializingNode.next();
if (shardRouting.shardId().equals(failedShard.shardId()) && shardRouting.state() == INITIALIZING) {
dirty = true;
initializingNode.remove();
}
}
}
}
return dirty;
} else {
throw new ElasticsearchIllegalStateException("illegal state for a failed shard, relocating node id is set, but state does not match: " + failedShard);
}
} else {
            // the shard is not relocating; it's either started or initializing, so just cancel it and move on...
boolean dirty = false;
RoutingNodes.RoutingNodeIterator node = routingNodes.routingNodeIter(failedShard.currentNodeId());
if (node != null) {
while(node.hasNext()) {
MutableShardRouting shardRouting = node.next();
if (shardRouting.equals(failedShard)) {
dirty = true;
if (addToIgnoreList) {
// make sure we ignore this shard on the relevant node
allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
}
node.remove();
// move all the shards matching the failed shard to the end of the unassigned list
// so we give a chance for other allocations and won't create poison failed allocations
// that can keep other shards from being allocated (because of limits applied on how many
// shards we can start per node)
List<MutableShardRouting> shardsToMove = Lists.newArrayList();
for (Iterator<MutableShardRouting> unassignedIt = routingNodes.unassigned().iterator(); unassignedIt.hasNext(); ) {
MutableShardRouting unassignedShardRouting = unassignedIt.next();
if (unassignedShardRouting.shardId().equals(failedShard.shardId())) {
unassignedIt.remove();
shardsToMove.add(unassignedShardRouting);
}
}
if (!shardsToMove.isEmpty()) {
routingNodes.unassigned().addAll(shardsToMove);
}
routingNodes.unassigned().add(new MutableShardRouting(failedShard.index(), failedShard.id(), null,
null, failedShard.restoreSource(), failedShard.primary(), ShardRoutingState.UNASSIGNED, failedShard.version() + 1));
break;
}
}
}
return dirty;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_routing_allocation_AllocationService.java
|
1,540 |
@Service("blUpdateCartService")
public class UpdateCartServiceImpl implements UpdateCartService {
protected static final Log LOG = LogFactory.getLog(UpdateCartServiceImpl.class);
protected static BroadleafCurrency savedCurrency;
@Resource(name="blOrderService")
protected OrderService orderService;
@Resource(name = "blUpdateCartServiceExtensionManager")
protected UpdateCartServiceExtensionManager extensionManager;
@Override
public boolean currencyHasChanged() {
BroadleafCurrency currency = findActiveCurrency();
if (getSavedCurrency() == null) {
setSavedCurrency(currency);
        } else if (!getSavedCurrency().equals(currency)) {
return true;
}
return false;
}
@Override
public UpdateCartResponse copyCartToCurrentContext(Order currentCart) {
if(currentCart.getOrderItems() == null){
return null;
}
BroadleafCurrency currency = findActiveCurrency();
if(currency == null){
return null;
}
//Reprice order logic
List<AddToCartItem> itemsToReprice = new ArrayList<AddToCartItem>();
List<OrderItem> itemsToRemove = new ArrayList<OrderItem>();
List<OrderItem> itemsToReset = new ArrayList<OrderItem>();
boolean repriceOrder = true;
for(OrderItem orderItem: currentCart.getOrderItems()){
//Lookup price in price list, if null, then add to itemsToRemove
if (orderItem instanceof DiscreteOrderItem){
DiscreteOrderItem doi = (DiscreteOrderItem) orderItem;
if(checkAvailabilityInLocale(doi, currency)){
AddToCartItem itemRequest = new AddToCartItem();
itemRequest.setProductId(doi.getProduct().getId());
itemRequest.setQuantity(doi.getQuantity());
itemsToReprice.add(itemRequest);
itemsToReset.add(orderItem);
} else {
itemsToRemove.add(orderItem);
}
} else if (orderItem instanceof BundleOrderItem) {
BundleOrderItem boi = (BundleOrderItem) orderItem;
for (DiscreteOrderItem doi : boi.getDiscreteOrderItems()) {
if(checkAvailabilityInLocale(doi, currency)){
AddToCartItem itemRequest = new AddToCartItem();
itemRequest.setProductId(doi.getProduct().getId());
itemRequest.setQuantity(doi.getQuantity());
itemsToReprice.add(itemRequest);
itemsToReset.add(orderItem);
} else {
itemsToRemove.add(orderItem);
}
}
}
}
for(OrderItem orderItem: itemsToReset){
try {
currentCart = orderService.removeItem(currentCart.getId(), orderItem.getId(), false);
} catch (RemoveFromCartException e) {
                LOG.error("Could not remove item from the cart", e);
}
}
for(AddToCartItem itemRequest: itemsToReprice){
try {
currentCart = orderService.addItem(currentCart.getId(), itemRequest, false);
} catch (AddToCartException e) {
                LOG.error("Could not re-add item to the cart", e);
}
}
// Reprice and save the cart
try {
currentCart = orderService.save(currentCart, repriceOrder);
} catch (PricingException e) {
            LOG.error("Unable to reprice and save the cart", e);
}
setSavedCurrency(currency);
UpdateCartResponse updateCartResponse = new UpdateCartResponse();
updateCartResponse.setRemovedItems(itemsToRemove);
updateCartResponse.setOrder(currentCart);
return updateCartResponse;
}
@Override
public void validateCart(Order cart) {
// hook to allow override
}
@Override
public void updateAndValidateCart(Order cart) {
if (extensionManager != null) {
ExtensionResultHolder erh = new ExtensionResultHolder();
extensionManager.getProxy().updateAndValidateCart(cart, erh);
Boolean clearCart = (Boolean) erh.getContextMap().get("clearCart");
Boolean repriceCart = (Boolean) erh.getContextMap().get("repriceCart");
Boolean saveCart = (Boolean) erh.getContextMap().get("saveCart");
if (clearCart != null && clearCart.booleanValue()) {
orderService.cancelOrder(cart);
cart = orderService.createNewCartForCustomer(cart.getCustomer());
} else {
try {
if (repriceCart != null && repriceCart.booleanValue()) {
cart.updatePrices();
orderService.save(cart, true);
} else if (saveCart != null && saveCart.booleanValue()) {
orderService.save(cart, false);
}
} catch (PricingException pe) {
LOG.error("Pricing Exception while validating cart. Clearing cart.", pe);
orderService.cancelOrder(cart);
cart = orderService.createNewCartForCustomer(cart.getCustomer());
}
}
}
}
protected BroadleafCurrency findActiveCurrency(){
if(BroadleafRequestContext.hasLocale()){
return BroadleafRequestContext.getBroadleafRequestContext().getBroadleafCurrency();
}
return null;
}
protected boolean checkAvailabilityInLocale(DiscreteOrderItem doi, BroadleafCurrency currency) {
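        // NOTE: the currency argument is not consulted here; availability is delegated entirely to the Sku,
        // presumably leaving true per-currency checks to an extension override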
if (doi.getSku() != null && extensionManager != null) {
Sku sku = doi.getSku();
return sku.isAvailable();
}
return false;
}
@Override
public void setSavedCurrency(BroadleafCurrency savedCurrency) {
this.savedCurrency = savedCurrency;
}
@Override
public BroadleafCurrency getSavedCurrency() {
return savedCurrency;
}
}
| 1no label
|
core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_service_UpdateCartServiceImpl.java
|
259 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class ExecutionDelayTest extends HazelcastTestSupport {
private static final int NODES = 3;
private final List<HazelcastInstance> hzs = new ArrayList<HazelcastInstance>(NODES);
static final AtomicInteger counter = new AtomicInteger();
@Before
public void init() {
counter.set(0);
for (int i = 0; i < NODES; i++) {
hzs.add(Hazelcast.newHazelcastInstance());
}
}
@After
public void destroy() throws InterruptedException {
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
}
@Test
public void testExecutorOneNodeFailsUnexpectedly() throws InterruptedException, ExecutionException {
final int executions = 20;
ScheduledExecutorService ex = Executors.newSingleThreadScheduledExecutor();
try {
ex.schedule(new Runnable() {
@Override
public void run() {
hzs.get(1).getLifecycleService().terminate();
}
}, 1000, TimeUnit.MILLISECONDS);
Task task = new Task();
runClient(task, executions);
assertTrueEventually(new AssertTask() {
@Override
public void run() {
assertEquals(executions, counter.get());
}
});
} finally {
ex.shutdown();
}
}
@Test
public void testExecutorOneNodeShutdown() throws InterruptedException, ExecutionException {
final int executions = 20;
ScheduledExecutorService ex = Executors.newSingleThreadScheduledExecutor();
try {
ex.schedule(new Runnable() {
@Override
public void run() {
hzs.get(1).shutdown();
}
}, 1000, TimeUnit.MILLISECONDS);
Task task = new Task();
runClient(task, executions);
assertTrueEventually(new AssertTask() {
@Override
public void run() {
assertEquals(executions, counter.get());
}
});
} finally {
ex.shutdown();
}
}
private void runClient(Task task, int executions) throws InterruptedException, ExecutionException {
final ClientConfig clientConfig = new ClientConfig();
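        // redo (retry) operations on surviving members when the node that owns the key is terminated mid-call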
clientConfig.getNetworkConfig().setRedoOperation(true);
HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
IExecutorService executor = client.getExecutorService("executor");
for (int i = 0; i < executions; i++) {
Future future = executor.submitToKeyOwner(task, i);
future.get();
Thread.sleep(100);
}
}
private static class Task implements Serializable, Callable {
@Override
public Object call() throws Exception {
counter.incrementAndGet();
return null;
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ExecutionDelayTest.java
|
281 |
public class OCommandFunction extends OCommandRequestTextAbstract {
private static final long serialVersionUID = 1L;
public OCommandFunction() {
}
public OCommandFunction(final String iName) {
super(iName);
}
public boolean isIdempotent() {
return false;
}
@Override
public String toString() {
return "function." + OIOUtils.getStringMaxLength(text, 200, "...");
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_script_OCommandFunction.java
|
304 |
public abstract class ClusterAction<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
extends GenericAction<Request, Response> {
protected ClusterAction(String name) {
super(name);
}
public abstract RequestBuilder newRequestBuilder(ClusterAdminClient client);
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_ClusterAction.java
|
99 |
@SuppressWarnings("serial")
static final class SearchValuesTask<K,V,U>
extends BulkTask<K,V,U> {
final Fun<? super V, ? extends U> searchFunction;
final AtomicReference<U> result;
SearchValuesTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
Fun<? super V, ? extends U> searchFunction,
AtomicReference<U> result) {
super(p, b, i, f, t);
this.searchFunction = searchFunction; this.result = result;
}
public final U getRawResult() { return result.get(); }
public final void compute() {
final Fun<? super V, ? extends U> searchFunction;
final AtomicReference<U> result;
if ((searchFunction = this.searchFunction) != null &&
(result = this.result) != null) {
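                // repeatedly halve the remaining range and fork the upper half as a subtask,
                // bailing out early once any task has published a non-null result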
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
if (result.get() != null)
return;
addToPendingCount(1);
new SearchValuesTask<K,V,U>
(this, batch >>>= 1, baseLimit = h, f, tab,
searchFunction, result).fork();
}
while (result.get() == null) {
U u;
Node<K,V> p;
if ((p = advance()) == null) {
propagateCompletion();
break;
}
if ((u = searchFunction.apply(p.val)) != null) {
if (result.compareAndSet(null, u))
quietlyCompleteRoot();
break;
}
}
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
84 |
protected enum RESULT {
OK, ERROR, EXIT
};
| 0true
|
commons_src_main_java_com_orientechnologies_common_console_OConsoleApplication.java
|
2,596 |
private static class MasterPingRequest extends TransportRequest {
private String nodeId;
private String masterNodeId;
private MasterPingRequest() {
}
private MasterPingRequest(String nodeId, String masterNodeId) {
this.nodeId = nodeId;
this.masterNodeId = masterNodeId;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodeId = in.readString();
masterNodeId = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(nodeId);
out.writeString(masterNodeId);
}
}
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_fd_MasterFaultDetection.java
|
1,624 |
public class TimedMemberStateFactory {
private final HazelcastInstanceImpl instance;
private final int maxVisibleInstanceCount;
public TimedMemberStateFactory(HazelcastInstanceImpl instance) {
this.instance = instance;
maxVisibleInstanceCount = instance.node.groupProperties.MC_MAX_INSTANCE_COUNT.getInteger();
}
public TimedMemberState createTimedMemberState() {
MemberStateImpl memberState = new MemberStateImpl();
createMemberState(memberState);
GroupConfig groupConfig = instance.getConfig().getGroupConfig();
TimedMemberState timedMemberState = new TimedMemberState();
timedMemberState.setMaster(instance.node.isMaster());
if (timedMemberState.getMaster()) {
timedMemberState.setMemberList(new ArrayList<String>());
Set<Member> memberSet = instance.getCluster().getMembers();
for (Member member : memberSet) {
MemberImpl memberImpl = (MemberImpl) member;
Address address = memberImpl.getAddress();
timedMemberState.getMemberList().add(address.getHost() + ":" + address.getPort());
}
}
timedMemberState.setMemberState(memberState);
timedMemberState.setClusterName(groupConfig.getName());
timedMemberState.setInstanceNames(getLongInstanceNames());
return timedMemberState;
}
private void createMemberState(MemberStateImpl memberState) {
final Node node = instance.node;
memberState.setAddress(node.getThisAddress());
PartitionService partitionService = instance.getPartitionService();
Set<Partition> partitions = partitionService.getPartitions();
memberState.clearPartitions();
for (Partition partition : partitions) {
if (partition.getOwner() != null && partition.getOwner().localMember()) {
memberState.addPartition(partition.getPartitionId());
}
}
Collection<DistributedObject> proxyObjects = new ArrayList<DistributedObject>(instance.getDistributedObjects());
createRuntimeProps(memberState);
createMemState(memberState, proxyObjects);
}
private void createRuntimeProps(MemberStateImpl memberState) {
Runtime runtime = Runtime.getRuntime();
ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean();
RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
ClassLoadingMXBean clMxBean = ManagementFactory.getClassLoadingMXBean();
MemoryMXBean memoryMxBean = ManagementFactory.getMemoryMXBean();
MemoryUsage heapMemory = memoryMxBean.getHeapMemoryUsage();
MemoryUsage nonHeapMemory = memoryMxBean.getNonHeapMemoryUsage();
Map<String, Long> map = new HashMap<String, Long>();
map.put("runtime.availableProcessors", Integer.valueOf(runtime.availableProcessors()).longValue());
map.put("date.startTime", runtimeMxBean.getStartTime());
map.put("seconds.upTime", runtimeMxBean.getUptime());
map.put("memory.maxMemory", runtime.maxMemory());
map.put("memory.freeMemory", runtime.freeMemory());
map.put("memory.totalMemory", runtime.totalMemory());
map.put("memory.heapMemoryMax", heapMemory.getMax());
map.put("memory.heapMemoryUsed", heapMemory.getUsed());
map.put("memory.nonHeapMemoryMax", nonHeapMemory.getMax());
map.put("memory.nonHeapMemoryUsed", nonHeapMemory.getUsed());
map.put("runtime.totalLoadedClassCount", clMxBean.getTotalLoadedClassCount());
map.put("runtime.loadedClassCount", Integer.valueOf(clMxBean.getLoadedClassCount()).longValue());
map.put("runtime.unloadedClassCount", clMxBean.getUnloadedClassCount());
map.put("runtime.totalStartedThreadCount", threadMxBean.getTotalStartedThreadCount());
map.put("runtime.threadCount", Integer.valueOf(threadMxBean.getThreadCount()).longValue());
map.put("runtime.peakThreadCount", Integer.valueOf(threadMxBean.getPeakThreadCount()).longValue());
map.put("runtime.daemonThreadCount", Integer.valueOf(threadMxBean.getDaemonThreadCount()).longValue());
OperatingSystemMXBean osMxBean = ManagementFactory.getOperatingSystemMXBean();
map.put("osMemory.freePhysicalMemory", get(osMxBean, "getFreePhysicalMemorySize", 0L));
map.put("osMemory.committedVirtualMemory", get(osMxBean, "getCommittedVirtualMemorySize", 0L));
map.put("osMemory.totalPhysicalMemory", get(osMxBean, "getTotalPhysicalMemorySize", 0L));
map.put("osSwap.freeSwapSpace", get(osMxBean, "getFreeSwapSpaceSize", 0L));
map.put("osSwap.totalSwapSpace", get(osMxBean, "getTotalSwapSpaceSize", 0L));
map.put("os.maxFileDescriptorCount", get(osMxBean, "getMaxFileDescriptorCount", 0L));
map.put("os.openFileDescriptorCount", get(osMxBean, "getOpenFileDescriptorCount", 0L));
map.put("os.processCpuLoad", get(osMxBean, "getProcessCpuLoad", -1L));
map.put("os.systemLoadAverage", get(osMxBean, "getSystemLoadAverage", -1L));
map.put("os.systemCpuLoad", get(osMxBean, "getSystemCpuLoad", -1L));
map.put("os.processCpuTime", get(osMxBean, "getProcessCpuTime", 0L));
map.put("os.availableProcessors", get(osMxBean, "getAvailableProcessors", 0L));
memberState.setRuntimeProps(map);
}
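    // reflectively reads OperatingSystemMXBean properties (e.g. the com.sun.management extensions) that are
    // not part of the standard interface, falling back to the supplied default when a method is unavailable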
private static Long get(OperatingSystemMXBean mbean, String methodName, Long defaultValue) {
try {
Method method = mbean.getClass().getMethod(methodName);
method.setAccessible(true);
Object value = method.invoke(mbean);
if (value == null) {
return defaultValue;
}
if (value instanceof Integer) {
return (long) (Integer) value;
}
if (value instanceof Double) {
double v = (Double) value;
return Math.round(v * 100);
}
if (value instanceof Long) {
return (Long) value;
}
return defaultValue;
} catch (RuntimeException re) {
throw re;
} catch (Exception e) {
return defaultValue;
}
}
private void createMemState(MemberStateImpl memberState,
Collection<DistributedObject> distributedObjects) {
int count = 0;
final Config config = instance.getConfig();
for (DistributedObject distributedObject : distributedObjects) {
if (count < maxVisibleInstanceCount) {
if (distributedObject instanceof IMap) {
IMap map = (IMap) distributedObject;
if (config.findMapConfig(map.getName()).isStatisticsEnabled()) {
memberState.putLocalMapStats(map.getName(), (LocalMapStatsImpl) map.getLocalMapStats());
count++;
}
} else if (distributedObject instanceof IQueue) {
IQueue queue = (IQueue) distributedObject;
if (config.findQueueConfig(queue.getName()).isStatisticsEnabled()) {
LocalQueueStatsImpl stats = (LocalQueueStatsImpl) queue.getLocalQueueStats();
memberState.putLocalQueueStats(queue.getName(), stats);
count++;
}
} else if (distributedObject instanceof ITopic) {
ITopic topic = (ITopic) distributedObject;
if (config.findTopicConfig(topic.getName()).isStatisticsEnabled()) {
LocalTopicStatsImpl stats = (LocalTopicStatsImpl) topic.getLocalTopicStats();
memberState.putLocalTopicStats(topic.getName(), stats);
count++;
}
} else if (distributedObject instanceof MultiMap) {
MultiMap multiMap = (MultiMap) distributedObject;
if (config.findMultiMapConfig(multiMap.getName()).isStatisticsEnabled()) {
LocalMultiMapStatsImpl stats = (LocalMultiMapStatsImpl) multiMap.getLocalMultiMapStats();
memberState.putLocalMultiMapStats(multiMap.getName(), stats);
count++;
}
} else if (distributedObject instanceof IExecutorService) {
IExecutorService executorService = (IExecutorService) distributedObject;
if (config.findExecutorConfig(executorService.getName()).isStatisticsEnabled()) {
LocalExecutorStatsImpl stats = (LocalExecutorStatsImpl) executorService.getLocalExecutorStats();
memberState.putLocalExecutorStats(executorService.getName(), stats);
count++;
}
}
}
}
}
private Set<String> getLongInstanceNames() {
Set<String> setLongInstanceNames = new HashSet<String>(maxVisibleInstanceCount);
Collection<DistributedObject> proxyObjects = new ArrayList<DistributedObject>(instance.getDistributedObjects());
collectInstanceNames(setLongInstanceNames, proxyObjects);
return setLongInstanceNames;
}
private void collectInstanceNames(Set<String> setLongInstanceNames,
Collection<DistributedObject> distributedObjects) {
int count = 0;
final Config config = instance.getConfig();
for (DistributedObject distributedObject : distributedObjects) {
if (count < maxVisibleInstanceCount) {
if (distributedObject instanceof MultiMap) {
MultiMap multiMap = (MultiMap) distributedObject;
if (config.findMultiMapConfig(multiMap.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("m:" + multiMap.getName());
count++;
}
} else if (distributedObject instanceof IMap) {
IMap map = (IMap) distributedObject;
if (config.findMapConfig(map.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("c:" + map.getName());
count++;
}
} else if (distributedObject instanceof IQueue) {
IQueue queue = (IQueue) distributedObject;
if (config.findQueueConfig(queue.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("q:" + queue.getName());
count++;
}
} else if (distributedObject instanceof ITopic) {
ITopic topic = (ITopic) distributedObject;
if (config.findTopicConfig(topic.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("t:" + topic.getName());
count++;
}
} else if (distributedObject instanceof IExecutorService) {
IExecutorService executorService = (IExecutorService) distributedObject;
if (config.findExecutorConfig(executorService.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("e:" + executorService.getName());
count++;
}
}
}
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_management_TimedMemberStateFactory.java
|
45 |
public class OByteArrayComparator implements Comparator<byte[]> {
public static final OByteArrayComparator INSTANCE = new OByteArrayComparator();
public int compare(final byte[] arrayOne, final byte[] arrayTwo) {
final int lenDiff = arrayOne.length - arrayTwo.length;
if (lenDiff != 0)
return lenDiff;
for (int i = 0; i < arrayOne.length; i++) {
final int valOne = arrayOne[i] & 0xFF;
final int valTwo = arrayTwo[i] & 0xFF;
final int diff = valOne - valTwo;
if (diff != 0)
return diff;
}
return 0;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_comparator_OByteArrayComparator.java
|
56 |
@SuppressWarnings("serial")
static final class ForEachTransformedEntryTask<K,V,U>
extends BulkTask<K,V,Void> {
final Fun<Map.Entry<K,V>, ? extends U> transformer;
final Action<? super U> action;
ForEachTransformedEntryTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) {
super(p, b, i, f, t);
this.transformer = transformer; this.action = action;
}
public final void compute() {
final Fun<Map.Entry<K,V>, ? extends U> transformer;
final Action<? super U> action;
if ((transformer = this.transformer) != null &&
(action = this.action) != null) {
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
new ForEachTransformedEntryTask<K,V,U>
(this, batch >>>= 1, baseLimit = h, f, tab,
transformer, action).fork();
}
for (Node<K,V> p; (p = advance()) != null; ) {
U u;
if ((u = transformer.apply(p)) != null)
action.apply(u);
}
propagateCompletion();
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
553 |
public class WeightUnitOfMeasureType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, WeightUnitOfMeasureType> TYPES = new LinkedHashMap<String, WeightUnitOfMeasureType>();
public static final WeightUnitOfMeasureType POUNDS = new WeightUnitOfMeasureType("POUNDS", "Pounds");
public static final WeightUnitOfMeasureType KILOGRAMS = new WeightUnitOfMeasureType("KILOGRAMS", "Kilograms");
public static WeightUnitOfMeasureType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public WeightUnitOfMeasureType() {
//do nothing
}
public WeightUnitOfMeasureType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)){
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
WeightUnitOfMeasureType other = (WeightUnitOfMeasureType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_util_WeightUnitOfMeasureType.java
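A brief usage sketch (not part of the original file): instances register themselves in TYPES as they are constructed, so getInstance only resolves values that have already been built. The demo class is hypothetical and relies only on the API shown above.
public class WeightUnitDemo {
    public static void main(String[] args) {
        // POUNDS and KILOGRAMS were registered by the static field initializers above
        WeightUnitOfMeasureType kg = WeightUnitOfMeasureType.getInstance("KILOGRAMS");
        System.out.println(kg.getFriendlyType()); // Kilograms
        System.out.println(WeightUnitOfMeasureType.getInstance("STONE")); // null: never constructed
    }
}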
|
4,676 |
abstract class QueryCollector extends Collector {
final IndexFieldData<?> idFieldData;
final IndexSearcher searcher;
final ConcurrentMap<HashedBytesRef, Query> queries;
final ESLogger logger;
final Lucene.ExistsCollector collector = new Lucene.ExistsCollector();
final HashedBytesRef spare = new HashedBytesRef(new BytesRef());
BytesValues values;
final List<Collector> facetCollectors = new ArrayList<Collector>();
final Collector facetAndAggregatorCollector;
QueryCollector(ESLogger logger, PercolateContext context) {
this.logger = logger;
this.queries = context.percolateQueries();
this.searcher = context.docSearcher();
final FieldMapper<?> idMapper = context.mapperService().smartNameFieldMapper(IdFieldMapper.NAME);
this.idFieldData = context.fieldData().getForField(idMapper);
if (context.facets() != null) {
for (SearchContextFacets.Entry entry : context.facets().entries()) {
if (entry.isGlobal()) {
continue; // not supported for now
}
Collector collector = entry.getFacetExecutor().collector();
if (entry.getFilter() != null) {
if (collector instanceof NestedFacetExecutor.Collector) {
collector = new NestedFacetExecutor.Collector((NestedFacetExecutor.Collector) collector, entry.getFilter());
} else {
collector = new FilteredCollector(collector, entry.getFilter());
}
}
facetCollectors.add(collector);
}
}
List<Collector> collectors = new ArrayList<Collector>(facetCollectors);
if (context.aggregations() != null) {
AggregationContext aggregationContext = new AggregationContext(context);
context.aggregations().aggregationContext(aggregationContext);
List<Aggregator> aggregatorCollectors = new ArrayList<Aggregator>();
Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators(aggregationContext);
for (int i = 0; i < aggregators.length; i++) {
if (!(aggregators[i] instanceof GlobalAggregator)) {
Aggregator aggregator = aggregators[i];
if (aggregator.shouldCollect()) {
aggregatorCollectors.add(aggregator);
}
}
}
context.aggregations().aggregators(aggregators);
if (!aggregatorCollectors.isEmpty()) {
collectors.add(new AggregationPhase.AggregationsCollector(aggregatorCollectors, aggregationContext));
}
}
int size = collectors.size();
if (size == 0) {
facetAndAggregatorCollector = null;
} else if (size == 1) {
facetAndAggregatorCollector = collectors.get(0);
} else {
facetAndAggregatorCollector = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
}
}
@Override
public void setScorer(Scorer scorer) throws IOException {
if (facetAndAggregatorCollector != null) {
facetAndAggregatorCollector.setScorer(scorer);
}
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
// we use the UID because id might not be indexed
values = idFieldData.load(context).getBytesValues(true);
if (facetAndAggregatorCollector != null) {
facetAndAggregatorCollector.setNextReader(context);
}
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
static Match match(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase) {
return new Match(logger, context, highlightPhase);
}
static Count count(ESLogger logger, PercolateContext context) {
return new Count(logger, context);
}
static MatchAndScore matchAndScore(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase) {
return new MatchAndScore(logger, context, highlightPhase);
}
static MatchAndSort matchAndSort(ESLogger logger, PercolateContext context) {
return new MatchAndSort(logger, context);
}
protected final Query getQuery(int doc) {
final int numValues = values.setDocument(doc);
if (numValues == 0) {
return null;
}
assert numValues == 1;
spare.reset(values.nextValue(), values.currentValueHash());
return queries.get(spare);
}
final static class Match extends QueryCollector {
final PercolateContext context;
final HighlightPhase highlightPhase;
final List<BytesRef> matches = new ArrayList<BytesRef>();
final List<Map<String, HighlightField>> hls = new ArrayList<Map<String, HighlightField>>();
final boolean limit;
final int size;
long counter = 0;
Match(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase) {
super(logger, context);
this.limit = context.limit;
this.size = context.size;
this.context = context;
this.highlightPhase = highlightPhase;
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
// run the query
try {
collector.reset();
if (context.highlight() != null) {
context.parsedQuery(new ParsedQuery(query, ImmutableMap.<String, Filter>of()));
context.hitContext().cache().clear();
}
searcher.search(query, collector);
if (collector.exists()) {
if (!limit || counter < size) {
matches.add(values.copyShared());
if (context.highlight() != null) {
highlightPhase.hitExecute(context, context.hitContext());
hls.add(context.hitContext().hit().getHighlightFields());
}
}
counter++;
if (facetAndAggregatorCollector != null) {
facetAndAggregatorCollector.collect(doc);
}
}
} catch (IOException e) {
logger.warn("[" + spare.bytes.utf8ToString() + "] failed to execute query", e);
}
}
long counter() {
return counter;
}
List<BytesRef> matches() {
return matches;
}
List<Map<String, HighlightField>> hls() {
return hls;
}
}
final static class MatchAndSort extends QueryCollector {
private final TopScoreDocCollector topDocsCollector;
MatchAndSort(ESLogger logger, PercolateContext context) {
super(logger, context);
// TODO: Use TopFieldCollector.create(...) for ascending and descending scoring?
topDocsCollector = TopScoreDocCollector.create(context.size, false);
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
// run the query
try {
collector.reset();
searcher.search(query, collector);
if (collector.exists()) {
topDocsCollector.collect(doc);
if (facetAndAggregatorCollector != null) {
facetAndAggregatorCollector.collect(doc);
}
}
} catch (IOException e) {
logger.warn("[" + spare.bytes.utf8ToString() + "] failed to execute query", e);
}
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
super.setNextReader(context);
topDocsCollector.setNextReader(context);
}
@Override
public void setScorer(Scorer scorer) throws IOException {
topDocsCollector.setScorer(scorer);
}
TopDocs topDocs() {
return topDocsCollector.topDocs();
}
}
final static class MatchAndScore extends QueryCollector {
final PercolateContext context;
final HighlightPhase highlightPhase;
final List<BytesRef> matches = new ArrayList<BytesRef>();
final List<Map<String, HighlightField>> hls = new ArrayList<Map<String, HighlightField>>();
// TODO: Use thread local in order to cache the scores lists?
final FloatArrayList scores = new FloatArrayList();
final boolean limit;
final int size;
long counter = 0;
private Scorer scorer;
MatchAndScore(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase) {
super(logger, context);
this.limit = context.limit;
this.size = context.size;
this.context = context;
this.highlightPhase = highlightPhase;
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
// run the query
try {
collector.reset();
if (context.highlight() != null) {
context.parsedQuery(new ParsedQuery(query, ImmutableMap.<String, Filter>of()));
context.hitContext().cache().clear();
}
searcher.search(query, collector);
if (collector.exists()) {
if (!limit || counter < size) {
matches.add(values.copyShared());
scores.add(scorer.score());
if (context.highlight() != null) {
highlightPhase.hitExecute(context, context.hitContext());
hls.add(context.hitContext().hit().getHighlightFields());
}
}
counter++;
if (facetAndAggregatorCollector != null) {
facetAndAggregatorCollector.collect(doc);
}
}
} catch (IOException e) {
logger.warn("[" + spare.bytes.utf8ToString() + "] failed to execute query", e);
}
}
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
long counter() {
return counter;
}
List<BytesRef> matches() {
return matches;
}
FloatArrayList scores() {
return scores;
}
List<Map<String, HighlightField>> hls() {
return hls;
}
}
final static class Count extends QueryCollector {
private long counter = 0;
Count(ESLogger logger, PercolateContext context) {
super(logger, context);
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
// run the query
try {
collector.reset();
searcher.search(query, collector);
if (collector.exists()) {
counter++;
if (facetAndAggregatorCollector != null) {
facetAndAggregatorCollector.collect(doc);
}
}
} catch (IOException e) {
logger.warn("[" + spare.bytes.utf8ToString() + "] failed to execute query", e);
}
}
long counter() {
return counter;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_percolator_QueryCollector.java
|
3,259 |
public class MapPermission extends InstancePermission {
private static final int PUT = 0x4;
private static final int REMOVE = 0x8;
// each action must occupy its own bit; the previous values (0x16, 0x32, 0x64,
// 0x128, 0x256) were decimal flags mistakenly written as hex literals and
// overlapped with other masks
private static final int READ = 0x10;
private static final int LISTEN = 0x20;
private static final int LOCK = 0x40;
private static final int INDEX = 0x80;
private static final int INTERCEPT = 0x100;
private static final int ALL = CREATE | DESTROY | PUT | REMOVE | READ | LISTEN | LOCK | INDEX | INTERCEPT;
public MapPermission(String name, String... actions) {
super(name, actions);
}
@Override
protected int initMask(String[] actions) {
int mask = NONE;
for (String action : actions) {
if (ActionConstants.ACTION_ALL.equals(action)) {
return ALL;
}
if (ActionConstants.ACTION_CREATE.equals(action)) {
mask |= CREATE;
} else if (ActionConstants.ACTION_DESTROY.equals(action)) {
mask |= DESTROY;
} else if (ActionConstants.ACTION_PUT.equals(action)) {
mask |= PUT;
} else if (ActionConstants.ACTION_REMOVE.equals(action)) {
mask |= REMOVE;
} else if (ActionConstants.ACTION_READ.equals(action)) {
mask |= READ;
} else if (ActionConstants.ACTION_LISTEN.equals(action)) {
mask |= LISTEN;
} else if (ActionConstants.ACTION_LOCK.equals(action)) {
mask |= LOCK;
} else if (ActionConstants.ACTION_INDEX.equals(action)) {
mask |= INDEX;
} else if (ActionConstants.ACTION_INTERCEPT.equals(action)) {
mask |= INTERCEPT;
}
}
return mask;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_security_permission_MapPermission.java
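A standalone sketch (hypothetical class) showing why the action constants above must be distinct powers of two: with one bit per action, a combined mask can be tested per action with no false positives.
public class ActionMaskDemo {
    // mirrors the corrected masks: one bit per action
    static final int PUT = 0x4, REMOVE = 0x8, READ = 0x10, LISTEN = 0x20;
    public static void main(String[] args) {
        int granted = PUT | READ; // grant exactly two actions
        System.out.println((granted & READ) != 0);   // true: READ granted
        System.out.println((granted & REMOVE) != 0); // false: REMOVE not granted
        // with the original overlapping literal READ = 0x16, (READ & PUT) != 0,
        // so granting READ alone would also have implied PUT
    }
}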
|
1,483 |
public class JavaSearch {
public static SearchPattern createSearchPattern(
Declaration declaration, int limitTo) {
String pattern;
try {
pattern = getJavaNameOfDeclaration(declaration);
}
catch (IllegalArgumentException iae) {
return null;
}
if (declaration instanceof Method) {
return createPattern(pattern, METHOD, limitTo, R_EXACT_MATCH);
}
else if (declaration instanceof Value) {
int loc = pattern.lastIndexOf('.') + 1;
String setter = pattern.substring(0,loc) +
"set" + pattern.substring(loc+3);
SearchPattern getterPattern =
createPattern(pattern, METHOD, limitTo, R_EXACT_MATCH);
SearchPattern setterPattern =
createPattern(setter, METHOD, limitTo, R_EXACT_MATCH);
switch (limitTo) {
case IJavaSearchConstants.WRITE_ACCESSES:
return setterPattern;
case IJavaSearchConstants.READ_ACCESSES:
return getterPattern;
default:
return createOrPattern(getterPattern, setterPattern);
}
}
else {
SearchPattern searchPattern =
createPattern(pattern, CLASS_AND_INTERFACE,
limitTo, R_EXACT_MATCH);
//weirdly, ALL_OCCURRENCES doesn't return all occurrences
/*if (limitTo==IJavaSearchConstants.ALL_OCCURRENCES) {
searchPattern = createOrPattern(createPattern(pattern, CLASS_AND_INTERFACE,
IJavaSearchConstants.IMPLEMENTORS, R_EXACT_MATCH),
searchPattern);
}*/
return searchPattern;
}
}
public static IProject[] getProjectAndReferencingProjects(IProject project) {
IProject[] referencingProjects = project.getReferencingProjects();
IProject[] projects = new IProject[referencingProjects.length+1];
projects[0] = project;
System.arraycopy(referencingProjects, 0, projects, 1, referencingProjects.length);
return projects;
}
public static IProject[] getProjectAndReferencedProjects(IProject project) {
IProject[] referencedProjects;
try {
referencedProjects = project.getReferencedProjects();
IProject[] projects = new IProject[referencedProjects.length+1];
projects[0] = project;
System.arraycopy(referencedProjects, 0, projects, 1, referencedProjects.length);
return projects;
}
catch (Exception e) {
e.printStackTrace();
return new IProject[] { project };
}
}
public static void runSearch(IProgressMonitor pm, SearchEngine searchEngine,
SearchPattern searchPattern, IProject[] projects,
SearchRequestor requestor)
throws OperationCanceledException {
try {
searchEngine.search(searchPattern,
SearchUtils.getDefaultSearchParticipants(),
SearchEngine.createJavaSearchScope(projects),
requestor, pm);
}
catch (OperationCanceledException oce) {
throw oce;
}
catch (Exception e) {
e.printStackTrace();
}
}
public static String getQualifiedName(IMember dec) {
IPackageFragment packageFragment = (IPackageFragment)
dec.getAncestor(IJavaElement.PACKAGE_FRAGMENT);
IType type = (IType) dec.getAncestor(IJavaElement.TYPE);
String qualifier = packageFragment.getElementName();
String name = dec.getElementName();
if (dec instanceof IMethod && name.equals("get_")) {
return getQualifiedName(type);
}
else if (dec instanceof IType && name.endsWith("_")) {
return qualifier + '.' +
name.substring(0, name.length()-1);
}
if (dec instanceof IMethod) {
if (name.startsWith("$")) {
name = name.substring(1);
}
else if (name.startsWith("get") ||
name.startsWith("set")) {
name = Character.toLowerCase(name.charAt(3)) +
name.substring(4);
}
}
if (dec!=type) {
String typeName = type.getElementName();
if (typeName.endsWith(name + "_")) {
return qualifier + '.' + name;
}
else {
return qualifier + '.' +
type.getElementName() + '.' + name;
}
}
else {
return qualifier + '.' + name;
}
}
public static boolean isDeclarationOfLinkedElement(Declaration d,
IJavaElement javaElement) {
return d.getQualifiedNameString().replace("::", ".")
.equals(getQualifiedName((IMember) javaElement));
}
public static IProject[] getProjectsToSearch(IProject project) {
if (project.getName().equals("Ceylon Source Archives")) {
return CeylonBuilder.getProjects().toArray(new IProject[0]);
}
else {
return getProjectAndReferencingProjects(project);
}
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_util_JavaSearch.java
|
5,824 |
public class PlainHighlighter implements Highlighter {
private static final String CACHE_KEY = "highlight-plain";
@Override
public String[] names() {
return new String[] { "plain", "highlighter" };
}
public HighlightField highlight(HighlighterContext highlighterContext) {
SearchContextHighlight.Field field = highlighterContext.field;
SearchContext context = highlighterContext.context;
FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
FieldMapper<?> mapper = highlighterContext.mapper;
Encoder encoder = field.encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
if (!hitContext.cache().containsKey(CACHE_KEY)) {
Map<FieldMapper<?>, org.apache.lucene.search.highlight.Highlighter> mappers = Maps.newHashMap();
hitContext.cache().put(CACHE_KEY, mappers);
}
Map<FieldMapper<?>, org.apache.lucene.search.highlight.Highlighter> cache = (Map<FieldMapper<?>, org.apache.lucene.search.highlight.Highlighter>) hitContext.cache().get(CACHE_KEY);
org.apache.lucene.search.highlight.Highlighter entry = cache.get(mapper);
if (entry == null) {
Query query = highlighterContext.query.originalQuery();
QueryScorer queryScorer = new CustomQueryScorer(query, field.requireFieldMatch() ? mapper.names().indexName() : null);
queryScorer.setExpandMultiTermQuery(true);
Fragmenter fragmenter;
if (field.numberOfFragments() == 0) {
fragmenter = new NullFragmenter();
} else if (field.fragmenter() == null) {
fragmenter = new SimpleSpanFragmenter(queryScorer, field.fragmentCharSize());
} else if ("simple".equals(field.fragmenter())) {
fragmenter = new SimpleFragmenter(field.fragmentCharSize());
} else if ("span".equals(field.fragmenter())) {
fragmenter = new SimpleSpanFragmenter(queryScorer, field.fragmentCharSize());
} else {
throw new ElasticsearchIllegalArgumentException("unknown fragmenter option [" + field.fragmenter() + "] for the field [" + highlighterContext.fieldName + "]");
}
Formatter formatter = new SimpleHTMLFormatter(field.preTags()[0], field.postTags()[0]);
entry = new org.apache.lucene.search.highlight.Highlighter(formatter, encoder, queryScorer);
entry.setTextFragmenter(fragmenter);
// always highlight across all data
entry.setMaxDocCharsToAnalyze(Integer.MAX_VALUE);
cache.put(mapper, entry);
}
// a HACK to make the highlighter do highlighting, even though it's using the single frag list builder
int numberOfFragments = field.numberOfFragments() == 0 ? 1 : field.numberOfFragments();
ArrayList<TextFragment> fragsList = new ArrayList<TextFragment>();
List<Object> textsToHighlight;
try {
textsToHighlight = HighlightUtils.loadFieldValues(mapper, context, hitContext, field.forceSource());
for (Object textToHighlight : textsToHighlight) {
String text = textToHighlight.toString();
Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer();
TokenStream tokenStream = analyzer.tokenStream(mapper.names().indexName(), text);
if (!tokenStream.hasAttribute(CharTermAttribute.class) || !tokenStream.hasAttribute(OffsetAttribute.class)) {
// can't perform highlighting if the stream has no terms (binary token stream) or no offsets
continue;
}
TextFragment[] bestTextFragments = entry.getBestTextFragments(tokenStream, text, false, numberOfFragments);
for (TextFragment bestTextFragment : bestTextFragments) {
if (bestTextFragment != null && bestTextFragment.getScore() > 0) {
fragsList.add(bestTextFragment);
}
}
}
} catch (Exception e) {
throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
if (field.scoreOrdered()) {
CollectionUtil.introSort(fragsList, new Comparator<TextFragment>() {
public int compare(TextFragment o1, TextFragment o2) {
return Math.round(o2.getScore() - o1.getScore());
}
});
}
String[] fragments;
// number_of_fragments is set to 0 but we have a multivalued field
if (field.numberOfFragments() == 0 && textsToHighlight.size() > 1 && fragsList.size() > 0) {
fragments = new String[fragsList.size()];
for (int i = 0; i < fragsList.size(); i++) {
fragments[i] = fragsList.get(i).toString();
}
} else {
// refine numberOfFragments if needed
numberOfFragments = fragsList.size() < numberOfFragments ? fragsList.size() : numberOfFragments;
fragments = new String[numberOfFragments];
for (int i = 0; i < fragments.length; i++) {
fragments[i] = fragsList.get(i).toString();
}
}
if (fragments != null && fragments.length > 0) {
return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
}
int noMatchSize = highlighterContext.field.noMatchSize();
if (noMatchSize > 0 && textsToHighlight.size() > 0) {
// Pull an excerpt from the beginning of the string but make sure to split the string on a term boundary.
String fieldContents = textsToHighlight.get(0).toString();
Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer();
int end;
try {
end = findGoodEndForNoHighlightExcerpt(noMatchSize, analyzer.tokenStream(mapper.names().indexName(), fieldContents));
} catch (Exception e) {
throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
if (end > 0) {
return new HighlightField(highlighterContext.fieldName, new Text[] { new StringText(fieldContents.substring(0, end)) });
}
}
return null;
}
private static int findGoodEndForNoHighlightExcerpt(int noMatchSize, TokenStream tokenStream) throws IOException {
try {
if (!tokenStream.hasAttribute(OffsetAttribute.class)) {
// Can't split on term boundaries without offsets
return -1;
}
int end = -1;
tokenStream.reset();
while (tokenStream.incrementToken()) {
OffsetAttribute attr = tokenStream.getAttribute(OffsetAttribute.class);
if (attr.endOffset() >= noMatchSize) {
// Jump to the end of this token if it wouldn't put us past the boundary
if (attr.endOffset() == noMatchSize) {
end = noMatchSize;
}
return end;
}
end = attr.endOffset();
}
// We've exhausted the token stream so we should just highlight everything.
return end;
} finally {
tokenStream.end();
tokenStream.close();
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_highlight_PlainHighlighter.java
|
1,455 |
public class OCommandExecutorSQLCreateEdge extends OCommandExecutorSQLSetAware {
public static final String NAME = "CREATE EDGE";
private String from;
private String to;
private OClass clazz;
private String clusterName;
private LinkedHashMap<String, Object> fields;
@SuppressWarnings("unchecked")
public OCommandExecutorSQLCreateEdge parse(final OCommandRequest iRequest) {
final ODatabaseRecord database = getDatabase();
init((OCommandRequestText) iRequest);
parserRequiredKeyword("CREATE");
parserRequiredKeyword("EDGE");
String className = null;
String temp = parseOptionalWord(true);
while (temp != null) {
if (temp.equals("CLUSTER")) {
clusterName = parserRequiredWord(false);
} else if (temp.equals(KEYWORD_FROM)) {
from = parserRequiredWord(false, "Syntax error", " =><,\r\n");
} else if (temp.equals("TO")) {
to = parserRequiredWord(false, "Syntax error", " =><,\r\n");
} else if (temp.equals(KEYWORD_SET)) {
fields = new LinkedHashMap<String, Object>();
parseSetFields(fields);
} else if (temp.equals(KEYWORD_CONTENT)) {
parseContent();
} else if (className == null && temp.length() > 0)
className = temp;
temp = parseOptionalWord(true);
if (parserIsEnded())
break;
}
if (className == null)
// ASSIGN DEFAULT CLASS
className = "E";
// GET/CHECK CLASS NAME
clazz = database.getMetadata().getSchema().getClass(className);
if (clazz == null)
throw new OCommandSQLParsingException("Class " + className + " was not found");
return this;
}
/**
* Execute the command and return the ODocument object created.
*/
public Object execute(final Map<Object, Object> iArgs) {
if (clazz == null)
throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet");
final OrientBaseGraph graph = OGraphCommandExecutorSQLFactory.getGraph();
final Set<ORID> fromIds = OSQLEngine.getInstance().parseRIDTarget(graph.getRawGraph(), from);
final Set<ORID> toIds = OSQLEngine.getInstance().parseRIDTarget(graph.getRawGraph(), to);
// CREATE EDGES
final List<Object> edges = new ArrayList<Object>();
for (ORID from : fromIds) {
final OrientVertex fromVertex = graph.getVertex(from);
if (fromVertex == null)
throw new OCommandExecutionException("Source vertex '" + from + "' not exists");
for (ORID to : toIds) {
final OrientVertex toVertex;
if (from.equals(to)) {
toVertex = fromVertex;
} else {
toVertex = graph.getVertex(to);
}
final String clsName = clazz.getName();
if (fields != null)
// EVALUATE FIELDS
for (Entry<String, Object> f : fields.entrySet()) {
if (f.getValue() instanceof OSQLFunctionRuntime)
fields.put(f.getKey(), ((OSQLFunctionRuntime) f.getValue()).getValue(to, context));
}
final OrientEdge edge = fromVertex.addEdge(null, toVertex, clsName, clusterName, fields);
if (fields != null && !fields.isEmpty()) {
if (!edge.getRecord().getIdentity().isValid())
edge.convertToDocument();
OSQLHelper.bindParameters(edge.getRecord(), fields, new OCommandParameters(iArgs), context);
}
if (content != null) {
if (!edge.getRecord().getIdentity().isValid())
// LIGHTWEIGHT EDGE, TRANSFORM IT BEFORE
edge.convertToDocument();
edge.getRecord().merge(content, true, false);
}
edge.save(clusterName);
edges.add(edge);
}
}
return edges;
}
@Override
public String getSyntax() {
return "CREATE EDGE [<class>] [CLUSTER <cluster>] FROM <rid>|(<query>|[<rid>]*) TO <rid>|(<query>|[<rid>]*) [SET <field> = <expression>[,]*]|CONTENT {<JSON>}";
}
}
| 1no label
|
graphdb_src_main_java_com_orientechnologies_orient_graph_sql_OCommandExecutorSQLCreateEdge.java
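A hedged usage sketch, assuming an open OrientDB database handle named db and two placeholder vertex RIDs; the statement shape follows getSyntax() above.
import com.orientechnologies.orient.core.sql.OCommandSQL;
// #9:0 and #9:1 stand in for RIDs of existing vertices
List<?> created = db.command(new OCommandSQL(
        "CREATE EDGE E FROM #9:0 TO #9:1 SET weight = 3")).execute();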
|
428 |
public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder<ClusterStateRequest, ClusterStateResponse, ClusterStateRequestBuilder> {
public ClusterStateRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new ClusterStateRequest());
}
/**
* Include all data
*/
public ClusterStateRequestBuilder all() {
request.all();
return this;
}
/**
* Do not include any data
*/
public ClusterStateRequestBuilder clear() {
request.clear();
return this;
}
public ClusterStateRequestBuilder setBlocks(boolean filter) {
request.blocks(filter);
return this;
}
/**
* Should the cluster state result include the {@link org.elasticsearch.cluster.metadata.MetaData}. Defaults
* to <tt>true</tt>.
*/
public ClusterStateRequestBuilder setMetaData(boolean filter) {
request.metaData(filter);
return this;
}
/**
* Should the cluster state result include the {@link org.elasticsearch.cluster.node.DiscoveryNodes}. Defaults
* to <tt>true</tt>.
*/
public ClusterStateRequestBuilder setNodes(boolean filter) {
request.nodes(filter);
return this;
}
/**
* Should the cluster state result include the {@link org.elasticsearch.cluster.routing.RoutingTable}. Defaults
* to <tt>true</tt>.
*/
public ClusterStateRequestBuilder setRoutingTable(boolean filter) {
request.routingTable(filter);
return this;
}
/**
* When {@link #setMetaData(boolean)} is set, which indices to return the {@link org.elasticsearch.cluster.metadata.IndexMetaData}
* for. Defaults to all indices.
*/
public ClusterStateRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
public ClusterStateRequestBuilder setIndexTemplates(String... templates) {
request.indexTemplates(templates);
return this;
}
@Override
protected void doExecute(ActionListener<ClusterStateResponse> listener) {
((ClusterAdminClient) client).state(request, listener);
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_cluster_state_ClusterStateRequestBuilder.java
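A hedged usage sketch of the builder above; the client.admin().cluster().prepareState() entry point is assumed from the standard Elasticsearch client API, while the setters come from the class itself.
ClusterStateResponse response = client.admin().cluster().prepareState()
        .clear()                 // drop everything first
        .setMetaData(true)       // opt back in to metadata only
        .setIndices("my-index")  // limit IndexMetaData to a single index
        .execute().actionGet();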
|
586 |
public class RefreshResponse extends BroadcastOperationResponse {
RefreshResponse() {
}
RefreshResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_refresh_RefreshResponse.java
|
581 |
executionService.scheduleWithFixedDelay(executorName, new Runnable() {
public void run() {
sendMasterConfirmation();
}
}, masterConfirmationInterval, masterConfirmationInterval, TimeUnit.SECONDS);
| 1no label
|
hazelcast_src_main_java_com_hazelcast_cluster_ClusterServiceImpl.java
|
132 |
public abstract class RecursiveAction extends ForkJoinTask<Void> {
private static final long serialVersionUID = 5232453952276485070L;
/**
* The main computation performed by this task.
*/
protected abstract void compute();
/**
* Always returns {@code null}.
*
* @return {@code null} always
*/
public final Void getRawResult() { return null; }
/**
* Requires null completion value.
*/
protected final void setRawResult(Void mustBeNull) { }
/**
* Implements execution conventions for RecursiveActions.
*/
protected final boolean exec() {
compute();
return true;
}
}
| 0true
|
src_main_java_jsr166e_RecursiveAction.java
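A minimal sketch of the documented conventions, using the backport's own pool classes: compute() performs the work and the raw result stays null. The range-printing task is illustrative only.
import jsr166e.ForkJoinPool;
import jsr166e.ForkJoinTask;
@SuppressWarnings("serial")
class PrintRange extends RecursiveAction {
    final int lo, hi;
    PrintRange(int lo, int hi) { this.lo = lo; this.hi = hi; }
    @Override
    protected void compute() {
        if (hi - lo <= 2) {
            for (int i = lo; i < hi; i++) System.out.println(i); // small enough: run directly
        } else {
            int mid = (lo + hi) >>> 1;
            // split in half and run both subtasks in the pool
            ForkJoinTask.invokeAll(new PrintRange(lo, mid), new PrintRange(mid, hi));
        }
    }
}
// usage: new ForkJoinPool().invoke(new PrintRange(0, 8));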
|
3,549 |
public class BinaryFieldMapper extends AbstractFieldMapper<BytesReference> {
public static final String CONTENT_TYPE = "binary";
public static class Defaults extends AbstractFieldMapper.Defaults {
public static final long COMPRESS_THRESHOLD = -1;
public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
static {
FIELD_TYPE.setIndexed(false);
FIELD_TYPE.freeze();
}
}
public static class Builder extends AbstractFieldMapper.Builder<Builder, BinaryFieldMapper> {
private Boolean compress = null;
private long compressThreshold = Defaults.COMPRESS_THRESHOLD;
public Builder(String name) {
super(name, new FieldType(Defaults.FIELD_TYPE));
builder = this;
}
public Builder compress(boolean compress) {
this.compress = compress;
return this;
}
public Builder compressThreshold(long compressThreshold) {
this.compressThreshold = compressThreshold;
return this;
}
@Override
public BinaryFieldMapper build(BuilderContext context) {
return new BinaryFieldMapper(buildNames(context), fieldType, compress, compressThreshold, postingsProvider,
docValuesProvider, multiFieldsBuilder.build(this, context), copyTo);
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
BinaryFieldMapper.Builder builder = binaryField(name);
parseField(builder, name, node, parserContext);
for (Map.Entry<String, Object> entry : node.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals("compress") && fieldNode != null) {
builder.compress(nodeBooleanValue(fieldNode));
} else if (fieldName.equals("compress_threshold") && fieldNode != null) {
if (fieldNode instanceof Number) {
builder.compressThreshold(((Number) fieldNode).longValue());
builder.compress(true);
} else {
builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString()).bytes());
builder.compress(true);
}
}
}
return builder;
}
}
private Boolean compress;
private long compressThreshold;
protected BinaryFieldMapper(Names names, FieldType fieldType, Boolean compress, long compressThreshold,
PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
MultiFields multiFields, CopyTo copyTo) {
super(names, 1.0f, fieldType, null, null, null, postingsProvider, docValuesProvider, null, null, null, null, multiFields, copyTo);
this.compress = compress;
this.compressThreshold = compressThreshold;
}
@Override
public FieldType defaultFieldType() {
return Defaults.FIELD_TYPE;
}
@Override
public FieldDataType defaultFieldDataType() {
return null;
}
@Override
public Object valueForSearch(Object value) {
return value(value);
}
@Override
public BytesReference value(Object value) {
if (value == null) {
return null;
}
BytesReference bytes;
if (value instanceof BytesRef) {
bytes = new BytesArray((BytesRef) value);
} else if (value instanceof BytesReference) {
bytes = (BytesReference) value;
} else if (value instanceof byte[]) {
bytes = new BytesArray((byte[]) value);
} else {
try {
bytes = new BytesArray(Base64.decode(value.toString()));
} catch (IOException e) {
throw new ElasticsearchParseException("failed to convert bytes", e);
}
}
try {
return CompressorFactory.uncompressIfNeeded(bytes);
} catch (IOException e) {
throw new ElasticsearchParseException("failed to decompress source", e);
}
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
if (!fieldType().stored()) {
return;
}
byte[] value;
if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) {
return;
} else {
value = context.parser().binaryValue();
if (compress != null && compress && !CompressorFactory.isCompressed(value, 0, value.length)) {
if (compressThreshold == -1 || value.length > compressThreshold) {
BytesStreamOutput bStream = new BytesStreamOutput();
StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream);
stream.writeBytes(value, 0, value.length);
stream.close();
value = bStream.bytes().toBytes();
}
}
}
if (value == null) {
return;
}
fields.add(new Field(names.indexName(), value, fieldType));
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
builder.field("type", contentType());
if (includeDefaults || !names.name().equals(names.indexNameClean())) {
builder.field("index_name", names.indexNameClean());
}
if (compress != null) {
builder.field("compress", compress);
} else if (includeDefaults) {
builder.field("compress", false);
}
if (compressThreshold != -1) {
builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString());
} else if (includeDefaults) {
builder.field("compress_threshold", -1);
}
if (includeDefaults || fieldType.stored() != defaultFieldType().stored()) {
builder.field("store", fieldType.stored());
}
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
BinaryFieldMapper sourceMergeWith = (BinaryFieldMapper) mergeWith;
if (!mergeContext.mergeFlags().simulate()) {
if (sourceMergeWith.compress != null) {
this.compress = sourceMergeWith.compress;
}
if (sourceMergeWith.compressThreshold != -1) {
this.compressThreshold = sourceMergeWith.compressThreshold;
}
}
}
@Override
public boolean hasDocValues() {
return false;
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_core_BinaryFieldMapper.java
|
2,627 |
public final class BinaryClassDefinitionProxy extends BinaryClassDefinition implements ClassDefinition {
public BinaryClassDefinitionProxy(int factoryId, int classId, int version, byte[] binary) {
this.classId = classId;
this.version = version;
this.factoryId = factoryId;
setBinary(binary);
}
public ClassDefinition toReal(SerializationContext context) throws IOException {
final ClassDefinition cd = context.lookup(factoryId, classId, version);
return cd != null ? cd : context.createClassDefinition(factoryId, getBinary());
}
public FieldDefinition get(String name) {
throw new UnsupportedOperationException();
}
public FieldDefinition get(int fieldIndex) {
throw new UnsupportedOperationException();
}
public boolean hasField(String fieldName) {
throw new UnsupportedOperationException();
}
public Set<String> getFieldNames() {
throw new UnsupportedOperationException();
}
public FieldType getFieldType(String fieldName) {
throw new UnsupportedOperationException();
}
public int getFieldClassId(String fieldName) {
throw new UnsupportedOperationException();
}
public int getFieldCount() {
throw new UnsupportedOperationException();
}
public void writeData(ObjectDataOutput out) throws IOException {
throw new UnsupportedOperationException();
}
public void readData(ObjectDataInput in) throws IOException {
throw new UnsupportedOperationException();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_serialization_BinaryClassDefinitionProxy.java
|
69 |
public interface TitanRelation extends TitanElement {
/**
* Establishes a unidirectional edge between this relation and the given vertex for the specified label.
* The label must be defined as unidirected, see {@link EdgeLabel#isUnidirected()}.
*
* @param label the unidirected label to connect by
* @param vertex the vertex to attach to this relation
*/
public void setProperty(EdgeLabel label, TitanVertex vertex);
/**
* Returns the vertex associated to this relation by a unidirected edge of the given label, or NULL if no such vertex exists.
*
* @param label the unidirected label to look up
* @return the vertex connected by the given label, or NULL
*/
public TitanVertex getProperty(EdgeLabel label);
/**
* Returns the type of this relation.
* <p/>
* The type is either a label ({@link EdgeLabel}) if this relation is an edge, or a key ({@link PropertyKey}) if this
* relation is a property.
*
* @return Type of this relation
*/
public RelationType getType();
/**
* Returns the direction of this relation from the perspective of the specified vertex.
*
* @param vertex vertex on which the relation is incident
* @return The direction of this relation from the perspective of the specified vertex.
* @throws InvalidElementException if this relation is not incident on the vertex
*/
public Direction getDirection(TitanVertex vertex);
/**
* Checks whether this relation is incident on the specified vertex.
*
* @param vertex vertex to check incidence for
* @return true, if this relation is incident on the vertex, else false
*/
public boolean isIncidentOn(TitanVertex vertex);
/**
* Checks whether this relation is a loop.
* A relation is a loop if it connects a vertex with itself.
*
* @return true, if this relation is a loop, else false.
*/
boolean isLoop();
/**
* Checks whether this relation is a property.
*
* @return true, if this relation is a property, else false.
* @see TitanProperty
*/
boolean isProperty();
/**
* Checks whether this relation is an edge.
*
* @return true, if this relation is an edge, else false.
* @see TitanEdge
*/
boolean isEdge();
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_TitanRelation.java
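A hedged sketch exercising only the methods declared above; the relation and vertex are assumed to come from an open Titan transaction.
void describe(TitanRelation relation, TitanVertex vertex) {
    if (relation.isIncidentOn(vertex)) {
        // direction is always reported relative to a concrete endpoint
        System.out.println(relation.getType() + " runs "
                + relation.getDirection(vertex) + " from this vertex");
    }
    if (relation.isEdge() && relation.isLoop()) {
        System.out.println("self-edge: both endpoints are the same vertex");
    }
}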
|
90 |
public interface StaticAssetStorageService {
StaticAssetStorage findStaticAssetStorageById(Long id);
/**
* @deprecated Use createStaticAssetStorageFromFile instead.
* @return
*/
StaticAssetStorage create();
StaticAssetStorage readStaticAssetStorageByStaticAssetId(Long id);
StaticAssetStorage save(StaticAssetStorage assetStorage);
void delete(StaticAssetStorage assetStorage);
/**
* @deprecated Use createStaticAssetStorageFromFile instead.
*
* @param uploadedFile
* @return
* @throws IOException
*/
Blob createBlob(MultipartFile uploadedFile) throws IOException;
/**
* Stores the file on the filesystem by performing an MD5 hash of
* the staticAsset.fullUrl.
*
* To ensure that files can be stored and accessed in an efficient manner, the
* system creates directories based on the characters in the hash.
*
* For example, if the URL is /product/myproductimage.jpg, then the MD5 would be
* 35ec52a8dbd8cf3e2c650495001fe55f resulting in the following file on the filesystem
* {assetFileSystemPath}/35/ec/myproductimage.jpg.
*
* If there is a "siteId" in the BroadleafRequestContext then the site is also distributed
* using a similar algorithm but the system attempts to keep images for sites in their own
* directory resulting in an extra two folders required to reach any given product. So, for
* site with id 125, the system will MD5 "/site-125" in order to build the URL string. "/site-125" has an md5
* string of "7fde295edac6ca7f85d0368ea741b241".
*
* So, in this case with the above product URL in site125, the full URL on the filesystem
* will be:
*
* {assetFileSystemPath}/7f/site-125/35/ec/myproductimage.jpg.
*
* This algorithm has the following benefits:
* - Efficient file-system storage
* - A balanced tree of files that supports 10 million files
*
* If support for more files is needed, implementors should consider one of the following approaches:
* 1. Overriding the maxGeneratedFileSystemDirectories property from its default of 2 to 3
* 2. Overriding this method to introduce an alternate approach
*
* @param fullUrl The URL used to represent an asset for which a name on the fileSystem is desired.
* @param useSharedPath If false, the system will generate a path using {@link Site} information if available.
*
* @return
*/
String generateStorageFileName(String fullUrl, boolean useSharedPath);
/**
* By default, delegates a call to {@link #generateStorageFileName(String, boolean)} using <code>staticAsset.getFullUrl()</code>
* as the first argument.
*
* @param staticAsset StaticAsset for which a filename is desired.
* @param useSharedPath If false, the system will generate a path using {@link Site} information if available.
* @return
*/
String generateStorageFileName(StaticAsset staticAsset, boolean useSharedPath);
Map<String, String> getCacheFileModel(String fullUrl, SandBox sandBox, Map<String, String> parameterMap) throws Exception;
/**
* Persists the file being passed in according to the staticAsset's StorageType.
*
* @param file
* @param id
* @throws IOException
*/
void createStaticAssetStorageFromFile(MultipartFile file, StaticAsset staticAsset) throws IOException;
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_service_StaticAssetStorageService.java
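A minimal sketch of the directory-sharding scheme the javadoc above describes, assuming two generated directory levels and ignoring the site segment; shardedPath is a hypothetical helper, not part of the interface.
import java.security.MessageDigest;
static String shardedPath(String fullUrl) throws Exception {
    byte[] digest = MessageDigest.getInstance("MD5").digest(fullUrl.getBytes("UTF-8"));
    StringBuilder md5 = new StringBuilder();
    for (byte b : digest) {
        md5.append(String.format("%02x", b)); // hex-encode the 16-byte digest
    }
    String fileName = fullUrl.substring(fullUrl.lastIndexOf('/') + 1);
    // the first two hex pairs become nested directories, e.g. /35/ec/myproductimage.jpg
    return "/" + md5.substring(0, 2) + "/" + md5.substring(2, 4) + "/" + fileName;
}
// shardedPath("/product/myproductimage.jpg") -> "/35/ec/myproductimage.jpg"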
|
1,564 |
public class VerticesMap {
public static final String PROCESS_EDGES = Tokens.makeNamespace(VerticesMap.class) + ".processEdges";
public enum Counters {
VERTICES_PROCESSED,
EDGES_PROCESSED
}
public static Configuration createConfiguration(final boolean processEdges) {
final Configuration configuration = new EmptyConfiguration();
configuration.setBoolean(PROCESS_EDGES, processEdges);
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private boolean processEdges;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.processEdges = context.getConfiguration().getBoolean(PROCESS_EDGES, true);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
value.startPath();
long edgesProcessed = 0;
if (this.processEdges) {
for (final Edge edge : value.getEdges(Direction.BOTH)) {
((StandardFaunusEdge) edge).clearPaths();
edgesProcessed++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.EDGES_PROCESSED, edgesProcessed);
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
context.write(NullWritable.get(), value);
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_VerticesMap.java
|
374 |
public static class TestCombiner
extends Combiner<String, Integer, Integer> {
private transient int sum;
@Override
public void combine(String key, Integer value) {
sum += value;
}
@Override
public Integer finalizeChunk() {
int v = sum;
sum = 0;
return v;
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_DistributedMapperClientMapReduceTest.java
|
2,599 |
private class MasterPinger implements Runnable {
private volatile boolean running = true;
public void stop() {
this.running = false;
}
@Override
public void run() {
if (!running) {
// return and don't spawn...
return;
}
final DiscoveryNode masterToPing = masterNode;
if (masterToPing == null) {
// master is null, should not happen, but we are still running, so reschedule
threadPool.schedule(pingInterval, ThreadPool.Names.SAME, MasterPinger.this);
return;
}
transportService.sendRequest(masterToPing, MasterPingRequestHandler.ACTION, new MasterPingRequest(nodesProvider.nodes().localNode().id(), masterToPing.id()), options().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout),
new BaseTransportResponseHandler<MasterPingResponseResponse>() {
@Override
public MasterPingResponseResponse newInstance() {
return new MasterPingResponseResponse();
}
@Override
public void handleResponse(MasterPingResponseResponse response) {
if (!running) {
return;
}
// reset the counter, we got a good result
MasterFaultDetection.this.retryCount = 0;
// check if the master node did not get switched on us..., if it did, we simply return with no reschedule
if (masterToPing.equals(MasterFaultDetection.this.masterNode())) {
if (!response.connectedToMaster) {
logger.trace("[master] [{}] does not have us registered with it...", masterToPing);
notifyDisconnectedFromMaster();
}
// we don't stop on disconnection from master, we keep pinging it
threadPool.schedule(pingInterval, ThreadPool.Names.SAME, MasterPinger.this);
}
}
@Override
public void handleException(TransportException exp) {
if (!running) {
return;
}
if (exp instanceof ConnectTransportException) {
// ignore this one, we already handle it by registering a connection listener
return;
}
synchronized (masterNodeMutex) {
// check if the master node did not get switched on us...
if (masterToPing.equals(MasterFaultDetection.this.masterNode())) {
if (exp.getCause() instanceof NoLongerMasterException) {
logger.debug("[master] pinging a master {} that is no longer a master", masterNode);
notifyMasterFailure(masterToPing, "no longer master");
return;
} else if (exp.getCause() instanceof NotMasterException) {
logger.debug("[master] pinging a master {} that is not the master", masterNode);
notifyMasterFailure(masterToPing, "not master");
return;
} else if (exp.getCause() instanceof NodeDoesNotExistOnMasterException) {
logger.debug("[master] pinging a master {} but we do not exists on it, act as if its master failure", masterNode);
notifyMasterFailure(masterToPing, "do not exists on master, act as master failure");
return;
}
int retryCount = ++MasterFaultDetection.this.retryCount;
logger.trace("[master] failed to ping [{}], retry [{}] out of [{}]", exp, masterNode, retryCount, pingRetryCount);
if (retryCount >= pingRetryCount) {
logger.debug("[master] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", masterNode, pingRetryCount, pingRetryTimeout);
// not good, failure
notifyMasterFailure(masterToPing, "failed to ping, tried [" + pingRetryCount + "] times, each with maximum [" + pingRetryTimeout + "] timeout");
} else {
// resend the request, not reschedule, rely on send timeout
transportService.sendRequest(masterToPing, MasterPingRequestHandler.ACTION, new MasterPingRequest(nodesProvider.nodes().localNode().id(), masterToPing.id()), options().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout), this);
}
}
}
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
}
}
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_fd_MasterFaultDetection.java
|
161 |
private static final class SingleTargetCallback implements Callback<Object> {
final Address target;
final MultiTargetCallback parent;
private SingleTargetCallback(Address target, MultiTargetCallback parent) {
this.target = target;
this.parent = parent;
}
@Override
public void notify(Object object) {
parent.notify(target, object);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_MultiTargetClientRequest.java
|
916 |
public class LockProxy extends AbstractDistributedObject<LockServiceImpl> implements ILock {
private final String name;
private final LockProxySupport lockSupport;
private final Data key;
private final int partitionId;
public LockProxy(NodeEngine nodeEngine, LockServiceImpl lockService, String name) {
super(nodeEngine, lockService);
this.name = name;
this.key = getNameAsPartitionAwareData();
this.lockSupport = new LockProxySupport(new InternalLockNamespace(name));
this.partitionId = getNodeEngine().getPartitionService().getPartitionId(key);
}
@Override
public boolean isLocked() {
return lockSupport.isLocked(getNodeEngine(), key);
}
@Override
public boolean isLockedByCurrentThread() {
return lockSupport.isLockedByCurrentThread(getNodeEngine(), key);
}
@Override
public int getLockCount() {
return lockSupport.getLockCount(getNodeEngine(), key);
}
@Override
public long getRemainingLeaseTime() {
return lockSupport.getRemainingLeaseTime(getNodeEngine(), key);
}
@Override
public void lock() {
lockSupport.lock(getNodeEngine(), key);
}
@Override
public void lock(long leaseTime, TimeUnit timeUnit) {
shouldBePositive(leaseTime, "leaseTime");
lockSupport.lock(getNodeEngine(), key, timeUnit.toMillis(leaseTime));
}
@Override
public void lockInterruptibly() throws InterruptedException {
lock();
}
@Override
public boolean tryLock() {
return lockSupport.tryLock(getNodeEngine(), key);
}
@Override
public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
if (unit == null) {
throw new NullPointerException("unit can't be null");
}
return lockSupport.tryLock(getNodeEngine(), key, time, unit);
}
@Override
public void unlock() {
lockSupport.unlock(getNodeEngine(), key);
}
@Override
public void forceUnlock() {
lockSupport.forceUnlock(getNodeEngine(), key);
}
@Override
public Condition newCondition() {
throw new UnsupportedOperationException("Use ICondition.newCondition(String name) instead!");
}
@Override
public ICondition newCondition(String name) {
if (name == null) {
throw new NullPointerException("Condition name can't be null");
}
return new ConditionImpl(this, name);
}
@Override
public String getName() {
return name;
}
@Override
public String getServiceName() {
return LockService.SERVICE_NAME;
}
@Deprecated
public Object getKey() {
return getName();
}
public Data getKeyData() {
return key;
}
public int getPartitionId() {
return partitionId;
}
ObjectNamespace getNamespace() {
return lockSupport.getNamespace();
}
// will be removed when HazelcastInstance.getLock(Object key) is removed from API
public static String convertToStringKey(Object key, SerializationService serializationService) {
if (key instanceof String) {
return String.valueOf(key);
} else {
Data data = serializationService.toData(key, PARTITIONING_STRATEGY);
// name = Integer.toString(data.hashCode());
byte[] buffer = data.getBuffer();
return Arrays.toString(buffer);
}
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("ILock{");
sb.append("name='").append(name).append('\'');
sb.append('}');
return sb.toString();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_concurrent_lock_LockProxy.java
|
14 |
static final class AsyncRun extends Async {
final Runnable fn;
final CompletableFuture<Void> dst;
AsyncRun(Runnable fn, CompletableFuture<Void> dst) {
this.fn = fn; this.dst = dst;
}
public final boolean exec() {
CompletableFuture<Void> d; Throwable ex;
if ((d = this.dst) != null && d.result == null) {
try {
fn.run();
ex = null;
} catch (Throwable rex) {
ex = rex;
}
d.internalComplete(null, ex);
}
return true;
}
private static final long serialVersionUID = 5232453952276885070L;
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
|
323 |
@RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class ClientMapTest {
static HazelcastInstance client;
static HazelcastInstance server;
static TestMapStore flushMapStore = new TestMapStore();
static TestMapStore transientMapStore = new TestMapStore();
@BeforeClass
public static void init() {
Config config = new Config();
config.getMapConfig("flushMap").
setMapStoreConfig(new MapStoreConfig()
.setWriteDelaySeconds(1000)
.setImplementation(flushMapStore));
config.getMapConfig("putTransientMap").
setMapStoreConfig(new MapStoreConfig()
.setWriteDelaySeconds(1000)
.setImplementation(transientMapStore));
server = Hazelcast.newHazelcastInstance(config);
client = HazelcastClient.newHazelcastClient(null);
}
public IMap createMap() {
return client.getMap(randomString());
}
@AfterClass
public static void destroy() {
client.shutdown();
Hazelcast.shutdownAll();
}
@Test
public void testIssue537() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(2);
final CountDownLatch nullLatch = new CountDownLatch(2);
final IMap map = createMap();
final EntryListener listener = new EntryAdapter() {
public void entryAdded(EntryEvent event) {
latch.countDown();
}
public void entryEvicted(EntryEvent event) {
final Object value = event.getValue();
final Object oldValue = event.getOldValue();
if (value != null) {
nullLatch.countDown();
}
if (oldValue != null) {
nullLatch.countDown();
}
latch.countDown();
}
};
final String id = map.addEntryListener(listener, true);
map.put("key1", new GenericEvent("value1"), 2, TimeUnit.SECONDS);
assertTrue(latch.await(10, TimeUnit.SECONDS));
assertTrue(nullLatch.await(1, TimeUnit.SECONDS));
map.removeEntryListener(id);
map.put("key2", new GenericEvent("value2"));
assertEquals(1, map.size());
}
@Test
public void testContains() throws Exception {
final IMap map = createMap();
fillMap(map);
assertFalse(map.containsKey("key10"));
assertTrue(map.containsKey("key1"));
assertFalse(map.containsValue("value10"));
assertTrue(map.containsValue("value1"));
}
@Test
public void testGet() {
final IMap map = createMap();
fillMap(map);
for (int i = 0; i < 10; i++) {
Object o = map.get("key" + i);
assertEquals("value" + i, o);
}
}
@Test
public void testRemoveAndDelete() {
final IMap map = createMap();
fillMap(map);
assertNull(map.remove("key10"));
map.delete("key9");
assertEquals(9, map.size());
for (int i = 0; i < 9; i++) {
Object o = map.remove("key" + i);
assertEquals("value" + i, o);
}
assertEquals(0, map.size());
}
@Test
public void testRemoveIfSame() {
final IMap map = createMap();
fillMap(map);
assertFalse(map.remove("key2", "value"));
assertEquals(10, map.size());
assertTrue(map.remove("key2", "value2"));
assertEquals(9, map.size());
}
@Test
public void testFlush() throws InterruptedException {
flushMapStore.latch = new CountDownLatch(1);
IMap<Object, Object> map = client.getMap("flushMap");
map.put(1l, "value");
map.flush();
assertOpenEventually(flushMapStore.latch, 5);
}
@Test
public void testGetAllPutAll() {
final IMap map = createMap();
Map mm = new HashMap();
for (int i = 0; i < 100; i++) {
mm.put(i, i);
}
map.putAll(mm);
assertEquals(map.size(), 100);
for (int i = 0; i < 100; i++) {
assertEquals(map.get(i), i);
}
Set ss = new HashSet();
ss.add(1);
ss.add(3);
Map m2 = map.getAll(ss);
assertEquals(m2.size(), 2);
assertEquals(m2.get(1), 1);
assertEquals(m2.get(3), 3);
}
@Test
public void testAsyncGet() throws Exception {
final IMap map = createMap();
fillMap(map);
Future f = map.getAsync("key1");
Object o = f.get();
assertEquals("value1", o);
}
@Test
public void testAsyncPut() throws Exception {
final IMap map = createMap();
fillMap(map);
Future f = map.putAsync("key3", "value");
Object o = f.get();
assertEquals("value3", o);
assertEquals("value", map.get("key3"));
}
@Test
public void testAsyncPutWithTtl() throws Exception {
final IMap map = createMap();
final CountDownLatch latch = new CountDownLatch(1);
map.addEntryListener(new EntryAdapter<String, String>() {
public void entryEvicted(EntryEvent<String, String> event) {
latch.countDown();
}
}, true);
Future<String> f1 = map.putAsync("key", "value1", 3, TimeUnit.SECONDS);
String f1Val = f1.get();
assertNull(f1Val);
assertEquals("value1", map.get("key"));
assertTrue(latch.await(10, TimeUnit.SECONDS));
assertNull(map.get("key"));
}
@Test
public void testAsyncRemove() throws Exception {
final IMap map = createMap();
fillMap(map);
Future f = map.removeAsync("key4");
Object o = f.get();
assertEquals("value4", o);
assertEquals(9, map.size());
}
@Test
public void testTryPutRemove() throws Exception {
final IMap map = createMap();
assertTrue(map.tryPut("key1", "value1", 1, TimeUnit.SECONDS));
assertTrue(map.tryPut("key2", "value2", 1, TimeUnit.SECONDS));
map.lock("key1");
map.lock("key2");
final CountDownLatch latch = new CountDownLatch(2);
new Thread() {
public void run() {
boolean result = map.tryPut("key1", "value3", 1, TimeUnit.SECONDS);
if (!result) {
latch.countDown();
}
}
}.start();
new Thread() {
public void run() {
boolean result = map.tryRemove("key2", 1, TimeUnit.SECONDS);
if (!result) {
latch.countDown();
}
}
}.start();
assertTrue(latch.await(20, TimeUnit.SECONDS));
assertEquals("value1", map.get("key1"));
assertEquals("value2", map.get("key2"));
map.forceUnlock("key1");
map.forceUnlock("key2");
}
@Test
public void testPutTtl() throws Exception {
final IMap map = createMap();
map.put("key1", "value1", 1, TimeUnit.SECONDS);
assertNotNull(map.get("key1"));
Thread.sleep(2000);
assertNull(map.get("key1"));
}
@Test
public void testPutIfAbsent() throws Exception {
final IMap map = createMap();
assertNull(map.putIfAbsent("key1", "value1"));
assertEquals("value1", map.putIfAbsent("key1", "value3"));
}
@Test
public void testPutIfAbsentTtl() throws Exception {
final IMap map = createMap();
assertNull(map.putIfAbsent("key1", "value1", 1, TimeUnit.SECONDS));
assertEquals("value1", map.putIfAbsent("key1", "value3", 1, TimeUnit.SECONDS));
Thread.sleep(6000);
assertNull(map.putIfAbsent("key1", "value3", 1, TimeUnit.SECONDS));
assertEquals("value3", map.putIfAbsent("key1", "value4", 1, TimeUnit.SECONDS));
}
@Test
public void testSet() throws Exception {
final IMap map = createMap();
map.set("key1", "value1");
assertEquals("value1", map.get("key1"));
map.set("key1", "value2");
assertEquals("value2", map.get("key1"));
map.set("key1", "value3", 1, TimeUnit.SECONDS);
assertEquals("value3", map.get("key1"));
Thread.sleep(2000);
assertNull(map.get("key1"));
}
@Test
public void testPutTransient() throws InterruptedException {
transientMapStore.latch = new CountDownLatch(1);
IMap<Object, Object> map = client.getMap("putTransientMap");
map.putTransient(3L, "value1", 100, TimeUnit.SECONDS);
map.flush();
assertFalse(transientMapStore.latch.await(5, TimeUnit.SECONDS));
}
@Test
public void testLock() throws Exception {
final IMap map = createMap();
map.put("key1", "value1");
assertEquals("value1", map.get("key1"));
map.lock("key1");
final CountDownLatch latch = new CountDownLatch(1);
new Thread() {
public void run() {
map.tryPut("key1", "value2", 1, TimeUnit.SECONDS);
latch.countDown();
}
}.start();
assertTrue(latch.await(5, TimeUnit.SECONDS));
assertEquals("value1", map.get("key1"));
map.forceUnlock("key1");
}
@Test
public void testLockTtl() throws Exception {
final IMap map = createMap();
map.put("key1", "value1");
assertEquals("value1", map.get("key1"));
map.lock("key1", 2, TimeUnit.SECONDS);
final CountDownLatch latch = new CountDownLatch(1);
new Thread() {
public void run() {
map.tryPut("key1", "value2", 5, TimeUnit.SECONDS);
latch.countDown();
}
}.start();
assertTrue(latch.await(10, TimeUnit.SECONDS));
assertFalse(map.isLocked("key1"));
assertEquals("value2", map.get("key1"));
map.forceUnlock("key1");
}
@Test
public void testLockTtl2() throws Exception {
final IMap map = createMap();
map.lock("key1", 3, TimeUnit.SECONDS);
final CountDownLatch latch = new CountDownLatch(2);
new Thread() {
public void run() {
if (!map.tryLock("key1")) {
latch.countDown();
}
try {
if (map.tryLock("key1", 5, TimeUnit.SECONDS)) {
latch.countDown();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}.start();
assertTrue(latch.await(10, TimeUnit.SECONDS));
map.forceUnlock("key1");
}
@Test
public void testTryLock() throws Exception {
final IMap map = createMap();
final IMap tempMap = map;
assertTrue(tempMap.tryLock("key1", 2, TimeUnit.SECONDS));
final CountDownLatch latch = new CountDownLatch(1);
new Thread() {
public void run() {
try {
if (!tempMap.tryLock("key1", 2, TimeUnit.SECONDS)) {
latch.countDown();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}.start();
assertTrue(latch.await(100, TimeUnit.SECONDS));
assertTrue(tempMap.isLocked("key1"));
final CountDownLatch latch2 = new CountDownLatch(1);
new Thread() {
public void run() {
try {
if (tempMap.tryLock("key1", 20, TimeUnit.SECONDS)) {
latch2.countDown();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}.start();
Thread.sleep(1000);
tempMap.unlock("key1");
assertTrue(latch2.await(100, TimeUnit.SECONDS));
assertTrue(tempMap.isLocked("key1"));
tempMap.forceUnlock("key1");
}
@Test
public void testForceUnlock() throws Exception {
final IMap map = createMap();
map.lock("key1");
final CountDownLatch latch = new CountDownLatch(1);
new Thread() {
public void run() {
map.forceUnlock("key1");
latch.countDown();
}
}.start();
assertTrue(latch.await(100, TimeUnit.SECONDS));
assertFalse(map.isLocked("key1"));
}
@Test
public void testValues() {
final IMap map = createMap();
fillMap(map);
final Collection values = map.values(new SqlPredicate("this == value1"));
assertEquals(1, values.size());
assertEquals("value1", values.iterator().next());
}
@Test
public void testReplace() throws Exception {
final IMap map = createMap();
assertNull(map.replace("key1", "value1"));
map.put("key1", "value1");
assertEquals("value1", map.replace("key1", "value2"));
assertEquals("value2", map.get("key1"));
assertFalse(map.replace("key1", "value1", "value3"));
assertEquals("value2", map.get("key1"));
assertTrue(map.replace("key1", "value2", "value3"));
assertEquals("value3", map.get("key1"));
}
@Test
public void testSubmitToKey() throws Exception {
final IMap map = createMap();
map.put(1, 1);
Future f = map.submitToKey(1, new IncrementorEntryProcessor());
assertEquals(2, f.get());
assertEquals(2, map.get(1));
}
@Test
public void testSubmitToNonExistentKey() throws Exception {
final IMap map = createMap();
Future f = map.submitToKey(11, new IncrementorEntryProcessor());
assertEquals(1, f.get());
assertEquals(1, map.get(11));
}
@Test
public void testSubmitToKeyWithCallback() throws Exception {
final IMap map = createMap();
map.put(1, 1);
final CountDownLatch latch = new CountDownLatch(1);
ExecutionCallback executionCallback = new ExecutionCallback() {
@Override
public void onResponse(Object response) {
latch.countDown();
}
@Override
public void onFailure(Throwable t) {
}
};
map.submitToKey(1, new IncrementorEntryProcessor(), executionCallback);
assertTrue(latch.await(5, TimeUnit.SECONDS));
assertEquals(2, map.get(1));
}
@Test
public void testListener() throws InterruptedException {
final IMap map = createMap();
final CountDownLatch latch1Add = new CountDownLatch(5);
final CountDownLatch latch1Remove = new CountDownLatch(2);
final CountDownLatch latch2Add = new CountDownLatch(1);
final CountDownLatch latch2Remove = new CountDownLatch(1);
EntryListener listener1 = new EntryAdapter() {
public void entryAdded(EntryEvent event) {
latch1Add.countDown();
}
public void entryRemoved(EntryEvent event) {
latch1Remove.countDown();
}
};
EntryListener listener2 = new EntryAdapter() {
public void entryAdded(EntryEvent event) {
latch2Add.countDown();
}
public void entryRemoved(EntryEvent event) {
latch2Remove.countDown();
}
};
map.addEntryListener(listener1, false);
map.addEntryListener(listener2, "key3", true);
Thread.sleep(1000);
map.put("key1", "value1");
map.put("key2", "value2");
map.put("key3", "value3");
map.put("key4", "value4");
map.put("key5", "value5");
map.remove("key1");
map.remove("key3");
assertTrue(latch1Add.await(10, TimeUnit.SECONDS));
assertTrue(latch1Remove.await(10, TimeUnit.SECONDS));
assertTrue(latch2Add.await(5, TimeUnit.SECONDS));
assertTrue(latch2Remove.await(5, TimeUnit.SECONDS));
}
@Test
public void testPredicateListenerWithPortableKey() throws InterruptedException {
final IMap tradeMap = createMap();
final CountDownLatch countDownLatch = new CountDownLatch(1);
final AtomicInteger atomicInteger = new AtomicInteger(0);
EntryListener listener = new EntryAdapter() {
@Override
public void entryAdded(EntryEvent event) {
atomicInteger.incrementAndGet();
countDownLatch.countDown();
}
};
AuthenticationRequest key = new AuthenticationRequest(new UsernamePasswordCredentials("a", "b"));
tradeMap.addEntryListener(listener, key, true);
AuthenticationRequest key2 = new AuthenticationRequest(new UsernamePasswordCredentials("a", "c"));
tradeMap.put(key2, 1);
assertFalse(countDownLatch.await(5, TimeUnit.SECONDS));
assertEquals(0, atomicInteger.get());
}
@Test
public void testBasicPredicate() {
final IMap map = createMap();
fillMap(map);
final Collection collection = map.values(new SqlPredicate("this == value1"));
assertEquals("value1", collection.iterator().next());
final Set set = map.keySet(new SqlPredicate("this == value1"));
assertEquals("key1", set.iterator().next());
final Set<Map.Entry<String, String>> set1 = map.entrySet(new SqlPredicate("this == value1"));
assertEquals("key1", set1.iterator().next().getKey());
assertEquals("value1", set1.iterator().next().getValue());
}
private void fillMap(IMap map) {
for (int i = 0; i < 10; i++) {
map.put("key" + i, "value" + i);
}
}
/**
* Issue #923
*/
@Test
public void testPartitionAwareKey() {
String name = randomString();
PartitionAwareKey key = new PartitionAwareKey("key", "123");
String value = "value";
IMap<Object, Object> map1 = server.getMap(name);
map1.put(key, value);
assertEquals(value, map1.get(key));
IMap<Object, Object> map2 = client.getMap(name);
assertEquals(value, map2.get(key));
}
private static class PartitionAwareKey implements PartitionAware, Serializable {
private final String key;
private final String pk;
private PartitionAwareKey(String key, String pk) {
this.key = key;
this.pk = pk;
}
@Override
public Object getPartitionKey() {
return pk;
}
}
@Test
public void testExecuteOnKeys() throws Exception {
String name = randomString();
IMap<Integer, Integer> map = client.getMap(name);
IMap<Integer, Integer> map2 = client.getMap(name);
for (int i = 0; i < 10; i++) {
map.put(i, 0);
}
Set keys = new HashSet();
keys.add(1);
keys.add(4);
keys.add(7);
keys.add(9);
final Map<Integer, Object> resultMap = map2.executeOnKeys(keys, new IncrementorEntryProcessor());
assertEquals(1, resultMap.get(1));
assertEquals(1, resultMap.get(4));
assertEquals(1, resultMap.get(7));
assertEquals(1, resultMap.get(9));
assertEquals(1, (int) map.get(1));
assertEquals(0, (int) map.get(2));
assertEquals(0, (int) map.get(3));
assertEquals(1, (int) map.get(4));
assertEquals(0, (int) map.get(5));
assertEquals(0, (int) map.get(6));
assertEquals(1, (int) map.get(7));
assertEquals(0, (int) map.get(8));
assertEquals(1, (int) map.get(9));
}
/**
* Issue #996
*/
@Test
public void testEntryListener() throws InterruptedException {
final CountDownLatch gateAdd = new CountDownLatch(2);
final CountDownLatch gateRemove = new CountDownLatch(1);
final CountDownLatch gateEvict = new CountDownLatch(1);
final CountDownLatch gateUpdate = new CountDownLatch(1);
final String mapName = randomString();
final IMap<Object, Object> serverMap = server.getMap(mapName);
serverMap.put(3, new Deal(3));
final IMap<Object, Object> clientMap = client.getMap(mapName);
assertEquals(1, clientMap.size());
final EntryListener listener = new EntListener(gateAdd, gateRemove, gateEvict, gateUpdate);
clientMap.addEntryListener(listener, new SqlPredicate("id=1"), 2, true);
clientMap.put(2, new Deal(1));
clientMap.put(2, new Deal(1));
clientMap.remove(2);
clientMap.put(2, new Deal(1));
clientMap.evict(2);
assertTrue(gateAdd.await(10, TimeUnit.SECONDS));
assertTrue(gateRemove.await(10, TimeUnit.SECONDS));
assertTrue(gateEvict.await(10, TimeUnit.SECONDS));
assertTrue(gateUpdate.await(10, TimeUnit.SECONDS));
}
static class EntListener implements EntryListener<Integer, Deal>, Serializable {
private final CountDownLatch _gateAdd;
private final CountDownLatch _gateRemove;
private final CountDownLatch _gateEvict;
private final CountDownLatch _gateUpdate;
EntListener(CountDownLatch gateAdd, CountDownLatch gateRemove, CountDownLatch gateEvict, CountDownLatch gateUpdate) {
_gateAdd = gateAdd;
_gateRemove = gateRemove;
_gateEvict = gateEvict;
_gateUpdate = gateUpdate;
}
@Override
public void entryAdded(EntryEvent<Integer, Deal> arg0) {
_gateAdd.countDown();
}
@Override
public void entryEvicted(EntryEvent<Integer, Deal> arg0) {
_gateEvict.countDown();
}
@Override
public void entryRemoved(EntryEvent<Integer, Deal> arg0) {
_gateRemove.countDown();
}
@Override
public void entryUpdated(EntryEvent<Integer, Deal> arg0) {
_gateUpdate.countDown();
}
}
static class Deal implements Serializable {
Integer id;
Deal(Integer id) {
this.id = id;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
}
private static class IncrementorEntryProcessor extends AbstractEntryProcessor implements DataSerializable {
IncrementorEntryProcessor() {
super(true);
}
public Object process(Map.Entry entry) {
Integer value = (Integer) entry.getValue();
if (value == null) {
value = 0;
}
if (value == -1) {
entry.setValue(null);
return null;
}
value++;
entry.setValue(value);
return value;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
}
@Override
public void readData(ObjectDataInput in) throws IOException {
}
public void processBackup(Map.Entry entry) {
entry.setValue((Integer) entry.getValue() + 1);
}
}
@Test
public void testMapStatistics() throws Exception {
String name = randomString();
final LocalMapStats localMapStats = server.getMap(name).getLocalMapStats();
final IMap map = client.getMap(name);
final int operationCount = 1000;
for (int i = 0; i < operationCount; i++) {
map.put(i, i);
map.get(i);
map.remove(i);
}
assertEquals("put count", operationCount, localMapStats.getPutOperationCount());
assertEquals("get count", operationCount, localMapStats.getGetOperationCount());
assertEquals("remove count", operationCount, localMapStats.getRemoveOperationCount());
assertTrue("put latency", 0 < localMapStats.getTotalPutLatency());
assertTrue("get latency", 0 < localMapStats.getTotalGetLatency());
assertTrue("remove latency", 0 < localMapStats.getTotalRemoveLatency());
}
static class TestMapStore extends MapStoreAdapter<Long, String> {
public volatile CountDownLatch latch;
@Override
public void store(Long key, String value) {
if (latch != null) {
latch.countDown();
}
}
@Override
public void storeAll(Map<Long, String> map) {
if (latch != null) {
latch.countDown();
}
}
@Override
public void deleteAll(Collection<Long> keys) {
if (latch != null) {
latch.countDown();
}
}
@Override
public void delete(Long key) {
if (latch != null) {
latch.countDown();
}
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
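A minimal, self-contained Java sketch (illustrative, not part of the dataset row above) of the CountDownLatch pattern these client-map tests rely on: the listener thread counts down, and the test thread awaits with a timeout so a missed event fails fast instead of hanging.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
public class LatchPatternSketch {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        new Thread(new Runnable() {
            public void run() {
                // an event callback (entryAdded, entryEvicted, ...) would fire here
                latch.countDown();
            }
        }).start();
        // the tests wrap this in assertTrue(latch.await(...)); plain check here
        if (!latch.await(5, TimeUnit.SECONDS)) {
            throw new AssertionError("callback never fired");
        }
    }
}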
5,841 |
public class DocIdSetCollector extends XCollector {
private final DocSetCache docSetCache;
private final Collector collector;
private final List<ContextDocIdSet> docSets;
private boolean currentHasDocs;
private ContextDocIdSet currentContext;
private FixedBitSet currentSet;
public DocIdSetCollector(DocSetCache docSetCache, Collector collector) {
this.docSetCache = docSetCache;
this.collector = collector;
this.docSets = new ArrayList<ContextDocIdSet>();
}
public List<ContextDocIdSet> docSets() {
return docSets;
}
public void release() {
for (ContextDocIdSet docSet : docSets) {
docSetCache.release(docSet);
}
}
@Override
public void setScorer(Scorer scorer) throws IOException {
collector.setScorer(scorer);
}
@Override
public void collect(int doc) throws IOException {
collector.collect(doc);
currentHasDocs = true;
currentSet.set(doc);
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
collector.setNextReader(context);
if (currentContext != null) {
if (currentHasDocs) {
docSets.add(currentContext);
} else {
docSetCache.release(currentContext);
}
}
currentContext = docSetCache.obtain(context);
currentSet = (FixedBitSet) currentContext.docSet;
currentHasDocs = false;
}
@Override
public void postCollection() {
if (collector instanceof XCollector) {
((XCollector) collector).postCollection();
}
if (currentContext != null) {
if (currentHasDocs) {
docSets.add(currentContext);
} else {
docSetCache.release(currentContext);
}
currentContext = null;
currentSet = null;
currentHasDocs = false;
}
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_internal_DocIdSetCollector.java
|
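The collector above is a delegate-plus-bookkeeping wrapper: every callback forwards to the wrapped Collector and then records side information. A generic stand-alone sketch of that shape (Listener and the class names are stand-ins, not Lucene or Elasticsearch types):
public class DelegationSketch {
    interface Listener { void onDoc(int doc); }
    static class CountingListener implements Listener {
        private final Listener delegate;
        private int count; // bookkeeping layered over the delegate
        CountingListener(Listener delegate) { this.delegate = delegate; }
        public void onDoc(int doc) {
            delegate.onDoc(doc); // always forward first, like collect() above
            count++;             // then do the extra bookkeeping
        }
        int count() { return count; }
    }
    public static void main(String[] args) {
        CountingListener listener = new CountingListener(new Listener() {
            public void onDoc(int doc) { /* downstream work */ }
        });
        listener.onDoc(1);
        listener.onDoc(7);
        System.out.println(listener.count()); // 2
    }
}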
1,126 |
public class OSQLFunctionSysdate extends OSQLFunctionAbstract {
public static final String NAME = "sysdate";
private final Date now;
private SimpleDateFormat format;
/**
* Capture the date at construction so that every invocation during the same iteration sees the same value.
*/
public OSQLFunctionSysdate() {
super(NAME, 0, 2);
now = new Date();
}
public Object execute(final OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters,
OCommandContext iContext) {
if (iParameters.length == 0)
return now;
if (format == null) {
format = new SimpleDateFormat((String) iParameters[0]);
if (iParameters.length == 2)
format.setTimeZone(TimeZone.getTimeZone(iParameters[1].toString()));
else
format.setTimeZone(ODateHelper.getDatabaseTimeZone());
}
return format.format(now);
}
public boolean aggregateResults(final Object[] configuredParameters) {
return false;
}
public String getSyntax() {
return "Syntax error: sysdate([<format>] [,<timezone>])";
}
@Override
public Object getResult() {
return null;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_misc_OSQLFunctionSysdate.java
|
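A plain-JDK sketch of what the function above computes when given a format and a timezone; the date is captured once and reused, mirroring the constructor comment. Nothing OrientDB-specific is assumed here:
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;
public class SysdateSketch {
    public static void main(String[] args) {
        Date now = new Date(); // captured once, reused for every call
        SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        format.setTimeZone(TimeZone.getTimeZone("UTC"));
        System.out.println(format.format(now));
    }
}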
43 |
public interface BiFun<A,B,T> { T apply(A a, B b); }
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
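The one-method interface above predates java.util.function.BiFunction; a minimal sketch of implementing and applying it (the interface is inlined so the sketch compiles on its own):
public class BiFunSketch {
    interface BiFun<A, B, T> { T apply(A a, B b); } // mirrors the row above
    public static void main(String[] args) {
        BiFun<Integer, Integer, Integer> sum = new BiFun<Integer, Integer, Integer>() {
            public Integer apply(Integer a, Integer b) { return a + b; }
        };
        System.out.println(sum.apply(1, 2)); // prints 3
    }
}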
1,220 |
final class CompileErrorReporter implements
DiagnosticListener<JavaFileObject> {
private IProject project;
private boolean errorReported;
private List<IFolder> sourceDirectories;
public CompileErrorReporter(IProject project) {
this.project = project;
sourceDirectories = CeylonBuilder.getSourceFolders(project);
}
public void failed() {
if (!errorReported) {
setupMarker(project, null);
}
}
public void failed(final ExitState exitState) {
Diagnostic<? extends JavaFileObject> diagnostic = null;
if (exitState.javacExitCode == Main.EXIT_ABNORMAL) {
diagnostic = new Diagnostic<JavaFileObject>() {
@Override
public javax.tools.Diagnostic.Kind getKind() {
return javax.tools.Diagnostic.Kind.ERROR;
}
@Override
public JavaFileObject getSource() {
return null;
}
@Override
public long getPosition() {
return 0;
}
@Override
public long getStartPosition() {
return 0;
}
@Override
public long getEndPosition() {
return 0;
}
@Override
public long getLineNumber() {
return 0;
}
@Override
public long getColumnNumber() {
return 0;
}
@Override
public String getCode() {
return null;
}
@Override
public String getMessage(Locale locale) {
return "The Ceylon Java backend compiler failed abnormally" +
(exitState.ceylonCodegenExceptionCount > 0 ? "\n with " + exitState.ceylonCodegenExceptionCount + " code generation exceptions" : "") +
(exitState.ceylonCodegenErroneousCount > 0 ? "\n with " + exitState.ceylonCodegenErroneousCount + " erroneous code generations" : "") +
(exitState.ceylonCodegenGarbageCount > 0 ? "\n with " + exitState.ceylonCodegenGarbageCount + " malformed Javac tree cases" : "") +
(exitState.abortingException != null ? "\n with a throwable : " + exitState.abortingException.toString() : "") +
"";
}
};
}
if (!errorReported || diagnostic != null) {
setupMarker(project, diagnostic);
}
}
@Override
public void report(Diagnostic<? extends JavaFileObject> diagnostic) {
errorReported = true;
JavaFileObject source = diagnostic.getSource();
if (source == null) {
// no source file
if (!diagnostic.toString().startsWith("Note: Created module")) {
setupMarker(project, diagnostic);
}
}
else {
IPath absolutePath = new Path(source.getName());
IFile file = null;
for (IFolder sourceDirectory : sourceDirectories) {
IPath sourceDirPath = sourceDirectory.getLocation();
if (sourceDirPath.isPrefixOf(absolutePath)) {
IResource r = sourceDirectory.findMember(absolutePath.makeRelativeTo(sourceDirPath));
if (r instanceof IFile) {
file = (IFile) r;
}
}
}
if (file == null) {
file = getWorkspace().getRoot()
.getFileForLocation(new Path(source.getName()));
}
if(file != null) {
if (CeylonBuilder.isCeylon(file)){
try {
for (IMarker m: file.findMarkers(PROBLEM_MARKER_ID, true, DEPTH_ZERO)) {
int sev = ((Integer) m.getAttribute(IMarker.SEVERITY)).intValue();
if (sev==IMarker.SEVERITY_ERROR) {
return;
}
}
}
catch (CoreException e) {
e.printStackTrace();
}
setupMarker(file, diagnostic);
}
if (CeylonBuilder.isJava(file)){
try {
for (IMarker m: file.findMarkers(JAVA_MODEL_PROBLEM_MARKER, false, DEPTH_ZERO)) {
int sev = ((Integer) m.getAttribute(IMarker.SEVERITY)).intValue();
if (sev==IMarker.SEVERITY_ERROR) {
return;
}
}
}
catch (CoreException e) {
e.printStackTrace();
}
setupMarker(file, diagnostic);
}
}else{
setupMarker(project, diagnostic);
}
}
}
private void setupMarker(IResource resource, Diagnostic<? extends JavaFileObject> diagnostic) {
try {
long line = diagnostic==null ? -1 : diagnostic.getLineNumber();
String markerId = PROBLEM_MARKER_ID + ".backend";
if (resource instanceof IFile) {
if (CeylonBuilder.isJava((IFile)resource)) {
markerId = JAVA_MODEL_PROBLEM_MARKER;
}
// if (line<0) {
//TODO: use the Symbol to get a location for the javac error
// String name = ((Symbol)((JCDiagnostic) diagnostic).getArgs()[0]).name.toString();
// Declaration member = CeylonBuilder.getPackage((IFile)resource).getDirectMember(name, null, false);
// }
}
IMarker marker = resource.createMarker(markerId);
if (line>=0) {
//Javac doesn't have line number info for certain errors
marker.setAttribute(IMarker.LINE_NUMBER, (int) line);
marker.setAttribute(IMarker.CHAR_START,
(int) diagnostic.getStartPosition());
marker.setAttribute(IMarker.CHAR_END,
(int) diagnostic.getEndPosition());
}
if (markerId.equals(JAVA_MODEL_PROBLEM_MARKER)) {
marker.setAttribute(IMarker.SOURCE_ID, PLUGIN_ID);
}
String message = diagnostic==null ?
"unexplained compilation problem" :
diagnostic.getMessage(Locale.getDefault());
marker.setAttribute(IMarker.MESSAGE, message);
marker.setAttribute(IMarker.PRIORITY, IMarker.PRIORITY_HIGH);
switch (diagnostic==null ? Diagnostic.Kind.ERROR : diagnostic.getKind()) {
case ERROR:
marker.setAttribute(IMarker.SEVERITY, IMarker.SEVERITY_ERROR);
break;
case WARNING:
case MANDATORY_WARNING:
marker.setAttribute(IMarker.SEVERITY, IMarker.SEVERITY_WARNING);
break;
default:
marker.setAttribute(IMarker.SEVERITY, IMarker.SEVERITY_INFO);
}
}
catch (CoreException ce) {
ce.printStackTrace();
}
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_builder_CompileErrorReporter.java
|
2,610 |
class PingRequestHandler extends BaseTransportRequestHandler<PingRequest> {
public static final String ACTION = "discovery/zen/fd/ping";
@Override
public PingRequest newInstance() {
return new PingRequest();
}
@Override
public void messageReceived(PingRequest request, TransportChannel channel) throws Exception {
// if we are not the node we are supposed to be pinged, send an exception
// this can happen when a kill -9 is sent, and another node is started using the same port
if (!latestNodes.localNodeId().equals(request.nodeId)) {
throw new ElasticsearchIllegalStateException("Got pinged as node [" + request.nodeId + "], but I am node [" + latestNodes.localNodeId() + "]");
}
channel.sendResponse(new PingResponse());
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_fd_NodesFaultDetection.java
|
92 |
@SuppressWarnings("serial")
static final class ReduceEntriesTask<K,V>
extends BulkTask<K,V,Map.Entry<K,V>> {
final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
Map.Entry<K,V> result;
ReduceEntriesTask<K,V> rights, nextRight;
ReduceEntriesTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
ReduceEntriesTask<K,V> nextRight,
BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.reducer = reducer;
}
public final Map.Entry<K,V> getRawResult() { return result; }
public final void compute() {
final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
if ((reducer = this.reducer) != null) {
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new ReduceEntriesTask<K,V>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, reducer)).fork();
}
Map.Entry<K,V> r = null;
for (Node<K,V> p; (p = advance()) != null; )
r = (r == null) ? p : reducer.apply(r, p);
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
t = (ReduceEntriesTask<K,V>)c,
s = t.rights;
while (s != null) {
Map.Entry<K,V> tr, sr;
if ((sr = s.result) != null)
t.result = (((tr = t.result) == null) ? sr :
reducer.apply(tr, sr));
s = t.rights = s.nextRight;
}
}
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
357 |
public class NodesStatsRequest extends NodesOperationRequest<NodesStatsRequest> {
private CommonStatsFlags indices = new CommonStatsFlags();
private boolean os;
private boolean process;
private boolean jvm;
private boolean threadPool;
private boolean network;
private boolean fs;
private boolean transport;
private boolean http;
private boolean breaker;
protected NodesStatsRequest() {
}
/**
* Get stats from nodes based on the node ids specified. If none are passed, stats
* for all nodes will be returned.
*/
public NodesStatsRequest(String... nodesIds) {
super(nodesIds);
}
/**
* Sets all the request flags.
*/
public NodesStatsRequest all() {
this.indices.all();
this.os = true;
this.process = true;
this.jvm = true;
this.threadPool = true;
this.network = true;
this.fs = true;
this.transport = true;
this.http = true;
this.breaker = true;
return this;
}
/**
* Clears all the request flags.
*/
public NodesStatsRequest clear() {
this.indices.clear();
this.os = false;
this.process = false;
this.jvm = false;
this.threadPool = false;
this.network = false;
this.fs = false;
this.transport = false;
this.http = false;
this.breaker = false;
return this;
}
public CommonStatsFlags indices() {
return indices;
}
public NodesStatsRequest indices(CommonStatsFlags indices) {
this.indices = indices;
return this;
}
/**
* Should indices stats be returned.
*/
public NodesStatsRequest indices(boolean indices) {
if (indices) {
this.indices.all();
} else {
this.indices.clear();
}
return this;
}
/**
* Should the node OS be returned.
*/
public boolean os() {
return this.os;
}
/**
* Should the node OS be returned.
*/
public NodesStatsRequest os(boolean os) {
this.os = os;
return this;
}
/**
* Should the node Process be returned.
*/
public boolean process() {
return this.process;
}
/**
* Should the node Process be returned.
*/
public NodesStatsRequest process(boolean process) {
this.process = process;
return this;
}
/**
* Should the node JVM be returned.
*/
public boolean jvm() {
return this.jvm;
}
/**
* Should the node JVM be returned.
*/
public NodesStatsRequest jvm(boolean jvm) {
this.jvm = jvm;
return this;
}
/**
* Should the node Thread Pool be returned.
*/
public boolean threadPool() {
return this.threadPool;
}
/**
* Should the node Thread Pool be returned.
*/
public NodesStatsRequest threadPool(boolean threadPool) {
this.threadPool = threadPool;
return this;
}
/**
* Should the node Network be returned.
*/
public boolean network() {
return this.network;
}
/**
* Should the node Network be returned.
*/
public NodesStatsRequest network(boolean network) {
this.network = network;
return this;
}
/**
* Should the node file system stats be returned.
*/
public boolean fs() {
return this.fs;
}
/**
* Should the node file system stats be returned.
*/
public NodesStatsRequest fs(boolean fs) {
this.fs = fs;
return this;
}
/**
* Should the node Transport be returned.
*/
public boolean transport() {
return this.transport;
}
/**
* Should the node Transport be returned.
*/
public NodesStatsRequest transport(boolean transport) {
this.transport = transport;
return this;
}
/**
* Should the node HTTP be returned.
*/
public boolean http() {
return this.http;
}
/**
* Should the node HTTP be returned.
*/
public NodesStatsRequest http(boolean http) {
this.http = http;
return this;
}
public boolean breaker() {
return this.breaker;
}
/**
* Should the node's circuit breaker stats be returned.
*/
public NodesStatsRequest breaker(boolean breaker) {
this.breaker = breaker;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = CommonStatsFlags.readCommonStatsFlags(in);
os = in.readBoolean();
process = in.readBoolean();
jvm = in.readBoolean();
threadPool = in.readBoolean();
network = in.readBoolean();
fs = in.readBoolean();
transport = in.readBoolean();
http = in.readBoolean();
breaker = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
indices.writeTo(out);
out.writeBoolean(os);
out.writeBoolean(process);
out.writeBoolean(jvm);
out.writeBoolean(threadPool);
out.writeBoolean(network);
out.writeBoolean(fs);
out.writeBoolean(transport);
out.writeBoolean(http);
out.writeBoolean(breaker);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_stats_NodesStatsRequest.java
|
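A hedged usage sketch of the fluent flag API defined above; it assumes the Elasticsearch artifact this row comes from is on the classpath, and the node ids are made up:
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
public class NodesStatsRequestSketch {
    public static void main(String[] args) {
        NodesStatsRequest request = new NodesStatsRequest("node-1", "node-2")
                .clear()    // reset every flag first
                .jvm(true)  // then opt back in selectively
                .os(true)
                .fs(true);
        System.out.println(request.jvm()); // true
    }
}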
346 |
public class TransportNodesShutdownAction extends TransportMasterNodeOperationAction<NodesShutdownRequest, NodesShutdownResponse> {
private final Node node;
private final ClusterName clusterName;
private final boolean disabled;
private final TimeValue delay;
@Inject
public TransportNodesShutdownAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
Node node, ClusterName clusterName) {
super(settings, transportService, clusterService, threadPool);
this.node = node;
this.clusterName = clusterName;
this.disabled = settings.getAsBoolean("action.disable_shutdown", componentSettings.getAsBoolean("disabled", false));
this.delay = componentSettings.getAsTime("delay", TimeValue.timeValueMillis(200));
this.transportService.registerHandler(NodeShutdownRequestHandler.ACTION, new NodeShutdownRequestHandler());
}
@Override
protected String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
protected String transportAction() {
return NodesShutdownAction.NAME;
}
@Override
protected NodesShutdownRequest newRequest() {
return new NodesShutdownRequest();
}
@Override
protected NodesShutdownResponse newResponse() {
return new NodesShutdownResponse();
}
@Override
protected void processBeforeDelegationToMaster(NodesShutdownRequest request, ClusterState state) {
String[] nodesIds = request.nodesIds;
if (nodesIds != null) {
for (int i = 0; i < nodesIds.length; i++) {
// replace the _local one, since it loses its meaning when going over to the master...
if ("_local".equals(nodesIds[i])) {
nodesIds[i] = state.nodes().localNodeId();
}
}
}
}
@Override
protected void masterOperation(final NodesShutdownRequest request, final ClusterState state, final ActionListener<NodesShutdownResponse> listener) throws ElasticsearchException {
if (disabled) {
throw new ElasticsearchIllegalStateException("Shutdown is disabled");
}
final ObjectOpenHashSet<DiscoveryNode> nodes = new ObjectOpenHashSet<DiscoveryNode>();
if (state.nodes().isAllNodes(request.nodesIds)) {
logger.info("[cluster_shutdown]: requested, shutting down in [{}]", request.delay);
nodes.addAll(state.nodes().dataNodes().values());
nodes.addAll(state.nodes().masterNodes().values());
Thread t = new Thread(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(request.delay.millis());
} catch (InterruptedException e) {
// ignore
}
// first, stop the cluster service
logger.trace("[cluster_shutdown]: stopping the cluster service so no re-routing will occur");
clusterService.stop();
final CountDownLatch latch = new CountDownLatch(nodes.size());
for (ObjectCursor<DiscoveryNode> cursor : nodes) {
final DiscoveryNode node = cursor.value;
if (node.id().equals(state.nodes().masterNodeId())) {
// don't shutdown the master yet...
latch.countDown();
} else {
logger.trace("[cluster_shutdown]: sending shutdown request to [{}]", node);
transportService.sendRequest(node, NodeShutdownRequestHandler.ACTION, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
logger.trace("[cluster_shutdown]: received shutdown response from [{}]", node);
latch.countDown();
}
@Override
public void handleException(TransportException exp) {
logger.warn("[cluster_shutdown]: received failed shutdown response from [{}]", exp, node);
latch.countDown();
}
});
}
}
try {
latch.await();
} catch (InterruptedException e) {
// ignore
}
logger.info("[cluster_shutdown]: done shutting down all nodes except master, proceeding to master");
// now, kill the master
logger.trace("[cluster_shutdown]: shutting down the master [{}]", state.nodes().masterNode());
transportService.sendRequest(state.nodes().masterNode(), NodeShutdownRequestHandler.ACTION, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
logger.trace("[cluster_shutdown]: received shutdown response from master");
}
@Override
public void handleException(TransportException exp) {
logger.warn("[cluster_shutdown]: received failed shutdown response master", exp);
}
});
}
});
t.start();
} else {
final String[] nodesIds = state.nodes().resolveNodesIds(request.nodesIds);
logger.info("[partial_cluster_shutdown]: requested, shutting down [{}] in [{}]", nodesIds, request.delay);
for (String nodeId : nodesIds) {
final DiscoveryNode node = state.nodes().get(nodeId);
if (node != null) {
nodes.add(node);
}
}
Thread t = new Thread(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(request.delay.millis());
} catch (InterruptedException e) {
// ignore
}
final CountDownLatch latch = new CountDownLatch(nodesIds.length);
for (String nodeId : nodesIds) {
final DiscoveryNode node = state.nodes().get(nodeId);
if (node == null) {
logger.warn("[partial_cluster_shutdown]: no node to shutdown for node_id [{}]", nodeId);
latch.countDown();
continue;
}
logger.trace("[partial_cluster_shutdown]: sending shutdown request to [{}]", node);
transportService.sendRequest(node, NodeShutdownRequestHandler.ACTION, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
logger.trace("[partial_cluster_shutdown]: received shutdown response from [{}]", node);
latch.countDown();
}
@Override
public void handleException(TransportException exp) {
logger.warn("[partial_cluster_shutdown]: received failed shutdown response from [{}]", exp, node);
latch.countDown();
}
});
}
try {
latch.await();
} catch (InterruptedException e) {
// ignore
}
logger.info("[partial_cluster_shutdown]: done shutting down [{}]", ((Object) nodesIds));
}
});
t.start();
}
listener.onResponse(new NodesShutdownResponse(clusterName, nodes.toArray(DiscoveryNode.class)));
}
private class NodeShutdownRequestHandler extends BaseTransportRequestHandler<NodeShutdownRequest> {
static final String ACTION = "/cluster/nodes/shutdown/node";
@Override
public NodeShutdownRequest newInstance() {
return new NodeShutdownRequest();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void messageReceived(final NodeShutdownRequest request, TransportChannel channel) throws Exception {
if (disabled) {
throw new ElasticsearchIllegalStateException("Shutdown is disabled");
}
logger.info("shutting down in [{}]", delay);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(delay.millis());
} catch (InterruptedException e) {
// ignore
}
if (!request.exit) {
logger.info("initiating requested shutdown (no exit)...");
try {
node.close();
} catch (Exception e) {
logger.warn("Failed to shutdown", e);
}
return;
}
boolean shutdownWithWrapper = false;
if (System.getProperty("elasticsearch-service") != null) {
try {
Class wrapperManager = settings.getClassLoader().loadClass("org.tanukisoftware.wrapper.WrapperManager");
logger.info("initiating requested shutdown (using service)");
wrapperManager.getMethod("stopAndReturn", int.class).invoke(null, 0);
shutdownWithWrapper = true;
} catch (Throwable e) {
logger.error("failed to initial shutdown on service wrapper", e);
}
}
if (!shutdownWithWrapper) {
logger.info("initiating requested shutdown...");
try {
node.close();
} catch (Exception e) {
logger.warn("Failed to shutdown", e);
} finally {
// make sure we initiate the shutdown hooks, so the Bootstrap#main thread will exit
System.exit(0);
}
}
}
});
t.start();
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
static class NodeShutdownRequest extends TransportRequest {
boolean exit;
NodeShutdownRequest() {
}
NodeShutdownRequest(NodesShutdownRequest request) {
super(request);
this.exit = request.exit();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
exit = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(exit);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_cluster_node_shutdown_TransportNodesShutdownAction.java
|
1,596 |
public class ThrottlingAllocationDecider extends AllocationDecider {
public static final String CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = "cluster.routing.allocation.node_initial_primaries_recoveries";
public static final String CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = "cluster.routing.allocation.node_concurrent_recoveries";
public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2;
public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4;
private volatile int primariesInitialRecoveries;
private volatile int concurrentRecoveries;
@Inject
public ThrottlingAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
super(settings);
this.primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES);
this.concurrentRecoveries = settings.getAsInt("cluster.routing.allocation.concurrent_recoveries", settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES));
logger.debug("using node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries);
nodeSettingsService.addListener(new ApplySettings());
}
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
if (shardRouting.primary()) {
assert shardRouting.unassigned() || shardRouting.active();
if (shardRouting.unassigned()) {
// primary is unassigned, means we are going to do recovery from gateway
// count *just the primary* currently doing recovery on the node and check against concurrent_recoveries
int primariesInRecovery = 0;
for (MutableShardRouting shard : node) {
// when a primary shard is INITIALIZING, it can be because of *initial recovery* or *relocation from another node*
// we only count initial recoveries here, so we need to make sure that relocating node is null
if (shard.state() == ShardRoutingState.INITIALIZING && shard.primary() && shard.relocatingNodeId() == null) {
primariesInRecovery++;
}
}
if (primariesInRecovery >= primariesInitialRecoveries) {
return allocation.decision(Decision.THROTTLE, "too many primaries currently recovering [%d], limit: [%d]",
primariesInRecovery, primariesInitialRecoveries);
} else {
return allocation.decision(Decision.YES, "below primary recovery limit of [%d]", primariesInitialRecoveries);
}
}
}
// either primary or replica doing recovery (from peer shard)
// count the number of recoveries on the node, its for both target (INITIALIZING) and source (RELOCATING)
return canAllocate(node, allocation);
}
public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {
int currentRecoveries = 0;
for (MutableShardRouting shard : node) {
if (shard.state() == ShardRoutingState.INITIALIZING || shard.state() == ShardRoutingState.RELOCATING) {
currentRecoveries++;
}
}
if (currentRecoveries >= concurrentRecoveries) {
return allocation.decision(Decision.THROTTLE, "too many shards currently recovering [%d], limit: [%d]",
currentRecoveries, concurrentRecoveries);
} else {
return allocation.decision(Decision.YES, "below shard recovery limit of [%d]", concurrentRecoveries);
}
}
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
int primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, ThrottlingAllocationDecider.this.primariesInitialRecoveries);
if (primariesInitialRecoveries != ThrottlingAllocationDecider.this.primariesInitialRecoveries) {
logger.info("updating [cluster.routing.allocation.node_initial_primaries_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.primariesInitialRecoveries, primariesInitialRecoveries);
ThrottlingAllocationDecider.this.primariesInitialRecoveries = primariesInitialRecoveries;
}
int concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, ThrottlingAllocationDecider.this.concurrentRecoveries);
if (concurrentRecoveries != ThrottlingAllocationDecider.this.concurrentRecoveries) {
logger.info("updating [cluster.routing.allocation.node_concurrent_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.concurrentRecoveries, concurrentRecoveries);
ThrottlingAllocationDecider.this.concurrentRecoveries = concurrentRecoveries;
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_routing_allocation_decider_ThrottlingAllocationDecider.java
|
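The decision logic above reduces to counting in-flight recoveries against a cap; a plain-Java sketch with stand-in types (State and the method below are illustrative, not Elasticsearch types):
import java.util.Arrays;
import java.util.List;
public class ThrottleSketch {
    enum State { STARTED, INITIALIZING, RELOCATING }
    // Mirrors canAllocate above: throttle once in-flight work hits the cap.
    static boolean canAllocate(List<State> shards, int concurrentRecoveries) {
        int current = 0;
        for (State s : shards) {
            if (s == State.INITIALIZING || s == State.RELOCATING) {
                current++;
            }
        }
        return current < concurrentRecoveries;
    }
    public static void main(String[] args) {
        List<State> node = Arrays.asList(State.STARTED, State.INITIALIZING, State.RELOCATING);
        System.out.println(canAllocate(node, 2)); // false: 2 recoveries >= limit 2
    }
}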
189 |
public class ThreadLocalRandom extends Random {
// same constants as Random, but must be redeclared because private
private static final long multiplier = 0x5DEECE66DL;
private static final long addend = 0xBL;
private static final long mask = (1L << 48) - 1;
/**
* The random seed. We can't use super.seed.
*/
private long rnd;
/**
* Initialization flag to permit calls to setSeed to succeed only
* while executing the Random constructor. We can't allow others
* since it would cause setting seed in one part of a program to
* unintentionally impact other usages by the thread.
*/
boolean initialized;
// Padding to help avoid memory contention among seed updates in
// different TLRs in the common case that they are located near
// each other.
private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
/**
* The actual ThreadLocal
*/
private static final ThreadLocal<ThreadLocalRandom> localRandom =
new ThreadLocal<ThreadLocalRandom>() {
protected ThreadLocalRandom initialValue() {
return new ThreadLocalRandom();
}
};
/**
* Constructor called only by localRandom.initialValue.
*/
ThreadLocalRandom() {
super();
initialized = true;
}
/**
* Returns the current thread's {@code ThreadLocalRandom}.
*
* @return the current thread's {@code ThreadLocalRandom}
*/
public static ThreadLocalRandom current() {
return localRandom.get();
}
/**
* Throws {@code UnsupportedOperationException}. Setting seeds in
* this generator is not supported.
*
* @throws UnsupportedOperationException always
*/
public void setSeed(long seed) {
if (initialized)
throw new UnsupportedOperationException();
rnd = (seed ^ multiplier) & mask;
}
protected int next(int bits) {
rnd = (rnd * multiplier + addend) & mask;
return (int) (rnd >>> (48-bits));
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
*
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
* @throws IllegalArgumentException if least greater than or equal
* to bound
*/
public int nextInt(int least, int bound) {
if (least >= bound)
throw new IllegalArgumentException();
return nextInt(bound - least) + least;
}
/**
* Returns a pseudorandom, uniformly distributed value
* between 0 (inclusive) and the specified value (exclusive).
*
* @param n the bound on the random number to be returned. Must be
* positive.
* @return the next value
* @throws IllegalArgumentException if n is not positive
*/
public long nextLong(long n) {
if (n <= 0)
throw new IllegalArgumentException("n must be positive");
// Divide n by two until small enough for nextInt. On each
// iteration (at most 31 of them but usually much less),
// randomly choose both whether to include high bit in result
// (offset) and whether to continue with the lower vs upper
// half (which makes a difference only if odd).
long offset = 0;
while (n >= Integer.MAX_VALUE) {
int bits = next(2);
long half = n >>> 1;
long nextn = ((bits & 2) == 0) ? half : n - half;
if ((bits & 1) == 0)
offset += n - nextn;
n = nextn;
}
return offset + nextInt((int) n);
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
*
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
* @throws IllegalArgumentException if least greater than or equal
* to bound
*/
public long nextLong(long least, long bound) {
if (least >= bound)
throw new IllegalArgumentException();
return nextLong(bound - least) + least;
}
/**
* Returns a pseudorandom, uniformly distributed {@code double} value
* between 0 (inclusive) and the specified value (exclusive).
*
* @param n the bound on the random number to be returned. Must be
* positive.
* @return the next value
* @throws IllegalArgumentException if n is not positive
*/
public double nextDouble(double n) {
if (n <= 0)
throw new IllegalArgumentException("n must be positive");
return nextDouble() * n;
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
*
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
* @throws IllegalArgumentException if least greater than or equal
* to bound
*/
public double nextDouble(double least, double bound) {
if (least >= bound)
throw new IllegalArgumentException();
return nextDouble() * (bound - least) + least;
}
private static final long serialVersionUID = -5851777807851030925L;
}
| 0true
|
src_main_java_jsr166y_ThreadLocalRandom.java
|
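Usage sketch for the class above. With the jsr166y backport on the classpath the import would be jsr166y.ThreadLocalRandom; on Java 7+ the standard java.util.concurrent.ThreadLocalRandom offers the same calls used here:
import java.util.concurrent.ThreadLocalRandom;
public class TlrSketch {
    public static void main(String[] args) {
        int die = ThreadLocalRandom.current().nextInt(1, 7);   // uniform in 1..6
        long id = ThreadLocalRandom.current().nextLong(1000);  // uniform in 0..999
        double x = ThreadLocalRandom.current().nextDouble(0.0, 1.0);
        System.out.println(die + " " + id + " " + x);
    }
}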
317 |
new Thread() {
public void run() {
map.lock(key);
lockedLatch.countDown();
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java
|
651 |
public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetIndexTemplatesRequest, GetIndexTemplatesResponse, GetIndexTemplatesRequestBuilder> {
public GetIndexTemplatesRequestBuilder(IndicesAdminClient indicesClient) {
super((InternalIndicesAdminClient) indicesClient, new GetIndexTemplatesRequest());
}
public GetIndexTemplatesRequestBuilder(IndicesAdminClient indicesClient, String... names) {
super((InternalIndicesAdminClient) indicesClient, new GetIndexTemplatesRequest(names));
}
@Override
protected void doExecute(ActionListener<GetIndexTemplatesResponse> listener) {
((IndicesAdminClient) client).getTemplates(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_template_get_GetIndexTemplatesRequestBuilder.java
|
284 |
public abstract class ActionRequestBuilder<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder> {
protected final Request request;
protected final InternalGenericClient client;
protected ActionRequestBuilder(InternalGenericClient client, Request request) {
this.client = client;
this.request = request;
}
public Request request() {
return this.request;
}
@SuppressWarnings("unchecked")
public final RequestBuilder setListenerThreaded(boolean listenerThreaded) {
request.listenerThreaded(listenerThreaded);
return (RequestBuilder) this;
}
@SuppressWarnings("unchecked")
public final RequestBuilder putHeader(String key, Object value) {
request.putHeader(key, value);
return (RequestBuilder) this;
}
public ListenableActionFuture<Response> execute() {
PlainListenableActionFuture<Response> future = new PlainListenableActionFuture<Response>(request.listenerThreaded(), client.threadPool());
execute(future);
return future;
}
/**
* Short version of execute().actionGet().
*/
public Response get() throws ElasticsearchException {
return execute().actionGet();
}
/**
* Short version of execute().actionGet().
*/
public Response get(TimeValue timeout) throws ElasticsearchException {
return execute().actionGet(timeout);
}
/**
* Short version of execute().actionGet().
*/
public Response get(String timeout) throws ElasticsearchException {
return execute().actionGet(timeout);
}
public void execute(ActionListener<Response> listener) {
doExecute(listener);
}
protected abstract void doExecute(ActionListener<Response> listener);
}
| 0true
|
src_main_java_org_elasticsearch_action_ActionRequestBuilder.java
|
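A generic sketch of the builder's execute()/get() pairing above: get() is just execute() followed by a blocking wait. CompletableFuture is a Java 8 stand-in for the row's ListenableActionFuture, and the names are illustrative:
import java.util.concurrent.CompletableFuture;
public class BuilderSketch {
    static CompletableFuture<String> execute() {
        return CompletableFuture.supplyAsync(() -> "response"); // the async path
    }
    static String get() throws Exception {
        return execute().get(); // "short version of execute().actionGet()"
    }
    public static void main(String[] args) throws Exception {
        System.out.println(get());
    }
}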
6,018 |
public final class Correction {
public static final Correction[] EMPTY = new Correction[0];
public double score;
public final Candidate[] candidates;
public Correction(double score, Candidate[] candidates) {
this.score = score;
this.candidates = candidates;
}
@Override
public String toString() {
return "Correction [score=" + score + ", candidates=" + Arrays.toString(candidates) + "]";
}
public BytesRef join(BytesRef separator) {
return join(separator, null, null);
}
public BytesRef join(BytesRef separator, BytesRef preTag, BytesRef postTag) {
return join(separator, new BytesRef(), preTag, postTag);
}
public BytesRef join(BytesRef separator, BytesRef result, BytesRef preTag, BytesRef postTag) {
BytesRef[] toJoin = new BytesRef[this.candidates.length];
int len = separator.length * this.candidates.length - 1;
for (int i = 0; i < toJoin.length; i++) {
Candidate candidate = candidates[i];
if (preTag == null || candidate.userInput) {
toJoin[i] = candidate.term;
} else {
final int maxLen = preTag.length + postTag.length + candidate.term.length;
final BytesRef highlighted = new BytesRef(maxLen);// just allocate once
if (i == 0 || candidates[i-1].userInput) {
highlighted.append(preTag);
}
highlighted.append(candidate.term);
if (toJoin.length == i + 1 || candidates[i+1].userInput) {
highlighted.append(postTag);
}
toJoin[i] = highlighted;
}
len += toJoin[i].length;
}
result.offset = 0;
result.grow(len);
return SuggestUtils.joinPreAllocated(separator, result, toJoin);
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_suggest_phrase_Correction.java
|
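A plain-String sketch of the join-with-highlight idea above; note that the real code works on BytesRef and wraps only contiguous non-user-input runs in the tags, while this simplification tags every token:
import java.util.Arrays;
import java.util.List;
public class JoinSketch {
    static String join(List<String> parts, String sep, String pre, String post) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < parts.size(); i++) {
            if (i > 0) sb.append(sep); // separator between tokens, not around them
            sb.append(pre).append(parts.get(i)).append(post);
        }
        return sb.toString();
    }
    public static void main(String[] args) {
        System.out.println(join(Arrays.asList("foo", "bar"), " ", "<em>", "</em>"));
        // <em>foo</em> <em>bar</em>
    }
}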
282 |
@RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class ClientRandomLBTest {
@AfterClass
public static void destroy() {
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
}
@Test
public void testRandomLB_withoutMembers() {
RandomLB lb = new RandomLB();
Member m = lb.next();
assertNull(m);
}
@Test
public void testRandomLB_withMembers() {
RandomLB randomLB = new RandomLB();
TestHazelcastInstanceFactory factory = new TestHazelcastInstanceFactory(1);
HazelcastInstance server = factory.newHazelcastInstance();
Cluster cluster = server.getCluster();
ClientConfig clientConfig = new ClientConfig();
clientConfig.setLoadBalancer(randomLB);
randomLB.init(cluster, clientConfig);
Member member = cluster.getLocalMember();
Member nextMember = randomLB.next();
assertEquals(member, nextMember);
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_loadBalancer_ClientRandomLBTest.java
|
123 |
client.getLifecycleService().addLifecycleListener(new LifecycleListener() {
@Override
public void stateChanged(LifecycleEvent event) {
connectedLatch.countDown();
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientReconnectTest.java
|
668 |
public class DeleteWarmerAction extends IndicesAction<DeleteWarmerRequest, DeleteWarmerResponse, DeleteWarmerRequestBuilder> {
public static final DeleteWarmerAction INSTANCE = new DeleteWarmerAction();
public static final String NAME = "indices/warmer/delete";
private DeleteWarmerAction() {
super(NAME);
}
@Override
public DeleteWarmerResponse newResponse() {
return new DeleteWarmerResponse();
}
@Override
public DeleteWarmerRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new DeleteWarmerRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_warmer_delete_DeleteWarmerAction.java
|
595 |
public class IndicesSegmentsRequest extends BroadcastOperationRequest<IndicesSegmentsRequest> {
public IndicesSegmentsRequest() {
this(Strings.EMPTY_ARRAY);
}
public IndicesSegmentsRequest(String... indices) {
super(indices);
indicesOptions(IndicesOptions.fromOptions(false, false, true, false));
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_segments_IndicesSegmentsRequest.java
|
692 |
client.bulk(bulkRequest, new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse response) {
try {
listener.afterBulk(executionId, bulkRequest, response);
} finally {
semaphore.release();
}
}
@Override
public void onFailure(Throwable e) {
try {
listener.afterBulk(executionId, bulkRequest, e);
} finally {
semaphore.release();
}
}
});
| 0true
|
src_main_java_org_elasticsearch_action_bulk_BulkProcessor.java
|
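The release-in-finally pattern above caps in-flight async requests with a Semaphore; a self-contained sketch of the same shape, with the bulk call replaced by a placeholder:
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
public class SemaphoreGateSketch {
    public static void main(String[] args) throws InterruptedException {
        final Semaphore semaphore = new Semaphore(2); // at most 2 requests in flight
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 6; i++) {
            semaphore.acquire(); // block until a slot frees up
            pool.execute(new Runnable() {
                public void run() {
                    try {
                        // ... the async work (the bulk call) would run here ...
                    } finally {
                        semaphore.release(); // mirrors both callbacks above
                    }
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}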
56 |
public class AddAnnotionProposal extends CorrectionProposal {
private static final List<String> ANNOTATIONS_ORDER =
asList("doc", "throws", "see", "tagged", "shared", "abstract",
"actual", "formal", "default", "variable");
private static final List<String> ANNOTATIONS_ON_SEPARATE_LINE =
asList("doc", "throws", "see", "tagged");
private final Declaration dec;
private final String annotation;
AddAnnotionProposal(Declaration dec, String annotation,
String desc, int offset, TextFileChange change,
Region selection) {
super(desc, change, selection);
this.dec = dec;
this.annotation = annotation;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof AddAnnotionProposal) {
AddAnnotionProposal that = (AddAnnotionProposal) obj;
return that.dec.equals(dec) &&
that.annotation.equals(annotation);
}
else {
return super.equals(obj);
}
}
@Override
public int hashCode() {
return dec.hashCode();
}
private static void addAddAnnotationProposal(Node node, String annotation,
String desc, Declaration dec, Collection<ICompletionProposal> proposals,
IProject project) {
if (dec!=null && dec.getName()!=null &&
!(node instanceof Tree.MissingDeclaration)) {
for (PhasedUnit unit: getUnits(project)) {
if (dec.getUnit().equals(unit.getUnit())) {
FindDeclarationNodeVisitor fdv =
new FindDeclarationNodeVisitor(dec);
getRootNode(unit).visit(fdv);
Tree.Declaration decNode =
(Tree.Declaration) fdv.getDeclarationNode();
if (decNode!=null) {
addAddAnnotationProposal(annotation, desc, dec,
proposals, unit, node, decNode);
}
break;
}
}
}
}
private static void addAddAnnotationProposal(String annotation, String desc,
Declaration dec, Collection<ICompletionProposal> proposals,
PhasedUnit unit, Node node, Tree.Declaration decNode) {
IFile file = getFile(unit);
TextFileChange change = new TextFileChange(desc, file);
change.setEdit(new MultiTextEdit());
TextEdit edit = createReplaceAnnotationEdit(annotation, node, change);
if (edit==null) {
edit = createInsertAnnotationEdit(annotation, decNode,
EditorUtil.getDocument(change));
}
change.addEdit(edit);
if (decNode instanceof Tree.TypedDeclaration &&
!(decNode instanceof Tree.ObjectDefinition)) {
Tree.Type type = ((Tree.TypedDeclaration) decNode).getType();
if (type.getToken()!=null &&
(type instanceof Tree.FunctionModifier ||
type instanceof Tree.ValueModifier)) {
ProducedType it = type.getTypeModel();
if (it!=null && !(it.getDeclaration() instanceof UnknownType)) {
String explicitType = it.getProducedTypeName();
change.addEdit(new ReplaceEdit(type.getStartIndex(),
type.getText().length(), explicitType));
}
}
}
Region selection;
if (node!=null && node.getUnit().equals(decNode.getUnit())) {
selection = new Region(edit.getOffset(), annotation.length());
}
else {
selection = null;
}
Scope container = dec.getContainer();
String containerDesc = container instanceof TypeDeclaration ?
" in '" + ((TypeDeclaration) container).getName() + "'" : "";
String description =
"Make '" + dec.getName() + "' " + annotation + containerDesc;
AddAnnotionProposal p = new AddAnnotionProposal(dec, annotation,
description, edit.getOffset(), change, selection);
if (!proposals.contains(p)) {
proposals.add(p);
}
}
private static ReplaceEdit createReplaceAnnotationEdit(String annotation,
Node node, TextFileChange change) {
String toRemove;
if ("formal".equals(annotation)) {
toRemove = "default";
}
else if ("abstract".equals(annotation)) {
toRemove = "final";
}
else {
return null;
}
Tree.AnnotationList annotationList = getAnnotationList(node);
if (annotationList != null) {
for (Tree.Annotation ann:
annotationList.getAnnotations()) {
if (toRemove.equals(getAnnotationIdentifier(ann))) {
return new ReplaceEdit(ann.getStartIndex(),
ann.getStopIndex()+1-ann.getStartIndex(),
annotation);
}
}
}
return null;
}
public static InsertEdit createInsertAnnotationEdit(String newAnnotation,
Node node, IDocument doc) {
String newAnnotationName = getAnnotationWithoutParam(newAnnotation);
Tree.Annotation prevAnnotation = null;
Tree.Annotation nextAnnotation = null;
Tree.AnnotationList annotationList = getAnnotationList(node);
if (annotationList != null) {
for (Tree.Annotation annotation:
annotationList.getAnnotations()) {
if (isAnnotationAfter(newAnnotationName,
getAnnotationIdentifier(annotation))) {
prevAnnotation = annotation;
} else if (nextAnnotation == null) {
nextAnnotation = annotation;
break;
}
}
}
int nextNodeStartIndex;
if (nextAnnotation != null) {
nextNodeStartIndex = nextAnnotation.getStartIndex();
}
else {
if (node instanceof Tree.AnyAttribute ||
node instanceof Tree.AnyMethod ) {
nextNodeStartIndex =
((Tree.TypedDeclaration) node).getType().getStartIndex();
}
        else if (node instanceof Tree.ObjectDefinition ||
                 node instanceof Tree.ClassOrInterface) {
            nextNodeStartIndex =
                    ((CommonToken) node.getMainToken()).getStartIndex();
        }
else {
nextNodeStartIndex = node.getStartIndex();
}
}
int newAnnotationOffset;
StringBuilder newAnnotationText = new StringBuilder();
if (isAnnotationOnSeparateLine(newAnnotationName) &&
!(node instanceof Tree.Parameter)) {
if (prevAnnotation != null &&
isAnnotationOnSeparateLine(getAnnotationIdentifier(prevAnnotation))) {
newAnnotationOffset = prevAnnotation.getStopIndex() + 1;
newAnnotationText.append(System.lineSeparator());
newAnnotationText.append(getIndent(node, doc));
newAnnotationText.append(newAnnotation);
} else {
newAnnotationOffset = nextNodeStartIndex;
newAnnotationText.append(newAnnotation);
newAnnotationText.append(System.lineSeparator());
newAnnotationText.append(getIndent(node, doc));
}
} else {
newAnnotationOffset = nextNodeStartIndex;
newAnnotationText.append(newAnnotation);
newAnnotationText.append(" ");
}
return new InsertEdit(newAnnotationOffset,
newAnnotationText.toString());
}
public static Tree.AnnotationList getAnnotationList(Node node) {
Tree.AnnotationList annotationList = null;
if (node instanceof Tree.Declaration) {
annotationList =
((Tree.Declaration) node).getAnnotationList();
}
else if (node instanceof Tree.ModuleDescriptor) {
annotationList =
((Tree.ModuleDescriptor) node).getAnnotationList();
}
else if (node instanceof Tree.PackageDescriptor) {
annotationList =
((Tree.PackageDescriptor) node).getAnnotationList();
}
else if (node instanceof Tree.Assertion) {
annotationList =
((Tree.Assertion) node).getAnnotationList();
}
return annotationList;
}
public static String getAnnotationIdentifier(Tree.Annotation annotation) {
String annotationName = null;
if (annotation != null) {
if (annotation.getPrimary() instanceof Tree.BaseMemberExpression) {
Tree.BaseMemberExpression bme =
(Tree.BaseMemberExpression) annotation.getPrimary();
annotationName = bme.getIdentifier().getText();
}
}
return annotationName;
}
private static String getAnnotationWithoutParam(String annotation) {
int index = annotation.indexOf("(");
if (index != -1) {
return annotation.substring(0, index).trim();
}
index = annotation.indexOf("\"");
if (index != -1) {
return annotation.substring(0, index).trim();
}
index = annotation.indexOf(" ");
if (index != -1) {
return annotation.substring(0, index).trim();
}
return annotation.trim();
}
private static boolean isAnnotationAfter(String annotation1, String annotation2) {
int index1 = ANNOTATIONS_ORDER.indexOf(annotation1);
int index2 = ANNOTATIONS_ORDER.indexOf(annotation2);
return index1 >= index2;
}
private static boolean isAnnotationOnSeparateLine(String annotation) {
return ANNOTATIONS_ON_SEPARATE_LINE.contains(annotation);
}
static void addMakeActualDecProposal(Collection<ICompletionProposal> proposals,
IProject project, Node node) {
Declaration dec;
if (node instanceof Tree.Declaration) {
dec = ((Tree.Declaration) node).getDeclarationModel();
}
else {
dec = (Declaration) node.getScope();
}
boolean shared = dec.isShared();
addAddAnnotationProposal(node,
shared ? "actual" : "shared actual",
shared ? "Make Actual" : "Make Shared Actual",
dec, proposals, project);
}
static void addMakeDefaultProposal(Collection<ICompletionProposal> proposals,
IProject project, Node node) {
Declaration d;
if (node instanceof Tree.Declaration) {
Tree.Declaration decNode = (Tree.Declaration) node;
d = decNode.getDeclarationModel();
}
else if (node instanceof Tree.BaseMemberExpression) {
d = ((Tree.BaseMemberExpression) node).getDeclaration();
}
else {
return;
}
if (d.isClassOrInterfaceMember()) {
List<Declaration> rds =
((ClassOrInterface) d.getContainer())
.getInheritedMembers(d.getName());
Declaration rd=null;
if (rds.isEmpty()) {
rd=d; //TODO: is this really correct? What case does it handle?
}
else {
for (Declaration r: rds) {
if (!r.isDefault()) {
//just take the first one :-/
//TODO: this is very wrong! Instead, make them all default!
rd = r;
break;
}
}
}
if (rd!=null) {
addAddAnnotationProposal(node,
"default", "Make Default",
rd, proposals, project);
}
}
}
static void addMakeDefaultDecProposal(Collection<ICompletionProposal> proposals,
IProject project, Node node) {
Declaration dec;
if (node instanceof Tree.Declaration) {
dec = ((Tree.Declaration) node).getDeclarationModel();
}
else {
dec = (Declaration) node.getScope();
}
addAddAnnotationProposal(node,
dec.isShared() ? "default" : "shared default",
dec.isShared() ? "Make Default" : "Make Shared Default",
dec, proposals, project);
}
static void addMakeFormalDecProposal(Collection<ICompletionProposal> proposals,
IProject project, Node node) {
Declaration dec;
if (node instanceof Tree.Declaration) {
dec = ((Tree.Declaration) node).getDeclarationModel();
}
else {
dec = (Declaration) node.getScope();
}
addAddAnnotationProposal(node,
dec.isShared() ? "formal" : "shared formal",
dec.isShared() ? "Make Formal" : "Make Shared Formal",
dec, proposals, project);
}
static void addMakeAbstractDecProposal(Collection<ICompletionProposal> proposals,
IProject project, Node node) {
Declaration dec;
if (node instanceof Tree.Declaration) {
dec = ((Tree.Declaration) node).getDeclarationModel();
}
else {
dec = (Declaration) node.getScope();
}
if (dec instanceof Class) {
addAddAnnotationProposal(node,
"abstract", "Make Abstract",
dec, proposals, project);
}
}
static void addMakeContainerAbstractProposal(Collection<ICompletionProposal> proposals,
IProject project, Node node) {
Declaration dec;
if (node instanceof Tree.Declaration) {
Scope container =
((Tree.Declaration) node).getDeclarationModel().getContainer();
if (container instanceof Declaration) {
dec = (Declaration) container;
}
else {
return;
}
}
else {
dec = (Declaration) node.getScope();
}
addAddAnnotationProposal(node,
"abstract", "Make Abstract",
dec, proposals, project);
}
static void addMakeVariableProposal(Collection<ICompletionProposal> proposals,
IProject project, Node node) {
Tree.Term term;
        if (node instanceof Tree.AssignmentOp) {
            //cast to the type actually tested, so compound
            //assignments like += don't throw ClassCastException
            term = ((Tree.AssignmentOp) node).getLeftTerm();
        }
        else if (node instanceof Tree.UnaryOperatorExpression) {
            term = ((Tree.UnaryOperatorExpression) node).getTerm();
        }
else if (node instanceof Tree.MemberOrTypeExpression) {
term = (Tree.MemberOrTypeExpression) node;
}
else if (node instanceof Tree.SpecifierStatement) {
term = ((Tree.SpecifierStatement) node).getBaseMemberExpression();
}
else {
return;
}
if (term instanceof Tree.MemberOrTypeExpression) {
Declaration dec =
((Tree.MemberOrTypeExpression) term).getDeclaration();
if (dec instanceof Value) {
if (((Value) dec).getOriginalDeclaration()==null) {
addAddAnnotationProposal(node,
"variable", "Make Variable",
dec, proposals, project);
}
}
}
}
static void addMakeVariableDecProposal(Collection<ICompletionProposal> proposals,
IProject project, Tree.Declaration node) {
final Declaration dec = node.getDeclarationModel();
if (dec instanceof Value && node instanceof Tree.AttributeDeclaration) {
final Value v = (Value) dec;
if (!v.isVariable() && !v.isTransient()) {
addAddAnnotationProposal(node,
"variable", "Make Variable",
dec, proposals, project);
}
}
}
static void addMakeVariableDecProposal(Collection<ICompletionProposal> proposals,
IProject project, Tree.CompilationUnit cu, Node node) {
final Tree.SpecifierOrInitializerExpression sie =
(Tree.SpecifierOrInitializerExpression) node;
class GetInitializedVisitor extends Visitor {
Value dec;
@Override
public void visit(Tree.AttributeDeclaration that) {
super.visit(that);
if (that.getSpecifierOrInitializerExpression()==sie) {
dec = that.getDeclarationModel();
}
}
}
GetInitializedVisitor v = new GetInitializedVisitor();
v.visit(cu);
addAddAnnotationProposal(node,
"variable", "Make Variable",
v.dec, proposals, project);
}
static void addMakeSharedProposalForSupertypes(Collection<ICompletionProposal> proposals,
IProject project, Node node) {
if (node instanceof Tree.ClassOrInterface) {
Tree.ClassOrInterface c = (Tree.ClassOrInterface) node;
ProducedType extendedType =
c.getDeclarationModel().getExtendedType();
if (extendedType!=null) {
addMakeSharedProposal(proposals, project,
extendedType.getDeclaration());
for (ProducedType typeArgument:
extendedType.getTypeArgumentList()) {
addMakeSharedProposal(proposals, project,
typeArgument.getDeclaration());
}
}
List<ProducedType> satisfiedTypes =
c.getDeclarationModel().getSatisfiedTypes();
if (satisfiedTypes!=null) {
for (ProducedType satisfiedType: satisfiedTypes) {
addMakeSharedProposal(proposals, project,
satisfiedType.getDeclaration());
for (ProducedType typeArgument:
satisfiedType.getTypeArgumentList()) {
addMakeSharedProposal(proposals, project,
typeArgument.getDeclaration());
}
}
}
}
}
static void addMakeRefinedSharedProposal(Collection<ICompletionProposal> proposals,
IProject project, Node node) {
if (node instanceof Tree.Declaration) {
Declaration refined = ((Tree.Declaration) node).getDeclarationModel()
.getRefinedDeclaration();
if (refined.isDefault() || refined.isFormal()) {
addMakeSharedProposal(proposals, project, refined);
}
else {
addAddAnnotationProposal(node,
"shared default", "Make Shared Default",
refined, proposals, project);
}
}
}
static void addMakeSharedProposal(Collection<ICompletionProposal> proposals,
IProject project, Node node) {
Declaration dec = null;
List<ProducedType> typeArgumentList = null;
if (node instanceof Tree.StaticMemberOrTypeExpression) {
Tree.StaticMemberOrTypeExpression qmte =
(Tree.StaticMemberOrTypeExpression) node;
dec = qmte.getDeclaration();
}
//TODO: handle much more kinds of types!
else if (node instanceof Tree.SimpleType) {
Tree.SimpleType st = (Tree.SimpleType) node;
dec = st.getDeclarationModel();
}
else if (node instanceof Tree.OptionalType) {
Tree.OptionalType ot = (Tree.OptionalType) node;
if (ot.getDefiniteType() instanceof Tree.SimpleType) {
Tree.SimpleType st = (Tree.SimpleType) ot.getDefiniteType();
dec = st.getDeclarationModel();
}
}
else if (node instanceof Tree.IterableType) {
Tree.IterableType it = (Tree.IterableType) node;
if (it.getElementType() instanceof Tree.SimpleType) {
Tree.SimpleType st = (Tree.SimpleType) it.getElementType();
dec = st.getDeclarationModel();
}
}
else if (node instanceof Tree.SequenceType) {
Tree.SequenceType qt = (Tree.SequenceType) node;
if (qt.getElementType() instanceof Tree.SimpleType) {
Tree.SimpleType st = (Tree.SimpleType) qt.getElementType();
dec = st.getDeclarationModel();
}
}
else if (node instanceof Tree.ImportMemberOrType) {
Tree.ImportMemberOrType imt = (Tree.ImportMemberOrType) node;
dec = imt.getDeclarationModel();
}
else if (node instanceof Tree.TypedDeclaration) {
Tree.TypedDeclaration td = ((Tree.TypedDeclaration) node);
if (td.getDeclarationModel() != null) {
ProducedType pt = td.getDeclarationModel().getType();
dec = pt.getDeclaration();
typeArgumentList = pt.getTypeArgumentList();
}
}
else if (node instanceof Tree.Parameter) {
Tree.Parameter parameter = ((Tree.Parameter) node);
if (parameter.getParameterModel()!=null &&
parameter.getParameterModel().getType()!=null) {
ProducedType pt = parameter.getParameterModel().getType();
dec = pt.getDeclaration();
typeArgumentList = pt.getTypeArgumentList();
}
}
addMakeSharedProposal(proposals, project, dec);
if (typeArgumentList != null) {
for (ProducedType typeArgument : typeArgumentList) {
addMakeSharedProposal(proposals, project,
typeArgument.getDeclaration());
}
}
}
static void addMakeSharedProposal(Collection<ICompletionProposal> proposals,
IProject project, Declaration dec) {
if (dec!=null) {
if (dec instanceof UnionType) {
List<ProducedType> caseTypes =
((UnionType) dec).getCaseTypes();
for (ProducedType caseType: caseTypes) {
addMakeSharedProposal(proposals, project,
caseType.getDeclaration());
for (ProducedType typeArgument:
caseType.getTypeArgumentList()) {
addMakeSharedProposal(proposals, project,
typeArgument.getDeclaration());
}
}
}
else if (dec instanceof IntersectionType) {
List<ProducedType> satisfiedTypes =
((IntersectionType) dec).getSatisfiedTypes();
for (ProducedType satisfiedType: satisfiedTypes) {
addMakeSharedProposal(proposals, project,
satisfiedType.getDeclaration());
for (ProducedType typeArgument:
satisfiedType.getTypeArgumentList()) {
addMakeSharedProposal(proposals, project,
typeArgument.getDeclaration());
}
}
}
else if (dec instanceof TypedDeclaration ||
dec instanceof ClassOrInterface ||
dec instanceof TypeAlias) {
if (!dec.isShared()) {
addAddAnnotationProposal(null,
"shared", "Make Shared",
dec, proposals, project);
}
}
}
}
static void addMakeSharedDecProposal(Collection<ICompletionProposal> proposals,
IProject project, Node node) {
if (node instanceof Tree.Declaration) {
Declaration d = ((Tree.Declaration) node).getDeclarationModel();
addAddAnnotationProposal(node,
"shared", "Make Shared",
d, proposals, project);
}
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_AddAnnotionProposal.java
|
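The proposal above builds its changes out of org.eclipse.text.edits primitives: a MultiTextEdit root, ReplaceEdit spans sized with the inclusive-stop arithmetic stopIndex + 1 - startIndex, and InsertEdit for new annotation text. A self-contained sketch against an in-memory Document (the token offsets are made up for illustration):
import org.eclipse.jface.text.Document;
import org.eclipse.jface.text.IDocument;
import org.eclipse.text.edits.InsertEdit;
import org.eclipse.text.edits.MultiTextEdit;
import org.eclipse.text.edits.ReplaceEdit;

public class AnnotationEditSketch {
    public static void main(String[] args) throws Exception {
        IDocument doc = new Document("default void greet() {}");
        int start = 0, stop = 6; // inclusive token bounds for "default"
        MultiTextEdit edit = new MultiTextEdit();
        // length = stop + 1 - start, the same inclusive-index arithmetic as above
        edit.addChild(new ReplaceEdit(start, stop + 1 - start, "formal"));
        edit.addChild(new InsertEdit(doc.getLength(), " // edited"));
        edit.apply(doc); // child offsets are interpreted against the original text
        System.out.println(doc.get()); // formal void greet() {} // edited
    }
}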
474 |
public abstract class ClientProxy implements DistributedObject {
protected final String instanceName;
private final String serviceName;
private final String objectName;
private volatile ClientContext context;
protected ClientProxy(String instanceName, String serviceName, String objectName) {
this.instanceName = instanceName;
this.serviceName = serviceName;
this.objectName = objectName;
}
protected final String listen(ClientRequest registrationRequest, Object partitionKey, EventHandler handler) {
return ListenerUtil.listen(context, registrationRequest, partitionKey, handler);
}
protected final String listen(ClientRequest registrationRequest, EventHandler handler) {
return ListenerUtil.listen(context, registrationRequest, null, handler);
}
protected final boolean stopListening(BaseClientRemoveListenerRequest request, String registrationId) {
return ListenerUtil.stopListening(context, request, registrationId);
}
protected final ClientContext getContext() {
final ClientContext ctx = context;
if (ctx == null) {
throw new DistributedObjectDestroyedException(serviceName, objectName);
}
return ctx;
}
protected final void setContext(ClientContext context) {
this.context = context;
}
@Deprecated
public final Object getId() {
return objectName;
}
public final String getName() {
return objectName;
}
public String getPartitionKey() {
return StringPartitioningStrategy.getPartitionKey(getName());
}
public final String getServiceName() {
return serviceName;
}
public final void destroy() {
onDestroy();
ClientDestroyRequest request = new ClientDestroyRequest(objectName, getServiceName());
try {
context.getInvocationService().invokeOnRandomTarget(request).get();
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
context.removeProxy(this);
context = null;
}
protected abstract void onDestroy();
protected void onShutdown() {
}
protected <T> T invoke(ClientRequest req, Object key) {
try {
final Future future = getInvocationService().invokeOnKeyOwner(req, key);
Object result = future.get();
return toObject(result);
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
protected <T> T invokeInterruptibly(ClientRequest req, Object key) throws InterruptedException {
try {
final Future future = getInvocationService().invokeOnKeyOwner(req, key);
Object result = future.get();
return toObject(result);
} catch (Exception e) {
throw ExceptionUtil.rethrowAllowInterrupted(e);
}
}
private ClientInvocationService getInvocationService() {
return getContext().getInvocationService();
}
protected <T> T invoke(ClientRequest req) {
try {
final Future future = getInvocationService().invokeOnRandomTarget(req);
Object result = future.get();
return toObject(result);
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
protected <T> T invoke(ClientRequest req, Address address) {
try {
final Future future = getInvocationService().invokeOnTarget(req, address);
Object result = future.get();
return toObject(result);
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
protected Data toData(Object o) {
return getContext().getSerializationService().toData(o);
}
protected <T> T toObject(Object data) {
return getContext().getSerializationService().toObject(data);
}
protected void throwExceptionIfNull(Object o) {
if (o == null) {
throw new NullPointerException("Object is null");
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ClientProxy that = (ClientProxy) o;
if (!instanceName.equals(that.instanceName)) {
return false;
}
if (!objectName.equals(that.objectName)) {
return false;
}
if (!serviceName.equals(that.serviceName)) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = instanceName.hashCode();
result = 31 * result + serviceName.hashCode();
result = 31 * result + objectName.hashCode();
return result;
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_ClientProxy.java
|
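getContext() above reads the volatile context field once into a local before the null check, so a concurrent destroy() cannot null it between the check and the use. The guard in isolation, with illustrative names in place of the Hazelcast types:
public class GuardedContextSketch {
    static class Context { String name() { return "ctx"; } }

    private volatile Context context = new Context();

    Context getContext() {
        final Context ctx = context; // single volatile read; safe vs. concurrent destroy()
        if (ctx == null) {
            throw new IllegalStateException("proxy was destroyed");
        }
        return ctx;
    }

    void destroy() {
        context = null; // later calls to getContext() fail fast
    }

    public static void main(String[] args) {
        GuardedContextSketch proxy = new GuardedContextSketch();
        System.out.println(proxy.getContext().name());
        proxy.destroy();
        try {
            proxy.getContext();
        } catch (IllegalStateException expected) {
            System.out.println("destroyed: " + expected.getMessage());
        }
    }
}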
9 |
static final class AsyncAccept<T> extends Async {
final T arg;
final Action<? super T> fn;
final CompletableFuture<Void> dst;
AsyncAccept(T arg, Action<? super T> fn,
CompletableFuture<Void> dst) {
this.arg = arg; this.fn = fn; this.dst = dst;
}
public final boolean exec() {
CompletableFuture<Void> d; Throwable ex;
if ((d = this.dst) != null && d.result == null) {
try {
fn.accept(arg);
ex = null;
} catch (Throwable rex) {
ex = rex;
}
d.internalComplete(null, ex);
}
return true;
}
private static final long serialVersionUID = 5232453952276885070L;
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
|
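jsr166e.CompletableFuture is the backport of what shipped as java.util.concurrent.CompletableFuture in JDK 8; an AsyncAccept task is roughly the machinery behind thenAcceptAsync, completing the dependent Void future with null on success or with the thrown exception. A sketch using the JDK class:
import java.util.concurrent.CompletableFuture;

public class AsyncAcceptSketch {
    public static void main(String[] args) {
        CompletableFuture<String> source = CompletableFuture.completedFuture("payload");
        CompletableFuture<Void> dst = source.thenAcceptAsync(arg -> {
            // like fn.accept(arg) in exec(); an exception thrown here would
            // complete dst exceptionally instead of normally
            System.out.println("consumed " + arg);
        });
        dst.join(); // completes with null once the action has run
    }
}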
83 |
public static class Order {
public static final int File_Details = 2000;
public static final int Advanced = 3000;
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_domain_StaticAssetImpl.java
|
49 |
@edu.umd.cs.findbugs.annotations.SuppressWarnings({ "EI_EXPOSE_REP", "MS_MUTABLE_ARRAY", "MS_PKGPROTECT" })
public abstract class HttpCommand extends AbstractTextCommand {
public static final String HEADER_CONTENT_TYPE = "content-type: ";
public static final String HEADER_CONTENT_LENGTH = "content-length: ";
public static final String HEADER_CHUNKED = "transfer-encoding: chunked";
public static final String HEADER_EXPECT_100 = "expect: 100";
public static final byte[] RES_200 = stringToBytes("HTTP/1.1 200 OK\r\n");
public static final byte[] RES_400 = stringToBytes("HTTP/1.1 400 Bad Request\r\nContent-Length: 0\r\n\r\n");
public static final byte[] RES_403 = stringToBytes("HTTP/1.1 403 Forbidden\r\n\r\n");
public static final byte[] RES_404 = stringToBytes("HTTP/1.1 404 Not Found\r\n\r\n");
public static final byte[] RES_100 = stringToBytes("HTTP/1.1 100 Continue\r\n\r\n");
public static final byte[] RES_204 = stringToBytes("HTTP/1.1 204 No Content\r\nContent-Length: 0\r\n\r\n");
public static final byte[] RES_503 = stringToBytes("HTTP/1.1 503 Service Unavailable\r\nContent-Length: 0\r\n\r\n");
public static final byte[] RES_500 = stringToBytes("HTTP/1.1 500 Internal Server Error\r\nContent-Length: 0\r\n\r\n");
public static final byte[] CONTENT_TYPE = stringToBytes("Content-Type: ");
public static final byte[] CONTENT_LENGTH = stringToBytes("Content-Length: ");
public static final byte[] CONTENT_TYPE_PLAIN_TEXT = stringToBytes("text/plain");
public static final byte[] CONTENT_TYPE_BINARY = stringToBytes("application/binary");
protected final String uri;
protected ByteBuffer response;
public HttpCommand(TextCommandType type, String uri) {
super(type);
this.uri = uri;
}
public boolean shouldReply() {
return true;
}
public String getURI() {
return uri;
}
public void send204() {
this.response = ByteBuffer.wrap(RES_204);
}
public void send400() {
this.response = ByteBuffer.wrap(RES_400);
}
public void setResponse(byte[] value) {
this.response = ByteBuffer.wrap(value);
}
public void send200() {
setResponse(null, null);
}
/**
     * HTTP/1.1 200 OK
     * Date: Fri, 31 Dec 1999 23:59:59 GMT
     * Content-Type: text/html
     * Content-Length: 1354
     *
     * @param contentType content type header value, or null to omit the header
     * @param value       response body bytes, or null for an empty body
*/
public void setResponse(byte[] contentType, byte[] value) {
int valueSize = (value == null) ? 0 : value.length;
byte[] len = stringToBytes(String.valueOf(valueSize));
int size = RES_200.length;
if (contentType != null) {
size += CONTENT_TYPE.length;
size += contentType.length;
size += RETURN.length;
}
size += CONTENT_LENGTH.length;
size += len.length;
size += RETURN.length;
size += RETURN.length;
size += valueSize;
size += RETURN.length;
this.response = ByteBuffer.allocate(size);
response.put(RES_200);
if (contentType != null) {
response.put(CONTENT_TYPE);
response.put(contentType);
response.put(RETURN);
}
response.put(CONTENT_LENGTH);
response.put(len);
response.put(RETURN);
response.put(RETURN);
if (value != null) {
response.put(value);
}
response.put(RETURN);
response.flip();
}
public boolean writeTo(ByteBuffer bb) {
IOUtil.copyToHeapBuffer(response, bb);
return !response.hasRemaining();
}
@Override
public String toString() {
return "HttpCommand ["
+ type + "]{"
+ "uri='"
+ uri
+ '\''
+ '}'
+ super.toString();
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_rest_HttpCommand.java
|
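setResponse() above precomputes the exact byte count of the status line, headers, separators, and body, allocates once, fills the buffer, and flips it for the channel write in writeTo(). The same size-then-fill pattern in isolation (the header strings here are illustrative, not the class's constants):
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ExactBufferSketch {
    public static void main(String[] args) {
        byte[] status = "HTTP/1.1 200 OK\r\n".getBytes(StandardCharsets.US_ASCII);
        byte[] body = "hello".getBytes(StandardCharsets.US_ASCII);
        byte[] length = ("Content-Length: " + body.length + "\r\n\r\n")
                .getBytes(StandardCharsets.US_ASCII);
        ByteBuffer response = ByteBuffer.allocate(
                status.length + length.length + body.length); // exact size, no resizing
        response.put(status).put(length).put(body);
        response.flip(); // ready for channel writes, as in writeTo()
        System.out.println(new String(response.array(), StandardCharsets.US_ASCII));
    }
}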
341 |
protected static class NodeRestartRequest extends NodeOperationRequest {
TimeValue delay;
private NodeRestartRequest() {
}
private NodeRestartRequest(String nodeId, NodesRestartRequest request) {
super(request, nodeId);
this.delay = request.delay;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
delay = readTimeValue(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
delay.writeTo(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_restart_TransportNodesRestartAction.java
|
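readFrom() and writeTo() above must stay symmetric: the delay field is written and read back in the same position relative to the superclass's fields. The contract in miniature with plain java.io streams (all names are illustrative):
import java.io.*;

public class SymmetricSerializationSketch {
    static void writeTo(DataOutput out, long delayMillis) throws IOException {
        out.writeLong(delayMillis); // must mirror readFrom exactly, field for field
    }

    static long readFrom(DataInput in) throws IOException {
        return in.readLong();
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeTo(new DataOutputStream(bytes), 5_000L);
        long delay = readFrom(new DataInputStream(
                new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println("delay=" + delay + "ms"); // delay=5000ms
    }
}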