Unnamed: 0
int64 0
6.45k
| func
stringlengths 37
143k
| target
class label 2
classes | project
stringlengths 33
157
|
---|---|---|---|
1,488 |
/**
 * Pooled variant of {@link OObjectDatabaseTx}. Instances are handed out by an
 * {@link OObjectDatabasePool}: {@link #close()} returns the instance to the
 * owner pool instead of closing the connection, while {@link #forceClose()}
 * really closes it.
 */
@SuppressWarnings("unchecked")
public class OObjectDatabaseTxPooled extends OObjectDatabaseTx implements ODatabasePooled {
// Owning pool. Set to null once the instance has been released back to the pool.
private OObjectDatabasePool ownerPool;
/**
 * Creates the pooled instance and opens it immediately through the superclass
 * (the local open() override is disabled and throws).
 */
public OObjectDatabaseTxPooled(final OObjectDatabasePool iOwnerPool, final String iURL, final String iUserName,
final String iUserPassword) {
super(iURL);
ownerPool = iOwnerPool;
super.open(iUserName, iUserPassword);
}
/**
 * Re-initializes this instance when the pool hands it out again.
 * iAdditionalArgs[0] and [1] are expected to be the user name and password.
 */
public void reuse(final Object iOwner, final Object[] iAdditionalArgs) {
ownerPool = (OObjectDatabasePool) iOwner;
if (isClosed())
open((String) iAdditionalArgs[0], (String) iAdditionalArgs[1]);
init();
// getMetadata().reload();
ODatabaseRecordThreadLocal.INSTANCE.set(getUnderlying());
try {
// Walk the wrapper chain down to the raw database and fire its open listeners.
ODatabase current = underlying;
while (!(current instanceof ODatabaseRaw) && ((ODatabaseComplex<?>) current).getUnderlying() != null)
current = ((ODatabaseComplex<?>) current).getUnderlying();
((ODatabaseRaw) current).callOnOpenListeners();
} catch (Exception e) {
// Best effort: a listener failure must not prevent reuse of the instance.
OLogManager.instance().error(this, "Error on reusing database '%s' in pool", e, getName());
}
}
/**
 * Disabled for pooled instances: always throws UnsupportedOperationException.
 */
@Override
public OObjectDatabaseTxPooled open(String iUserName, String iUserPassword) {
throw new UnsupportedOperationException(
"Database instance was retrieved from a pool. You cannot open the database in this way. Use directly a OObjectDatabaseTx instance if you want to manually open the connection");
}
/**
 * Disabled for pooled instances: always throws UnsupportedOperationException.
 */
@Override
public OObjectDatabaseTxPooled create() {
throw new UnsupportedOperationException(
"Database instance was retrieved from a pool. You cannot open the database in this way. Use directly a OObjectDatabaseTx instance if you want to manually open the connection");
}
// Considered closed once released to the pool (ownerPool == null) as well as
// when the underlying database is actually closed.
@Override
public boolean isClosed() {
return ownerPool == null || super.isClosed();
}
/**
 * Avoid to close it but rather release itself to the owner pool.
 */
@Override
public void close() {
if (isClosed())
return;
// Drop the object<->record caches before handing the instance back.
objects2Records.clear();
records2Objects.clear();
rid2Records.clear();
checkOpeness();
try {
// Discard any pending transaction so the next user gets a clean instance.
rollback();
} catch (Exception e) {
OLogManager.instance().error(this, "Error on releasing database '%s' in pool", e, getName());
}
try {
// Walk the wrapper chain down to the raw database and fire its close listeners.
ODatabase current = underlying;
while (!(current instanceof ODatabaseRaw) && ((ODatabaseComplex<?>) current).getUnderlying() != null)
current = ((ODatabaseComplex<?>) current).getUnderlying();
((ODatabaseRaw) current).callOnCloseListeners();
} catch (Exception e) {
OLogManager.instance().error(this, "Error on releasing database '%s' in pool", e, getName());
}
getLevel1Cache().clear();
if (ownerPool != null) {
// Null the field first so re-entrant close() calls become no-ops, then release.
final OObjectDatabasePool localCopy = ownerPool;
ownerPool = null;
localCopy.release(this);
}
}
/**
 * Really closes the underlying database instead of releasing it to the pool.
 */
public void forceClose() {
super.close();
}
/**
 * Fails fast when this instance has already been released back to the pool.
 */
@Override
protected void checkOpeness() {
if (ownerPool == null)
throw new ODatabaseException(
"Database instance has been released to the pool. Get another database instance from the pool with the right username and password");
super.checkOpeness();
}
// True while the wrapped connection itself is open, regardless of pool state.
public boolean isUnderlyingOpen() {
return !super.isClosed();
}
}
| 1no label
|
object_src_main_java_com_orientechnologies_orient_object_db_OObjectDatabaseTxPooled.java
|
741 |
/**
 * Extensible enumeration of product option types (color, size, date, ...).
 * Every constructed instance self-registers in a shared lookup map keyed by
 * its type code, so {@link #getInstance(String)} resolves both the predefined
 * constants below and any additions contributed by other modules.
 */
public class ProductOptionType implements Serializable, BroadleafEnumerationType {

    private static final long serialVersionUID = 1L;

    // Registry of every constructed type, kept in creation order.
    private static final Map<String, ProductOptionType> TYPES = new LinkedHashMap<String, ProductOptionType>();

    public static final ProductOptionType COLOR = new ProductOptionType("COLOR","Color");
    public static final ProductOptionType SIZE = new ProductOptionType("SIZE","Size");
    public static final ProductOptionType DATE = new ProductOptionType("DATE","Date");
    public static final ProductOptionType TEXT = new ProductOptionType("TEXT","Text");
    public static final ProductOptionType BOOLEAN = new ProductOptionType("BOOLEAN","Boolean");
    public static final ProductOptionType DECIMAL = new ProductOptionType("DECIMAL","Decimal");
    public static final ProductOptionType INTEGER = new ProductOptionType("INTEGER","Integer");
    public static final ProductOptionType INPUT = new ProductOptionType("INPUT","Input");
    public static final ProductOptionType PRODUCT = new ProductOptionType("PRODUCT","Product");

    /** Looks up a registered type by its code; returns null when unknown. */
    public static ProductOptionType getInstance(final String type) {
        return TYPES.get(type);
    }

    private String type;
    private String friendlyType;

    /** No-arg constructor required by serialization frameworks; registers nothing. */
    public ProductOptionType() {
        // intentionally empty
    }

    /** Creates a new type and registers it under the given code. */
    public ProductOptionType(final String type, final String friendlyType) {
        this.friendlyType = friendlyType;
        setType(type);
    }

    public String getType() {
        return type;
    }

    public String getFriendlyType() {
        return friendlyType;
    }

    private void setType(final String type) {
        this.type = type;
        // First registration wins: never overwrite an existing entry.
        if (!TYPES.containsKey(type)) {
            TYPES.put(type, this);
        }
    }

    @Override
    public int hashCode() {
        // Numerically identical to the conventional 31 * 1 + hash(type) recipe.
        return 31 + (type == null ? 0 : type.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        final ProductOptionType other = (ProductOptionType) obj;
        return type == null ? other.type == null : type.equals(other.type);
    }
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_service_type_ProductOptionType.java
|
8 |
/**
 * Iterator variant that supports replacing the element at the current
 * position while iterating.
 */
public interface OLazyIterator<T> extends Iterator<T> {
/**
 * Replaces the current element with iValue.
 * NOTE(review): the return value is presumably the previous element —
 * confirm against the implementations before relying on it.
 */
public T update(T iValue);
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OLazyIterator.java
|
1,460 |
/**
 * Executor for the SQL "DELETE VERTEX" command. Supports deleting a single
 * vertex by rid, all vertices of a class (with an optional WHERE condition),
 * or every vertex ("select from V") when neither rid nor class is given.
 */
public class OCommandExecutorSQLDeleteVertex extends OCommandExecutorSQLAbstract implements OCommandResultListener {
  public static final String NAME = "DELETE VERTEX";

  private ORecordId rid;                 // punctual target, when "DELETE VERTEX #x:y"
  private int removed = 0;               // number of vertices removed so far
  private ODatabaseRecord database;
  private OCommandRequest query;         // async query used for class/WHERE targets

  /**
   * Parses "DELETE VERTEX <rid>|[<class>] [WHERE <conditions>]".
   *
   * @return this executor, ready for {@link #execute(Map)}
   * @throws OCommandSQLParsingException when a named class does not exist
   */
  @SuppressWarnings("unchecked")
  public OCommandExecutorSQLDeleteVertex parse(final OCommandRequest iRequest) {
    database = getDatabase();
    init((OCommandRequestText) iRequest);
    parserRequiredKeyword("DELETE");
    parserRequiredKeyword("VERTEX");
    OClass clazz = null;
    String temp = parseOptionalWord(true);
    while (temp != null) {
      if (temp.startsWith("#")) {
        rid = new ORecordId(temp);
      } else if (temp.equals(KEYWORD_WHERE)) {
        if (clazz == null)
          // ASSIGN DEFAULT CLASS
          clazz = database.getMetadata().getSchema().getClass(OrientVertex.CLASS_NAME);
        final String condition = parserGetCurrentPosition() > -1 ? " " + parserText.substring(parserGetPreviousPosition()) : "";
        query = database.command(new OSQLAsynchQuery<ODocument>("select from " + clazz.getName() + condition, this));
        break;
      } else if (temp.length() > 0) {
        // GET/CHECK CLASS NAME
        clazz = database.getMetadata().getSchema().getClass(temp);
        if (clazz == null)
          // FIX: the message previously lacked the closing quote around the class name
          throw new OCommandSQLParsingException("Class '" + temp + "' was not found");
      }
      if (rid == null && clazz == null)
        // DELETE ALL VERTEXES
        query = database.command(new OSQLAsynchQuery<ODocument>("select from V", this));
      temp = parseOptionalWord(true);
      if (parserIsEnded())
        break;
    }
    return this;
  }

  /**
   * Execute the command and return the ODocument object created.
   *
   * @return the number of removed vertices
   */
  public Object execute(final Map<Object, Object> iArgs) {
    if (rid == null && query == null)
      throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet");
    final OrientBaseGraph graph = OGraphCommandExecutorSQLFactory.getGraph();
    if (rid != null) {
      // REMOVE PUNCTUAL RID
      final OrientVertex v = graph.getVertex(rid);
      if (v != null) {
        v.remove();
        removed = 1;
      }
    } else if (query != null)
      // TARGET IS A CLASS + OPTIONAL CONDITION; deletion happens in result()
      query.execute(iArgs);
    else
      throw new OCommandExecutionException("Invalid target");
    return removed;
  }

  /**
   * Delete the current vertex. Invoked as the async-query result listener:
   * returns true to continue fetching results, false otherwise.
   */
  public boolean result(final Object iRecord) {
    final OIdentifiable id = (OIdentifiable) iRecord;
    if (id.getIdentity().isValid()) {
      final OrientBaseGraph graph = OGraphCommandExecutorSQLFactory.getGraph();
      final OrientVertex v = graph.getVertex(id);
      if (v != null) {
        v.remove();
        removed++;
        return true;
      }
    }
    return false;
  }

  @Override
  public String getSyntax() {
    return "DELETE VERTEX <rid>|<[<class>] [WHERE <conditions>] [LIMIT <max-records>]>";
  }

  @Override
  public void end() {
    // nothing to clean up at end of the async query
  }
}
| 1no label
|
graphdb_src_main_java_com_orientechnologies_orient_graph_sql_OCommandExecutorSQLDeleteVertex.java
|
110 |
/**
 * Test helper: a MembershipListener that counts down the given latch whenever
 * a member attribute change is observed; add/remove events are ignored.
 */
private static class LatchMembershipListener implements MembershipListener {
private final CountDownLatch latch;
private LatchMembershipListener(CountDownLatch latch) {
this.latch = latch;
}
@Override
public void memberAdded(MembershipEvent membershipEvent) {
// intentionally ignored: this listener only tracks attribute changes
}
@Override
public void memberRemoved(MembershipEvent membershipEvent) {
// intentionally ignored: this listener only tracks attribute changes
}
@Override
public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
latch.countDown();
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientMemberAttributeTest.java
|
16 |
/**
 * Marker interface identifying asynchronous tasks produced by async methods;
 * it declares no members and is typically useful for monitoring and debugging.
 */
public static interface AsynchronousCompletionTask {
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
|
655 |
/**
 * Request to put (create or update) an index template via the master node.
 * Carries the template name and pattern, ordering, settings, mappings and
 * custom index metadata. readFrom/writeTo at the bottom serialize exactly
 * these fields and must stay in matching order.
 */
public class PutIndexTemplateRequest extends MasterNodeOperationRequest<PutIndexTemplateRequest> {
// Template name (required; validated below).
private String name;
// Human-readable reason for the request; defaults to empty.
private String cause = "";
// Index-name pattern the template applies to (required; validated below).
private String template;
// Priority among matching templates.
private int order;
// When true, fail instead of updating an existing template.
private boolean create;
private Settings settings = EMPTY_SETTINGS;
// Mapping type -> mapping source (JSON string).
private Map<String, String> mappings = newHashMap();
// Custom index metadata keyed by its type.
private Map<String, IndexMetaData.Custom> customs = newHashMap();
PutIndexTemplateRequest() {
}
/**
 * Constructs a new put index template request with the provided name.
 */
public PutIndexTemplateRequest(String name) {
this.name = name;
}
/**
 * Validates that both name and template are present.
 */
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (name == null) {
validationException = addValidationError("name is missing", validationException);
}
if (template == null) {
validationException = addValidationError("template is missing", validationException);
}
return validationException;
}
/**
 * Sets the name of the index template.
 */
public PutIndexTemplateRequest name(String name) {
this.name = name;
return this;
}
/**
 * The name of the index template.
 */
public String name() {
return this.name;
}
/** Sets the index-name pattern the template applies to. */
public PutIndexTemplateRequest template(String template) {
this.template = template;
return this;
}
public String template() {
return this.template;
}
/** Sets the template's priority among matching templates. */
public PutIndexTemplateRequest order(int order) {
this.order = order;
return this;
}
public int order() {
return this.order;
}
/**
 * Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
 * exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.
 */
public PutIndexTemplateRequest create(boolean create) {
this.create = create;
return this;
}
public boolean create() {
return create;
}
/**
 * The settings to create the index template with.
 */
public PutIndexTemplateRequest settings(Settings settings) {
this.settings = settings;
return this;
}
/**
 * The settings to create the index template with.
 */
public PutIndexTemplateRequest settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
}
/**
 * The settings to create the index template with (either json/yaml/properties format).
 */
public PutIndexTemplateRequest settings(String source) {
this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
return this;
}
/**
 * The settings to create the index template with (either json/yaml/properties format).
 */
public PutIndexTemplateRequest settings(Map<String, Object> source) {
try {
// Serialize the map to JSON and delegate to the String overload.
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.map(source);
settings(builder.string());
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
}
return this;
}
Settings settings() {
return this.settings;
}
/**
 * Adds mapping that will be added when the index gets created.
 *
 * @param type The mapping type
 * @param source The mapping source
 */
public PutIndexTemplateRequest mapping(String type, String source) {
mappings.put(type, source);
return this;
}
/**
 * The cause for this index template creation.
 */
public PutIndexTemplateRequest cause(String cause) {
this.cause = cause;
return this;
}
public String cause() {
return this.cause;
}
/**
 * Adds mapping that will be added when the index gets created.
 *
 * @param type The mapping type
 * @param source The mapping source
 */
public PutIndexTemplateRequest mapping(String type, XContentBuilder source) {
try {
mappings.put(type, source.string());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e);
}
return this;
}
/**
 * Adds mapping that will be added when the index gets created.
 *
 * @param type The mapping type
 * @param source The mapping source
 */
public PutIndexTemplateRequest mapping(String type, Map<String, Object> source) {
// wrap it in a type map if its not
if (source.size() != 1 || !source.containsKey(type)) {
source = MapBuilder.<String, Object>newMapBuilder().put(type, source).map();
}
try {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.map(source);
return mapping(type, builder.string());
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
}
}
Map<String, String> mappings() {
return this.mappings;
}
/**
 * The template source definition.
 */
public PutIndexTemplateRequest source(XContentBuilder templateBuilder) {
try {
return source(templateBuilder.bytes());
} catch (Exception e) {
throw new ElasticsearchIllegalArgumentException("Failed to build json for template request", e);
}
}
/**
 * The template source definition. Recognized top-level keys are "template",
 * "order", "settings" and "mappings"; any other key is treated as a custom
 * index-metadata section and silently skipped when no factory is registered.
 */
public PutIndexTemplateRequest source(Map templateSource) {
Map<String, Object> source = templateSource;
for (Map.Entry<String, Object> entry : source.entrySet()) {
String name = entry.getKey();
if (name.equals("template")) {
template(entry.getValue().toString());
} else if (name.equals("order")) {
order(XContentMapValues.nodeIntegerValue(entry.getValue(), order()));
} else if (name.equals("settings")) {
if (!(entry.getValue() instanceof Map)) {
throw new ElasticsearchIllegalArgumentException("Malformed settings section, should include an inner object");
}
settings((Map<String, Object>) entry.getValue());
} else if (name.equals("mappings")) {
Map<String, Object> mappings = (Map<String, Object>) entry.getValue();
for (Map.Entry<String, Object> entry1 : mappings.entrySet()) {
if (!(entry1.getValue() instanceof Map)) {
throw new ElasticsearchIllegalArgumentException("Malformed mappings section for type [" + entry1.getKey() + "], should include an inner object describing the mapping");
}
mapping(entry1.getKey(), (Map<String, Object>) entry1.getValue());
}
} else {
// maybe custom?
IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name);
if (factory != null) {
try {
customs.put(name, factory.fromMap((Map<String, Object>) entry.getValue()));
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]");
}
}
}
}
return this;
}
/**
 * The template source definition.
 */
public PutIndexTemplateRequest source(String templateSource) {
try {
return source(XContentFactory.xContent(templateSource).createParser(templateSource).mapOrderedAndClose());
} catch (Exception e) {
throw new ElasticsearchIllegalArgumentException("failed to parse template source [" + templateSource + "]", e);
}
}
/**
 * The template source definition.
 */
public PutIndexTemplateRequest source(byte[] source) {
return source(source, 0, source.length);
}
/**
 * The template source definition.
 */
public PutIndexTemplateRequest source(byte[] source, int offset, int length) {
try {
return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
}
}
/**
 * The template source definition.
 */
public PutIndexTemplateRequest source(BytesReference source) {
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
}
}
/** Registers a custom index-metadata section keyed by its type. */
public PutIndexTemplateRequest custom(IndexMetaData.Custom custom) {
customs.put(custom.type(), custom);
return this;
}
Map<String, IndexMetaData.Custom> customs() {
return this.customs;
}
// NOTE: field order below must mirror writeTo() exactly.
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
cause = in.readString();
name = in.readString();
template = in.readString();
order = in.readInt();
create = in.readBoolean();
settings = readSettingsFromStream(in);
int size = in.readVInt();
for (int i = 0; i < size; i++) {
mappings.put(in.readString(), in.readString());
}
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in);
customs.put(type, customIndexMetaData);
}
}
// NOTE: field order below must mirror readFrom() exactly.
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(cause);
out.writeString(name);
out.writeString(template);
out.writeInt(order);
out.writeBoolean(create);
writeSettingsToStream(settings, out);
out.writeVInt(mappings.size());
for (Map.Entry<String, String> entry : mappings.entrySet()) {
out.writeString(entry.getKey());
out.writeString(entry.getValue());
}
out.writeVInt(customs.size());
for (Map.Entry<String, IndexMetaData.Custom> entry : customs.entrySet()) {
out.writeString(entry.getKey());
IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_template_put_PutIndexTemplateRequest.java
|
111 |
/**
 * One or more variables that together maintain a running {@code double}
 * maximum with initial value {@code Double.NEGATIVE_INFINITY}. Updates are
 * striped across Striped64 cells to reduce contention; doubles are carried
 * as raw long bits so the inherited long-based CAS machinery can be reused.
 */
public class DoubleMaxUpdater extends Striped64 implements Serializable {
private static final long serialVersionUID = 7249069246863182397L;
/**
 * Long representation of negative infinity. See class Double
 * internal documentation for explanation.
 */
private static final long MIN_AS_LONG = 0xfff0000000000000L;
/**
 * Update function. See class DoubleAdder for rationale
 * for using conversions from/to long.
 */
final long fn(long v, long x) {
return Double.longBitsToDouble(v) > Double.longBitsToDouble(x) ? v : x;
}
/**
 * Creates a new instance with initial value of {@code
 * Double.NEGATIVE_INFINITY}.
 */
public DoubleMaxUpdater() {
base = MIN_AS_LONG;
}
/**
 * Updates the maximum to be at least the given value.
 *
 * @param x the value to update
 */
public void update(double x) {
long lx = Double.doubleToRawLongBits(x);
// Fast path: CAS the base when there are no cells and the new value is
// larger; otherwise fall through to the per-thread cell, and on CAS
// contention delegate to Striped64.retryUpdate.
Cell[] as; long b, v; HashCode hc; Cell a; int n;
if ((as = cells) != null ||
(Double.longBitsToDouble(b = base) < x && !casBase(b, lx))) {
boolean uncontended = true;
int h = (hc = threadHashCode.get()).code;
if (as == null || (n = as.length) < 1 ||
(a = as[(n - 1) & h]) == null ||
(Double.longBitsToDouble(v = a.value) < x &&
!(uncontended = a.cas(v, lx))))
retryUpdate(lx, hc, uncontended);
}
}
/**
 * Returns the current maximum. The returned value is
 * <em>NOT</em> an atomic snapshot; invocation in the absence of
 * concurrent updates returns an accurate result, but concurrent
 * updates that occur while the value is being calculated might
 * not be incorporated.
 *
 * @return the maximum
 */
public double max() {
Cell[] as = cells;
double max = Double.longBitsToDouble(base);
if (as != null) {
int n = as.length;
double v;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null && (v = Double.longBitsToDouble(a.value)) > max)
max = v;
}
}
return max;
}
/**
 * Resets variables maintaining updates to {@code
 * Double.NEGATIVE_INFINITY}. This method may be a useful
 * alternative to creating a new updater, but is only effective if
 * there are no concurrent updates. Because this method is
 * intrinsically racy, it should only be used when it is known
 * that no threads are concurrently updating.
 */
public void reset() {
internalReset(MIN_AS_LONG);
}
/**
 * Equivalent in effect to {@link #max} followed by {@link
 * #reset}. This method may apply for example during quiescent
 * points between multithreaded computations. If there are
 * updates concurrent with this method, the returned value is
 * <em>not</em> guaranteed to be the final value occurring before
 * the reset.
 *
 * @return the maximum
 */
public double maxThenReset() {
Cell[] as = cells;
double max = Double.longBitsToDouble(base);
// Reset base and each cell as it is read; not an atomic snapshot (see javadoc).
base = MIN_AS_LONG;
if (as != null) {
int n = as.length;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null) {
double v = Double.longBitsToDouble(a.value);
a.value = MIN_AS_LONG;
if (v > max)
max = v;
}
}
}
return max;
}
/**
 * Returns the String representation of the {@link #max}.
 * @return the String representation of the {@link #max}
 */
public String toString() {
return Double.toString(max());
}
/**
 * Equivalent to {@link #max}.
 *
 * @return the max
 */
public double doubleValue() {
return max();
}
/**
 * Returns the {@link #max} as a {@code long} after a
 * narrowing primitive conversion.
 */
public long longValue() {
return (long)max();
}
/**
 * Returns the {@link #max} as an {@code int} after a
 * narrowing primitive conversion.
 */
public int intValue() {
return (int)max();
}
/**
 * Returns the {@link #max} as a {@code float}
 * after a narrowing primitive conversion.
 */
public float floatValue() {
return (float)max();
}
// Serializes only the collapsed maximum, not the cell structure.
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeDouble(max());
}
// Deserializes as a fresh, cell-free updater seeded with the saved maximum.
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
busy = 0;
cells = null;
base = Double.doubleToRawLongBits(s.readDouble());
}
}
| 0true
|
src_main_java_jsr166e_DoubleMaxUpdater.java
|
5,224 |
/**
 * Date-histogram bucket: an InternalHistogram.Bucket whose key is a
 * millisecond timestamp, rendered either through the supplied formatter
 * or through the default date-time printer.
 */
static class Bucket extends InternalHistogram.Bucket implements DateHistogram.Bucket {
// Optional formatter; null means fall back to the default date-time printer.
private final ValueFormatter formatter;
Bucket(long key, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
super(key, docCount, aggregations);
this.formatter = formatter;
}
@Override
public String getKey() {
return formatter != null ? formatter.format(key) : DateFieldMapper.Defaults.DATE_TIME_FORMATTER.printer().print(key);
}
@Override
public DateTime getKeyAsDate() {
// NOTE(review): DateTime(long) uses the default time zone — confirm that is intended here.
return new DateTime(key);
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_histogram_InternalDateHistogram.java
|
199 |
/**
 * Test-suite utility for creating, dropping, freezing and releasing databases,
 * transparently handling both remote ("remote:...") URLs — via OServerAdmin
 * with the root password read from the server configuration — and local URLs.
 */
public class ODatabaseHelper {

  /** Creates a database using the default "server" directory and "plocal" storage. */
  public static void createDatabase(ODatabase database, final String url) throws IOException {
    createDatabase(database, url, "server", "plocal");
  }

  /** Creates a database of the given storage type using the default "server" directory. */
  public static void createDatabase(ODatabase database, final String url, String type) throws IOException {
    createDatabase(database, url, "server", type);
  }

  /**
   * Creates a database. Remote URLs go through OServerAdmin; any other URL is
   * created locally (the database is closed again immediately after creation).
   */
  public static void createDatabase(ODatabase database, final String url, String directory, String type) throws IOException {
    if (url.startsWith(OEngineRemote.NAME)) {
      new OServerAdmin(url).connect("root", getServerRootPassword(directory)).createDatabase("document", type).close();
    } else {
      database.create();
      database.close();
    }
  }

  public static void deleteDatabase(final ODatabase database, String storageType) throws IOException {
    deleteDatabase(database, "server", storageType);
  }

  /** @deprecated use {@link #dropDatabase(ODatabase, String, String)} instead. */
  @Deprecated
  public static void deleteDatabase(final ODatabase database, final String directory, String storageType) throws IOException {
    dropDatabase(database, directory, storageType);
  }

  public static void dropDatabase(final ODatabase database, String storageType) throws IOException {
    dropDatabase(database, "server", storageType);
  }

  /** Drops the database if it exists, remotely or locally depending on the URL. */
  public static void dropDatabase(final ODatabase database, final String directory, String storageType) throws IOException {
    if (existsDatabase(database, storageType)) {
      if (database.getURL().startsWith("remote:")) {
        new OServerAdmin(database.getURL()).connect("root", getServerRootPassword(directory)).dropDatabase(storageType);
      } else {
        // A local drop requires an open database instance.
        if (database.isClosed())
          database.open("admin", "admin");
        database.drop();
      }
    }
  }

  public static boolean existsDatabase(final ODatabase database, String storageType) throws IOException {
    if (database.getURL().startsWith("remote")) {
      return new OServerAdmin(database.getURL()).connect("root", getServerRootPassword()).existsDatabase(storageType);
    } else {
      return database.exists();
    }
  }

  public static void freezeDatabase(final ODatabase database) throws IOException {
    if (database.getURL().startsWith("remote")) {
      final OServerAdmin serverAdmin = new OServerAdmin(database.getURL());
      serverAdmin.connect("root", getServerRootPassword()).freezeDatabase("plocal");
      serverAdmin.close();
    } else {
      database.freeze();
    }
  }

  public static void releaseDatabase(final ODatabase database) throws IOException {
    if (database.getURL().startsWith("remote")) {
      final OServerAdmin serverAdmin = new OServerAdmin(database.getURL());
      serverAdmin.connect("root", getServerRootPassword()).releaseDatabase("plocal");
      serverAdmin.close();
    } else {
      database.release();
    }
  }

  public static File getConfigurationFile() {
    return getConfigurationFile(null);
  }

  protected static String getServerRootPassword() throws IOException {
    return getServerRootPassword("server");
  }

  /**
   * Extracts the root password from the server configuration file.
   * FIX: the reader is now closed even if read() throws (try-with-resources),
   * and short reads are handled by looping to EOF instead of assuming one
   * read() call fills the whole buffer.
   */
  protected static String getServerRootPassword(final String iDirectory) throws IOException {
    final File file = getConfigurationFile(iDirectory);
    final char[] buffer = new char[(int) file.length()];
    int read = 0;
    try (FileReader f = new FileReader(file)) {
      int len;
      while (read < buffer.length && (len = f.read(buffer, read, buffer.length - read)) > -1)
        read += len;
    }
    final String fileContent = new String(buffer, 0, read);
    // TODO search is wrong because if first user is not root tests will fail
    int pos = fileContent.indexOf("password=\"");
    pos += "password=\"".length();
    return fileContent.substring(pos, fileContent.indexOf('"', pos));
  }

  /**
   * Locates orientdb-server-config.xml by probing, in order: the
   * "orientdb.config.file" system property, the CONFIG_FILE environment
   * variable, several release-layout paths, the optional directory argument
   * and finally ${ORIENTDB_HOME}/config. Throws when nothing is found.
   */
  protected static File getConfigurationFile(final String iDirectory) {
    // LOAD SERVER CONFIG FILE TO EXTRACT THE ROOT'S PASSWORD
    String sysProperty = System.getProperty("orientdb.config.file");
    File file = new File(sysProperty != null ? sysProperty : "");
    if (!file.exists()) {
      sysProperty = System.getenv("CONFIG_FILE");
      file = new File(sysProperty != null ? sysProperty : "");
    }
    if (!file.exists())
      file = new File("../releases/orientdb-" + OConstants.ORIENT_VERSION + "/config/orientdb-server-config.xml");
    if (!file.exists())
      file = new File("../releases/orientdb-community-" + OConstants.ORIENT_VERSION + "/config/orientdb-server-config.xml");
    if (!file.exists())
      file = new File("../../releases/orientdb-" + OConstants.ORIENT_VERSION + "/config/orientdb-server-config.xml");
    if (!file.exists())
      file = new File("../../releases/orientdb-community-" + OConstants.ORIENT_VERSION + "/config/orientdb-server-config.xml");
    if (!file.exists() && iDirectory != null) {
      file = new File(iDirectory + "/config/orientdb-server-config.xml");
      if (!file.exists())
        file = new File("../" + iDirectory + "/config/orientdb-server-config.xml");
    }
    if (!file.exists())
      file = new File(OSystemVariableResolver.resolveSystemVariables("${" + Orient.ORIENTDB_HOME
          + "}/config/orientdb-server-config.xml"));
    if (!file.exists())
      throw new OConfigurationException(
          "Cannot load file orientdb-server-config.xml to execute remote tests. Current directory is "
              + new File(".").getAbsolutePath());
    return file;
  }
}
| 0true
|
client_src_main_java_com_orientechnologies_orient_client_db_ODatabaseHelper.java
|
392 |
/**
 * Test helper thread that attempts to unlock the given key on the MultiMap
 * and records any exception thrown (e.g. when the caller does not own the lock).
 */
static class UnLockThread extends Thread{
// Exception thrown by unlock(), if any; stays null when unlock succeeded.
public Exception exception=null;
public MultiMap mm=null;
public Object key=null;
public UnLockThread(MultiMap mm, Object key){
this.mm = mm;
this.key = key;
}
public void run() {
try{
mm.unlock(key);
}catch (Exception e){
// Captured for assertion by the test that started this thread.
exception = e;
}
}
};
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_multimap_ClientMultiMapLockTest.java
|
280 |
/**
 * Base class for building and sending MIME email messages through a Spring
 * {@link JavaMailSender}. Subclasses supply the message body via
 * {@link #buildMessageBody(EmailInfo, HashMap)}.
 */
public abstract class MessageCreator {

    private JavaMailSender mailSender;

    public MessageCreator(JavaMailSender mailSender) {
        this.mailSender = mailSender;
    }

    /** Builds a MIME message from the given property map and sends it. */
    public void sendMessage(final HashMap<String,Object> props) throws MailException {
        MimeMessagePreparator preparator = buildMimeMessagePreparator(props);
        this.mailSender.send(preparator);
    }

    /** Renders the message body; invoked only when EmailInfo carries no body of its own. */
    public abstract String buildMessageBody(EmailInfo info, HashMap<String,Object> props);

    /**
     * Creates the preparator that populates the MIME message from the
     * EmailTarget (recipients) and EmailInfo (content) entries in props.
     */
    public MimeMessagePreparator buildMimeMessagePreparator(final HashMap<String,Object> props) {
        MimeMessagePreparator preparator = new MimeMessagePreparator() {
            public void prepare(MimeMessage mimeMessage) throws Exception {
                EmailTarget emailUser = (EmailTarget) props.get(EmailPropertyType.USER.getType());
                EmailInfo info = (EmailInfo) props.get(EmailPropertyType.INFO.getType());
                // Multipart only when there really are attachments.
                boolean hasAttachments = info.getAttachments() != null && info.getAttachments().size() > 0;
                MimeMessageHelper message = new MimeMessageHelper(mimeMessage, hasAttachments);
                message.setTo(emailUser.getEmailAddress());
                message.setFrom(info.getFromAddress());
                message.setSubject(info.getSubject());
                if (emailUser.getBCCAddresses() != null && emailUser.getBCCAddresses().length > 0) {
                    message.setBcc(emailUser.getBCCAddresses());
                }
                if (emailUser.getCCAddresses() != null && emailUser.getCCAddresses().length > 0) {
                    message.setCc(emailUser.getCCAddresses());
                }
                String messageBody = info.getMessageBody();
                if (messageBody == null) {
                    messageBody = buildMessageBody(info, props);
                }
                message.setText(messageBody, true);
                // BUGFIX: the helper construction above already tolerates a null
                // attachment list, but this loop previously dereferenced it
                // unconditionally and threw a NullPointerException.
                if (hasAttachments) {
                    for (Attachment attachment : info.getAttachments()) {
                        ByteArrayDataSource dataSource = new ByteArrayDataSource(attachment.getData(), attachment.getMimeType());
                        message.addAttachment(attachment.getFilename(), dataSource);
                    }
                }
            }
        };
        return preparator;
    }

    public JavaMailSender getMailSender() {
        return mailSender;
    }

    public void setMailSender(JavaMailSender mailSender) {
        this.mailSender = mailSender;
    }
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_email_service_message_MessageCreator.java
|
350 |
// Background thread performing a delayed partial cluster shutdown: after the
// requested delay it sends a shutdown request to every resolved node id and
// blocks until each node has responded, failed, or been skipped.
Thread t = new Thread(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(request.delay.millis());
} catch (InterruptedException e) {
// ignore
}
// One latch slot per target node; counted down on response, failure or skip.
final CountDownLatch latch = new CountDownLatch(nodesIds.length);
for (String nodeId : nodesIds) {
final DiscoveryNode node = state.nodes().get(nodeId);
if (node == null) {
// Node left the cluster since ids were resolved: skip it but still count down.
logger.warn("[partial_cluster_shutdown]: no node to shutdown for node_id [{}]", nodeId);
latch.countDown();
continue;
}
logger.trace("[partial_cluster_shutdown]: sending shutdown request to [{}]", node);
transportService.sendRequest(node, NodeShutdownRequestHandler.ACTION, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
logger.trace("[partial_cluster_shutdown]: received shutdown response from [{}]", node);
latch.countDown();
}
@Override
public void handleException(TransportException exp) {
// A failed shutdown still counts down so the await below cannot hang.
logger.warn("[partial_cluster_shutdown]: received failed shutdown response from [{}]", exp, node);
latch.countDown();
}
});
}
try {
latch.await();
} catch (InterruptedException e) {
// ignore
}
logger.info("[partial_cluster_shutdown]: done shutting down [{}]", ((Object) nodesIds));
}
});
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_shutdown_TransportNodesShutdownAction.java
|
40 |
/** A one-argument action: accepts a value of type {@code A} and returns no result. */
public interface Action<A> { void apply(A a); }
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
381 |
/**
 * Test helper thread: registers an entry listener on the shared MultiMap and
 * then puts MAX_ITEMS unique entries, each prefixed with this thread's random id.
 */
public class PutItemsThread extends Thread{
public static final int MAX_ITEMS = 1000;
public final MyEntryListener listener = new MyEntryListener();
public HazelcastInstance hzInstance;
public MultiMap mm;
public String id;
public PutItemsThread(HazelcastInstance hzInstance){
// randomString() and MAP_NAME are provided by the enclosing test class.
this.id = randomString();
this.hzInstance = hzInstance;
this.mm = hzInstance.getMultiMap(MAP_NAME);
mm.addEntryListener(listener, true);
}
public void run(){
for(int i=0; i< MAX_ITEMS; i++){
mm.put(id+i, id+i);
}
}
/** Asserts the listener observed exactly {@code target} add events. */
public void assertResult(int target){
System.out.println("listener "+id+" add events received "+listener.add.get());
assertEquals(target, listener.add.get());
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_multimap_ClientMultiMapListenerStressTest.java
|
150 |
/**
 * JPA implementation of {@link StructuredContentType}, mapped to BLC_SC_TYPE
 * and cached in the "blCMSElements" second-level cache region.
 */
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_SC_TYPE")
@Cache(usage= CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blCMSElements")
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE, friendlyName = "StructuredContentTypeImpl_baseStructuredContentType")
public class StructuredContentTypeImpl implements StructuredContentType, AdminMainEntity {
private static final long serialVersionUID = 1L;
// Primary key generated through Broadleaf's id-override table generator.
@Id
@GeneratedValue(generator = "StructuredContentTypeId")
@GenericGenerator(
name="StructuredContentTypeId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="StructuredContentTypeImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.cms.structure.domain.StructuredContentTypeImpl")
}
)
@Column(name = "SC_TYPE_ID")
protected Long id;
// Display name; indexed for lookups and shown prominently in the admin grid.
@Column (name = "NAME")
@AdminPresentation(friendlyName = "StructuredContentTypeImpl_Name", order = 1, gridOrder = 1, group = "StructuredContentTypeImpl_Details", prominent = true)
@Index(name="SC_TYPE_NAME_INDEX", columnNames={"NAME"})
protected String name;
@Column (name = "DESCRIPTION")
protected String description;
// Template describing the fields available to content items of this type.
@ManyToOne(targetEntity = StructuredContentFieldTemplateImpl.class)
@JoinColumn(name="SC_FLD_TMPLT_ID")
protected StructuredContentFieldTemplate structuredContentFieldTemplate;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
@Override
public String getDescription() {
return description;
}
@Override
public void setDescription(String description) {
this.description = description;
}
@Override
public StructuredContentFieldTemplate getStructuredContentFieldTemplate() {
return structuredContentFieldTemplate;
}
@Override
public void setStructuredContentFieldTemplate(StructuredContentFieldTemplate scft) {
this.structuredContentFieldTemplate = scft;
}
// The admin uses the type name as the entity's main display label.
@Override
public String getMainEntityName() {
return getName();
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentTypeImpl.java
|
1,484 |
public class Hibernate4CacheEntrySerializerHook
implements SerializerHook {
private static final String SKIP_INIT_MSG = "Hibernate4 not available, skipping serializer initialization";
private final Class<?> cacheEntryClass;
public Hibernate4CacheEntrySerializerHook() {
Class<?> cacheEntryClass = null;
if (UnsafeHelper.UNSAFE_AVAILABLE) {
try {
cacheEntryClass = Class.forName("org.hibernate.cache.spi.entry.CacheEntry");
} catch (Exception e) {
Logger.getLogger(Hibernate4CacheEntrySerializerHook.class).finest(SKIP_INIT_MSG);
}
}
this.cacheEntryClass = cacheEntryClass;
}
@Override
public Class getSerializationType() {
return cacheEntryClass;
}
@Override
public Serializer createSerializer() {
if (cacheEntryClass != null) {
return new Hibernate4CacheEntrySerializer();
}
return null;
}
@Override
public boolean isOverwritable() {
return true;
}
}
| 1no label
|
hazelcast-hibernate_hazelcast-hibernate4_src_main_java_com_hazelcast_hibernate_serialization_Hibernate4CacheEntrySerializerHook.java
|
2,603 |
static class NotMasterException extends ElasticsearchIllegalStateException {
@Override
public Throwable fillInStackTrace() {
return null;
}
}
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_fd_MasterFaultDetection.java
|
1,053 |
private static class IndexComparator implements Comparator<OIndex<?>> {
private static final IndexComparator INSTANCE = new IndexComparator();
public int compare(final OIndex<?> indexOne, final OIndex<?> indexTwo) {
return indexOne.getDefinition().getParamCount() - indexTwo.getDefinition().getParamCount();
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_OCommandExecutorSQLSelect.java
|
210 |
public class CustomPassageFormatter extends PassageFormatter {
private final String preTag;
private final String postTag;
private final Encoder encoder;
public CustomPassageFormatter(String preTag, String postTag, Encoder encoder) {
this.preTag = preTag;
this.postTag = postTag;
this.encoder = encoder;
}
@Override
public Snippet[] format(Passage[] passages, String content) {
Snippet[] snippets = new Snippet[passages.length];
int pos;
for (int j = 0; j < passages.length; j++) {
Passage passage = passages[j];
StringBuilder sb = new StringBuilder();
pos = passage.startOffset;
for (int i = 0; i < passage.numMatches; i++) {
int start = passage.matchStarts[i];
int end = passage.matchEnds[i];
// its possible to have overlapping terms
if (start > pos) {
append(sb, content, pos, start);
}
if (end > pos) {
sb.append(preTag);
append(sb, content, Math.max(pos, start), end);
sb.append(postTag);
pos = end;
}
}
// its possible a "term" from the analyzer could span a sentence boundary.
append(sb, content, pos, Math.max(pos, passage.endOffset));
//we remove the paragraph separator if present at the end of the snippet (we used it as separator between values)
if (sb.charAt(sb.length() - 1) == HighlightUtils.PARAGRAPH_SEPARATOR) {
sb.deleteCharAt(sb.length() - 1);
}
//and we trim the snippets too
snippets[j] = new Snippet(sb.toString().trim(), passage.score, passage.numMatches > 0);
}
return snippets;
}
protected void append(StringBuilder dest, String content, int start, int end) {
dest.append(encoder.encodeText(content.substring(start, end)));
}
}
| 0true
|
src_main_java_org_apache_lucene_search_postingshighlight_CustomPassageFormatter.java
|
2,104 |
public class ReplaceOperation extends BasePutOperation {
private boolean successful = false;
public ReplaceOperation(String name, Data dataKey, Data value) {
super(name, dataKey, value);
}
public ReplaceOperation() {
}
public void run() {
final Object oldValue = recordStore.replace(dataKey, dataValue);
dataOldValue = mapService.toData(oldValue);
successful = oldValue != null;
}
public boolean shouldBackup() {
return successful;
}
public void afterRun() {
if (successful)
super.afterRun();
}
@Override
public String toString() {
return "ReplaceOperation{" + name + "}";
}
@Override
public Object getResponse() {
return dataOldValue;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_operation_ReplaceOperation.java
|
4,876 |
public class RestAllocationAction extends AbstractCatAction {
@Inject
public RestAllocationAction(Settings settings, Client client, RestController controller) {
super(settings, client);
controller.registerHandler(GET, "/_cat/allocation", this);
controller.registerHandler(GET, "/_cat/allocation/{nodes}", this);
}
@Override
void documentation(StringBuilder sb) {
sb.append("/_cat/allocation\n");
}
@Override
public void doRequest(final RestRequest request, final RestChannel channel) {
final String[] nodes = Strings.splitStringByCommaToArray(request.param("nodes"));
final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
clusterStateRequest.clear().routingTable(true);
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
@Override
public void onResponse(final ClusterStateResponse state) {
NodesStatsRequest statsRequest = new NodesStatsRequest(nodes);
statsRequest.clear().fs(true);
client.admin().cluster().nodesStats(statsRequest, new ActionListener<NodesStatsResponse>() {
@Override
public void onResponse(NodesStatsResponse stats) {
try {
Table tab = buildTable(request, state, stats);
channel.sendResponse(RestTable.buildResponse(tab, request, channel));
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
Table getTableWithHeader(final RestRequest request) {
final Table table = new Table();
table.startHeaders();
table.addCell("shards", "alias:s;text-align:right;desc:number of shards on node");
table.addCell("disk.used", "alias:du,diskUsed;text-align:right;desc:disk used (total, not just ES)");
table.addCell("disk.avail", "alias:da,diskAvail;text-align:right;desc:disk available");
table.addCell("disk.total", "alias:dt,diskTotal;text-align:right;desc:total capacity of all volumes");
table.addCell("disk.percent", "alias:dp,diskPercent;text-align:right;desc:percent disk used");
table.addCell("host", "alias:h;desc:host of node");
table.addCell("ip", "desc:ip of node");
table.addCell("node", "alias:n;desc:name of node");
table.endHeaders();
return table;
}
private Table buildTable(RestRequest request, final ClusterStateResponse state, final NodesStatsResponse stats) {
final ObjectIntOpenHashMap<String> allocs = new ObjectIntOpenHashMap<String>();
for (ShardRouting shard : state.getState().routingTable().allShards()) {
String nodeId = "UNASSIGNED";
if (shard.assignedToNode()) {
nodeId = shard.currentNodeId();
}
allocs.addTo(nodeId, 1);
}
Table table = getTableWithHeader(request);
for (NodeStats nodeStats : stats.getNodes()) {
DiscoveryNode node = nodeStats.getNode();
int shardCount = 0;
if (allocs.containsKey(node.id())) {
shardCount = allocs.lget();
}
long used = nodeStats.getFs().getTotal().getTotal().bytes() - nodeStats.getFs().getTotal().getAvailable().bytes();
long avail = nodeStats.getFs().getTotal().getAvailable().bytes();
short diskPercent = -1;
if (used >= 0 && avail >= 0) {
diskPercent = (short) (used * 100 / (used + avail));
}
table.startRow();
table.addCell(shardCount);
table.addCell(used < 0 ? null : new ByteSizeValue(used));
table.addCell(avail < 0 ? null : new ByteSizeValue(avail));
table.addCell(nodeStats.getFs().getTotal().getTotal());
table.addCell(diskPercent < 0 ? null : diskPercent);
table.addCell(node == null ? null : node.getHostName());
table.addCell(node == null ? null : node.getHostAddress());
table.addCell(node == null ? "UNASSIGNED" : node.name());
table.endRow();
}
if (allocs.containsKey("UNASSIGNED")) {
table.startRow();
table.addCell(allocs.lget());
table.addCell(null);
table.addCell(null);
table.addCell(null);
table.addCell(null);
table.addCell(null);
table.addCell(null);
table.addCell("UNASSIGNED");
table.endRow();
}
return table;
}
}
| 1no label
|
src_main_java_org_elasticsearch_rest_action_cat_RestAllocationAction.java
|
2,682 |
class FieldDefinitionImpl implements DataSerializable, FieldDefinition {
int index;
String fieldName;
FieldType type;
int classId;
int factoryId;
FieldDefinitionImpl() {
}
FieldDefinitionImpl(int index, String fieldName, FieldType type) {
this(index, fieldName, type, 0, Data.NO_CLASS_ID);
}
FieldDefinitionImpl(int index, String fieldName, FieldType type, int factoryId, int classId) {
this.classId = classId;
this.type = type;
this.fieldName = fieldName;
this.index = index;
this.factoryId = factoryId;
}
public FieldType getType() {
return type;
}
public String getName() {
return fieldName;
}
public int getIndex() {
return index;
}
public int getFactoryId() {
return factoryId;
}
public int getClassId() {
return classId;
}
public void writeData(ObjectDataOutput out) throws IOException {
out.writeInt(index);
out.writeUTF(fieldName);
out.writeByte(type.getId());
out.writeInt(factoryId);
out.writeInt(classId);
}
public void readData(ObjectDataInput in) throws IOException {
index = in.readInt();
fieldName = in.readUTF();
type = FieldType.get(in.readByte());
factoryId = in.readInt();
classId = in.readInt();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
FieldDefinitionImpl that = (FieldDefinitionImpl) o;
if (classId != that.classId) {
return false;
}
if (factoryId != that.factoryId) {
return false;
}
if (fieldName != null ? !fieldName.equals(that.fieldName) : that.fieldName != null) {
return false;
}
if (type != that.type) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = fieldName != null ? fieldName.hashCode() : 0;
result = 31 * result + (type != null ? type.hashCode() : 0);
result = 31 * result + classId;
result = 31 * result + factoryId;
return result;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("FieldDefinitionImpl{");
sb.append("index=").append(index);
sb.append(", fieldName='").append(fieldName).append('\'');
sb.append(", type=").append(type);
sb.append(", classId=").append(classId);
sb.append(", factoryId=").append(factoryId);
sb.append('}');
return sb.toString();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_serialization_FieldDefinitionImpl.java
|
2,067 |
public class MapReplicationOperation extends AbstractOperation {
private Map<String, Set<RecordReplicationInfo>> data;
private Map<String, Boolean> mapInitialLoadInfo;
private Map<String, List<DelayedEntry>> delayedEntries;
public MapReplicationOperation() {
}
public MapReplicationOperation(MapService mapService, PartitionContainer container, int partitionId, int replicaIndex) {
this.setPartitionId(partitionId).setReplicaIndex(replicaIndex);
data = new HashMap<String, Set<RecordReplicationInfo>>(container.getMaps().size());
mapInitialLoadInfo = new HashMap<String, Boolean>(container.getMaps().size());
for (Entry<String, RecordStore> entry : container.getMaps().entrySet()) {
RecordStore recordStore = entry.getValue();
MapContainer mapContainer = recordStore.getMapContainer();
final MapConfig mapConfig = mapContainer.getMapConfig();
if (mapConfig.getTotalBackupCount() < replicaIndex) {
continue;
}
String name = entry.getKey();
// adding if initial data is loaded for the only maps that has mapstore behind
if (mapContainer.getStore() != null) {
mapInitialLoadInfo.put(name, replicaIndex > 0 || recordStore.isLoaded());
}
// now prepare data to migrate records
Set<RecordReplicationInfo> recordSet = new HashSet<RecordReplicationInfo>();
for (Entry<Data, Record> recordEntry : recordStore.getReadonlyRecordMap().entrySet()) {
Record record = recordEntry.getValue();
RecordReplicationInfo recordReplicationInfo;
recordReplicationInfo = mapService.createRecordReplicationInfo(record);
recordSet.add(recordReplicationInfo);
}
data.put(name, recordSet);
}
readDelayedEntries(container);
}
private void readDelayedEntries(PartitionContainer container) {
delayedEntries = new HashMap<String, List<DelayedEntry>>(container.getMaps().size());
for (Entry<String, RecordStore> entry : container.getMaps().entrySet()) {
RecordStore recordStore = entry.getValue();
final List<DelayedEntry> delayedEntries = recordStore.getWriteBehindQueue().getSnapShot().asList();
if (delayedEntries != null && delayedEntries.size() == 0) {
continue;
}
this.delayedEntries.put(entry.getKey(), delayedEntries);
}
}
public void run() {
MapService mapService = getService();
if (data != null) {
for (Entry<String, Set<RecordReplicationInfo>> dataEntry : data.entrySet()) {
Set<RecordReplicationInfo> recordReplicationInfos = dataEntry.getValue();
final String mapName = dataEntry.getKey();
RecordStore recordStore = mapService.getRecordStore(getPartitionId(), mapName);
for (RecordReplicationInfo recordReplicationInfo : recordReplicationInfos) {
Data key = recordReplicationInfo.getKey();
Record newRecord = mapService.createRecord(mapName, key, recordReplicationInfo.getValue(), -1);
mapService.applyRecordInfo(newRecord, recordReplicationInfo);
recordStore.putForReplication(key, newRecord);
}
}
}
if (mapInitialLoadInfo != null) {
for (Entry<String, Boolean> entry : mapInitialLoadInfo.entrySet()) {
final String mapName = entry.getKey();
RecordStore recordStore = mapService.getRecordStore(getPartitionId(), mapName);
recordStore.setLoaded(entry.getValue());
}
}
for (Entry<String, List<DelayedEntry>> entry : delayedEntries.entrySet()) {
final RecordStore recordStore = mapService.getRecordStore(getPartitionId(), entry.getKey());
final List<DelayedEntry> replicatedEntries = entry.getValue();
final WriteBehindQueue<DelayedEntry> writeBehindQueue = recordStore.getWriteBehindQueue();
writeBehindQueue.addEnd(replicatedEntries);
}
}
public String getServiceName() {
return MapService.SERVICE_NAME;
}
protected void readInternal(final ObjectDataInput in) throws IOException {
int size = in.readInt();
data = new HashMap<String, Set<RecordReplicationInfo>>(size);
for (int i = 0; i < size; i++) {
String name = in.readUTF();
int mapSize = in.readInt();
Set<RecordReplicationInfo> recordReplicationInfos = new HashSet<RecordReplicationInfo>(mapSize);
for (int j = 0; j < mapSize; j++) {
RecordReplicationInfo recordReplicationInfo = in.readObject();
recordReplicationInfos.add(recordReplicationInfo);
}
data.put(name, recordReplicationInfos);
}
size = in.readInt();
mapInitialLoadInfo = new HashMap<String, Boolean>(size);
for (int i = 0; i < size; i++) {
String name = in.readUTF();
boolean loaded = in.readBoolean();
mapInitialLoadInfo.put(name, loaded);
}
size = in.readInt();
delayedEntries = new HashMap<String, List<DelayedEntry>>(size);
for (int i = 0; i < size; i++) {
final String mapName = in.readUTF();
final int listSize = in.readInt();
final List<DelayedEntry> delayedEntriesList = new ArrayList<DelayedEntry>(listSize);
for (int j = 0; j < listSize; j++) {
final Data key = IOUtil.readNullableData(in);
final Data value = IOUtil.readNullableData(in);
final long storeTime = in.readLong();
final int partitionId = in.readInt();
final DelayedEntry<Data, Data> entry
= DelayedEntry.create(key, value, storeTime, partitionId);
delayedEntriesList.add(entry);
}
delayedEntries.put(mapName, delayedEntriesList);
}
}
protected void writeInternal(final ObjectDataOutput out) throws IOException {
out.writeInt(data.size());
for (Entry<String, Set<RecordReplicationInfo>> mapEntry : data.entrySet()) {
out.writeUTF(mapEntry.getKey());
Set<RecordReplicationInfo> recordReplicationInfos = mapEntry.getValue();
out.writeInt(recordReplicationInfos.size());
for (RecordReplicationInfo recordReplicationInfo : recordReplicationInfos) {
out.writeObject(recordReplicationInfo);
}
}
out.writeInt(mapInitialLoadInfo.size());
for (Entry<String, Boolean> entry : mapInitialLoadInfo.entrySet()) {
out.writeUTF(entry.getKey());
out.writeBoolean(entry.getValue());
}
final MapService mapService = getService();
out.writeInt(delayedEntries.size());
for (Entry<String, List<DelayedEntry>> entry : delayedEntries.entrySet()) {
out.writeUTF(entry.getKey());
final List<DelayedEntry> delayedEntryList = entry.getValue();
out.writeInt(delayedEntryList.size());
for (DelayedEntry e : delayedEntryList) {
final Data key = mapService.toData(e.getKey());
final Data value = mapService.toData(e.getValue());
IOUtil.writeNullableData(out, key);
IOUtil.writeNullableData(out, value);
out.writeLong(e.getStoreTime());
out.writeInt(e.getPartitionId());
}
}
}
public boolean isEmpty() {
return data == null || data.isEmpty();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_operation_MapReplicationOperation.java
|
1,213 |
public class PaymentLogEventType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, PaymentLogEventType> TYPES = new LinkedHashMap<String, PaymentLogEventType>();
public static final PaymentLogEventType START = new PaymentLogEventType("START", "Start");
public static final PaymentLogEventType FINISHED = new PaymentLogEventType("FINISHED", "Finished");
public static PaymentLogEventType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public PaymentLogEventType() {
//do nothing
}
public PaymentLogEventType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
PaymentLogEventType other = (PaymentLogEventType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_type_PaymentLogEventType.java
|
2,977 |
public class UnsortedIndexStore implements IndexStore {
private final ConcurrentMap<Comparable, ConcurrentMap<Data, QueryableEntry>> mapRecords
= new ConcurrentHashMap<Comparable, ConcurrentMap<Data, QueryableEntry>>(1000);
@Override
public void getSubRecordsBetween(MultiResultSet results, Comparable from, Comparable to) {
int trend = from.compareTo(to);
if (trend == 0) {
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(from);
if (records != null) {
results.addResultSet(records);
}
return;
}
if (trend < 0) {
Comparable oldFrom = from;
from = to;
to = oldFrom;
}
Set<Comparable> values = mapRecords.keySet();
for (Comparable value : values) {
if (value.compareTo(from) <= 0 && value.compareTo(to) >= 0) {
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(value);
if (records != null) {
results.addResultSet(records);
}
}
}
}
@Override
public void getSubRecords(MultiResultSet results, ComparisonType comparisonType, Comparable searchedValue) {
Set<Comparable> values = mapRecords.keySet();
for (Comparable value : values) {
boolean valid;
int result = value.compareTo(searchedValue);
switch (comparisonType) {
case LESSER:
valid = result < 0;
break;
case LESSER_EQUAL:
valid = result <= 0;
break;
case GREATER:
valid = result > 0;
break;
case GREATER_EQUAL:
valid = result >= 0;
break;
case NOT_EQUAL:
valid = result != 0;
break;
default:
throw new IllegalStateException("Unrecognized comparisonType:" + comparisonType);
}
if (valid) {
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(value);
if (records != null) {
results.addResultSet(records);
}
}
}
}
@Override
public void newIndex(Comparable newValue, QueryableEntry record) {
Data indexKey = record.getIndexKey();
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(newValue);
if (records == null) {
records = new ConcurrentHashMap<Data, QueryableEntry>();
ConcurrentMap<Data, QueryableEntry> existing = mapRecords.putIfAbsent(newValue, records);
if (existing != null) {
records = existing;
}
}
records.put(indexKey, record);
}
@Override
public ConcurrentMap<Data, QueryableEntry> getRecordMap(Comparable indexValue) {
return mapRecords.get(indexValue);
}
@Override
public void removeIndex(Comparable oldValue, Data indexKey) {
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(oldValue);
if (records != null) {
records.remove(indexKey);
if (records.size() == 0) {
mapRecords.remove(oldValue);
}
}
}
@Override
public Set<QueryableEntry> getRecords(Comparable value) {
return new SingleResultSet(mapRecords.get(value));
}
@Override
public void getRecords(MultiResultSet results, Set<Comparable> values) {
for (Comparable value : values) {
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(value);
if (records != null) {
results.addResultSet(records);
}
}
}
@Override
public void clear() {
mapRecords.clear();
}
@Override
public String toString() {
return "UnsortedIndexStore{"
+ "mapRecords=" + mapRecords.size()
+ '}';
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_query_impl_UnsortedIndexStore.java
|
292 |
public interface OScriptFormatter {
public String getFunctionDefinition(OFunction iFunction);
public String getFunctionInvoke(OFunction iFunction, final Object[] iArgs);
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_script_formatter_OScriptFormatter.java
|
544 |
flushAction.execute(Requests.flushRequest(request.indices()), new ActionListener<FlushResponse>() {
@Override
public void onResponse(FlushResponse flushResponse) {
// get all types that need to be deleted.
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result = clusterService.state().metaData().findMappings(
request.indices(), request.types()
);
// create OrFilter with type filters within to account for different types
BoolFilterBuilder filterBuilder = new BoolFilterBuilder();
Set<String> types = new HashSet<String>();
for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> typesMeta : result) {
for (ObjectObjectCursor<String, MappingMetaData> type : typesMeta.value) {
filterBuilder.should(new TypeFilterBuilder(type.key));
types.add(type.key);
}
}
if (types.size() == 0) {
throw new TypeMissingException(new Index("_all"), request.types(), "No index has the type.");
}
request.types(types.toArray(new String[types.size()]));
QuerySourceBuilder querySourceBuilder = new QuerySourceBuilder()
.setQuery(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), filterBuilder));
deleteByQueryAction.execute(Requests.deleteByQueryRequest(request.indices()).source(querySourceBuilder), new ActionListener<DeleteByQueryResponse>() {
@Override
public void onResponse(DeleteByQueryResponse deleteByQueryResponse) {
refreshAction.execute(Requests.refreshRequest(request.indices()), new ActionListener<RefreshResponse>() {
@Override
public void onResponse(RefreshResponse refreshResponse) {
removeMapping();
}
@Override
public void onFailure(Throwable e) {
removeMapping();
}
protected void removeMapping() {
DeleteMappingClusterStateUpdateRequest clusterStateUpdateRequest = new DeleteMappingClusterStateUpdateRequest()
.indices(request.indices()).types(request.types())
.ackTimeout(request.timeout())
.masterNodeTimeout(request.masterNodeTimeout());
metaDataMappingService.removeMapping(clusterStateUpdateRequest, new ClusterStateUpdateListener() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new DeleteMappingResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
}
});
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
| 1no label
|
src_main_java_org_elasticsearch_action_admin_indices_mapping_delete_TransportDeleteMappingAction.java
|
618 |
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();
@Override
public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
return value;
}
@Override
public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
return (Set<OIdentifiable>) collection;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexMultiValues.java
|
70 |
public abstract class AllPartitionsClientRequest extends ClientRequest {
@Override
final void process() throws Exception {
ClientEndpoint endpoint = getEndpoint();
OperationFactory operationFactory = new OperationFactoryWrapper(createOperationFactory(), endpoint.getUuid());
Map<Integer, Object> map = clientEngine.invokeOnAllPartitions(getServiceName(), operationFactory);
Object result = reduce(map);
endpoint.sendResponse(result, getCallId());
}
protected abstract OperationFactory createOperationFactory();
protected abstract Object reduce(Map<Integer, Object> map);
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_AllPartitionsClientRequest.java
|
1,096 |
@SuppressWarnings("serial")
public class AboutDialog extends JDialog {
private static ImageIcon mctLogoIcon = new ImageIcon(ClassLoader.getSystemResource("images/mctlogo.png"));
public AboutDialog(JFrame frame) {
super(frame);
setDefaultCloseOperation(DISPOSE_ON_CLOSE);
Image image = mctLogoIcon.getImage().getScaledInstance(320, 80, Image.SCALE_SMOOTH);
JLabel label = new JLabel(new ImageIcon(image));
JPanel labelPanel = new JPanel();
labelPanel.setBackground(Color.white);
labelPanel.add(label, BorderLayout.CENTER);
labelPanel.setBorder(new EmptyBorder(5,5,5,5));
Container contentPane = getContentPane();
contentPane.setLayout(new BorderLayout());
contentPane.add(labelPanel, BorderLayout.NORTH);
// Modified the AboutDialog to add the Version and Build numbers to the screen - JOe...
JTextArea license = new JTextArea(100, 100);
license.setText("Mission Control Technologies, Copyright (c) 2009-2012, United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All rights reserved.\n\nMission Control Technologies is a collaborative environment developed at NASA Ames Research Center. The MCT platform is licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this application except in compliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0.\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\nMCT includes source code licensed under additional open source licenses. See About MCT Licenses or the MCT Open Source Licenses file included with this distribution for additional information.");
license.setLineWrap(true);
license.setWrapStyleWord(true);
license.setEditable(false);
JPanel licensePanel = new JPanel(new GridLayout(0, 1));
licensePanel.add(license);
licensePanel.setBackground(Color.white);
licensePanel.setBorder(BorderFactory.createEmptyBorder(20,40, 20, 40));
contentPane.add(licensePanel, BorderLayout.CENTER);
JPanel panel = new JPanel();
panel.setBackground(Color.white);
JButton close = new JButton("Close");
close.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
AboutDialog.this.setVisible(false);
}
});
panel.add(close);
contentPane.add(panel, BorderLayout.SOUTH);
setBackground(Color.WHITE);
setSize(400, 600);
setResizable(false);
setLocationRelativeTo(frame);
setTitle("About MCT");
}
public static String getBuildNumber() {
String buildnumber = "Not Found";
try {
Properties p = new Properties();
p.load(ClassLoader.getSystemResourceAsStream("properties/version.properties"));
buildnumber = p.getProperty("build.number");
} catch (Exception e) {
// if not found, just ignore any exceptions - it's not critical...
}
return buildnumber;
}
}
| 1no label
|
platform_src_main_java_gov_nasa_arc_mct_gui_dialogs_AboutDialog.java
|
1,230 |
public abstract class OAbstractFile implements OFile {
// OS-level shared lock held on byte 0 of the file while OGlobalConfiguration.FILE_LOCK is
// enabled; acquired in lock(), released in unlock().
private FileLock fileLock;
protected File osFile;
protected RandomAccessFile accessFile;
protected FileChannel channel;
// Set when data / header bytes have been written but not yet flushed.
protected volatile boolean dirty = false;
protected volatile boolean headerDirty = false;
// On-disk format version, read from the byte at VERSION_OFFSET by openChannel().
protected int version;
// Growth step used by allocateSpace(); negative values mean a percentage of current size.
protected int incrementSize = DEFAULT_INCREMENT_SIZE;
protected long maxSize;
protected byte[] securityCode = new byte[32]; // PART OF HEADER (32 bytes)
protected String mode;
// When true, open() tracks whether the previous session closed this file cleanly.
protected boolean failCheck = true;
protected volatile long size; // PART OF HEADER (4 bytes in v0, 8 bytes since v1 per the offsets below)
public static final int HEADER_SIZE = 1024;
protected static final int HEADER_DATA_OFFSET = 128;
protected static final int DEFAULT_SIZE = 1024000;
protected static final int DEFAULT_INCREMENT_SIZE = -50; // NEGATIVE NUMBER MEANS AS PERCENT OF
// CURRENT
// SIZE
private static final int OPEN_RETRY_MAX = 10;
private static final int OPEN_DELAY_RETRY = 100;
private static final long LOCK_WAIT_TIME = 300;
private static final int LOCK_MAX_RETRIES = 10;
// Header layout, format v0 (32-bit size/filledUpTo fields):
protected static final int SIZE_OFFSET_V_0 = 0;
protected static final int FILLEDUPTO_OFFSET_V_0 = 4;
protected static final int SOFTLY_CLOSED_OFFSET_V_0 = 8;
// Header layout, current format v1 (64-bit size/filledUpTo fields):
protected static final int SIZE_OFFSET = 0;
protected static final int FILLEDUPTO_OFFSET = 8;
protected static final int SOFTLY_CLOSED_OFFSET = 16;
protected static final int VERSION_OFFSET = 48;
protected static final int CURRENT_VERSION = 1;
// Guards all mutable state of this file: accessors take the read lock, mutators the write lock.
private final ReadWriteLock lock = new ReentrantReadWriteLock();
private boolean wasSoftlyClosed = true;
// --- Size accounting, implemented by subclasses against the concrete header format ---
public abstract long getFileSize();
public abstract long getFilledUpTo();
public abstract void setSize(long iSize) throws IOException;
// --- Direct header access ---
public abstract void writeHeaderLong(int iPosition, long iValue) throws IOException;
public abstract long readHeaderLong(int iPosition) throws IOException;
// NOTE(review): return semantics inferred from the name only ("whether anything was
// flushed") - confirm against the concrete implementations.
public abstract boolean synch() throws IOException;
// --- Raw typed reads/writes at logical offsets ---
public abstract void read(long iOffset, byte[] iDestBuffer, int iLenght) throws IOException;
public abstract short readShort(long iLogicalPosition) throws IOException;
public abstract int readInt(long iLogicalPosition) throws IOException;
public abstract long readLong(long iOffset) throws IOException;
public abstract byte readByte(long iOffset) throws IOException;
public abstract void writeInt(long iOffset, int iValue) throws IOException;
public abstract void writeLong(long iOffset, long iValue) throws IOException;
public abstract void writeShort(long iOffset, short iValue) throws IOException;
public abstract void writeByte(long iOffset, byte iValue) throws IOException;
public abstract void write(long iOffset, byte[] iSourceBuffer) throws IOException;
protected abstract void init() throws IOException;
protected abstract void setFilledUpTo(long iHow) throws IOException;
protected abstract void flushHeader() throws IOException;
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#open()
*/
// Opens the file, runs integrity checks and best-effort recovery, and returns whether the
// previous session closed the file cleanly (always true when failCheck is disabled).
public boolean open() throws IOException {
acquireWriteLock();
try {
if (!osFile.exists())
throw new FileNotFoundException("File: " + osFile.getAbsolutePath());
openChannel(osFile.length());
OLogManager.instance().debug(this, "Checking file integrity of " + osFile.getName() + "...");
init();
long filledUpTo = getFilledUpTo();
long fileSize = getFileSize();
// Recovery 1: a zero stored size usually means a hard kill before the header was flushed;
// fall back to the physical file length minus the fixed header.
if (fileSize == 0) {
// CORRUPTED? GET THE OS FILE SIZE
final long newFileSize = osFile.length() - HEADER_SIZE;
if (newFileSize != fileSize) {
OLogManager
.instance()
.error(
this,
"Invalid fileSize=%d for file %s. Resetting it to the os file size: %d. Probably the file was not closed correctly last time. The number of records has been set to the maximum value. It's strongly suggested to export and reimport the database before using it",
fileSize, getOsFile().getAbsolutePath(), newFileSize);
setFilledUpTo(newFileSize, true);
setSize(newFileSize, true);
fileSize = newFileSize;
}
}
// Recovery 2: the filled threshold can never legitimately exceed the allocated size.
if (filledUpTo > 0 && filledUpTo > fileSize) {
OLogManager
.instance()
.error(
this,
"Invalid filledUp=%d for file %s. Resetting it to the os file size: %d. Probably the file was not closed correctly last time. The number of records has been set to the maximum value. It's strongly suggested to export and reimport the database before using it",
filledUpTo, getOsFile().getAbsolutePath(), fileSize);
setSize(fileSize);
setFilledUpTo(fileSize);
filledUpTo = getFilledUpTo();
}
if (filledUpTo > fileSize || filledUpTo < 0)
OLogManager.instance().error(this, "Invalid filledUp size (=" + filledUpTo + "). The file could be corrupted", null,
OStorageException.class);
// Record whether the last shutdown was clean, then mark the file as in-use so the next
// open() can detect a crash.
if (failCheck) {
wasSoftlyClosed = isSoftlyClosed();
if (wasSoftlyClosed)
setSoftlyClosed(false);
}
// Transparently upgrade pre-v1 headers to the current on-disk format.
if (version < CURRENT_VERSION) {
setSize(fileSize, true);
setFilledUpTo(filledUpTo, true);
setVersion(CURRENT_VERSION);
version = CURRENT_VERSION;
setSoftlyClosed(!failCheck);
}
if (failCheck)
return wasSoftlyClosed;
return true;
} finally {
releaseWriteLock();
}
}
// Whether the previous session closed this file cleanly; meaningful only after open().
public boolean wasSoftlyClosed() {
acquireReadLock();
try {
return wasSoftlyClosed;
} finally {
releaseReadLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#create(int)
 */
// Creates the physical file with the given start size (-1 selects DEFAULT_SIZE), writes a
// fresh current-version header and marks the file as in-use when failCheck is enabled.
public void create(int iStartSize) throws IOException {
acquireWriteLock();
try {
if (iStartSize == -1)
iStartSize = DEFAULT_SIZE;
openChannel(iStartSize);
setFilledUpTo(0, true);
// Cap the initial allocation at the configured maximum size, when one is set.
setSize(maxSize > 0 && iStartSize > maxSize ? maxSize : iStartSize, true);
setVersion(CURRENT_VERSION);
version = CURRENT_VERSION;
setSoftlyClosed(!failCheck);
} finally {
releaseWriteLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#close()
 */
public void close() throws IOException {
  // Closing without arguments marks the file as softly (cleanly) closed.
  // Delegates to close(boolean): the two methods were byte-for-byte duplicates.
  close(true);
}

/**
 * Closes the underlying channel and random-access file after writing the "softly closed"
 * marker into the header and releasing the OS file lock (when FILE_LOCK is enabled).
 * Failures during shutdown are logged and swallowed, preserving the original best-effort
 * semantics: close never propagates I/O errors to the caller.
 *
 * @param softlyClosed true to record a clean shutdown in the header, false otherwise
 */
public void close(boolean softlyClosed) throws IOException {
  acquireWriteLock();
  try {
    try {
      setSoftlyClosed(softlyClosed);

      if (OGlobalConfiguration.FILE_LOCK.getValueAsBoolean())
        unlock();

      if (channel != null && channel.isOpen()) {
        channel.close();
        channel = null;
      }

      if (accessFile != null) {
        accessFile.close();
        accessFile = null;
      }
    } catch (Exception e) {
      // Best-effort shutdown: log and continue so callers can proceed with teardown.
      OLogManager.instance().error(this, "Error on closing file " + osFile.getAbsolutePath(), e, OIOException.class);
    }
  } finally {
    releaseWriteLock();
  }
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#delete()
 */
// Closes the file then deletes it, retrying until the OS actually releases it (deletes can
// be pending while buffers are still referenced - see the Windows note in openChannel()).
public void delete() throws IOException {
acquireWriteLock();
try {
close();
if (osFile != null) {
boolean deleted = osFile.delete();
while (!deleted) {
// Force cleanup of pending buffers, then re-check before retrying the delete.
OMemoryWatchDog.freeMemoryForResourceCleanup(100);
deleted = !osFile.exists() || osFile.delete();
}
}
} finally {
releaseWriteLock();
}
}
/*
 * Locks a portion of file.
 */
public FileLock lock(final long iRangeFrom, final long iRangeSize, final boolean iShared) throws IOException {
acquireWriteLock();
try {
return channel.lock(iRangeFrom, iRangeSize, iShared);
} finally {
releaseWriteLock();
}
}
/*
 * Unlocks a portion of file.
 */
public OFile unlock(final FileLock iLock) throws IOException {
acquireWriteLock();
try {
if (iLock != null) {
try {
iLock.release();
} catch (ClosedChannelException e) {
// Channel already closed: the OS lock is implicitly released, nothing to do.
}
}
return this;
} finally {
releaseWriteLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#lock()
 */
public void lock() throws IOException {
  if (channel == null)
    return;

  acquireWriteLock();
  try {
    // Try to acquire a shared OS lock on the first byte, retrying on overlap.
    // BUG FIX: the "fileLock == null" check used to sit INSIDE the loop, so the very first
    // failed attempt threw OLockException and the LOCK_MAX_RETRIES retry loop never
    // retried. The check now runs only after all attempts are exhausted.
    for (int i = 0; i < LOCK_MAX_RETRIES; ++i) {
      try {
        fileLock = channel.tryLock(0, 1, true);
        if (fileLock != null)
          break;
      } catch (OverlappingFileLockException e) {
        OLogManager.instance().debug(this,
            "Cannot open file '" + osFile.getAbsolutePath() + "' because it is locked. Waiting %d ms and retrying %d/%d...",
            LOCK_WAIT_TIME, i, LOCK_MAX_RETRIES);

        // FORCE FINALIZATION TO COLLECT ALL THE PENDING BUFFERS
        OMemoryWatchDog.freeMemoryForResourceCleanup(LOCK_WAIT_TIME);
      }
    }

    if (fileLock == null)
      throw new OLockException(
          "File '"
              + osFile.getPath()
              + "' is locked by another process, maybe the database is in use by another process. Use the remote mode with a OrientDB server to allow multiple access to the same database.");
  } finally {
    releaseWriteLock();
  }
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#unlock()
 */
// Releases the whole-file OS lock taken by lock(); safe to call when no lock is held.
public void unlock() throws IOException {
acquireWriteLock();
try {
if (fileLock != null) {
try {
fileLock.release();
} catch (ClosedChannelException e) {
// Channel already closed: the OS lock is implicitly released.
}
fileLock = null;
}
} finally {
releaseWriteLock();
}
}
// Validates a requested resize. When iSize would cut into space already in use, the error is
// reported through OLogManager with OIOException.class (presumably raising it - confirm the
// logger's error(...) contract).
protected void checkSize(final long iSize) throws IOException {
acquireReadLock();
try {
if (OLogManager.instance().isDebugEnabled())
OLogManager.instance().debug(this, "Changing file size to " + iSize + " bytes. " + toString());
final long filledUpTo = getFilledUpTo();
if (iSize < filledUpTo)
OLogManager.instance().error(
this,
"You cannot resize down the file to " + iSize + " bytes, since it is less than current space used: " + filledUpTo
+ " bytes", OIOException.class);
} finally {
releaseReadLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#removeTail(int)
 */
// Lowers the filled threshold by iSizeToShrink bytes, releasing the tail of the used space.
public void removeTail(long iSizeToShrink) throws IOException {
acquireWriteLock();
try {
final long filledUpTo = getFilledUpTo();
// NOTE(review): when the requested shrink exceeds the filled size this becomes a no-op
// (shrink by 0) rather than clamping to filledUpTo - verify this is intentional.
if (filledUpTo < iSizeToShrink)
iSizeToShrink = 0;
setFilledUpTo(filledUpTo - iSizeToShrink);
} finally {
releaseWriteLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#shrink(int)
 */
// Sets the filled threshold to an absolute size, but only ever downward; growth is handled
// by allocateSpace().
public void shrink(final long iSize) throws IOException {
acquireWriteLock();
try {
final long filledUpTo = getFilledUpTo();
if (iSize >= filledUpTo)
return;
OLogManager.instance().debug(this, "Shrinking filled file from " + filledUpTo + " to " + iSize + " bytes. " + toString());
setFilledUpTo(iSize);
} finally {
releaseWriteLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#allocateSpace(int)
 */
// Reserves iSize bytes at the end of the used region, growing the physical file in
// incrementSize steps when needed, and returns the offset where the reservation starts.
public long allocateSpace(final long iSize) throws IOException {
acquireWriteLock();
try {
final long offset = getFilledUpTo();
final long size = getFileSize();
if (getFreeSpace() < iSize) {
if (maxSize > 0 && maxSize - size < iSize)
throw new IllegalArgumentException("Cannot enlarge file since the configured max size ("
+ OFileUtils.getSizeAsString(maxSize) + ") was reached! " + toString());
// MAKE ROOM
long newFileSize = size;
if (newFileSize == 0)
// PROBABLY HAS BEEN LOST WITH HARD KILLS
newFileSize = DEFAULT_SIZE;
// GET THE STEP SIZE IN BYTES
// Negative incrementSize means "percent of current size": e.g. -50 grows 50% per step.
long stepSizeInBytes = incrementSize > 0 ? incrementSize : -1 * size / 100 * incrementSize;
// FIND THE BEST SIZE TO ALLOCATE (BASED ON INCREMENT-SIZE)
while (newFileSize - offset <= iSize) {
newFileSize += stepSizeInBytes;
if (newFileSize == 0)
// EMPTY FILE: ALLOCATE REQUESTED SIZE ONLY
newFileSize = iSize;
if (newFileSize > maxSize && maxSize > 0)
// TOO BIG: ROUND TO THE MAXIMUM FILE SIZE
newFileSize = maxSize;
}
setSize(newFileSize);
}
// THERE IS SPACE IN FILE: RETURN THE UPPER BOUND OFFSET AND UPDATE THE FILLED THRESHOLD
setFilledUpTo(offset + iSize);
return offset;
} finally {
releaseWriteLock();
}
}
// Bounds-checks a requested region against the filled portion of the file and returns the
// (unchanged) offset so call sites can inline the check.
protected long checkRegions(final long iOffset, final long iLength) {
acquireReadLock();
try {
if (iOffset < 0 || iOffset + iLength > getFilledUpTo())
throw new OIOException("You cannot access outside the file size (" + getFilledUpTo()
+ " bytes). You have requested portion " + iOffset + "-" + (iOffset + iLength) + " bytes. File: " + toString());
return iOffset;
} finally {
releaseReadLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#getFreeSpace()
 */
// Allocated-but-unfilled bytes currently available without growing the file.
public long getFreeSpace() {
acquireReadLock();
try {
return getFileSize() - getFilledUpTo();
} finally {
releaseReadLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#canOversize(int)
 */
// Whether the file can still grow by at least iRecordSize bytes under the configured maxSize.
public boolean canOversize(final int iRecordSize) {
acquireReadLock();
try {
return maxSize - getFileSize() > iRecordSize;
} finally {
releaseReadLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#toString()
 */
@Override
public String toString() {
  // Diagnostic summary: file name, OS-reported length (when open), stored logical size,
  // filled threshold and configured maximum size.
  final StringBuilder builder = new StringBuilder();
  builder.append("File: ");
  builder.append(osFile.getName());
  if (accessFile != null) {
    builder.append(" os-size=");
    try {
      builder.append(accessFile.length());
    } catch (IOException e) {
      // The length is informational only; show a placeholder rather than failing toString().
      builder.append("?");
    }
  }
  builder.append(", stored=");
  builder.append(getFileSize());
  builder.append(", filled=");
  builder.append(getFilledUpTo());
  builder.append(", max=");
  builder.append(maxSize);
  // Dead builder.append("") removed: it contributed nothing to the output.
  return builder.toString();
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#getOsFile()
 */
public File getOsFile() {
acquireReadLock();
try {
return osFile;
} finally {
releaseReadLock();
}
}
// Late initialization: binds this instance to a path and RandomAccessFile access mode
// ("r", "rw", ...) before open()/create() are called. Returns this for chaining.
public OAbstractFile init(final String iFileName, final String iMode) {
acquireWriteLock();
try {
mode = iMode;
osFile = new File(iFileName);
return this;
} finally {
releaseWriteLock();
}
}
/**
 * Opens (or creates) the RandomAccessFile and its channel, retrying up to OPEN_RETRY_MAX
 * times on FileNotFoundException (on Windows a pending delete can make the open fail
 * transiently), resizes the file to newSize, reads the on-disk format version byte and
 * takes the OS file lock when FILE_LOCK is enabled.
 */
protected void openChannel(final long newSize) throws IOException {
  acquireWriteLock();
  try {
    OLogManager.instance().debug(this, "[OFile.openChannel] opening channel for file '%s' of size: %d", osFile, osFile.length());

    for (int i = 0; i < OPEN_RETRY_MAX; ++i)
      try {
        accessFile = new RandomAccessFile(osFile, mode);
        break;
      } catch (FileNotFoundException e) {
        // BUG FIX: the rethrow guard compared the loop index against OPEN_DELAY_RETRY (100)
        // instead of the retry bound (10), so it could never fire and the informative
        // original exception was always replaced by the bare one thrown below.
        if (i == OPEN_RETRY_MAX - 1)
          throw e;

        // TRY TO RE-CREATE THE DIRECTORY (THIS HAPPENS ON WINDOWS AFTER A DELETE IS PENDING, USUALLY WHEN REOPEN THE DB VERY
        // FREQUENTLY)
        osFile.getParentFile().mkdirs();
        try {
          Thread.sleep(OPEN_DELAY_RETRY);
        } catch (InterruptedException e1) {
          Thread.currentThread().interrupt();
        }
      }

    if (accessFile == null)
      throw new FileNotFoundException(osFile.getAbsolutePath());

    if (accessFile.length() != newSize)
      accessFile.setLength(newSize);

    // Read the format version byte so open() can upgrade pre-v1 headers.
    accessFile.seek(VERSION_OFFSET);
    version = accessFile.read();
    accessFile.seek(0);

    channel = accessFile.getChannel();
    if (OGlobalConfiguration.FILE_LOCK.getValueAsBoolean())
      lock();
  } finally {
    releaseWriteLock();
  }
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#getMaxSize()
 */
public long getMaxSize() {
acquireReadLock();
try {
return maxSize;
} finally {
releaseReadLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#setMaxSize(int)
 */
public void setMaxSize(int maxSize) {
acquireWriteLock();
try {
this.maxSize = maxSize;
} finally {
releaseWriteLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#getIncrementSize()
 */
public int getIncrementSize() {
acquireReadLock();
try {
return incrementSize;
} finally {
releaseReadLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#setIncrementSize(int)
 */
// Negative values mean "grow by this percent of the current size"; see allocateSpace().
public void setIncrementSize(int incrementSize) {
acquireWriteLock();
try {
this.incrementSize = incrementSize;
} finally {
releaseWriteLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#isOpen()
 */
// Open state is tracked by the presence of the RandomAccessFile handle.
public boolean isOpen() {
acquireReadLock();
try {
return accessFile != null;
} finally {
releaseReadLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#exists()
 */
public boolean exists() {
acquireReadLock();
try {
return osFile != null && osFile.exists();
} finally {
releaseReadLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#isFailCheck()
 */
public boolean isFailCheck() {
acquireReadLock();
try {
return failCheck;
} finally {
releaseReadLock();
}
}
/*
 * (non-Javadoc)
 *
 * @see com.orientechnologies.orient.core.storage.fs.OFileAAA#setFailCheck(boolean)
 */
public void setFailCheck(boolean failCheck) {
acquireWriteLock();
try {
this.failCheck = failCheck;
} finally {
releaseWriteLock();
}
}
// Marks the data region as needing a flush; idempotent, taken under the write lock.
protected void setDirty() {
acquireWriteLock();
try {
if (!dirty)
dirty = true;
} finally {
releaseWriteLock();
}
}
// Marks the header as needing a flush; idempotent, taken under the write lock.
protected void setHeaderDirty() {
acquireWriteLock();
try {
if (!headerDirty)
headerDirty = true;
} finally {
releaseWriteLock();
}
}
public String getName() {
acquireReadLock();
try {
return osFile.getName();
} finally {
releaseReadLock();
}
}
public String getPath() {
acquireReadLock();
try {
return osFile.getPath();
} finally {
releaseReadLock();
}
}
public String getAbsolutePath() {
acquireReadLock();
try {
return osFile.getAbsolutePath();
} finally {
releaseReadLock();
}
}
public boolean renameTo(final File newFile) {
acquireWriteLock();
try {
return osFile.renameTo(newFile);
} finally {
releaseWriteLock();
}
}
// Thin wrappers around the instance ReadWriteLock so subclasses share one locking discipline.
protected void acquireWriteLock() {
lock.writeLock().lock();
}
protected void releaseWriteLock() {
lock.writeLock().unlock();
}
protected void acquireReadLock() {
lock.readLock().lock();
}
protected void releaseReadLock() {
lock.readLock().unlock();
}
// Writes the on-disk format version byte at VERSION_OFFSET.
protected abstract void setVersion(int version) throws IOException;
// "force" variants used during open()/create(); the exact force semantics are
// implementation-defined - see the concrete subclasses.
protected abstract void setFilledUpTo(final long iHow, boolean force);
protected abstract void setSize(final long size, final boolean force) throws IOException;
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_fs_OAbstractFile.java
|
2,638 |
/**
 * Default {@link ClassDefinition} implementation: an ordered list of field definitions plus a
 * name-keyed map for constant-time lookup, and a set of nested class definitions.
 * Not synchronized; instances are expected to be built once and then shared read-only.
 */
public class ClassDefinitionImpl extends BinaryClassDefinition implements ClassDefinition {

  // Fields in registration order; serves index-based access and serialization order.
  private final List<FieldDefinition> fieldDefinitions = new ArrayList<FieldDefinition>();
  // Name -> definition view of the same fields, kept in sync by addFieldDef().
  private final Map<String, FieldDefinition> fieldDefinitionsMap = new HashMap<String, FieldDefinition>();
  private final Set<ClassDefinition> nestedClassDefinitions = new HashSet<ClassDefinition>();

  public ClassDefinitionImpl() {
  }

  public ClassDefinitionImpl(int factoryId, int classId) {
    this.factoryId = factoryId;
    this.classId = classId;
  }

  /** Registers a field; it becomes reachable both by index and by name. */
  public void addFieldDef(FieldDefinition fd) {
    fieldDefinitions.add(fd);
    fieldDefinitionsMap.put(fd.getName(), fd);
  }

  public void addClassDef(ClassDefinition cd) {
    nestedClassDefinitions.add(cd);
  }

  /** @return the field with the given name, or null when absent */
  public FieldDefinition get(String name) {
    return fieldDefinitionsMap.get(name);
  }

  public FieldDefinition get(int fieldIndex) {
    return fieldDefinitions.get(fieldIndex);
  }

  public Set<ClassDefinition> getNestedClassDefinitions() {
    return nestedClassDefinitions;
  }

  public boolean hasField(String fieldName) {
    return fieldDefinitionsMap.containsKey(fieldName);
  }

  public Set<String> getFieldNames() {
    // Defensive copy so callers cannot mutate the internal map's key set.
    return new HashSet<String>(fieldDefinitionsMap.keySet());
  }

  /** @throws IllegalArgumentException if no field with the given name exists */
  public FieldType getFieldType(String fieldName) {
    final FieldDefinition fd = get(fieldName);
    if (fd != null) {
      return fd.getType();
    }
    // Carry the field name in the message (was a bare, message-less IllegalArgumentException).
    throw new IllegalArgumentException("Unknown field: " + fieldName);
  }

  /** @throws IllegalArgumentException if no field with the given name exists */
  public int getFieldClassId(String fieldName) {
    final FieldDefinition fd = get(fieldName);
    if (fd != null) {
      return fd.getClassId();
    }
    throw new IllegalArgumentException("Unknown field: " + fieldName);
  }

  public void writeData(ObjectDataOutput out) throws IOException {
    out.writeInt(factoryId);
    out.writeInt(classId);
    out.writeInt(version);
    out.writeInt(fieldDefinitions.size());
    for (FieldDefinition fieldDefinition : fieldDefinitions) {
      fieldDefinition.writeData(out);
    }
    out.writeInt(nestedClassDefinitions.size());
    for (ClassDefinition classDefinition : nestedClassDefinitions) {
      classDefinition.writeData(out);
    }
  }

  public void readData(ObjectDataInput in) throws IOException {
    // Mirror of writeData(): header ints, then field definitions, then nested classes.
    factoryId = in.readInt();
    classId = in.readInt();
    version = in.readInt();
    int size = in.readInt();
    for (int i = 0; i < size; i++) {
      FieldDefinitionImpl fieldDefinition = new FieldDefinitionImpl();
      fieldDefinition.readData(in);
      addFieldDef(fieldDefinition);
    }
    size = in.readInt();
    for (int i = 0; i < size; i++) {
      ClassDefinitionImpl classDefinition = new ClassDefinitionImpl();
      classDefinition.readData(in);
      addClassDef(classDefinition);
    }
  }

  public int getFieldCount() {
    return fieldDefinitions.size();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    ClassDefinitionImpl that = (ClassDefinitionImpl) o;
    // NOTE(review): equality compares only classId and version - factoryId and the field
    // definitions are ignored. Confirm this matches the registry's expectations.
    if (classId != that.classId) {
      return false;
    }
    if (version != that.version) {
      return false;
    }
    return true;
  }

  @Override
  public int hashCode() {
    // Must stay consistent with equals(): classId and version only.
    int result = classId;
    result = 31 * result + version;
    return result;
  }

  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder();
    sb.append("ClassDefinition");
    sb.append("{factoryId=").append(factoryId);
    sb.append(", classId=").append(classId);
    sb.append(", version=").append(version);
    sb.append(", fieldDefinitions=").append(fieldDefinitions);
    sb.append('}');
    return sb.toString();
  }
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_serialization_ClassDefinitionImpl.java
|
19 |
// Iterates map entries in descending key order: it starts positioned just AFTER the last
// entry and each next() steps backwards via the base class' prevEntry().
final class InverseEntryIterator extends AbstractEntryIterator<K, V, Map.Entry<K, V>> {
InverseEntryIterator(final OMVRBTreeEntry<K, V> last) {
super(last);
// we have to set ourselves after current index to make iterator work
if (last != null) {
pageIndex = last.getTree().getPageIndex() + 1;
}
}
public Map.Entry<K, V> next() {
// "next" in reverse iteration means stepping to the previous entry.
return prevEntry();
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OMVRBTree.java
|
1,839 |
/**
 * Validates that a String property holds a well-formed URI path. The rules are configurable
 * via properties: full URLs (http/ftp schemes) can be skipped entirely, a leading slash can
 * be required, and a trailing slash can be forbidden. Checks are applied in that order, so a
 * value violating several rules reports the first matching error key.
 */
@Component("blUriPropertyValidator")
public class UriPropertyValidator extends ValidationConfigurationBasedPropertyValidator {

    protected static final Log LOG = LogFactory.getLog(UriPropertyValidator.class);

    protected String ERROR_KEY_BEGIN_WITH_SLASH = "uriPropertyValidatorMustBeginWithSlashError";
    protected String ERROR_KEY_CANNOT_END_WITH_SLASH = "uriPropertyValidatorCannotEndWithSlashError";

    @Value("${uriPropertyValidator.ignoreFullUrls}")
    protected boolean ignoreFullUrls = true;

    @Value("${uriPropertyValidator.requireLeadingSlash}")
    protected boolean requireLeadingSlash = true;

    @Value("${uriPropertyValidator.allowTrailingSlash}")
    protected boolean allowTrailingSlash = false;

    /**
     * Denotes what should occur when this validator encounters a null value to validate against. Default behavior is to
     * allow them, which means that this validator will always return true with null values
     */
    protected boolean succeedForNullValues = true;

    /** Recognises absolute http(s)/ftp style URLs by their scheme prefix. */
    public boolean isFullUrl(String url) {
        return url.startsWith("http") || url.startsWith("ftp");
    }

    @Override
    public PropertyValidationResult validate(Entity entity,
                                             Serializable instance,
                                             Map<String, FieldMetadata> entityFieldMetadata,
                                             Map<String, String> validationConfiguration,
                                             BasicFieldMetadata propertyMetadata,
                                             String propertyName,
                                             String value) {
        // Null handling is configurable; by default null values pass validation.
        if (value == null) {
            return new PropertyValidationResult(succeedForNullValues);
        }

        // Full URLs may be exempt from the path-shape rules below.
        if (ignoreFullUrls && isFullUrl(value)) {
            return new PropertyValidationResult(true);
        }

        final boolean missingLeadingSlash = requireLeadingSlash && !value.startsWith("/");
        if (missingLeadingSlash) {
            return new PropertyValidationResult(false, ERROR_KEY_BEGIN_WITH_SLASH);
        }

        final boolean forbiddenTrailingSlash = !allowTrailingSlash && value.endsWith("/");
        if (forbiddenTrailingSlash) {
            return new PropertyValidationResult(false, ERROR_KEY_CANNOT_END_WITH_SLASH);
        }

        return new PropertyValidationResult(true);
    }

    public boolean isSucceedForNullValues() {
        return succeedForNullValues;
    }

    public void setSucceedForNullValues(boolean succeedForNullValues) {
        this.succeedForNullValues = succeedForNullValues;
    }
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_service_persistence_validation_UriPropertyValidator.java
|
449 |
// Request builder for the cluster-stats API: wraps a ClusterStatsRequest and submits it
// through the cluster admin client.
public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder<ClusterStatsRequest, ClusterStatsResponse, ClusterStatsRequestBuilder> {
public ClusterStatsRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new ClusterStatsRequest());
}
@Override
protected void doExecute(ActionListener<ClusterStatsResponse> listener) {
// Delegates to the admin client; the response arrives asynchronously on the listener.
((ClusterAdminClient) client).clusterStats(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsRequestBuilder.java
|
608 |
/**
 * Client-side index manager used against a remote OrientDB server. Index DDL is executed as
 * SQL commands on the server and the local registry is refreshed by re-reading the
 * index-manager document.
 */
public class OIndexManagerRemote extends OIndexManagerAbstract {
  private static final String QUERY_DROP = "drop index %s";

  public OIndexManagerRemote(final ODatabaseRecord iDatabase) {
    super(iDatabase);
  }

  /** Builds the proper remote proxy for a server-side index (multi- vs single-value). */
  protected OIndex<?> getRemoteIndexInstance(boolean isMultiValueIndex, String type, String name, Set<String> clustersToIndex,
      OIndexDefinition indexDefinition, ORID identity, ODocument configuration) {
    if (isMultiValueIndex)
      return new OIndexRemoteMultiValue(name, type, identity, indexDefinition, configuration, clustersToIndex);

    return new OIndexRemoteOneValue(name, type, identity, indexDefinition, configuration, clustersToIndex);
  }

  /**
   * Creates an index by issuing a CREATE INDEX statement on the server, then reloads the
   * local registry and returns the proxy of the new index.
   */
  public OIndex<?> createIndex(final String iName, final String iType, final OIndexDefinition iIndexDefinition,
      final int[] iClusterIdsToIndex, final OProgressListener iProgressListener) {
    final String createIndexDDL;
    if (iIndexDefinition != null) {
      createIndexDDL = iIndexDefinition.toCreateIndexDDL(iName, iType);
    } else {
      createIndexDDL = new OSimpleKeyIndexDefinition().toCreateIndexDDL(iName, iType);
    }

    acquireExclusiveLock();
    try {
      if (iProgressListener != null) {
        iProgressListener.onBegin(this, 0);
      }

      getDatabase().command(new OCommandSQL(createIndexDDL)).execute();
      document.setIdentity(new ORecordId(document.getDatabase().getStorage().getConfiguration().indexMgrRecordId));

      if (iProgressListener != null) {
        iProgressListener.onCompletition(this, true);
      }

      reload();
      // Index names are registered lower-case in the local map.
      return preProcessBeforeReturn(indexes.get(iName.toLowerCase()));
    } finally {
      releaseExclusiveLock();
    }
  }

  /** Drops the index on the server, removes it from the local map and reloads the registry. */
  public OIndexManager dropIndex(final String iIndexName) {
    acquireExclusiveLock();
    try {
      final String text = String.format(QUERY_DROP, iIndexName);
      getDatabase().command(new OCommandSQL(text)).execute();

      // REMOVE THE INDEX LOCALLY
      indexes.remove(iIndexName.toLowerCase());
      reload();

      return this;
    } finally {
      releaseExclusiveLock();
    }
  }

  /** Rebuilds the local index registry from the index-manager document. */
  @Override
  protected void fromStream() {
    acquireExclusiveLock();
    try {
      clearMetadata();

      final Collection<ODocument> idxs = document.field(CONFIG_INDEXES);
      if (idxs != null) {
        for (ODocument d : idxs) {
          try {
            // BUG FIX: the algorithm and value-container algorithm must be read from the
            // per-index document "d", not from the index-manager document itself.
            OIndexInternal<?> newIndex = OIndexes.createIndex(getDatabase(), (String) d.field(OIndexInternal.CONFIG_TYPE),
                d.<String> field(OIndexInternal.ALGORITHM), d.<String> field(OIndexInternal.VALUE_CONTAINER_ALGORITHM));

            OIndexInternal.IndexMetadata newIndexMetadata = newIndex.loadMetadata(d);
            addIndexInternal(getRemoteIndexInstance(newIndex instanceof OIndexMultiValues, newIndexMetadata.getType(),
                newIndexMetadata.getName(), newIndexMetadata.getClustersToIndex(), newIndexMetadata.getIndexDefinition(),
                (ORID) d.field(OIndexAbstract.CONFIG_MAP_RID, OType.LINK), d));
          } catch (Exception e) {
            // One broken index definition must not prevent loading the others.
            OLogManager.instance().error(this, "Error on loading of index by configuration: %s", e, d);
          }
        }
      }
    } finally {
      releaseExclusiveLock();
    }
  }

  @Override
  public ODocument toStream() {
    throw new UnsupportedOperationException("Remote index cannot be streamed");
  }

  @Override
  public void recreateIndexes() {
    throw new UnsupportedOperationException("recreateIndexes()");
  }

  @Override
  public void waitTillIndexRestore() {
    // No-op on the remote client: any restore happens server-side.
  }

  @Override
  public boolean autoRecreateIndexesAfterCrash() {
    return false;
  }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexManagerRemote.java
|
1,303 |
@Test
public class LocalPaginatedStorageUpdateCrashRestore {
private ODatabaseDocumentTx baseDocumentTx;
private ODatabaseDocumentTx testDocumentTx;
private File buildDir;
private int idGen = 0;
private OLockManager<Integer, Thread> idLockManager = new OLockManager<Integer, Thread>(true, 1000);
private ExecutorService executorService = Executors.newCachedThreadPool();
private Process process;
@BeforeClass
public void beforeClass() throws Exception {
OGlobalConfiguration.CACHE_LEVEL1_ENABLED.setValue(false);
OGlobalConfiguration.CACHE_LEVEL1_SIZE.setValue(0);
OGlobalConfiguration.CACHE_LEVEL2_ENABLED.setValue(false);
OGlobalConfiguration.CACHE_LEVEL2_SIZE.setValue(0);
String buildDirectory = System.getProperty("buildDirectory", ".");
buildDirectory += "/localPaginatedStorageUpdateCrashRestore";
buildDir = new File(buildDirectory);
if (buildDir.exists())
buildDir.delete();
buildDir.mkdir();
String javaExec = System.getProperty("java.home") + "/bin/java";
System.setProperty("ORIENTDB_HOME", buildDirectory);
ProcessBuilder processBuilder = new ProcessBuilder(javaExec, "-Xmx2048m", "-classpath", System.getProperty("java.class.path"),
"-DORIENTDB_HOME=" + buildDirectory, RemoteDBRunner.class.getName());
processBuilder.inheritIO();
process = processBuilder.start();
Thread.sleep(5000);
}
public static final class RemoteDBRunner {
public static void main(String[] args) throws Exception {
OGlobalConfiguration.CACHE_LEVEL1_ENABLED.setValue(false);
OGlobalConfiguration.CACHE_LEVEL1_SIZE.setValue(0);
OGlobalConfiguration.CACHE_LEVEL2_ENABLED.setValue(false);
OGlobalConfiguration.CACHE_LEVEL2_SIZE.setValue(0);
OServer server = OServerMain.create();
server.startup(RemoteDBRunner.class
.getResourceAsStream("/com/orientechnologies/orient/core/storage/impl/local/paginated/db-update-config.xml"));
server.activate();
while (true)
;
}
}
@AfterClass
public void afterClass() {
testDocumentTx.drop();
baseDocumentTx.drop();
Assert.assertTrue(buildDir.delete());
}
@BeforeMethod
public void beforeMethod() {
baseDocumentTx = new ODatabaseDocumentTx("plocal:" + buildDir.getAbsolutePath()
+ "/baseLocalPaginatedStorageUpdateCrashRestore");
if (baseDocumentTx.exists()) {
baseDocumentTx.open("admin", "admin");
baseDocumentTx.drop();
}
baseDocumentTx.create();
testDocumentTx = new ODatabaseDocumentTx("remote:localhost:3500/testLocalPaginatedStorageUpdateCrashRestore");
testDocumentTx.open("admin", "admin");
}
public void testDocumentUpdate() throws Exception {
createSchema(baseDocumentTx);
createSchema(testDocumentTx);
System.out.println("Schema was created.");
System.out.println("Document creation was started.");
createDocuments();
System.out.println("Document creation was finished.");
System.out.println("Start documents update.");
List<Future> futures = new ArrayList<Future>();
for (int i = 0; i < 5; i++) {
futures.add(executorService.submit(new DataUpdateTask(baseDocumentTx, testDocumentTx)));
}
Thread.sleep(150000);
long lastTs = System.currentTimeMillis();
process.destroy();
for (Future future : futures) {
try {
future.get();
} catch (Exception e) {
e.printStackTrace();
}
}
System.out.println("Documents update was stopped.");
testDocumentTx = new ODatabaseDocumentTx("plocal:" + buildDir.getAbsolutePath()
+ "/testLocalPaginatedStorageUpdateCrashRestore");
testDocumentTx.open("admin", "admin");
testDocumentTx.close();
testDocumentTx.open("admin", "admin");
System.out.println("Start documents comparison.");
compareDocuments(lastTs);
}
private void createSchema(ODatabaseDocumentTx dbDocumentTx) {
ODatabaseRecordThreadLocal.INSTANCE.set(dbDocumentTx);
OSchema schema = dbDocumentTx.getMetadata().getSchema();
if (!schema.existsClass("TestClass")) {
OClass testClass = schema.createClass("TestClass");
testClass.createProperty("id", OType.LONG);
testClass.createProperty("timestamp", OType.LONG);
testClass.createProperty("stringValue", OType.STRING);
testClass.createIndex("idIndex", OClass.INDEX_TYPE.UNIQUE, "id");
schema.save();
}
}
private void createDocuments() {
Random random = new Random();
for (int i = 0; i < 1000000; i++) {
final ODocument document = new ODocument("TestClass");
document.field("id", idGen++);
document.field("timestamp", System.currentTimeMillis());
document.field("stringValue", "sfe" + random.nextLong());
saveDoc(document, baseDocumentTx, testDocumentTx);
if (i % 10000 == 0)
System.out.println(i + " documents were created.");
}
}
private void saveDoc(ODocument document, ODatabaseDocumentTx baseDB, ODatabaseDocumentTx testDB) {
ODatabaseRecordThreadLocal.INSTANCE.set(baseDB);
ODocument testDoc = new ODocument();
document.copyTo(testDoc);
document.save();
ODatabaseRecordThreadLocal.INSTANCE.set(testDB);
testDoc.save();
ODatabaseRecordThreadLocal.INSTANCE.set(baseDB);
}
private void compareDocuments(long lastTs) {
long minTs = Long.MAX_VALUE;
int clusterId = baseDocumentTx.getClusterIdByName("TestClass");
OStorage baseStorage = baseDocumentTx.getStorage();
OPhysicalPosition[] physicalPositions = baseStorage.ceilingPhysicalPositions(clusterId, new OPhysicalPosition(
OClusterPositionFactory.INSTANCE.valueOf(0)));
int recordsRestored = 0;
int recordsTested = 0;
while (physicalPositions.length > 0) {
final ORecordId rid = new ORecordId(clusterId);
for (OPhysicalPosition physicalPosition : physicalPositions) {
rid.clusterPosition = physicalPosition.clusterPosition;
ODatabaseRecordThreadLocal.INSTANCE.set(baseDocumentTx);
ODocument baseDocument = baseDocumentTx.load(rid);
ODatabaseRecordThreadLocal.INSTANCE.set(testDocumentTx);
List<ODocument> testDocuments = testDocumentTx.query(new OSQLSynchQuery<ODocument>("select from TestClass where id = "
+ baseDocument.field("id")));
Assert.assertTrue(!testDocuments.isEmpty());
ODocument testDocument = testDocuments.get(0);
if (testDocument.field("timestamp").equals(baseDocument.field("timestamp"))
&& testDocument.field("stringValue").equals(baseDocument.field("stringValue"))) {
recordsRestored++;
} else {
if (((Long) baseDocument.field("timestamp")) < minTs)
minTs = baseDocument.field("timestamp");
}
recordsTested++;
if (recordsTested % 10000 == 0)
System.out.println(recordsTested + " were tested, " + recordsRestored + " were restored ...");
}
physicalPositions = baseStorage.higherPhysicalPositions(clusterId, physicalPositions[physicalPositions.length - 1]);
}
System.out.println(recordsRestored + " records were restored. Total records " + recordsTested
+ ". Max interval for lost records " + (lastTs - minTs));
}
/**
 * Worker that endlessly updates random "TestClass" records in both the base and the test
 * database (via {@link #saveDoc}) until the surrounding test kills the process to simulate
 * a crash. Each worker opens its own connections from the URLs of the supplied databases.
 */
public class DataUpdateTask implements Callable<Void> {
  private ODatabaseDocumentTx baseDB;
  private ODatabaseDocumentTx testDB;

  public DataUpdateTask(ODatabaseDocumentTx baseDB, ODatabaseDocumentTx testDocumentTx) {
    // Fresh instances per task: database connections are not shared across threads.
    this.baseDB = new ODatabaseDocumentTx(baseDB.getURL());
    this.testDB = new ODatabaseDocumentTx(testDocumentTx.getURL());
  }

  @Override
  public Void call() throws Exception {
    final Random rnd = new Random();

    baseDB.open("admin", "admin");
    testDB.open("admin", "admin");

    int updates = 0;
    try {
      // Intentionally infinite: the test terminates this task by crashing the process.
      for (;;) {
        final int id = rnd.nextInt(idGen);

        // Exclusive per-id lock so concurrent workers never update the same record.
        idLockManager.acquireLock(Thread.currentThread(), id, OLockManager.LOCK.EXCLUSIVE);
        try {
          final List<ODocument> result = baseDB.query(new OSQLSynchQuery<ODocument>(
              "select from TestClass where id = " + id));
          Assert.assertTrue(!result.isEmpty());

          final ODocument document = result.get(0);
          document.field("timestamp", System.currentTimeMillis());
          document.field("stringValue", "vde" + rnd.nextLong());

          saveDoc(document, baseDB, testDB);

          updates++;
          if (updates % 50000 == 0)
            System.out.println(updates + " records were updated.");
        } finally {
          idLockManager.releaseLock(Thread.currentThread(), id, OLockManager.LOCK.EXCLUSIVE);
        }
      }
    } finally {
      baseDB.close();
      testDB.close();
    }
  }
}
}
| 1no label
|
server_src_test_java_com_orientechnologies_orient_core_storage_impl_local_paginated_LocalPaginatedStorageUpdateCrashRestore.java
|
96 |
/**
 * Selects the best available {@link ODirectMemory} implementation at class-load time:
 * the JNA-backed one if it is on the class path, otherwise the {@code sun.misc.Unsafe}
 * based fallback, otherwise none ({@link #directMemory()} then returns {@code null}).
 */
class ODirectMemoryFactory {
  public static final ODirectMemoryFactory INSTANCE = new ODirectMemoryFactory();

  private static final ODirectMemory directMemory;
  static {
    ODirectMemory localDirectMemory = null;
    // Prefer the JNA implementation when available.
    // FIX: Class.forName() never returns null -- it throws ClassNotFoundException when the
    // class is absent -- so the original "jnaClass == null" branch was dead code and has
    // been removed; absence is handled by the catch block below.
    try {
      final Class<?> jnaClass = Class.forName("com.orientechnologies.nio.OJNADirectMemory");
      localDirectMemory = (ODirectMemory) jnaClass.newInstance();
    } catch (Exception ignored) {
      // JNA implementation not on the class path; fall through to the Unsafe fallback.
    }
    if (localDirectMemory == null) {
      try {
        // Probe for sun.misc.Unsafe; throws ClassNotFoundException when unavailable.
        Class.forName("sun.misc.Unsafe");
        localDirectMemory = OUnsafeMemory.INSTANCE;
        OLogManager.instance().warn(
            ODirectMemoryFactory.class,
            "Sun Unsafe direct memory implementation is going to be used, "
                + "this implementation is not stable so please use JNA version instead.");
      } catch (Exception ignored) {
        // No direct-memory implementation at all; directMemory stays null.
      }
    }
    directMemory = localDirectMemory;
  }

  /** @return the chosen implementation, or {@code null} when none could be loaded. */
  public ODirectMemory directMemory() {
    return directMemory;
  }
}
| 1no label
|
commons_src_main_java_com_orientechnologies_common_directmemory_ODirectMemoryFactory.java
|
1,270 |
public class OStorageLocal extends OStorageLocalAbstract {
  // How many times file deletion is retried and how long to wait between retries
  // (values come from the mmap force-retry/delay global configuration).
  private final int                     DELETE_MAX_RETRIES;
  private final int                     DELETE_WAIT_TIME;
  // Clusters by name; LinkedHashMap preserves registration order.
  private final Map<String, OCluster>   clusterMap                = new LinkedHashMap<String, OCluster>();
  // Clusters/data segments by id; array index == id, null slots are dropped entries.
  private OCluster[]                    clusters                  = new OCluster[0];
  private ODataLocal[]                  dataSegments              = new ODataLocal[0];
  private final OStorageLocalTxExecuter txManager;
  private String                        storagePath;
  private final OStorageVariableParser  variableParser;
  private int                           defaultClusterId          = -1;
  // Every file extension owned by this storage; used when physically deleting the database.
  private static String[]               ALL_FILE_EXTENSIONS       = { "ocf", ".och", ".ocl", ".oda", ".odh", ".otx", ".ocs",
      ".oef", ".oem", OWriteAheadLog.MASTER_RECORD_EXTENSION, OWriteAheadLog.WAL_SEGMENT_EXTENSION,
      OLocalHashTableIndexEngine.BUCKET_FILE_EXTENSION, OLocalHashTableIndexEngine.METADATA_FILE_EXTENSION,
      OLocalHashTableIndexEngine.TREE_FILE_EXTENSION, OSBTreeIndexEngine.DATA_FILE_EXTENSION };
  private long                          positionGenerator         = 1;
  // Blocks data-modification operations while exclusive storage-wide operations run.
  private OModificationLock             modificationLock          = new OModificationLock();
  // Cluster names whose records must be synched to disk immediately outside transactions.
  private final Set<String>             clustersToSyncImmediately = new HashSet<String>();
/**
 * Builds the local storage: resolves the on-disk path (handling the legacy layout where the
 * URL pointed at a file inside the database directory), wires configuration and transaction
 * manager, and sizes the read/write disk cache from global configuration.
 *
 * @param iName     storage name
 * @param iFilePath storage URL/path
 * @param iMode     open mode ("rw"/"ro")
 */
public OStorageLocal(final String iName, final String iFilePath, final String iMode) throws IOException {
  super(iName, iFilePath, iMode);

  File f = new File(url);

  if (f.exists() || !exists(f.getParent())) {
    // ALREADY EXISTS OR NOT LEGACY
    storagePath = OSystemVariableResolver.resolveSystemVariables(OFileUtils.getPath(new File(url).getPath()));
  } else {
    // LEGACY DB
    storagePath = OSystemVariableResolver.resolveSystemVariables(OFileUtils.getPath(new File(url).getParent()));
  }

  storagePath = OIOUtils.getPathFromDatabaseName(storagePath);
  variableParser = new OStorageVariableParser(storagePath);
  configuration = new OStorageConfigurationSegment(this);
  // txManager needs configuration.txSegment, so it must be built after configuration.
  txManager = new OStorageLocalTxExecuter(this, configuration.txSegment);

  DELETE_MAX_RETRIES = OGlobalConfiguration.FILE_MMAP_FORCE_RETRY.getValueAsInteger();
  DELETE_WAIT_TIME = OGlobalConfiguration.FILE_MMAP_FORCE_DELAY.getValueAsInteger();

  final String[] clustersToSync = OGlobalConfiguration.NON_TX_CLUSTERS_SYNC_IMMEDIATELY.getValueAsString().trim()
      .split("\\s*,\\s*");
  clustersToSyncImmediately.addAll(Arrays.asList(clustersToSync));

  installProfilerHooks();

  // Split the configured disk cache between write cache (configured percentage) and read cache.
  long diskCacheSize = OGlobalConfiguration.DISK_CACHE_SIZE.getValueAsLong() * 1024 * 1024;
  long writeCacheSize = (long) Math.floor((((double) OGlobalConfiguration.DISK_WRITE_CACHE_PART.getValueAsInteger()) / 100.0)
      * diskCacheSize);
  long readCacheSize = diskCacheSize - writeCacheSize;

  diskCache = new OReadWriteDiskCache(name, readCacheSize, writeCacheSize,
      OGlobalConfiguration.DISK_CACHE_PAGE_SIZE.getValueAsInteger() * 1024,
      OGlobalConfiguration.DISK_WRITE_CACHE_PAGE_TTL.getValueAsLong() * 1000,
      OGlobalConfiguration.DISK_WRITE_CACHE_PAGE_FLUSH_INTERVAL.getValueAsInteger(), this, null, false, true);
}
/**
 * Opens the storage: registers the default data segment, loads the default clusters, then
 * re-registers every data segment and cluster found in the configuration, finally opening
 * the write-ahead log (if enabled) and the transaction manager. A storage that is already
 * open is reused as-is. On any failure the storage is force-closed and an
 * {@link OStorageException} is thrown.
 *
 * @param iUserName   unused here (authentication happens at database level)
 * @param iUserPassword unused here
 * @param iProperties extra open properties (unused by this implementation)
 */
public synchronized void open(final String iUserName, final String iUserPassword, final Map<String, Object> iProperties) {
  final long timer = Orient.instance().getProfiler().startChrono();

  lock.acquireExclusiveLock();
  try {

    addUser();

    if (status != STATUS.CLOSED)
      // ALREADY OPENED: THIS IS THE CASE WHEN A STORAGE INSTANCE IS
      // REUSED
      return;

    if (!exists())
      throw new OStorageException("Cannot open the storage '" + name + "' because it does not exist in path: " + url);

    status = STATUS.OPEN;

    // OPEN BASIC SEGMENTS
    int pos;
    pos = registerDataSegment(new OStorageDataConfiguration(configuration, OStorage.DATA_DEFAULT_NAME, 0, getStoragePath()));
    dataSegments[pos].open();

    addDefaultClusters();

    // REGISTER DATA SEGMENT
    for (int i = 0; i < configuration.dataSegments.size(); ++i) {
      final OStorageDataConfiguration dataConfig = configuration.dataSegments.get(i);
      if (dataConfig == null)
        continue;

      pos = registerDataSegment(dataConfig);
      if (pos == -1) {
        // CLOSE AND REOPEN TO BE SURE ALL THE FILE SEGMENTS ARE
        // OPENED
        dataSegments[i].close();
        dataSegments[i] = new ODataLocal(this, dataConfig, i);
        dataSegments[i].open();
      } else
        dataSegments[pos].open();
    }

    // REGISTER CLUSTER
    for (int i = 0; i < configuration.clusters.size(); ++i) {
      final OStorageClusterConfiguration clusterConfig = configuration.clusters.get(i);
      if (clusterConfig != null) {
        pos = createClusterFromConfig(clusterConfig);

        try {
          if (pos == -1) {
            // CLOSE AND REOPEN TO BE SURE ALL THE FILE SEGMENTS ARE
            // OPENED
            if (clusters[i] != null && clusters[i] instanceof OClusterLocal)
              clusters[i].close();
            clusters[i] = Orient.instance().getClusterFactory().createCluster(OClusterLocal.TYPE);
            clusters[i].configure(this, clusterConfig);
            clusterMap.put(clusters[i].getName(), clusters[i]);
            clusters[i].open();
          } else {
            if (clusterConfig.getName().equals(CLUSTER_DEFAULT_NAME))
              defaultClusterId = pos;

            clusters[pos].open();
          }
        } catch (FileNotFoundException e) {
          // A cluster whose file disappeared is logged and excluded, not fatal.
          OLogManager.instance().warn(
              this,
              "Error on loading cluster '" + clusters[i].getName() + "' (" + i
                  + "): file not found. It will be excluded from current database '" + getName() + "'.");

          clusterMap.remove(clusters[i].getName());
          clusters[i] = null;
        }
      } else {
        // Dropped cluster: keep the id slot reserved with a null entry.
        clusters = Arrays.copyOf(clusters, clusters.length + 1);
        clusters[i] = null;
      }
    }

    if (OGlobalConfiguration.USE_WAL.getValueAsBoolean())
      writeAheadLog = new OWriteAheadLog(this);

    txManager.open();

  } catch (Exception e) {
    close(true);
    throw new OStorageException("Cannot open local storage '" + url + "' with mode=" + mode, e);
  } finally {
    lock.releaseExclusiveLock();

    Orient.instance().getProfiler().stopChrono("db." + name + ".open", "Open a database", timer, "db.*.open");
  }
}
/** @return the read/write disk cache created by the constructor. */
public ODiskCache getDiskCache() {
  return diskCache;
}
/**
 * Registers the four built-in clusters (internal, index, manual index, default) from the
 * stored configuration, recording the id of the default cluster.
 */
private void addDefaultClusters() throws IOException {
  createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
      OMetadataDefault.CLUSTER_INTERNAL_NAME));
  // NOTE(review): configuration.load() is called between the internal and index cluster
  // registrations -- presumably the internal cluster must exist before the configuration
  // can be read from it; confirm before reordering.
  configuration.load();

  createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
      OMetadataDefault.CLUSTER_INDEX_NAME));
  createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
      OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME));
  defaultClusterId = createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
      CLUSTER_DEFAULT_NAME));
}
/**
 * Creates a brand-new storage on disk: the storage folder, the default and index data
 * segments, the built-in clusters (internal, index, manual index, default), the
 * configuration, the write-ahead log and the transaction segment. The storage must be
 * closed and must not already exist.
 *
 * @param iProperties extra creation properties (unused by this implementation)
 * @throws OStorageException if the storage is open, already exists, the folder cannot be
 *                           created, or an I/O error occurs
 */
public void create(final Map<String, Object> iProperties) {
  final long timer = Orient.instance().getProfiler().startChrono();

  lock.acquireExclusiveLock();
  try {

    if (status != STATUS.CLOSED)
      throw new OStorageException("Cannot create new storage '" + name + "' because it is not closed (status:" + status + ")");

    addUser();

    final File storageFolder = new File(storagePath);
    // FIX: use mkdirs() (creates missing parents too) instead of mkdir(), and fail fast
    // when the folder cannot be created rather than ignoring the result and surfacing a
    // confusing I/O error later.
    if (!storageFolder.exists() && !storageFolder.mkdirs())
      throw new OStorageException("Cannot create the storage folder: " + storagePath);

    if (exists())
      throw new OStorageException("Cannot create new storage '" + name + "' because it already exists");

    status = STATUS.OPEN;

    addDataSegment(OStorage.DATA_DEFAULT_NAME);
    addDataSegment(OMetadataDefault.DATASEGMENT_INDEX_NAME);

    // ADD THE METADATA CLUSTER TO STORE INTERNAL STUFF
    addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), OMetadataDefault.CLUSTER_INTERNAL_NAME, null, null, true);

    // ADD THE INDEX CLUSTER TO STORE, BY DEFAULT, ALL THE RECORDS OF INDEXING IN THE INDEX DATA SEGMENT
    addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), OMetadataDefault.CLUSTER_INDEX_NAME, null,
        OMetadataDefault.DATASEGMENT_INDEX_NAME, true);

    // ADD THE INDEX CLUSTER TO STORE, BY DEFAULT, ALL THE RECORDS OF INDEXING
    addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME, null, null, true);

    // ADD THE DEFAULT CLUSTER
    defaultClusterId = addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), CLUSTER_DEFAULT_NAME, null, null, false);

    configuration.create();

    writeAheadLog = new OWriteAheadLog(this);

    txManager.create();
  } catch (OStorageException e) {
    close();
    throw e;
  } catch (IOException e) {
    close();
    throw new OStorageException("Error on creation of storage '" + name + "'", e);
  } finally {
    lock.releaseExclusiveLock();

    Orient.instance().getProfiler().stopChrono("db." + name + ".create", "Create a database", timer, "db.*.create");
  }
}
/** Intentionally a no-op: local storage keeps its state in memory and needs no reload. */
public void reload() {
}
/** @return true if the storage exists on disk at the resolved storage path. */
public boolean exists() {
  return exists(storagePath);
}
/**
 * The storage is considered to exist when the first file of the default data segment
 * ("default.0" plus the data-segment extension) is present under the given path.
 */
private boolean exists(String path) {
  final String firstDataFile = path + "/" + OStorage.DATA_DEFAULT_NAME + ".0" + ODataLocal.DEF_EXTENSION;
  return new File(firstDataFile).exists();
}
/**
 * Closes the storage: all clusters and data segments, the transaction manager, the
 * configuration, the level-2 cache, the disk cache and the write-ahead log, then
 * unregisters the storage from the Orient engine.
 *
 * @param iForce when true, closes even if other users are still attached
 */
@Override
public void close(final boolean iForce) {
  final long timer = Orient.instance().getProfiler().startChrono();

  lock.acquireExclusiveLock();
  try {

    if (!checkForClose(iForce))
      return;

    status = STATUS.CLOSING;

    for (OCluster cluster : clusters)
      if (cluster != null)
        cluster.close();
    clusters = new OCluster[0];
    clusterMap.clear();

    for (ODataLocal data : dataSegments)
      if (data != null)
        data.close();
    dataSegments = new ODataLocal[0];

    txManager.close();

    if (configuration != null)
      configuration.close();

    level2Cache.shutdown();

    OMMapManagerLocator.getInstance().flush();

    super.close(iForce);
    uninstallProfilerHooks();

    if (diskCache != null)
      diskCache.close();

    // NOTE(review): the WAL is *deleted* (not just closed) on every close, which implies
    // this legacy storage only uses the WAL within a single session -- confirm before
    // relying on WAL contents surviving a clean shutdown.
    if (writeAheadLog != null)
      writeAheadLog.delete();

    Orient.instance().unregisterStorage(this);
    status = STATUS.CLOSED;
  } catch (IOException e) {
    OLogManager.instance().error(this, "Error on closing of storage '" + name, e, OStorageException.class);

  } finally {
    lock.releaseExclusiveLock();

    Orient.instance().getProfiler().stopChrono("db." + name + ".close", "Close a database", timer, "db.*.close");
  }
}
/**
 * Deletes physically all the database files (that ends for ".och", ".ocl", ".oda", ".odh", ".otx"). Tries also to delete the
 * container folder if the directory is empty. If files are locked, retry up to 10 times before to raise an exception.
 */
public void delete() {
  // CLOSE THE DATABASE BY REMOVING THE CURRENT USER
  if (status != STATUS.CLOSED) {
    if (getUsers() > 0) {
      while (removeUser() > 0)
        ;
    }
  }
  close(true);

  try {
    Orient.instance().unregisterStorage(this);
  } catch (Exception e) {
    OLogManager.instance().error(this, "Cannot unregister storage", e);
  }

  final long timer = Orient.instance().getProfiler().startChrono();

  // GET REAL DIRECTORY
  File dbDir = new File(OIOUtils.getPathFromDatabaseName(OSystemVariableResolver.resolveSystemVariables(url)));
  if (!dbDir.exists() || !dbDir.isDirectory())
    dbDir = dbDir.getParentFile();

  lock.acquireExclusiveLock();
  try {

    if (diskCache != null)
      diskCache.delete();

    // RETRIES: memory-mapped files may still be locked by the OS; retry with a pause
    // (forcing GC/finalization each time) until all files can be removed.
    for (int i = 0; i < DELETE_MAX_RETRIES; ++i) {
      if (dbDir.exists() && dbDir.isDirectory()) {
        int notDeletedFiles = 0;

        // TRY TO DELETE ALL THE FILES
        File[] files = dbDir.listFiles();
        if (files != null) {
          for (File f : files) {
            // DELETE ONLY THE SUPPORTED FILES
            for (String ext : ALL_FILE_EXTENSIONS)
              if (f.getPath().endsWith(ext)) {
                if (!f.delete()) {
                  notDeletedFiles++;
                }
                break;
              }
          }
        }

        if (notDeletedFiles == 0) {
          // TRY TO DELETE ALSO THE DIRECTORY IF IT'S EMPTY
          dbDir.delete();
          return;
        }
      } else
        return;

      OLogManager
          .instance()
          .debug(
              this,
              "Cannot delete database files because they are still locked by the OrientDB process: waiting %d ms and retrying %d/%d...",
              DELETE_WAIT_TIME, i, DELETE_MAX_RETRIES);

      // FORCE FINALIZATION TO COLLECT ALL THE PENDING BUFFERS
      OMemoryWatchDog.freeMemoryForResourceCleanup(DELETE_WAIT_TIME);
    }

    throw new OStorageException("Cannot delete database '" + name + "' located in: " + dbDir + ". Database files seem locked");

  } catch (IOException ioe) {
    throw new OStorageException("Cannot delete database '" + name + "' located in: " + dbDir + ".", ioe);
  } finally {
    lock.releaseExclusiveLock();

    Orient.instance().getProfiler().stopChrono("db." + name + ".drop", "Drop a database", timer, "db.*.drop");
  }
}
/**
 * Runs an integrity check over the whole storage in two phases: (1) every cluster entry is
 * validated (data-segment id/position, record size, tombstone/hole consistency); (2) every
 * data-segment chunk is walked and cross-checked against the cluster that references it.
 * Problems are reported through the listener as warnings or errors.
 *
 * @param iVerbose  when true, per-record/per-chunk progress is reported too
 * @param iListener receives all check output
 * @return true when no errors (warnings allowed) were found
 */
public boolean check(final boolean iVerbose, final OCommandOutputListener iListener) {
  int errors = 0;
  int warnings = 0;

  lock.acquireSharedLock();
  try {
    long totalRecors = 0;
    final long start = System.currentTimeMillis();

    formatMessage(iVerbose, iListener, "\nChecking database '" + getName() + "'...\n");

    formatMessage(iVerbose, iListener, "\n(1) Checking data-clusters. This activity checks if pointers to data are coherent.");

    final OPhysicalPosition ppos = new OPhysicalPosition();

    // BROWSE ALL THE CLUSTERS
    for (OCluster c : clusters) {
      if (!(c instanceof OClusterLocal))
        continue;

      formatMessage(iVerbose, iListener, "\n- data-cluster #%-5d %s -> ", c.getId(), c.getName());

      // BROWSE ALL THE RECORDS
      for (final OClusterEntryIterator it = c.absoluteIterator(); it.hasNext();) {
        final OPhysicalPosition physicalPosition = it.next();
        totalRecors++;
        try {
          if (physicalPosition.dataSegmentId >= dataSegments.length) {
            formatMessage(iVerbose, iListener, "WARN: Found wrong data segment %d ", physicalPosition.dataSegmentId);
            warnings++;
          }

          if (physicalPosition.recordSize < 0) {
            formatMessage(iVerbose, iListener, "WARN: Found wrong record size %d ", physicalPosition.recordSize);
            warnings++;
          }

          if (physicalPosition.recordSize >= 1000000) {
            formatMessage(iVerbose, iListener, "WARN: Found suspected big record size %d. Is it corrupted? ",
                physicalPosition.recordSize);
            warnings++;
          }

          if (physicalPosition.dataSegmentPos > dataSegments[physicalPosition.dataSegmentId].getFilledUpTo()) {
            formatMessage(iVerbose, iListener, "WARN: Found wrong pointer to data chunk %d out of data segment size (%d) ",
                physicalPosition.dataSegmentPos, dataSegments[physicalPosition.dataSegmentId].getFilledUpTo());
            warnings++;
          }

          if (physicalPosition.recordVersion.isTombstone() && (c instanceof OClusterLocal)) {
            // CHECK IF THE HOLE EXISTS
            boolean found = false;
            int tot = ((OClusterLocal) c).holeSegment.getHoles();
            for (int i = 0; i < tot; ++i) {
              final long recycledPosition = ((OClusterLocal) c).holeSegment.getEntryPosition(i) / OClusterLocal.RECORD_SIZE;
              if (recycledPosition == physicalPosition.clusterPosition.longValue()) {
                // FOUND
                found = true;
                break;
              }
            }

            if (!found) {
              formatMessage(iVerbose, iListener, "WARN: Cannot find hole for deleted record %d:%d ", c.getId(),
                  physicalPosition.clusterPosition);
              warnings++;
            }
          }
        } catch (IOException e) {
          // NOTE(review): the exception object is passed as a format argument to "%d" here
          // -- verify formatMessage tolerates/handles a leading Throwable argument.
          formatMessage(iVerbose, iListener, "WARN: Error while reading record #%d:%d ", e, c.getId(), ppos.clusterPosition);
          warnings++;
        }
      }

      if (c instanceof OClusterLocal) {
        final int totalHoles = ((OClusterLocal) c).holeSegment.getHoles();
        if (totalHoles > 0) {
          formatMessage(iVerbose, iListener, " [found " + totalHoles + " hole(s)]");

          // CHECK HOLES: every registered hole must point at a tombstoned record.
          for (int i = 0; i < totalHoles; ++i) {
            long recycledPosition = -1;
            try {
              ppos.clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(((OClusterLocal) c).holeSegment.getEntryPosition(i)
                  / OClusterLocal.RECORD_SIZE);
              OPhysicalPosition physicalPosition = c.getPhysicalPosition(ppos);

              if (physicalPosition != null && !physicalPosition.recordVersion.isTombstone()) {
                formatMessage(iVerbose, iListener,
                    "WARN: Found wrong hole %d/%d for deleted record %d:%d. The record seems good ", i, totalHoles - 1,
                    c.getId(), recycledPosition);
                warnings++;
              }
            } catch (Exception e) {
              formatMessage(iVerbose, iListener, "WARN: Found wrong hole %d/%d for deleted record %d:%d. The record not exists ",
                  i, totalHoles - 1, c.getId(), recycledPosition);
              warnings++;
            }
          }
        }
      }
      formatMessage(iVerbose, iListener, "OK");
    }

    int totalChunks = 0;
    formatMessage(iVerbose, iListener,
        "\n\n(2) Checking data chunks integrity. In this phase data segments are scanned to check the back reference into the clusters.");

    for (ODataLocal d : dataSegments) {
      if (d == null)
        continue;

      // NOTE(review): five arguments for four placeholders -- the trailing d.getHoles()
      // is silently ignored by the formatter; likely a missing "holes=%d" in the format.
      formatMessage(iVerbose, iListener, "\n- data-segment %s (id=%d) size=%d/%d...", d.getName(), d.getId(), d.getFilledUpTo(),
          d.getSize(), d.getHoles());

      int nextPos = 0;

      // GET DATA-SEGMENT HOLES
      final List<ODataHoleInfo> holes = d.getHolesList();
      if (iVerbose) {
        formatMessage(iVerbose, iListener, "\n-- found %d holes:", holes.size());

        for (ODataHoleInfo hole : holes)
          formatMessage(iVerbose, iListener, "\n--- hole #%-7d offset=%-10d size=%-7d", hole.holeOffset, hole.dataOffset,
              hole.size);
      }

      // CHECK CHUNKS
      formatMessage(iVerbose, iListener, "\n-- checking chunks:");

      int pos;
      do {
        try {
          pos = nextPos;

          // SEARCH IF THE RECORD IT'S BETWEEN HOLES
          ODataHoleInfo foundHole = null;
          for (ODataHoleInfo hole : holes) {
            if (hole.dataOffset == pos) {
              // HOLE FOUND!
              foundHole = hole;
              break;
            }
          }

          int recordSize = d.getRecordSize(pos);

          formatMessage(iVerbose, iListener, "\n--- chunk #%-7d offset=%-10d size=%-7d -> ", totalChunks, pos, recordSize);

          if (recordSize < 0) {
            // Negative size marks a deleted chunk; it must have a matching hole entry.
            recordSize *= -1;

            // HOLE: CHECK HOLE PRESENCE
            if (foundHole != null) {
              if (foundHole.size != recordSize) {
                formatMessage(iVerbose, iListener,
                    "WARN: Chunk %s:%d (offset=%d size=%d) differs in size with the hole size %d ", d.getName(), totalChunks,
                    pos, recordSize, foundHole.size);
                warnings++;
              }

              nextPos = pos + foundHole.size;
            } else {
              formatMessage(iVerbose, iListener, "WARN: Chunk %s:%d (offset=%d size=%d) has no hole for deleted chunk ",
                  d.getName(), totalChunks, pos, recordSize);
              warnings++;
              nextPos = pos + recordSize;
            }
          } else {

            if (foundHole != null) {
              formatMessage(
                  iVerbose,
                  iListener,
                  "WARN: Chunk %s:%d (offset=%d size=%d) it's between the holes (hole #%d) even if has no negative recordSize. Jump the content ",
                  d.getName(), totalChunks, pos, recordSize, foundHole.holeOffset);
              warnings++;
              nextPos = pos + foundHole.size;
            } else {
              // REGULAR DATA CHUNK
              nextPos = pos + ODataLocal.RECORD_FIX_SIZE + recordSize;

              final byte[] buffer = d.getRecord(pos);
              if (buffer.length != recordSize) {
                formatMessage(iVerbose, iListener,
                    "WARN: Chunk %s:%d (offset=%d size=%d) has wrong record size because the record length is %d ", d.getName(),
                    totalChunks, pos, recordSize, buffer.length);
                warnings++;
              }

              final ORecordId rid = d.getRecordRid(pos);
              if (!rid.isValid()) {
                formatMessage(iVerbose, iListener, "WARN: Chunk %s:%d (offset=%d size=%d) points to invalid RID %s ",
                    d.getName(), totalChunks, pos, recordSize, rid);
                warnings++;
              } else {
                if (rid.clusterId >= clusters.length) {
                  formatMessage(
                      iVerbose,
                      iListener,
                      "WARN: Chunk %s:%d (offset=%d size=%d) has invalid RID because points to %s but configured clusters are %d in total ",
                      d.getName(), totalChunks, pos, recordSize, rid, clusters.length);
                  warnings++;
                } else if (clusters[rid.clusterId] == null) {
                  formatMessage(
                      iVerbose,
                      iListener,
                      "WARN: Chunk %s:%d (offset=%d size=%d) has invalid RID because points to %s but the cluster %d not exists ",
                      d.getName(), totalChunks, pos, recordSize, rid, rid.clusterId);
                  warnings++;
                } else {
                  // Back-reference check: the cluster entry must point at this very chunk.
                  ppos.clusterPosition = rid.clusterPosition;
                  clusters[rid.clusterId].getPhysicalPosition(ppos);

                  if (ppos.dataSegmentId != d.getId()) {
                    formatMessage(
                        iVerbose,
                        iListener,
                        "WARN: Chunk %s:%d (offset=%d size=%d) point to the RID %d but it doesn't point to current data segment %d but to %d ",
                        d.getName(), totalChunks, pos, recordSize, rid, d.getId(), ppos.dataSegmentId);
                    warnings++;
                  }

                  if (ppos.dataSegmentPos != pos) {
                    formatMessage(
                        iVerbose,
                        iListener,
                        "WARN: Chunk %s:%d (offset=%d size=%d) point to the RID %d but it doesn't point to current chunk %d but to %d ",
                        d.getName(), totalChunks, pos, recordSize, rid, ppos.dataSegmentPos, pos);
                    warnings++;
                  }
                }
              }
            }
          }
          totalChunks++;

          formatMessage(iVerbose, iListener, "OK");

        } catch (Exception e) {
          iListener.onMessage("ERROR: " + e.toString());
          // OLogManager.instance().warn(this, "ERROR: Chunk %s:%d (offset=%d) error: %s", e, d.getName(),
          // totalChunks, pos, e.toString());
          errors++;
        }
      } while (nextPos < d.getFilledUpTo());

      formatMessage(iVerbose, iListener, "\n");
    }

    iListener.onMessage("\nCheck of database completed in " + (System.currentTimeMillis() - start)
        + "ms:\n- Total records checked: " + totalRecors + "\n- Total chunks checked.: " + totalChunks
        + "\n- Warnings.............: " + warnings + "\n- Errors...............: " + errors + "\n");
  } finally {
    lock.releaseSharedLock();
  }

  return errors == 0;
}
/**
 * Returns the data segment registered under the given id.
 *
 * @throws IllegalArgumentException when no segment with that id exists
 */
public ODataLocal getDataSegmentById(final int iDataSegmentId) {
  checkOpeness();

  lock.acquireSharedLock();
  try {
    if (iDataSegmentId < dataSegments.length)
      return dataSegments[iDataSegmentId];

    throw new IllegalArgumentException("Data segment #" + iDataSegmentId + " does not exist in database '" + name + "'");
  } finally {
    lock.releaseSharedLock();
  }
}
/**
 * Resolves a data-segment name (case-insensitive) to its id. A null name maps to the
 * default segment (id 0).
 *
 * @throws IllegalArgumentException when no segment with that name exists
 */
public int getDataSegmentIdByName(final String iDataSegmentName) {
  if (iDataSegmentName == null)
    return 0;

  checkOpeness();

  lock.acquireSharedLock();
  try {
    for (final ODataLocal segment : dataSegments)
      if (segment != null && segment.getName().equalsIgnoreCase(iDataSegmentName))
        return segment.getId();

    throw new IllegalArgumentException("Data segment '" + iDataSegmentName + "' does not exist in database '" + name + "'");
  } finally {
    lock.releaseSharedLock();
  }
}
/**
 * Add a new data segment in the default segment directory and with filename equals to the cluster name.
 *
 * @return the id of the new segment, or -1 on failure
 */
public int addDataSegment(final String iDataSegmentName) {
  return addDataSegment(iDataSegmentName, null);
}
/**
 * Adds a new data segment with the given (lower-cased) name in the given directory,
 * creating its files and persisting it in the storage configuration.
 *
 * @param iSegmentName segment name; converted to lower case
 * @param iDirectory   target directory, or null for the default location
 * @return the id of the new segment, or -1 on failure
 */
public int addDataSegment(String iSegmentName, final String iDirectory) {
  checkOpeness();

  iSegmentName = iSegmentName.toLowerCase();

  lock.acquireExclusiveLock();
  try {
    final OStorageDataConfiguration conf = new OStorageDataConfiguration(configuration, iSegmentName, -1, iDirectory);

    final int pos = registerDataSegment(conf);

    if (pos == -1)
      throw new OConfigurationException("Cannot add segment " + conf.name + " because it is already part of storage '" + name
          + "'");

    dataSegments[pos].create(-1);

    // UPDATE CONFIGURATION
    conf.id = pos;
    if (pos == configuration.dataSegments.size())
      configuration.dataSegments.add(conf);
    else
      configuration.dataSegments.set(pos, conf);

    configuration.update();
    return pos;
  } catch (Throwable e) {
    // NOTE(review): OLogManager.error() is given OStorageException.class -- presumably it
    // rethrows as that type, making the "return -1" below unreachable; confirm before
    // relying on the -1 return.
    OLogManager.instance().error(this, "Error on creation of new data segment '" + iSegmentName + "' in: " + iDirectory, e,
        OStorageException.class);
    return -1;

  } finally {
    lock.releaseExclusiveLock();
  }
}
/**
 * Add a new cluster into the storage. Type can be: "physical" or "logical".
 *
 * @param iClusterType     cluster implementation type name
 * @param iClusterName     cluster name (lower-cased); when null only an id slot is reserved
 * @param iLocation        physical location, or null for the default
 * @param iDataSegmentName data segment to bind the cluster to, or null for the default
 * @param forceListBased   unused by this implementation
 * @param iParameters      implementation-specific extra parameters
 * @return the new cluster id, or -1 on failure
 */
public int addCluster(final String iClusterType, String iClusterName, final String iLocation, final String iDataSegmentName,
    boolean forceListBased, final Object... iParameters) {
  checkOpeness();

  lock.acquireExclusiveLock();
  try {
    final OCluster cluster;
    if (iClusterName != null) {
      iClusterName = iClusterName.toLowerCase();

      // FIND THE FIRST AVAILABLE CLUSTER ID
      int clusterPos = clusters.length;
      for (int i = 0; i < clusters.length; ++i)
        if (clusters[i] == null) {
          clusterPos = i;
          break;
        }

      cluster = Orient.instance().getClusterFactory().createCluster(iClusterType);
      cluster.configure(this, clusterPos, iClusterName, iLocation, getDataSegmentIdByName(iDataSegmentName), iParameters);
    } else
      cluster = null;

    final int clusterId = registerCluster(cluster);

    if (cluster != null) {
      cluster.create(-1);
      configuration.update();
    }

    return clusterId;
  } catch (Exception e) {
    OLogManager.instance().exception("Error in creation of new cluster '" + iClusterName + "' of type: " + iClusterType, e,
        OStorageException.class);

  } finally {
    lock.releaseExclusiveLock();
  }

  return -1;
}
/**
 * Creating a cluster with a caller-chosen id is not supported by the legacy local storage;
 * this overload always fails.
 *
 * @throws UnsupportedOperationException always
 */
public int addCluster(String iClusterType, String iClusterName, int iRequestedId, String iLocation, String iDataSegmentName,
    boolean forceListBased, Object... iParameters) {
  final String message = "This operation is unsupported for " + getType()
      + " storage. If you are doing import please use parameter -preserveClusterIDs=false .";
  throw new UnsupportedOperationException(message);
}
/**
 * @return the internal data-segment array (may contain null slots for dropped segments).
 *         NOTE(review): the live array is exposed -- callers must not mutate it.
 */
public ODataLocal[] getDataSegments() {
  return dataSegments;
}
/** @return the transaction executor bound to this storage. */
public OStorageLocalTxExecuter getTxManager() {
  return txManager;
}
/**
 * Drops the cluster with the given id: frees its level-2 cache entries, optionally
 * truncates it, deletes its files and removes it from the configuration. The id slot is
 * left null so ids of other clusters are preserved.
 *
 * @param iClusterId id of the cluster to drop
 * @param iTruncate  when true, truncate the cluster content before deleting it
 * @return true if the cluster was dropped, false if it did not exist or on error
 */
public boolean dropCluster(final int iClusterId, final boolean iTruncate) {
  lock.acquireExclusiveLock();
  try {

    if (iClusterId < 0 || iClusterId >= clusters.length)
      throw new IllegalArgumentException("Cluster id '" + iClusterId + "' is outside the of range of configured clusters (0-"
          + (clusters.length - 1) + ") in database '" + name + "'");

    final OCluster cluster = clusters[iClusterId];
    if (cluster == null)
      return false;

    getLevel2Cache().freeCluster(iClusterId);

    if (iTruncate)
      cluster.truncate();
    cluster.delete();

    clusterMap.remove(cluster.getName());
    clusters[iClusterId] = null;

    // UPDATE CONFIGURATION
    configuration.dropCluster(iClusterId);

    return true;
  } catch (Exception e) {
    OLogManager.instance().exception("Error while removing cluster '" + iClusterId + "'", e, OStorageException.class);

  } finally {
    lock.releaseExclusiveLock();
  }

  return false;
}
/**
 * Drops the data segment with the given name: deletes its files, nulls its id slot and
 * removes it from the configuration.
 *
 * @param iName segment name
 * @return true if the segment was dropped, false if it did not exist or on error
 */
public boolean dropDataSegment(final String iName) {
  lock.acquireExclusiveLock();
  try {

    final int id = getDataSegmentIdByName(iName);
    final ODataLocal data = dataSegments[id];
    if (data == null)
      return false;

    data.drop();

    dataSegments[id] = null;

    // UPDATE CONFIGURATION
    configuration.dropDataSegment(id);

    return true;
  } catch (Exception e) {
    OLogManager.instance().exception("Error while removing data segment '" + iName + "'", e, OStorageException.class);

  } finally {
    lock.releaseExclusiveLock();
  }

  return false;
}
/** Counts live (non-tombstone) records over the given clusters. */
public long count(final int[] iClusterIds) {
  return count(iClusterIds, false);
}
/**
 * Sums the record counts of the given clusters, optionally including tombstones.
 * Unknown positive ids raise a configuration error; negative ids and dropped (null)
 * cluster slots are silently skipped.
 */
@Override
public long count(int[] iClusterIds, boolean countTombstones) {
  checkOpeness();

  lock.acquireSharedLock();
  try {
    long total = 0;

    for (final int clusterId : iClusterIds) {
      if (clusterId >= clusters.length)
        throw new OConfigurationException("Cluster id " + clusterId + " was not found in database '" + name + "'");

      if (clusterId > -1) {
        final OCluster cluster = clusters[clusterId];
        if (cluster != null)
          total += cluster.getEntries() - (countTombstones ? 0L : cluster.getTombstonesCount());
      }
    }

    return total;
  } finally {
    lock.releaseSharedLock();
  }
}
/**
 * Returns the [first, last] position pair of the given cluster; an id of -1 yields a pair
 * of INVALID_POSITION values, a dropped cluster yields an empty array.
 */
public OClusterPosition[] getClusterDataRange(final int iClusterId) {
  if (iClusterId == -1)
    return new OClusterPosition[] { OClusterPosition.INVALID_POSITION, OClusterPosition.INVALID_POSITION };

  checkOpeness();

  lock.acquireSharedLock();
  try {

    return clusters[iClusterId] != null ? new OClusterPosition[] { clusters[iClusterId].getFirstPosition(),
        clusters[iClusterId].getLastPosition() } : new OClusterPosition[0];

  } catch (IOException ioe) {
    throw new OStorageException("Can not retrieve information about data range", ioe);
  } finally {
    lock.releaseSharedLock();
  }
}
/** Counts live (non-tombstone) records of a single cluster. */
public long count(final int iClusterId) {
  return count(iClusterId, false);
}
/**
 * Counts the records of a single cluster, optionally including tombstones. A dropped
 * (null) cluster counts as zero; id -1 is rejected.
 */
@Override
public long count(int iClusterId, boolean countTombstones) {
  if (iClusterId == -1)
    throw new OStorageException("Cluster Id " + iClusterId + " is invalid in database '" + name + "'");

  // COUNT PHYSICAL CLUSTER IF ANY
  checkOpeness();

  lock.acquireSharedLock();
  try {
    final OCluster cluster = clusters[iClusterId];
    if (cluster == null)
      return 0;

    final long entries = cluster.getEntries();
    if (countTombstones)
      return entries;

    return entries - cluster.getTombstonesCount();
  } finally {
    lock.releaseSharedLock();
  }
}
/**
 * Creates a new record in the given cluster/data segment. Inside a committing transaction
 * the creation is delegated to the tx manager; otherwise the record is written directly,
 * optionally synched to disk, and the callback (if any) is notified with the new position.
 * Lock order is: modification lock, then storage exclusive lock -- do not reorder.
 *
 * @param iDataSegmentId target data segment id
 * @param iRid           rid whose clusterId selects the cluster; position is assigned here
 * @param iContent       record payload
 * @param iRecordVersion initial record version
 * @param iRecordType    record type byte
 * @param iMode          unused by this implementation
 * @param iCallback      optional callback invoked with the assigned cluster position
 * @return the physical position of the created record
 */
public OStorageOperationResult<OPhysicalPosition> createRecord(int iDataSegmentId, final ORecordId iRid, final byte[] iContent,
    final ORecordVersion iRecordVersion, final byte iRecordType, final int iMode, ORecordCallback<OClusterPosition> iCallback) {
  checkOpeness();

  final OCluster cluster = getClusterById(iRid.clusterId);
  final ODataLocal dataSegment = getDataSegmentById(iDataSegmentId);

  final OPhysicalPosition ppos;
  modificationLock.requestModificationLock();
  try {
    lock.acquireExclusiveLock();
    try {
      if (txManager.isCommitting()) {
        ppos = txManager.createRecord(txManager.getCurrentTransaction().getId(), dataSegment, cluster, iRid, iContent,
            iRecordVersion, iRecordType);
      } else {
        ppos = createRecord(dataSegment, cluster, iContent, iRecordType, iRid, iRecordVersion);
        // Force a disk synch when globally configured or when this cluster is in the
        // "synch immediately" list.
        if (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean()
            || clustersToSyncImmediately.contains(cluster.getName()))
          synchRecordUpdate(cluster, ppos);

        if (iCallback != null)
          iCallback.call(iRid, ppos.clusterPosition);
      }
    } finally {
      lock.releaseExclusiveLock();
    }
  } finally {
    modificationLock.releaseModificationLock();
  }

  return new OStorageOperationResult<OPhysicalPosition>(ppos);
}
/**
 * Applies a replica of a record coming from another node. If the record does not exist
 * locally it is inserted (requires a hash-based cluster); if it exists, the replica wins
 * only when its version is newer, handling the four live/tombstone combinations
 * (update in place, resurrect, delete, or version-bump only).
 * Lock order: modification lock, storage exclusive lock, per-rid record lock.
 *
 * @return true when the replica was applied, false when the local copy was newer or equal
 */
public boolean updateReplica(final int dataSegmentId, final ORecordId rid, final byte[] content,
    final ORecordVersion recordVersion, final byte recordType) throws IOException {
  if (rid.isNew())
    throw new OStorageException("Passed record with id " + rid + " is new and can not be treated as replica.");

  checkOpeness();

  final OCluster cluster = getClusterById(rid.clusterId);
  final ODataLocal dataSegment = getDataSegmentById(dataSegmentId);

  modificationLock.requestModificationLock();
  try {
    lock.acquireExclusiveLock();
    try {
      lockManager.acquireLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
      try {
        OPhysicalPosition ppos = cluster.getPhysicalPosition(new OPhysicalPosition(rid.clusterPosition));
        if (ppos == null) {
          // Record unknown locally: insert it at the replica's exact position
          // (only hash-based clusters can place a record at an arbitrary position).
          if (!cluster.isHashBased())
            throw new OStorageException("Cluster with LH support is required.");

          ppos = new OPhysicalPosition(rid.clusterPosition, recordVersion);

          ppos.recordType = recordType;
          ppos.dataSegmentId = dataSegment.getId();

          // A tombstone replica carries no content, so nothing is written to the data segment.
          if (!recordVersion.isTombstone()) {
            ppos.dataSegmentPos = dataSegment.addRecord(rid, content);
          }

          cluster.addPhysicalPosition(ppos);
          return true;
        } else {
          if (ppos.recordType != recordType)
            throw new OStorageException("Record types of provided and stored replicas are different " + recordType + ":"
                + ppos.recordType + ".");

          // Apply the replica only if it is strictly newer than the local copy.
          if (ppos.recordVersion.compareTo(recordVersion) < 0) {
            cluster.updateVersion(ppos.clusterPosition, recordVersion);

            if (!recordVersion.isTombstone() && !ppos.recordVersion.isTombstone()) {
              // live -> live: overwrite content in place.
              ppos.dataSegmentPos = dataSegment.setRecord(ppos.dataSegmentPos, rid, content);
              cluster.updateDataSegmentPosition(ppos.clusterPosition, dataSegmentId, ppos.dataSegmentPos);
            } else if (!recordVersion.isTombstone() && ppos.recordVersion.isTombstone()) {
              // tombstone -> live: resurrect by adding fresh content.
              ppos.dataSegmentPos = dataSegment.addRecord(rid, content);
              cluster.updateDataSegmentPosition(ppos.clusterPosition, dataSegmentId, ppos.dataSegmentPos);
            } else if (recordVersion.isTombstone() && !ppos.recordVersion.isTombstone()) {
              // live -> tombstone: drop the content.
              dataSegment.deleteRecord(ppos.dataSegmentPos);
            }

            return true;
          }
        }
      } finally {
        lockManager.releaseLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
      }
    } finally {
      lock.releaseExclusiveLock();
    }
  } finally {
    modificationLock.releaseModificationLock();
  }

  return false;
}
@Override
public <V> V callInLock(Callable<V> iCallable, boolean iExclusiveLock) {
  // Shared executions pass straight through to the parent implementation;
  // exclusive executions are additionally guarded by the storage modification
  // lock so that freeze()/release() can block writers.
  if (!iExclusiveLock)
    return super.callInLock(iCallable, iExclusiveLock);

  modificationLock.requestModificationLock();
  try {
    return super.callInLock(iCallable, iExclusiveLock);
  } finally {
    modificationLock.releaseModificationLock();
  }
}
@Override
public <V> V callInRecordLock(Callable<V> callable, ORID rid, boolean exclusiveLock) {
  // Runs the callable while holding, in acquisition order: the modification
  // lock (exclusive mode only), the storage-level lock, and the per-record
  // lock. All locks are released in reverse order in the finally blocks.
  if (exclusiveLock)
    modificationLock.requestModificationLock();

  try {
    if (exclusiveLock)
      lock.acquireExclusiveLock();
    else
      lock.acquireSharedLock();
    try {
      lockManager.acquireLock(Thread.currentThread(), rid, exclusiveLock ? LOCK.EXCLUSIVE : LOCK.SHARED);
      try {
        return callable.call();
      } finally {
        lockManager.releaseLock(Thread.currentThread(), rid, exclusiveLock ? LOCK.EXCLUSIVE : LOCK.SHARED);
      }
    } catch (RuntimeException e) {
      // Runtime failures propagate untouched so callers see the original type.
      throw e;
    } catch (Exception e) {
      // Checked exceptions from the callable are wrapped, preserving the cause.
      throw new OException("Error on nested call in lock", e);
    } finally {
      if (exclusiveLock)
        lock.releaseExclusiveLock();
      else
        lock.releaseSharedLock();
    }
  } finally {
    if (exclusiveLock)
      modificationLock.releaseModificationLock();
  }
}
public OStorageOperationResult<ORawBuffer> readRecord(final ORecordId iRid, final String iFetchPlan, boolean iIgnoreCache,
    ORecordCallback<ORawBuffer> iCallback, boolean loadTombstones) {
  // Reads a record synchronously. NOTE: iFetchPlan, iIgnoreCache and iCallback
  // are ignored by this storage implementation; the read is always performed
  // atomically (iAtomicLock = true in the delegate).
  checkOpeness();
  return new OStorageOperationResult<ORawBuffer>(readRecord(getClusterById(iRid.clusterId), iRid, true, loadTombstones));
}
public OStorageOperationResult<ORecordVersion> updateRecord(final ORecordId iRid, final byte[] iContent,
    final ORecordVersion iVersion, final byte iRecordType, final int iMode, ORecordCallback<ORecordVersion> iCallback) {
  // Updates a record under the modification and exclusive storage locks.
  // While a transaction is committing, the work is delegated to the tx manager.
  checkOpeness();

  modificationLock.requestModificationLock();
  try {
    lock.acquireExclusiveLock();
    try {
      final OCluster cluster = getClusterById(iRid.clusterId);
      if (txManager.isCommitting()) {
        return new OStorageOperationResult<ORecordVersion>(txManager.updateRecord(txManager.getCurrentTransaction().getId(),
            cluster, iRid, iContent, iVersion, iRecordType));
      } else {
        final OPhysicalPosition ppos = updateRecord(cluster, iRid, iContent, iVersion, iRecordType);
        // Force an immediate disk synch when configured globally or per-cluster.
        if (ppos != null
            && (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean() || clustersToSyncImmediately.contains(cluster
                .getName())))
          synchRecordUpdate(cluster, ppos);
        // A null position means the update failed: report an untracked version.
        final ORecordVersion returnValue = (ppos != null ? ppos.recordVersion : OVersionFactory.instance()
            .createUntrackedVersion());
        if (iCallback != null)
          iCallback.call(iRid, returnValue);
        return new OStorageOperationResult<ORecordVersion>(returnValue);
      }
    } finally {
      lock.releaseExclusiveLock();
    }
  } finally {
    modificationLock.releaseModificationLock();
  }
}
public OStorageOperationResult<Boolean> deleteRecord(final ORecordId iRid, final ORecordVersion iVersion, final int iMode,
    ORecordCallback<Boolean> iCallback) {
  // Deletes a record under the modification and exclusive storage locks;
  // within a commit the deletion is delegated to the transaction manager.
  // Returns true when a record was actually removed.
  checkOpeness();

  final OCluster cluster = getClusterById(iRid.clusterId);

  modificationLock.requestModificationLock();
  try {
    lock.acquireExclusiveLock();
    try {
      if (txManager.isCommitting()) {
        return new OStorageOperationResult<Boolean>(txManager.deleteRecord(txManager.getCurrentTransaction().getId(), cluster,
            iRid.clusterPosition, iVersion));
      } else {
        // Tombstones are used instead of physical removal when configured.
        final OPhysicalPosition ppos = deleteRecord(cluster, iRid, iVersion,
            OGlobalConfiguration.STORAGE_USE_TOMBSTONES.getValueAsBoolean());
        if (ppos != null
            && (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean() || clustersToSyncImmediately.contains(cluster
                .getName())))
          synchRecordUpdate(cluster, ppos);
        final boolean returnValue = ppos != null;
        if (iCallback != null)
          iCallback.call(iRid, returnValue);
        return new OStorageOperationResult<Boolean>(returnValue);
      }
    } finally {
      lock.releaseExclusiveLock();
    }
  } finally {
    modificationLock.releaseModificationLock();
  }
}
public Set<String> getClusterNames() {
  // Returns a defensive snapshot of the registered cluster names.
  // FIX: the previous implementation returned the live keySet() view of
  // clusterMap, which escaped the shared lock — it could change concurrently
  // after release, and mutating it would corrupt the internal map.
  checkOpeness();

  lock.acquireSharedLock();
  try {
    return new HashSet<String>(clusterMap.keySet());
  } finally {
    lock.releaseSharedLock();
  }
}
public int getClusterIdByName(final String iClusterName) {
  // Resolves a cluster name to its numeric id. A name beginning with a digit
  // is interpreted as the id itself. Returns -1 when no such cluster exists.
  checkOpeness();

  if (iClusterName == null)
    throw new IllegalArgumentException("Cluster name is null");
  if (iClusterName.length() == 0)
    throw new IllegalArgumentException("Cluster name is empty");

  // Numeric shortcut: "12" means cluster id 12.
  if (Character.isDigit(iClusterName.charAt(0)))
    return Integer.parseInt(iClusterName);

  // Otherwise look it up among the physical clusters (case-insensitive).
  int id = -1;
  lock.acquireSharedLock();
  try {
    final OCluster found = clusterMap.get(iClusterName.toLowerCase());
    if (found != null)
      id = found.getId();
  } finally {
    lock.releaseSharedLock();
  }
  return id;
}
public String getClusterTypeByName(final String iClusterName) {
  // Returns the type string of the named cluster, or null when it is unknown.
  checkOpeness();

  if (iClusterName == null)
    throw new IllegalArgumentException("Cluster name is null");

  // Look it up among the physical clusters (map is keyed by lower-case names).
  String type = null;
  lock.acquireSharedLock();
  try {
    final OCluster found = clusterMap.get(iClusterName.toLowerCase());
    if (found != null)
      type = found.getType();
  } finally {
    lock.releaseSharedLock();
  }
  return type;
}
public void commit(final OTransaction iTx, Runnable callback) {
  // Commits a transaction under the modification + exclusive storage locks.
  // On any failure the transaction is rolled back here, still inside the
  // locks, before the exception is rethrown.
  // NOTE(review): rollback(iTx) re-acquires both locks; this assumes they are
  // reentrant — confirm against the lock implementations.
  modificationLock.requestModificationLock();
  try {
    lock.acquireExclusiveLock();
    try {
      try {
        startStorageTx(iTx);

        txManager.clearLogEntries(iTx);
        txManager.commitAllPendingRecords(iTx);

        // Caller-supplied hook runs after records are applied, before synch.
        if (callback != null)
          callback.run();

        if (OGlobalConfiguration.TX_COMMIT_SYNCH.getValueAsBoolean())
          synch();

        endStorageTx();
      } catch (Exception e) {
        // WE NEED TO CALL ROLLBACK HERE, IN THE LOCK
        OLogManager.instance().debug(this, "Error during transaction commit, transaction will be rolled back (tx-id=%d)", e,
            iTx.getId());
        rollback(iTx);
        if (e instanceof OException)
          throw ((OException) e);
        else
          throw new OStorageException("Error during transaction commit.", e);
      } finally {
        try {
          txManager.clearLogEntries(iTx);
          // Shrink the WAL: everything up to its current end is now durable.
          if (writeAheadLog != null)
            writeAheadLog.shrinkTill(writeAheadLog.end());
        } catch (Exception e) {
          // XXX WHAT CAN WE DO HERE ? ROLLBACK IS NOT POSSIBLE
          // IF WE THROW EXCEPTION, A ROLLBACK WILL BE DONE AT DB LEVEL BUT NOT AT STORAGE LEVEL
          OLogManager.instance().error(this, "Clear tx log entries failed", e);
        }
      }
    } finally {
      transaction = null;
      lock.releaseExclusiveLock();
    }
  } finally {
    modificationLock.releaseModificationLock();
  }
}
public void rollback(final OTransaction iTx) {
  // Rolls back the given transaction under the modification + exclusive locks.
  // An IOException during rollback is logged but deliberately not rethrown:
  // there is nothing further the caller could do at this point.
  modificationLock.requestModificationLock();
  try {
    lock.acquireExclusiveLock();
    try {
      txManager.getTxSegment().rollback(iTx);
      rollbackStorageTx();
      if (OGlobalConfiguration.TX_COMMIT_SYNCH.getValueAsBoolean())
        synch();
    } catch (IOException ioe) {
      OLogManager.instance().error(this,
          "Error executing rollback for transaction with id '" + iTx.getId() + "' cause: " + ioe.getMessage(), ioe);
    } finally {
      transaction = null;
      lock.releaseExclusiveLock();
    }
  } finally {
    modificationLock.releaseModificationLock();
  }
}
public void synch() {
  // Flushes every cluster, every data segment and the configuration to disk,
  // holding the exclusive lock so no writes interleave with the flush.
  checkOpeness();

  final long timer = Orient.instance().getProfiler().startChrono();

  lock.acquireExclusiveLock();
  try {
    for (OCluster cluster : clusters)
      if (cluster != null)
        cluster.synch();

    for (ODataLocal data : dataSegments)
      if (data != null)
        data.synch();

    if (configuration != null)
      configuration.synch();
  } catch (IOException e) {
    throw new OStorageException("Error on synch storage '" + name + "'", e);
  } finally {
    lock.releaseExclusiveLock();

    Orient.instance().getProfiler().stopChrono("db." + name + ".synch", "Synch a database", timer, "db.*.synch");
  }
}
protected void synchRecordUpdate(final OCluster cluster, final OPhysicalPosition ppos) {
  // Flushes the given cluster, the record's data segment and the configuration
  // to disk. Used after single-record operations when immediate durability is
  // requested (NON_TX_RECORD_UPDATE_SYNCH or clustersToSyncImmediately).
  checkOpeness();

  final long timer = Orient.instance().getProfiler().startChrono();

  lock.acquireExclusiveLock();
  try {
    cluster.synch();

    final ODataLocal data = getDataSegmentById(ppos.dataSegmentId);
    data.synch();

    if (configuration != null)
      configuration.synch();
  } catch (IOException e) {
    throw new OStorageException("Error on synch storage '" + name + "'", e);
  } finally {
    lock.releaseExclusiveLock();
    // FIX: the metric name was missing the '.' separator ("db.<name>record.synch"),
    // which broke aggregation against the "db.*.record.synch" dictionary key.
    Orient.instance().getProfiler()
        .stopChrono("db." + name + ".record.synch", "Synch a record to database", timer, "db.*.record.synch");
  }
}
/**
 * Collects the hole descriptors of every data segment into a single list.
 *
 * @return an aggregated, caller-owned list of hole descriptors
 */
public List<ODataHoleInfo> getHolesList() {
  final List<ODataHoleInfo> result = new ArrayList<ODataHoleInfo>();

  lock.acquireSharedLock();
  try {
    for (int i = 0; i < dataSegments.length; ++i) {
      final ODataLocal segment = dataSegments[i];
      if (segment != null)
        result.addAll(segment.getHolesList());
    }
    return result;
  } finally {
    lock.releaseSharedLock();
  }
}
/**
 * Counts the holes across all data segments.
 *
 * @return the total number of holes
 */
public long getHoles() {
  lock.acquireSharedLock();
  try {
    long total = 0;
    for (final ODataLocal segment : dataSegments) {
      if (segment == null)
        continue;
      total += segment.getHoles();
    }
    return total;
  } finally {
    lock.releaseSharedLock();
  }
}
/**
 * Returns the total size used by holes
 *
 */
public long getHoleSize() {
  lock.acquireSharedLock();
  try {
    // NOTE(review): getHolesList() re-acquires the shared lock; this assumes
    // the lock is reentrant — confirm against the lock implementation.
    final List<ODataHoleInfo> holes = getHolesList();
    long size = 0;
    for (ODataHoleInfo h : holes)
      // Only count holes with a valid data offset and a positive size.
      if (h.dataOffset > -1 && h.size > 0)
        size += h.size;

    return size;
  } finally {
    lock.releaseSharedLock();
  }
}
public void setDefaultClusterId(final int defaultClusterId) {
  // NOTE(review): no lock and no range validation here; assumes callers pass
  // a valid, existing cluster id — confirm at call sites.
  this.defaultClusterId = defaultClusterId;
}
public String getPhysicalClusterNameById(final int iClusterId) {
  // Maps a cluster id back to its name; null for negative, out-of-range or
  // unregistered ids.
  checkOpeness();

  if (iClusterId < 0)
    return null;

  lock.acquireSharedLock();
  try {
    if (iClusterId >= clusters.length)
      return null;

    final OCluster cluster = clusters[iClusterId];
    return cluster == null ? null : cluster.getName();
  } finally {
    lock.releaseSharedLock();
  }
}
@Override
public OStorageConfiguration getConfiguration() {
  // Direct accessor; no locking — the reference itself is stable.
  return configuration;
}
public int getDefaultClusterId() {
  // Id used when a record carries ORID.CLUSTER_ID_INVALID (see getClusterById).
  return defaultClusterId;
}
public OCluster getClusterById(int iClusterId) {
  // Resolves a cluster by id under the shared lock; an invalid id is replaced
  // by the configured default cluster. Throws for out-of-range or empty slots.
  lock.acquireSharedLock();
  try {
    if (iClusterId == ORID.CLUSTER_ID_INVALID)
      // GET THE DEFAULT CLUSTER
      iClusterId = defaultClusterId;

    checkClusterSegmentIndexRange(iClusterId);

    final OCluster cluster = clusters[iClusterId];
    if (cluster == null)
      throw new IllegalArgumentException("Cluster " + iClusterId + " is null");

    return cluster;
  } finally {
    lock.releaseSharedLock();
  }
}
@Override
public OCluster getClusterByName(final String iClusterName) {
  // Name lookup is case-insensitive: the map is keyed by lower-case names.
  lock.acquireSharedLock();
  try {
    final OCluster found = clusterMap.get(iClusterName.toLowerCase());
    if (found != null)
      return found;
  } finally {
    lock.releaseSharedLock();
  }
  throw new IllegalArgumentException("Cluster " + iClusterName + " does not exist in database '" + name + "'");
}
@Override
public String getURL() {
  // URL in the canonical "<engine>:<path>" form for the local engine.
  return OEngineLocal.NAME + ":" + url;
}
public long getSize() {
  // Sums the record size of every registered cluster under the shared lock.
  lock.acquireSharedLock();
  try {
    long size = 0;

    for (OCluster c : clusters)
      if (c != null)
        size += c.getRecordsSize();

    return size;
  } catch (IOException ioe) {
    // FIX: preserve the underlying IOException as the cause instead of
    // silently discarding it.
    throw new OStorageException("Can not calculate records size", ioe);
  } finally {
    lock.releaseSharedLock();
  }
}
public String getStoragePath() {
  // Filesystem directory backing this storage.
  return storagePath;
}
public String getMode() {
  // Open mode the storage was created with (e.g. read/write).
  return mode;
}
public OStorageVariableParser getVariableParser() {
  // Parser used to expand variables inside storage path configuration.
  return variableParser;
}
public int getClusters() {
  // Number of registered clusters, read under the shared lock.
  lock.acquireSharedLock();
  try {
    return clusterMap.size();
  } finally {
    lock.releaseSharedLock();
  }
}
public Set<OCluster> getClusterInstances() {
  // Snapshot of all registered clusters; safe for the caller to iterate
  // without holding the storage lock.
  final Set<OCluster> snapshot = new HashSet<OCluster>();

  lock.acquireSharedLock();
  try {
    for (int i = 0; i < clusters.length; ++i)
      if (clusters[i] != null)
        snapshot.add(clusters[i]);
  } finally {
    lock.releaseSharedLock();
  }

  return snapshot;
}
/**
 * Method that completes the cluster rename operation. <strong>IT WILL NOT RENAME A CLUSTER, IT JUST CHANGES THE NAME IN THE
 * INTERNAL MAPPING</strong>
 */
public void renameCluster(final String iOldName, final String iNewName) {
  // NOTE(review): no lock is taken and the names are not lower-cased here,
  // unlike the lookups in getClusterByName — confirm callers normalize case.
  clusterMap.put(iNewName, clusterMap.remove(iOldName));
}
protected int registerDataSegment(final OStorageDataConfiguration iConfig) throws IOException {
  // Registers a data segment from its configuration. Returns the assigned
  // slot index, or -1 when a segment with the same name already exists (in
  // which case only its configuration is overwritten).
  checkOpeness();

  // CHECK FOR DUPLICATION OF NAMES
  for (ODataLocal data : dataSegments)
    if (data != null && data.getName().equals(iConfig.name)) {
      // OVERWRITE CONFIG
      data.config = iConfig;
      return -1;
    }

  int pos = -1;

  // Prefer the first empty slot left by a removed segment.
  for (int i = 0; i < dataSegments.length; ++i)
    if (dataSegments[i] == null) {
      // RECYCLE POSITION
      pos = i;
      break;
    }

  if (pos == -1)
    // ASSIGN LATEST
    pos = dataSegments.length;

  // CREATE AND ADD THE NEW REF SEGMENT
  final ODataLocal segment = new ODataLocal(this, iConfig, pos);

  if (pos == dataSegments.length)
    dataSegments = OArrays.copyOf(dataSegments, dataSegments.length + 1);

  dataSegments[pos] = segment;

  return pos;
}
/**
 * Create the cluster by reading the configuration received as argument and register it assigning it the higher serial id.
 *
 * @param iConfig
 *          A OStorageClusterConfiguration implementation, namely physical or logical
 * @return The id (physical position into the array) of the new cluster just created. First is 0.
 * @throws IOException
 */
private int createClusterFromConfig(final OStorageClusterConfiguration iConfig) throws IOException {
  OCluster cluster = clusterMap.get(iConfig.getName());

  // A local cluster being reconfigured as extendible-hashing is replaced:
  // drop the old mapping and fall through to create the new implementation.
  if (cluster instanceof OClusterLocal && iConfig instanceof OStorageEHClusterConfiguration)
    clusterMap.remove(iConfig.getName());
  else if (cluster != null) {
    if (cluster instanceof OClusterLocal) {
      // ALREADY CONFIGURED, JUST OVERWRITE CONFIG
      cluster.configure(this, iConfig);
    }
    return -1;
  }

  cluster = Orient.instance().getClusterFactory().createCluster(iConfig);
  cluster.configure(this, iConfig);

  return registerCluster(cluster);
}
/**
 * Register the cluster internally.
 *
 * @param iCluster
 *          OCluster implementation
 * @return The id (physical position into the array) of the new cluster just created. First is 0.
 * @throws IOException
 */
private int registerCluster(final OCluster iCluster) throws IOException {
  final int id;

  if (iCluster != null) {
    // CHECK FOR DUPLICATION OF NAMES
    if (clusterMap.containsKey(iCluster.getName()))
      throw new OConfigurationException("Cannot add segment '" + iCluster.getName()
          + "' because it is already registered in database '" + name + "'");
    // CREATE AND ADD THE NEW REF SEGMENT
    clusterMap.put(iCluster.getName(), iCluster);
    id = iCluster.getId();
  } else
    id = clusters.length;

  // NOTE(review): the array always grows by one and the cluster is stored at
  // its own id; if id < the old length this leaves the new last slot null and
  // overwrites an existing entry — confirm ids are always assigned densely.
  clusters = OArrays.copyOf(clusters, clusters.length + 1);
  clusters[id] = iCluster;

  return id;
}
private void checkClusterSegmentIndexRange(final int iClusterId) {
  // Validates both bounds of the cluster id.
  // FIX: negative ids previously fell through and surfaced later as an
  // ArrayIndexOutOfBoundsException instead of this meaningful error.
  if (iClusterId < 0 || iClusterId > clusters.length - 1)
    throw new IllegalArgumentException("Cluster segment #" + iClusterId + " does not exist in database '" + name + "'");
}
protected OPhysicalPosition createRecord(final ODataLocal dataSegment, final OCluster cluster, final byte[] content,
    final byte recordType, final ORecordId rid, final ORecordVersion recordVersion) {
  // Allocates a physical position in the cluster, stores the content in the
  // data segment and links the two. The caller must already hold the
  // exclusive storage lock. Returns null (after logging) on I/O failure.
  assert (lock.assertExclusiveLockHold());
  checkOpeness();

  if (content == null)
    throw new IllegalArgumentException("Record is null");

  final long timer = Orient.instance().getProfiler().startChrono();

  final OPhysicalPosition ppos = new OPhysicalPosition(-1, -1, recordType);
  // Hash-based clusters need the position chosen up front; list-based
  // clusters assign it inside addPhysicalPosition().
  if (cluster.isHashBased()) {
    if (rid.isNew()) {
      if (OGlobalConfiguration.USE_NODE_ID_CLUSTER_POSITION.getValueAsBoolean()) {
        ppos.clusterPosition = OClusterPositionFactory.INSTANCE.generateUniqueClusterPosition();
      } else {
        ppos.clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(positionGenerator++);
      }
    } else {
      ppos.clusterPosition = rid.clusterPosition;
    }
  }

  try {
    if (!cluster.addPhysicalPosition(ppos))
      throw new OStorageException("Record with given id " + new ORecordId(rid.clusterId, ppos.clusterPosition)
          + " already exists.");

    rid.clusterPosition = ppos.clusterPosition;

    lockManager.acquireLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
    try {
      ppos.dataSegmentId = dataSegment.getId();
      ppos.dataSegmentPos = dataSegment.addRecord(rid, content);

      cluster.updateDataSegmentPosition(ppos.clusterPosition, ppos.dataSegmentId, ppos.dataSegmentPos);

      if (recordVersion.getCounter() > 0 && recordVersion.compareTo(ppos.recordVersion) != 0) {
        // OVERWRITE THE VERSION
        cluster.updateVersion(rid.clusterPosition, recordVersion);
        ppos.recordVersion = recordVersion;
      }

      return ppos;
    } finally {
      lockManager.releaseLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
    }
  } catch (IOException ioe) {
    // Best-effort undo of the physical position allocated above.
    try {
      if (ppos.clusterPosition != null && ppos.clusterPosition.compareTo(OClusterPosition.INVALID_POSITION) != 0)
        cluster.removePhysicalPosition(ppos.clusterPosition);
    } catch (IOException e) {
      OLogManager.instance().error(this, "Error on removing physical position in cluster: " + cluster, e);
    }

    OLogManager.instance().error(this, "Error on creating record in cluster: " + cluster, ioe);
    return null;
  } finally {
    Orient.instance().getProfiler().stopChrono(PROFILER_CREATE_RECORD, "Create a record in database", timer, "db.*.createRecord");
  }
}
@Override
protected ORawBuffer readRecord(final OCluster iClusterSegment, final ORecordId iRid, boolean iAtomicLock, boolean loadTombstones) {
  // Reads a record's raw content. Returns null when the record is deleted,
  // invalid, or an I/O error occurs (the error is logged, not rethrown).
  // When loadTombstones is set, a tombstoned record is returned with null
  // content but its version and type preserved.
  if (!iRid.isPersistent())
    throw new IllegalArgumentException("Cannot read record " + iRid + " since the position is invalid in database '" + name
        + '\'');

  // NOT FOUND: SEARCH IT IN THE STORAGE
  final long timer = Orient.instance().getProfiler().startChrono();

  // GET LOCK ONLY IF IT'S IN ATOMIC-MODE (SEE THE PARAMETER iAtomicLock)
  // USUALLY BROWSING OPERATIONS (QUERY) AVOID ATOMIC LOCKING
  // TO IMPROVE PERFORMANCES BY LOCKING THE ENTIRE CLUSTER FROM THE
  // OUTSIDE.
  if (iAtomicLock)
    lock.acquireSharedLock();

  try {
    lockManager.acquireLock(Thread.currentThread(), iRid, LOCK.SHARED);
    try {
      final OPhysicalPosition ppos = iClusterSegment.getPhysicalPosition(new OPhysicalPosition(iRid.clusterPosition));

      if (ppos != null && loadTombstones && ppos.recordVersion.isTombstone())
        return new ORawBuffer(null, ppos.recordVersion, ppos.recordType);

      if (ppos == null || !checkForRecordValidity(ppos))
        // DELETED
        return null;

      final ODataLocal data = getDataSegmentById(ppos.dataSegmentId);
      return new ORawBuffer(data.getRecord(ppos.dataSegmentPos), ppos.recordVersion, ppos.recordType);

    } finally {
      lockManager.releaseLock(Thread.currentThread(), iRid, LOCK.SHARED);
    }

  } catch (IOException e) {
    OLogManager.instance().error(this, "Error on reading record " + iRid + " (cluster: " + iClusterSegment + ')', e);
    return null;
  } finally {
    if (iAtomicLock)
      lock.releaseSharedLock();

    Orient.instance().getProfiler().stopChrono(PROFILER_READ_RECORD, "Read a record from database", timer, "db.*.readRecord");
  }
}
protected OPhysicalPosition updateRecord(final OCluster iClusterSegment, final ORecordId rid, final byte[] recordContent,
    final ORecordVersion recordVersion, final byte iRecordType) {
  // Core record update. Caller must hold the exclusive storage lock.
  // Version-control semantics (recordVersion.getCounter()):
  //   -1 -> no MVCC check, increment the stored version
  //   -2 -> no MVCC check, leave the stored version untouched
  //   >= 0 -> MVCC: must equal the stored version, then increment
  //   other negative (rollback mode) -> overwrite the stored version
  // Returns the updated physical position, or null on invalid record / I/O
  // failure (the I/O error is logged, not rethrown).
  assert (lock.assertExclusiveLockHold());
  if (iClusterSegment == null)
    throw new OStorageException("Cluster not defined for record: " + rid);

  final long timer = Orient.instance().getProfiler().startChrono();

  try {
    // GET THE SHARED LOCK AND GET AN EXCLUSIVE LOCK AGAINST THE RECORD
    lockManager.acquireLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
    try {
      // UPDATE IT
      final OPhysicalPosition ppos = iClusterSegment.getPhysicalPosition(new OPhysicalPosition(rid.clusterPosition));
      if (!checkForRecordValidity(ppos))
        return null;

      // VERSION CONTROL CHECK
      switch (recordVersion.getCounter()) {
      // DOCUMENT UPDATE, NO VERSION CONTROL
      case -1:
        ppos.recordVersion.increment();
        iClusterSegment.updateVersion(rid.clusterPosition, ppos.recordVersion);
        break;

      // DOCUMENT UPDATE, NO VERSION CONTROL, NO VERSION UPDATE
      case -2:
        break;

      default:
        // MVCC CONTROL AND RECORD UPDATE OR WRONG VERSION VALUE
        if (recordVersion.getCounter() > -1) {
          // MVCC TRANSACTION: CHECK IF VERSION IS THE SAME
          if (!recordVersion.equals(ppos.recordVersion))
            if (OFastConcurrentModificationException.enabled())
              throw OFastConcurrentModificationException.instance();
            else
              throw new OConcurrentModificationException(rid, ppos.recordVersion, recordVersion, ORecordOperation.UPDATED);
          ppos.recordVersion.increment();
          iClusterSegment.updateVersion(rid.clusterPosition, ppos.recordVersion);
        } else {
          // DOCUMENT ROLLBACKED
          recordVersion.clearRollbackMode();
          ppos.recordVersion.copyFrom(recordVersion);
          iClusterSegment.updateVersion(rid.clusterPosition, ppos.recordVersion);
        }
      }

      if (ppos.recordType != iRecordType)
        iClusterSegment.updateRecordType(rid.clusterPosition, iRecordType);

      final long newDataSegmentOffset;
      if (ppos.dataSegmentPos == -1)
        // WAS EMPTY FIRST TIME, CREATE IT NOW
        newDataSegmentOffset = getDataSegmentById(ppos.dataSegmentId).addRecord(rid, recordContent);
      else
        newDataSegmentOffset = getDataSegmentById(ppos.dataSegmentId).setRecord(ppos.dataSegmentPos, rid, recordContent);

      if (newDataSegmentOffset != ppos.dataSegmentPos) {
        // UPDATE DATA SEGMENT OFFSET WITH THE NEW PHYSICAL POSITION
        iClusterSegment.updateDataSegmentPosition(ppos.clusterPosition, ppos.dataSegmentId, newDataSegmentOffset);
        ppos.dataSegmentPos = newDataSegmentOffset;
      }

      return ppos;

    } finally {
      lockManager.releaseLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
    }

  } catch (IOException e) {
    OLogManager.instance().error(this, "Error on updating record " + rid + " (cluster: " + iClusterSegment + ")", e);
  } finally {
    Orient.instance().getProfiler().stopChrono(PROFILER_UPDATE_RECORD, "Update a record to database", timer, "db.*.updateRecord");
  }

  return null;
}
protected OPhysicalPosition deleteRecord(final OCluster iClusterSegment, final ORecordId iRid, final ORecordVersion iVersion,
    boolean useTombstones) {
  // Core record deletion. Caller must hold the exclusive storage lock.
  // With useTombstones the position is converted to a tombstone (if the
  // cluster supports it) instead of being removed. Returns the deleted
  // position, or null when already deleted / on I/O failure (logged).
  assert (lock.assertExclusiveLockHold());
  final long timer = Orient.instance().getProfiler().startChrono();

  try {
    lockManager.acquireLock(Thread.currentThread(), iRid, LOCK.EXCLUSIVE);
    try {
      final OPhysicalPosition ppos = iClusterSegment.getPhysicalPosition(new OPhysicalPosition(iRid.clusterPosition));

      if (ppos == null || ppos.dataSegmentId < 0 || (useTombstones && ppos.recordVersion.isTombstone()))
        // ALREADY DELETED
        return null;

      // MVCC TRANSACTION: CHECK IF VERSION IS THE SAME
      if (iVersion.getCounter() > -1 && !ppos.recordVersion.equals(iVersion))
        if (OFastConcurrentModificationException.enabled())
          throw OFastConcurrentModificationException.instance();
        else
          throw new OConcurrentModificationException(iRid, ppos.recordVersion, iVersion, ORecordOperation.DELETED);

      if (!ppos.recordVersion.isTombstone() && ppos.dataSegmentPos > -1) {
        try {
          getDataSegmentById(ppos.dataSegmentId).deleteRecord(ppos.dataSegmentPos);
        } catch (OIOException e) {
          // Deliberate best-effort: the cluster entry is removed regardless.
          OLogManager.instance().error(this, "Cannot remove the record in data segment, however remove it from cluster", e);
        }
      }

      if (useTombstones && iClusterSegment.hasTombstonesSupport())
        iClusterSegment.convertToTombstone(iRid.clusterPosition);
      else
        iClusterSegment.removePhysicalPosition(iRid.clusterPosition);

      return ppos;
    } finally {
      lockManager.releaseLock(Thread.currentThread(), iRid, LOCK.EXCLUSIVE);
    }
  } catch (IOException e) {
    OLogManager.instance().error(this, "Error on deleting record " + iRid + "( cluster: " + iClusterSegment + ")", e);
  } finally {
    Orient.instance().getProfiler()
        .stopChrono(PROFILER_DELETE_RECORD, "Delete a record from database", timer, "db.*.deleteRecord");
  }

  return null;
}
@Override
public boolean cleanOutRecord(ORecordId recordId, ORecordVersion recordVersion, int iMode, ORecordCallback<Boolean> callback) {
  // Physically removes a record, bypassing tombstones (useTombstones = false).
  // NOTE(review): unlike deleteRecord(), the storage-level exclusive lock is
  // not taken here, yet the protected deleteRecord asserts it is held —
  // confirm callers already hold it or this assertion can fire under -ea.
  final OCluster cluster = getClusterById(recordId.clusterId);

  modificationLock.requestModificationLock();
  try {
    final OPhysicalPosition ppos = deleteRecord(cluster, recordId, recordVersion, false);

    if (ppos != null
        && (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean() || clustersToSyncImmediately.contains(cluster
            .getName())))
      synchRecordUpdate(cluster, ppos);

    final boolean returnValue = ppos != null;

    if (callback != null)
      callback.call(recordId, returnValue);

    return returnValue;
  } finally {
    modificationLock.releaseModificationLock();
  }
}
private void installProfilerHooks() {
  // Registers lazy profiler gauges for hole count and hole size; the values
  // are computed on demand by the profiler, not sampled here.
  Orient
      .instance()
      .getProfiler()
      .registerHookValue("db." + name + ".data.holes", "Number of the holes in database", METRIC_TYPE.COUNTER,
          new OProfilerHookValue() {
            public Object getValue() {
              return getHoles();
            }
          }, "db.*.data.holes");

  Orient
      .instance()
      .getProfiler()
      .registerHookValue("db." + name + ".data.holeSize", "Size of the holes in database", METRIC_TYPE.SIZE,
          new OProfilerHookValue() {
            public Object getValue() {
              return getHoleSize();
            }
          }, "db.*.data.holeSize");
}
private void uninstallProfilerHooks() {
  // Mirror of installProfilerHooks(): removes the two hole gauges.
  Orient.instance().getProfiler().unregisterHookValue("db." + name + ".data.holes");
  Orient.instance().getProfiler().unregisterHookValue("db." + name + ".data.holeSize");
}
private void formatMessage(final boolean iVerbose, final OCommandOutputListener iListener, final String iMessage,
    final Object... iArgs) {
  // Emits a printf-style formatted message, but only when verbose output was
  // requested.
  if (!iVerbose)
    return;

  iListener.onMessage(String.format(iMessage, iArgs));
}
public void freeze(boolean throwException) {
  // Blocks further modifications (throwing or waiting per throwException),
  // flushes everything to disk and marks all components softly closed so a
  // backup taken now is consistent. Paired with release().
  // NOTE(review): if synch() or a setSoftlyClosed() call throws, modifications
  // stay prohibited with no unwind here — confirm callers handle that.
  modificationLock.prohibitModifications(throwException);
  synch();

  try {
    for (OCluster cluster : clusters)
      if (cluster != null)
        cluster.setSoftlyClosed(true);

    for (ODataLocal data : dataSegments)
      if (data != null)
        data.setSoftlyClosed(true);

    if (configuration != null)
      configuration.setSoftlyClosed(true);

  } catch (IOException e) {
    throw new OStorageException("Error on freeze storage '" + name + "'", e);
  }
}
public void release() {
  // Reverses freeze(): clears the softly-closed flags first, then re-allows
  // modifications only after every component was successfully reopened.
  try {
    for (OCluster cluster : clusters)
      if (cluster != null)
        cluster.setSoftlyClosed(false);

    for (ODataLocal data : dataSegments)
      if (data != null)
        data.setSoftlyClosed(false);

    if (configuration != null)
      configuration.setSoftlyClosed(false);

  } catch (IOException e) {
    throw new OStorageException("Error on release storage '" + name + "'", e);
  }

  modificationLock.allowModifications();
}
public boolean wasClusterSoftlyClosed(String clusterName) {
  // Non-local clusters (or unknown names) are reported as softly closed;
  // only OClusterLocal tracks the flag.
  final OCluster indexCluster = clusterMap.get(clusterName);
  return !(indexCluster instanceof OClusterLocal) || ((OClusterLocal) indexCluster).isSoftlyClosed();
}
@Override
public String getType() {
  // Engine identifier for this storage implementation.
  return OEngineLocal.NAME;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OStorageLocal.java
|
143 |
@Test
public class FloatSerializerTest {
  // Serialized size of a float, in bytes.
  private static final int   FIELD_SIZE = 4;
  private static final Float OBJECT     = 3.14f;
  private OFloatSerializer   floatSerializer;
  // Shared scratch buffer; each test writes it before reading it back.
  byte[]                     stream     = new byte[FIELD_SIZE];

  @BeforeClass
  public void beforeClass() {
    floatSerializer = new OFloatSerializer();
  }

  public void testFieldSize() {
    Assert.assertEquals(floatSerializer.getObjectSize(null), FIELD_SIZE);
  }

  public void testSerialize() {
    // Round-trip through the standard byte-array serialization.
    floatSerializer.serialize(OBJECT, stream, 0);
    Assert.assertEquals(floatSerializer.deserialize(stream, 0), OBJECT);
  }

  public void testSerializeNative() {
    // Round-trip through the native byte-order serialization.
    floatSerializer.serializeNative(OBJECT, stream, 0);
    Assert.assertEquals(floatSerializer.deserializeNative(stream, 0), OBJECT);
  }

  public void testNativeDirectMemoryCompatibility() {
    // A native-serialized value must be readable back from direct memory.
    floatSerializer.serializeNative(OBJECT, stream, 0);

    ODirectMemoryPointer pointer = new ODirectMemoryPointer(stream);
    try {
      Assert.assertEquals(floatSerializer.deserializeFromDirectMemory(pointer, 0), OBJECT);
    } finally {
      // Direct memory is not GC-managed; free it explicitly.
      pointer.free();
    }
  }
}
| 0true
|
commons_src_test_java_com_orientechnologies_common_serialization_types_FloatSerializerTest.java
|
501 |
/**
 * Action descriptor for the "create index" admin operation. Stateless
 * singleton: use {@link #INSTANCE}; the private constructor prevents
 * additional instances.
 */
public class CreateIndexAction extends IndicesAction<CreateIndexRequest, CreateIndexResponse, CreateIndexRequestBuilder> {

  public static final CreateIndexAction INSTANCE = new CreateIndexAction();
  public static final String NAME = "indices/create";

  private CreateIndexAction() {
    super(NAME);
  }

  @Override
  public CreateIndexResponse newResponse() {
    return new CreateIndexResponse();
  }

  @Override
  public CreateIndexRequestBuilder newRequestBuilder(IndicesAdminClient client) {
    return new CreateIndexRequestBuilder(client);
  }
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_create_CreateIndexAction.java
|
182 |
/**
 * Renders a CMS page that was already resolved by the handler mapping: the
 * page DTO is read from the request and exposed to the view under the
 * "page" model attribute.
 */
public class BroadleafPageController extends BroadleafAbstractController implements Controller {

    protected static String MODEL_ATTRIBUTE_NAME="page";

    @Override
    public ModelAndView handleRequest(HttpServletRequest request, HttpServletResponse response) throws Exception {
        // PageHandlerMapping has already resolved the page and stashed it on
        // the request before this controller runs.
        PageDTO page = (PageDTO) request.getAttribute(PageHandlerMapping.PAGE_ATTRIBUTE_NAME);
        assert page != null;

        ModelAndView mav = new ModelAndView(page.getTemplatePath());
        mav.addObject(MODEL_ATTRIBUTE_NAME, page);
        return mav;
    }
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_web_controller_BroadleafPageController.java
|
122 |
/**
 * Base class for page-rule processors that evaluate boolean MVEL expressions,
 * caching compiled expressions for reuse.
 */
public abstract class AbstractPageRuleProcessor implements PageRuleProcessor {

    private static final Log LOG = LogFactory.getLog(AbstractPageRuleProcessor.class);

    // Caches compiled MVEL expressions (LRU, max 1000 entries); compilation is
    // the expensive step, so cache hits dominate rule-evaluation cost.
    private Map expressionCache = Collections.synchronizedMap(new LRUMap(1000));
    private ParserContext parserContext;
    private Map<String, String> contextClassNames = new HashMap<String, String> ();

    /**
     * Having a parser context that imports the classes speeds MVEL by up to 60%.
     * @return the lazily-built, cached parser context
     */
    protected ParserContext getParserContext() {
        if (parserContext == null) {
            parserContext = new ParserContext();
            parserContext.addImport("MVEL", MVEL.class);
            parserContext.addImport("MvelHelper", MvelHelper.class);
            /* Getting errors when the following is in place.
            for (String key : contextClassNames.keySet()) {
                String className = contextClassNames.get(key);
                try {
                    Class c = Class.forName(className);
                    parserContext.addImport(key, c);
                } catch (ClassNotFoundException e) {
                    LOG.error("Error resolving classname while setting up MVEL context, rule processing based on the key " + key + " will not be optimized", e);
                }
            } */
        }
        return parserContext;
    }

    /**
     * Helpful method for processing a boolean MVEL expression and associated arguments.
     *
     * Caches the expression in an LRUCache.
     * @param expression
     * @param vars
     * @return the result of the expression; {@code false} when the expression
     *         fails to compile or throws during evaluation
     */
    protected Boolean executeExpression(String expression, Map<String, Object> vars) {
        Serializable exp = (Serializable) expressionCache.get(expression);
        vars.put("MVEL", MVEL.class);

        if (exp == null) {
            try {
                exp = MVEL.compileExpression(expression, getParserContext());
            } catch (CompileException ce) {
                // A rule that cannot compile is treated as non-matching.
                LOG.warn("Compile exception processing phrase: " + expression,ce);
                return Boolean.FALSE;
            }
            expressionCache.put(expression, exp);
        }

        try {
            return (Boolean) MVEL.executeExpression(exp, vars);
        } catch (Exception e) {
            // FIX: previously LOG.error(e) passed the exception as the message
            // object, losing the failing expression and (with some logger
            // bindings) the stack trace. Log a message plus the throwable.
            LOG.error("Unable to execute expression: " + expression, e);
        }

        return Boolean.FALSE;
    }

    /**
     * List of class names to add to the MVEL ParserContext.
     *
     * @return
     * @see {@link ParserContext}
     */
    public Map<String, String> getContextClassNames() {
        return contextClassNames;
    }

    /**
     * Sets the class names to add to the MVEL ParserContext.
     *
     * @param contextClassNames
     * @see {@link ParserContext}
     */
    public void setContextClassNames(Map<String, String> contextClassNames) {
        this.contextClassNames = contextClassNames;
    }
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_service_AbstractPageRuleProcessor.java
|
5,346 |
/**
 * Shared parser for metrics aggregations over a numeric values source.
 * Understands the keys: "field", "script", "lang", "params" (object) and
 * "script_values_sorted" (boolean); subclasses supply the concrete factory.
 */
public abstract class ValuesSourceMetricsAggregatorParser<S extends MetricsAggregation> implements Aggregator.Parser {

    // Subclasses override to request pre-sorted script values.
    protected boolean requiresSortedValues() {
        return false;
    }

    @Override
    public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {

        ValuesSourceConfig<NumericValuesSource> config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);

        String field = null;
        String script = null;
        String scriptLang = null;
        Map<String, Object> scriptParams = null;
        boolean assumeSorted = false;

        XContentParser.Token token;
        String currentFieldName = null;
        // Token-by-token walk of the aggregation body; unknown keys raise
        // a SearchParseException naming the offending key.
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token == XContentParser.Token.VALUE_STRING) {
                if ("field".equals(currentFieldName)) {
                    field = parser.text();
                } else if ("script".equals(currentFieldName)) {
                    script = parser.text();
                } else if ("lang".equals(currentFieldName)) {
                    scriptLang = parser.text();
                } else {
                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
                }
            } else if (token == XContentParser.Token.START_OBJECT) {
                if ("params".equals(currentFieldName)) {
                    scriptParams = parser.map();
                } else {
                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
                }
            } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
                if ("script_values_sorted".equals(currentFieldName)) {
                    assumeSorted = parser.booleanValue();
                } else {
                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
                }
            } else {
                throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
            }
        }

        if (script != null) {
            config.script(context.scriptService().search(context.lookup(), scriptLang, script, scriptParams));
        }

        // Only force sorting when the caller did not promise sorted values.
        if (!assumeSorted && requiresSortedValues()) {
            config.ensureSorted(true);
        }

        // No field: the aggregation runs purely on the script.
        if (field == null) {
            return createFactory(aggregationName, config);
        }

        // Unmapped fields still produce a factory, flagged as unmapped.
        FieldMapper<?> mapper = context.smartNameFieldMapper(field);
        if (mapper == null) {
            config.unmapped(true);
            return createFactory(aggregationName, config);
        }

        IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
        config.fieldContext(new FieldContext(field, indexFieldData));
        return createFactory(aggregationName, config);
    }

    protected abstract AggregatorFactory createFactory(String aggregationName, ValuesSourceConfig<NumericValuesSource> config);
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_metrics_ValuesSourceMetricsAggregatorParser.java
|
180 |
/**
 * Tests for {@code OByteBufferUtils}: verifies that primitive values split
 * across the boundary of two byte buffers can be merged back losslessly.
 * <p>
 * The original version repeated the same position/split/merge/assert sequence
 * dozens of times; the sequences are now factored into round-trip helpers and
 * loops over the boundary positions, preserving exactly the same coverage.
 */
public class OByteBufferUtilsTest {
  private ByteBuffer buffer1;
  private ByteBuffer buffer2;

  @BeforeMethod
  public void setUp() throws Exception {
    buffer1 = ByteBuffer.allocate(10);
    buffer2 = ByteBuffer.allocate(10);
  }

  @Test
  public void testSplitShort() throws Exception {
    // Position 9 leaves only one byte in buffer1, forcing the 2-byte value to span both buffers.
    assertShortRoundTrip((short) 42, 9);
    assertShortRoundTrip((short) 251, 9);
  }

  @Test
  public void testSplitLong() throws Exception {
    // Positions 3..9 leave 7..1 bytes in buffer1, so the 8-byte value always spans the boundary.
    for (int position = 3; position <= 9; position++) {
      assertLongRoundTrip(42L, position);
    }
    for (int position = 3; position <= 9; position++) {
      assertLongRoundTrip(2512513332512512344L, position);
    }
  }

  @Test
  public void testSplitInt() throws Exception {
    // Positions 7..9 leave 3..1 bytes in buffer1, so the 4-byte value always spans the boundary.
    for (int position = 7; position <= 9; position++) {
      assertIntRoundTrip(42, position);
    }
    for (int position = 7; position <= 9; position++) {
      assertIntRoundTrip(251251333, position);
    }
  }

  @Test
  public void testSpecialSplitShort() throws Exception {
    // Two one-byte windows over the same backing array must produce the same
    // byte layout as a plain ByteBuffer.putShort would.
    byte[] array = new byte[10];
    ByteBuffer part1 = ByteBuffer.wrap(array, 0, 1);
    ByteBuffer part2 = ByteBuffer.wrap(array, 1, 1);
    ByteBuffer all = ByteBuffer.wrap(array, 0, 2);
    short value = Short.MAX_VALUE;
    OByteBufferUtils.splitShortToBuffers(part1, part2, value);
    all.position(0);
    short storedValue = all.getShort();
    assertEquals(value, storedValue);
  }

  @Test
  public void testSpecialSplitInteger() throws Exception {
    byte[] array = new byte[10];
    ByteBuffer part1 = ByteBuffer.wrap(array, 0, 2);
    ByteBuffer part2 = ByteBuffer.wrap(array, 2, 2);
    ByteBuffer all = ByteBuffer.wrap(array, 0, 4);
    int value = Integer.MAX_VALUE;
    OByteBufferUtils.splitIntToBuffers(part1, part2, value);
    all.position(0);
    int storedValue = all.getInt();
    assertEquals(value, storedValue);
  }

  @Test
  public void testSpecialSplitLong() throws Exception {
    byte[] array = new byte[10];
    ByteBuffer part1 = ByteBuffer.wrap(array, 0, 4);
    ByteBuffer part2 = ByteBuffer.wrap(array, 4, 4);
    ByteBuffer all = ByteBuffer.wrap(array, 0, 8);
    long value = Long.MAX_VALUE;
    OByteBufferUtils.splitLongToBuffers(part1, part2, value);
    all.position(0);
    long storedValue = all.getLong();
    assertEquals(value, storedValue);
  }

  /** Splits {@code value} at {@code position} of buffer1, merges it back and asserts equality. */
  private void assertShortRoundTrip(short value, int position) {
    buffer1.position(position);
    buffer2.position(0);
    OByteBufferUtils.splitShortToBuffers(buffer1, buffer2, value);
    buffer1.position(position);
    buffer2.position(0);
    assertEquals(OByteBufferUtils.mergeShortFromBuffers(buffer1, buffer2), value);
  }

  /** Splits {@code value} at {@code position} of buffer1, merges it back and asserts equality. */
  private void assertIntRoundTrip(int value, int position) {
    buffer1.position(position);
    buffer2.position(0);
    OByteBufferUtils.splitIntToBuffers(buffer1, buffer2, value);
    buffer1.position(position);
    buffer2.position(0);
    assertEquals(OByteBufferUtils.mergeIntFromBuffers(buffer1, buffer2), value);
  }

  /** Splits {@code value} at {@code position} of buffer1, merges it back and asserts equality. */
  private void assertLongRoundTrip(long value, int position) {
    buffer1.position(position);
    buffer2.position(0);
    OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, value);
    buffer1.position(position);
    buffer2.position(0);
    assertEquals(OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2), value);
  }
}
| 0true
|
core_src_test_java_com_orientechnologies_common_util_OByteBufferUtilsTest.java
|
3,204 |
/**
 * Carries a single replicated-map update (put or remove) from the originating
 * cluster member to the other members. A {@code null} value denotes a removal
 * (see {@link #isRemove()}). Serialized via the
 * {@code IdentifiedDataSerializable} factory mechanism.
 *
 * @param <K> key type of the replicated map
 * @param <V> value type of the replicated map
 */
public class ReplicationMessage<K, V>
        implements IdentifiedDataSerializable {

    private String name;
    private K key;
    private V value;
    private VectorClock vectorClock;
    private Member origin;
    private int updateHash;
    private long ttlMillis;

    /** Required no-arg constructor for deserialization. */
    public ReplicationMessage() {
    }

    public ReplicationMessage(String name, K key, V v, VectorClock vectorClock, Member origin, int hash, long ttlMillis) {
        this.name = name;
        this.key = key;
        this.value = v;
        this.vectorClock = vectorClock;
        this.origin = origin;
        this.updateHash = hash;
        this.ttlMillis = ttlMillis;
    }

    /** @return the name of the replicated map this update belongs to */
    public String getName() {
        return name;
    }

    public K getKey() {
        return key;
    }

    public V getValue() {
        return value;
    }

    /** @return the vector clock used to order concurrent updates */
    public VectorClock getVectorClock() {
        return vectorClock;
    }

    /** @return the member that produced this update */
    public Member getOrigin() {
        return origin;
    }

    public long getTtlMillis() {
        return ttlMillis;
    }

    public int getUpdateHash() {
        return updateHash;
    }

    /** A message without a value represents a remove operation. */
    public boolean isRemove() {
        return value == null;
    }

    @Override
    public void writeData(ObjectDataOutput out)
            throws IOException {
        out.writeUTF(name);
        out.writeObject(key);
        out.writeObject(value);
        vectorClock.writeData(out);
        origin.writeData(out);
        out.writeInt(updateHash);
        out.writeLong(ttlMillis);
    }

    /** Reads fields in exactly the order written by {@link #writeData(ObjectDataOutput)}. */
    @SuppressWarnings("unchecked")
    @Override
    public void readData(ObjectDataInput in)
            throws IOException {
        name = in.readUTF();
        key = (K) in.readObject();
        value = (V) in.readObject();
        vectorClock = new VectorClock();
        vectorClock.readData(in);
        origin = new MemberImpl();
        origin.readData(in);
        updateHash = in.readInt();
        ttlMillis = in.readLong();
    }

    @Override
    public int getFactoryId() {
        return ReplicatedMapDataSerializerHook.F_ID;
    }

    @Override
    public int getId() {
        return ReplicatedMapDataSerializerHook.REPL_UPDATE_MESSAGE;
    }

    @Override
    public String toString() {
        // Fix: 'name' was previously omitted, making it impossible to tell
        // which replicated map a logged message belonged to.
        return "ReplicationMessage{" + "name=" + name + ", key=" + key + ", value=" + value + ", vectorClock=" + vectorClock
                + ", origin=" + origin + ", updateHash=" + updateHash + ", ttlMillis=" + ttlMillis + '}';
    }
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_replicatedmap_messages_ReplicationMessage.java
|
377 |
/**
 * ReducerFactory that hands out a fresh {@code TestReducer} instance for
 * every key; the key itself is not used to configure the reducer.
 */
public static class TestReducerFactory
        implements ReducerFactory<String, Integer, Integer> {

    @Override
    public Reducer<String, Integer, Integer> newReducer(String key) {
        // A new reducer per key, so reducers never share state across keys.
        return new TestReducer();
    }
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_DistributedMapperClientMapReduceTest.java
|
411 |
/**
 * Handles the "Clean Imports" command for the Ceylon editor: removes unused
 * imports, groups the remaining imports by package (alphabetically, via a
 * TreeMap), preserves aliases, nested member lists and wildcards, and can
 * additionally insert proposed imports for unresolved references.
 */
public class CleanImportsHandler extends AbstractHandler {

    /** Command-framework entry point: cleans the imports of the active editor's document. */
    @Override
    public Object execute(ExecutionEvent event)
            throws ExecutionException {
        CeylonEditor editor = (CeylonEditor) getCurrentEditor();
        IDocument doc = editor.getCeylonSourceViewer().getDocument();
        cleanImports(editor.getParseController(), doc);
        return null;
    }

    /**
     * Rewrites the document's import section in place with the reorganized
     * import text. Does nothing when type analysis has not completed, or when
     * the rewritten text would be identical to the existing one.
     */
    public static void cleanImports(CeylonParseController cpc, IDocument doc) {
        if (!isEnabled(cpc)) return;
        Tree.CompilationUnit rootNode = cpc.getRootNode();
        if (rootNode!=null) {
            String imports = imports(rootNode, doc);
            if (imports!=null &&
                !(imports.trim().isEmpty() &&
                        rootNode.getImportList().getImports().isEmpty())) {
                Tree.ImportList il = rootNode.getImportList();
                int start;
                int length;
                String extra;
                if (il==null || il.getImports().isEmpty()) {
                    // No existing import list: insert at the very top of the
                    // file and add a trailing line break.
                    start=0;
                    length=0;
                    extra=getDefaultLineDelimiter(doc);
                }
                else {
                    start = il.getStartIndex();
                    length = il.getStopIndex()-il.getStartIndex()+1;
                    extra="";
                }
                try {
                    // Only touch the document when the text actually changes,
                    // to avoid dirtying the editor needlessly.
                    if (!doc.get(start, length).equals(imports+extra)) {
                        DocumentChange change =
                                new DocumentChange("Organize Imports", doc);
                        change.setEdit(new ReplaceEdit(start, length, imports+extra));
                        try {
                            change.perform(new NullProgressMonitor());
                        }
                        catch (CoreException e) {
                            e.printStackTrace();
                        }
                    }
                }
                catch (BadLocationException e) {
                    e.printStackTrace();
                }
            }
        }
    }

    /**
     * Computes reorganized import text for an explicit import list, keeping
     * only imports used within {@code node}; no proposed imports are added.
     */
    public static String imports(Node node, Tree.ImportList til,
            IDocument doc) {
        List<Declaration> unused = new ArrayList<Declaration>();
        DetectUnusedImportsVisitor duiv =
                new DetectUnusedImportsVisitor(unused);
        til.visit(duiv);
        node.visit(duiv);
        return reorganizeImports(til, unused,
                Collections.<Declaration>emptyList(), doc);
    }

    /**
     * Computes reorganized import text for a whole compilation unit, dropping
     * unused imports and adding proposals for unresolved references.
     */
    private static String imports(final Tree.CompilationUnit cu,
            IDocument doc) {
        List<Declaration> proposals = new ArrayList<Declaration>();
        List<Declaration> unused = new ArrayList<Declaration>();
        new ImportProposalsVisitor(cu, proposals).visit(cu);
        new DetectUnusedImportsVisitor(unused).visit(cu);
        return reorganizeImports(cu.getImportList(), unused, proposals, doc);
    }

    /** Builds import text consisting solely of the given proposed declarations. */
    public static String imports(List<Declaration> proposed,
            IDocument doc) {
        return reorganizeImports(null,
                Collections.<Declaration>emptyList(),
                proposed, doc);
    }

    /**
     * Produces the new import section: imports are bucketed per package name
     * (TreeMap keeps packages alphabetical), unused elements are filtered out,
     * proposed declarations are appended to their packages, and a blank line
     * separates groups whose top-level package segment differs.
     */
    public static String reorganizeImports(Tree.ImportList til,
            List<Declaration> unused, List<Declaration> proposed,
            IDocument doc) {
        Map<String,List<Tree.Import>> packages =
                new TreeMap<String,List<Tree.Import>>();
        if (til!=null) {
            for (Tree.Import i: til.getImports()) {
                String pn = packageName(i);
                if (pn!=null) {
                    List<Tree.Import> is = packages.get(pn);
                    if (is==null) {
                        is = new ArrayList<Tree.Import>();
                        packages.put(pn, is);
                    }
                    is.add(i);
                }
            }
        }
        // Ensure every package that a proposed declaration lives in gets an
        // (initially empty) bucket so an import statement is generated for it.
        for (Declaration d: proposed) {
            String pn = d.getUnit().getPackage().getNameAsString();
            if (!packages.containsKey(pn)) {
                packages.put(pn, Collections.<Tree.Import>emptyList());
            }
        }
        StringBuilder builder = new StringBuilder();
        String lastToplevel=null;
        String delim = getDefaultLineDelimiter(doc);
        for (Map.Entry<String, List<Tree.Import>> pack:
                packages.entrySet()) {
            String packageName = pack.getKey();
            List<Tree.Import> imports = pack.getValue();
            boolean hasWildcard = hasWildcard(imports);
            List<Tree.ImportMemberOrType> list =
                    getUsedImportElements(imports, unused,
                            hasWildcard, packages);
            if (hasWildcard || !list.isEmpty() ||
                    imports.isEmpty()) { //in this last case there is no existing import, but imports are proposed
                lastToplevel = appendBreakIfNecessary(lastToplevel,
                        packageName, builder, doc);
                Referenceable packageModel = imports.isEmpty() ?
                        null : //TODO: what to do in this case? look up the Package where?
                        imports.get(0).getImportPath().getModel();
                String escapedPackageName = packageModel instanceof Package ?
                        escapePackageName((Package) packageModel) :
                        packageName;
                if (builder.length()!=0) {
                    builder.append(delim);
                }
                builder.append("import ")
                    .append(escapedPackageName)
                    .append(" {");
                appendImportElements(packageName, list, unused,
                        proposed, hasWildcard, builder, doc);
                builder.append(delim).append("}");
            }
        }
        return builder.toString();
    }

    /** Returns true if any of the given imports contains the "..." wildcard. */
    private static boolean hasWildcard(List<Tree.Import> imports) {
        boolean hasWildcard = false;
        for (Tree.Import i: imports) {
            hasWildcard = hasWildcard ||
                    i!=null && i.getImportMemberOrTypeList()
                            .getImportWildcard()!=null;
        }
        return hasWildcard;
    }

    /**
     * Appends a blank line when the top-level package segment changes (e.g.
     * between "ceylon.*" and "com.*") and returns the new current segment.
     */
    private static String appendBreakIfNecessary(String lastToplevel,
            String currentPackage, StringBuilder builder, IDocument doc) {
        int di = currentPackage.indexOf('.');
        String topLevel = di<0 ?
                currentPackage:currentPackage.substring(0, di);
        if (lastToplevel!=null && !topLevel.equals(lastToplevel)) {
            builder.append(getDefaultLineDelimiter(doc));
        }
        return topLevel;
    }

    /**
     * Writes the body of one import statement: kept elements (with their
     * aliases and nested member lists), then proposed declarations belonging
     * to this package, then the wildcard if present. When no wildcard
     * terminates the list, the trailing comma is stripped again.
     */
    private static void appendImportElements(String packageName,
            List<Tree.ImportMemberOrType> elements, List<Declaration> unused,
            List<Declaration> proposed, boolean hasWildcard,
            StringBuilder builder, IDocument doc) {
        String indent = getDefaultIndent();
        for (Tree.ImportMemberOrType i: elements) {
            // Skip elements whose identifier failed to parse or typecheck.
            if (i.getDeclarationModel()!=null &&
                    i.getIdentifier().getErrors().isEmpty() &&
                    i.getErrors().isEmpty()) {
                builder.append(getDefaultLineDelimiter(doc))
                    .append(indent);
                if (!i.getImportModel().getAlias()
                        .equals(i.getDeclarationModel().getName())) {
                    String escapedAlias = escapeAliasedName(i.getDeclarationModel(),
                            i.getImportModel().getAlias());
                    builder.append(escapedAlias).append("=");
                }
                builder.append(escapeName(i.getDeclarationModel()));
                appendNestedImportElements(i, unused, builder, doc);
                builder.append(",");
            }
        }
        for (Declaration d: proposed) {
            if (d.getUnit().getPackage().getNameAsString()
                    .equals(packageName)) {
                builder.append(getDefaultLineDelimiter(doc))
                    .append(indent);
                builder.append(escapeName(d)).append(",");
            }
        }
        if (hasWildcard) {
            builder.append(getDefaultLineDelimiter(doc))
                .append(indent)
                .append("...");
        }
        else {
            // remove trailing ,
            builder.setLength(builder.length()-1);
        }
    }

    /**
     * Writes the nested "{ ... }" member list of an import element, keeping
     * only used members; if nothing was kept, the already-appended " {" is
     * removed again by truncating the builder.
     */
    private static void appendNestedImportElements(Tree.ImportMemberOrType imt,
            List<Declaration> unused, StringBuilder builder, IDocument doc) {
        String indent = getDefaultIndent();
        if (imt.getImportMemberOrTypeList()!=null) {
            builder.append(" {");
            boolean found=false;
            for (Tree.ImportMemberOrType nimt:
                    imt.getImportMemberOrTypeList()
                        .getImportMemberOrTypes()) {
                if (nimt.getDeclarationModel()!=null &&
                        nimt.getIdentifier().getErrors().isEmpty() &&
                        nimt.getErrors().isEmpty()) {
                    if (!unused.contains(nimt.getDeclarationModel())) {
                        found=true;
                        builder.append(getDefaultLineDelimiter(doc))
                            .append(indent).append(indent);
                        if (!nimt.getImportModel().getAlias()
                                .equals(nimt.getDeclarationModel().getName())) {
                            builder.append(nimt.getImportModel().getAlias())
                                .append("=");
                        }
                        builder.append(nimt.getDeclarationModel().getName())
                            .append(",");
                    }
                }
            }
            if (imt.getImportMemberOrTypeList()
                    .getImportWildcard()!=null) {
                found=true;
                builder.append(getDefaultLineDelimiter(doc))
                    .append(indent).append(indent)
                    .append("...,");
            }
            if (found) {
                // remove trailing ","
                builder.setLength(builder.length()-1);
                builder.append(getDefaultLineDelimiter(doc))
                    .append(indent)
                    .append('}');
            } else {
                // remove the " {"
                builder.setLength(builder.length()-2);
            }
        }
    }

    /** True when the node carries an actual analysis error (not merely a usage annotation). */
    private static boolean hasRealErrors(Node node) {
        for (Message m: node.getErrors()) {
            if (m instanceof AnalysisError) {
                return true;
            }
        }
        return false;
    }

    /**
     * Selects the import elements worth keeping: used elements; unused ones
     * that still contain a used nested member (or a nested wildcard); and,
     * when this package is wildcard-imported, only elements with an alias, a
     * nested list, or that must stay to prevent wildcard ambiguity.
     */
    private static List<Tree.ImportMemberOrType> getUsedImportElements(
            List<Tree.Import> imports, List<Declaration> unused,
            boolean hasWildcard, Map<String, List<Tree.Import>> packages) {
        List<Tree.ImportMemberOrType> list =
                new ArrayList<Tree.ImportMemberOrType>();
        for (Tree.Import ti: imports) {
            for (Tree.ImportMemberOrType imt:
                    ti.getImportMemberOrTypeList()
                        .getImportMemberOrTypes()) {
                Declaration dm = imt.getDeclarationModel();
                if (dm!=null &&
                        !hasRealErrors(imt.getIdentifier()) &&
                        !hasRealErrors(imt)) {
                    Tree.ImportMemberOrTypeList nimtl = imt.getImportMemberOrTypeList();
                    if (unused.contains(dm)) {
                        if (nimtl!=null) {
                            for (Tree.ImportMemberOrType nimt:
                                    nimtl.getImportMemberOrTypes()) {
                                Declaration ndm = nimt.getDeclarationModel();
                                if (ndm!=null &&
                                        !hasRealErrors(nimt.getIdentifier()) &&
                                        !hasRealErrors(nimt)) {
                                    if (!unused.contains(ndm)) {
                                        // Parent is unused but a nested member is
                                        // used: keep the whole element.
                                        list.add(imt);
                                        break;
                                    }
                                }
                            }
                            if (nimtl.getImportWildcard()!=null) {
                                list.add(imt);
                            }
                        }
                    }
                    else {
                        if (!hasWildcard ||
                                imt.getAlias()!=null ||
                                nimtl!=null ||
                                preventAmbiguityDueWildcards(dm, packages)) {
                            list.add(imt);
                        }
                    }
                }
            }
        }
        return list;
    }

    /**
     * Returns true when dropping the explicit import of {@code d} would make
     * the name ambiguous: some other wildcard-imported package declares a
     * shared, non-anonymous toplevel member with the same name that is not
     * aliased away.
     */
    private static boolean preventAmbiguityDueWildcards(Declaration d,
            Map<String, List<Tree.Import>> importsMap) {
        Module module = d.getUnit().getPackage().getModule();
        String containerName = d.getContainer().getQualifiedNameString();
        for (Map.Entry<String, List<Tree.Import>> importEntry:
                importsMap.entrySet()) {
            String packageName = importEntry.getKey();
            List<Tree.Import> importList = importEntry.getValue();
            if (!packageName.equals(containerName) &&
                    hasWildcard(importList)) {
                Package p2 = module.getPackage(packageName);
                if (p2 != null) {
                    Declaration d2 = p2.getMember(d.getName(), null, false);
                    if (d2!=null &&
                            d2.isToplevel() &&
                            d2.isShared() &&
                            !d2.isAnonymous() &&
                            !isImportedWithAlias(d2, importList)) {
                        return true;
                    }
                }
            }
        }
        return false;
    }

    /** True when {@code d} is imported under an alias somewhere in the given imports. */
    private static boolean isImportedWithAlias(Declaration d,
            List<Tree.Import> importList) {
        for (Tree.Import i: importList) {
            for (Tree.ImportMemberOrType imt:
                    i.getImportMemberOrTypeList()
                        .getImportMemberOrTypes()) {
                if (d.getName().equals(imt.getIdentifier().getText()) &&
                        imt.getAlias() != null) {
                    return true;
                }
            }
        }
        return false;
    }

    /** Dotted package name of an import, or null when the import path is missing. */
    private static String packageName(Tree.Import i) {
        if (i.getImportPath()!=null) {
            return formatPath(i.getImportPath().getIdentifiers());
        }
        else {
            return null;
        }
    }

    /** Enabled only for a Ceylon editor backed by a workspace file. */
    @Override
    public boolean isEnabled() {
        IEditorPart editor = getCurrentEditor();
        if (super.isEnabled() &&
                editor instanceof CeylonEditor &&
                editor.getEditorInput() instanceof IFileEditorInput) {
            CeylonParseController cpc =
                    ((CeylonEditor) editor).getParseController();
            return isEnabled(cpc);
        }
        else {
            return false;
        }
    }

    /** Enabled once the parse controller has reached type analysis and produced an AST. */
    public static boolean isEnabled(CeylonParseController cpc) {
        return cpc!=null &&
                cpc.getStage().ordinal()>=Stage.TYPE_ANALYSIS.ordinal() &&
                cpc.getRootNode()!=null;
    }

    /** Shows a selection dialog and returns the chosen declaration, or null when cancelled. */
    public static Declaration select(List<Declaration> proposals) {
        CeylonEditor editor = (CeylonEditor) getCurrentEditor();
        ImportSelectionDialog fid =
                new ImportSelectionDialog(editor.getSite().getShell(),
                        proposals);
        if (fid.open() == Window.OK) {
            return (Declaration) fid.getFirstResult();
        }
        else {
            return null;
        }
    }
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_imports_CleanImportsHandler.java
|
207 |
/**
 * Processes one packet received on a client connection: deserializes it into
 * a {@code ClientResponse}, then routes it either to the event handler
 * registered for its call id, or to the pending call future awaiting the
 * response.
 */
private class ClientPacketProcessor implements Runnable {
    final ClientPacket packet;

    ClientPacketProcessor(ClientPacket packet) {
        this.packet = packet;
    }

    @Override
    public void run() {
        final ClientConnection conn = (ClientConnection) packet.getConn();
        final ClientResponse clientResponse = getSerializationService().toObject(packet.getData());
        final int callId = clientResponse.getCallId();
        final Data response = clientResponse.getResponse();
        if (clientResponse.isEvent()) {
            handleEvent(response, callId, conn);
        } else {
            handlePacket(response, clientResponse.isError(), callId, conn);
        }
        // Bookkeeping for in-flight packets on this connection.
        conn.decrementPacketCount();
    }

    /**
     * Completes the pending call registered under {@code callId}. Error
     * payloads are deserialized before notifying so the future receives the
     * throwable rather than raw data. Logs and drops responses with no
     * matching call (e.g. already timed out / deregistered).
     */
    private void handlePacket(Object response, boolean isError, int callId, ClientConnection conn) {
        final ClientCallFuture future = conn.deRegisterCallId(callId);
        if (future == null) {
            LOGGER.warning("No call for callId: " + callId + ", response: " + response);
            return;
        }
        if (isError) {
            response = getSerializationService().toObject(response);
        }
        future.notify(response);
    }

    /**
     * Dispatches an event to the handler registered for {@code callId}; logs
     * and drops the event when no handler is registered for that listener.
     */
    private void handleEvent(Data event, int callId, ClientConnection conn) {
        final EventHandler eventHandler = conn.getEventHandler(callId);
        final Object eventObject = getSerializationService().toObject(event);
        if (eventHandler == null) {
            LOGGER.warning("No eventHandler for callId: " + callId + ", event: " + eventObject + ", conn: " + conn);
            return;
        }
        eventHandler.handle(eventObject);
    }
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_connection_nio_ClientConnectionManagerImpl.java
|
461 |
/**
 * Base class for actions executed via the indices-admin client. Ties the
 * request, response and request-builder generic types together and requires
 * each concrete action to supply a builder bound to an
 * {@code IndicesAdminClient}.
 */
public abstract class IndicesAction<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
        extends GenericAction<Request, Response> {

    protected IndicesAction(String name) {
        super(name);
    }

    /** Creates a new request builder tied to the given indices-admin client. */
    public abstract RequestBuilder newRequestBuilder(IndicesAdminClient client);
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_IndicesAction.java
|
244 |
/**
 * Orders (weight, payload) pairs by {@code output1} (the weight) only;
 * the {@code BytesRef} half of the pair is not consulted.
 */
static final Comparator<Pair<Long,BytesRef>> weightComparator = new Comparator<Pair<Long,BytesRef>> () {
    @Override
    public int compare(Pair<Long,BytesRef> left, Pair<Long,BytesRef> right) {
        return left.output1.compareTo(right.output1);
    }
};
| 0true
|
src_main_java_org_apache_lucene_search_suggest_analyzing_XAnalyzingSuggester.java
|
6,323 |
/**
 * Names of the built-in thread pools; used as keys when configuring and
 * looking up pools by name.
 */
public static class Names {
    public static final String SAME = "same";
    public static final String GENERIC = "generic";
    public static final String GET = "get";
    public static final String INDEX = "index";
    public static final String BULK = "bulk";
    public static final String SEARCH = "search";
    public static final String SUGGEST = "suggest";
    public static final String PERCOLATE = "percolate";
    public static final String MANAGEMENT = "management";
    public static final String FLUSH = "flush";
    public static final String MERGE = "merge";
    public static final String REFRESH = "refresh";
    public static final String WARMER = "warmer";
    public static final String SNAPSHOT = "snapshot";
    public static final String OPTIMIZE = "optimize";
}
| 1no label
|
src_main_java_org_elasticsearch_threadpool_ThreadPool.java
|
827 |
// Drop the class under the storage-level exclusive lock (final 'true' arg)
// so no concurrent schema change can interleave with the removal.
getDatabase().getStorage().callInLock(new Callable<Object>() {
      @Override
      public Object call() throws Exception {
        final OClass cls = classes.get(key);
        if (cls == null)
          throw new OSchemaException("Class " + iClassName + " was not found in current database");
        // Refuse to drop a class that still has subclasses: they would be
        // left with a dangling superclass reference.
        if (cls.getBaseClasses().hasNext())
          throw new OSchemaException("Class " + iClassName
              + " cannot be dropped because it has sub classes. Remove the dependencies before trying to drop it again");
        final StringBuilder cmd = new StringBuilder("drop class ");
        cmd.append(iClassName);
        // Delegate the actual removal to the SQL engine; on success evict the
        // cached entry and reload both database and schema state.
        Object result = getDatabase().command(new OCommandSQL(cmd.toString())).execute();
        if (result instanceof Boolean && (Boolean) result) {
          classes.remove(key);
        }
        getDatabase().reload();
        reload();
        return null;
      }
    }, true);
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_metadata_schema_OSchemaShared.java
|
144 |
/**
 * Ordering constants for admin presentation; higher values sort later.
 */
public static class Order {
    // NOTE(review): 1000 appears to position the "Rules" group relative to
    // other groups defined elsewhere — confirm against sibling Order classes.
    public static final int Rules = 1000;
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentImpl.java
|
1,513 |
/**
 * Thymeleaf processor for the {@code categories} element: looks up the
 * category named by the {@code parentCategory} attribute and exposes its
 * child categories (optionally capped at {@code maxResults}) in the model
 * under the variable named by {@code resultVar}.
 */
@Component("blCategoriesProcessor")
public class CategoriesProcessor extends AbstractModelVariableModifierProcessor {

    @Resource(name = "blCatalogService")
    protected CatalogService catalogService;

    /**
     * Sets the name of this processor to be used in Thymeleaf template
     */
    public CategoriesProcessor() {
        super("categories");
    }

    @Override
    public int getPrecedence() {
        return 10000;
    }

    @Override
    protected void modifyModelAttributes(Arguments arguments, Element element) {
        String resultVar = element.getAttributeValue("resultVar");
        String parentCategory = element.getAttributeValue("parentCategory");
        String unparsedMaxResults = element.getAttributeValue("maxResults");

        // TODO: Potentially write an algorithm that will pick the minimum depth category
        // instead of the first category in the list
        List<Category> categories = catalogService.findCategoriesByName(parentCategory);
        if (categories != null && categories.size() > 0) {
            // gets child categories in order ONLY if they are in the xref table and active
            List<CategoryXref> subcategories = categories.get(0).getChildCategoryXrefs();
            List<Category> results = new ArrayList<Category>();
            // Bug fix: the original dereferenced 'subcategories' (size() and the
            // for-each loop) AFTER the null check, throwing a NullPointerException
            // whenever a category had no child xrefs. All use of the list is now
            // kept inside the guard; an empty result list is published instead.
            if (subcategories != null && !subcategories.isEmpty()) {
                if (StringUtils.isNotEmpty(unparsedMaxResults)) {
                    // Integer.parseInt still throws NumberFormatException on a
                    // malformed maxResults attribute, matching prior behavior.
                    int maxResults = Integer.parseInt(unparsedMaxResults);
                    if (subcategories.size() > maxResults) {
                        subcategories = subcategories.subList(0, maxResults);
                    }
                }
                for (CategoryXref xref : subcategories) {
                    results.add(xref.getSubCategory());
                }
            }
            addToModel(arguments, resultVar, results);
        }
    }
}
| 1no label
|
core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_processor_CategoriesProcessor.java
|
749 |
/**
 * Base page class for SB-tree bonsai buckets; provides helpers to read and
 * write {@code OBonsaiBucketPointer}s, which are laid out on the page as an
 * 8-byte page index immediately followed by a 4-byte page offset.
 */
public class OBonsaiBucketAbstract extends ODurablePage {
  public OBonsaiBucketAbstract(ODirectMemoryPointer pagePointer, TrackMode trackMode) {
    super(pagePointer, trackMode);
  }

  /**
   * Writes the bucket pointer at the given offset within this page: page
   * index (long) first, then the offset inside that page (int).
   */
  protected void setBucketPointer(int pageOffset, OBonsaiBucketPointer value) throws IOException {
    setLongValue(pageOffset, value.getPageIndex());
    setIntValue(pageOffset + OLongSerializer.LONG_SIZE, value.getPageOffset());
  }

  /**
   * Reads a bucket pointer previously written with
   * {@link #setBucketPointer(int, OBonsaiBucketPointer)}.
   *
   * @param freePointer page offset where the pointer was stored
   */
  protected OBonsaiBucketPointer getBucketPointer(int freePointer) {
    final long pageIndex = getLongValue(freePointer);
    final int pageOffset = getIntValue(freePointer + OLongSerializer.LONG_SIZE);
    return new OBonsaiBucketPointer(pageIndex, pageOffset);
  }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_sbtreebonsai_local_OBonsaiBucketAbstract.java
|
114 |
/**
 * A template describing the structure of a CMS page: its backing view path,
 * the field groups it contains, and the locale it targets.
 */
public interface PageTemplate extends Serializable {

    /** @return the primary key */
    public Long getId();

    public void setId(Long id);

    /** @return the display name of this template */
    public String getTemplateName();

    public void setTemplateName(String templateName);

    /** @return a free-form description of this template */
    public String getTemplateDescription();

    public void setTemplateDescription(String templateDescription);

    /** @return the path to the view resource backing this template */
    public String getTemplatePath();

    public void setTemplatePath(String templatePath);

    /** @return the locale this template applies to */
    public Locale getLocale();

    public void setLocale(Locale locale);

    /** @return the ordered field groups composing this template */
    public List<FieldGroup> getFieldGroups();

    public void setFieldGroups(List<FieldGroup> fieldGroups);
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_domain_PageTemplate.java
|
10 |
/**
 * An access-ordered {@link LinkedHashMap} that evicts its least-recently-used
 * entry once the configured size limit is exceeded (a simple LRU cache).
 * A non-positive limit disables eviction entirely.
 *
 * @param <K> key type
 * @param <V> value type
 */
@SuppressWarnings("serial")
public class OLimitedMap<K, V> extends LinkedHashMap<K, V> {
  /** Maximum number of entries retained; values <= 0 mean "unbounded". */
  protected final int limit;

  /**
   * @param initialCapacity initial hash table capacity
   * @param loadFactor      hash table load factor
   * @param limit           maximum size; a non-positive value disables eviction
   */
  public OLimitedMap(final int initialCapacity, final float loadFactor, final int limit) {
    // 'true' selects access order, so iteration (and eviction) follows
    // least-recently-accessed-first.
    super(initialCapacity, loadFactor, true);
    this.limit = limit;
  }

  /**
   * Called by {@link LinkedHashMap} after each insertion; evicts the eldest
   * (least recently accessed) entry once the limit is exceeded.
   */
  @Override
  protected boolean removeEldestEntry(final Map.Entry<K, V> eldest) {
    // Simplified from the redundant 'limit > 0 ? size() - limit > 0 : false'.
    return limit > 0 && size() > limit;
  }
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OLimitedMap.java
|
6,090 |
// Runs the whole restore as one cluster-state update task, so metadata,
// blocks and routing table changes are applied atomically on the master.
clusterService.submitStateUpdateTask(request.cause(), new TimeoutClusterStateUpdateTask() {

    // Set by execute() only when there is nothing to restore; otherwise it
    // stays null and shard-level restore completes asynchronously.
    RestoreInfo restoreInfo = null;

    @Override
    public ClusterState execute(ClusterState currentState) {
        // Check if another restore process is already running - cannot run two restore processes at the
        // same time
        RestoreMetaData restoreMetaData = currentState.metaData().custom(RestoreMetaData.TYPE);
        if (restoreMetaData != null && !restoreMetaData.entries().isEmpty()) {
            throw new ConcurrentSnapshotExecutionException(snapshotId, "Restore process is already running in this cluster");
        }
        // Updating cluster state
        MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
        ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
        RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable());
        if (!metaData.indices().isEmpty()) {
            // We have some indices to restore
            ImmutableMap.Builder<ShardId, RestoreMetaData.ShardRestoreStatus> shards = ImmutableMap.builder();
            // renamedIndices maps renamed (target) index name -> snapshot index name.
            for (Map.Entry<String, String> indexEntry : renamedIndices.entrySet()) {
                String index = indexEntry.getValue();
                // Make sure that index was fully snapshotted - don't restore
                if (failed(snapshot, index)) {
                    throw new SnapshotRestoreException(snapshotId, "index [" + index + "] wasn't fully snapshotted - cannot restore");
                }
                RestoreSource restoreSource = new RestoreSource(snapshotId, index);
                String renamedIndex = indexEntry.getKey();
                IndexMetaData snapshotIndexMetaData = metaData.index(index);
                // Check that the index is closed or doesn't exist
                IndexMetaData currentIndexMetaData = currentState.metaData().index(renamedIndex);
                if (currentIndexMetaData == null) {
                    // Index doesn't exist - create it and start recovery
                    // Make sure that the index we are about to create has a validate name
                    createIndexService.validateIndexName(renamedIndex, currentState);
                    IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndex);
                    IndexMetaData updatedIndexMetaData = indexMdBuilder.build();
                    rtBuilder.addAsNewRestore(updatedIndexMetaData, restoreSource);
                    mdBuilder.put(updatedIndexMetaData, true);
                } else {
                    // Index exist - checking that it's closed
                    if (currentIndexMetaData.state() != IndexMetaData.State.CLOSE) {
                        // TODO: Enable restore for open indices
                        throw new SnapshotRestoreException(snapshotId, "cannot restore index [" + renamedIndex + "] because it's open");
                    }
                    // Make sure that the number of shards is the same. That's the only thing that we cannot change
                    if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) {
                        throw new SnapshotRestoreException(snapshotId, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() +
                                "] shard from snapshot with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards");
                    }
                    // Index exists and it's closed - open it in metadata and start recovery
                    IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(currentIndexMetaData).state(IndexMetaData.State.OPEN);
                    IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndex).build();
                    rtBuilder.addAsRestore(updatedIndexMetaData, restoreSource);
                    blocks.removeIndexBlock(index, INDEX_CLOSED_BLOCK);
                    mdBuilder.put(updatedIndexMetaData, true);
                }
                // Mark every shard of the index as restoring on the local (master) node.
                for (int shard = 0; shard < snapshotIndexMetaData.getNumberOfShards(); shard++) {
                    shards.put(new ShardId(renamedIndex, shard), new RestoreMetaData.ShardRestoreStatus(clusterService.state().nodes().localNodeId()));
                }
            }
            RestoreMetaData.Entry restoreEntry = new RestoreMetaData.Entry(snapshotId, RestoreMetaData.State.INIT, ImmutableList.copyOf(renamedIndices.keySet()), shards.build());
            mdBuilder.putCustom(RestoreMetaData.TYPE, new RestoreMetaData(restoreEntry));
        }
        // Restore global state if needed
        if (request.includeGlobalState()) {
            if (metaData.persistentSettings() != null) {
                mdBuilder.persistentSettings(metaData.persistentSettings());
            }
            if (metaData.templates() != null) {
                // TODO: Should all existing templates be deleted first?
                for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates().values()) {
                    mdBuilder.put(cursor.value);
                }
            }
            if (metaData.customs() != null) {
                for (ObjectObjectCursor<String, MetaData.Custom> cursor : metaData.customs()) {
                    if (!RepositoriesMetaData.TYPE.equals(cursor.key)) {
                        // Don't restore repositories while we are working with them
                        // TODO: Should we restore them at the end?
                        mdBuilder.putCustom(cursor.key, cursor.value);
                    }
                }
            }
        }
        if (metaData.indices().isEmpty()) {
            // We don't have any indices to restore - we are done
            restoreInfo = new RestoreInfo(request.name(), ImmutableList.<String>of(), 0, 0);
        }
        // Reroute so the newly added restore-source shards start allocating.
        ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocks).routingTable(rtBuilder).build();
        RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(rtBuilder).build());
        return ClusterState.builder(updatedState).routingResult(routingResult).build();
    }

    @Override
    public void onFailure(String source, Throwable t) {
        logger.warn("[{}] failed to restore snapshot", t, snapshotId);
        listener.onFailure(t);
    }

    @Override
    public TimeValue timeout() {
        return request.masterNodeTimeout();
    }

    @Override
    public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
        // restoreInfo is non-null only for the nothing-to-restore case.
        listener.onResponse(restoreInfo);
    }
});
| 1no label
|
src_main_java_org_elasticsearch_snapshots_RestoreService.java
|
86 |
nodeEngine.getExecutionService().schedule(new Runnable() {
public void run() {
if (connection.live()) {
try {
connection.close();
} catch (Throwable e) {
logger.warning("While closing client connection: " + e.toString());
}
}
}
}, DESTROY_ENDPOINT_DELAY_MS, TimeUnit.MILLISECONDS);
| 1no label
|
hazelcast_src_main_java_com_hazelcast_client_ClientEngineImpl.java
|
634 |
public class IndicesStatusRequest extends BroadcastOperationRequest<IndicesStatusRequest> {
private boolean recovery = false;
private boolean snapshot = false;
public IndicesStatusRequest() {
this(Strings.EMPTY_ARRAY);
}
public IndicesStatusRequest(String... indices) {
super(indices);
}
/**
* Should the status include recovery information. Defaults to <tt>false</tt>.
*/
public IndicesStatusRequest recovery(boolean recovery) {
this.recovery = recovery;
return this;
}
public boolean recovery() {
return this.recovery;
}
/**
* Should the status include recovery information. Defaults to <tt>false</tt>.
*/
public IndicesStatusRequest snapshot(boolean snapshot) {
this.snapshot = snapshot;
return this;
}
public boolean snapshot() {
return this.snapshot;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(recovery);
out.writeBoolean(snapshot);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
recovery = in.readBoolean();
snapshot = in.readBoolean();
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_status_IndicesStatusRequest.java
|
126 |
public abstract class ClientRequest implements Portable, SecureRequest {
protected int callId = -1;
protected transient ClientEngineImpl clientEngine;
protected transient Object service;
protected transient ClientEndpoint endpoint;
/**
* Some request needs to use a single connection like transaction requests and
* {@link com.hazelcast.cluster.client.ClientPingRequest}
* if true then request will not be retried
*/
private transient boolean singleConnection;
/**
* mark this request as SingleConnection
*/
public void setSingleConnection() {
this.singleConnection = true;
}
/**
* @return true if this request is SingleConnection false otherwise
*/
public boolean isSingleConnection() {
return singleConnection;
}
abstract void process() throws Exception;
public ClientEngine getClientEngine() {
return clientEngine;
}
final void setClientEngine(ClientEngineImpl clientEngine) {
this.clientEngine = clientEngine;
}
public <S> S getService() {
return (S) service;
}
final void setService(Object service) {
this.service = service;
}
public ClientEndpoint getEndpoint() {
return endpoint;
}
final void setEndpoint(ClientEndpoint endpoint) {
this.endpoint = endpoint;
}
public abstract String getServiceName();
public int getCallId() {
return callId;
}
public void setCallId(int callId) {
this.callId = callId;
}
@Override
public final void writePortable(PortableWriter writer) throws IOException {
writer.writeInt("cId", callId);
write(writer);
}
public void write(PortableWriter writer) throws IOException {
}
@Override
public final void readPortable(PortableReader reader) throws IOException {
callId = reader.readInt("cId");
read(reader);
}
public void read(PortableReader reader) throws IOException {
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_ClientRequest.java
|
1,229 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_SHIPPING_RATE")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@Deprecated
public class ShippingRateImpl implements ShippingRate {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "ShippingRateId")
@GenericGenerator(
name="ShippingRateId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="ShippingRateImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.pricing.domain.ShippingRateImpl")
}
)
@Column(name = "ID")
protected Long id;
@Column(name = "FEE_TYPE", nullable=false)
@Index(name="SHIPPINGRATE_FEE_INDEX", columnNames={"FEE_TYPE"})
protected String feeType;
@Column(name = "FEE_SUB_TYPE")
@Index(name="SHIPPINGRATE_FEESUB_INDEX", columnNames={"FEE_SUB_TYPE"})
protected String feeSubType;
@Column(name = "FEE_BAND", nullable=false)
protected Integer feeBand;
@Column(name = "BAND_UNIT_QTY", nullable=false)
protected BigDecimal bandUnitQuantity;
@Column(name = "BAND_RESULT_QTY", nullable=false)
protected BigDecimal bandResultQuantity;
@Column(name = "BAND_RESULT_PCT", nullable=false)
protected Integer bandResultPercent;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getFeeType() {
return feeType;
}
@Override
public void setFeeType(String feeType) {
this.feeType = feeType;
}
@Override
public String getFeeSubType() {
return feeSubType;
}
@Override
public void setFeeSubType(String feeSubType) {
this.feeSubType = feeSubType;
}
@Override
public Integer getFeeBand() {
return feeBand;
}
@Override
public void setFeeBand(Integer feeBand) {
this.feeBand = feeBand;
}
@Override
public BigDecimal getBandUnitQuantity() {
return bandUnitQuantity;
}
@Override
public void setBandUnitQuantity(BigDecimal bandUnitQuantity) {
this.bandUnitQuantity = bandUnitQuantity;
}
@Override
public BigDecimal getBandResultQuantity() {
return bandResultQuantity;
}
@Override
public void setBandResultQuantity(BigDecimal bandResultQuantity) {
this.bandResultQuantity = bandResultQuantity;
}
@Override
public Integer getBandResultPercent() {
return bandResultPercent;
}
@Override
public void setBandResultPercent(Integer bandResultPercent) {
this.bandResultPercent = bandResultPercent;
}
@Override
public String toString() {
return getFeeSubType() + " " + getBandResultQuantity() + " " + getBandResultPercent();
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((bandResultPercent == null) ? 0 : bandResultPercent.hashCode());
result = prime * result + ((bandResultQuantity == null) ? 0 : bandResultQuantity.hashCode());
result = prime * result + ((bandUnitQuantity == null) ? 0 : bandUnitQuantity.hashCode());
result = prime * result + ((feeBand == null) ? 0 : feeBand.hashCode());
result = prime * result + ((feeSubType == null) ? 0 : feeSubType.hashCode());
result = prime * result + ((feeType == null) ? 0 : feeType.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
ShippingRateImpl other = (ShippingRateImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (bandResultPercent == null) {
if (other.bandResultPercent != null)
return false;
} else if (!bandResultPercent.equals(other.bandResultPercent))
return false;
if (bandResultQuantity == null) {
if (other.bandResultQuantity != null)
return false;
} else if (!bandResultQuantity.equals(other.bandResultQuantity))
return false;
if (bandUnitQuantity == null) {
if (other.bandUnitQuantity != null)
return false;
} else if (!bandUnitQuantity.equals(other.bandUnitQuantity))
return false;
if (feeBand == null) {
if (other.feeBand != null)
return false;
} else if (!feeBand.equals(other.feeBand))
return false;
if (feeSubType == null) {
if (other.feeSubType != null)
return false;
} else if (!feeSubType.equals(other.feeSubType))
return false;
if (feeType == null) {
if (other.feeType != null)
return false;
} else if (!feeType.equals(other.feeType))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_pricing_domain_ShippingRateImpl.java
|
5 |
public class OIterableObject<T> implements Iterable<T>, OResettable, Iterator<T> {
private final T object;
private boolean alreadyRead = false;
public OIterableObject(T o) {
object = o;
}
/**
* Returns an iterator over a set of elements of type T.
*
* @return an Iterator.
*/
public Iterator<T> iterator() {
return this;
}
@Override
public void reset() {
alreadyRead = false;
}
@Override
public boolean hasNext() {
return !alreadyRead;
}
@Override
public T next() {
if (!alreadyRead) {
alreadyRead = true;
return object;
} else
throw new NoSuchElementException();
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove");
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OIterableObject.java
|
33 |
@Test
public class OMVRBTreeCompositeTest {
protected OMVRBTree<OCompositeKey, Double> tree;
@BeforeMethod
public void beforeMethod() throws Exception {
tree = new OMVRBTreeMemory<OCompositeKey, Double>(4, 0.5f, 2);
for (double i = 1; i < 4; i++) {
for (double j = 1; j < 10; j++) {
final OCompositeKey compositeKey = new OCompositeKey();
compositeKey.addKey(i);
compositeKey.addKey(j);
tree.put(compositeKey, i * 4 + j);
}
}
}
@Test
public void testGetEntrySameKeys() {
OMVRBTreeEntry<OCompositeKey, Double> result = tree.getEntry(compositeKey(1.0, 2.0), OMVRBTree.PartialSearchMode.NONE);
assertEquals(result.getKey(), compositeKey(1.0, 2.0));
result = tree.getEntry(compositeKey(1.0, 2.0), OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
assertEquals(result.getKey(), compositeKey(1.0, 2.0));
result = tree.getEntry(compositeKey(1.0, 2.0), OMVRBTree.PartialSearchMode.HIGHEST_BOUNDARY);
assertEquals(result.getKey(), compositeKey(1.0, 2.0));
}
@Test
public void testGetEntryPartialKeys() {
OMVRBTreeEntry<OCompositeKey, Double> result = tree.getEntry(compositeKey(2.0), OMVRBTree.PartialSearchMode.NONE);
assertEquals(result.getKey().getKeys().get(0), 2.0);
result = tree.getEntry(compositeKey(2.0), OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
assertEquals(result.getKey(), compositeKey(2.0, 1.0));
result = tree.getEntry(compositeKey(2.0), OMVRBTree.PartialSearchMode.HIGHEST_BOUNDARY);
assertEquals(result.getKey(), compositeKey(2.0, 9.0));
}
@Test
public void testSubMapInclusiveDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0), true, compositeKey(3.0), true)
.descendingMap();
assertEquals(navigableMap.size(), 18);
for (double i = 2; i <= 3; i++) {
for (double j = 1; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
}
@Test
public void testSubMapFromInclusiveDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0), true, compositeKey(3.0), false)
.descendingMap();
assertEquals(navigableMap.size(), 9);
for (double j = 1; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(2.0, j)));
}
}
@Test
public void testSubMapToInclusiveDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0), false, compositeKey(3.0), true)
.descendingMap();
assertEquals(navigableMap.size(), 9);
for (double i = 1; i <= 9; i++) {
assertTrue(navigableMap.containsKey(compositeKey(3.0, i)));
}
}
@Test
public void testSubMapNonInclusiveDescending() {
ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0), false, compositeKey(3.0), false)
.descendingMap();
assertEquals(navigableMap.size(), 0);
assertTrue(navigableMap.isEmpty());
navigableMap = tree.subMap(compositeKey(1.0), false, compositeKey(3.0), false);
assertEquals(navigableMap.size(), 9);
for (double i = 1; i <= 9; i++) {
assertTrue(navigableMap.containsKey(compositeKey(2.0, i)));
}
}
@Test
public void testSubMapInclusive() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0), true, compositeKey(3.0), true);
assertEquals(navigableMap.size(), 18);
for (double i = 2; i <= 3; i++) {
for (double j = 1; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
}
@Test
public void testSubMapFromInclusive() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0), true, compositeKey(3.0), false);
assertEquals(navigableMap.size(), 9);
for (double j = 1; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(2.0, j)));
}
}
@Test
public void testSubMapToInclusive() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0), false, compositeKey(3.0), true);
assertEquals(navigableMap.size(), 9);
for (double i = 1; i <= 9; i++) {
assertTrue(navigableMap.containsKey(compositeKey(3.0, i)));
}
}
@Test
public void testSubMapNonInclusive() {
ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0), false, compositeKey(3.0), false);
assertEquals(navigableMap.size(), 0);
assertTrue(navigableMap.isEmpty());
navigableMap = tree.subMap(compositeKey(1.0), false, compositeKey(3.0), false);
assertEquals(navigableMap.size(), 9);
for (double i = 1; i <= 9; i++) {
assertTrue(navigableMap.containsKey(compositeKey(2.0, i)));
}
}
@Test
public void testSubMapInclusivePartialKey() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0, 4.0), true, compositeKey(3.0), true);
assertEquals(navigableMap.size(), 15);
for (double i = 2; i <= 3; i++) {
for (double j = 1; j <= 9; j++) {
if (i == 2 && j < 4)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
}
@Test
public void testSubMapFromInclusivePartialKey() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0, 4.0), true, compositeKey(3.0), false);
assertEquals(navigableMap.size(), 6);
for (double j = 4; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(2.0, j)));
}
}
@Test
public void testSubMapToInclusivePartialKey() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0, 4.0), false, compositeKey(3.0), true);
assertEquals(navigableMap.size(), 14);
for (double i = 2; i <= 3; i++) {
for (double j = 1; j <= 9; j++) {
if (i == 2 && j <= 4)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
}
@Test
public void testSubMapNonInclusivePartial() {
ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0, 4.0), false, compositeKey(3.0), false);
assertEquals(navigableMap.size(), 5);
for (double i = 5; i <= 9; i++) {
assertTrue(navigableMap.containsKey(compositeKey(2.0, i)));
}
}
@Test
public void testSubMapInclusivePartialKeyDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0, 4.0), true, compositeKey(3.0), true)
.descendingMap();
assertEquals(navigableMap.size(), 15);
for (double i = 2; i <= 3; i++) {
for (double j = 1; j <= 9; j++) {
if (i == 2 && j < 4)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
}
@Test
public void testSubMapFromInclusivePartialKeyDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0, 4.0), true, compositeKey(3.0), false)
.descendingMap();
assertEquals(navigableMap.size(), 6);
for (double j = 4; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(2.0, j)));
}
}
@Test
public void testSubMapToInclusivePartialKeyDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0, 4.0), false, compositeKey(3.0), true)
.descendingMap();
assertEquals(navigableMap.size(), 14);
for (double i = 2; i <= 3; i++) {
for (double j = 1; j <= 9; j++) {
if (i == 2 && j <= 4)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
}
@Test
public void testSubMapNonInclusivePartialDescending() {
ONavigableMap<OCompositeKey, Double> navigableMap = tree.subMap(compositeKey(2.0, 4.0), false, compositeKey(3.0), false)
.descendingMap();
assertEquals(navigableMap.size(), 5);
for (double i = 5; i <= 9; i++) {
assertTrue(navigableMap.containsKey(compositeKey(2.0, i)));
}
}
@Test
public void testTailMapInclusivePartial() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.tailMap(compositeKey(2.0), true);
assertEquals(navigableMap.size(), 18);
for (double i = 2; i <= 3; i++)
for (double j = 1; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testTailMapNonInclusivePartial() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.tailMap(compositeKey(2.0), false);
assertEquals(navigableMap.size(), 9);
for (double i = 1; i <= 9; i++) {
assertTrue(navigableMap.containsKey(compositeKey(3.0, i)));
}
}
@Test
public void testTailMapInclusivePartialDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.tailMap(compositeKey(2.0), true).descendingMap();
assertEquals(navigableMap.size(), 18);
for (double i = 2; i <= 3; i++)
for (double j = 1; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testTailMapNonInclusivePartialDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.tailMap(compositeKey(2.0), false).descendingMap();
assertEquals(navigableMap.size(), 9);
for (double i = 1; i <= 9; i++) {
assertTrue(navigableMap.containsKey(compositeKey(3.0, i)));
}
}
@Test
public void testTailMapInclusive() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.tailMap(compositeKey(2.0, 3.0), true);
assertEquals(navigableMap.size(), 16);
for (double i = 2; i <= 3; i++)
for (double j = 1; j <= 9; j++) {
if (i == 2 && j < 3)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testTailMapNonInclusive() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.tailMap(compositeKey(2.0, 3.0), false);
assertEquals(navigableMap.size(), 15);
for (double i = 2; i <= 3; i++)
for (double j = 1; j <= 9; j++) {
if (i == 2 && j <= 3)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testTailMapInclusiveDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.tailMap(compositeKey(2.0, 3.0), true).descendingMap();
assertEquals(navigableMap.size(), 16);
for (double i = 2; i <= 3; i++)
for (double j = 1; j <= 9; j++) {
if (i == 2 && j < 3)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testTailMapNonInclusiveDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.tailMap(compositeKey(2.0, 3.0), false).descendingMap();
assertEquals(navigableMap.size(), 15);
for (double i = 2; i <= 3; i++)
for (double j = 1; j <= 9; j++) {
if (i == 2 && j <= 3)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testHeadMapInclusivePartial() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.headMap(compositeKey(3.0), true);
assertEquals(navigableMap.size(), 27);
for (double i = 1; i <= 3; i++)
for (double j = 1; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testHeadMapNonInclusivePartial() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.headMap(compositeKey(3.0), false);
assertEquals(navigableMap.size(), 18);
for (double i = 1; i < 3; i++)
for (double j = 1; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testHeadMapInclusivePartialDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.headMap(compositeKey(3.0), true).descendingMap();
assertEquals(navigableMap.size(), 27);
for (double i = 1; i <= 3; i++)
for (double j = 1; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testHeadMapNonInclusivePartialDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.headMap(compositeKey(3.0), false).descendingMap();
assertEquals(navigableMap.size(), 18);
for (double i = 1; i < 3; i++)
for (double j = 1; j <= 9; j++) {
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testHeadMapInclusive() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.headMap(compositeKey(3.0, 2.0), true);
assertEquals(navigableMap.size(), 20);
for (double i = 1; i <= 3; i++)
for (double j = 1; j <= 9; j++) {
if (i == 3 && j > 2)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testHeadMapNonInclusive() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.headMap(compositeKey(3.0, 2.0), false);
assertEquals(navigableMap.size(), 19);
for (double i = 1; i < 3; i++)
for (double j = 1; j <= 9; j++) {
if (i == 3 && j >= 2)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testHeadMapInclusiveDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.headMap(compositeKey(3.0, 2.0), true).descendingMap();
assertEquals(navigableMap.size(), 20);
for (double i = 1; i <= 3; i++)
for (double j = 1; j <= 9; j++) {
if (i == 3 && j > 2)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testHeadMapNonInclusiveDescending() {
final ONavigableMap<OCompositeKey, Double> navigableMap = tree.headMap(compositeKey(3.0, 2.0), false).descendingMap();
assertEquals(navigableMap.size(), 19);
for (double i = 1; i < 3; i++)
for (double j = 1; j <= 9; j++) {
if (i == 3 && j >= 2)
continue;
assertTrue(navigableMap.containsKey(compositeKey(i, j)));
}
}
@Test
public void testGetCeilingEntryKeyExistPartial() {
OMVRBTreeEntry<OCompositeKey, Double> entry = tree.getCeilingEntry(compositeKey(3.0), OMVRBTree.PartialSearchMode.NONE);
assertEquals(entry.getKey().getKeys().get(0), 3.0);
entry = tree.getCeilingEntry(compositeKey(3.0), OMVRBTree.PartialSearchMode.HIGHEST_BOUNDARY);
assertEquals(entry.getKey(), compositeKey(3.0, 9.0));
entry = tree.getCeilingEntry(compositeKey(3.0), OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
assertEquals(entry.getKey(), compositeKey(3.0, 1.0));
}
@Test
public void testGetCeilingEntryKeyNotExistPartial() {
OMVRBTreeEntry<OCompositeKey, Double> entry = tree.getCeilingEntry(compositeKey(1.3), OMVRBTree.PartialSearchMode.NONE);
assertEquals(entry.getKey().getKeys().get(0), 2.0);
entry = tree.getCeilingEntry(compositeKey(1.3), OMVRBTree.PartialSearchMode.HIGHEST_BOUNDARY);
assertEquals(entry.getKey(), compositeKey(2.0, 9.0));
entry = tree.getCeilingEntry(compositeKey(1.3), OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
assertEquals(entry.getKey(), compositeKey(2.0, 1.0));
}
@Test
public void testGetFloorEntryKeyExistPartial() {
OMVRBTreeEntry<OCompositeKey, Double> entry = tree.getFloorEntry(compositeKey(2.0), OMVRBTree.PartialSearchMode.NONE);
assertEquals(entry.getKey().getKeys().get(0), 2.0);
entry = tree.getFloorEntry(compositeKey(2.0), OMVRBTree.PartialSearchMode.HIGHEST_BOUNDARY);
assertEquals(entry.getKey(), compositeKey(2.0, 9.0));
entry = tree.getFloorEntry(compositeKey(2.0), OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
assertEquals(entry.getKey(), compositeKey(2.0, 1.0));
}
@Test
public void testGetFloorEntryKeyNotExistPartial() {
OMVRBTreeEntry<OCompositeKey, Double> entry = tree.getFloorEntry(compositeKey(1.3), OMVRBTree.PartialSearchMode.NONE);
assertEquals(entry.getKey().getKeys().get(0), 1.0);
entry = tree.getFloorEntry(compositeKey(1.3), OMVRBTree.PartialSearchMode.HIGHEST_BOUNDARY);
assertEquals(entry.getKey(), compositeKey(1.0, 9.0));
entry = tree.getFloorEntry(compositeKey(1.3), OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
assertEquals(entry.getKey(), compositeKey(1.0, 1.0));
}
@Test
public void testHigherEntryKeyExistPartial() {
OMVRBTreeEntry<OCompositeKey, Double> entry = tree.getHigherEntry(compositeKey(2.0));
assertEquals(entry.getKey(), compositeKey(3.0, 1.0));
}
@Test
public void testHigherEntryKeyNotExist() {
OMVRBTreeEntry<OCompositeKey, Double> entry = tree.getHigherEntry(compositeKey(1.3));
assertEquals(entry.getKey(), compositeKey(2.0, 1.0));
}
@Test
public void testHigherEntryNullResult() {
OMVRBTreeEntry<OCompositeKey, Double> entry = tree.getHigherEntry(compositeKey(12.0));
assertNull(entry);
}
@Test
public void testLowerEntryNullResult() {
OMVRBTreeEntry<OCompositeKey, Double> entry = tree.getLowerEntry(compositeKey(0.0));
assertNull(entry);
}
@Test
public void testLowerEntryKeyExist() {
OMVRBTreeEntry<OCompositeKey, Double> entry = tree.getLowerEntry(compositeKey(2.0));
assertEquals(entry.getKey(), compositeKey(1.0, 9.0));
}
@Test
public void testLowerEntryKeyNotExist() {
OMVRBTreeEntry<OCompositeKey, Double> entry = tree.getLowerEntry(compositeKey(2.5));
assertEquals(entry.getKey(), compositeKey(2.0, 9.0));
}
private OCompositeKey compositeKey(Comparable<?>... params) {
return new OCompositeKey(Arrays.asList(params));
}
}
| 0true
|
core_src_test_java_com_orientechnologies_common_collection_OMVRBTreeCompositeTest.java
|
2,074 |
public class MultipleEntryOperation extends AbstractMapOperation
implements BackupAwareOperation, PartitionAwareOperation {
private static final EntryEventType __NO_NEED_TO_FIRE_EVENT = null;
private EntryProcessor entryProcessor;
private Set<Data> keys;
MapEntrySet response;
public MultipleEntryOperation() {
}
public MultipleEntryOperation(String name, Set<Data> keys, EntryProcessor entryProcessor) {
super(name);
this.keys = keys;
this.entryProcessor = entryProcessor;
}
public void innerBeforeRun() {
final ManagedContext managedContext = getNodeEngine().getSerializationService().getManagedContext();
managedContext.initialize(entryProcessor);
}
@Override
public void run() throws Exception {
response = new MapEntrySet();
final InternalPartitionService partitionService = getNodeEngine().getPartitionService();
final RecordStore recordStore = mapService.getRecordStore(getPartitionId(), name);
final LocalMapStatsImpl mapStats = mapService.getLocalMapStatsImpl(name);
MapEntrySimple entry;
for (Data key : keys) {
if (partitionService.getPartitionId(key) != getPartitionId())
continue;
long start = System.currentTimeMillis();
Object objectKey = mapService.toObject(key);
final Map.Entry<Data, Object> mapEntry = recordStore.getMapEntry(key);
final Object valueBeforeProcess = mapEntry.getValue();
final Object valueBeforeProcessObject = mapService.toObject(valueBeforeProcess);
entry = new MapEntrySimple(objectKey, valueBeforeProcessObject);
final Object result = entryProcessor.process(entry);
final Object valueAfterProcess = entry.getValue();
Data dataValue = null;
if (result != null) {
dataValue = mapService.toData(result);
response.add(new AbstractMap.SimpleImmutableEntry<Data, Data>(key, dataValue));
}
EntryEventType eventType;
if (valueAfterProcess == null) {
recordStore.remove(key);
mapStats.incrementRemoves(getLatencyFrom(start));
eventType = EntryEventType.REMOVED;
} else {
if (valueBeforeProcessObject == null) {
mapStats.incrementPuts(getLatencyFrom(start));
eventType = EntryEventType.ADDED;
}
// take this case as a read so no need to fire an event.
else if (!entry.isModified()) {
mapStats.incrementGets(getLatencyFrom(start));
eventType = __NO_NEED_TO_FIRE_EVENT;
} else {
mapStats.incrementPuts(getLatencyFrom(start));
eventType = EntryEventType.UPDATED;
}
// todo if this is a read only operation, record access operations should be done.
if (eventType != __NO_NEED_TO_FIRE_EVENT) {
recordStore.put(new AbstractMap.SimpleImmutableEntry<Data, Object>(key, valueAfterProcess));
}
}
if (eventType != __NO_NEED_TO_FIRE_EVENT) {
final Data oldValue = mapService.toData(valueBeforeProcess);
final Data value = mapService.toData(valueAfterProcess);
mapService.publishEvent(getCallerAddress(), name, eventType, key, oldValue, value);
if (mapService.isNearCacheAndInvalidationEnabled(name)) {
mapService.invalidateAllNearCaches(name, key);
}
if (mapContainer.getWanReplicationPublisher() != null && mapContainer.getWanMergePolicy() != null) {
if (EntryEventType.REMOVED.equals(eventType)) {
mapService.publishWanReplicationRemove(name, key, Clock.currentTimeMillis());
} else {
Record record = recordStore.getRecord(key);
Data tempValue = mapService.toData(dataValue);
final SimpleEntryView entryView = mapService.createSimpleEntryView(key, tempValue, record);
mapService.publishWanReplicationUpdate(name, entryView);
}
}
}
}
}
@Override
public boolean returnsResponse() {
return true;
}
@Override
public Object getResponse() {
return response;
}
@Override
public String toString() {
return "MultipleEntryOperation{}";
}
@Override
public boolean shouldBackup() {
return entryProcessor.getBackupProcessor() != null;
}
@Override
public int getSyncBackupCount() {
return 0;
}
@Override
public int getAsyncBackupCount() {
return mapContainer.getTotalBackupCount();
}
@Override
public Operation getBackupOperation() {
EntryBackupProcessor backupProcessor = entryProcessor.getBackupProcessor();
return backupProcessor != null ? new MultipleEntryBackupOperation(name, keys, backupProcessor) : null;
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
entryProcessor = in.readObject();
int size = in.readInt();
keys = new HashSet<Data>(size);
for (int i = 0; i < size; i++) {
Data key = new Data();
key.readData(in);
keys.add(key);
}
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeObject(entryProcessor);
out.writeInt(keys.size());
for (Data key : keys) {
key.writeData(out);
}
}
private long getLatencyFrom(long begin) {
return Clock.currentTimeMillis() - begin;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_operation_MultipleEntryOperation.java
|
5 |
/**
 * An information control that shows HTML content in an SWT {@link Browser}
 * widget. Supports delayed input-change notification and approximates its
 * preferred size by rendering the HTML as plain text with a {@link TextLayout}.
 *
 * NOTE(review): availability of the native browser must be checked via
 * {@link #isAvailable(Composite)} before constructing instances.
 */
public class BrowserInformationControl extends AbstractInformationControl
        implements IInformationControlExtension2, IDelayedInputChangeProvider {
    /**
     * Tells whether the SWT Browser widget and hence this information
     * control is available.
     *
     * @param parent the parent component used for checking or <code>null</code> if none
     * @return <code>true</code> if this control is available
     */
    public static boolean isAvailable(Composite parent) {
        if (!fgAvailabilityChecked) {
            try {
                // Creating (and immediately disposing) a Browser is the only reliable probe.
                Browser browser= new Browser(parent, SWT.NONE);
                browser.dispose();
                fgIsAvailable= true;
                // While we are here, measure the native scroll bar sizes for computeTrim().
                Slider sliderV= new Slider(parent, SWT.VERTICAL);
                Slider sliderH= new Slider(parent, SWT.HORIZONTAL);
                int width= sliderV.computeSize(SWT.DEFAULT, SWT.DEFAULT).x;
                int height= sliderH.computeSize(SWT.DEFAULT, SWT.DEFAULT).y;
                fgScrollBarSize= new Point(width, height);
                sliderV.dispose();
                sliderH.dispose();
            } catch (SWTError er) {
                // No native browser available on this platform/installation.
                fgIsAvailable= false;
            } finally {
                fgAvailabilityChecked= true;
            }
        }
        return fgIsAvailable;
    }
    /**
     * Minimal size constraints.
     * @since 3.2
     */
    private static final int MIN_WIDTH= 80;
    private static final int MIN_HEIGHT= 50;
    /**
     * Availability checking cache.
     */
    private static boolean fgIsAvailable= false;
    private static boolean fgAvailabilityChecked= false;
    /**
     * Cached scroll bar width and height
     * @since 3.4
     */
    private static Point fgScrollBarSize;
    /** The control's browser widget */
    private Browser fBrowser;
    /** Tells whether the browser has content */
    private boolean fBrowserHasContent;
    /** Text layout used to approximate size of content when rendered in browser */
    private TextLayout fTextLayout;
    /** Bold text style */
    private TextStyle fBoldStyle;
    /** The current input, or <code>null</code> if none. */
    private BrowserInput fInput;
    /**
     * <code>true</code> iff the browser has completed loading of the last
     * input set via {@link #setInformation(String)}.
     * @since 3.4
     */
    private boolean fCompleted= false;
    /**
     * The listener to be notified when a delayed location changing event happened.
     * @since 3.4
     */
    private IInputChangedListener fDelayedInputChangeListener;
    /**
     * The listeners to be notified when the input changed.
     * @since 3.4
     */
    private ListenerList/*<IInputChangedListener>*/fInputChangeListeners= new ListenerList(ListenerList.IDENTITY);
    /**
     * The symbolic name of the font used for size computations, or <code>null</code> to use dialog font.
     * @since 3.4
     */
    private final String fSymbolicFontName;
    /**
     * Creates a browser information control with the given shell as parent.
     *
     * @param parent the parent shell
     * @param symbolicFontName the symbolic name of the font used for size computations
     * @param resizable <code>true</code> if the control should be resizable
     * @since 3.4
     */
    public BrowserInformationControl(Shell parent, String symbolicFontName,
            boolean resizable) {
        super(parent, resizable);
        fSymbolicFontName= symbolicFontName;
        create();
    }
    /**
     * Creates a browser information control with the given shell as parent.
     *
     * @param parent the parent shell
     * @param symbolicFontName the symbolic name of the font used for size computations
     * @param statusFieldText the text to be used in the optional status field
     *                          or <code>null</code> if the status field should be hidden
     * @since 3.4
     */
    public BrowserInformationControl(Shell parent, String symbolicFontName,
            String statusFieldText) {
        super(parent, statusFieldText);
        fSymbolicFontName= symbolicFontName;
        create();
    }
    /**
     * Creates a browser information control with the given shell as parent.
     *
     * @param parent the parent shell
     * @param symbolicFontName the symbolic name of the font used for size computations
     * @param toolBarManager the manager or <code>null</code> if toolbar is not desired
     * @since 3.4
     */
    public BrowserInformationControl(Shell parent, String symbolicFontName,
            ToolBarManager toolBarManager) {
        super(parent, toolBarManager);
        fSymbolicFontName= symbolicFontName;
        create();
    }
    /**
     * Creates the browser widget, disables JavaScript and the built-in context
     * menu, and installs the progress/open-window listeners.
     */
    @Override
    protected void createContent(Composite parent) {
        fBrowser= new Browser(parent, SWT.NONE);
        fBrowser.setJavascriptEnabled(false);
        Display display= getShell().getDisplay();
        fBrowser.setForeground(display.getSystemColor(SWT.COLOR_INFO_FOREGROUND));
        fBrowser.setBackground(display.getSystemColor(SWT.COLOR_INFO_BACKGROUND));
        //fBrowser.setBackground(color);
        fBrowser.addProgressListener(new ProgressAdapter() {
            @Override
            public void completed(ProgressEvent event) {
                // Flags loading completion; setVisible() spins the event loop until this fires.
                fCompleted= true;
            }
        });
        fBrowser.addOpenWindowListener(new OpenWindowListener() {
            @Override
            public void open(WindowEvent event) {
                event.required= true; // Cancel opening of new windows
            }
        });
        // Replace browser's built-in context menu with none
        fBrowser.setMenu(new Menu(getShell(), SWT.NONE));
        createTextLayout();
    }
    /**
     * {@inheritDoc}
     * @deprecated use {@link #setInput(Object)}
     */
    @Override
    public void setInformation(final String content) {
        // Wrap the plain HTML string in an anonymous BrowserInput with no history.
        setInput(new BrowserInput(null) {
            @Override
            public String getHtml() {
                return content;
            }
            @Override
            public String getInputName() {
                return "";
            }
        });
    }
    /**
     * {@inheritDoc} This control can handle {@link String} and
     * {@link BrowserInput}.
     */
    @Override
    public void setInput(Object input) {
        Assert.isLegal(input == null ||
                input instanceof String ||
                input instanceof BrowserInput);
        if (input instanceof String) {
            setInformation((String)input);
            return;
        }
        fInput= (BrowserInput) input;
        String content= null;
        if (fInput != null)
            content= fInput.getHtml();
        fBrowserHasContent= content != null && content.length() > 0;
        if (!fBrowserHasContent)
            content= "<html><body ></html>"; //$NON-NLS-1$
        boolean RTL= (getShell().getStyle() & SWT.RIGHT_TO_LEFT) != 0;
        boolean resizable= isResizable();
        // The default "overflow:auto" would not result in a predictable width for the client area
        // and the re-wrapping would cause visual noise
        String[] styles= null;
        if (RTL && resizable)
            styles= new String[] { "direction:rtl;", "overflow:scroll;", "word-wrap:break-word;" }; //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
        else if (RTL && !resizable)
            styles= new String[] { "direction:rtl;", "overflow:hidden;", "word-wrap:break-word;" }; //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
        else if (!resizable)
            //XXX: In IE, "word-wrap: break-word;" causes bogus wrapping even in non-broken words :-(see e.g. Javadoc of String).
            // Re-check whether we really still need this now that the Javadoc Hover header already sets this style.
            styles= new String[] { "overflow:hidden;"/*, "word-wrap: break-word;"*/}; //$NON-NLS-1$
        else
            styles= new String[] { "overflow:scroll;" }; //$NON-NLS-1$
        StringBuilder buffer= new StringBuilder(content);
        HTMLPrinter.insertStyles(buffer, styles);
        content= buffer.toString();
        /*
         * XXX: Should add some JavaScript here that shows something like
         * "(continued...)" or "..." at the end of the visible area when the page overflowed
         * with "overflow:hidden;".
         */
        fCompleted= false;
        fBrowser.setText(content);
        // Notify registered input-change listeners of the new input.
        Object[] listeners= fInputChangeListeners.getListeners();
        for (int i= 0; i < listeners.length; i++)
            ((IInputChangedListener)listeners[i]).inputChanged(fInput);
    }
    @Override
    public void setVisible(boolean visible) {
        Shell shell= getShell();
        if (shell.isVisible() == visible)
            return;
        if (!visible) {
            super.setVisible(false);
            setInput(null);
            return;
        }
        /*
         * The Browser widget flickers when made visible while it is not completely loaded.
         * The fix is to delay the call to setVisible until either loading is completed
         * (see ProgressListener in constructor), or a timeout has been reached.
         */
        final Display display= shell.getDisplay();
        // Make sure the display wakes from sleep after timeout:
        display.timerExec(100, new Runnable() {
            @Override
            public void run() {
                fCompleted= true;
            }
        });
        while (!fCompleted) {
            // Drive the event loop to process the events required to load the browser widget's contents:
            if (!display.readAndDispatch()) {
                display.sleep();
            }
        }
        // The nested event loop above may have disposed this control; re-check.
        shell= getShell();
        if (shell == null || shell.isDisposed())
            return;
        /*
         * Avoids flickering when replacing hovers, especially on Vista in ON_CLICK mode.
         * Causes flickering on GTK. Carbon does not care.
         */
        if ("win32".equals(SWT.getPlatform())) //$NON-NLS-1$
            shell.moveAbove(null);
        super.setVisible(true);
    }
    @Override
    public void setSize(int width, int height) {
        fBrowser.setRedraw(false); // avoid flickering
        try {
            super.setSize(width, height);
        } finally {
            fBrowser.setRedraw(true);
        }
    }
    /**
     * Creates and initializes the text layout used
     * to compute the size hint.
     *
     * @since 3.2
     */
    private void createTextLayout() {
        fTextLayout= new TextLayout(fBrowser.getDisplay());
        // Initialize fonts
        String symbolicFontName= fSymbolicFontName == null ? JFaceResources.DIALOG_FONT : fSymbolicFontName;
        Font font= JFaceResources.getFont(symbolicFontName);
        fTextLayout.setFont(font);
        fTextLayout.setWidth(-1);
        font= JFaceResources.getFontRegistry().getBold(symbolicFontName);
        fBoldStyle= new TextStyle(font, null, null);
        // Compute and set tab width
        fTextLayout.setText("    "); //$NON-NLS-1$
        int tabWidth= fTextLayout.getBounds().width;
        fTextLayout.setTabs(new int[] { tabWidth });
        fTextLayout.setText(""); //$NON-NLS-1$
    }
    @Override
    protected void handleDispose() {
        // Release the TextLayout (native resource) and drop the browser reference.
        if (fTextLayout != null) {
            fTextLayout.dispose();
            fTextLayout= null;
        }
        fBrowser= null;
        super.handleDispose();
    }
    /**
     * Approximates the preferred size by rendering the current input's HTML as
     * plain text with the cached {@link TextLayout}.
     *
     * NOTE(review): assumes {@link #setInput(Object)} was called with a non-null
     * input before sizing — a null {@code fInput} here would NPE; confirm the
     * caller contract.
     */
    @Override
    public Point computeSizeHint() {
        Point sizeConstraints = getSizeConstraints();
        Rectangle trim = computeTrim();
        //FIXME: The HTML2TextReader does not render <p> like a browser.
        // Instead of inserting an empty line, it just adds a single line break.
        // Furthermore, the indentation of <dl><dd> elements is too small (e.g with a long @see line)
        TextPresentation presentation= new TextPresentation();
        HTML2TextReader reader= new HTML2TextReader(new StringReader(fInput.getHtml()), presentation);
        String text;
        try {
            text= reader.getString();
        } catch (IOException e) {
            text= "";
        }
        finally {
            try {
                reader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        fTextLayout.setText(text);
        fTextLayout.setWidth(sizeConstraints==null ? SWT.DEFAULT : sizeConstraints.x-trim.width);
        @SuppressWarnings("unchecked")
        Iterator<StyleRange> iter = presentation.getAllStyleRangeIterator();
        while (iter.hasNext()) {
            StyleRange sr = iter.next();
            if (sr.fontStyle == SWT.BOLD)
                fTextLayout.setStyle(fBoldStyle, sr.start, sr.start + sr.length);
        }
        Rectangle bounds = fTextLayout.getBounds(); // does not return minimum width, see https://bugs.eclipse.org/bugs/show_bug.cgi?id=217446
        int lineCount = fTextLayout.getLineCount();
        int textWidth = 0;
        for (int i=0; i<lineCount; i++) {
            Rectangle rect = fTextLayout.getLineBounds(i);
            int lineWidth = rect.x + rect.width;
            if (i==0) {
                lineWidth *= 1.25; //to accommodate it is not only bold but also monospace
                lineWidth += 20;
            }
            textWidth = Math.max(textWidth, lineWidth);
        }
        bounds.width = textWidth;
        fTextLayout.setText("");
        int minWidth = textWidth;
        int minHeight = trim.height + bounds.height;
        // Add some air to accommodate for different browser renderings
        minWidth += 30;
        minHeight += 60;
        // Apply max size constraints
        if (sizeConstraints!=null) {
            if (sizeConstraints.x!=SWT.DEFAULT)
                minWidth = Math.min(sizeConstraints.x, minWidth + trim.width);
            if (sizeConstraints.y!=SWT.DEFAULT)
                minHeight = Math.min(sizeConstraints.y, minHeight);
        }
        // Ensure minimal size
        int width = Math.max(MIN_WIDTH, minWidth);
        int height = Math.max(MIN_HEIGHT, minHeight);
        return new Point(width, height);
    }
    @Override
    public Rectangle computeTrim() {
        Rectangle trim= super.computeTrim();
        // Resizable controls show scroll bars; account for their measured size.
        if (isResizable() && fgScrollBarSize!=null) {
            boolean RTL= (getShell().getStyle() & SWT.RIGHT_TO_LEFT) != 0;
            if (RTL) {
                trim.x-= fgScrollBarSize.x;
            }
            trim.width+= fgScrollBarSize.x;
            trim.height+= fgScrollBarSize.y;
        }
        return trim;
    }
    /**
     * Adds the listener to the collection of listeners who will be
     * notified when the current location has changed or is about to change.
     *
     * @param listener the location listener
     * @since 3.4
     */
    public void addLocationListener(LocationListener listener) {
        fBrowser.addLocationListener(listener);
    }
    @Override
    public void setForegroundColor(Color foreground) {
        super.setForegroundColor(foreground);
        fBrowser.setForeground(foreground);
    }
    @Override
    public void setBackgroundColor(Color background) {
        super.setBackgroundColor(background);
        fBrowser.setBackground(background);
    }
    @Override
    public boolean hasContents() {
        return fBrowserHasContent;
    }
    /**
     * Adds a listener for input changes to this input change provider.
     * Has no effect if an identical listener is already registered.
     *
     * @param inputChangeListener the listener to add
     * @since 3.4
     */
    public void addInputChangeListener(IInputChangedListener inputChangeListener) {
        Assert.isNotNull(inputChangeListener);
        fInputChangeListeners.add(inputChangeListener);
    }
    /**
     * Removes the given input change listener from this input change provider.
     * Has no effect if an identical listener is not registered.
     *
     * @param inputChangeListener the listener to remove
     * @since 3.4
     */
    public void removeInputChangeListener(IInputChangedListener inputChangeListener) {
        fInputChangeListeners.remove(inputChangeListener);
    }
    @Override
    public void setDelayedInputChangeListener(IInputChangedListener inputChangeListener) {
        fDelayedInputChangeListener= inputChangeListener;
    }
    /**
     * Tells whether a delayed input change listener is registered.
     *
     * @return <code>true</code> iff a delayed input change
     *         listener is currently registered
     * @since 3.4
     */
    public boolean hasDelayedInputChangeListener() {
        return fDelayedInputChangeListener != null;
    }
    /**
     * Notifies listeners of a delayed input change.
     *
     * @param newInput the new input, or <code>null</code> to request cancellation
     * @since 3.4
     */
    public void notifyDelayedInputChange(Object newInput) {
        if (fDelayedInputChangeListener != null)
            fDelayedInputChangeListener.inputChanged(newInput);
    }
    @Override
    public String toString() {
        String style= (getShell().getStyle() & SWT.RESIZE) == 0 ? "fixed" : "resizeable"; //$NON-NLS-1$ //$NON-NLS-2$
        return super.toString() + " -  style: " + style; //$NON-NLS-1$
    }
    /**
     * @return the current browser input or <code>null</code>
     */
    public BrowserInput getInput() {
        return fInput;
    }
    /**
     * Computes width/height from character counts using the configured font's
     * metrics. NOTE(review): the ternary's null branch is unreachable — the
     * method returns early when {@code fSymbolicFontName == null}.
     */
    @Override
    public Point computeSizeConstraints(int widthInChars, int heightInChars) {
        if (fSymbolicFontName == null)
            return null;
        GC gc= new GC(fBrowser);
        Font font= fSymbolicFontName == null ? JFaceResources.getDialogFont() : JFaceResources.getFont(fSymbolicFontName);
        gc.setFont(font);
        int width= gc.getFontMetrics().getAverageCharWidth();
        int height= gc.getFontMetrics().getHeight();
        gc.dispose();
        return new Point(widthInChars * width, heightInChars * height);
    }
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_browser_BrowserInformationControl.java
|
298 |
// Force-release the lock from a different thread (forceUnlock ignores ownership),
// then signal the waiting test via the latch.
// NOTE(review): `l` and `latch` are captured from the enclosing test method.
new Thread() {
    public void run() {
        l.forceUnlock();
        latch.countDown();
    }
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_lock_ClientLockTest.java
|
1,623 |
/**
 * Guice module that registers every cluster-level setting which may be updated
 * dynamically at runtime, together with an optional {@link Validator} for each.
 * The populated {@link DynamicSettings} instance is bound under the
 * {@code @ClusterDynamicSettings} annotation.
 */
public class ClusterDynamicSettingsModule extends AbstractModule {
    private final DynamicSettings clusterDynamicSettings;
    public ClusterDynamicSettingsModule() {
        clusterDynamicSettings = new DynamicSettings();
        // Settings registered without a validator accept any value; a trailing "*"
        // registers a wildcard prefix (settings group).
        clusterDynamicSettings.addDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES);
        clusterDynamicSettings.addDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP + "*");
        clusterDynamicSettings.addDynamicSetting(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, Validator.FLOAT);
        clusterDynamicSettings.addDynamicSetting(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, Validator.FLOAT);
        clusterDynamicSettings.addDynamicSetting(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, Validator.FLOAT);
        clusterDynamicSettings.addDynamicSetting(BalancedShardsAllocator.SETTING_THRESHOLD, Validator.NON_NEGATIVE_FLOAT);
        clusterDynamicSettings.addDynamicSetting(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, Validator.INTEGER);
        clusterDynamicSettings.addDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE);
        clusterDynamicSettings.addDynamicSetting(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION);
        clusterDynamicSettings.addDynamicSetting(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION);
        clusterDynamicSettings.addDynamicSetting(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION);
        clusterDynamicSettings.addDynamicSetting(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, Validator.INTEGER);
        clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP + "*");
        clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "*");
        clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP + "*");
        clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_SIZE);
        clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_EXPIRE, Validator.TIME);
        clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_TYPE);
        clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
        clusterDynamicSettings.addDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME);
        clusterDynamicSettings.addDynamicSetting(MetaData.SETTING_READ_ONLY);
        clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, Validator.BYTES_SIZE);
        clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS, Validator.INTEGER);
        clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, Validator.BYTES_SIZE);
        clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_COMPRESS);
        clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, Validator.POSITIVE_INTEGER);
        clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, Validator.POSITIVE_INTEGER);
        clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
        clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_SIZE_PER_SEC, Validator.BYTES_SIZE);
        clusterDynamicSettings.addDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*");
        clusterDynamicSettings.addDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER);
        clusterDynamicSettings.addDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER);
        clusterDynamicSettings.addDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
        clusterDynamicSettings.addDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
        clusterDynamicSettings.addDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED);
        clusterDynamicSettings.addDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, Validator.TIME);
        clusterDynamicSettings.addDynamicSetting(SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED);
        clusterDynamicSettings.addDynamicSetting(InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING, Validator.MEMORY_SIZE);
        clusterDynamicSettings.addDynamicSetting(InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE);
        clusterDynamicSettings.addDynamicSetting(DestructiveOperations.REQUIRES_NAME);
    }
    /** Registers additional dynamic settings (no validation). */
    public void addDynamicSettings(String... settings) {
        clusterDynamicSettings.addDynamicSettings(settings);
    }
    /** Registers an additional dynamic setting with the given validator. */
    public void addDynamicSetting(String setting, Validator validator) {
        clusterDynamicSettings.addDynamicSetting(setting, validator);
    }
    @Override
    protected void configure() {
        // Expose the fully-populated registry for injection.
        bind(DynamicSettings.class).annotatedWith(ClusterDynamicSettings.class).toInstance(clusterDynamicSettings);
    }
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_settings_ClusterDynamicSettingsModule.java
|
623 |
/** XContent field names used when rendering shard routing information. */
static final class Fields {
    static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
    static final XContentBuilderString STATE = new XContentBuilderString("state");
    static final XContentBuilderString PRIMARY = new XContentBuilderString("primary");
    static final XContentBuilderString NODE = new XContentBuilderString("node");
    static final XContentBuilderString RELOCATING_NODE = new XContentBuilderString("relocating_node");
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_stats_ShardStats.java
|
863 |
/**
 * LTK move-refactoring participant that updates Ceylon import statements when a
 * Ceylon or Java source file is moved to another package folder.
 *
 * NOTE(review): {@code fileChanges} and {@code movingFiles} are static mutable
 * state shared across participant instances; they are cleared only when the
 * composite change's perform() runs, so a cancelled refactoring presumably
 * leaves stale entries — confirm whether that is acceptable.
 */
public class MoveFileRefactoringParticipant extends MoveParticipant {
    private IFile file;
    // One TextFileChange per project-relative path, so edits from several
    // participants accumulate in a single change per file.
    private static Map<String,TextFileChange> fileChanges =
            new HashMap<String,TextFileChange>();
    // All resources being moved by the current refactoring.
    private static List<IResource> movingFiles =
            new ArrayList<IResource>();
    /** Participates only for .ceylon/.java files in projects with a type checker. */
    @Override
    protected boolean initialize(Object element) {
        file = (IFile) element;
        if (getProcessor() instanceof MoveProcessor) {
            MoveProcessor moveProcessor = (MoveProcessor) getProcessor();
            movingFiles.addAll(Arrays.asList((IResource[]) moveProcessor.getElements()));
            return getProjectTypeChecker(file.getProject())!=null &&
                    file.getFileExtension()!=null &&
                    (file.getFileExtension().equals("ceylon") ||
                     file.getFileExtension().equals("java"));
        }
        else {
            return false;
        }
    }
    @Override
    public String getName() {
        return "Move file participant for Ceylon source";
    }
    /** No extra preconditions beyond the processor's own checks. */
    @Override
    public RefactoringStatus checkConditions(IProgressMonitor pm,
            CheckConditionsContext context)
                    throws OperationCanceledException {
        return new RefactoringStatus();
    }
    /** All edits are produced as a pre-change; nothing to do afterwards. */
    @Override
    public Change createChange(IProgressMonitor pm)
            throws CoreException, OperationCanceledException {
        return null;
    }
    /**
     * Builds a composite change containing the import edits needed in the moved
     * file and in every other file that references its declarations. Returns
     * <code>null</code> when no edits are required or on failure.
     */
    @Override
    public Change createPreChange(IProgressMonitor pm)
            throws CoreException, OperationCanceledException {
        try {
            IProject project = file.getProject();
            IFolder folder = (IFolder) getArguments().getDestination();
            // Destination package name derived from the target folder path.
            String newName = folder.getProjectRelativePath()
                    .removeFirstSegments(1)
                    .toPortableString()
                    .replace('/', '.');
            String movedRelFilePath = file.getProjectRelativePath()
                    .removeFirstSegments(1)
                    .toPortableString();
            String movedRelPath = file.getParent()
                    .getProjectRelativePath()
                    .removeFirstSegments(1)
                    .toPortableString();
            // Source package name the file is leaving.
            String oldName = movedRelPath.replace('/', '.');
            List<Change> changes = new ArrayList<Change>();
            if (file.getFileExtension().equals("java")) {
                updateRefsToMovedJavaFile(project, newName, oldName, changes);
            }
            else {
                PhasedUnit movedPhasedUnit =
                        getProjectTypeChecker(project)
                            .getPhasedUnitFromRelativePath(movedRelFilePath);
                if (movedPhasedUnit==null) {
                    return null;
                }
                List<Declaration> declarations =
                        movedPhasedUnit.getDeclarations();
                // Moving within the same package requires no import updates.
                if (newName.equals(oldName)) return null;
                updateRefsFromMovedCeylonFile(project, newName, oldName, changes,
                        movedPhasedUnit, declarations);
                updateRefsToMovedCeylonFile(project, newName, oldName, changes,
                        movedPhasedUnit, declarations);
            }
            if (changes.isEmpty())
                return null;
            CompositeChange result =
                    new CompositeChange("Ceylon source changes") {
                @Override
                public Change perform(IProgressMonitor pm)
                        throws CoreException {
                    // Reset the shared static state once the change actually runs.
                    fileChanges.clear();
                    movingFiles.clear();
                    return super.perform(pm);
                }
            };
            for (Change change: changes) {
                result.add(change);
            }
            return result;
        }
        catch (Exception e) {
            // Best-effort: a failure here aborts participation, not the move itself.
            e.printStackTrace();
            return null;
        }
    }
    /**
     * Collects, for the moved Ceylon file itself, imports it will need for
     * declarations that remain in its old package.
     */
    protected void updateRefsFromMovedCeylonFile(final IProject project,
            final String newName, final String oldName,
            final List<Change> changes, final PhasedUnit movedPhasedUnit,
            final List<Declaration> declarations) {
        final Map<Declaration,String> imports =
                new HashMap<Declaration,String>();
        movedPhasedUnit.getCompilationUnit().visit(new Visitor() {
            @Override
            public void visit(ImportMemberOrType that) {
                super.visit(that);
                visitIt(that.getIdentifier(),
                        that.getDeclarationModel());
            }
//            @Override
//            public void visit(QualifiedMemberOrTypeExpression that) {
//                super.visit(that);
//                visitIt(that.getIdentifier(), that.getDeclaration());
//            }
            @Override
            public void visit(BaseMemberOrTypeExpression that) {
                super.visit(that);
                visitIt(that.getIdentifier(),
                        that.getDeclaration());
            }
            @Override
            public void visit(BaseType that) {
                super.visit(that);
                visitIt(that.getIdentifier(),
                        that.getDeclarationModel());
            }
//            @Override
//            public void visit(QualifiedType that) {
//                super.visit(that);
//                visitIt(that.getIdentifier(), that.getDeclarationModel());
//            }
            protected void visitIt(Tree.Identifier id, Declaration dec) {
                // Record references to declarations staying behind in the old package.
                if (dec!=null && !declarations.contains(dec)) {
                    Unit unit = dec.getUnit();
                    if (unit instanceof ProjectSourceFile &&
                            movingFiles.contains(((ProjectSourceFile) unit).getFileResource())) {
                        //also moving
                    }
                    else if (unit.getPackage().equals(movedPhasedUnit.getPackage())) {
                        imports.put(dec, id.getText());
                    }
                }
            }
            //TODO: DocLinks!!
        });
        collectEditsToMovedFile(newName, oldName, changes,
                movedPhasedUnit, imports);
    }
    /**
     * Collects import edits for every other compilation unit that references a
     * declaration in the moved Ceylon file.
     */
    protected void updateRefsToMovedCeylonFile(final IProject project,
            final String newName, final String oldName,
            final List<Change> changes, PhasedUnit movedPhasedUnit,
            final List<Declaration> declarations) {
        if (!getArguments().getUpdateReferences()) return;
        for (PhasedUnit phasedUnit: getProjectTypeChecker(project)
                .getPhasedUnits().getPhasedUnits()) {
            // Skip the moved file itself and any file that is also being moved.
            if (phasedUnit==movedPhasedUnit ||
                    phasedUnit.getUnit() instanceof ProjectSourceFile &&
                    movingFiles.contains(((ProjectSourceFile) phasedUnit.getUnit()).getFileResource())) {
                continue;
            }
            final Map<Declaration,String> imports =
                    new HashMap<Declaration,String>();
            phasedUnit.getCompilationUnit().visit(new Visitor() {
                @Override
                public void visit(ImportMemberOrType that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclarationModel());
                }
//                @Override
//                public void visit(QualifiedMemberOrTypeExpression that) {
//                    super.visit(that);
//                    visitIt(that.getIdentifier(), that.getDeclaration());
//                }
                @Override
                public void visit(BaseMemberOrTypeExpression that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclaration());
                }
                @Override
                public void visit(BaseType that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclarationModel());
                }
//                @Override
//                public void visit(QualifiedType that) {
//                    super.visit(that);
//                    visitIt(that.getIdentifier(), that.getDeclarationModel());
//                }
                protected void visitIt(Tree.Identifier id, Declaration dec) {
                    if (dec!=null && declarations.contains(dec)) {
                        imports.put(dec, id.getText());
                    }
                }
                //TODO: DocLinks!!
            });
            collectEdits(newName, oldName, changes, phasedUnit, imports);
        }
    }
    /**
     * Collects import edits in Ceylon units that reference top-level types of a
     * moved Java compilation unit.
     */
    protected void updateRefsToMovedJavaFile(final IProject project,
            final String newName, final String oldName,
            final List<Change> changes) throws JavaModelException {
        if (!getArguments().getUpdateReferences()) return;
        ICompilationUnit jcu = (ICompilationUnit) JavaCore.create(file);
        final IType[] types = jcu.getTypes();
        TypeChecker tc = getProjectTypeChecker(project);
        if (tc==null) return;
        for (PhasedUnit phasedUnit: tc.getPhasedUnits().getPhasedUnits()) {
            final Map<Declaration,String> imports =
                    new HashMap<Declaration,String>();
            phasedUnit.getCompilationUnit().visit(new Visitor() {
                @Override
                public void visit(ImportMemberOrType that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclarationModel());
                }
//                @Override
//                public void visit(QualifiedMemberOrTypeExpression that) {
//                    super.visit(that);
//                    visitIt(that.getIdentifier(), that.getDeclaration());
//                }
                @Override
                public void visit(BaseMemberOrTypeExpression that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclaration());
                }
                @Override
                public void visit(BaseType that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclarationModel());
                }
//                @Override
//                public void visit(QualifiedType that) {
//                    super.visit(that);
//                    visitIt(that.getIdentifier(), that.getDeclarationModel());
//                }
                protected void visitIt(Tree.Identifier id, Declaration dec) {
                    // Match by Ceylon-style qualified name against each Java top-level type.
                    for (IType type: types) {
                        if (dec!=null && dec.getQualifiedNameString()
                                .equals(getQualifiedName(type))) {
                            imports.put(dec, id.getText());
                        }
                    }
                }
                protected String getQualifiedName(IMember dec) {
                    IJavaElement parent = dec.getParent();
                    if (parent instanceof ICompilationUnit) {
                        return parent.getParent().getElementName() + "::" +
                                dec.getElementName();
                    }
                    else if (dec.getDeclaringType()!=null) {
                        return getQualifiedName(dec.getDeclaringType()) + "." +
                                dec.getElementName();
                    }
                    else {
                        // Unreachable sentinel that never matches a real qualified name.
                        return "@";
                    }
                }
            });
            collectEdits(newName, oldName, changes, phasedUnit, imports);
        }
    }
    /**
     * Adds to the moved file: new imports for declarations left behind, and
     * deletion of any now-redundant import of the destination package.
     */
    private void collectEditsToMovedFile(String newName,
            String oldName, List<Change> changes,
            PhasedUnit movedPhasedUnit,
            Map<Declaration, String> imports) {
        try {
            IFileVirtualFile virtualFile =
                    (IFileVirtualFile) movedPhasedUnit.getUnitFile();
            IFile file = virtualFile.getFile();
            String path = file.getProjectRelativePath().toPortableString();
            TextFileChange change = fileChanges.get(path);
            if (change==null) {
                change = new TextFileChange(file.getName(), file);
                change.setEdit(new MultiTextEdit());
                changes.add(change);
                fileChanges.put(path, change);
            }
            Tree.CompilationUnit cu =
                    movedPhasedUnit.getCompilationUnit();
            if (!imports.isEmpty()) {
                List<InsertEdit> edits = importEdits(cu,
                        imports.keySet(), imports.values(), null,
                        EditorUtil.getDocument(change));
                for (TextEdit edit: edits) {
                    change.addEdit(edit);
                }
            }
            // An import of the destination package becomes redundant after the move.
            Tree.Import toDelete = findImportNode(cu, newName);
            if (toDelete!=null) {
                change.addEdit(new DeleteEdit(toDelete.getStartIndex(),
                        toDelete.getStopIndex()-toDelete.getStartIndex()+1));
            }
        }
        catch (Exception e) {
            e.printStackTrace();
        }
    }
    /**
     * Adds the import rewrites needed in a referencing unit so its imports point
     * at the declaration's new package.
     */
    private void collectEdits(String newName,
            String oldName, List<Change> changes,
            PhasedUnit phasedUnit,
            Map<Declaration, String> imports) {
        try {
            Tree.CompilationUnit cu =
                    phasedUnit.getCompilationUnit();
            if (!imports.isEmpty()) {
                IFileVirtualFile virtualFile =
                        (IFileVirtualFile) phasedUnit.getUnitFile();
                IFile file = virtualFile.getFile();
                String path = file.getProjectRelativePath().toPortableString();
                TextFileChange change = fileChanges.get(path);
                if (change==null) {
                    change = new TextFileChange(file.getName(), file);
                    change.setEdit(new MultiTextEdit());
                    changes.add(change);
                    fileChanges.put(path, change);
                }
                List<TextEdit> edits =
                        importEditForMove(cu,
                                imports.keySet(), imports.values(),
                                newName, oldName,
                                EditorUtil.getDocument(change));
                if (!edits.isEmpty()) {
                    for (TextEdit edit: edits) {
                        change.addEdit(edit);
                    }
                }
            }
        }
        catch (Exception e) {
            e.printStackTrace();
        }
    }
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_refactor_MoveFileRefactoringParticipant.java
|
112 |
/**
 * Callback interface for observing the progress of a long-running task.
 *
 * NOTE(review): {@code onCompletition} misspells "completion"; renaming it
 * would break every implementor, so the name is documented as-is.
 */
public interface OProgressListener {
    /** Invoked once before the task starts; {@code iTotal} is the expected total work. */
    public void onBegin(Object iTask, long iTotal);
    /** Invoked periodically; return value presumably signals whether to continue — confirm with callers. */
    public boolean onProgress(Object iTask, long iCounter, float iPercent);
    /** Invoked once when the task finishes; {@code iSucceed} tells whether it completed successfully. */
    public void onCompletition(Object iTask, boolean iSucceed);
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_listener_OProgressListener.java
|
2,589 |
/**
 * Elects the master node deterministically by choosing the master-eligible
 * node with the lowest node id, and enforces the configured
 * {@code discovery.zen.minimum_master_nodes} quorum.
 */
public class ElectMasterService extends AbstractComponent {
    public static final String DISCOVERY_ZEN_MINIMUM_MASTER_NODES = "discovery.zen.minimum_master_nodes";
    private final NodeComparator nodeComparator = new NodeComparator();
    // volatile: updated dynamically via the settings service; visibility only,
    // reads/writes are single plain assignments.
    private volatile int minimumMasterNodes;
    public ElectMasterService(Settings settings) {
        super(settings);
        // -1 (the default) disables the quorum check entirely.
        this.minimumMasterNodes = settings.getAsInt(DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1);
        logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes);
    }
    public void minimumMasterNodes(int minimumMasterNodes) {
        this.minimumMasterNodes = minimumMasterNodes;
    }
    public int minimumMasterNodes() {
        return minimumMasterNodes;
    }
    /** Returns true when the master-eligible nodes present satisfy the quorum (or no quorum is set). */
    public boolean hasEnoughMasterNodes(Iterable<DiscoveryNode> nodes) {
        if (minimumMasterNodes < 1) {
            return true;
        }
        int count = 0;
        for (DiscoveryNode node : nodes) {
            if (node.masterNode()) {
                count++;
            }
        }
        return count >= minimumMasterNodes;
    }
    /**
     * Returns a list of the next possible masters.
     */
    public DiscoveryNode[] nextPossibleMasters(ObjectContainer<DiscoveryNode> nodes, int numberOfPossibleMasters) {
        List<DiscoveryNode> sortedNodes = sortedMasterNodes(Arrays.asList(nodes.toArray(DiscoveryNode.class)));
        if (sortedNodes == null) {
            return new DiscoveryNode[0];
        }
        List<DiscoveryNode> nextPossibleMasters = Lists.newArrayListWithCapacity(numberOfPossibleMasters);
        int counter = 0;
        for (DiscoveryNode nextPossibleMaster : sortedNodes) {
            // NOTE(review): `++counter >= numberOfPossibleMasters` breaks BEFORE adding,
            // so at most numberOfPossibleMasters - 1 nodes are returned — presumably the
            // count includes the currently elected master; confirm intended semantics.
            if (++counter >= numberOfPossibleMasters) {
                break;
            }
            nextPossibleMasters.add(nextPossibleMaster);
        }
        return nextPossibleMasters.toArray(new DiscoveryNode[nextPossibleMasters.size()]);
    }
    /**
     * Elects a new master out of the possible nodes, returning it. Returns <tt>null</tt>
     * if no master has been elected.
     */
    public DiscoveryNode electMaster(Iterable<DiscoveryNode> nodes) {
        List<DiscoveryNode> sortedNodes = sortedMasterNodes(nodes);
        if (sortedNodes == null || sortedNodes.isEmpty()) {
            return null;
        }
        // Deterministic choice: lowest node id wins.
        return sortedNodes.get(0);
    }
    // Returns master-eligible nodes sorted by node id, or null when the input
    // iterable is empty (callers treat null and empty alike).
    private List<DiscoveryNode> sortedMasterNodes(Iterable<DiscoveryNode> nodes) {
        List<DiscoveryNode> possibleNodes = Lists.newArrayList(nodes);
        if (possibleNodes.isEmpty()) {
            return null;
        }
        // clean non master nodes
        for (Iterator<DiscoveryNode> it = possibleNodes.iterator(); it.hasNext(); ) {
            DiscoveryNode node = it.next();
            if (!node.masterNode()) {
                it.remove();
            }
        }
        CollectionUtil.introSort(possibleNodes, nodeComparator);
        return possibleNodes;
    }
    /** Orders nodes by their (string) node id. */
    private static class NodeComparator implements Comparator<DiscoveryNode> {
        @Override
        public int compare(DiscoveryNode o1, DiscoveryNode o2) {
            return o1.id().compareTo(o2.id());
        }
    }
}
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_elect_ElectMasterService.java
|
85 |
/**
 * JPA entity persisting the binary payload of a static asset as a BLOB,
 * keyed by the owning static asset's id. Mapped to BLC_STATIC_ASSET_STRG.
 */
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_STATIC_ASSET_STRG")
public class StaticAssetStorageImpl implements StaticAssetStorage {
    private static final long serialVersionUID = 1L;
    // Primary key generated via the Broadleaf id-override table generator.
    @Id
    @GeneratedValue(generator = "StaticAssetStorageId")
    @GenericGenerator(
        name="StaticAssetStorageId",
        strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
        parameters = {
            @Parameter(name="segment_value", value="StaticAssetStorageImpl"),
            @Parameter(name="entity_name", value="org.broadleafcommerce.cms.file.domain.StaticAssetStorageImpl")
        }
    )
    @Column(name = "STATIC_ASSET_STRG_ID")
    protected Long id;
    // Foreign reference (by id, not association) to the owning StaticAsset; indexed for lookup.
    @Column(name ="STATIC_ASSET_ID", nullable = false)
    @Index(name="STATIC_ASSET_ID_INDEX", columnNames={"STATIC_ASSET_ID"})
    protected Long staticAssetId;
    // The asset bytes themselves, stored as a LOB.
    @Column (name = "FILE_DATA", length = Integer.MAX_VALUE - 1)
    @Lob
    protected Blob fileData;
    @Override
    public Long getId() {
        return id;
    }
    @Override
    public void setId(Long id) {
        this.id = id;
    }
    @Override
    public Blob getFileData() {
        return fileData;
    }
    @Override
    public void setFileData(Blob fileData) {
        this.fileData = fileData;
    }
    @Override
    public Long getStaticAssetId() {
        return staticAssetId;
    }
    @Override
    public void setStaticAssetId(Long staticAssetId) {
        this.staticAssetId = staticAssetId;
    }
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_domain_StaticAssetStorageImpl.java
|
291 |
/**
 * A single selectable value belonging to a {@link DataDrivenEnumeration}.
 * Each value carries a lookup key, a user-facing display string and a hidden
 * flag controlling whether it is offered in selection UIs.
 *
 * Note: the redundant {@code public} modifiers were removed — interface
 * members are implicitly public (JLS 9.4).
 */
public interface DataDrivenEnumerationValue extends Serializable {

    /** @return the text shown to users for this value */
    String getDisplay();

    void setDisplay(String display);

    /** @return whether this value should be hidden from selection UIs */
    Boolean getHidden();

    void setHidden(Boolean hidden);

    /** @return the primary key */
    Long getId();

    void setId(Long id);

    /** @return the lookup key identifying this value within its enumeration */
    String getKey();

    void setKey(String key);

    /** @return the enumeration type that owns this value */
    DataDrivenEnumeration getType();

    void setType(DataDrivenEnumeration type);
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_enumeration_domain_DataDrivenEnumerationValue.java
|
2,361 |
/**
 * Runs the mapping (and optional combining) phase of a map-reduce job on this
 * member. Work is driven either per partition ({@link PartitionProcessor},
 * when the key-value source is {@code PartitionIdAware}) or as a single
 * member-wide execution ({@link SingleExecutionProcessor}). Emitted chunks are
 * pushed to the assigned reducers via the {@link MapReduceService}.
 */
public class MapCombineTask<KeyIn, ValueIn, KeyOut, ValueOut, Chunk> {
    // set by cancel(); polled by the processors and processMapping to abort
    private final AtomicBoolean cancelled = new AtomicBoolean();
    private final Mapper<KeyIn, ValueIn, KeyOut, ValueOut> mapper;
    private final MappingPhase<KeyIn, ValueIn, KeyOut, ValueOut> mappingPhase;
    private final KeyValueSource<KeyIn, ValueIn> keyValueSource;
    private final MapReduceService mapReduceService;
    private final JobSupervisor supervisor;
    private final NodeEngine nodeEngine;
    private final String name;
    private final String jobId;
    // number of emitted entries after which a chunk is shipped to reducers
    private final int chunkSize;
    public MapCombineTask(JobTaskConfiguration configuration, JobSupervisor supervisor,
                          MappingPhase<KeyIn, ValueIn, KeyOut, ValueOut> mappingPhase) {
        this.mappingPhase = mappingPhase;
        this.supervisor = supervisor;
        this.mapper = configuration.getMapper();
        this.name = configuration.getName();
        this.jobId = configuration.getJobId();
        this.chunkSize = configuration.getChunkSize();
        this.nodeEngine = configuration.getNodeEngine();
        this.mapReduceService = supervisor.getMapReduceService();
        this.keyValueSource = configuration.getKeyValueSource();
    }
    public String getName() {
        return name;
    }
    public String getJobId() {
        return jobId;
    }
    public int getChunkSize() {
        return chunkSize;
    }
    /** Requests cancellation of this task and of the underlying mapping phase. */
    public void cancel() {
        cancelled.set(true);
        mappingPhase.cancel();
    }
    /**
     * Submits the appropriate processor to the job's executor service:
     * partition-based when the source is partition-aware, otherwise a single
     * member-wide execution.
     */
    public void process() {
        ExecutorService es = mapReduceService.getExecutorService(name);
        if (keyValueSource instanceof PartitionIdAware) {
            es.submit(new PartitionProcessor());
        } else {
            es.submit(new SingleExecutionProcessor());
        }
    }
    /**
     * Executes the mapping phase for one partition, wrapping it with the
     * LifecycleMapper initialize/finalized callbacks when the mapper supports
     * them.
     */
    public final void processMapping(int partitionId, DefaultContext<KeyOut, ValueOut> context,
                                     KeyValueSource<KeyIn, ValueIn> keyValueSource)
            throws Exception {
        context.setPartitionId(partitionId);
        if (mapper instanceof LifecycleMapper) {
            ((LifecycleMapper) mapper).initialize(context);
        }
        mappingPhase.executeMappingPhase(keyValueSource, mapper, context);
        if (mapper instanceof LifecycleMapper) {
            ((LifecycleMapper) mapper).finalized(context);
        }
        if (cancelled.get()) {
            return;
        }
    }
    /**
     * Called on every emit; ships an intermediate chunk to the reducers once
     * the context has collected exactly {@code chunkSize} entries.
     * NOTE(review): the equality check assumes getCollected() advances by one
     * per emit and never skips past chunkSize — confirm.
     */
    void onEmit(DefaultContext<KeyOut, ValueOut> context, int partitionId) {
        // If we have a reducer let's test for chunk size otherwise
        // we need to collect all values locally and wait for final request
        if (supervisor.getConfiguration().getReducerFactory() != null) {
            if (context.getCollected() == chunkSize) {
                Map<KeyOut, Chunk> chunkMap = context.requestChunk();
                // Wrap into IntermediateChunkNotification object
                Map<Address, Map<KeyOut, Chunk>> mapping = mapResultToMember(supervisor, chunkMap);
                // Register remote addresses and partitionId for receiving reducer events
                supervisor.registerReducerEventInterests(partitionId, mapping.keySet());
                for (Map.Entry<Address, Map<KeyOut, Chunk>> entry : mapping.entrySet()) {
                    mapReduceService.sendNotification(entry.getKey(),
                            new IntermediateChunkNotification(entry.getKey(), name, jobId, entry.getValue(), partitionId));
                }
            }
        }
    }
    /**
     * Flips the partition into the reducing state on the job owner and either
     * ships the final chunk to the reducers or marks processing as finished.
     * NOTE(review): a non-SUCCESSFUL result is silently ignored here — confirm
     * an upstream retry covers that case.
     */
    private void finalizeMapping(int partitionId, DefaultContext<KeyOut, ValueOut> context)
            throws Exception {
        RequestPartitionResult result = mapReduceService
                .processRequest(supervisor.getJobOwner(), new RequestPartitionReducing(name, jobId, partitionId), name);
        if (result.getResultState() == SUCCESSFUL) {
            // If we have a reducer defined just send it over
            if (supervisor.getConfiguration().getReducerFactory() != null) {
                Map<KeyOut, Chunk> chunkMap = context.finish();
                if (chunkMap.size() > 0) {
                    sendLastChunkToAssignedReducers(partitionId, chunkMap);
                } else {
                    finalizeProcessing(partitionId);
                }
            }
        }
    }
    /** Marks the partition as processed on the job owner when nothing is left to reduce. */
    private void finalizeProcessing(int partitionId)
            throws Exception {
        // If nothing to reduce we just set partition to processed
        RequestPartitionResult result = mapReduceService
                .processRequest(supervisor.getJobOwner(), new RequestPartitionProcessed(name, jobId, partitionId, REDUCING),
                        name);
        if (result.getResultState() != SUCCESSFUL) {
            throw new RuntimeException("Could not finalize processing for partitionId " + partitionId);
        }
    }
    /**
     * Distributes the final chunk to the assigned reducers and additionally
     * sends an empty LastChunk notification to every reducer that previously
     * received an intermediate chunk but gets nothing in this final round, so
     * each interested reducer sees a terminating event.
     */
    private void sendLastChunkToAssignedReducers(int partitionId, Map<KeyOut, Chunk> chunkMap) {
        Address sender = mapReduceService.getLocalAddress();
        // Wrap into LastChunkNotification object
        Map<Address, Map<KeyOut, Chunk>> mapping = mapResultToMember(supervisor, chunkMap);
        // Register remote addresses and partitionId for receiving reducer events
        supervisor.registerReducerEventInterests(partitionId, mapping.keySet());
        // Send LastChunk notifications
        for (Map.Entry<Address, Map<KeyOut, Chunk>> entry : mapping.entrySet()) {
            Address receiver = entry.getKey();
            Map<KeyOut, Chunk> chunk = entry.getValue();
            mapReduceService
                    .sendNotification(receiver, new LastChunkNotification(receiver, name, jobId, sender, partitionId, chunk));
        }
        // Send LastChunk notification to notify reducers that received at least one chunk
        Set<Address> addresses = mapping.keySet();
        Collection<Address> reducerInterests = supervisor.getReducerEventInterests(partitionId);
        if (reducerInterests != null) {
            for (Address address : reducerInterests) {
                if (!addresses.contains(address)) {
                    mapReduceService.sendNotification(address,
                            new LastChunkNotification(address, name, jobId, sender, partitionId, Collections.emptyMap()));
                }
            }
        }
    }
    /** Returns the partition to the job owner for later processing (e.g. assignment not ready). */
    private void postponePartitionProcessing(int partitionId)
            throws Exception {
        RequestPartitionResult result = mapReduceService
                .processRequest(supervisor.getJobOwner(), new PostPonePartitionProcessing(name, jobId, partitionId), name);
        if (result.getResultState() != SUCCESSFUL) {
            throw new RuntimeException("Could not postpone processing for partitionId " + partitionId + " -> "
                    + result.getResultState());
        }
    }
    /** Reports the failure to the supervisor; rethrows only fatal Errors. */
    private void handleProcessorThrowable(Throwable t) {
        notifyRemoteException(supervisor, t);
        if (t instanceof Error) {
            ExceptionUtil.sneakyThrow(t);
        }
    }
    /**
     * This class implements the partitionId based mapping phase
     */
    private class PartitionProcessor
            implements Runnable {
        @Override
        public void run() {
            // wrap the source with a stats facade when stats are requested
            KeyValueSource<KeyIn, ValueIn> delegate = keyValueSource;
            if (supervisor.getConfiguration().isCommunicateStats()) {
                delegate = new KeyValueSourceFacade<KeyIn, ValueIn>(keyValueSource, supervisor);
            }
            // keep claiming partitions from the job owner until none remain
            while (true) {
                if (cancelled.get()) {
                    return;
                }
                Integer partitionId = findNewPartitionProcessing();
                if (partitionId == null) {
                    // Job's done
                    return;
                }
                // Migration event occurred, just retry
                if (partitionId == -1) {
                    continue;
                }
                try {
                    // This call cannot be delegated
                    ((PartitionIdAware) keyValueSource).setPartitionId(partitionId);
                    delegate.reset();
                    if (delegate.open(nodeEngine)) {
                        DefaultContext<KeyOut, ValueOut> context = supervisor.getOrCreateContext(MapCombineTask.this);
                        processMapping(partitionId, context, delegate);
                        delegate.close();
                        finalizeMapping(partitionId, context);
                    } else {
                        // Partition assignment might not be ready yet, postpone the processing and retry later
                        postponePartitionProcessing(partitionId);
                    }
                } catch (Throwable t) {
                    handleProcessorThrowable(t);
                }
            }
        }
        /**
         * Asks the job owner for the next partition to map.
         *
         * @return the partition id, {@code -1} to signal "retry" (state check
         *         failed, e.g. migration), or {@code null} when the job is done
         *         or the supervisor no longer exists
         */
        private Integer findNewPartitionProcessing() {
            try {
                RequestPartitionResult result = mapReduceService
                        .processRequest(supervisor.getJobOwner(), new RequestPartitionMapping(name, jobId), name);
                // JobSupervisor doesn't exists anymore on jobOwner, job done?
                if (result.getResultState() == NO_SUPERVISOR) {
                    return null;
                } else if (result.getResultState() == CHECK_STATE_FAILED) {
                    // retry
                    return -1;
                } else if (result.getResultState() == NO_MORE_PARTITIONS) {
                    return null;
                } else {
                    return result.getPartitionId();
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }
    /**
     * This class implements the non partitionId based mapping phase
     */
    private class SingleExecutionProcessor
            implements Runnable {
        @Override
        public void run() {
            try {
                // acquire a virtual member-id "partition" from the job owner
                RequestPartitionResult result = mapReduceService
                        .processRequest(supervisor.getJobOwner(), new RequestMemberIdAssignment(name, jobId), name);
                // JobSupervisor doesn't exists anymore on jobOwner, job done?
                if (result.getResultState() == NO_SUPERVISOR) {
                    return;
                } else if (result.getResultState() == NO_MORE_PARTITIONS) {
                    return;
                }
                int partitionId = result.getPartitionId();
                KeyValueSource<KeyIn, ValueIn> delegate = keyValueSource;
                if (supervisor.getConfiguration().isCommunicateStats()) {
                    delegate = new KeyValueSourceFacade<KeyIn, ValueIn>(keyValueSource, supervisor);
                }
                delegate.reset();
                if (delegate.open(nodeEngine)) {
                    DefaultContext<KeyOut, ValueOut> context = supervisor.getOrCreateContext(MapCombineTask.this);
                    processMapping(partitionId, context, delegate);
                    delegate.close();
                    finalizeMapping(partitionId, context);
                } else {
                    // Partition assignment might not be ready yet, postpone the processing and retry later
                    postponePartitionProcessing(partitionId);
                }
            } catch (Throwable t) {
                handleProcessorThrowable(t);
            }
        }
    }
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_MapCombineTask.java
|
1,138 |
/**
 * SQL method {@code .asList()}: converts the receiver into a {@link List}.
 * Collections/iterables/iterators are copied element-by-element; {@code null}
 * yields an empty list; any other single value becomes a one-element list.
 */
public class OSQLMethodAsList extends OAbstractSQLMethod {
    public static final String NAME = "aslist";
    public OSQLMethodAsList() {
        super(NAME);
    }
    @SuppressWarnings("unchecked")
    @Override
    public Object execute(OIdentifiable iCurrentRecord, OCommandContext iContext, Object ioResult, Object[] iMethodParams) {
        if (ioResult instanceof List)
            // ALREADY A LIST
            return ioResult;
        if (ioResult == null)
            // NULL VALUE, RETURN AN EMPTY LIST
            return new ArrayList<Object>();
        if (ioResult instanceof Collection<?>)
            return new ArrayList<Object>((Collection<Object>) ioResult);
        else if (ioResult instanceof Iterable<?>)
            ioResult = ((Iterable<?>) ioResult).iterator();
        if (ioResult instanceof Iterator<?>) {
            // presize when the iterator advertises its size
            final List<Object> list = ioResult instanceof OSizeable ? new ArrayList<Object>(((OSizeable) ioResult).size())
                    : new ArrayList<Object>();
            for (Iterator<Object> iter = (Iterator<Object>) ioResult; iter.hasNext();)
                list.add(iter.next());
            return list;
        }
        // SINGLE ITEM: WRAP IT IN A ONE-ELEMENT LIST
        // Fix: this branch previously built and returned a HashSet, violating
        // the method's List contract.
        final List<Object> list = new ArrayList<Object>();
        list.add(ioResult);
        return list;
    }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodAsList.java
|
8 |
// Feed the pre-rendered HTML (captured from the enclosing scope) to the
// browser control as an anonymous, history-less input.
setInput(new BrowserInput(null) {
    @Override
    public String getHtml() {
        return content;
    }
    @Override
    public String getInputName() {
        // no display name for this transient input
        return "";
    }
});
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_browser_BrowserInformationControl.java
|
1,541 |
/**
 * Extension handler that keeps the cart consistent with the request locale:
 * either clears the cart on a locale switch (when {@code clearCartOnLocaleSwitch}
 * is set) or re-translates the item names and option values in place.
 */
@Service("bli18nUpdateCartServiceExtensionHandler")
public class i18nUpdateCartServiceExtensionHandler extends AbstractUpdateCartServiceExtensionHandler
        implements UpdateCartServiceExtensionHandler {
    protected static final Log LOG = LogFactory.getLog(i18nUpdateCartServiceExtensionHandler.class);
    @Value("${clearCartOnLocaleSwitch}")
    protected boolean clearCartOnLocaleSwitch = false;
    @Resource(name = "blCatalogService")
    protected CatalogService catalogService;
    @Resource(name = "blUpdateCartServiceExtensionManager")
    protected UpdateCartServiceExtensionManager extensionManager;
    /** Registers this handler with the extension manager when enabled. */
    @PostConstruct
    public void init() {
        if (isEnabled()) {
            extensionManager.getHandlers().add(this);
        }
    }
    /**
     * If the locale of the cart does not match the current locale, then this extension handler will
     * attempt to translate the order items.
     *
     * The property "clearCartOnLocaleSwitch" can be set to true if the implementation desires to
     * create a new cart when the locale is switched (3.0.6 and prior behavior).
     *
     * @param cart the current cart
     * @param resultHolder carries "clearCart"/"saveCart" flags back to the caller
     * @return always HANDLED_CONTINUE so other handlers still run
     */
    public ExtensionResultStatusType updateAndValidateCart(Order cart, ExtensionResultHolder resultHolder) {
        if (BroadleafRequestContext.hasLocale()) {
            BroadleafRequestContext brc = BroadleafRequestContext.getBroadleafRequestContext();
            // NOTE(review): matches() treats the locale code as a regex; fine for
            // plain codes like "en_US" but confirm no metacharacters can occur
            if (!brc.getLocale().getLocaleCode().matches(cart.getLocale().getLocaleCode())) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("The cart Locale [" + cart.getLocale().getLocaleCode() +
                            "] does not match the current locale [" + brc.getLocale().getLocaleCode() + "]");
                }
                if (clearCartOnLocaleSwitch) {
                    resultHolder.getContextMap().put("clearCart", Boolean.TRUE);
                } else {
                    fixTranslations(cart);
                    cart.setLocale(brc.getLocale());
                    resultHolder.getContextMap().put("saveCart", Boolean.TRUE);
                }
            }
        }
        return ExtensionResultStatusType.HANDLED_CONTINUE;
    }
    /** Re-translates every discrete item and every bundle item in the cart. */
    protected void fixTranslations(Order cart) {
        for (DiscreteOrderItem orderItem : cart.getDiscreteOrderItems()) {
            Sku sku = orderItem.getSku();
            translateOrderItem(orderItem, sku);
        }
        for (OrderItem orderItem : cart.getOrderItems()) {
            if (orderItem instanceof BundleOrderItem) {
                BundleOrderItem bundleItem = (BundleOrderItem) orderItem;
                Sku sku = bundleItem.getSku();
                translateOrderItem(orderItem, sku);
            }
        }
    }
    /**
     * Copies the (locale-resolved) name and product option values from the sku
     * onto the order item. No-op when the sku is null.
     *
     * Fix: the original dereferenced {@code sku.getProductOptionValues()}
     * outside the {@code sku != null} guard, throwing an NPE for null skus.
     */
    protected void translateOrderItem(OrderItem orderItem, Sku sku) {
        if (sku == null) {
            return;
        }
        orderItem.setName(sku.getName());
        if (sku.getProductOptionValues() != null) {
            for (ProductOptionValue optionValue : sku.getProductOptionValues()) {
                String key = optionValue.getProductOption().getAttributeName();
                OrderItemAttribute attr = orderItem.getOrderItemAttributes().get(key);
                if (attr != null) {
                    attr.setValue(optionValue.getAttributeValue());
                } else {
                    OrderItemAttribute attribute = new OrderItemAttributeImpl();
                    attribute.setName(key);
                    attribute.setValue(optionValue.getAttributeValue());
                    attribute.setOrderItem(orderItem);
                    orderItem.getOrderItemAttributes().put(key, attribute);
                }
            }
        }
    }
}
| 1no label
|
core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_service_i18nUpdateCartServiceExtensionHandler.java
|
1,094 |
/**
 * Base class for SQL functions that receive configuration parameters at parse
 * time. Subclasses read {@link #configuredParameters} captured by
 * {@link #config(Object[])}.
 */
public abstract class OSQLFunctionConfigurableAbstract extends OSQLFunctionAbstract {
    // parameters supplied in the SQL text, stored as-is for subclasses
    protected Object[] configuredParameters;
    protected OSQLFunctionConfigurableAbstract(final String iName, final int iMinParams, final int iMaxParams) {
        super(iName, iMinParams, iMaxParams);
    }
    @Override
    public void config(final Object[] iConfiguredParameters) {
        configuredParameters = iConfiguredParameters;
    }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_OSQLFunctionConfigurableAbstract.java
|
27 |
/**
 * Completion node for runAfterEither*: runs {@code fn} as soon as EITHER
 * source future has a result, then completes {@code dst}. At most one of the
 * racing triggers executes the action, guarded by compareAndSet(0, 1) on the
 * Completion's inherited state.
 */
static final class RunAfterEither extends Completion {
    final CompletableFuture<?> src;
    final CompletableFuture<?> snd;
    final Runnable fn;
    final CompletableFuture<Void> dst;
    final Executor executor;
    RunAfterEither(CompletableFuture<?> src,
                   CompletableFuture<?> snd,
                   Runnable fn,
                   CompletableFuture<Void> dst,
                   Executor executor) {
        this.src = src; this.snd = snd;
        this.fn = fn; this.dst = dst;
        this.executor = executor;
    }
    public final void run() {
        final CompletableFuture<?> a;
        final CompletableFuture<?> b;
        final Runnable fn;
        final CompletableFuture<Void> dst;
        Object r; Throwable ex;
        // proceed only if at least one source has a published result AND this
        // node wins the CAS (so the action runs exactly once)
        if ((dst = this.dst) != null &&
            (fn = this.fn) != null &&
            (((a = this.src) != null && (r = a.result) != null) ||
             ((b = this.snd) != null && (r = b.result) != null)) &&
            compareAndSet(0, 1)) {
            // AltResult encodes null-or-exceptional outcomes of the source
            if (r instanceof AltResult)
                ex = ((AltResult)r).ex;
            else
                ex = null;
            Executor e = executor;
            if (ex == null) {
                try {
                    if (e != null)
                        // async variant: hand off to the executor; AsyncRun
                        // completes dst itself
                        e.execute(new AsyncRun(fn, dst));
                    else
                        fn.run();
                } catch (Throwable rex) {
                    ex = rex;
                }
            }
            // complete dst here only when running synchronously or when the
            // source/action failed (the async path completes dst elsewhere)
            if (e == null || ex != null)
                dst.internalComplete(null, ex);
        }
    }
    private static final long serialVersionUID = 5232453952276885070L;
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
|
974 |
/**
 * Binary serializer for {@link OCompositeKey}. Layout (for the heap, native
 * and direct-memory variants alike): total size (int), key count (int), then
 * per key a one-byte serializer id followed by the key's own serialization.
 * Optional hints supply explicit {@link OType}s per key position; otherwise
 * the type is derived from each key's runtime class.
 *
 * Refactor: the identical per-key type resolution that was duplicated five
 * times is now centralized in {@link #typeForKey(OType[], int, Object)}.
 */
public class OCompositeKeySerializer implements OBinarySerializer<OCompositeKey>, OStreamSerializer {
    public static final String NAME = "cks";
    public static final OCompositeKeySerializer INSTANCE = new OCompositeKeySerializer();
    public static final byte ID = 14;

    /**
     * Resolves the type used to serialize the key at position {@code i}: the
     * explicit hint when one is supplied for that position, otherwise the type
     * derived from the key's class.
     */
    private static OType typeForKey(final OType[] types, final int i, final Object key) {
        if (types.length > i)
            return types[i];
        return OType.getTypeByClass(key.getClass());
    }

    @SuppressWarnings("unchecked")
    public int getObjectSize(OCompositeKey compositeKey, Object... hints) {
        final OType[] types = getKeyTypes(hints);
        final List<Object> keys = compositeKey.getKeys();
        // two leading ints: total serialized size and key count
        int size = 2 * OIntegerSerializer.INT_SIZE;
        final OBinarySerializerFactory factory = OBinarySerializerFactory.INSTANCE;
        for (int i = 0; i < keys.size(); i++) {
            final Object key = keys.get(i);
            final OType type = typeForKey(types, i, key);
            size += OBinarySerializerFactory.TYPE_IDENTIFIER_SIZE
                    + ((OBinarySerializer<Object>) factory.getObjectSerializer(type)).getObjectSize(key);
        }
        return size;
    }

    public void serialize(OCompositeKey compositeKey, byte[] stream, int startPosition, Object... hints) {
        final OType[] types = getKeyTypes(hints);
        final List<Object> keys = compositeKey.getKeys();
        final int keysSize = keys.size();
        final int oldStartPosition = startPosition;
        // skip the total-size slot; it is back-filled at the end
        startPosition += OIntegerSerializer.INT_SIZE;
        OIntegerSerializer.INSTANCE.serialize(keysSize, stream, startPosition);
        startPosition += OIntegerSerializer.INT_SIZE;
        final OBinarySerializerFactory factory = OBinarySerializerFactory.INSTANCE;
        for (int i = 0; i < keys.size(); i++) {
            final Object key = keys.get(i);
            final OType type = typeForKey(types, i, key);
            @SuppressWarnings("unchecked")
            OBinarySerializer<Object> binarySerializer = (OBinarySerializer<Object>) factory.getObjectSerializer(type);
            stream[startPosition] = binarySerializer.getId();
            startPosition += OBinarySerializerFactory.TYPE_IDENTIFIER_SIZE;
            binarySerializer.serialize(key, stream, startPosition);
            startPosition += binarySerializer.getObjectSize(key);
        }
        // back-fill the total serialized size
        OIntegerSerializer.INSTANCE.serialize((startPosition - oldStartPosition), stream, oldStartPosition);
    }

    @SuppressWarnings("unchecked")
    public OCompositeKey deserialize(byte[] stream, int startPosition) {
        final OCompositeKey compositeKey = new OCompositeKey();
        // skip the total-size slot
        startPosition += OIntegerSerializer.INT_SIZE;
        final int keysSize = OIntegerSerializer.INSTANCE.deserialize(stream, startPosition);
        startPosition += OIntegerSerializer.INSTANCE.getObjectSize(keysSize);
        final OBinarySerializerFactory factory = OBinarySerializerFactory.INSTANCE;
        for (int i = 0; i < keysSize; i++) {
            final byte serializerId = stream[startPosition];
            startPosition += OBinarySerializerFactory.TYPE_IDENTIFIER_SIZE;
            OBinarySerializer<Object> binarySerializer = (OBinarySerializer<Object>) factory.getObjectSerializer(serializerId);
            final Object key = binarySerializer.deserialize(stream, startPosition);
            compositeKey.addKey(key);
            startPosition += binarySerializer.getObjectSize(key);
        }
        return compositeKey;
    }

    public int getObjectSize(byte[] stream, int startPosition) {
        // the first int of the record is its total serialized size
        return OIntegerSerializer.INSTANCE.deserialize(stream, startPosition);
    }

    public byte getId() {
        return ID;
    }

    public byte[] toStream(final Object iObject) throws IOException {
        throw new UnsupportedOperationException("CSV storage format is out of dated and is not supported.");
    }

    /** Legacy CSV-format reader kept for backward compatibility. */
    public Object fromStream(final byte[] iStream) throws IOException {
        final OCompositeKey compositeKey = new OCompositeKey();
        final OMemoryInputStream inputStream = new OMemoryInputStream(iStream);
        final int keysSize = inputStream.getAsInteger();
        for (int i = 0; i < keysSize; i++) {
            final byte[] keyBytes = inputStream.getAsByteArray();
            final String keyString = OBinaryProtocol.bytes2string(keyBytes);
            // each key is encoded as "<TYPE>,<value>"
            final int typeSeparatorPos = keyString.indexOf(',');
            final OType type = OType.valueOf(keyString.substring(0, typeSeparatorPos));
            compositeKey.addKey(ORecordSerializerStringAbstract.simpleValueFromStream(keyString.substring(typeSeparatorPos + 1), type));
        }
        return compositeKey;
    }

    public String getName() {
        return NAME;
    }

    public int getObjectSizeNative(byte[] stream, int startPosition) {
        return OIntegerSerializer.INSTANCE.deserializeNative(stream, startPosition);
    }

    public void serializeNative(OCompositeKey compositeKey, byte[] stream, int startPosition, Object... hints) {
        final OType[] types = getKeyTypes(hints);
        final List<Object> keys = compositeKey.getKeys();
        final int keysSize = keys.size();
        final int oldStartPosition = startPosition;
        startPosition += OIntegerSerializer.INT_SIZE;
        OIntegerSerializer.INSTANCE.serializeNative(keysSize, stream, startPosition);
        startPosition += OIntegerSerializer.INT_SIZE;
        final OBinarySerializerFactory factory = OBinarySerializerFactory.INSTANCE;
        for (int i = 0; i < keys.size(); i++) {
            final Object key = keys.get(i);
            final OType type = typeForKey(types, i, key);
            @SuppressWarnings("unchecked")
            OBinarySerializer<Object> binarySerializer = (OBinarySerializer<Object>) factory.getObjectSerializer(type);
            stream[startPosition] = binarySerializer.getId();
            startPosition += OBinarySerializerFactory.TYPE_IDENTIFIER_SIZE;
            binarySerializer.serializeNative(key, stream, startPosition);
            startPosition += binarySerializer.getObjectSize(key);
        }
        OIntegerSerializer.INSTANCE.serializeNative((startPosition - oldStartPosition), stream, oldStartPosition);
    }

    public OCompositeKey deserializeNative(byte[] stream, int startPosition) {
        final OCompositeKey compositeKey = new OCompositeKey();
        startPosition += OIntegerSerializer.INT_SIZE;
        final int keysSize = OIntegerSerializer.INSTANCE.deserializeNative(stream, startPosition);
        startPosition += OIntegerSerializer.INSTANCE.getObjectSize(keysSize);
        final OBinarySerializerFactory factory = OBinarySerializerFactory.INSTANCE;
        for (int i = 0; i < keysSize; i++) {
            final byte serializerId = stream[startPosition];
            startPosition += OBinarySerializerFactory.TYPE_IDENTIFIER_SIZE;
            OBinarySerializer<Object> binarySerializer = (OBinarySerializer<Object>) factory.getObjectSerializer(serializerId);
            final Object key = binarySerializer.deserializeNative(stream, startPosition);
            compositeKey.addKey(key);
            startPosition += binarySerializer.getObjectSize(key);
        }
        return compositeKey;
    }

    @Override
    public void serializeInDirectMemory(OCompositeKey compositeKey, ODirectMemoryPointer pointer, long offset, Object... hints) {
        final OType[] types = getKeyTypes(hints);
        final List<Object> keys = compositeKey.getKeys();
        final int keysSize = keys.size();
        final long oldStartOffset = offset;
        offset += OIntegerSerializer.INT_SIZE;
        pointer.setInt(offset, keysSize);
        offset += OIntegerSerializer.INT_SIZE;
        final OBinarySerializerFactory factory = OBinarySerializerFactory.INSTANCE;
        for (int i = 0; i < keys.size(); i++) {
            final Object key = keys.get(i);
            final OType type = typeForKey(types, i, key);
            @SuppressWarnings("unchecked")
            OBinarySerializer<Object> binarySerializer = (OBinarySerializer<Object>) factory.getObjectSerializer(type);
            pointer.setByte(offset, binarySerializer.getId());
            offset += OBinarySerializerFactory.TYPE_IDENTIFIER_SIZE;
            binarySerializer.serializeInDirectMemory(key, pointer, offset);
            offset += binarySerializer.getObjectSize(key);
        }
        pointer.setInt(oldStartOffset, (int) (offset - oldStartOffset));
    }

    /** Extracts the per-position type hints; empty array when none were given. */
    private OType[] getKeyTypes(Object[] hints) {
        final OType[] types;
        if (hints != null && hints.length > 0)
            types = (OType[]) hints;
        else
            types = new OType[0];
        return types;
    }

    @Override
    public OCompositeKey deserializeFromDirectMemory(ODirectMemoryPointer pointer, long offset) {
        final OCompositeKey compositeKey = new OCompositeKey();
        offset += OIntegerSerializer.INT_SIZE;
        final int keysSize = pointer.getInt(offset);
        offset += OIntegerSerializer.INT_SIZE;
        final OBinarySerializerFactory factory = OBinarySerializerFactory.INSTANCE;
        for (int i = 0; i < keysSize; i++) {
            final byte serializerId = pointer.getByte(offset);
            offset += OBinarySerializerFactory.TYPE_IDENTIFIER_SIZE;
            OBinarySerializer<Object> binarySerializer = (OBinarySerializer<Object>) factory.getObjectSerializer(serializerId);
            final Object key = binarySerializer.deserializeFromDirectMemory(pointer, offset);
            compositeKey.addKey(key);
            offset += binarySerializer.getObjectSize(key);
        }
        return compositeKey;
    }

    @Override
    public int getObjectSizeInDirectMemory(ODirectMemoryPointer pointer, long offset) {
        return pointer.getInt(offset);
    }

    public boolean isFixedLength() {
        return false;
    }

    public int getFixedLength() {
        return 0;
    }

    /**
     * Normalizes every key through its serializer's own preprocess step,
     * producing a new composite key. Returns {@code null} for {@code null} input.
     */
    @Override
    public OCompositeKey preprocess(OCompositeKey value, Object... hints) {
        if (value == null)
            return null;
        final OType[] types = getKeyTypes(hints);
        final List<Object> keys = value.getKeys();
        final OCompositeKey compositeKey = new OCompositeKey();
        final OBinarySerializerFactory factory = OBinarySerializerFactory.INSTANCE;
        for (int i = 0; i < keys.size(); i++) {
            final Object key = keys.get(i);
            final OType type = typeForKey(types, i, key);
            OBinarySerializer<Object> keySerializer = ((OBinarySerializer<Object>) factory.getObjectSerializer(type));
            compositeKey.addKey(keySerializer.preprocess(key));
        }
        return compositeKey;
    }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_serialization_serializer_binary_impl_index_OCompositeKeySerializer.java
|
162 |
/**
 * TestNG tests for {@code OStringSerializer}: verifies size computation and
 * the round-trip of serialize/deserialize in heap, native and direct-memory
 * forms. All (de)serialization is done at a fixed offset of 7 to exercise
 * non-zero start positions; FIELD_SIZE includes that offset.
 */
@Test
public class StringSerializerTest {
    private int FIELD_SIZE;
    private String OBJECT;
    private OStringSerializer stringSerializer;
    byte[] stream;
    @BeforeClass
    public void beforeClass() {
        stringSerializer = new OStringSerializer();
        // build a random string of 5..24 arbitrary chars
        Random random = new Random();
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < random.nextInt(20) + 5; i++) {
            sb.append((char) random.nextInt());
        }
        OBJECT = sb.toString();
        // 2 bytes per char + 4-byte length prefix + 7-byte test offset
        FIELD_SIZE = OBJECT.length() * 2 + 4 + 7;
        stream = new byte[FIELD_SIZE];
    }
    public void testFieldSize() {
        // serializer reports the size WITHOUT the test offset
        Assert.assertEquals(stringSerializer.getObjectSize(OBJECT), FIELD_SIZE - 7);
    }
    public void testSerialize() {
        stringSerializer.serialize(OBJECT, stream, 7);
        Assert.assertEquals(stringSerializer.deserialize(stream, 7), OBJECT);
    }
    public void testSerializeNative() {
        stringSerializer.serializeNative(OBJECT, stream, 7);
        Assert.assertEquals(stringSerializer.deserializeNative(stream, 7), OBJECT);
    }
    public void testNativeDirectMemoryCompatibility() {
        // native-serialized bytes must be readable through direct memory
        stringSerializer.serializeNative(OBJECT, stream, 7);
        ODirectMemoryPointer pointer = new ODirectMemoryPointer(stream);
        try {
            Assert.assertEquals(stringSerializer.deserializeFromDirectMemory(pointer, 7), OBJECT);
        } finally {
            pointer.free();
        }
    }
}
| 0true
|
commons_src_test_java_com_orientechnologies_common_serialization_types_StringSerializerTest.java
|
1,909 |
/**
 * Serializable holder for the entries produced by a query, together with the
 * ids of the partitions the result covers. Entry insertion order is preserved
 * (LinkedHashSet) and duplicates are collapsed.
 */
public class QueryResult implements DataSerializable {
    private List<Integer> partitionIds;
    private final Set<QueryResultEntry> result = new LinkedHashSet<QueryResultEntry>();
    public List<Integer> getPartitionIds() {
        return partitionIds;
    }
    public void setPartitionIds(List<Integer> partitionIds) {
        this.partitionIds = partitionIds;
    }
    public void add(QueryResultEntry resultEntry) {
        result.add(resultEntry);
    }
    public Set<QueryResultEntry> getResult() {
        return result;
    }
    public void writeData(ObjectDataOutput out) throws IOException {
        // partition ids: count followed by each id (count 0 when unset)
        if (partitionIds == null) {
            out.writeInt(0);
        } else {
            out.writeInt(partitionIds.size());
            for (Integer partitionId : partitionIds) {
                out.writeInt(partitionId);
            }
        }
        // entries: count followed by each entry's own serialization
        out.writeInt(result.size());
        for (QueryResultEntry entry : result) {
            ((QueryResultEntryImpl) entry).writeData(out);
        }
    }
    public void readData(ObjectDataInput in) throws IOException {
        int partitionCount = in.readInt();
        if (partitionCount > 0) {
            partitionIds = new ArrayList<Integer>(partitionCount);
            for (int i = 0; i < partitionCount; i++) {
                partitionIds.add(in.readInt());
            }
        }
        int entryCount = in.readInt();
        for (int i = 0; i < entryCount; i++) {
            QueryResultEntryImpl entry = new QueryResultEntryImpl();
            entry.readData(in);
            result.add(entry);
        }
    }
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_QueryResult.java
|
94 |
/**
 * Describes a parameter of a console-command method so the console framework
 * can render help text and validate input at runtime.
 */
@Target(ElementType.PARAMETER)
@Retention(RetentionPolicy.RUNTIME)
public @interface ConsoleParameter {
    // display name of the parameter (empty means unnamed)
    String name() default "";
    // human-readable description used in help output
    String description() default "";
    // true when the parameter may be omitted by the user
    boolean optional() default false;
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_console_annotation_ConsoleParameter.java
|
313 |
/**
 * Resolves the list of Spring context file locations for a given context type
 * by reading the bundled StandardConfigLocations.txt and appending any extra
 * locations from the "extra.config.locations" system property. Only locations
 * that exist on the classpath and match the requested context type are kept.
 */
public class StandardConfigLocations {
    private static final Log LOG = LogFactory.getLog(StandardConfigLocations.class);
    public static final String EXTRACONFIGLOCATIONSKEY = "extra.config.locations";
    public static final int ALLCONTEXTTYPE = 0;
    public static final int WEBCONTEXTTYPE = 1;
    public static final int SERVICECONTEXTTYPE = 2;
    public static final int TESTCONTEXTTYPE = 3;
    public static final int APPCONTEXTTYPE = 4;
    /**
     * @param contextType one of the *CONTEXTTYPE constants above
     * @return the matching context file locations, in file order
     * @throws IOException if the location list cannot be read
     */
    public static String[] retrieveAll(int contextType) throws IOException {
        String[] response;
        BufferedReader reader = null;
        try {
            reader = new BufferedReader(new InputStreamReader(
                    StandardConfigLocations.class.getResourceAsStream("StandardConfigLocations.txt")));
            ArrayList<String> items = new ArrayList<String>();
            // read every non-null line of the bundled location list
            String line;
            while ((line = reader.readLine()) != null) {
                addContextFile(contextType, items, line);
            }
            // append user-supplied, space-separated extra locations
            String extraConfigFiles = System.getProperty(EXTRACONFIGLOCATIONSKEY);
            if (extraConfigFiles != null) {
                for (String file : extraConfigFiles.split(" ")) {
                    addContextFile(contextType, items, file);
                }
            }
            response = items.toArray(new String[0]);
        } finally {
            if (reader != null) {
                try {
                    reader.close();
                } catch (Throwable e) {
                    // fixed: the previous message ("Unable to merge source and
                    // patch locations") did not describe this failure
                    LOG.error("Unable to close the configuration location stream", e);
                }
            }
        }
        return response;
    }
    /**
     * Adds {@code candidate} to {@code items} when it is not a comment, not
     * blank, exists on the classpath, and matches the requested context type.
     */
    private static void addContextFile(int contextType, ArrayList<String> items, String candidate) {
        if (!candidate.startsWith("#") && candidate.trim().length() > 0
                && StandardConfigLocations.class.getClassLoader().getResource(candidate.trim()) != null) {
            if (
                contextType == ALLCONTEXTTYPE ||
                ((contextType == WEBCONTEXTTYPE || contextType == APPCONTEXTTYPE) && candidate.indexOf("-web-") >= 0) ||
                ((contextType == SERVICECONTEXTTYPE || contextType == TESTCONTEXTTYPE || contextType == APPCONTEXTTYPE) && candidate.indexOf("-web-") < 0 && candidate.indexOf("-test") < 0 && candidate.indexOf("-admin-") < 0) ||
                ((contextType == SERVICECONTEXTTYPE || contextType == TESTCONTEXTTYPE || contextType == APPCONTEXTTYPE) && candidate.indexOf("-admin-applicationContext-persistence") >= 0) ||
                (contextType == TESTCONTEXTTYPE && (candidate.indexOf("-test") >= 0 || candidate.indexOf("-admin-") >= 0 || candidate.indexOf("-web-") >= 0))
            ){
                items.add(candidate.trim());
            }
        }
    }
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_StandardConfigLocations.java
|
2,609 |
/**
 * Transport-level ping request used by node fault detection. Carries only the
 * id of the node we assume we are pinging so the receiver can detect a
 * mismatch (e.g. after a restart).
 */
static class PingRequest extends TransportRequest {
    // the (assumed) node id we are pinging
    private String nodeId;
    PingRequest() {
    }
    PingRequest(String nodeId) {
        this.nodeId = nodeId;
    }
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        nodeId = in.readString();
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(nodeId);
    }
}
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_fd_NodesFaultDetection.java
|
109 |
/**
 * Verifies that member attributes (set via the cluster's local {@link Member})
 * are propagated to other cluster members and to clients, both for attributes
 * configured up-front and for attributes added/changed/removed at runtime.
 * <p>
 * Changes vs. previous revision: removed an unused {@code TestHazelcastInstanceFactory}
 * local, normalized the deprecated {@code client.getLifecycleService().shutdown()}
 * call to {@code client.shutdown()} for consistency with the sibling tests, and
 * extracted the duplicated config/member-lookup/client-assertion code into helpers.
 */
@RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class ClientMemberAttributeTest extends HazelcastTestSupport {

    @After
    public void cleanup() {
        HazelcastClient.shutdownAll();
        Hazelcast.shutdownAll();
    }

    /** Builds the TCP/IP loopback join config shared by the two-node tests. */
    private static Config newTcpIpConfig() {
        Config c = new Config();
        c.getNetworkConfig().getJoin().getTcpIpConfig().addMember("127.0.0.1").setEnabled(true);
        return c;
    }

    /** Returns the member of {@code h}'s cluster view that is not {@code h}'s own local member. */
    private static Member findOtherMember(HazelcastInstance h) {
        Member member = null;
        for (Member m : h.getCluster().getMembers()) {
            if (m == h.getCluster().getLocalMember()) {
                continue;
            }
            member = m;
        }
        assertNotNull(member);
        return member;
    }

    /**
     * Asserts that the client's view of {@code expectedMember} carries the given int
     * attribute value ({@code null} meaning the attribute must be absent).
     */
    private static void assertClientSeesIntAttribute(HazelcastInstance client, Member expectedMember,
                                                    String key, Integer expectedValue) {
        boolean found = false;
        for (Member m : client.getCluster().getMembers()) {
            if (m.equals(expectedMember)) {
                if (expectedValue == null) {
                    assertNull(m.getIntAttribute(key));
                } else {
                    assertEquals((int) expectedValue, (int) m.getIntAttribute(key));
                }
                found = true;
            }
        }
        assertTrue(found);
    }

    @Test(timeout = 40000)
    public void testChangeMemberAttributes() throws Exception {
        final int count = 100;
        final HazelcastInstance instance = Hazelcast.newHazelcastInstance();

        // Client registers a listener that counts one attribute-change event per update.
        final ClientConfig config = new ClientConfig();
        final ListenerConfig listenerConfig = new ListenerConfig();
        final CountDownLatch countDownLatch = new CountDownLatch(count);
        listenerConfig.setImplementation(new LatchMembershipListener(countDownLatch));
        config.addListenerConfig(listenerConfig);
        HazelcastClient.newHazelcastClient(config);

        final Member localMember = instance.getCluster().getLocalMember();
        for (int i = 0; i < count; i++) {
            localMember.setStringAttribute("key" + i, HazelcastTestSupport.randomString());
        }
        assertOpenEventually(countDownLatch);
    }

    @Test(timeout = 120000)
    public void testConfigAttributes() throws Exception {
        Config c = newTcpIpConfig();
        MemberAttributeConfig memberAttributeConfig = c.getMemberAttributeConfig();
        memberAttributeConfig.setIntAttribute("Test", 123);

        HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
        Member m1 = h1.getCluster().getLocalMember();
        assertEquals(123, (int) m1.getIntAttribute("Test"));

        HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
        Member m2 = h2.getCluster().getLocalMember();
        assertEquals(123, (int) m2.getIntAttribute("Test"));
        assertEquals(2, h2.getCluster().getMembers().size());

        Member member = findOtherMember(h2);
        assertEquals(m1, member);
        assertNotNull(member.getIntAttribute("Test"));
        assertEquals(123, (int) member.getIntAttribute("Test"));

        // Both members were configured with the attribute, so the client must see it on every member.
        HazelcastInstance client = HazelcastClient.newHazelcastClient();
        Collection<Member> members = client.getCluster().getMembers();
        for (Member m : members) {
            assertEquals(123, (int) m.getIntAttribute("Test"));
        }

        client.shutdown();
        h1.shutdown();
        h2.shutdown();
    }

    @Test(timeout = 120000)
    public void testPresharedAttributes() throws Exception {
        Config c = newTcpIpConfig();
        HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
        Member m1 = h1.getCluster().getLocalMember();
        // Attribute is set before the second node joins, so it travels with the join.
        m1.setIntAttribute("Test", 123);

        HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
        assertEquals(2, h2.getCluster().getMembers().size());

        Member member = findOtherMember(h2);
        assertEquals(m1, member);
        assertNotNull(member.getIntAttribute("Test"));
        assertEquals(123, (int) member.getIntAttribute("Test"));

        HazelcastInstance client = HazelcastClient.newHazelcastClient();
        assertClientSeesIntAttribute(client, m1, "Test", 123);

        client.shutdown();
        h1.shutdown();
        h2.shutdown();
    }

    @Test(timeout = 120000)
    public void testAddAttributes() throws Exception {
        Config c = newTcpIpConfig();
        HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
        Member m1 = h1.getCluster().getLocalMember();
        m1.setIntAttribute("Test", 123);

        HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
        assertEquals(2, h2.getCluster().getMembers().size());

        Member member = findOtherMember(h2);
        assertEquals(m1, member);
        assertNotNull(member.getIntAttribute("Test"));
        assertEquals(123, (int) member.getIntAttribute("Test"));

        HazelcastInstance client = HazelcastClient.newHazelcastClient();
        final CountDownLatch latch = new CountDownLatch(3);
        final MembershipListener listener = new LatchMembershipListener(latch);
        h2.getCluster().addMembershipListener(listener);
        h1.getCluster().addMembershipListener(listener);
        client.getCluster().addMembershipListener(listener);

        m1.setIntAttribute("Test2", 321);

        // Wait until all three listeners observed the attribute change.
        assertOpenEventually(latch);
        assertNotNull(member.getIntAttribute("Test2"));
        assertEquals(321, (int) member.getIntAttribute("Test2"));
        assertClientSeesIntAttribute(client, m1, "Test2", 321);

        client.shutdown();
        h1.shutdown();
        h2.shutdown();
    }

    @Test(timeout = 120000)
    public void testChangeAttributes() throws Exception {
        Config c = newTcpIpConfig();
        HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
        Member m1 = h1.getCluster().getLocalMember();
        m1.setIntAttribute("Test", 123);

        HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
        assertEquals(2, h2.getCluster().getMembers().size());

        Member member = findOtherMember(h2);
        assertEquals(m1, member);
        assertNotNull(member.getIntAttribute("Test"));
        assertEquals(123, (int) member.getIntAttribute("Test"));

        HazelcastInstance client = HazelcastClient.newHazelcastClient();
        final CountDownLatch latch = new CountDownLatch(3);
        final MembershipListener listener = new LatchMembershipListener(latch);
        h2.getCluster().addMembershipListener(listener);
        h1.getCluster().addMembershipListener(listener);
        client.getCluster().addMembershipListener(listener);

        m1.setIntAttribute("Test", 321);

        // Wait until all three listeners observed the attribute change.
        assertOpenEventually(latch);
        assertNotNull(member.getIntAttribute("Test"));
        assertEquals(321, (int) member.getIntAttribute("Test"));
        assertClientSeesIntAttribute(client, m1, "Test", 321);

        client.shutdown();
        h1.shutdown();
        h2.shutdown();
    }

    @Test(timeout = 120000)
    public void testRemoveAttributes() throws Exception {
        Config c = newTcpIpConfig();
        HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
        Member m1 = h1.getCluster().getLocalMember();
        m1.setIntAttribute("Test", 123);

        HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
        assertEquals(2, h2.getCluster().getMembers().size());

        Member member = findOtherMember(h2);
        assertEquals(m1, member);
        assertNotNull(member.getIntAttribute("Test"));
        assertEquals(123, (int) member.getIntAttribute("Test"));

        HazelcastInstance client = HazelcastClient.newHazelcastClient();
        final CountDownLatch latch = new CountDownLatch(3);
        final MembershipListener listener = new LatchMembershipListener(latch);
        h2.getCluster().addMembershipListener(listener);
        h1.getCluster().addMembershipListener(listener);
        client.getCluster().addMembershipListener(listener);

        m1.removeAttribute("Test");

        // Wait until all three listeners observed the attribute removal.
        assertOpenEventually(latch);
        assertNull(member.getIntAttribute("Test"));
        assertClientSeesIntAttribute(client, m1, "Test", null);

        client.shutdown();
        h1.shutdown();
        h2.shutdown();
    }

    /** Counts down once per attribute-change event; add/remove member events are ignored. */
    private static class LatchMembershipListener implements MembershipListener {
        private final CountDownLatch latch;

        private LatchMembershipListener(CountDownLatch latch) {
            this.latch = latch;
        }

        @Override
        public void memberAdded(MembershipEvent membershipEvent) {
        }

        @Override
        public void memberRemoved(MembershipEvent membershipEvent) {
        }

        @Override
        public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
            latch.countDown();
        }
    }
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientMemberAttributeTest.java
|
488 |
public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearIndicesCacheRequest> {
private boolean filterCache = false;
private boolean fieldDataCache = false;
private boolean idCache = false;
private boolean recycler = false;
private String[] fields = null;
private String[] filterKeys = null;
ClearIndicesCacheRequest() {
}
public ClearIndicesCacheRequest(String... indices) {
super(indices);
}
public boolean filterCache() {
return filterCache;
}
public ClearIndicesCacheRequest filterCache(boolean filterCache) {
this.filterCache = filterCache;
return this;
}
public boolean fieldDataCache() {
return this.fieldDataCache;
}
public ClearIndicesCacheRequest fieldDataCache(boolean fieldDataCache) {
this.fieldDataCache = fieldDataCache;
return this;
}
public ClearIndicesCacheRequest fields(String... fields) {
this.fields = fields;
return this;
}
public String[] fields() {
return this.fields;
}
public ClearIndicesCacheRequest filterKeys(String... filterKeys) {
this.filterKeys = filterKeys;
return this;
}
public String[] filterKeys() {
return this.filterKeys;
}
public boolean idCache() {
return this.idCache;
}
public ClearIndicesCacheRequest recycler(boolean recycler) {
this.recycler = recycler;
return this;
}
public boolean recycler() {
return this.recycler;
}
public ClearIndicesCacheRequest idCache(boolean idCache) {
this.idCache = idCache;
return this;
}
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
filterCache = in.readBoolean();
fieldDataCache = in.readBoolean();
idCache = in.readBoolean();
recycler = in.readBoolean();
fields = in.readStringArray();
filterKeys = in.readStringArray();
}
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(filterCache);
out.writeBoolean(fieldDataCache);
out.writeBoolean(idCache);
out.writeBoolean(recycler);
out.writeStringArrayNullable(fields);
out.writeStringArrayNullable(filterKeys);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_cache_clear_ClearIndicesCacheRequest.java
|
5,148 |
/**
 * Open-addressed hash table mapping {@code BytesRef} keys to dense long ids
 * (0..size-1). Key bytes are stored back-to-back in one growable byte array,
 * delimited by a parallel offsets array, and each key's (re)hash is cached so
 * that growing the table never needs to recompute hash codes.
 * <p>
 * Not thread-safe: {@link #find} and the internal {@code set} share the
 * {@code spare} scratch instance.
 */
public final class BytesRefHash extends AbstractHash {

    private LongArray startOffsets; // startOffsets[id]..startOffsets[id+1] delimit key #id inside 'bytes'
    private ByteArray bytes;        // concatenated key bytes
    private IntArray hashes; // we cache hashes for faster re-hashing
    private final BytesRef spare;   // scratch used to read keys back during lookups

    // Constructor with configurable capacity and default maximum load factor.
    public BytesRefHash(long capacity, PageCacheRecycler recycler) {
        this(capacity, DEFAULT_MAX_LOAD_FACTOR, recycler);
    }

    // Constructor with configurable capacity and load factor.
    public BytesRefHash(long capacity, float maxLoadFactor, PageCacheRecycler recycler) {
        super(capacity, maxLoadFactor, recycler);
        startOffsets = BigArrays.newLongArray(capacity + 1, recycler, false);
        // capacity * 3 is a sizing heuristic (assumes ~3 bytes per key on average);
        // the array grows on demand in append() regardless.
        bytes = BigArrays.newByteArray(capacity * 3, recycler, false);
        hashes = BigArrays.newIntArray(capacity, recycler, false);
        spare = new BytesRef();
    }

    // BytesRef has a weak hashCode function so we try to improve it by rehashing using Murmur3
    // Feel free to remove rehashing if BytesRef gets a better hash function
    private static int rehash(int hash) {
        return MurmurHash3.hash(hash);
    }

    /**
     * Return the key at <code>0 &lt;= id &lt; capacity()</code>, filling {@code dest}
     * with a view into the internal byte storage. The result is undefined if the slot is unused.
     */
    public BytesRef get(long id, BytesRef dest) {
        final long startOffset = startOffsets.get(id);
        final int length = (int) (startOffsets.get(id + 1) - startOffset);
        bytes.get(startOffset, length, dest);
        return dest;
    }

    /**
     * Get the id associated with <code>key</code>, or -1 if absent.
     * {@code code} must be the key's raw (un-rehashed) hash code.
     */
    public long find(BytesRef key, int code) {
        // Linear probing: walk slots until we hit the key or an empty slot (-1).
        final long slot = slot(rehash(code), mask);
        for (long index = slot; ; index = nextSlot(index, mask)) {
            final long id = id(index);
            if (id == -1L || UnsafeUtils.equals(key, get(id, spare))) {
                return id;
            }
        }
    }

    /** Sugar for {@link #find(BytesRef, int) find(key, key.hashCode())} */
    public long find(BytesRef key) {
        return find(key, key.hashCode());
    }

    // Inserts key with the given (already rehashed) code at the next free probe slot,
    // or returns -1-existingId if the key is already present.
    private long set(BytesRef key, int code, long id) {
        assert rehash(key.hashCode()) == code;
        assert size < maxSize;
        final long slot = slot(code, mask);
        for (long index = slot; ; index = nextSlot(index, mask)) {
            final long curId = id(index);
            if (curId == -1) { // means unset
                id(index, id);
                append(id, key, code);
                ++size;
                return id;
            } else if (UnsafeUtils.equals(key, get(curId, spare))) {
                return -1 - curId;
            }
        }
    }

    // Appends the key's bytes, its end offset, and its cached hash; ids are dense so id == size.
    private void append(long id, BytesRef key, int code) {
        assert size == id;
        final long startOffset = startOffsets.get(size);
        bytes = BigArrays.grow(bytes, startOffset + key.length);
        bytes.set(startOffset, key.bytes, key.offset, key.length);
        startOffsets = BigArrays.grow(startOffsets, size + 2);
        startOffsets.set(size + 1, startOffset + key.length);
        hashes = BigArrays.grow(hashes, id + 1);
        hashes.set(id, code);
    }

    // Assertion helper: the cached code must match the stored key's rehashed hash.
    private boolean assertConsistent(long id, int code) {
        get(id, spare);
        return rehash(spare.hashCode()) == code;
    }

    // Re-inserts an existing id into the (grown) slot table; used during resize.
    private void reset(int code, long id) {
        assert assertConsistent(id, code);
        final long slot = slot(code, mask);
        for (long index = slot; ; index = nextSlot(index, mask)) {
            final long curId = id(index);
            if (curId == -1) { // means unset
                id(index, id);
                break;
            }
        }
    }

    /**
     * Try to add <code>key</code>. Return its newly allocated id if it wasn't in the hash table yet,
     * or <code>-1-id</code> if it was already present in the hash table.
     */
    public long add(BytesRef key, int code) {
        if (size >= maxSize) {
            assert size == maxSize;
            grow();
        }
        assert size < maxSize;
        return set(key, rehash(code), size);
    }

    /** Sugar to {@link #add(BytesRef, int) add(key, key.hashCode())}. */
    public long add(BytesRef key) {
        return add(key, key.hashCode());
    }

    @Override
    protected void removeAndAdd(long index, long id) {
        // Use the cached hash instead of re-reading and re-hashing the key bytes.
        final int code = hashes.get(id);
        reset(code, id);
    }

    @Override
    public boolean release() {
        boolean success = false;
        try {
            super.release();
            success = true;
        } finally {
            Releasables.release(success, bytes, hashes, startOffsets);
        }
        // NOTE(review): always returns true and discards super.release()'s result — confirm intended.
        return true;
    }
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_BytesRefHash.java
|
282 |
/**
 * Script command request (e.g. a Javascript or SQL batch script). A script may
 * modify the database, so the command is reported as non-idempotent.
 * <p>
 * Change vs. previous revision: the single-argument constructor now enables the
 * command cache ({@code useCache = true}), consistently with the other two
 * constructors — previously it silently left caching disabled.
 */
@SuppressWarnings("serial")
public class OCommandScript extends OCommandRequestTextAbstract {
  private String language;
  private CompiledScript compiledScript;

  public OCommandScript() {
    useCache = true;
  }

  public OCommandScript(final String iLanguage, final String iText) {
    super(iText);
    language = iLanguage;
    useCache = true;
  }

  public OCommandScript(final String iText) {
    super(iText);
    // Fix: enable caching here as well for consistency with the other constructors.
    useCache = true;
  }

  /** Scripts can write to the database, so they must not be treated as idempotent. */
  public boolean isIdempotent() {
    return false;
  }

  public String getLanguage() {
    return language;
  }

  public OCommandScript setLanguage(String language) {
    this.language = language;
    return this;
  }

  /**
   * Deserializes this command: the language string first, then the superclass payload.
   * Must stay symmetric with {@link #toStream()}.
   */
  public OSerializableStream fromStream(byte[] iStream) throws OSerializationException {
    final OMemoryStream buffer = new OMemoryStream(iStream);
    language = buffer.getAsString();
    fromStream(buffer);
    return this;
  }

  /** Serializes this command: language first, then the superclass payload. */
  public byte[] toStream() throws OSerializationException {
    final OMemoryStream buffer = new OMemoryStream();
    buffer.set(language);
    return toStream(buffer);
  }

  // Cached pre-compiled form of the script text, set by the executor to avoid recompilation.
  public void setCompiledScript(CompiledScript script) {
    compiledScript = script;
  }

  public CompiledScript getCompiledScript() {
    return compiledScript;
  }

  @Override
  public String toString() {
    // Truncate long script bodies so log lines stay readable.
    if (language != null)
      return language + "." + OIOUtils.getStringMaxLength(text, 200, "...");
    return "script." + OIOUtils.getStringMaxLength(text, 200, "...");
  }
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_script_OCommandScript.java
|
3 |
// Collect the ids of the second permutation's ProductOptionValues so the two
// permutations can be compared by id rather than by object identity/equality.
Collection<Long> perm2Ids = BLCCollectionUtils.collect(perm2, new TypedTransformer<Long>() {
    @Override
    public Long transform(Object input) {
        // Each element of perm2 is a ProductOptionValue; extract its primary key.
        return ((ProductOptionValue) input).getId();
    }
});
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_AdminCatalogServiceImpl.java
|
5,119 |
/**
 * Search phase that runs aggregations. Non-global aggregators are attached as a
 * collector to the main query in {@link #preProcess}; global aggregators are run
 * over a match-all query (filtered by the search filter) in {@link #execute},
 * after which all aggregations are built into the query result.
 */
public class AggregationPhase implements SearchPhase {

    private final AggregationParseElement parseElement;
    private final AggregationBinaryParseElement binaryParseElement;

    @Inject
    public AggregationPhase(AggregationParseElement parseElement, AggregationBinaryParseElement binaryParseElement) {
        this.parseElement = parseElement;
        this.binaryParseElement = binaryParseElement;
    }

    /** Registers all accepted request-body spellings for the aggregations section. */
    @Override
    public Map<String, ? extends SearchParseElement> parseElements() {
        return ImmutableMap.<String, SearchParseElement>builder()
                .put("aggregations", parseElement)
                .put("aggs", parseElement)
                .put("aggregations_binary", binaryParseElement)
                .put("aggregationsBinary", binaryParseElement)
                .put("aggs_binary", binaryParseElement)
                .put("aggsBinary", binaryParseElement)
                .build();
    }

    /**
     * Creates the top-level aggregators and hooks every collecting, non-global
     * aggregator into the main query's collector chain. Global aggregators are
     * deliberately skipped here — they are executed separately in execute().
     */
    @Override
    public void preProcess(SearchContext context) {
        if (context.aggregations() != null) {
            AggregationContext aggregationContext = new AggregationContext(context);
            context.aggregations().aggregationContext(aggregationContext);

            List<Aggregator> collectors = new ArrayList<Aggregator>();
            Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators(aggregationContext);
            for (int i = 0; i < aggregators.length; i++) {
                if (!(aggregators[i] instanceof GlobalAggregator)) {
                    Aggregator aggregator = aggregators[i];
                    if (aggregator.shouldCollect()) {
                        collectors.add(aggregator);
                    }
                }
            }
            context.aggregations().aggregators(aggregators);
            if (!collectors.isEmpty()) {
                context.searcher().addMainQueryCollector(new AggregationsCollector(collectors, aggregationContext));
            }
        }
    }

    /**
     * Runs global aggregators (if any) over the whole index and builds the final
     * InternalAggregations for the query result. Aggregators are released even on failure.
     */
    @Override
    public void execute(SearchContext context) throws ElasticsearchException {
        if (context.aggregations() == null) {
            return;
        }

        if (context.queryResult().aggregations() != null) {
            // no need to compute the facets twice, they should be computed on a per context basis
            return;
        }

        Aggregator[] aggregators = context.aggregations().aggregators();
        boolean success = false;
        try {
            List<Aggregator> globals = new ArrayList<Aggregator>();
            for (int i = 0; i < aggregators.length; i++) {
                if (aggregators[i] instanceof GlobalAggregator) {
                    globals.add(aggregators[i]);
                }
            }

            // optimize the global collector based execution
            if (!globals.isEmpty()) {
                AggregationsCollector collector = new AggregationsCollector(globals, context.aggregations().aggregationContext());
                // Globals see every live document: match-all, narrowed only by the type/alias search filter.
                Query query = new XConstantScoreQuery(Queries.MATCH_ALL_FILTER);
                Filter searchFilter = context.searchFilter(context.types());
                if (searchFilter != null) {
                    query = new XFilteredQuery(query, searchFilter);
                }
                try {
                    context.searcher().search(query, collector);
                } catch (Exception e) {
                    throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e);
                }
                collector.postCollection();
            }

            List<InternalAggregation> aggregations = new ArrayList<InternalAggregation>(aggregators.length);
            for (Aggregator aggregator : context.aggregations().aggregators()) {
                aggregations.add(aggregator.buildAggregation(0));
            }
            context.queryResult().aggregations(new InternalAggregations(aggregations));
            success = true;
        } finally {
            Releasables.release(success, aggregators);
        }
    }

    /** Lucene collector that fans each collected doc out to a fixed set of aggregators. */
    public static class AggregationsCollector extends XCollector {

        private final AggregationContext aggregationContext;
        private final Aggregator[] collectors;

        public AggregationsCollector(Collection<Aggregator> collectors, AggregationContext aggregationContext) {
            this.collectors = collectors.toArray(new Aggregator[collectors.size()]);
            this.aggregationContext = aggregationContext;
        }

        @Override
        public void setScorer(Scorer scorer) throws IOException {
            aggregationContext.setScorer(scorer);
        }

        @Override
        public void collect(int doc) throws IOException {
            // Bucket ordinal 0 = the root bucket for top-level aggregators.
            for (Aggregator collector : collectors) {
                collector.collect(doc, 0);
            }
        }

        @Override
        public void setNextReader(AtomicReaderContext context) throws IOException {
            aggregationContext.setNextReader(context);
        }

        @Override
        public boolean acceptsDocsOutOfOrder() {
            return true;
        }

        @Override
        public void postCollection() {
            for (Aggregator collector : collectors) {
                collector.postCollection();
            }
        }
    }
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_AggregationPhase.java
|
1,072 |
/**
 * VertexList backed by a primitive long array of vertex ids. Vertices are
 * materialized lazily from the transaction on access. Tracks whether the id
 * list is sorted so that addAll() can use a merge instead of a concat+flag-drop.
 */
public class VertexLongList implements VertexListInternal {

    private final StandardTitanTx tx;   // used to resolve ids back into vertices
    private LongArrayList vertices;     // the vertex ids; may be replaced on merge
    private boolean sorted;             // invariant: if true, 'vertices' is ascending

    public VertexLongList(StandardTitanTx tx, LongArrayList vertices, boolean sorted) {
        assert !sorted || AbstractLongListUtil.isSorted(vertices);
        this.tx = tx;
        this.vertices = vertices;
        this.sorted = sorted;
    }

    @Override
    public void add(TitanVertex n) {
        // Appending keeps the list sorted only if the new id is >= the current last id.
        if (!vertices.isEmpty()) sorted = sorted && vertices.get(vertices.size()-1)<=n.getLongId();
        vertices.add(n.getLongId());
    }

    @Override
    public long getID(int pos) {
        return vertices.get(pos);
    }

    @Override
    public LongArrayList getIDs() {
        // Exposes the internal list directly; callers must not mutate it.
        return vertices;
    }

    @Override
    public TitanVertex get(int pos) {
        return tx.getInternalVertex(getID(pos));
    }

    @Override
    public void sort() {
        if (sorted) return;
        Arrays.sort(vertices.buffer,0,vertices.size());
        sorted = true;
    }

    @Override
    public boolean isSorted() {
        return sorted;
    }

    @Override
    public VertexList subList(int fromPosition, int length) {
        LongArrayList subList = new LongArrayList(length);
        subList.add(vertices.buffer, fromPosition, length);
        assert subList.size()==length;
        // A contiguous slice of a sorted list is itself sorted.
        return new VertexLongList(tx,subList,sorted);
    }

    @Override
    public int size() {
        return vertices.size();
    }

    @Override
    public void addAll(VertexList vertexlist) {
        // Extract the other list's ids, converting from a vertex-backed list if needed.
        LongArrayList othervertexids = null;
        if (vertexlist instanceof VertexLongList) {
            othervertexids = ((VertexLongList) vertexlist).vertices;
        } else if (vertexlist instanceof VertexArrayList) {
            VertexArrayList other = (VertexArrayList) vertexlist;
            othervertexids = new LongArrayList(other.size());
            for (int i = 0; i < other.size(); i++) othervertexids.add(other.getID(i));
        } else {
            throw new IllegalArgumentException("Unsupported vertex-list: " + vertexlist.getClass());
        }
        if (sorted && vertexlist.isSorted()) {
            //Merge join: both inputs sorted, so the result stays sorted.
            vertices = AbstractLongListUtil.mergeSort(vertices,othervertexids);
        } else {
            // Plain concatenation; the sorted invariant no longer holds.
            sorted = false;
            vertices.add(othervertexids.buffer, 0, othervertexids.size());
        }
    }

    /** Materializes every id into a vertex-backed list. */
    public VertexArrayList toVertexArrayList() {
        VertexArrayList list = new VertexArrayList(tx);
        for (int i=0;i<vertices.size();i++) {
            list.add(get(i));
        }
        return list;
    }

    @Override
    public Iterator<TitanVertex> iterator() {
        return new Iterator<TitanVertex>() {

            private int pos = -1; // index of the element last returned by next()

            @Override
            public boolean hasNext() {
                return (pos + 1) < size();
            }

            @Override
            public TitanVertex next() {
                pos++;
                return get(pos);
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException("Vertices cannot be removed from neighborhood list");
            }
        };
    }
}
| 1no label
|
titan-core_src_main_java_com_thinkaurelius_titan_graphdb_query_vertex_VertexLongList.java
|
46 |
/**
 * Completion proposal that inserts a full package descriptor statement
 * ("package {name};") while displaying "package {name}" in the popup.
 */
static final class PackageDescriptorProposal extends CompletionProposal {

    PackageDescriptorProposal(int offset, String prefix, String packageName) {
        // display text omits the trailing ';' that the inserted text carries
        super(offset, prefix, PACKAGE,
                "package " + packageName,
                "package " + packageName + ";");
    }

    @Override
    protected boolean qualifiedNameIsPath() {
        // presumably makes prefix matching treat the dotted package name as a path — TODO confirm
        return true;
    }
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_PackageCompletions.java
|
4 |
/**
 * Bundle activator for the Ceylon Android Eclipse plug-in. Holds the singleton
 * plug-in instance and exposes static logging helpers bound to this bundle's log.
 * <p>
 * NOTE(review): the static log helpers dereference {@code plugin} without a null
 * check, so calling them before {@link #start} or after {@link #stop} throws
 * NPE — confirm that is acceptable for all callers.
 */
public class CeylonAndroidPlugin extends AbstractUIPlugin {

    public static final String PLUGIN_ID = "com.redhat.ceylon.eclipse.android.plugin";

    // Singleton instance, valid only between start() and stop().
    private static CeylonAndroidPlugin plugin;

    @Override
    public void start(BundleContext context) throws Exception {
        super.start(context);
        plugin = this;
    }

    @Override
    public void stop(BundleContext context) throws Exception {
        plugin = null;
        super.stop(context);
    }

    public static CeylonAndroidPlugin getDefault() {
        return plugin;
    }

    public static void logInfo(String msg) {
        plugin.getLog().log(new Status(IStatus.INFO, PLUGIN_ID, msg));
    }

    // NOTE(review): this overload accepts only IOException — presumably its only call
    // sites log I/O failures; a wider Exception parameter may be intended. Confirm.
    public static void logInfo(String msg, IOException e) {
        plugin.getLog().log(new Status(IStatus.INFO, PLUGIN_ID, msg, e));
    }

    public static void logError(String msg, Exception e) {
        plugin.getLog().log(new Status(IStatus.ERROR, PLUGIN_ID, msg, e));
    }
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.android.plugin_src_com_redhat_ceylon_eclipse_android_plugin_CeylonAndroidPlugin.java
|
715 |
/**
 * Per-shard slice of a count request: carries the query source, min-score cut-off,
 * target types, alias filters, and the coordinating node's timestamp down to a
 * single shard. readFrom/writeTo define the wire format and must stay symmetric.
 */
class ShardCountRequest extends BroadcastShardOperationRequest {

    private float minScore;
    private BytesReference querySource;
    private String[] types = Strings.EMPTY_ARRAY;
    private long nowInMillis; // coordinator time, so "now" is consistent across shards

    @Nullable
    private String[] filteringAliases;

    // No-arg constructor required for deserialization via readFrom().
    ShardCountRequest() {
    }

    public ShardCountRequest(String index, int shardId, @Nullable String[] filteringAliases, CountRequest request) {
        super(index, shardId, request);
        this.minScore = request.minScore();
        this.querySource = request.source();
        this.types = request.types();
        this.filteringAliases = filteringAliases;
        this.nowInMillis = request.nowInMillis;
    }

    public float minScore() {
        return minScore;
    }

    public BytesReference querySource() {
        return querySource;
    }

    public String[] types() {
        return this.types;
    }

    public String[] filteringAliases() {
        return filteringAliases;
    }

    public long nowInMillis() {
        return this.nowInMillis;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        minScore = in.readFloat();
        querySource = in.readBytesReference();
        // Size 0 keeps the defaults: types stays EMPTY_ARRAY, filteringAliases stays null.
        int typesSize = in.readVInt();
        if (typesSize > 0) {
            types = new String[typesSize];
            for (int i = 0; i < typesSize; i++) {
                types[i] = in.readString();
            }
        }
        int aliasesSize = in.readVInt();
        if (aliasesSize > 0) {
            filteringAliases = new String[aliasesSize];
            for (int i = 0; i < aliasesSize; i++) {
                filteringAliases[i] = in.readString();
            }
        }
        nowInMillis = in.readVLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeFloat(minScore);
        out.writeBytesReference(querySource);
        out.writeVInt(types.length);
        for (String type : types) {
            out.writeString(type);
        }
        // null aliases are encoded as size 0, mirroring the readFrom() default.
        if (filteringAliases != null) {
            out.writeVInt(filteringAliases.length);
            for (String alias : filteringAliases) {
                out.writeString(alias);
            }
        } else {
            out.writeVInt(0);
        }
        out.writeVLong(nowInMillis);
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_count_ShardCountRequest.java
|
55 |
/**
 * Fork/join task that applies {@code action} to every key/value mapping of the
 * map (jsr166e ConcurrentHashMapV8 bulk-operation machinery). The table range
 * [baseIndex, baseLimit) is recursively halved into forked subtasks while
 * {@code batch} permits, then the remaining slice is processed in-line.
 */
@SuppressWarnings("serial")
static final class ForEachMappingTask<K,V>
    extends BulkTask<K,V,Void> {
    final BiAction<? super K, ? super V> action;
    ForEachMappingTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         BiAction<? super K,? super V> action) {
        super(p, b, i, f, t);
        this.action = action;
    }
    public final void compute() {
        final BiAction<? super K, ? super V> action;
        if ((action = this.action) != null) {
            // Split: fork the upper half [h, f) and keep the lower half for this task,
            // halving 'batch' each time so splitting eventually stops.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachMappingTask<K,V>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     action).fork();
            }
            // Process this task's remaining slice sequentially.
            for (Node<K,V> p; (p = advance()) != null; )
                action.apply(p.key, p.val);
            // Completes this task and, transitively, waiting parents.
            propagateCompletion();
        }
    }
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
293 |
/**
 * Fluent graph/record traversal command. Configure it with target(), fields(),
 * predicate() and limit(), then consume the results either eagerly via
 * {@link #execute()} or lazily via the Iterator/Iterable interfaces.
 * <p>
 * Note: {@link #hasNext()} is not side-effect free — it pre-fetches the next
 * result into {@code lastTraversed}, which {@link #next()} then returns.
 */
public class OTraverse implements OCommand, Iterable<OIdentifiable>, Iterator<OIdentifiable> {
    private OTraverseContext context = new OTraverseContext();
    private OCommandPredicate predicate;
    private Iterator<? extends OIdentifiable> target;
    private List<Object> fields = new ArrayList<Object>();
    private long resultCount = 0;
    private long limit = 0;          // 0 = unlimited
    private OIdentifiable lastTraversed; // pre-fetched result buffered between hasNext() and next()
    private STRATEGY strategy = STRATEGY.DEPTH_FIRST;

    public enum STRATEGY {
        DEPTH_FIRST, BREADTH_FIRST
    };

    /*
     * Executes the traversal eagerly, collecting every result into the returned
     * List<OIdentifiable>. This can be memory-expensive for large results; prefer
     * using this object as an Iterable and lazily fetching each result via next().
     *
     * @see com.orientechnologies.orient.core.command.OCommand#execute()
     */
    public List<OIdentifiable> execute() {
        final List<OIdentifiable> result = new ArrayList<OIdentifiable>();
        while (hasNext())
            result.add(next());
        return result;
    }

    /** The traversal process currently on top of the context stack. */
    public OTraverseAbstractProcess<?> currentProcess() {
        return (OTraverseAbstractProcess<?>) context.peek();
    }

    public boolean hasNext() {
        if (limit > 0 && resultCount >= limit)
            return false;

        if (lastTraversed == null)
            // GET THE NEXT
            lastTraversed = next();

        if (lastTraversed == null && context.peek() != null)
            throw new IllegalStateException("Traverse ended abnormally");

        if (!context.checkTimeout())
            return false;

        // BROWSE ALL THE RECORDS
        return lastTraversed != null;
    }

    public OIdentifiable next() {
        if (Thread.interrupted())
            throw new OCommandExecutionException("The traverse execution has been interrupted");

        if (lastTraversed != null) {
            // RETURN LATEST AND RESET IT
            final OIdentifiable result = lastTraversed;
            lastTraversed = null;
            return result;
        }

        if (limit > 0 && resultCount >= limit)
            return null;

        OIdentifiable result;
        OTraverseAbstractProcess<?> toProcess;
        // RESUME THE LAST PROCESS
        while ((toProcess = currentProcess()) != null) {
            result = (OIdentifiable) toProcess.process();
            if (result != null) {
                resultCount++;
                return result;
            }
        }

        return null;
    }

    public void remove() {
        throw new UnsupportedOperationException("remove()");
    }

    public Iterator<OIdentifiable> iterator() {
        return this;
    }

    public OTraverseContext getContext() {
        return context;
    }

    public OTraverse target(final Iterable<? extends OIdentifiable> iTarget) {
        return target(iTarget.iterator());
    }

    public OTraverse target(final OIdentifiable... iRecords) {
        final List<OIdentifiable> list = new ArrayList<OIdentifiable>();
        for (OIdentifiable id : iRecords)
            list.add(id);
        return target(list.iterator());
    }

    @SuppressWarnings("unchecked")
    public OTraverse target(final Iterator<? extends OIdentifiable> iTarget) {
        target = iTarget;
        context.reset();
        // The process constructor registers itself with this traverse's context.
        new OTraverseRecordSetProcess(this, (Iterator<OIdentifiable>) target);
        return this;
    }

    public Iterator<? extends OIdentifiable> getTarget() {
        return target;
    }

    public OTraverse predicate(final OCommandPredicate iPredicate) {
        predicate = iPredicate;
        return this;
    }

    public OCommandPredicate getPredicate() {
        return predicate;
    }

    public OTraverse field(final Object iField) {
        if (!fields.contains(iField))
            fields.add(iField);
        return this;
    }

    public OTraverse fields(final Collection<Object> iFields) {
        for (Object f : iFields)
            field(f);
        return this;
    }

    public OTraverse fields(final String... iFields) {
        for (String f : iFields)
            field(f);
        return this;
    }

    public List<Object> getFields() {
        return fields;
    }

    public long getLimit() {
        return limit;
    }

    public OTraverse limit(final long iLimit) {
        // NOTE(review): the guard rejects only values < -1, so -1 slips through despite
        // the message saying negatives are invalid — confirm whether -1 is meant as "unset".
        if (iLimit < -1)
            throw new IllegalArgumentException("Limit cannot be negative. 0 = infinite");
        this.limit = iLimit;
        return this;
    }

    @Override
    public String toString() {
        return String.format("OTraverse.target(%s).fields(%s).limit(%d).predicate(%s)", target, fields, limit, predicate);
    }

    public long getResultCount() {
        return resultCount;
    }

    public OIdentifiable getLastTraversed() {
        return lastTraversed;
    }

    public STRATEGY getStrategy() {
        return strategy;
    }

    public void setStrategy(STRATEGY strategy) {
        this.strategy = strategy;
    }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_command_traverse_OTraverse.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.