Unnamed: 0 (int64, 0 to 6.45k) | func (string, lengths 37 to 143k) | target (class label, 2 classes) | project (string, lengths 33 to 157)
---|---|---|---|
1,949 |
@Repository("blCustomerDao")
public class CustomerDaoImpl implements CustomerDao {
@PersistenceContext(unitName="blPU")
protected EntityManager em;
@Resource(name="blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
public Customer readCustomerById(Long id) {
return em.find(CustomerImpl.class, id);
}
public Customer readCustomerByUsername(String username) {
List<Customer> customers = readCustomersByUsername(username);
return customers == null || customers.isEmpty() ? null : customers.get(0);
}
@SuppressWarnings("unchecked")
public List<Customer> readCustomersByUsername(String username) {
Query query = em.createNamedQuery("BC_READ_CUSTOMER_BY_USER_NAME");
query.setParameter("username", username);
return query.getResultList();
}
public Customer readCustomerByEmail(String emailAddress) {
List<Customer> customers = readCustomersByEmail(emailAddress);
return customers == null || customers.isEmpty() ? null : customers.get(0);
}
@SuppressWarnings("unchecked")
public List<Customer> readCustomersByEmail(String emailAddress) {
Query query = em.createNamedQuery("BC_READ_CUSTOMER_BY_EMAIL");
query.setParameter("email", emailAddress);
return query.getResultList();
}
public Customer save(Customer customer) {
return em.merge(customer);
}
public Customer create() {
Customer customer = (Customer) entityConfiguration.createEntityInstance(Customer.class.getName());
return customer;
}
}
| 1 (no label)
|
core_broadleaf-profile_src_main_java_org_broadleafcommerce_profile_core_dao_CustomerDaoImpl.java
|
24 |
@Controller("blAdminProductController")
@RequestMapping("/" + AdminProductController.SECTION_KEY)
public class AdminProductController extends AdminBasicEntityController {
protected static final String SECTION_KEY = "product";
@Override
protected String getSectionKey(Map<String, String> pathVars) {
//allow external links to work for ToOne items
if (super.getSectionKey(pathVars) != null) {
return super.getSectionKey(pathVars);
}
return SECTION_KEY;
}
@Override
public String[] getSectionCustomCriteria() {
return new String[]{"productDirectEdit"};
}
protected String showAddAdditionalSku(HttpServletRequest request, HttpServletResponse response, Model model,
String id) throws Exception {
String collectionField = "additionalSkus";
String mainClassName = getClassNameForSection(SECTION_KEY);
ClassMetadata mainMetadata = service.getClassMetadata(getSectionPersistencePackageRequest(mainClassName));
Property collectionProperty = mainMetadata.getPMap().get(collectionField);
FieldMetadata md = collectionProperty.getMetadata();
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(md)
.withCustomCriteria(new String[] { id });
BasicCollectionMetadata fmd = (BasicCollectionMetadata) md;
ClassMetadata cmd = service.getClassMetadata(ppr);
// If the entity type isn't specified, we need to determine if there are various polymorphic types
// for this entity.
String entityType = null;
if (request.getParameter("entityType") != null) {
entityType = request.getParameter("entityType");
}
if (StringUtils.isBlank(entityType)) {
if (cmd.getPolymorphicEntities().getChildren().length == 0) {
entityType = cmd.getPolymorphicEntities().getFullyQualifiedClassname();
} else {
entityType = getDefaultEntityType();
}
} else {
entityType = URLDecoder.decode(entityType, "UTF-8");
}
if (StringUtils.isBlank(entityType)) {
List<ClassTree> entityTypes = getAddEntityTypes(cmd.getPolymorphicEntities());
model.addAttribute("entityTypes", entityTypes);
model.addAttribute("viewType", "modal/entityTypeSelection");
model.addAttribute("entityFriendlyName", cmd.getPolymorphicEntities().getFriendlyName());
String requestUri = request.getRequestURI();
if (!request.getContextPath().equals("/") && requestUri.startsWith(request.getContextPath())) {
requestUri = requestUri.substring(request.getContextPath().length() + 1, requestUri.length());
}
model.addAttribute("currentUri", requestUri);
model.addAttribute("modalHeaderType", "addEntity");
setModelAttributes(model, SECTION_KEY);
return "modules/modalContainer";
} else {
ppr = ppr.withCeilingEntityClassname(entityType);
}
ClassMetadata collectionMetadata = service.getClassMetadata(ppr);
EntityForm entityForm = formService.createEntityForm(collectionMetadata);
entityForm.setCeilingEntityClassname(ppr.getCeilingEntityClassname());
entityForm.setEntityType(ppr.getCeilingEntityClassname());
formService.removeNonApplicableFields(collectionMetadata, entityForm, ppr.getCeilingEntityClassname());
entityForm.removeAction(DefaultEntityFormActions.DELETE);
removeRequiredValidation(entityForm);
model.addAttribute("entityForm", entityForm);
model.addAttribute("viewType", "modal/simpleAddEntity");
model.addAttribute("currentUrl", request.getRequestURL().toString());
model.addAttribute("modalHeaderType", "addCollectionItem");
model.addAttribute("collectionProperty", collectionProperty);
setModelAttributes(model, SECTION_KEY);
return "modules/modalContainer";
}
@Override
protected String buildAddCollectionItemModel(HttpServletRequest request, HttpServletResponse response,
Model model,
String id,
String collectionField,
String sectionKey,
Property collectionProperty,
FieldMetadata md, PersistencePackageRequest ppr, EntityForm entityForm, Entity entity) throws ServiceException {
if ("additionalSkus".equals(collectionField) && ppr.getCustomCriteria().length == 0) {
ppr.withCustomCriteria(new String[] { id });
}
return super.buildAddCollectionItemModel(request, response, model, id, collectionField, sectionKey, collectionProperty, md, ppr, entityForm, entity);
}
protected String showUpdateAdditionalSku(HttpServletRequest request, HttpServletResponse response, Model model,
String id,
String collectionItemId) throws Exception {
String collectionField = "additionalSkus";
// Find out metadata for the additionalSkus property
String mainClassName = getClassNameForSection(SECTION_KEY);
ClassMetadata mainMetadata = service.getClassMetadata(getSectionPersistencePackageRequest(mainClassName));
Property collectionProperty = mainMetadata.getPMap().get(collectionField);
FieldMetadata md = collectionProperty.getMetadata();
// Find the metadata and the entity for the selected sku
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(md)
.withCustomCriteria(new String[] { id });
ClassMetadata collectionMetadata = service.getClassMetadata(ppr);
if (collectionMetadata.getCeilingType().equals(SkuImpl.class.getName())) {
collectionMetadata.setCeilingType(Sku.class.getName());
}
Entity entity = service.getRecord(ppr, collectionItemId, collectionMetadata, true);
// Find the records for all subcollections of Sku
Map<String, DynamicResultSet> subRecordsMap = service.getRecordsForAllSubCollections(ppr, entity);
// Build the entity form for the modal that includes the subcollections
EntityForm entityForm = formService.createEntityForm(collectionMetadata, entity, subRecordsMap);
entityForm.removeAction(DefaultEntityFormActions.DELETE);
// Ensure that operations on the Sku subcollections go to the proper URL
for (ListGrid lg : entityForm.getAllListGrids()) {
lg.setSectionKey("org.broadleafcommerce.core.catalog.domain.Sku");
}
removeRequiredValidation(entityForm);
model.addAttribute("entityForm", entityForm);
model.addAttribute("viewType", "modal/simpleEditEntity");
model.addAttribute("currentUrl", request.getRequestURL().toString());
model.addAttribute("modalHeaderType", "updateCollectionItem");
model.addAttribute("collectionProperty", collectionProperty);
setModelAttributes(model, SECTION_KEY);
return "modules/modalContainer";
}
@Override
@RequestMapping(value = "/{id}/{collectionField:.*}/{collectionItemId}", method = RequestMethod.GET)
public String showUpdateCollectionItem(HttpServletRequest request, HttpServletResponse response, Model model,
@PathVariable Map<String, String> pathVars,
@PathVariable(value="id") String id,
@PathVariable(value="collectionField") String collectionField,
@PathVariable(value="collectionItemId") String collectionItemId) throws Exception {
if ("additionalSkus".equals(collectionField)) {
return showUpdateAdditionalSku(request, response, model, id, collectionItemId);
}
return super.showUpdateCollectionItem(request, response, model, pathVars, id, collectionField, collectionItemId);
}
@Override
@RequestMapping(value = "/{id}/{collectionField}/add", method = RequestMethod.GET)
public String showAddCollectionItem(HttpServletRequest request, HttpServletResponse response, Model model,
@PathVariable Map<String, String> pathVars,
@PathVariable(value="id") String id,
@PathVariable(value="collectionField") String collectionField,
@RequestParam MultiValueMap<String, String> requestParams) throws Exception {
if ("additionalSkus".equals(collectionField)) {
return showAddAdditionalSku(request, response, model, id);
}
return super.showAddCollectionItem(request, response, model, pathVars, id, collectionField, requestParams);
}
@Override
@RequestMapping(value = "/{id}", method = RequestMethod.GET)
public String viewEntityForm(HttpServletRequest request, HttpServletResponse response, Model model,
@PathVariable Map<String, String> pathVars,
@PathVariable(value = "id") String id) throws Exception {
String view = super.viewEntityForm(request, response, model, pathVars, id);
//Skus have a specific toolbar action to generate Skus based on permutations
EntityForm form = (EntityForm) model.asMap().get("entityForm");
ListGridAction generateSkusAction = new ListGridAction(ListGridAction.GEN_SKUS).withDisplayText("Generate_Skus")
.withIconClass("icon-fighter-jet")
.withButtonClass("generate-skus")
.withUrlPostfix("/generate-skus");
ListGrid skusGrid = form.findListGrid("additionalSkus");
if (skusGrid != null) {
skusGrid.addToolbarAction(generateSkusAction);
skusGrid.setCanFilterAndSort(false);
}
// When we're dealing with product bundles, we don't want to render the product options and additional skus
// list grids. Remove them from the form.
if (ProductBundle.class.isAssignableFrom(Class.forName(form.getEntityType()))) {
form.removeListGrid("additionalSkus");
form.removeListGrid("productOptions");
}
form.removeListGrid("defaultSku.skuAttributes");
return view;
}
/**
* Clears out any required validation on the fields within an entity form. Used for additional Skus since none of those
* fields should be required.
*
* @param entityForm
*/
protected void removeRequiredValidation(EntityForm entityForm) {
for (Field field : entityForm.getFields().values()) {
field.setRequired(false);
}
}
}
| 0 (true)
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_controller_entity_AdminProductController.java
|
482 |
public class AnalyzeRequestBuilder extends SingleCustomOperationRequestBuilder<AnalyzeRequest, AnalyzeResponse, AnalyzeRequestBuilder> {
public AnalyzeRequestBuilder(IndicesAdminClient indicesClient) {
super((InternalIndicesAdminClient) indicesClient, new AnalyzeRequest());
}
public AnalyzeRequestBuilder(IndicesAdminClient indicesClient, String index, String text) {
super((InternalIndicesAdminClient) indicesClient, new AnalyzeRequest(index, text));
}
/**
* Sets the index to use to analyze the text (for example, if it holds specific analyzers
* registered).
*/
public AnalyzeRequestBuilder setIndex(String index) {
request.index(index);
return this;
}
/**
* Sets the analyzer name to use in order to analyze the text.
*
* @param analyzer The analyzer name.
*/
public AnalyzeRequestBuilder setAnalyzer(String analyzer) {
request.analyzer(analyzer);
return this;
}
/**
* Sets the field whose analyzer will be used to analyze the text. Note, this requires an index
* to be set.
*/
public AnalyzeRequestBuilder setField(String field) {
request.field(field);
return this;
}
/**
* Instead of setting the analyzer, sets the tokenizer that will be used as part of a custom
* analyzer.
*/
public AnalyzeRequestBuilder setTokenizer(String tokenizer) {
request.tokenizer(tokenizer);
return this;
}
/**
* Sets the token filters to apply on top of the provided tokenizer.
*/
public AnalyzeRequestBuilder setTokenFilters(String... tokenFilters) {
request.tokenFilters(tokenFilters);
return this;
}
@Override
protected void doExecute(ActionListener<AnalyzeResponse> listener) {
((IndicesAdminClient) client).analyze(request, listener);
}
}
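// Illustrative usage sketch (hypothetical): assumes an
// org.elasticsearch.client.IndicesAdminClient named indicesClient is in scope
// and that an index "my_index" exists.
class AnalyzeRequestBuilderUsageSketch {
static void printTokens(org.elasticsearch.client.IndicesAdminClient indicesClient) {
AnalyzeResponse response = new AnalyzeRequestBuilder(indicesClient, "my_index", "quick brown fox")
.setAnalyzer("standard")
.execute()
.actionGet();
// AnalyzeResponse is iterable over its tokens.
for (AnalyzeResponse.AnalyzeToken token : response) {
System.out.println(token.getTerm() + " [" + token.getStartOffset() + "," + token.getEndOffset() + ")");
}
}
}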
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_indices_analyze_AnalyzeRequestBuilder.java
|
337 |
public class NodesRestartResponse extends NodesOperationResponse<NodesRestartResponse.NodeRestartResponse> {
NodesRestartResponse() {
}
public NodesRestartResponse(ClusterName clusterName, NodeRestartResponse[] nodes) {
super(clusterName, nodes);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodes = new NodeRestartResponse[in.readVInt()];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeRestartResponse.readNodeRestartResponse(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(nodes.length);
for (NodeRestartResponse node : nodes) {
node.writeTo(out);
}
}
public static class NodeRestartResponse extends NodeOperationResponse {
NodeRestartResponse() {
}
public NodeRestartResponse(DiscoveryNode node) {
super(node);
}
public static NodeRestartResponse readNodeRestartResponse(StreamInput in) throws IOException {
NodeRestartResponse res = new NodeRestartResponse();
res.readFrom(in);
return res;
}
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_cluster_node_restart_NodesRestartResponse.java
|
4,211 |
public class StoreFileMetaData implements Streamable {
private String name;
// the actual file size on disk; if compressed, this is the compressed size
private long length;
private String checksum;
private transient Directory directory;
private StoreFileMetaData() {
}
public StoreFileMetaData(String name, long length, String checksum) {
this(name, length, checksum, null);
}
public StoreFileMetaData(String name, long length, String checksum, @Nullable Directory directory) {
this.name = name;
this.length = length;
this.checksum = checksum;
this.directory = directory;
}
public Directory directory() {
return this.directory;
}
public String name() {
return name;
}
/**
* The actual file size on disk; if the file is compressed, this is the compressed size.
*/
public long length() {
return length;
}
@Nullable
public String checksum() {
return this.checksum;
}
public boolean isSame(StoreFileMetaData other) {
if (checksum == null || other.checksum == null) {
return false;
}
return length == other.length && checksum.equals(other.checksum);
}
public static StoreFileMetaData readStoreFileMetaData(StreamInput in) throws IOException {
StoreFileMetaData md = new StoreFileMetaData();
md.readFrom(in);
return md;
}
@Override
public String toString() {
return "name [" + name + "], length [" + length + "], checksum [" + checksum + "]";
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
length = in.readVLong();
if (in.readBoolean()) {
checksum = in.readString();
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeVLong(length);
if (checksum == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeString(checksum);
}
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_index_store_StoreFileMetaData.java
|
3,401 |
public final class ExecutionServiceImpl implements ExecutionService {
private final NodeEngineImpl nodeEngine;
private final ExecutorService cachedExecutorService;
private final ScheduledExecutorService scheduledExecutorService;
private final ScheduledExecutorService defaultScheduledExecutorServiceDelegate;
private final ILogger logger;
private final CompletableFutureTask completableFutureTask;
private final ConcurrentMap<String, ManagedExecutorService> executors
= new ConcurrentHashMap<String, ManagedExecutorService>();
public ExecutionServiceImpl(NodeEngineImpl nodeEngine) {
this.nodeEngine = nodeEngine;
final Node node = nodeEngine.getNode();
logger = node.getLogger(ExecutionService.class.getName());
final ClassLoader classLoader = node.getConfigClassLoader();
final ThreadFactory threadFactory = new PoolExecutorThreadFactory(node.threadGroup,
node.getThreadPoolNamePrefix("cached"), classLoader);
cachedExecutorService = new ThreadPoolExecutor(
3, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
new SynchronousQueue<Runnable>(), threadFactory, new RejectedExecutionHandler() {
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
if (logger.isFinestEnabled()) {
logger.finest("Node is shutting down; discarding the task: " + r);
}
}
}
);
final String scheduledThreadName = node.getThreadNamePrefix("scheduled");
scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
new SingleExecutorThreadFactory(node.threadGroup, classLoader, scheduledThreadName));
enableRemoveOnCancelIfAvailable();
final int coreSize = Runtime.getRuntime().availableProcessors();
// default executors
register(SYSTEM_EXECUTOR, coreSize, Integer.MAX_VALUE, ExecutorType.CACHED);
register(SCHEDULED_EXECUTOR, coreSize * 5, coreSize * 100000, ExecutorType.CACHED);
defaultScheduledExecutorServiceDelegate = getScheduledExecutor(SCHEDULED_EXECUTOR);
// Register CompletableFuture task
completableFutureTask = new CompletableFutureTask();
scheduleWithFixedDelay(completableFutureTask, 1000, 100, TimeUnit.MILLISECONDS);
}
private void enableRemoveOnCancelIfAvailable() {
try {
final Method m = scheduledExecutorService.getClass().getMethod("setRemoveOnCancelPolicy", boolean.class);
m.invoke(scheduledExecutorService, true);
} catch (NoSuchMethodException ignored) {
} catch (InvocationTargetException ignored) {
} catch (IllegalAccessException ignored) {
}
}
@Override
public ManagedExecutorService register(String name, int poolSize, int queueCapacity, ExecutorType type) {
ExecutorConfig cfg = nodeEngine.getConfig().getExecutorConfigs().get(name);
if (cfg != null) {
poolSize = cfg.getPoolSize();
queueCapacity = cfg.getQueueCapacity() <= 0 ? Integer.MAX_VALUE : cfg.getQueueCapacity();
}
ManagedExecutorService executor = createExecutor(name, poolSize, queueCapacity, type);
if (executors.putIfAbsent(name, executor) != null) {
throw new IllegalArgumentException("ExecutorService['" + name + "'] already exists!");
}
return executor;
}
private final ConstructorFunction<String, ManagedExecutorService> constructor =
new ConstructorFunction<String, ManagedExecutorService>() {
public ManagedExecutorService createNew(String name) {
final ExecutorConfig cfg = nodeEngine.getConfig().findExecutorConfig(name);
final int queueCapacity = cfg.getQueueCapacity() <= 0 ? Integer.MAX_VALUE : cfg.getQueueCapacity();
return createExecutor(name, cfg.getPoolSize(), queueCapacity, ExecutorType.CACHED);
}
};
private ManagedExecutorService createExecutor(String name, int poolSize, int queueCapacity, ExecutorType type) {
ManagedExecutorService executor;
if (type == ExecutorType.CACHED) {
executor = new CachedExecutorServiceDelegate(nodeEngine, name, cachedExecutorService, poolSize, queueCapacity);
} else if (type == ExecutorType.CONCRETE) {
Node node = nodeEngine.getNode();
String internalName = name.startsWith("hz:") ? name.substring(3) : name;
NamedThreadPoolExecutor pool = new NamedThreadPoolExecutor(name, poolSize, poolSize,
60, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(queueCapacity),
new PoolExecutorThreadFactory(node.threadGroup,
node.getThreadPoolNamePrefix(internalName), node.getConfigClassLoader())
);
pool.allowCoreThreadTimeOut(true);
executor = pool;
} else {
throw new IllegalArgumentException("Unknown executor type: " + type);
}
return executor;
}
@Override
public ManagedExecutorService getExecutor(String name) {
return ConcurrencyUtil.getOrPutIfAbsent(executors, name, constructor);
}
@Override
public <V> ICompletableFuture<V> asCompletableFuture(Future<V> future) {
if (future == null) {
throw new IllegalArgumentException("future must not be null");
}
if (future instanceof ICompletableFuture) {
return (ICompletableFuture<V>) future;
}
return registerCompletableFuture(future);
}
@Override
public void execute(String name, Runnable command) {
getExecutor(name).execute(command);
}
@Override
public Future<?> submit(String name, Runnable task) {
return getExecutor(name).submit(task);
}
@Override
public <T> Future<T> submit(String name, Callable<T> task) {
return getExecutor(name).submit(task);
}
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
return defaultScheduledExecutorServiceDelegate.schedule(command, delay, unit);
}
@Override
public ScheduledFuture<?> schedule(String name, Runnable command, long delay, TimeUnit unit) {
return getScheduledExecutor(name).schedule(command, delay, unit);
}
@Override
public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) {
return defaultScheduledExecutorServiceDelegate.scheduleAtFixedRate(command, initialDelay, period, unit);
}
@Override
public ScheduledFuture<?> scheduleAtFixedRate(String name, Runnable command, long initialDelay,
long period, TimeUnit unit) {
return getScheduledExecutor(name).scheduleAtFixedRate(command, initialDelay, period, unit);
}
@Override
public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long period, TimeUnit unit) {
return defaultScheduledExecutorServiceDelegate.scheduleWithFixedDelay(command, initialDelay, period, unit);
}
@Override
public ScheduledFuture<?> scheduleWithFixedDelay(String name, Runnable command, long initialDelay,
long period, TimeUnit unit) {
return getScheduledExecutor(name).scheduleWithFixedDelay(command, initialDelay, period, unit);
}
@Override
public ScheduledExecutorService getDefaultScheduledExecutor() {
return defaultScheduledExecutorServiceDelegate;
}
@Override
public ScheduledExecutorService getScheduledExecutor(String name) {
return new ScheduledExecutorServiceDelegate(scheduledExecutorService, getExecutor(name));
}
@PrivateApi
void shutdown() {
logger.finest("Stopping executors...");
cachedExecutorService.shutdown();
scheduledExecutorService.shutdownNow();
try {
cachedExecutorService.awaitTermination(3, TimeUnit.SECONDS);
} catch (InterruptedException e) {
logger.finest(e);
}
for (ExecutorService executorService : executors.values()) {
executorService.shutdown();
}
executors.clear();
}
@Override
public void shutdownExecutor(String name) {
final ExecutorService ex = executors.remove(name);
if (ex != null) {
ex.shutdown();
}
}
private <V> ICompletableFuture<V> registerCompletableFuture(Future<V> future) {
CompletableFutureEntry<V> entry = new CompletableFutureEntry<V>(future, nodeEngine, completableFutureTask);
completableFutureTask.registerCompletableFutureEntry(entry);
return entry.completableFuture;
}
private static class CompletableFutureTask implements Runnable {
private final List<CompletableFutureEntry> entries = new ArrayList<CompletableFutureEntry>();
private final Lock entriesLock = new ReentrantLock();
private <V> void registerCompletableFutureEntry(CompletableFutureEntry<V> entry) {
entriesLock.lock();
try {
entries.add(entry);
} finally {
entriesLock.unlock();
}
}
@Override
public void run() {
if (entries.isEmpty()) {
return;
}
CompletableFutureEntry[] copy;
entriesLock.lock();
try {
copy = new CompletableFutureEntry[entries.size()];
copy = this.entries.toArray(copy);
} finally {
entriesLock.unlock();
}
List<CompletableFutureEntry> removes = null;
for (CompletableFutureEntry entry : copy) {
if (entry.processState()) {
if (removes == null) {
removes = new ArrayList<CompletableFutureEntry>(copy.length / 2);
}
removes.add(entry);
}
}
// Remove processed elements
if (removes != null && !removes.isEmpty()) {
entriesLock.lock();
try {
for (int i = 0; i < removes.size(); i++) {
entries.remove(removes.get(i));
}
} finally {
entriesLock.unlock();
}
}
}
}
static class CompletableFutureEntry<V> {
private final BasicCompletableFuture<V> completableFuture;
private final CompletableFutureTask completableFutureTask;
private CompletableFutureEntry(Future<V> future, NodeEngine nodeEngine,
CompletableFutureTask completableFutureTask) {
this.completableFutureTask = completableFutureTask;
this.completableFuture = new BasicCompletableFuture<V>(future, nodeEngine);
}
private boolean processState() {
if (completableFuture.isDone()) {
Object result;
try {
result = completableFuture.future.get();
} catch (Throwable t) {
result = t;
}
completableFuture.setResult(result);
return true;
}
return false;
}
}
private static class ScheduledExecutorServiceDelegate implements ScheduledExecutorService {
private final ScheduledExecutorService scheduledExecutorService;
private final ExecutorService executor;
private ScheduledExecutorServiceDelegate(ScheduledExecutorService scheduledExecutorService, ExecutorService executor) {
this.scheduledExecutorService = scheduledExecutorService;
this.executor = executor;
}
@Override
public void execute(Runnable command) {
executor.execute(command);
}
@Override
public <T> Future<T> submit(Callable<T> task) {
return executor.submit(task);
}
@Override
public <T> Future<T> submit(Runnable task, T result) {
return executor.submit(task, result);
}
@Override
public Future<?> submit(Runnable task) {
return executor.submit(task);
}
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
return scheduledExecutorService.schedule(new ScheduledTaskRunner(command, executor), delay, unit);
}
@Override
public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) {
return scheduledExecutorService.scheduleAtFixedRate(new ScheduledTaskRunner(command, executor), initialDelay, period, unit);
}
@Override
public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) {
return scheduledExecutorService.scheduleWithFixedDelay(new ScheduledTaskRunner(command, executor), initialDelay, delay, unit);
}
@Override
public void shutdown() {
throw new UnsupportedOperationException();
}
@Override
public List<Runnable> shutdownNow() {
throw new UnsupportedOperationException();
}
@Override
public boolean isShutdown() {
return false;
}
@Override
public boolean isTerminated() {
return false;
}
@Override
public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
throw new UnsupportedOperationException();
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
throw new UnsupportedOperationException();
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException {
throw new UnsupportedOperationException();
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
throw new UnsupportedOperationException();
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
throw new UnsupportedOperationException();
}
@Override
public <V> ScheduledFuture<V> schedule(final Callable<V> callable, long delay, TimeUnit unit) {
throw new UnsupportedOperationException();
}
}
static class BasicCompletableFuture<V> extends AbstractCompletableFuture<V> {
private final Future<V> future;
BasicCompletableFuture(Future<V> future, NodeEngine nodeEngine) {
super(nodeEngine, nodeEngine.getLogger(BasicCompletableFuture.class));
this.future = future;
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return future.cancel(mayInterruptIfRunning);
}
@Override
public boolean isCancelled() {
return future.isCancelled();
}
@Override
public boolean isDone() {
boolean done = future.isDone();
if (done && !super.isDone()) {
forceSetResult();
return true;
}
return done || super.isDone();
}
@Override
public V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
V result = future.get(timeout, unit);
// If not yet set by CompletableFuture task runner, we can go for it!
if (!super.isDone()) {
setResult(result);
}
return result;
}
private void forceSetResult() {
Object result;
try {
result = future.get();
} catch (Throwable t) {
result = t;
}
setResult(result);
}
}
private static class ScheduledTaskRunner implements Runnable {
private final Executor executor;
private final Runnable runnable;
public ScheduledTaskRunner(Runnable runnable, Executor executor) {
this.executor = executor;
this.runnable = runnable;
}
@Override
public void run() {
try {
executor.execute(runnable);
} catch (Throwable t) {
ExceptionUtil.sneakyThrow(t);
}
}
}
}
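// Illustrative usage sketch (hypothetical): assumes an initialized
// ExecutionServiceImpl named executionService; the executor name "hz:sketch"
// is made up for the example.
class ExecutionServiceUsageSketch {
static void scheduleHeartbeat(ExecutionServiceImpl executionService) {
// The named overload routes through a ScheduledExecutorServiceDelegate,
// which executes the task on the named managed executor.
executionService.scheduleWithFixedDelay("hz:sketch", new Runnable() {
public void run() {
System.out.println("heartbeat");
}
}, 1, 5, java.util.concurrent.TimeUnit.SECONDS);
}
}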
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_spi_impl_ExecutionServiceImpl.java
|
240 |
highlighter = new XPostingsHighlighter() {
@Override
protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
return new Passage[0];
}
};
| 0 (true)
|
src_test_java_org_apache_lucene_search_postingshighlight_XPostingsHighlighterTests.java
|
1,976 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_CUSTOMER_PASSWORD_TOKEN")
public class CustomerForgotPasswordSecurityTokenImpl implements CustomerForgotPasswordSecurityToken {
private static final long serialVersionUID = 1L;
@Id
@Column(name = "PASSWORD_TOKEN", nullable = false)
protected String token;
@Column(name = "CREATE_DATE", nullable = false)
@Temporal(TemporalType.TIMESTAMP)
protected Date createDate;
@Column(name = "TOKEN_USED_DATE")
@Temporal(TemporalType.TIMESTAMP)
protected Date tokenUsedDate;
@Column(name = "CUSTOMER_ID", nullable = false)
protected Long customerId;
@Column(name = "TOKEN_USED_FLAG", nullable = false)
protected boolean tokenUsedFlag;
public String getToken() {
return token;
}
public void setToken(String token) {
this.token = token;
}
public Date getCreateDate() {
return createDate;
}
public void setCreateDate(Date createDate) {
this.createDate = createDate;
}
public Date getTokenUsedDate() {
return tokenUsedDate;
}
public void setTokenUsedDate(Date tokenUsedDate) {
this.tokenUsedDate = tokenUsedDate;
}
public Long getCustomerId() {
return customerId;
}
public void setCustomerId(Long customerId) {
this.customerId = customerId;
}
public boolean isTokenUsedFlag() {
return tokenUsedFlag;
}
public void setTokenUsedFlag(boolean tokenUsedFlag) {
this.tokenUsedFlag = tokenUsedFlag;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CustomerForgotPasswordSecurityTokenImpl that = (CustomerForgotPasswordSecurityTokenImpl) o;
if (token != null ? !token.equals(that.token) : that.token != null) return false;
return true;
}
@Override
public int hashCode() {
return token != null ? token.hashCode() : 0;
}
}
| 1 (no label)
|
core_broadleaf-profile_src_main_java_org_broadleafcommerce_profile_core_domain_CustomerForgotPasswordSecurityTokenImpl.java
|
582 |
executionService.scheduleWithFixedDelay(executorName, new Runnable() {
public void run() {
sendMemberListToOthers();
}
}, memberListPublishInterval, memberListPublishInterval, TimeUnit.SECONDS);
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_cluster_ClusterServiceImpl.java
|
483 |
public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeResponse.AnalyzeToken>, ToXContent {
public static class AnalyzeToken implements Streamable {
private String term;
private int startOffset;
private int endOffset;
private int position;
private String type;
AnalyzeToken() {
}
public AnalyzeToken(String term, int position, int startOffset, int endOffset, String type) {
this.term = term;
this.position = position;
this.startOffset = startOffset;
this.endOffset = endOffset;
this.type = type;
}
public String getTerm() {
return this.term;
}
public int getStartOffset() {
return this.startOffset;
}
public int getEndOffset() {
return this.endOffset;
}
public int getPosition() {
return this.position;
}
public String getType() {
return this.type;
}
public static AnalyzeToken readAnalyzeToken(StreamInput in) throws IOException {
AnalyzeToken analyzeToken = new AnalyzeToken();
analyzeToken.readFrom(in);
return analyzeToken;
}
@Override
public void readFrom(StreamInput in) throws IOException {
term = in.readString();
startOffset = in.readInt();
endOffset = in.readInt();
position = in.readVInt();
type = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(term);
out.writeInt(startOffset);
out.writeInt(endOffset);
out.writeVInt(position);
out.writeOptionalString(type);
}
}
private List<AnalyzeToken> tokens;
AnalyzeResponse() {
}
public AnalyzeResponse(List<AnalyzeToken> tokens) {
this.tokens = tokens;
}
public List<AnalyzeToken> getTokens() {
return this.tokens;
}
@Override
public Iterator<AnalyzeToken> iterator() {
return tokens.iterator();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray(Fields.TOKENS);
for (AnalyzeToken token : tokens) {
builder.startObject();
builder.field(Fields.TOKEN, token.getTerm());
builder.field(Fields.START_OFFSET, token.getStartOffset());
builder.field(Fields.END_OFFSET, token.getEndOffset());
builder.field(Fields.TYPE, token.getType());
builder.field(Fields.POSITION, token.getPosition());
builder.endObject();
}
builder.endArray();
return builder;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
tokens = new ArrayList<AnalyzeToken>(size);
for (int i = 0; i < size; i++) {
tokens.add(AnalyzeToken.readAnalyzeToken(in));
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(tokens.size());
for (AnalyzeToken token : tokens) {
token.writeTo(out);
}
}
static final class Fields {
static final XContentBuilderString TOKENS = new XContentBuilderString("tokens");
static final XContentBuilderString TOKEN = new XContentBuilderString("token");
static final XContentBuilderString START_OFFSET = new XContentBuilderString("start_offset");
static final XContentBuilderString END_OFFSET = new XContentBuilderString("end_offset");
static final XContentBuilderString TYPE = new XContentBuilderString("type");
static final XContentBuilderString POSITION = new XContentBuilderString("position");
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_indices_analyze_AnalyzeResponse.java
|
311 |
public class MergeFileSystemXMLApplicationContext extends AbstractMergeXMLApplicationContext {
public MergeFileSystemXMLApplicationContext(ApplicationContext parent) {
super(parent);
}
/**
* Create a new MergeFileSystemXMLApplicationContext, loading the definitions from the given files. Note,
* all sourceLocation files will be merged using standard Spring configuration override rules. However, the patch
* files are fully merged into the result of the sourceLocations simple merge. Patch merges are first executed according
* to beans with the same id. Subsequent merges within a bean are executed against tagnames - ignoring any
* further id attributes.
*
* @param sourceLocations array of absolute file system paths for the source application context files
* @param patchLocations array of absolute file system paths for the patch application context files
* @throws BeansException
*/
public MergeFileSystemXMLApplicationContext(String[] sourceLocations, String[] patchLocations) throws BeansException {
this(sourceLocations, patchLocations, null);
}
/**
* Create a new MergeFileSystemXMLApplicationContext, loading the definitions from the given files. Note,
* all sourceLocation files will be merged using standard Spring configuration override rules. However, the patch
* files are fully merged into the result of the sourceLocations simple merge. Patch merges are first executed according
* to beans with the same id. Subsequent merges within a bean are executed against tagnames - ignoring any
* further id attributes.
*
* @param sourceLocations array of absolute file system paths for the source application context files
* @param patchLocations array of absolute file system paths for the patch application context files
* @param parent the parent context
* @throws BeansException
*/
public MergeFileSystemXMLApplicationContext(String[] sourceLocations, String[] patchLocations, ApplicationContext parent) throws BeansException {
this(parent);
ResourceInputStream[] sources;
ResourceInputStream[] patches;
try {
sources = new ResourceInputStream[sourceLocations.length];
for (int j=0;j<sourceLocations.length;j++){
File temp = new File(sourceLocations[j]);
sources[j] = new ResourceInputStream(new BufferedInputStream(new FileInputStream(temp)), sourceLocations[j]);
}
patches = new ResourceInputStream[patchLocations.length];
for (int j=0;j<patches.length;j++){
File temp = new File(patchLocations[j]);
patches[j] = new ResourceInputStream(new BufferedInputStream(new FileInputStream(temp)), patchLocations[j]);
}
} catch (FileNotFoundException e) {
throw new FatalBeanException("Unable to merge context files", e);
}
ImportProcessor importProcessor = new ImportProcessor(this);
try {
sources = importProcessor.extract(sources);
patches = importProcessor.extract(patches);
} catch (MergeException e) {
throw new FatalBeanException("Unable to merge source and patch locations", e);
}
this.configResources = new MergeApplicationContextXmlConfigResource().getConfigResources(sources, patches);
refresh();
}
}
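// Illustrative usage sketch (hypothetical paths and bean name): two source
// contexts are merged under standard Spring override rules, then a patch
// file is applied on top, as described in the constructor javadoc above.
class MergeContextUsageSketch {
static void loadMergedContext() {
String[] sources = {
"/opt/app/conf/applicationContext.xml",
"/opt/app/conf/applicationContext-custom.xml"
};
String[] patches = {"/opt/app/conf/applicationContext-patch.xml"};
org.springframework.context.ApplicationContext context =
new MergeFileSystemXMLApplicationContext(sources, patches);
Object someBean = context.getBean("someMergedBean");
}
}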
| 0 (true)
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_MergeFileSystemXMLApplicationContext.java
|
3,134 |
public class QueueIterator<E> implements Iterator<E> {
private final Iterator<Data> iterator;
private final SerializationService serializationService;
private final boolean binary;
public QueueIterator(Iterator<Data> iterator, SerializationService serializationService, boolean binary) {
this.iterator = iterator;
this.serializationService = serializationService;
this.binary = binary;
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public E next() {
Data data = iterator.next();
if (binary) {
return (E) data;
}
return (E) serializationService.toObject(data);
}
@Override
public void remove() {
iterator.remove();
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_queue_proxy_QueueIterator.java
|
322 |
static class NodeRequest extends NodeOperationRequest {
NodesHotThreadsRequest request;
NodeRequest() {
}
NodeRequest(String nodeId, NodesHotThreadsRequest request) {
super(request, nodeId);
this.request = request;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
request = new NodesHotThreadsRequest();
request.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
request.writeTo(out);
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_cluster_node_hotthreads_TransportNodesHotThreadsAction.java
|
2,845 |
@SuppressWarnings("unchecked")
private static class PartitionTable {
final Set<Integer>[] partitions = new Set[InternalPartition.MAX_REPLICA_COUNT];
Set<Integer> getPartitions(int index) {
check(index);
Set<Integer> set = partitions[index];
if (set == null) {
set = new HashSet<Integer>();
partitions[index] = set;
}
return set;
}
boolean add(int index, Integer partitionId) {
return getPartitions(index).add(partitionId);
}
boolean contains(int index, Integer partitionId) {
return getPartitions(index).contains(partitionId);
}
boolean contains(Integer partitionId) {
for (Set<Integer> set : partitions) {
if (set != null && set.contains(partitionId)) {
return true;
}
}
return false;
}
boolean remove(int index, Integer partitionId) {
return getPartitions(index).remove(partitionId);
}
int size(int index) {
return getPartitions(index).size();
}
void reset() {
for (Set<Integer> set : partitions) {
if (set != null) {
set.clear();
}
}
}
private void check(int index) {
if (index < 0 || index >= InternalPartition.MAX_REPLICA_COUNT) {
throw new ArrayIndexOutOfBoundsException(index);
}
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_partition_impl_PartitionStateGeneratorImpl.java
|
95 |
public class NamedOperationManagerImpl implements NamedOperationManager {
protected List<NamedOperationComponent> namedOperationComponents = new ArrayList<NamedOperationComponent>();
@Override
public Map<String, String> manageNamedParameters(Map<String, String> parameterMap) {
List<String> utilizedNames = new ArrayList<String>();
Map<String, String> derivedMap = new LinkedHashMap<String, String>();
for (NamedOperationComponent namedOperationComponent : namedOperationComponents) {
utilizedNames.addAll(namedOperationComponent.setOperationValues(parameterMap, derivedMap));
}
for (String utilizedName : utilizedNames) {
parameterMap.remove(utilizedName);
}
derivedMap.putAll(parameterMap);
return derivedMap;
}
public List<NamedOperationComponent> getNamedOperationComponents() {
return namedOperationComponents;
}
public void setNamedOperationComponents(List<NamedOperationComponent> namedOperationComponents) {
this.namedOperationComponents = namedOperationComponents;
}
}
| 0 (true)
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_service_operation_NamedOperationManagerImpl.java
|
1,731 |
public class EntryEventFilter implements EventFilter,DataSerializable {
boolean includeValue = false;
Data key = null;
public EntryEventFilter(boolean includeValue, Data key) {
this.includeValue = includeValue;
this.key = key;
}
public EntryEventFilter() {
}
public boolean isIncludeValue() {
return includeValue;
}
public Data getKey() {
return key;
}
public boolean eval(Object arg) {
return key == null || key.equals(arg);
}
public void writeData(ObjectDataOutput out) throws IOException {
out.writeBoolean(includeValue);
out.writeObject(key);
}
public void readData(ObjectDataInput in) throws IOException {
includeValue = in.readBoolean();
key = in.readObject();
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_map_EntryEventFilter.java
|
460 |
public class TransportPendingClusterTasksAction extends TransportMasterNodeReadOperationAction<PendingClusterTasksRequest, PendingClusterTasksResponse> {
private final ClusterService clusterService;
@Inject
public TransportPendingClusterTasksAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
super(settings, transportService, clusterService, threadPool);
this.clusterService = clusterService;
}
@Override
protected String transportAction() {
return PendingClusterTasksAction.NAME;
}
@Override
protected String executor() {
// very lightweight operation in memory, no need to fork to a thread
return ThreadPool.Names.SAME;
}
@Override
protected PendingClusterTasksRequest newRequest() {
return new PendingClusterTasksRequest();
}
@Override
protected PendingClusterTasksResponse newResponse() {
return new PendingClusterTasksResponse();
}
@Override
protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener<PendingClusterTasksResponse> listener) throws ElasticsearchException {
listener.onResponse(new PendingClusterTasksResponse(clusterService.pendingTasks()));
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_action_admin_cluster_tasks_TransportPendingClusterTasksAction.java
|
2,071 |
public class MergeOperation extends BasePutOperation {
private MapMergePolicy mergePolicy;
private EntryView<Data, Data> mergingEntry;
private boolean merged = false;
public MergeOperation(String name, Data dataKey, EntryView<Data, Data> entryView, MapMergePolicy policy) {
super(name, dataKey, null);
mergingEntry = entryView;
mergePolicy = policy;
}
public MergeOperation() {
}
public void run() {
SimpleEntryView entryView = (SimpleEntryView) mergingEntry;
entryView.setKey(mapService.toObject(mergingEntry.getKey()));
entryView.setValue(mapService.toObject(mergingEntry.getValue()));
merged = recordStore.merge(dataKey, mergingEntry, mergePolicy);
if (merged) {
Record record = recordStore.getRecord(dataKey);
if (record != null) {
dataValue = mapService.toData(record.getValue());
}
}
}
@Override
public Object getResponse() {
return merged;
}
public boolean shouldBackup() {
return merged;
}
public void afterRun() {
if (merged) {
invalidateNearCaches();
}
}
public Operation getBackupOperation() {
if (dataValue == null) {
return new RemoveBackupOperation(name, dataKey);
} else {
RecordInfo replicationInfo = mapService.createRecordInfo(recordStore.getRecord(dataKey));
return new PutBackupOperation(name, dataKey, dataValue, replicationInfo);
}
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeObject(mergingEntry);
out.writeObject(mergePolicy);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
mergingEntry = in.readObject();
mergePolicy = in.readObject();
}
@Override
public String toString() {
return "MergeOperation{" + name + "}";
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_map_operation_MergeOperation.java
|
403 |
public class CreditCardType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, CreditCardType> TYPES = new LinkedHashMap<String, CreditCardType>();
public static final CreditCardType MASTERCARD = new CreditCardType("MASTERCARD", "Master Card");
public static final CreditCardType VISA = new CreditCardType("VISA", "Visa");
public static final CreditCardType AMEX = new CreditCardType("AMEX", "American Express");
public static final CreditCardType DINERSCLUB_CARTEBLANCHE = new CreditCardType("DINERSCLUB_CARTEBLANCHE", "Diner's Club / Carte Blanche");
public static final CreditCardType DISCOVER = new CreditCardType("DISCOVER", "Discover");
public static final CreditCardType ENROUTE = new CreditCardType("ENROUTE", "En Route");
public static final CreditCardType JCB = new CreditCardType("JCB", "JCB");
public static CreditCardType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public CreditCardType() {
//do nothing
}
public CreditCardType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)){
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
CreditCardType other = (CreditCardType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
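// Illustrative usage sketch: the extensible-enum pattern above registers each
// instance in TYPES from setType(), so string lookups resolve once the
// constants have been class-loaded.
class CreditCardTypeUsageSketch {
static void lookupExample() {
CreditCardType card = CreditCardType.getInstance("VISA");
System.out.println(card.getFriendlyType()); // prints "Visa"
}
}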
| 1 (no label)
|
common_src_main_java_org_broadleafcommerce_common_payment_CreditCardType.java
|
2,084 |
public class FileSystemUtils {
private static ESLogger logger = ESLoggerFactory.getLogger(FileSystemUtils.class.getName());
private static final long mkdirsStallTimeout = TimeValue.timeValueMinutes(5).millis();
private static final Object mkdirsMutex = new Object();
private static volatile Thread mkdirsThread;
private static volatile long mkdirsStartTime;
public static boolean mkdirs(File dir) {
synchronized (mkdirsMutex) {
try {
mkdirsThread = Thread.currentThread();
mkdirsStartTime = System.currentTimeMillis();
return dir.mkdirs();
} finally {
mkdirsThread = null;
}
}
}
public static void checkMkdirsStall(long currentTime) {
Thread mkdirsThread1 = mkdirsThread;
long stallTime = currentTime - mkdirsStartTime;
if (mkdirsThread1 != null && (stallTime > mkdirsStallTimeout)) {
logger.error("mkdirs stalled for {} on {}, trying to interrupt", new TimeValue(stallTime), mkdirsThread1.getName());
mkdirsThread1.interrupt(); // try and interrupt it...
}
}
public static int maxOpenFiles(File testDir) {
boolean dirCreated = false;
if (!testDir.exists()) {
dirCreated = true;
testDir.mkdirs();
}
List<RandomAccessFile> files = new ArrayList<RandomAccessFile>();
try {
while (true) {
files.add(new RandomAccessFile(new File(testDir, "tmp" + files.size()), "rw"));
}
} catch (IOException ioe) {
int i = 0;
for (RandomAccessFile raf : files) {
try {
raf.close();
} catch (IOException e) {
// ignore
}
new File(testDir, "tmp" + i++).delete();
}
if (dirCreated) {
deleteRecursively(testDir);
}
}
return files.size();
}
public static boolean hasExtensions(File root, String... extensions) {
if (root != null && root.exists()) {
if (root.isDirectory()) {
File[] children = root.listFiles();
if (children != null) {
for (File child : children) {
if (child.isDirectory()) {
boolean has = hasExtensions(child, extensions);
if (has) {
return true;
}
} else {
for (String extension : extensions) {
if (child.getName().endsWith(extension)) {
return true;
}
}
}
}
}
}
}
return false;
}
/**
* Returns true if at least one of the files exists.
*/
public static boolean exists(File... files) {
for (File file : files) {
if (file.exists()) {
return true;
}
}
return false;
}
public static boolean deleteRecursively(File[] roots) {
boolean deleted = true;
for (File root : roots) {
deleted &= deleteRecursively(root);
}
return deleted;
}
public static boolean deleteRecursively(File root) {
return deleteRecursively(root, true);
}
private static boolean innerDeleteRecursively(File root) {
return deleteRecursively(root, true);
}
/**
* Delete the supplied {@link java.io.File} - for directories,
* recursively delete any nested directories or files as well.
*
* @param root the root <code>File</code> to delete
* @param deleteRoot whether or not to delete the root itself or just the content of the root.
* @return <code>true</code> if the <code>File</code> was deleted,
* otherwise <code>false</code>
*/
public static boolean deleteRecursively(File root, boolean deleteRoot) {
if (root != null && root.exists()) {
if (root.isDirectory()) {
File[] children = root.listFiles();
if (children != null) {
for (File aChildren : children) {
innerDeleteRecursively(aChildren);
}
}
}
if (deleteRoot) {
return root.delete();
} else {
return true;
}
}
return false;
}
public static void syncFile(File fileToSync) throws IOException {
boolean success = false;
int retryCount = 0;
IOException exc = null;
while (!success && retryCount < 5) {
retryCount++;
RandomAccessFile file = null;
try {
try {
file = new RandomAccessFile(fileToSync, "rw");
file.getFD().sync();
success = true;
} finally {
if (file != null)
file.close();
}
} catch (IOException ioe) {
if (exc == null)
exc = ioe;
try {
// Pause 5 msec
Thread.sleep(5);
} catch (InterruptedException ie) {
throw new InterruptedIOException(ie.getMessage());
}
}
}
}
public static void copyFile(File sourceFile, File destinationFile) throws IOException {
FileInputStream sourceIs = null;
FileChannel source = null;
FileOutputStream destinationOs = null;
FileChannel destination = null;
try {
sourceIs = new FileInputStream(sourceFile);
source = sourceIs.getChannel();
destinationOs = new FileOutputStream(destinationFile);
destination = destinationOs.getChannel();
destination.transferFrom(source, 0, source.size());
} finally {
if (source != null) {
source.close();
}
if (sourceIs != null) {
sourceIs.close();
}
if (destination != null) {
destination.close();
}
if (destinationOs != null) {
destinationOs.close();
}
}
}
private FileSystemUtils() {
}
}
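// Illustrative usage sketch (hypothetical path): clears a scratch directory's
// contents without removing the directory itself, matching the deleteRecursively
// javadoc above.
class FileSystemUtilsUsageSketch {
static void cleanScratchDir() {
java.io.File scratch = new java.io.File("/tmp/scratch");
if (FileSystemUtils.exists(scratch)) {
FileSystemUtils.deleteRecursively(scratch, false);
}
}
}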
| 1 (no label)
|
src_main_java_org_elasticsearch_common_io_FileSystemUtils.java
|
464 |
public class CeylonNavigatorLabelProvider extends
CeylonLabelProvider implements ICommonLabelProvider {
ICommonContentExtensionSite extensionSite;
public CeylonNavigatorLabelProvider() {
super(true); // small images
}
@Override
public StyledString getStyledText(Object element) {
if (element instanceof ExternalModuleNode) {
ExternalModuleNode externalModule = (ExternalModuleNode) element;
JDTModule jdtModule = externalModule.getModule();
String name = super.getStyledText(jdtModule).toString();
StyledString moduleText = new StyledString(name);
if (jdtModule != null) {
moduleText.append(" - " + jdtModule.getVersion(), QUALIFIER_STYLER);
}
return moduleText;
}
if (element instanceof SourceModuleNode) {
JDTModule module = ((SourceModuleNode) element).getModule();
if (module==null) {
return new StyledString(((SourceModuleNode) element).getElementName());
}
else {
String name = super.getStyledText(module).toString();
StyledString result = new StyledString(name);
if (module.isDefaultModule()) {
result = result.insert('(', 0).append(')');
}
return result;
}
}
if (element instanceof RepositoryNode) {
RepositoryNode repoNode = (RepositoryNode) element;
String stringToDisplay = getRepositoryString(repoNode);
return new StyledString(stringToDisplay);
}
if (element instanceof Package || element instanceof IPackageFragment) {
return new StyledString(super.getStyledText(element).getString());
}
if (element instanceof CeylonArchiveFileStore) {
CeylonArchiveFileStore archiveFileStore = (CeylonArchiveFileStore)element;
if (archiveFileStore.getParent() == null) {
return new StyledString("Ceylon Sources").append(" - " + archiveFileStore.getArchivePath().toOSString(), QUALIFIER_STYLER);
}
return new StyledString(archiveFileStore.getName());
}
if (element instanceof JarPackageFragmentRoot) {
JarPackageFragmentRoot jpfr = (JarPackageFragmentRoot) element;
if (ArtifactContext.CAR.substring(1).equalsIgnoreCase(jpfr.getPath().getFileExtension())) {
return new StyledString("Java Binaries").append(" - " + jpfr.getPath().toOSString(), QUALIFIER_STYLER);
} else {
return getJavaNavigatorLabelProvider().getStyledText(element);
}
}
if (element instanceof IProject || element instanceof IJavaProject) {
return getJavaNavigatorLabelProvider().getStyledText(element);
}
StyledString styledString = super.getStyledText(element);
if (styledString.getString().equals("<something>")) {
StyledString javaResult = getJavaNavigatorLabelProvider().getStyledText(element);
if (! javaResult.getString().trim().isEmpty()) {
return javaResult;
}
}
return styledString;
}
private String getRepositoryString(RepositoryNode repoNode) {
String displayString = repoNode.getDisplayString();
String stringToDisplay = null;
if (Constants.REPO_URL_CEYLON.equals(displayString)) {
stringToDisplay = "Herd Modules";
}
if (stringToDisplay == null && JDKRepository.JDK_REPOSITORY_DISPLAY_STRING.equals(displayString)) {
stringToDisplay = "JDK Modules";
}
if (stringToDisplay == null && CeylonBuilder.getInterpolatedCeylonSystemRepo(repoNode.project).equals(displayString)) {
stringToDisplay = "System Modules";
}
if (stringToDisplay == null && CeylonBuilder.getCeylonModulesOutputDirectory(repoNode.project).getAbsolutePath().equals(displayString)) {
stringToDisplay = "Output Modules";
}
if (stringToDisplay == null && CeylonProjectConfig.get(repoNode.project).getMergedRepositories().getCacheRepoDir().getAbsolutePath().equals(displayString)) {
stringToDisplay = "Cached Modules";
}
if (stringToDisplay == null && CeylonProjectConfig.get(repoNode.project).getMergedRepositories().getUserRepoDir().getAbsolutePath().equals(displayString)) {
stringToDisplay = "User Modules";
}
if (stringToDisplay == null) {
try {
for (IProject referencedProject: repoNode.project.getReferencedProjects()) {
if (referencedProject.isOpen() && CeylonNature.isEnabled(referencedProject)) {
if (CeylonBuilder.getCeylonModulesOutputDirectory(referencedProject).getAbsolutePath().equals(displayString)) {
stringToDisplay = "Modules of Referenced Project : " + referencedProject.getName() + "";
break;
}
}
}
} catch (CoreException e) {
}
}
if (stringToDisplay == null) {
for (Repositories.Repository repo : CeylonProjectConfig.get(repoNode.project).getMergedRepositories().getLocalLookupRepositories()) {
if (repo.getUrl().startsWith("./") && repo.getUrl().length() > 2) {
IPath relativePath = Path.fromPortableString(repo.getUrl().substring(2));
IFolder folder = repoNode.project.getFolder(relativePath);
if (folder.exists() && folder.getLocation().toFile().getAbsolutePath().equals(displayString)) {
stringToDisplay = "Local Repository : " + relativePath.toString() + "";
break;
}
}
}
}
if (stringToDisplay == null && NodeUtils.UNKNOWN_REPOSITORY.equals(displayString)) {
stringToDisplay = "Unknown Repository";
}
if (stringToDisplay == null) {
stringToDisplay = displayString;
}
return stringToDisplay;
}
@Override
public Image getImage(Object element) {
JavaNavigatorLabelProvider javaProvider = getJavaNavigatorLabelProvider();
if (element instanceof IProject || element instanceof IJavaProject) {
Image javaContributedImage = javaProvider.getImage(element);
if (javaContributedImage != null) {
return javaContributedImage;
}
}
if (element instanceof IPackageFragment &&
! CeylonBuilder.isInSourceFolder((IPackageFragment)element)) {
return javaProvider.getImage(element);
}
if (element instanceof ExternalModuleNode) {
return super.getImage(((ExternalModuleNode)element).getModule());
}
if (element instanceof SourceModuleNode) {
int decorationAttributes = 0;
for (Object child : getContentProvider().getChildren(element)) {
if (!hasPipelinedChildren(child)) {
continue;
}
int childValue = getDecorationAttributes(child);
if ((childValue & ERROR) != 0) {
decorationAttributes = ERROR;
break;
}
if ((childValue & WARNING) != 0) {
decorationAttributes = WARNING;
}
}
JDTModule module = ((SourceModuleNode)element).getModule();
if (module==null) {
return getDecoratedImage(CEYLON_MODULE, decorationAttributes, true);
}
else {
return getDecoratedImage(getImageKey(module), decorationAttributes, true);
}
}
if (element instanceof CeylonArchiveFileStore) {
CeylonArchiveFileStore archiveFileStore = (CeylonArchiveFileStore)element;
if (archiveFileStore.getParent() != null
&& ! archiveFileStore.fetchInfo().isDirectory()) {
IFolder sourceArchiveFolder = ExternalSourceArchiveManager.getExternalSourceArchiveManager().getSourceArchive(archiveFileStore.getArchivePath());
if (sourceArchiveFolder != null && sourceArchiveFolder.exists()) {
IResource file = sourceArchiveFolder.findMember(archiveFileStore.getEntryPath());
if (file instanceof IFile) {
element = file;
}
}
}
}
if (element instanceof IFile) {
if (! CeylonBuilder.isCeylon((IFile) element)) {
return javaProvider.getImage(element);
}
}
return super.getImage(element);
}
private boolean hasPipelinedChildren(Object child) {
return getContentProvider().hasPipelinedChildren(child,
getJavaNavigatorContentProvider().hasChildren(child));
}
@Override
protected String getImageKey(Object element) {
if (element instanceof RepositoryNode) {
return RUNTIME_OBJ;
}
if (element instanceof IPackageFragment) {
return CEYLON_PACKAGE;
}
if (element instanceof CeylonArchiveFileStore) {
CeylonArchiveFileStore archiveFileStore = (CeylonArchiveFileStore)element;
if (archiveFileStore.getParent() == null) {
return CEYLON_SOURCE_ARCHIVE;
} else {
if (archiveFileStore.fetchInfo().isDirectory()) {
return CEYLON_PACKAGE;
} else {
return CEYLON_FILE;
}
}
}
if (element instanceof JarPackageFragmentRoot) {
return CEYLON_BINARY_ARCHIVE;
}
return super.getImageKey(element);
}
@Override
public void restoreState(IMemento aMemento) {
// TODO Auto-generated method stub
}
@Override
public void saveState(IMemento aMemento) {
// TODO Auto-generated method stub
}
@Override
public String getDescription(Object anElement) {
if (anElement instanceof RepositoryNode) {
Repository repo = ((RepositoryNode)anElement).getRepository();
if (repo != null) {
return "Repository path : " + repo.getDisplayString();
}
}
if (anElement instanceof CeylonArchiveFileStore) {
CeylonArchiveFileStore archive = (CeylonArchiveFileStore)anElement;
if (archive.getParent() == null) {
return archive.getArchivePath().toOSString();
}
}
return null;
}
@Override
public void init(ICommonContentExtensionSite aConfig) {
extensionSite = aConfig;
}
private INavigatorContentExtension getJavaNavigatorExtension() {
@SuppressWarnings("unchecked")
Set<INavigatorContentExtension> set = extensionSite.getService().findContentExtensionsByTriggerPoint(JavaCore.create(ResourcesPlugin.getWorkspace().getRoot()));
for (INavigatorContentExtension extension : set) {
if (extension.getDescriptor().equals(extensionSite.getExtension().getDescriptor().getOverriddenDescriptor())) {
return extension;
}
}
return null;
}
private JavaNavigatorLabelProvider getJavaNavigatorLabelProvider() {
INavigatorContentExtension javaExtension = getJavaNavigatorExtension();
if (javaExtension != null) {
return (JavaNavigatorLabelProvider) javaExtension.getLabelProvider();
}
return null;
}
private JavaNavigatorContentProvider getJavaNavigatorContentProvider() {
INavigatorContentExtension javaExtension = getJavaNavigatorExtension();
if (javaExtension != null) {
return (JavaNavigatorContentProvider) javaExtension.getContentProvider();
}
return null;
}
private CeylonNavigatorContentProvider getContentProvider() {
return (CeylonNavigatorContentProvider) extensionSite.getExtension().getContentProvider();
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_navigator_CeylonNavigatorLabelProvider.java
|
285 |
public class ActionRequestValidationException extends ElasticsearchException {
private final List<String> validationErrors = new ArrayList<String>();
public ActionRequestValidationException() {
super(null);
}
public void addValidationError(String error) {
validationErrors.add(error);
}
public void addValidationErrors(Iterable<String> errors) {
for (String error : errors) {
validationErrors.add(error);
}
}
public List<String> validationErrors() {
return validationErrors;
}
@Override
public String getMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Validation Failed: ");
int index = 0;
for (String error : validationErrors) {
sb.append(++index).append(": ").append(error).append(";");
}
return sb.toString();
}
}
| 0true
|
src_main_java_org_elasticsearch_action_ActionRequestValidationException.java
|
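The getMessage() implementation above joins the accumulated errors into a single numbered string. As a minimal standalone sketch of that format (independent of the Elasticsearch base classes, so the class name here is illustrative only):
import java.util.ArrayList;
import java.util.List;
public class ValidationMessageSketch {
    public static String format(List<String> validationErrors) {
        StringBuilder sb = new StringBuilder();
        sb.append("Validation Failed: ");
        int index = 0;
        for (String error : validationErrors) {
            sb.append(++index).append(": ").append(error).append(";");
        }
        return sb.toString();
    }
    public static void main(String[] args) {
        List<String> errors = new ArrayList<String>();
        errors.add("index is missing");
        errors.add("type is missing");
        // prints: Validation Failed: 1: index is missing;2: type is missing;
        System.out.println(format(errors));
    }
}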
264 |
public class Build {
public static final Build CURRENT;
static {
String hash = "NA";
String hashShort = "NA";
String timestamp = "NA";
try {
String properties = Streams.copyToStringFromClasspath("/es-build.properties");
Properties props = new Properties();
props.load(new FastStringReader(properties));
hash = props.getProperty("hash", hash);
if (!hash.equals("NA")) {
hashShort = hash.substring(0, 7);
}
String gitTimestampRaw = props.getProperty("timestamp");
if (gitTimestampRaw != null) {
timestamp = ISODateTimeFormat.dateTimeNoMillis().withZone(DateTimeZone.UTC).print(Long.parseLong(gitTimestampRaw));
}
} catch (Exception e) {
// just ignore...
}
CURRENT = new Build(hash, hashShort, timestamp);
}
private String hash;
private String hashShort;
private String timestamp;
Build(String hash, String hashShort, String timestamp) {
this.hash = hash;
this.hashShort = hashShort;
this.timestamp = timestamp;
}
public String hash() {
return hash;
}
public String hashShort() {
return hashShort;
}
public String timestamp() {
return timestamp;
}
public static Build readBuild(StreamInput in) throws IOException {
String hash = in.readString();
String hashShort = in.readString();
String timestamp = in.readString();
return new Build(hash, hashShort, timestamp);
}
public static void writeBuild(Build build, StreamOutput out) throws IOException {
out.writeString(build.hash());
out.writeString(build.hashShort());
out.writeString(build.timestamp());
}
}
| 0true
|
src_main_java_org_elasticsearch_Build.java
|
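The static initializer above tolerates a missing or partial properties resource by falling back to "NA". A hedged, self-contained sketch of the same fallback logic, reading from an in-memory string rather than the real /es-build.properties resource:
import java.io.StringReader;
import java.util.Properties;
public class BuildInfoSketch {
    public static void main(String[] args) throws Exception {
        String hash = "NA";
        String hashShort = "NA";
        Properties props = new Properties();
        // stand-in for Streams.copyToStringFromClasspath("/es-build.properties")
        props.load(new StringReader("hash=0123456789abcdef"));
        hash = props.getProperty("hash", hash);
        if (!hash.equals("NA")) {
            hashShort = hash.substring(0, 7); // first 7 characters, git-style short hash
        }
        System.out.println(hash + " / " + hashShort); // 0123456789abcdef / 0123456
    }
}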
431 |
public class ClusterStatsAction extends ClusterAction<ClusterStatsRequest, ClusterStatsResponse, ClusterStatsRequestBuilder> {
public static final ClusterStatsAction INSTANCE = new ClusterStatsAction();
public static final String NAME = "cluster/stats";
private ClusterStatsAction() {
super(NAME);
}
@Override
public ClusterStatsResponse newResponse() {
return new ClusterStatsResponse();
}
@Override
public ClusterStatsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new ClusterStatsRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsAction.java
|
3,734 |
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
Map<String, Object> objectNode = node;
ObjectMapper.Builder builder = createBuilder(name);
boolean nested = false;
boolean nestedIncludeInParent = false;
boolean nestedIncludeInRoot = false;
for (Map.Entry<String, Object> entry : objectNode.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals("dynamic")) {
String value = fieldNode.toString();
if (value.equalsIgnoreCase("strict")) {
builder.dynamic(Dynamic.STRICT);
} else {
builder.dynamic(nodeBooleanValue(fieldNode) ? Dynamic.TRUE : Dynamic.FALSE);
}
} else if (fieldName.equals("type")) {
String type = fieldNode.toString();
if (type.equals(CONTENT_TYPE)) {
builder.nested = Nested.NO;
} else if (type.equals(NESTED_CONTENT_TYPE)) {
nested = true;
} else {
throw new MapperParsingException("Trying to parse an object but has a different type [" + type + "] for [" + name + "]");
}
} else if (fieldName.equals("include_in_parent")) {
nestedIncludeInParent = nodeBooleanValue(fieldNode);
} else if (fieldName.equals("include_in_root")) {
nestedIncludeInRoot = nodeBooleanValue(fieldNode);
} else if (fieldName.equals("enabled")) {
builder.enabled(nodeBooleanValue(fieldNode));
} else if (fieldName.equals("path")) {
builder.pathType(parsePathType(name, fieldNode.toString()));
} else if (fieldName.equals("properties")) {
parseProperties(builder, (Map<String, Object>) fieldNode, parserContext);
} else if (fieldName.equals("include_in_all")) {
builder.includeInAll(nodeBooleanValue(fieldNode));
} else {
processField(builder, fieldName, fieldNode);
}
}
if (nested) {
builder.nested = Nested.newNested(nestedIncludeInParent, nestedIncludeInRoot);
}
return builder;
}
private void parseProperties(ObjectMapper.Builder objBuilder, Map<String, Object> propsNode, ParserContext parserContext) {
for (Map.Entry<String, Object> entry : propsNode.entrySet()) {
String propName = entry.getKey();
Map<String, Object> propNode = (Map<String, Object>) entry.getValue();
String type;
Object typeNode = propNode.get("type");
if (typeNode != null) {
type = typeNode.toString();
} else {
// let's see if we can derive this...
if (propNode.get("properties") != null) {
type = ObjectMapper.CONTENT_TYPE;
} else if (propNode.size() == 1 && propNode.get("enabled") != null) {
// if there is a single property with the enabled flag on it, make it an object
// (usually, enabled is set to false to avoid indexing anything, including core values,
// which a disabled object type supports).
type = ObjectMapper.CONTENT_TYPE;
} else {
throw new MapperParsingException("No type specified for property [" + propName + "]");
}
}
Mapper.TypeParser typeParser = parserContext.typeParser(type);
if (typeParser == null) {
throw new MapperParsingException("No handler for type [" + type + "] declared on field [" + propName + "]");
}
objBuilder.add(typeParser.parse(propName, propNode, parserContext));
}
}
protected Builder createBuilder(String name) {
return object(name);
}
protected void processField(Builder builder, String fieldName, Object fieldNode) {
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_object_ObjectMapper.java
|
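parseProperties above derives a type for untyped property nodes: a node with a "properties" child, or a node whose only key is "enabled", is treated as an object mapping. A minimal sketch of that derivation rule in isolation (the map literals are illustrative, not part of the mapper API):
import java.util.HashMap;
import java.util.Map;
public class TypeDerivationSketch {
    static String deriveType(Map<String, Object> propNode) {
        Object typeNode = propNode.get("type");
        if (typeNode != null) {
            return typeNode.toString();
        }
        if (propNode.get("properties") != null) {
            return "object"; // nested property definitions imply an object mapping
        }
        if (propNode.size() == 1 && propNode.get("enabled") != null) {
            return "object"; // a lone "enabled" flag also implies an object mapping
        }
        throw new IllegalArgumentException("No type specified");
    }
    public static void main(String[] args) {
        Map<String, Object> node = new HashMap<String, Object>();
        node.put("enabled", false);
        System.out.println(deriveType(node)); // object
    }
}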
354 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(SlowTest.class)
public class ClientMapReduceTest
extends AbstractClientMapReduceJobTest {
private static final String MAP_NAME = "default";
@After
public void shutdown() {
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
}
@Test(timeout = 60000, expected = ExecutionException.class)
public void testExceptionDistribution()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 100; i++) {
m1.put(i, i);
}
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Map<String, List<Integer>>> future = job.mapper(new ExceptionThrowingMapper()).submit();
try {
Map<String, List<Integer>> result = future.get();
fail();
} catch (Exception e) {
e.printStackTrace();
assertTrue(e.getCause() instanceof NullPointerException);
throw e;
}
}
@Test(timeout = 60000, expected = CancellationException.class)
public void testInProcessCancellation()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 100; i++) {
m1.put(i, i);
}
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Map<String, List<Integer>>> future = job.mapper(new TimeConsumingMapper()).submit();
future.cancel(true);
try {
Map<String, List<Integer>> result = future.get();
fail();
} catch (Exception e) {
e.printStackTrace();
throw e;
}
}
@Test(timeout = 60000)
public void testMapper()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 100; i++) {
m1.put(i, i);
}
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Map<String, List<Integer>>> future = job.mapper(new TestMapper()).submit();
Map<String, List<Integer>> result = future.get();
assertEquals(100, result.size());
for (List<Integer> value : result.values()) {
assertEquals(1, value.size());
}
}
@Test(timeout = 60000)
public void testMapperReducer()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 100; i++) {
m1.put(i, i);
}
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Map<String, Integer>> future = job.mapper(new GroupingTestMapper()).reducer(new TestReducerFactory())
.submit();
Map<String, Integer> result = future.get();
// Precalculate results
int[] expectedResults = new int[4];
for (int i = 0; i < 100; i++) {
int index = i % 4;
expectedResults[index] += i;
}
for (int i = 0; i < 4; i++) {
assertEquals(expectedResults[i], (int) result.get(String.valueOf(i)));
}
}
@Test(timeout = 60000)
public void testMapperCollator()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 100; i++) {
m1.put(i, i);
}
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Integer> future = job.mapper(new GroupingTestMapper()).submit(new GroupingTestCollator());
int result = future.get();
// Precalculate result
int expectedResult = 0;
for (int i = 0; i < 100; i++) {
expectedResult += i;
}
assertEquals(expectedResult, result);
}
@Test(timeout = 60000)
public void testKeyedMapperCollator()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 10000; i++) {
m1.put(i, i);
}
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Integer> future = job.onKeys(50).mapper(new TestMapper()).submit(new GroupingTestCollator());
int result = future.get();
assertEquals(50, result);
}
@Test(timeout = 60000)
public void testKeyPredicateMapperCollator()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 10000; i++) {
m1.put(i, i);
}
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Integer> future = job.keyPredicate(new TestKeyPredicate()).mapper(new TestMapper())
.submit(new GroupingTestCollator());
int result = future.get();
assertEquals(50, result);
}
@Test(timeout = 60000)
public void testMapperReducerCollator()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 100; i++) {
m1.put(i, i);
}
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Integer> future = job.mapper(new GroupingTestMapper()).reducer(new TestReducerFactory())
.submit(new TestCollator());
int result = future.get();
// Precalculate result
int expectedResult = 0;
for (int i = 0; i < 100; i++) {
expectedResult += i;
}
for (int i = 0; i < 4; i++) {
assertEquals(expectedResult, result);
}
}
@Test(timeout = 60000)
public void testAsyncMapper()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 100; i++) {
m1.put(i, i);
}
final Map<String, List<Integer>> listenerResults = new HashMap<String, List<Integer>>();
final Semaphore semaphore = new Semaphore(1);
semaphore.acquire();
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Map<String, List<Integer>>> future = job.mapper(new TestMapper()).submit();
future.andThen(new ExecutionCallback<Map<String, List<Integer>>>() {
@Override
public void onResponse(Map<String, List<Integer>> response) {
listenerResults.putAll(response);
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
semaphore.release();
}
});
semaphore.acquire();
assertEquals(100, listenerResults.size());
for (List<Integer> value : listenerResults.values()) {
assertEquals(1, value.size());
}
}
@Test(timeout = 60000)
public void testKeyedAsyncMapper()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 100; i++) {
m1.put(i, i);
}
final Map<String, List<Integer>> listenerResults = new HashMap<String, List<Integer>>();
final Semaphore semaphore = new Semaphore(1);
semaphore.acquire();
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Map<String, List<Integer>>> future = job.onKeys(50).mapper(new TestMapper()).submit();
future.andThen(new ExecutionCallback<Map<String, List<Integer>>>() {
@Override
public void onResponse(Map<String, List<Integer>> response) {
listenerResults.putAll(response);
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
semaphore.release();
}
});
semaphore.acquire();
assertEquals(1, listenerResults.size());
for (List<Integer> value : listenerResults.values()) {
assertEquals(1, value.size());
}
}
@Test(timeout = 60000)
public void testAsyncMapperReducer()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 100; i++) {
m1.put(i, i);
}
final Map<String, Integer> listenerResults = new HashMap<String, Integer>();
final Semaphore semaphore = new Semaphore(1);
semaphore.acquire();
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Map<String, Integer>> future = job.mapper(new GroupingTestMapper()).reducer(new TestReducerFactory())
.submit();
future.andThen(new ExecutionCallback<Map<String, Integer>>() {
@Override
public void onResponse(Map<String, Integer> response) {
listenerResults.putAll(response);
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
semaphore.release();
}
});
// Precalculate results
int[] expectedResults = new int[4];
for (int i = 0; i < 100; i++) {
int index = i % 4;
expectedResults[index] += i;
}
semaphore.acquire();
for (int i = 0; i < 4; i++) {
assertEquals(expectedResults[i], (int) listenerResults.get(String.valueOf(i)));
}
}
@Test(timeout = 60000)
public void testAsyncMapperCollator()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 100; i++) {
m1.put(i, i);
}
final int[] result = new int[1];
final Semaphore semaphore = new Semaphore(1);
semaphore.acquire();
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Integer> future = job.mapper(new GroupingTestMapper())//
.submit(new GroupingTestCollator());
future.andThen(new ExecutionCallback<Integer>() {
@Override
public void onResponse(Integer response) {
result[0] = response.intValue();
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
semaphore.release();
}
});
// Precalculate result
int expectedResult = 0;
for (int i = 0; i < 100; i++) {
expectedResult += i;
}
semaphore.acquire();
for (int i = 0; i < 4; i++) {
assertEquals(expectedResult, result[0]);
}
}
@Test(timeout = 60000)
public void testAsyncMapperReducerCollator()
throws Exception {
Config config = buildConfig();
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance client = HazelcastClient.newHazelcastClient(null);
IMap<Integer, Integer> m1 = client.getMap(MAP_NAME);
for (int i = 0; i < 100; i++) {
m1.put(i, i);
}
final int[] result = new int[1];
final Semaphore semaphore = new Semaphore(1);
semaphore.acquire();
JobTracker tracker = client.getJobTracker("default");
Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1));
ICompletableFuture<Integer> future = job.mapper(new GroupingTestMapper()).reducer(new TestReducerFactory())
.submit(new TestCollator());
future.andThen(new ExecutionCallback<Integer>() {
@Override
public void onResponse(Integer response) {
result[0] = response.intValue();
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
semaphore.release();
}
});
// Precalculate result
int expectedResult = 0;
for (int i = 0; i < 100; i++) {
expectedResult += i;
}
semaphore.acquire();
for (int i = 0; i < 4; i++) {
assertEquals(expectedResult, result[0]);
}
}
public static class ExceptionThrowingMapper
implements Mapper<Integer, Integer, String, Integer> {
@Override
public void map(Integer key, Integer value, Context<String, Integer> context) {
throw new NullPointerException("BUMM!");
}
}
public static class TimeConsumingMapper
implements Mapper<Integer, Integer, String, Integer> {
@Override
public void map(Integer key, Integer value, Context<String, Integer> collector) {
try {
Thread.sleep(1000);
} catch (Exception ignore) {
}
collector.emit(String.valueOf(key), value);
}
}
public static class TestKeyPredicate
implements KeyPredicate<Integer> {
@Override
public boolean evaluate(Integer key) {
return key == 50;
}
}
public static class TestMapper
implements Mapper<Integer, Integer, String, Integer> {
@Override
public void map(Integer key, Integer value, Context<String, Integer> collector) {
collector.emit(String.valueOf(key), value);
}
}
public static class GroupingTestMapper
implements Mapper<Integer, Integer, String, Integer> {
@Override
public void map(Integer key, Integer value, Context<String, Integer> collector) {
collector.emit(String.valueOf(key % 4), value);
}
}
public static class TestReducer
extends Reducer<String, Integer, Integer> {
private transient int sum = 0;
@Override
public void reduce(Integer value) {
sum += value;
}
@Override
public Integer finalizeReduce() {
return sum;
}
}
public static class TestReducerFactory
implements ReducerFactory<String, Integer, Integer> {
public TestReducerFactory() {
}
@Override
public Reducer<String, Integer, Integer> newReducer(String key) {
return new TestReducer();
}
}
public static class GroupingTestCollator
implements Collator<Map.Entry<String, List<Integer>>, Integer> {
@Override
public Integer collate(Iterable<Map.Entry<String, List<Integer>>> values) {
int sum = 0;
for (Map.Entry<String, List<Integer>> entry : values) {
for (Integer value : entry.getValue()) {
sum += value;
}
}
return sum;
}
}
public static class TestCollator
implements Collator<Map.Entry<String, Integer>, Integer> {
@Override
public Integer collate(Iterable<Map.Entry<String, Integer>> values) {
int sum = 0;
for (Map.Entry<String, Integer> entry : values) {
sum += entry.getValue();
}
return sum;
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java
|
465 |
public static class AliasActions {
private String[] indices = Strings.EMPTY_ARRAY;
private String[] aliases = Strings.EMPTY_ARRAY;
private AliasAction aliasAction;
public AliasActions(AliasAction.Type type, String[] indices, String[] aliases) {
aliasAction = new AliasAction(type);
indices(indices);
aliases(aliases);
}
public AliasActions(AliasAction.Type type, String index, String alias) {
aliasAction = new AliasAction(type);
indices(index);
aliases(alias);
}
AliasActions(AliasAction.Type type, String[] index, String alias) {
aliasAction = new AliasAction(type);
indices(index);
aliases(alias);
}
public AliasActions(AliasAction action) {
this.aliasAction = action;
indices(action.index());
aliases(action.alias());
}
public AliasActions(Type type, String index, String[] aliases) {
aliasAction = new AliasAction(type);
indices(index);
aliases(aliases);
}
public AliasActions() {
}
public AliasActions filter(Map<String, Object> filter) {
aliasAction.filter(filter);
return this;
}
public AliasActions filter(FilterBuilder filter) {
aliasAction.filter(filter);
return this;
}
public Type actionType() {
return aliasAction.actionType();
}
public void routing(String routing) {
aliasAction.routing(routing);
}
public void searchRouting(String searchRouting) {
aliasAction.searchRouting(searchRouting);
}
public void indexRouting(String indexRouting) {
aliasAction.indexRouting(indexRouting);
}
public AliasActions filter(String filter) {
aliasAction.filter(filter);
return this;
}
public void indices(String... indices) {
List<String> finalIndices = new ArrayList<String>();
for (String index : indices) {
if (index != null) {
finalIndices.add(index);
}
}
this.indices = finalIndices.toArray(new String[finalIndices.size()]);
}
public void aliases(String... aliases) {
this.aliases = aliases;
}
public String[] aliases() {
return aliases;
}
public String[] indices() {
return indices;
}
public AliasAction aliasAction() {
return aliasAction;
}
public String[] concreteAliases(MetaData metaData, String concreteIndex) {
if (aliasAction.actionType() == Type.REMOVE) {
//for REMOVE we expand the aliases
String[] indexAsArray = {concreteIndex};
ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliasMetaData = metaData.findAliases(aliases, indexAsArray);
List<String> finalAliases = new ArrayList<String>();
for (ObjectCursor<ImmutableList<AliasMetaData>> curAliases : aliasMetaData.values()) {
for (AliasMetaData aliasMeta: curAliases.value) {
finalAliases.add(aliasMeta.alias());
}
}
return finalAliases.toArray(new String[finalAliases.size()]);
} else {
//for add we just return the current aliases
return aliases;
}
}
public AliasActions readFrom(StreamInput in) throws IOException {
indices = in.readStringArray();
aliases = in.readStringArray();
aliasAction = readAliasAction(in);
return this;
}
public void writeTo(StreamOutput out) throws IOException {
out.writeStringArray(indices);
out.writeStringArray(aliases);
this.aliasAction.writeTo(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_alias_IndicesAliasesRequest.java
|
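indices(String...) above quietly drops null entries before storing the array. A standalone sketch of that defensive varargs pattern (class and method names here are illustrative):
import java.util.ArrayList;
import java.util.List;
public class NullFilteringVarargsSketch {
    static String[] filterNulls(String... values) {
        List<String> kept = new ArrayList<String>();
        for (String value : values) {
            if (value != null) {
                kept.add(value); // silently skip nulls, as AliasActions.indices(...) does
            }
        }
        return kept.toArray(new String[kept.size()]);
    }
    public static void main(String[] args) {
        String[] result = filterNulls("idx-1", null, "idx-2");
        System.out.println(result.length); // 2
    }
}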
5,325 |
public class UnmappedTerms extends InternalTerms {
public static final Type TYPE = new Type("terms", "umterms");
private static final Collection<Bucket> BUCKETS = Collections.emptyList();
private static final Map<String, Bucket> BUCKETS_MAP = Collections.emptyMap();
public static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public UnmappedTerms readResult(StreamInput in) throws IOException {
UnmappedTerms buckets = new UnmappedTerms();
buckets.readFrom(in);
return buckets;
}
};
public static void registerStreams() {
AggregationStreams.registerStream(STREAM, TYPE.stream());
}
UnmappedTerms() {} // for serialization
public UnmappedTerms(String name, InternalOrder order, int requiredSize, long minDocCount) {
super(name, order, requiredSize, minDocCount, BUCKETS);
}
@Override
public Type type() {
return TYPE;
}
@Override
public void readFrom(StreamInput in) throws IOException {
this.name = in.readString();
this.order = InternalOrder.Streams.readOrder(in);
this.requiredSize = readSize(in);
this.minDocCount = in.readVLong();
this.buckets = BUCKETS;
this.bucketMap = BUCKETS_MAP;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
InternalOrder.Streams.writeOrder(order, out);
writeSize(requiredSize, out);
out.writeVLong(minDocCount);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
builder.startArray(CommonFields.BUCKETS).endArray();
builder.endObject();
return builder;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_terms_UnmappedTerms.java
|
44 |
public class TouchCommand extends AbstractTextCommand {
String key;
int expiration;
boolean noreply;
ByteBuffer response;
public TouchCommand(TextCommandType type, String key, int expiration, boolean noReply) {
super(type);
this.key = key;
this.expiration = expiration;
this.noreply = noReply;
}
public boolean writeTo(ByteBuffer destination) {
if (response == null) {
response = ByteBuffer.wrap(STORED);
}
while (destination.hasRemaining() && response.hasRemaining()) {
destination.put(response.get());
}
return !response.hasRemaining();
}
public boolean readFrom(ByteBuffer source) {
return true;
}
public boolean shouldReply() {
return !noreply;
}
public String getKey() {
return key;
}
public int getExpiration() {
return expiration;
}
public void setResponse(byte[] value) {
this.response = ByteBuffer.wrap(value);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_TouchCommand.java
|
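writeTo above copies as much of the response as the destination buffer can take and reports whether it finished, so the caller can retry with a fresh buffer. A self-contained sketch of that partial-drain contract:
import java.nio.ByteBuffer;
public class PartialWriteSketch {
    // returns true once the whole response has been drained
    static boolean writeTo(ByteBuffer response, ByteBuffer destination) {
        while (destination.hasRemaining() && response.hasRemaining()) {
            destination.put(response.get());
        }
        return !response.hasRemaining();
    }
    public static void main(String[] args) {
        ByteBuffer response = ByteBuffer.wrap("STORED\r\n".getBytes());
        ByteBuffer small = ByteBuffer.allocate(4);
        System.out.println(writeTo(response, small)); // false: only 4 bytes fit
        small.clear();
        System.out.println(writeTo(response, small)); // true: remainder drained
    }
}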
1,143 |
public class OSQLMethodField extends OAbstractSQLMethod {
public static final String NAME = "field";
public OSQLMethodField() {
super(NAME, 0, 1);
}
@Override
public Object execute(final OIdentifiable iCurrentRecord, final OCommandContext iContext, Object ioResult,
final Object[] iMethodParams) {
if (ioResult != null)
if (ioResult instanceof String)
try {
ioResult = new ODocument(new ORecordId((String) ioResult));
} catch (Exception e) {
OLogManager.instance().error(this, "Error on reading rid with value '%s'", null, ioResult);
ioResult = null;
}
else if (ioResult instanceof OIdentifiable)
ioResult = ((OIdentifiable) ioResult).getRecord();
else if (ioResult instanceof Collection<?> || ioResult instanceof OMultiCollectionIterator<?>
|| ioResult.getClass().isArray()) {
final List<Object> result = new ArrayList<Object>(OMultiValue.getSize(ioResult));
for (Object o : OMultiValue.getMultiValueIterable(ioResult)) {
result.add(ODocumentHelper.getFieldValue(o, iMethodParams[0].toString()));
}
return result;
}
if (ioResult != null) {
if (ioResult instanceof OCommandContext) {
ioResult = ((OCommandContext) ioResult).getVariable(iMethodParams[0].toString());
} else {
ioResult = ODocumentHelper.getFieldValue(ioResult, iMethodParams[0].toString(), iContext);
}
}
return ioResult;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodField.java
|
307 |
public class MergeApplicationContextXmlConfigResource extends MergeXmlConfigResource {
private static final Log LOG = LogFactory.getLog(MergeApplicationContextXmlConfigResource.class);
/**
* Generate a merged configuration resource, loading the definitions from the given streams. Note,
* all sourceLocation streams will be merged using standard Spring configuration override rules. However, the patch
* streams are fully merged into the result of the sourceLocations simple merge. Patch merges are first executed according
* to beans with the same id. Subsequent merges within a bean are executed against tag names, ignoring any
* further id attributes.
*
* @param sources array of input streams for the source application context files
* @param patches array of input streams for the patch application context files
* @throws BeansException
*/
public Resource[] getConfigResources(ResourceInputStream[] sources, ResourceInputStream[] patches) throws BeansException {
Resource[] configResources = null;
ResourceInputStream merged = null;
try {
merged = merge(sources);
if (patches != null) {
ResourceInputStream[] patches2 = new ResourceInputStream[patches.length+1];
patches2[0] = merged;
System.arraycopy(patches, 0, patches2, 1, patches.length);
merged = merge(patches2);
}
//read the final stream into a byte array
ByteArrayOutputStream baos = new ByteArrayOutputStream();
boolean eof = false;
while (!eof) {
int temp = merged.read();
if (temp == -1) {
eof = true;
} else {
baos.write(temp);
}
}
configResources = new Resource[]{new ByteArrayResource(baos.toByteArray())};
if (LOG.isDebugEnabled()) {
LOG.debug("Merged ApplicationContext Including Patches: \n" + serialize(configResources[0]));
}
} catch (MergeException e) {
throw new FatalBeanException("Unable to merge source and patch locations", e);
} catch (MergeManagerSetupException e) {
throw new FatalBeanException("Unable to merge source and patch locations", e);
} catch (IOException e) {
throw new FatalBeanException("Unable to merge source and patch locations", e);
} finally {
if (merged != null) {
try{ merged.close(); } catch (Throwable e) {
LOG.error("Unable to merge source and patch locations", e);
}
}
}
return configResources;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_MergeApplicationContextXmlConfigResource.java
|
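getConfigResources above drains the merged stream one byte at a time into a ByteArrayOutputStream. A minimal sketch of that read-to-end loop on its own:
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
public class ReadFullySketch {
    static byte[] readFully(InputStream in) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        boolean eof = false;
        while (!eof) {
            int temp = in.read();
            if (temp == -1) {
                eof = true; // read() returns -1 at end of stream
            } else {
                baos.write(temp);
            }
        }
        return baos.toByteArray();
    }
    public static void main(String[] args) throws IOException {
        byte[] merged = readFully(new ByteArrayInputStream("<beans/>".getBytes()));
        System.out.println(merged.length); // 8
    }
}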
105 |
{
@Override
public Object doWork( State state )
{
node.setProperty( key, value );
return null;
}
}, 200, MILLISECONDS );
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestManualAcquireLock.java
|
8 |
.setImplementation(new EntryAdapter() {
@Override
public void entryEvicted(EntryEvent event) {
latch.countDown();
}
}));
| 0true
|
hazelcast_src_test_java_com_hazelcast_ascii_RestTest.java
|
151 |
public class ItemCriteriaDTO implements Serializable {
private static final long serialVersionUID = 1L;
protected Integer qty;
protected String matchRule;
public Integer getQty() {
return qty;
}
public void setQty(Integer qty) {
this.qty = qty;
}
public String getMatchRule() {
return matchRule;
}
public void setMatchRule(String matchRule) {
this.matchRule = matchRule;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_dto_ItemCriteriaDTO.java
|
2,561 |
clusterService.submitStateUpdateTask("local-disco-initial_connect(master)", new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.put(discovery.localNode);
}
nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
// remove the NO_MASTER block in this case
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(Discovery.NO_MASTER_BLOCK);
return ClusterState.builder(currentState).nodes(nodesBuilder).blocks(blocks).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
sendInitialStateEventIfNeeded();
}
});
| 1no label
|
src_main_java_org_elasticsearch_discovery_local_LocalDiscovery.java
|
132 |
@SuppressWarnings("restriction")
public class OUnsafeBinaryConverter implements OBinaryConverter {
public static final OUnsafeBinaryConverter INSTANCE = new OUnsafeBinaryConverter();
private static final Unsafe theUnsafe;
private static final long BYTE_ARRAY_OFFSET;
static {
theUnsafe = (Unsafe) AccessController.doPrivileged(new PrivilegedAction<Object>() {
public Object run() {
try {
Field f = Unsafe.class.getDeclaredField("theUnsafe");
boolean wasAccessible = f.isAccessible();
f.setAccessible(true);
try {
return f.get(null);
} finally {
f.setAccessible(wasAccessible);
}
} catch (NoSuchFieldException e) {
throw new Error(e);
} catch (IllegalAccessException e) {
throw new Error(e);
}
}
});
BYTE_ARRAY_OFFSET = theUnsafe.arrayBaseOffset(byte[].class);
}
public void putShort(byte[] buffer, int index, short value, ByteOrder byteOrder) {
if (!byteOrder.equals(ByteOrder.nativeOrder()))
value = Short.reverseBytes(value);
theUnsafe.putShort(buffer, index + BYTE_ARRAY_OFFSET, value);
}
public short getShort(byte[] buffer, int index, ByteOrder byteOrder) {
short result = theUnsafe.getShort(buffer, index + BYTE_ARRAY_OFFSET);
if (!byteOrder.equals(ByteOrder.nativeOrder()))
result = Short.reverseBytes(result);
return result;
}
public void putInt(byte[] buffer, int pointer, int value, ByteOrder byteOrder) {
final long position = pointer + BYTE_ARRAY_OFFSET;
if (!byteOrder.equals(ByteOrder.nativeOrder()))
value = Integer.reverseBytes(value);
theUnsafe.putInt(buffer, position, value);
}
public int getInt(byte[] buffer, int pointer, ByteOrder byteOrder) {
final long position = pointer + BYTE_ARRAY_OFFSET;
int result = theUnsafe.getInt(buffer, position);
if (!byteOrder.equals(ByteOrder.nativeOrder()))
result = Integer.reverseBytes(result);
return result;
}
public void putLong(byte[] buffer, int index, long value, ByteOrder byteOrder) {
if (!byteOrder.equals(ByteOrder.nativeOrder()))
value = Long.reverseBytes(value);
theUnsafe.putLong(buffer, index + BYTE_ARRAY_OFFSET, value);
}
public long getLong(byte[] buffer, int index, ByteOrder byteOrder) {
long result = theUnsafe.getLong(buffer, index + BYTE_ARRAY_OFFSET);
if (!byteOrder.equals(ByteOrder.nativeOrder()))
result = Long.reverseBytes(result);
return result;
}
public void putChar(byte[] buffer, int index, char character, ByteOrder byteOrder) {
if (!byteOrder.equals(ByteOrder.nativeOrder()))
character = Character.reverseBytes(character);
theUnsafe.putChar(buffer, index + BYTE_ARRAY_OFFSET, character);
}
public char getChar(byte[] buffer, int index, ByteOrder byteOrder) {
char result = theUnsafe.getChar(buffer, index + BYTE_ARRAY_OFFSET);
if (!byteOrder.equals(ByteOrder.nativeOrder()))
result = Character.reverseBytes(result);
return result;
}
public boolean nativeAccelerationUsed() {
return true;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_serialization_OUnsafeBinaryConverter.java
|
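Each accessor above reverses the bytes only when the requested order differs from the platform's native order. A sketch of that check using only the public JDK API (no sun.misc.Unsafe), which should behave equivalently for int values:
import java.nio.ByteOrder;
public class ByteOrderSketch {
    static int toOrder(int value, ByteOrder byteOrder) {
        if (!byteOrder.equals(ByteOrder.nativeOrder())) {
            value = Integer.reverseBytes(value); // swap only when orders differ
        }
        return value;
    }
    public static void main(String[] args) {
        int v = 0x11223344;
        // on a little-endian machine this prints 44332211; on big-endian, 11223344
        System.out.printf("%08x%n", toOrder(v, ByteOrder.BIG_ENDIAN));
    }
}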
848 |
new Visitor() {
private boolean needsParens = false;
@Override
public void visit(Tree.Variable that) {
if (that.getType() instanceof Tree.SyntheticVariable) {
TypedDeclaration od =
that.getDeclarationModel().getOriginalDeclaration();
if (od!=null && od.equals(declaration) && delete) {
Integer startIndex =
that.getSpecifierExpression().getStartIndex();
tfc.addEdit(new InsertEdit(startIndex,
that.getIdentifier().getText()+" = "));
}
}
super.visit(that);
}
@Override
public void visit(Tree.MemberOrTypeExpression that) {
super.visit(that);
inlineDefinition(tokens, declarationTokens, term,
tfc, null, that, needsParens);
}
@Override
public void visit(Tree.OperatorExpression that) {
boolean onp = needsParens;
needsParens=true;
super.visit(that);
needsParens = onp;
}
@Override
public void visit(Tree.StatementOrArgument that) {
boolean onp = needsParens;
needsParens = false;
super.visit(that);
needsParens = onp;
}
@Override
public void visit(Tree.Expression that) {
boolean onp = needsParens;
needsParens = false;
super.visit(that);
needsParens = onp;
}
}.visit(pu);
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_refactor_InlineRefactoring.java
|
2,591 |
public class MasterFaultDetection extends AbstractComponent {
public static interface Listener {
void onMasterFailure(DiscoveryNode masterNode, String reason);
void onDisconnectedFromMaster();
}
private final ThreadPool threadPool;
private final TransportService transportService;
private final DiscoveryNodesProvider nodesProvider;
private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<Listener>();
private final boolean connectOnNetworkDisconnect;
private final TimeValue pingInterval;
private final TimeValue pingRetryTimeout;
private final int pingRetryCount;
// used mainly for testing, should always be true
private final boolean registerConnectionListener;
private final FDConnectionListener connectionListener;
private volatile MasterPinger masterPinger;
private final Object masterNodeMutex = new Object();
private volatile DiscoveryNode masterNode;
private volatile int retryCount;
private final AtomicBoolean notifiedMasterFailure = new AtomicBoolean();
public MasterFaultDetection(Settings settings, ThreadPool threadPool, TransportService transportService, DiscoveryNodesProvider nodesProvider) {
super(settings);
this.threadPool = threadPool;
this.transportService = transportService;
this.nodesProvider = nodesProvider;
this.connectOnNetworkDisconnect = componentSettings.getAsBoolean("connect_on_network_disconnect", true);
this.pingInterval = componentSettings.getAsTime("ping_interval", timeValueSeconds(1));
this.pingRetryTimeout = componentSettings.getAsTime("ping_timeout", timeValueSeconds(30));
this.pingRetryCount = componentSettings.getAsInt("ping_retries", 3);
this.registerConnectionListener = componentSettings.getAsBoolean("register_connection_listener", true);
logger.debug("[master] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingRetryTimeout, pingRetryCount);
this.connectionListener = new FDConnectionListener();
if (registerConnectionListener) {
transportService.addConnectionListener(connectionListener);
}
transportService.registerHandler(MasterPingRequestHandler.ACTION, new MasterPingRequestHandler());
}
public DiscoveryNode masterNode() {
return this.masterNode;
}
public void addListener(Listener listener) {
listeners.add(listener);
}
public void removeListener(Listener listener) {
listeners.remove(listener);
}
public void restart(DiscoveryNode masterNode, String reason) {
synchronized (masterNodeMutex) {
if (logger.isDebugEnabled()) {
logger.debug("[master] restarting fault detection against master [{}], reason [{}]", masterNode, reason);
}
innerStop();
innerStart(masterNode);
}
}
public void start(final DiscoveryNode masterNode, String reason) {
synchronized (masterNodeMutex) {
if (logger.isDebugEnabled()) {
logger.debug("[master] starting fault detection against master [{}], reason [{}]", masterNode, reason);
}
innerStart(masterNode);
}
}
private void innerStart(final DiscoveryNode masterNode) {
this.masterNode = masterNode;
this.retryCount = 0;
this.notifiedMasterFailure.set(false);
// try and connect to make sure we are connected
try {
transportService.connectToNode(masterNode);
} catch (final Exception e) {
// notify master failure (which stops also) and bail..
notifyMasterFailure(masterNode, "failed to perform initial connect [" + e.getMessage() + "]");
return;
}
if (masterPinger != null) {
masterPinger.stop();
}
this.masterPinger = new MasterPinger();
// start the ping process
threadPool.schedule(pingInterval, ThreadPool.Names.SAME, masterPinger);
}
public void stop(String reason) {
synchronized (masterNodeMutex) {
if (masterNode != null) {
if (logger.isDebugEnabled()) {
logger.debug("[master] stopping fault detection against master [{}], reason [{}]", masterNode, reason);
}
}
innerStop();
}
}
private void innerStop() {
// also will stop the next ping schedule
this.retryCount = 0;
if (masterPinger != null) {
masterPinger.stop();
masterPinger = null;
}
this.masterNode = null;
}
public void close() {
stop("closing");
this.listeners.clear();
transportService.removeConnectionListener(connectionListener);
transportService.removeHandler(MasterPingRequestHandler.ACTION);
}
private void handleTransportDisconnect(DiscoveryNode node) {
synchronized (masterNodeMutex) {
if (!node.equals(this.masterNode)) {
return;
}
if (connectOnNetworkDisconnect) {
try {
transportService.connectToNode(node);
// if all is well, make sure we restart the pinger
if (masterPinger != null) {
masterPinger.stop();
}
this.masterPinger = new MasterPinger();
threadPool.schedule(pingInterval, ThreadPool.Names.SAME, masterPinger);
} catch (Exception e) {
logger.trace("[master] [{}] transport disconnected (with verified connect)", masterNode);
notifyMasterFailure(masterNode, "transport disconnected (with verified connect)");
}
} else {
logger.trace("[master] [{}] transport disconnected", node);
notifyMasterFailure(node, "transport disconnected");
}
}
}
private void notifyDisconnectedFromMaster() {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
for (Listener listener : listeners) {
listener.onDisconnectedFromMaster();
}
}
});
}
private void notifyMasterFailure(final DiscoveryNode masterNode, final String reason) {
if (notifiedMasterFailure.compareAndSet(false, true)) {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
for (Listener listener : listeners) {
listener.onMasterFailure(masterNode, reason);
}
}
});
stop("master failure, " + reason);
}
}
private class FDConnectionListener implements TransportConnectionListener {
@Override
public void onNodeConnected(DiscoveryNode node) {
}
@Override
public void onNodeDisconnected(DiscoveryNode node) {
handleTransportDisconnect(node);
}
}
private class MasterPinger implements Runnable {
private volatile boolean running = true;
public void stop() {
this.running = false;
}
@Override
public void run() {
if (!running) {
// return and don't spawn...
return;
}
final DiscoveryNode masterToPing = masterNode;
if (masterToPing == null) {
// master is null, should not happen, but we are still running, so reschedule
threadPool.schedule(pingInterval, ThreadPool.Names.SAME, MasterPinger.this);
return;
}
transportService.sendRequest(masterToPing, MasterPingRequestHandler.ACTION, new MasterPingRequest(nodesProvider.nodes().localNode().id(), masterToPing.id()), options().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout),
new BaseTransportResponseHandler<MasterPingResponseResponse>() {
@Override
public MasterPingResponseResponse newInstance() {
return new MasterPingResponseResponse();
}
@Override
public void handleResponse(MasterPingResponseResponse response) {
if (!running) {
return;
}
// reset the counter, we got a good result
MasterFaultDetection.this.retryCount = 0;
// check if the master node did not get switched on us; if it did, we simply return with no reschedule
if (masterToPing.equals(MasterFaultDetection.this.masterNode())) {
if (!response.connectedToMaster) {
logger.trace("[master] [{}] does not have us registered with it...", masterToPing);
notifyDisconnectedFromMaster();
}
// we don't stop on disconnection from master, we keep pinging it
threadPool.schedule(pingInterval, ThreadPool.Names.SAME, MasterPinger.this);
}
}
@Override
public void handleException(TransportException exp) {
if (!running) {
return;
}
if (exp instanceof ConnectTransportException) {
// ignore this one, we already handle it by registering a connection listener
return;
}
synchronized (masterNodeMutex) {
// check if the master node did not get switched on us...
if (masterToPing.equals(MasterFaultDetection.this.masterNode())) {
if (exp.getCause() instanceof NoLongerMasterException) {
logger.debug("[master] pinging a master {} that is no longer a master", masterNode);
notifyMasterFailure(masterToPing, "no longer master");
return;
} else if (exp.getCause() instanceof NotMasterException) {
logger.debug("[master] pinging a master {} that is not the master", masterNode);
notifyMasterFailure(masterToPing, "not master");
return;
} else if (exp.getCause() instanceof NodeDoesNotExistOnMasterException) {
logger.debug("[master] pinging a master {} but we do not exists on it, act as if its master failure", masterNode);
notifyMasterFailure(masterToPing, "do not exists on master, act as master failure");
return;
}
int retryCount = ++MasterFaultDetection.this.retryCount;
logger.trace("[master] failed to ping [{}], retry [{}] out of [{}]", exp, masterNode, retryCount, pingRetryCount);
if (retryCount >= pingRetryCount) {
logger.debug("[master] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", masterNode, pingRetryCount, pingRetryTimeout);
// not good, failure
notifyMasterFailure(masterToPing, "failed to ping, tried [" + pingRetryCount + "] times, each with maximum [" + pingRetryTimeout + "] timeout");
} else {
// resend the request, not reschedule, rely on send timeout
transportService.sendRequest(masterToPing, MasterPingRequestHandler.ACTION, new MasterPingRequest(nodesProvider.nodes().localNode().id(), masterToPing.id()), options().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout), this);
}
}
}
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
}
}
static class NoLongerMasterException extends ElasticsearchIllegalStateException {
@Override
public Throwable fillInStackTrace() {
return null;
}
}
static class NotMasterException extends ElasticsearchIllegalStateException {
@Override
public Throwable fillInStackTrace() {
return null;
}
}
static class NodeDoesNotExistOnMasterException extends ElasticsearchIllegalStateException {
@Override
public Throwable fillInStackTrace() {
return null;
}
}
private class MasterPingRequestHandler extends BaseTransportRequestHandler<MasterPingRequest> {
public static final String ACTION = "discovery/zen/fd/masterPing";
@Override
public MasterPingRequest newInstance() {
return new MasterPingRequest();
}
@Override
public void messageReceived(MasterPingRequest request, TransportChannel channel) throws Exception {
DiscoveryNodes nodes = nodesProvider.nodes();
// check if we are really the same master as the one we think we are
// this can happen if the master got "kill -9" and then another node started using the same port
if (!request.masterNodeId.equals(nodes.localNodeId())) {
throw new NotMasterException();
}
// if we are no longer master, fail...
if (!nodes.localNodeMaster()) {
throw new NoLongerMasterException();
}
if (!nodes.nodeExists(request.nodeId)) {
throw new NodeDoesNotExistOnMasterException();
}
// send a response, and note if we are connected to the master or not
channel.sendResponse(new MasterPingResponseResponse(nodes.nodeExists(request.nodeId)));
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
private static class MasterPingRequest extends TransportRequest {
private String nodeId;
private String masterNodeId;
private MasterPingRequest() {
}
private MasterPingRequest(String nodeId, String masterNodeId) {
this.nodeId = nodeId;
this.masterNodeId = masterNodeId;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodeId = in.readString();
masterNodeId = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(nodeId);
out.writeString(masterNodeId);
}
}
private static class MasterPingResponseResponse extends TransportResponse {
private boolean connectedToMaster;
private MasterPingResponseResponse() {
}
private MasterPingResponseResponse(boolean connectedToMaster) {
this.connectedToMaster = connectedToMaster;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
connectedToMaster = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(connectedToMaster);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_fd_MasterFaultDetection.java
|
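The MasterPinger above resets retryCount on any successful ping and declares failure once the counter reaches pingRetryCount. A simplified, self-contained sketch of that reset-on-success retry counter (the ping itself is stubbed out; names are illustrative):
public class RetryCounterSketch {
    private int retryCount = 0;
    private final int pingRetryCount = 3;
    // returns true if the master should be considered failed
    boolean onPingResult(boolean success) {
        if (success) {
            retryCount = 0; // any good ping wipes the failure history
            return false;
        }
        return ++retryCount >= pingRetryCount;
    }
    public static void main(String[] args) {
        RetryCounterSketch fd = new RetryCounterSketch();
        System.out.println(fd.onPingResult(false)); // false (1 of 3)
        System.out.println(fd.onPingResult(true));  // false, counter reset
        System.out.println(fd.onPingResult(false)); // false (1 of 3)
        System.out.println(fd.onPingResult(false)); // false (2 of 3)
        System.out.println(fd.onPingResult(false)); // true  (3 of 3)
    }
}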
25 |
return Iterables.transform(Iterables.filter(outEdges, new Predicate<Edge>() {
@Override
public boolean apply(@Nullable Edge edge) {
return !CHECK_VALUE || ((Integer) edge.getProperty("number")).intValue() == value;
}
}), new Function<Edge, Vertex>() {
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_TestByteBuffer.java
|
4,193 |
private class RestoreContext extends Context {
private final Store store;
private final RecoveryStatus recoveryStatus;
/**
* Constructs new restore context
*
* @param snapshotId snapshot id
* @param shardId shard to be restored
* @param snapshotShardId shard in the snapshot that data should be restored from
* @param recoveryStatus recovery status to report progress
*/
public RestoreContext(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId, RecoveryStatus recoveryStatus) {
super(snapshotId, shardId, snapshotShardId);
store = indicesService.indexServiceSafe(shardId.getIndex()).shardInjectorSafe(shardId.id()).getInstance(Store.class);
this.recoveryStatus = recoveryStatus;
}
/**
* Performs restore operation
*/
public void restore() {
logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId);
BlobStoreIndexShardSnapshot snapshot;
try {
snapshot = readSnapshot(blobContainer.readBlobFully(snapshotBlobName(snapshotId)));
} catch (IOException ex) {
throw new IndexShardRestoreFailedException(shardId, "failed to read shard snapshot file", ex);
}
recoveryStatus.updateStage(RecoveryStatus.Stage.INDEX);
int numberOfFiles = 0;
long totalSize = 0;
int numberOfReusedFiles = 0;
long reusedTotalSize = 0;
List<FileInfo> filesToRecover = Lists.newArrayList();
for (FileInfo fileInfo : snapshot.indexFiles()) {
String fileName = fileInfo.physicalName();
StoreFileMetaData md = null;
try {
md = store.metaData(fileName);
} catch (IOException e) {
// no file
}
numberOfFiles++;
// we don't compute checksum for segments, so always recover them
if (!fileName.startsWith("segments") && md != null && fileInfo.isSame(md)) {
totalSize += md.length();
numberOfReusedFiles++;
reusedTotalSize += md.length();
if (logger.isTraceEnabled()) {
logger.trace("not_recovering [{}], exists in local store and is same", fileInfo.physicalName());
}
} else {
totalSize += fileInfo.length();
filesToRecover.add(fileInfo);
if (logger.isTraceEnabled()) {
if (md == null) {
logger.trace("recovering [{}], does not exists in local store", fileInfo.physicalName());
} else {
logger.trace("recovering [{}], exists in local store but is different", fileInfo.physicalName());
}
}
}
}
recoveryStatus.index().files(numberOfFiles, totalSize, numberOfReusedFiles, reusedTotalSize);
if (filesToRecover.isEmpty()) {
logger.trace("no files to recover, all exists within the local store");
}
if (logger.isTraceEnabled()) {
logger.trace("[{}] [{}] recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]", shardId, snapshotId, numberOfFiles, new ByteSizeValue(totalSize), numberOfReusedFiles, new ByteSizeValue(reusedTotalSize));
}
final CountDownLatch latch = new CountDownLatch(filesToRecover.size());
final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
for (final FileInfo fileToRecover : filesToRecover) {
logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name());
restoreFile(fileToRecover, latch, failures);
}
try {
latch.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
if (!failures.isEmpty()) {
throw new IndexShardRestoreFailedException(shardId, "Failed to recover index", failures.get(0));
}
// read the snapshot data persisted
long version = -1;
try {
if (Lucene.indexExists(store.directory())) {
version = Lucene.readSegmentInfos(store.directory()).getVersion();
}
} catch (IOException e) {
throw new IndexShardRestoreFailedException(shardId, "Failed to fetch index version after copying it over", e);
}
recoveryStatus.index().updateVersion(version);
// now, go over and clean files that are in the store, but were not in the snapshot
try {
for (String storeFile : store.directory().listAll()) {
if (!snapshot.containPhysicalIndexFile(storeFile)) {
try {
store.directory().deleteFile(storeFile);
} catch (IOException e) {
// ignore
}
}
}
} catch (IOException e) {
// ignore
}
}
/**
* Restores a single file.
* This is an asynchronous method: upon completion of the operation the latch is counted down and any failures are
* added to the {@code failures} list
*
* @param fileInfo file to be restored
* @param latch latch that should be counted down once the file is restored
* @param failures thread-safe list of failures
*/
private void restoreFile(final FileInfo fileInfo, final CountDownLatch latch, final List<Throwable> failures) {
final IndexOutput indexOutput;
try {
// we create an output with no checksum, this is because the pure binary data of the file is not
// the checksum (because of seek). We will create the checksum file once copying is done
indexOutput = store.createOutputRaw(fileInfo.physicalName());
} catch (IOException e) {
failures.add(e);
latch.countDown();
return;
}
String firstFileToRecover = fileInfo.partName(0);
final AtomicInteger partIndex = new AtomicInteger();
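// A file may be stored as several blob parts; part 0 is read first and the
// listener below chains a read of each subsequent part from onCompleted()
// until the whole file has been written to the index output.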
blobContainer.readBlob(firstFileToRecover, new BlobContainer.ReadBlobListener() {
@Override
public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
recoveryStatus.index().addCurrentFilesSize(size);
indexOutput.writeBytes(data, offset, size);
if (restoreRateLimiter != null) {
rateLimiterListener.onRestorePause(restoreRateLimiter.pause(size));
}
}
@Override
public synchronized void onCompleted() {
int part = partIndex.incrementAndGet();
if (part < fileInfo.numberOfParts()) {
String partName = fileInfo.partName(part);
// continue with the new part
blobContainer.readBlob(partName, this);
return;
} else {
// we are done...
try {
indexOutput.close();
// write the checksum
if (fileInfo.checksum() != null) {
store.writeChecksum(fileInfo.physicalName(), fileInfo.checksum());
}
store.directory().sync(Collections.singleton(fileInfo.physicalName()));
} catch (IOException e) {
onFailure(e);
return;
}
}
latch.countDown();
}
@Override
public void onFailure(Throwable t) {
failures.add(t);
latch.countDown();
}
});
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_snapshots_blobstore_BlobStoreIndexShardRepository.java
|
422 |
public class ClientMapReduceProxy
extends ClientProxy
implements JobTracker {
private final ConcurrentMap<String, ClientTrackableJob> trackableJobs = new ConcurrentHashMap<String, ClientTrackableJob>();
public ClientMapReduceProxy(String instanceName, String serviceName, String objectName) {
super(instanceName, serviceName, objectName);
}
@Override
protected void onDestroy() {
for (ClientTrackableJob trackableJob : trackableJobs.values()) {
trackableJob.completableFuture.cancel(false);
}
}
@Override
public <K, V> Job<K, V> newJob(KeyValueSource<K, V> source) {
return new ClientJob<K, V>(getName(), source);
}
@Override
public <V> TrackableJob<V> getTrackableJob(String jobId) {
return trackableJobs.get(jobId);
}
@Override
public String toString() {
return "JobTracker{" + "name='" + getName() + '\'' + '}';
}
/*
* Removed for now since it is moved to Hazelcast 3.3
@Override
public <K, V> ProcessJob<K, V> newProcessJob(KeyValueSource<K, V> source) {
// TODO
return null;
}*/
private <T> T invoke(InvocationClientRequest request, String jobId) throws Exception {
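// Job-control requests (cancellation, process information) must be answered by
// the member that owns the running job; if the job is no longer tracked locally,
// there is nothing to ask and null is returned.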
ClientContext context = getContext();
ClientInvocationService cis = context.getInvocationService();
ClientTrackableJob trackableJob = trackableJobs.get(jobId);
if (trackableJob != null) {
Address runningMember = trackableJob.jobOwner;
ICompletableFuture<T> future = cis.invokeOnTarget(request, runningMember);
return future.get();
}
return null;
}
private class ClientJob<KeyIn, ValueIn> extends AbstractJob<KeyIn, ValueIn> {
public ClientJob(String name, KeyValueSource<KeyIn, ValueIn> keyValueSource) {
super(name, ClientMapReduceProxy.this, keyValueSource);
}
@Override
protected <T> JobCompletableFuture<T> invoke(final Collator collator) {
try {
final String jobId = UuidUtil.buildRandomUuidString();
ClientContext context = getContext();
ClientInvocationService cis = context.getInvocationService();
ClientMapReduceRequest request = new ClientMapReduceRequest(name, jobId, keys,
predicate, mapper, combinerFactory, reducerFactory, keyValueSource,
chunkSize, topologyChangedStrategy);
final ClientCompletableFuture completableFuture = new ClientCompletableFuture(jobId);
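// The job is submitted to a random member; whichever member accepted it is
// recorded below as the job owner so later control requests can be routed to it.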
ClientCallFuture future = (ClientCallFuture) cis.invokeOnRandomTarget(request, null);
future.andThen(new ExecutionCallback() {
@Override
public void onResponse(Object response) {
try {
if (collator != null) {
response = collator.collate(((Map) response).entrySet());
}
} finally {
completableFuture.setResult(response);
trackableJobs.remove(jobId);
}
}
@Override
public void onFailure(Throwable t) {
try {
if (t instanceof ExecutionException
&& t.getCause() instanceof CancellationException) {
t = t.getCause();
}
completableFuture.setResult(t);
} finally {
trackableJobs.remove(jobId);
}
}
});
Address runningMember = future.getConnection().getRemoteEndpoint();
trackableJobs.putIfAbsent(jobId, new ClientTrackableJob<T>(jobId, runningMember, completableFuture));
return completableFuture;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
private class ClientCompletableFuture<V>
extends AbstractCompletableFuture<V>
implements JobCompletableFuture<V> {
private final String jobId;
private final CountDownLatch latch;
private volatile boolean cancelled;
protected ClientCompletableFuture(String jobId) {
super(null, Logger.getLogger(ClientCompletableFuture.class));
this.jobId = jobId;
this.latch = new CountDownLatch(1);
}
@Override
public String getJobId() {
return jobId;
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
try {
cancelled = (Boolean) invoke(new ClientCancellationRequest(getName(), jobId), jobId);
} catch (Exception ignore) {
}
return cancelled;
}
@Override
public boolean isCancelled() {
return cancelled;
}
@Override
public void setResult(Object result) {
super.setResult(result);
latch.countDown();
}
@Override
public V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
ValidationUtil.isNotNull(unit, "unit");
if (!latch.await(timeout, unit) || !isDone()) {
throw new TimeoutException("timeout reached");
}
return getResult();
}
@Override
protected ExecutorService getAsyncExecutor() {
return getContext().getExecutionService().getAsyncExecutor();
}
}
private final class ClientTrackableJob<V>
implements TrackableJob<V> {
private final String jobId;
private final Address jobOwner;
private final AbstractCompletableFuture<V> completableFuture;
private ClientTrackableJob(String jobId, Address jobOwner,
AbstractCompletableFuture<V> completableFuture) {
this.jobId = jobId;
this.jobOwner = jobOwner;
this.completableFuture = completableFuture;
}
@Override
public JobTracker getJobTracker() {
return ClientMapReduceProxy.this;
}
@Override
public String getName() {
return ClientMapReduceProxy.this.getName();
}
@Override
public String getJobId() {
return jobId;
}
@Override
public ICompletableFuture<V> getCompletableFuture() {
return completableFuture;
}
@Override
public JobProcessInformation getJobProcessInformation() {
try {
return invoke(new ClientJobProcessInformationRequest(getName(), jobId), jobId);
} catch (Exception ignore) {
}
return null;
}
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientMapReduceProxy.java
|
81 |
public interface OCommandStream extends OCloseable {
boolean hasNext();
String nextCommand();
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_console_OCommandStream.java
|
270 |
public class ElasticsearchNullPointerException extends ElasticsearchException {
public ElasticsearchNullPointerException() {
super(null);
}
public ElasticsearchNullPointerException(String msg) {
super(msg);
}
public ElasticsearchNullPointerException(String msg, Throwable cause) {
super(msg, cause);
}
}
| 0true
|
src_main_java_org_elasticsearch_ElasticsearchNullPointerException.java
|
351 |
public class MergePersistenceUnitManager extends DefaultPersistenceUnitManager {
private static final Log LOG = LogFactory.getLog(MergePersistenceUnitManager.class);
protected HashMap<String, PersistenceUnitInfo> mergedPus = new HashMap<String, PersistenceUnitInfo>();
protected final boolean jpa2ApiPresent = ClassUtils.hasMethod(PersistenceUnitInfo.class, "getSharedCacheMode");
protected List<BroadleafClassTransformer> classTransformers = new ArrayList<BroadleafClassTransformer>();
@Resource(name="blMergedPersistenceXmlLocations")
protected Set<String> mergedPersistenceXmlLocations;
@Resource(name="blMergedDataSources")
protected Map<String, DataSource> mergedDataSources;
@Resource(name="blMergedClassTransformers")
protected Set<BroadleafClassTransformer> mergedClassTransformers;
@PostConstruct
public void configureMergedItems() {
String[] tempLocations;
try {
Field persistenceXmlLocations = DefaultPersistenceUnitManager.class.getDeclaredField("persistenceXmlLocations");
persistenceXmlLocations.setAccessible(true);
tempLocations = (String[]) persistenceXmlLocations.get(this);
} catch (Exception e) {
throw new RuntimeException(e);
}
for (String legacyLocation : tempLocations) {
if (!legacyLocation.endsWith("/persistence.xml")) {
//do not add the default JPA persistence location by default
mergedPersistenceXmlLocations.add(legacyLocation);
}
}
setPersistenceXmlLocations(mergedPersistenceXmlLocations.toArray(new String[mergedPersistenceXmlLocations.size()]));
if (!mergedDataSources.isEmpty()) {
setDataSources(mergedDataSources);
}
}
@PostConstruct
public void configureClassTransformers() throws InstantiationException, IllegalAccessException, ClassNotFoundException {
classTransformers.addAll(mergedClassTransformers);
}
protected PersistenceUnitInfo getMergedUnit(String persistenceUnitName, MutablePersistenceUnitInfo newPU) {
if (!mergedPus.containsKey(persistenceUnitName)) {
PersistenceUnitInfo puiToStore = newPU;
if (jpa2ApiPresent) {
puiToStore = (PersistenceUnitInfo) Proxy.newProxyInstance(SmartPersistenceUnitInfo.class.getClassLoader(),
new Class[] {SmartPersistenceUnitInfo.class}, new Jpa2PersistenceUnitInfoDecorator(newPU));
}
mergedPus.put(persistenceUnitName, puiToStore);
}
return mergedPus.get(persistenceUnitName);
}
@Override
@SuppressWarnings({"unchecked", "ToArrayCallWithZeroLengthArrayArgument"})
public void preparePersistenceUnitInfos() {
//Need to use reflection to try and execute the logic in the DefaultPersistenceUnitManager
//SpringSource added a block of code in version 3.1 to "protect" the user from having more than one PU with
//the same name. Of course, in our case, this happens before a merge occurs. They have added
//a block of code to throw an exception if more than one PU has the same name. We want to
//use the logic of the DefaultPersistenceUnitManager without the exception in the case of
//a duplicate name. This will require reflection in order to do what we need.
try {
Set<String> persistenceUnitInfoNames = null;
Map<String, PersistenceUnitInfo> persistenceUnitInfos = null;
ResourcePatternResolver resourcePatternResolver = null;
Field[] fields = getClass().getSuperclass().getDeclaredFields();
for (Field field : fields) {
if ("persistenceUnitInfoNames".equals(field.getName())) {
field.setAccessible(true);
persistenceUnitInfoNames = (Set<String>)field.get(this);
} else if ("persistenceUnitInfos".equals(field.getName())) {
field.setAccessible(true);
persistenceUnitInfos = (Map<String, PersistenceUnitInfo>)field.get(this);
} else if ("resourcePatternResolver".equals(field.getName())) {
field.setAccessible(true);
resourcePatternResolver = (ResourcePatternResolver)field.get(this);
}
}
persistenceUnitInfoNames.clear();
persistenceUnitInfos.clear();
Method readPersistenceUnitInfos =
getClass().
getSuperclass().
getDeclaredMethod("readPersistenceUnitInfos");
readPersistenceUnitInfos.setAccessible(true);
//In Spring 3.0 this returns an array
//In Spring 3.1 this returns a List
Object pInfosObject = readPersistenceUnitInfos.invoke(this);
Object[] puis;
if (pInfosObject.getClass().isArray()) {
puis = (Object[])pInfosObject;
} else {
puis = ((Collection)pInfosObject).toArray();
}
for (Object pui : puis) {
MutablePersistenceUnitInfo mPui = (MutablePersistenceUnitInfo)pui;
if (mPui.getPersistenceUnitRootUrl() == null) {
Method determineDefaultPersistenceUnitRootUrl =
getClass().
getSuperclass().
getDeclaredMethod("determineDefaultPersistenceUnitRootUrl");
determineDefaultPersistenceUnitRootUrl.setAccessible(true);
mPui.setPersistenceUnitRootUrl((URL)determineDefaultPersistenceUnitRootUrl.invoke(this));
}
ConfigurationOnlyState state = ConfigurationOnlyState.getState();
if ((state == null || !state.isConfigurationOnly()) && mPui.getNonJtaDataSource() == null) {
mPui.setNonJtaDataSource(getDefaultDataSource());
}
if (super.getLoadTimeWeaver() != null) {
Method puiInitMethod = mPui.getClass().getDeclaredMethod("init", LoadTimeWeaver.class);
puiInitMethod.setAccessible(true);
puiInitMethod.invoke(pui, getLoadTimeWeaver());
}
else {
Method puiInitMethod = mPui.getClass().getDeclaredMethod("init", ClassLoader.class);
puiInitMethod.setAccessible(true);
puiInitMethod.invoke(pui, resourcePatternResolver.getClassLoader());
}
postProcessPersistenceUnitInfo((MutablePersistenceUnitInfo)pui);
String name = mPui.getPersistenceUnitName();
persistenceUnitInfoNames.add(name);
PersistenceUnitInfo puiToStore = mPui;
if (jpa2ApiPresent) {
InvocationHandler jpa2PersistenceUnitInfoDecorator = null;
Class<?>[] classes = getClass().getSuperclass().getDeclaredClasses();
for (Class<?> clz : classes){
if ("org.springframework.orm.jpa.persistenceunit.DefaultPersistenceUnitManager$Jpa2PersistenceUnitInfoDecorator"
.equals(clz.getName())) {
Constructor<?> constructor =
clz.getConstructor(Class.forName("org.springframework.orm.jpa.persistenceunit.SpringPersistenceUnitInfo"));
constructor.setAccessible(true);
jpa2PersistenceUnitInfoDecorator = (InvocationHandler)constructor.newInstance(mPui);
break;
}
}
puiToStore = (PersistenceUnitInfo) Proxy.newProxyInstance(SmartPersistenceUnitInfo.class.getClassLoader(),
new Class[] {SmartPersistenceUnitInfo.class}, jpa2PersistenceUnitInfoDecorator);
}
persistenceUnitInfos.put(name, puiToStore);
}
} catch (Exception e) {
throw new RuntimeException("An error occured reflectively invoking methods on " +
"class: " + getClass().getSuperclass().getName(), e);
}
try {
List<String> managedClassNames = new ArrayList<String>();
for (PersistenceUnitInfo pui : mergedPus.values()) {
for (BroadleafClassTransformer transformer : classTransformers) {
try {
if (!(transformer instanceof NullClassTransformer) && pui.getPersistenceUnitName().equals("blPU")) {
pui.addTransformer(transformer);
}
} catch (IllegalStateException e) {
LOG.warn("A BroadleafClassTransformer is configured for this persistence unit, but Spring reported a problem (likely that a LoadTimeWeaver is not registered). As a result, the Broadleaf Commerce ClassTransformer ("+transformer.getClass().getName()+") is not being registered with the persistence unit.", e);
}
}
}
for (PersistenceUnitInfo pui : mergedPus.values()) {
for (String managedClassName : pui.getManagedClassNames()) {
if (!managedClassNames.contains(managedClassName)) {
// Force-load this class so that we are able to ensure our instrumentation happens globally.
Class.forName(managedClassName, true, getClass().getClassLoader());
managedClassNames.add(managedClassName);
}
}
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
protected void postProcessPersistenceUnitInfo(MutablePersistenceUnitInfo newPU) {
super.postProcessPersistenceUnitInfo(newPU);
ConfigurationOnlyState state = ConfigurationOnlyState.getState();
String persistenceUnitName = newPU.getPersistenceUnitName();
MutablePersistenceUnitInfo temp;
PersistenceUnitInfo pui = getMergedUnit(persistenceUnitName, newPU);
if (pui != null && Proxy.isProxyClass(pui.getClass())) {
// JPA 2.0 PersistenceUnitInfo decorator with a SpringPersistenceUnitInfo as target
Jpa2PersistenceUnitInfoDecorator dec = (Jpa2PersistenceUnitInfoDecorator) Proxy.getInvocationHandler(pui);
temp = (MutablePersistenceUnitInfo) dec.getTarget();
}
else {
// Must be a raw JPA 1.0 SpringPersistenceUnitInfo instance
temp = (MutablePersistenceUnitInfo) pui;
}
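// Fold the newly discovered unit into the stored merged unit: managed classes,
// mapping files, jar URLs, properties, data sources, transaction type and
// provider settings are merged below.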
List<String> managedClassNames = newPU.getManagedClassNames();
for (String managedClassName : managedClassNames){
if (!temp.getManagedClassNames().contains(managedClassName)) {
temp.addManagedClassName(managedClassName);
}
}
List<String> mappingFileNames = newPU.getMappingFileNames();
for (String mappingFileName : mappingFileNames) {
if (!temp.getMappingFileNames().contains(mappingFileName)) {
temp.addMappingFileName(mappingFileName);
}
}
temp.setExcludeUnlistedClasses(newPU.excludeUnlistedClasses());
for (URL url : newPU.getJarFileUrls()) {
// Avoid duplicate class scanning by Ejb3Configuration. Do not re-add the URL to the list of jars for this
// persistence unit or duplicate the persistence unit root URL location (both types of locations are scanned)
if (!temp.getJarFileUrls().contains(url) && !temp.getPersistenceUnitRootUrl().equals(url)) {
temp.addJarFileUrl(url);
}
}
if (temp.getProperties() == null) {
temp.setProperties(newPU.getProperties());
} else {
Properties props = newPU.getProperties();
if (props != null) {
for (Object key : props.keySet()) {
temp.getProperties().put(key, props.get(key));
for (BroadleafClassTransformer transformer : classTransformers) {
try {
transformer.compileJPAProperties(props, key);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
}
}
if (state == null || !state.isConfigurationOnly()) {
if (newPU.getJtaDataSource() != null) {
temp.setJtaDataSource(newPU.getJtaDataSource());
}
if (newPU.getNonJtaDataSource() != null) {
temp.setNonJtaDataSource(newPU.getNonJtaDataSource());
}
} else {
temp.getProperties().setProperty("hibernate.hbm2ddl.auto", "none");
temp.getProperties().setProperty("hibernate.temp.use_jdbc_metadata_defaults", "false");
}
temp.setTransactionType(newPU.getTransactionType());
if (newPU.getPersistenceProviderClassName() != null) {
temp.setPersistenceProviderClassName(newPU.getPersistenceProviderClassName());
}
if (newPU.getPersistenceProviderPackageName() != null) {
temp.setPersistenceProviderPackageName(newPU.getPersistenceProviderPackageName());
}
}
/* (non-Javadoc)
* @see org.springframework.orm.jpa.persistenceunit.DefaultPersistenceUnitManager#obtainPersistenceUnitInfo(java.lang.String)
*/
@Override
public PersistenceUnitInfo obtainPersistenceUnitInfo(String persistenceUnitName) {
return mergedPus.get(persistenceUnitName);
}
/* (non-Javadoc)
* @see org.springframework.orm.jpa.persistenceunit.DefaultPersistenceUnitManager#obtainDefaultPersistenceUnitInfo()
*/
@Override
public PersistenceUnitInfo obtainDefaultPersistenceUnitInfo() {
throw new IllegalStateException("Default Persistence Unit is not supported. The persistence unit name must be specified at the entity manager factory.");
}
public List<BroadleafClassTransformer> getClassTransformers() {
return classTransformers;
}
public void setClassTransformers(List<BroadleafClassTransformer> classTransformers) {
this.classTransformers = classTransformers;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_extensibility_jpa_MergePersistenceUnitManager.java
|
625 |
public static class IndexShardStatsRequest extends BroadcastShardOperationRequest {
// TODO if there are many indices, the request might hold a large indices array..., we don't really need to serialize it
IndicesStatsRequest request;
IndexShardStatsRequest() {
}
IndexShardStatsRequest(String index, int shardId, IndicesStatsRequest request) {
super(index, shardId, request);
this.request = request;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
request = new IndicesStatsRequest();
request.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
request.writeTo(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_stats_TransportIndicesStatsAction.java
|
50 |
@Test(enabled = false)
public class UnsafeComparatorTest {
public void testOneByteArray() {
final byte[] keyOne = new byte[] { 1 };
final byte[] keyTwo = new byte[] { 2 };
Assert.assertTrue(OUnsafeByteArrayComparator.INSTANCE.compare(keyOne, keyTwo) < 0);
Assert.assertTrue(OUnsafeByteArrayComparator.INSTANCE.compare(keyTwo, keyOne) > 0);
Assert.assertTrue(OUnsafeByteArrayComparator.INSTANCE.compare(keyTwo, keyTwo) == 0);
}
public void testOneLongArray() {
final byte[] keyOne = new byte[] { 0, 1, 0, 0, 0, 0, 0, 0 };
final byte[] keyTwo = new byte[] { 1, 0, 0, 0, 0, 0, 0, 0 };
Assert.assertTrue(OUnsafeByteArrayComparator.INSTANCE.compare(keyOne, keyTwo) < 0);
Assert.assertTrue(OUnsafeByteArrayComparator.INSTANCE.compare(keyTwo, keyOne) > 0);
Assert.assertTrue(OUnsafeByteArrayComparator.INSTANCE.compare(keyTwo, keyTwo) == 0);
}
public void testOneLongArrayAndByte() {
final byte[] keyOne = new byte[] { 1, 1, 0, 0, 0, 0, 0, 0, 0 };
final byte[] keyTwo = new byte[] { 1, 1, 0, 0, 0, 0, 0, 0, 1 };
Assert.assertTrue(OUnsafeByteArrayComparator.INSTANCE.compare(keyOne, keyTwo) < 0);
Assert.assertTrue(OUnsafeByteArrayComparator.INSTANCE.compare(keyTwo, keyOne) > 0);
Assert.assertTrue(OUnsafeByteArrayComparator.INSTANCE.compare(keyTwo, keyTwo) == 0);
}
}
| 0true
|
commons_src_test_java_com_orientechnologies_common_comparator_UnsafeComparatorTest.java
|
5,930 |
public class SortParseElement implements SearchParseElement {
public static final SortField SORT_SCORE = new SortField(null, SortField.Type.SCORE);
private static final SortField SORT_SCORE_REVERSE = new SortField(null, SortField.Type.SCORE, true);
private static final SortField SORT_DOC = new SortField(null, SortField.Type.DOC);
private static final SortField SORT_DOC_REVERSE = new SortField(null, SortField.Type.DOC, true);
public static final String SCORE_FIELD_NAME = "_score";
public static final String DOC_FIELD_NAME = "_doc";
private final ImmutableMap<String, SortParser> parsers;
public SortParseElement() {
ImmutableMap.Builder<String, SortParser> builder = ImmutableMap.builder();
addParser(builder, new ScriptSortParser());
addParser(builder, new GeoDistanceSortParser());
this.parsers = builder.build();
}
private void addParser(ImmutableMap.Builder<String, SortParser> parsers, SortParser parser) {
for (String name : parser.names()) {
parsers.put(name, parser);
}
}
@Override
public void parse(XContentParser parser, SearchContext context) throws Exception {
XContentParser.Token token = parser.currentToken();
List<SortField> sortFields = Lists.newArrayListWithCapacity(2);
if (token == XContentParser.Token.START_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.START_OBJECT) {
addCompoundSortField(parser, context, sortFields);
} else if (token == XContentParser.Token.VALUE_STRING) {
addSortField(context, sortFields, parser.text(), false, false, null, null, null, null);
} else {
throw new ElasticsearchIllegalArgumentException("malformed sort format, within the sort array, an object, or an actual string are allowed");
}
}
} else if (token == XContentParser.Token.VALUE_STRING) {
addSortField(context, sortFields, parser.text(), false, false, null, null, null, null);
} else if (token == XContentParser.Token.START_OBJECT) {
addCompoundSortField(parser, context, sortFields);
} else {
throw new ElasticsearchIllegalArgumentException("malformed sort format, either start with array, object, or an actual string");
}
if (!sortFields.isEmpty()) {
// optimize if we just sort on score non reversed, we don't really need sorting
boolean sort;
if (sortFields.size() > 1) {
sort = true;
} else {
SortField sortField = sortFields.get(0);
if (sortField.getType() == SortField.Type.SCORE && !sortField.getReverse()) {
sort = false;
} else {
sort = true;
}
}
if (sort) {
context.sort(new Sort(sortFields.toArray(new SortField[sortFields.size()])));
}
}
}
private void addCompoundSortField(XContentParser parser, SearchContext context, List<SortField> sortFields) throws Exception {
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
String fieldName = parser.currentName();
boolean reverse = false;
String missing = null;
String innerJsonName = null;
boolean ignoreUnmapped = false;
SortMode sortMode = null;
Filter nestedFilter = null;
String nestedPath = null;
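// Each entry is either "field" : "asc|desc" or "field" : { options... }; names
// registered with a custom parser (script sort, geo distance) are delegated to it.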
token = parser.nextToken();
if (token == XContentParser.Token.VALUE_STRING) {
String direction = parser.text();
if (direction.equals("asc")) {
reverse = SCORE_FIELD_NAME.equals(fieldName);
} else if (direction.equals("desc")) {
reverse = !SCORE_FIELD_NAME.equals(fieldName);
} else {
throw new ElasticsearchIllegalArgumentException("sort direction [" + fieldName + "] not supported");
}
addSortField(context, sortFields, fieldName, reverse, ignoreUnmapped, missing, sortMode, nestedPath, nestedFilter);
} else {
if (parsers.containsKey(fieldName)) {
sortFields.add(parsers.get(fieldName).parse(parser, context));
} else {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
innerJsonName = parser.currentName();
} else if (token.isValue()) {
if ("reverse".equals(innerJsonName)) {
reverse = parser.booleanValue();
} else if ("order".equals(innerJsonName)) {
if ("asc".equals(parser.text())) {
reverse = SCORE_FIELD_NAME.equals(fieldName);
} else if ("desc".equals(parser.text())) {
reverse = !SCORE_FIELD_NAME.equals(fieldName);
}
} else if ("missing".equals(innerJsonName)) {
missing = parser.textOrNull();
} else if ("ignore_unmapped".equals(innerJsonName) || "ignoreUnmapped".equals(innerJsonName)) {
ignoreUnmapped = parser.booleanValue();
} else if ("mode".equals(innerJsonName)) {
sortMode = SortMode.fromString(parser.text());
} else if ("nested_path".equals(innerJsonName) || "nestedPath".equals(innerJsonName)) {
nestedPath = parser.text();
} else {
throw new ElasticsearchIllegalArgumentException("sort option [" + innerJsonName + "] not supported");
}
} else if (token == XContentParser.Token.START_OBJECT) {
if ("nested_filter".equals(innerJsonName) || "nestedFilter".equals(innerJsonName)) {
ParsedFilter parsedFilter = context.queryParserService().parseInnerFilter(parser);
nestedFilter = parsedFilter == null ? null : parsedFilter.filter();
} else {
throw new ElasticsearchIllegalArgumentException("sort option [" + innerJsonName + "] not supported");
}
}
}
addSortField(context, sortFields, fieldName, reverse, ignoreUnmapped, missing, sortMode, nestedPath, nestedFilter);
}
}
}
}
}
private void addSortField(SearchContext context, List<SortField> sortFields, String fieldName, boolean reverse, boolean ignoreUnmapped, @Nullable final String missing, SortMode sortMode, String nestedPath, Filter nestedFilter) {
if (SCORE_FIELD_NAME.equals(fieldName)) {
if (reverse) {
sortFields.add(SORT_SCORE_REVERSE);
} else {
sortFields.add(SORT_SCORE);
}
} else if (DOC_FIELD_NAME.equals(fieldName)) {
if (reverse) {
sortFields.add(SORT_DOC_REVERSE);
} else {
sortFields.add(SORT_DOC);
}
} else {
FieldMapper fieldMapper = context.smartNameFieldMapper(fieldName);
if (fieldMapper == null) {
if (ignoreUnmapped) {
return;
}
throw new SearchParseException(context, "No mapping found for [" + fieldName + "] in order to sort on");
}
if (!fieldMapper.isSortable()) {
throw new SearchParseException(context, "Sorting not supported for field[" + fieldName + "]");
}
// Enable when we also know how to detect fields that do tokenize, but only emit one token
/*if (fieldMapper instanceof StringFieldMapper) {
StringFieldMapper stringFieldMapper = (StringFieldMapper) fieldMapper;
if (stringFieldMapper.fieldType().tokenized()) {
// Fail early
throw new SearchParseException(context, "Can't sort on tokenized string field[" + fieldName + "]");
}
}*/
// We only support AVG and SUM on number based fields
if (!(fieldMapper instanceof NumberFieldMapper) && (sortMode == SortMode.SUM || sortMode == SortMode.AVG)) {
sortMode = null;
}
if (sortMode == null) {
sortMode = resolveDefaultSortMode(reverse);
}
IndexFieldData.XFieldComparatorSource fieldComparatorSource = context.fieldData().getForField(fieldMapper)
.comparatorSource(missing, sortMode);
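// When sorting on a field inside a nested object, the comparator is wrapped below
// so values are picked from the matching nested documents of each root document.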
ObjectMapper objectMapper;
if (nestedPath != null) {
ObjectMappers objectMappers = context.mapperService().objectMapper(nestedPath);
if (objectMappers == null) {
throw new ElasticsearchIllegalArgumentException("failed to find nested object mapping for explicit nested path [" + nestedPath + "]");
}
objectMapper = objectMappers.mapper();
if (!objectMapper.nested().isNested()) {
throw new ElasticsearchIllegalArgumentException("mapping for explicit nested path is not mapped as nested: [" + nestedPath + "]");
}
} else {
objectMapper = context.mapperService().resolveClosestNestedObjectMapper(fieldName);
}
if (objectMapper != null && objectMapper.nested().isNested()) {
Filter rootDocumentsFilter = context.filterCache().cache(NonNestedDocsFilter.INSTANCE);
Filter innerDocumentsFilter;
if (nestedFilter != null) {
innerDocumentsFilter = context.filterCache().cache(nestedFilter);
} else {
innerDocumentsFilter = context.filterCache().cache(objectMapper.nestedTypeFilter());
}
fieldComparatorSource = new NestedFieldComparatorSource(sortMode, fieldComparatorSource, rootDocumentsFilter, innerDocumentsFilter);
}
sortFields.add(new SortField(fieldMapper.names().indexName(), fieldComparatorSource, reverse));
}
}
private static SortMode resolveDefaultSortMode(boolean reverse) {
return reverse ? SortMode.MAX : SortMode.MIN;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_sort_SortParseElement.java
|
5,815 |
public class HighlightPhase extends AbstractComponent implements FetchSubPhase {
private final Highlighters highlighters;
@Inject
public HighlightPhase(Settings settings, Highlighters highlighters) {
super(settings);
this.highlighters = highlighters;
}
@Override
public Map<String, ? extends SearchParseElement> parseElements() {
return ImmutableMap.of("highlight", new HighlighterParseElement());
}
@Override
public boolean hitsExecutionNeeded(SearchContext context) {
return false;
}
@Override
public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException {
}
@Override
public boolean hitExecutionNeeded(SearchContext context) {
return context.highlight() != null;
}
@Override
public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException {
Map<String, HighlightField> highlightFields = newHashMap();
for (SearchContextHighlight.Field field : context.highlight().fields()) {
Set<String> fieldNamesToHighlight;
if (Regex.isSimpleMatchPattern(field.field())) {
DocumentMapper documentMapper = context.mapperService().documentMapper(hitContext.hit().type());
fieldNamesToHighlight = documentMapper.mappers().simpleMatchToFullName(field.field());
} else {
fieldNamesToHighlight = ImmutableSet.of(field.field());
}
if (field.forceSource()) {
SourceFieldMapper sourceFieldMapper = context.mapperService().documentMapper(hitContext.hit().type()).sourceMapper();
if (!sourceFieldMapper.enabled()) {
throw new ElasticsearchIllegalArgumentException("source is forced for field [" + field.field() + "] but type [" + hitContext.hit().type() + "] has disabled _source");
}
}
for (String fieldName : fieldNamesToHighlight) {
FieldMapper<?> fieldMapper = getMapperForField(fieldName, context, hitContext);
if (fieldMapper == null) {
continue;
}
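// Choose a default highlighter from what the field can support: term vectors
// with positions and offsets allow the fast vector highlighter, indexed offsets
// allow the postings highlighter, otherwise fall back to the plain highlighter.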
if (field.highlighterType() == null) {
boolean useFastVectorHighlighter = fieldMapper.fieldType().storeTermVectors() && fieldMapper.fieldType().storeTermVectorOffsets() && fieldMapper.fieldType().storeTermVectorPositions();
if (useFastVectorHighlighter) {
field.highlighterType("fvh");
} else if (fieldMapper.fieldType().indexOptions() == FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
field.highlighterType("postings");
} else {
field.highlighterType("plain");
}
}
Highlighter highlighter = highlighters.get(field.highlighterType());
if (highlighter == null) {
throw new ElasticsearchIllegalArgumentException("unknown highlighter type [" + field.highlighterType() + "] for the field [" + fieldName + "]");
}
HighlighterContext.HighlightQuery highlightQuery;
if (field.highlightQuery() == null) {
highlightQuery = new HighlighterContext.HighlightQuery(context.parsedQuery().query(), context.query(), context.queryRewritten());
} else {
highlightQuery = new HighlighterContext.HighlightQuery(field.highlightQuery(), field.highlightQuery(), false);
}
HighlighterContext highlighterContext = new HighlighterContext(fieldName, field, fieldMapper, context, hitContext, highlightQuery);
HighlightField highlightField = highlighter.highlight(highlighterContext);
if (highlightField != null) {
highlightFields.put(highlightField.name(), highlightField);
}
}
}
hitContext.hit().highlightFields(highlightFields);
}
private FieldMapper<?> getMapperForField(String fieldName, SearchContext searchContext, HitContext hitContext) {
DocumentMapper documentMapper = searchContext.mapperService().documentMapper(hitContext.hit().type());
FieldMapper<?> mapper = documentMapper.mappers().smartNameFieldMapper(fieldName);
if (mapper == null) {
MapperService.SmartNameFieldMappers fullMapper = searchContext.mapperService().smartName(fieldName);
if (fullMapper == null || !fullMapper.hasDocMapper() || fullMapper.docMapper().type().equals(hitContext.hit().type())) {
return null;
}
mapper = fullMapper.mapper();
}
return mapper;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_highlight_HighlightPhase.java
|
1,640 |
@Component("blCollectionFieldMetadataProvider")
@Scope("prototype")
public class CollectionFieldMetadataProvider extends AdvancedCollectionFieldMetadataProvider {
private static final Log LOG = LogFactory.getLog(CollectionFieldMetadataProvider.class);
protected boolean canHandleFieldForConfiguredMetadata(AddMetadataRequest addMetadataRequest, Map<String, FieldMetadata> metadata) {
AdminPresentationCollection annot = addMetadataRequest.getRequestedField().getAnnotation(AdminPresentationCollection.class);
return annot != null;
}
protected boolean canHandleAnnotationOverride(OverrideViaAnnotationRequest overrideViaAnnotationRequest, Map<String, FieldMetadata> metadata) {
AdminPresentationOverrides myOverrides = overrideViaAnnotationRequest.getRequestedEntity().getAnnotation(AdminPresentationOverrides.class);
AdminPresentationMergeOverrides myMergeOverrides = overrideViaAnnotationRequest.getRequestedEntity().getAnnotation(AdminPresentationMergeOverrides.class);
return (myOverrides != null && !ArrayUtils.isEmpty(myOverrides.collections()) || myMergeOverrides != null);
}
@Override
public FieldProviderResponse addMetadata(AddMetadataRequest addMetadataRequest, Map<String, FieldMetadata> metadata) {
if (!canHandleFieldForConfiguredMetadata(addMetadataRequest, metadata)) {
return FieldProviderResponse.NOT_HANDLED;
}
AdminPresentationCollection annot = addMetadataRequest.getRequestedField().getAnnotation(AdminPresentationCollection
.class);
FieldInfo info = buildFieldInfo(addMetadataRequest.getRequestedField());
FieldMetadataOverride override = constructBasicCollectionMetadataOverride(annot);
buildCollectionMetadata(addMetadataRequest.getParentClass(), addMetadataRequest.getTargetClass(),
metadata, info, override);
setClassOwnership(addMetadataRequest.getParentClass(), addMetadataRequest.getTargetClass(), metadata, info);
return FieldProviderResponse.HANDLED;
}
@Override
public FieldProviderResponse overrideViaAnnotation(OverrideViaAnnotationRequest overrideViaAnnotationRequest, Map<String, FieldMetadata> metadata) {
if (!canHandleAnnotationOverride(overrideViaAnnotationRequest, metadata)) {
return FieldProviderResponse.NOT_HANDLED;
}
Map<String, AdminPresentationCollectionOverride> presentationCollectionOverrides = new HashMap<String, AdminPresentationCollectionOverride>();
AdminPresentationOverrides myOverrides = overrideViaAnnotationRequest.getRequestedEntity().getAnnotation(AdminPresentationOverrides.class);
if (myOverrides != null) {
for (AdminPresentationCollectionOverride myOverride : myOverrides.collections()) {
presentationCollectionOverrides.put(myOverride.name(), myOverride);
}
}
for (String propertyName : presentationCollectionOverrides.keySet()) {
for (String key : metadata.keySet()) {
if (key.startsWith(propertyName)) {
buildAdminPresentationCollectionOverride(overrideViaAnnotationRequest.getPrefix(), overrideViaAnnotationRequest.getParentExcluded(), metadata, presentationCollectionOverrides, propertyName, key, overrideViaAnnotationRequest.getDynamicEntityDao());
}
}
}
AdminPresentationMergeOverrides myMergeOverrides = overrideViaAnnotationRequest.getRequestedEntity().getAnnotation(AdminPresentationMergeOverrides.class);
if (myMergeOverrides != null) {
for (AdminPresentationMergeOverride override : myMergeOverrides.value()) {
String propertyName = override.name();
Map<String, FieldMetadata> loopMap = new HashMap<String, FieldMetadata>();
loopMap.putAll(metadata);
for (Map.Entry<String, FieldMetadata> entry : loopMap.entrySet()) {
if (entry.getKey().startsWith(propertyName) || StringUtils.isEmpty(propertyName)) {
FieldMetadata targetMetadata = entry.getValue();
if (targetMetadata instanceof BasicCollectionMetadata) {
BasicCollectionMetadata serverMetadata = (BasicCollectionMetadata) targetMetadata;
if (serverMetadata.getTargetClass() != null) {
try {
Class<?> targetClass = Class.forName(serverMetadata.getTargetClass());
Class<?> parentClass = null;
if (serverMetadata.getOwningClass() != null) {
parentClass = Class.forName(serverMetadata.getOwningClass());
}
String fieldName = serverMetadata.getFieldName();
Field field = overrideViaAnnotationRequest.getDynamicEntityDao().getFieldManager()
.getField(targetClass, fieldName);
Map<String, FieldMetadata> temp = new HashMap<String, FieldMetadata>(1);
temp.put(field.getName(), serverMetadata);
FieldInfo info = buildFieldInfo(field);
FieldMetadataOverride fieldMetadataOverride = overrideCollectionMergeMetadata(override);
if (serverMetadata.getExcluded() != null && serverMetadata.getExcluded() &&
(fieldMetadataOverride.getExcluded() == null || fieldMetadataOverride.getExcluded())) {
continue;
}
buildCollectionMetadata(parentClass, targetClass, temp, info, fieldMetadataOverride);
serverMetadata = (BasicCollectionMetadata) temp.get(field.getName());
metadata.put(entry.getKey(), serverMetadata);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
}
}
}
}
return FieldProviderResponse.HANDLED;
}
@Override
public FieldProviderResponse overrideViaXml(OverrideViaXmlRequest overrideViaXmlRequest, Map<String, FieldMetadata> metadata) {
Map<String, FieldMetadataOverride> overrides = getTargetedOverride(overrideViaXmlRequest.getDynamicEntityDao(), overrideViaXmlRequest.getRequestedConfigKey(), overrideViaXmlRequest.getRequestedCeilingEntity());
if (overrides != null) {
for (String propertyName : overrides.keySet()) {
final FieldMetadataOverride localMetadata = overrides.get(propertyName);
for (String key : metadata.keySet()) {
if (key.equals(propertyName)) {
try {
if (metadata.get(key) instanceof BasicCollectionMetadata) {
BasicCollectionMetadata serverMetadata = (BasicCollectionMetadata) metadata.get(key);
if (serverMetadata.getTargetClass() != null) {
Class<?> targetClass = Class.forName(serverMetadata.getTargetClass());
Class<?> parentClass = null;
if (serverMetadata.getOwningClass() != null) {
parentClass = Class.forName(serverMetadata.getOwningClass());
}
String fieldName = serverMetadata.getFieldName();
Field field = overrideViaXmlRequest.getDynamicEntityDao().getFieldManager().getField(targetClass, fieldName);
Map<String, FieldMetadata> temp = new HashMap<String, FieldMetadata>(1);
temp.put(field.getName(), serverMetadata);
FieldInfo info = buildFieldInfo(field);
buildCollectionMetadata(parentClass, targetClass, temp, info, localMetadata);
serverMetadata = (BasicCollectionMetadata) temp.get(field.getName());
metadata.put(key, serverMetadata);
if (overrideViaXmlRequest.getParentExcluded()) {
if (LOG.isDebugEnabled()) {
LOG.debug("applyCollectionMetadataOverrides:Excluding " + key + "because parent is marked as excluded.");
}
serverMetadata.setExcluded(true);
}
}
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
}
}
return FieldProviderResponse.HANDLED;
}
protected void buildAdminPresentationCollectionOverride(String prefix, Boolean isParentExcluded, Map<String, FieldMetadata> mergedProperties, Map<String, AdminPresentationCollectionOverride> presentationCollectionOverrides, String propertyName, String key, DynamicEntityDao dynamicEntityDao) {
AdminPresentationCollectionOverride override = presentationCollectionOverrides.get(propertyName);
if (override != null) {
AdminPresentationCollection annot = override.value();
if (annot != null) {
String testKey = prefix + key;
if ((testKey.startsWith(propertyName + ".") || testKey.equals(propertyName)) && annot.excluded()) {
FieldMetadata metadata = mergedProperties.get(key);
if (LOG.isDebugEnabled()) {
LOG.debug("buildAdminPresentationCollectionOverride:Excluding " + key + "because an override annotation declared " + testKey + "to be excluded");
}
metadata.setExcluded(true);
return;
}
if ((testKey.startsWith(propertyName + ".") || testKey.equals(propertyName)) && !annot.excluded()) {
FieldMetadata metadata = mergedProperties.get(key);
if (!isParentExcluded) {
if (LOG.isDebugEnabled()) {
LOG.debug("buildAdminPresentationCollectionOverride:Showing " + key + "because an override annotation declared " + testKey + " to not be excluded");
}
metadata.setExcluded(false);
}
}
if (!(mergedProperties.get(key) instanceof BasicCollectionMetadata)) {
return;
}
BasicCollectionMetadata serverMetadata = (BasicCollectionMetadata) mergedProperties.get(key);
if (serverMetadata.getTargetClass() != null) {
try {
Class<?> targetClass = Class.forName(serverMetadata.getTargetClass());
Class<?> parentClass = null;
if (serverMetadata.getOwningClass() != null) {
parentClass = Class.forName(serverMetadata.getOwningClass());
}
String fieldName = serverMetadata.getFieldName();
Field field = dynamicEntityDao.getFieldManager().getField(targetClass, fieldName);
FieldMetadataOverride localMetadata = constructBasicCollectionMetadataOverride(annot);
//do not include the previous metadata - we want to construct a fresh metadata from the override annotation
Map<String, FieldMetadata> temp = new HashMap<String, FieldMetadata>(1);
FieldInfo info = buildFieldInfo(field);
buildCollectionMetadata(parentClass, targetClass, temp, info, localMetadata);
BasicCollectionMetadata result = (BasicCollectionMetadata) temp.get(field.getName());
result.setInheritedFromType(serverMetadata.getInheritedFromType());
result.setAvailableToTypes(serverMetadata.getAvailableToTypes());
mergedProperties.put(key, result);
if (isParentExcluded) {
if (LOG.isDebugEnabled()) {
LOG.debug("buildAdminPresentationCollectionOverride:Excluding " + key + "because the parent was excluded");
}
serverMetadata.setExcluded(true);
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
}
}
protected FieldMetadataOverride overrideCollectionMergeMetadata(AdminPresentationMergeOverride merge) {
FieldMetadataOverride fieldMetadataOverride = new FieldMetadataOverride();
Map<String, AdminPresentationMergeEntry> overrideValues = getAdminPresentationEntries(merge.mergeEntries());
for (Map.Entry<String, AdminPresentationMergeEntry> entry : overrideValues.entrySet()) {
String stringValue = entry.getValue().overrideValue();
if (entry.getKey().equals(PropertyType.AdminPresentationCollection.ADDTYPE)) {
fieldMetadataOverride.setAddType(OperationType.valueOf(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.CURRENCYCODEFIELD)) {
fieldMetadataOverride.setCurrencyCodeField(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.CUSTOMCRITERIA)) {
fieldMetadataOverride.setCustomCriteria(entry.getValue().stringArrayOverrideValue());
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.EXCLUDED)) {
fieldMetadataOverride.setExcluded(StringUtils.isEmpty(stringValue) ? entry.getValue()
.booleanOverrideValue() :
Boolean.parseBoolean(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.FRIENDLYNAME)) {
fieldMetadataOverride.setFriendlyName(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.MANYTOFIELD)) {
fieldMetadataOverride.setManyToField(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.OPERATIONTYPES)) {
AdminPresentationOperationTypes operationType = entry.getValue().operationTypes();
fieldMetadataOverride.setAddType(operationType.addType());
fieldMetadataOverride.setRemoveType(operationType.removeType());
fieldMetadataOverride.setUpdateType(operationType.updateType());
fieldMetadataOverride.setFetchType(operationType.fetchType());
fieldMetadataOverride.setInspectType(operationType.inspectType());
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.ORDER)) {
fieldMetadataOverride.setOrder(StringUtils.isEmpty(stringValue) ? entry.getValue().intOverrideValue() :
Integer.parseInt(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.READONLY)) {
fieldMetadataOverride.setReadOnly(StringUtils.isEmpty(stringValue) ? entry.getValue()
.booleanOverrideValue() :
Boolean.parseBoolean(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.SECURITYLEVEL)) {
fieldMetadataOverride.setSecurityLevel(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.SHOWIFPROPERTY)) {
fieldMetadataOverride.setShowIfProperty(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.TAB)) {
fieldMetadataOverride.setTab(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.TABORDER)) {
fieldMetadataOverride.setTabOrder(StringUtils.isEmpty(stringValue) ? entry.getValue()
.intOverrideValue() :
Integer.parseInt(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationCollection.USESERVERSIDEINSPECTIONCACHE)) {
fieldMetadataOverride.setUseServerSideInspectionCache(StringUtils.isEmpty(stringValue) ? entry
.getValue().booleanOverrideValue() :
Boolean.parseBoolean(stringValue));
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Unrecognized type: " + entry.getKey() + ". Not setting on collection field.");
}
}
}
return fieldMetadataOverride;
}
protected FieldMetadataOverride constructBasicCollectionMetadataOverride(AdminPresentationCollection annotColl) {
if (annotColl != null) {
FieldMetadataOverride override = new FieldMetadataOverride();
override.setAddMethodType(annotColl.addType());
override.setManyToField(annotColl.manyToField());
override.setCustomCriteria(annotColl.customCriteria());
override.setUseServerSideInspectionCache(annotColl.useServerSideInspectionCache());
override.setExcluded(annotColl.excluded());
override.setFriendlyName(annotColl.friendlyName());
override.setReadOnly(annotColl.readOnly());
override.setOrder(annotColl.order());
override.setTab(annotColl.tab());
override.setTabOrder(annotColl.tabOrder());
override.setSecurityLevel(annotColl.securityLevel());
override.setAddType(annotColl.operationTypes().addType());
override.setFetchType(annotColl.operationTypes().fetchType());
override.setRemoveType(annotColl.operationTypes().removeType());
override.setUpdateType(annotColl.operationTypes().updateType());
override.setInspectType(annotColl.operationTypes().inspectType());
override.setShowIfProperty(annotColl.showIfProperty());
override.setCurrencyCodeField(annotColl.currencyCodeField());
return override;
}
throw new IllegalArgumentException("AdminPresentationCollection annotation not found on Field");
}
protected void buildCollectionMetadata(Class<?> parentClass, Class<?> targetClass, Map<String, FieldMetadata> attributes, FieldInfo field, FieldMetadataOverride collectionMetadata) {
BasicCollectionMetadata serverMetadata = (BasicCollectionMetadata) attributes.get(field.getName());
Class<?> resolvedClass = parentClass==null?targetClass:parentClass;
BasicCollectionMetadata metadata;
if (serverMetadata != null) {
metadata = serverMetadata;
} else {
metadata = new BasicCollectionMetadata();
}
metadata.setTargetClass(targetClass.getName());
metadata.setFieldName(field.getName());
if (collectionMetadata.getReadOnly() != null) {
metadata.setMutable(!collectionMetadata.getReadOnly());
}
if (collectionMetadata.getAddMethodType() != null) {
metadata.setAddMethodType(collectionMetadata.getAddMethodType());
}
if (collectionMetadata.getShowIfProperty()!=null) {
metadata.setShowIfProperty(collectionMetadata.getShowIfProperty());
}
org.broadleafcommerce.openadmin.dto.OperationTypes dtoOperationTypes = new org.broadleafcommerce.openadmin.dto.OperationTypes(OperationType.BASIC, OperationType.BASIC, OperationType.BASIC, OperationType.BASIC, OperationType.BASIC);
if (collectionMetadata.getAddType() != null) {
dtoOperationTypes.setAddType(collectionMetadata.getAddType());
}
if (collectionMetadata.getRemoveType() != null) {
dtoOperationTypes.setRemoveType(collectionMetadata.getRemoveType());
}
if (collectionMetadata.getFetchType() != null) {
dtoOperationTypes.setFetchType(collectionMetadata.getFetchType());
}
if (collectionMetadata.getInspectType() != null) {
dtoOperationTypes.setInspectType(collectionMetadata.getInspectType());
}
if (collectionMetadata.getUpdateType() != null) {
dtoOperationTypes.setUpdateType(collectionMetadata.getUpdateType());
}
if (AddMethodType.LOOKUP == metadata.getAddMethodType()) {
dtoOperationTypes.setRemoveType(OperationType.NONDESTRUCTIVEREMOVE);
}
//don't allow additional non-persistent properties or additional foreign keys for an advanced collection datasource - they don't make sense in this context
PersistencePerspective persistencePerspective;
if (serverMetadata != null) {
persistencePerspective = metadata.getPersistencePerspective();
persistencePerspective.setOperationTypes(dtoOperationTypes);
} else {
persistencePerspective = new PersistencePerspective(dtoOperationTypes, new String[]{}, new ForeignKey[]{});
metadata.setPersistencePerspective(persistencePerspective);
}
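// Resolve the foreign key (manyToField) for the collection: an explicit override
// wins over the value already merged, followed by the mappedBy attribute of
// @OneToMany or @ManyToMany; if none can be inferred, fail fast below.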
String foreignKeyName = null;
if (serverMetadata != null) {
foreignKeyName = ((ForeignKey) serverMetadata.getPersistencePerspective().getPersistencePerspectiveItems
().get(PersistencePerspectiveItemType.FOREIGNKEY)).getManyToField();
}
if (!StringUtils.isEmpty(collectionMetadata.getManyToField())) {
foreignKeyName = collectionMetadata.getManyToField();
}
if (foreignKeyName == null && !StringUtils.isEmpty(field.getOneToManyMappedBy())) {
foreignKeyName = field.getOneToManyMappedBy();
}
if (foreignKeyName == null && !StringUtils.isEmpty(field.getManyToManyMappedBy())) {
foreignKeyName = field.getManyToManyMappedBy();
}
if (StringUtils.isEmpty(foreignKeyName)) {
throw new IllegalArgumentException("Unable to infer a ManyToOne field name for the @AdminPresentationCollection annotated field("+field.getName()+"). If not using the mappedBy property of @OneToMany or @ManyToMany, please make sure to explicitly define the manyToField property");
}
if (serverMetadata != null) {
ForeignKey foreignKey = (ForeignKey) metadata.getPersistencePerspective().getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.FOREIGNKEY);
foreignKey.setManyToField(foreignKeyName);
foreignKey.setForeignKeyClass(resolvedClass.getName());
foreignKey.setMutable(metadata.isMutable());
foreignKey.setOriginatingField(field.getName());
} else {
ForeignKey foreignKey = new ForeignKey(foreignKeyName, resolvedClass.getName(), null, ForeignKeyRestrictionType.ID_EQ);
persistencePerspective.addPersistencePerspectiveItem(PersistencePerspectiveItemType.FOREIGNKEY, foreignKey);
foreignKey.setMutable(metadata.isMutable());
foreignKey.setOriginatingField(field.getName());
}
String ceiling = null;
checkCeiling: {
if (field.getGenericType() instanceof ParameterizedType) {
try {
ParameterizedType pt = (ParameterizedType) field.getGenericType();
java.lang.reflect.Type collectionType = pt.getActualTypeArguments()[0];
String ceilingEntityName = ((Class<?>) collectionType).getName();
ceiling = entityConfiguration.lookupEntityClass(ceilingEntityName).getName();
break checkCeiling;
} catch (NoSuchBeanDefinitionException e) {
// We weren't successful at looking at entity configuration to find the type of this collection.
// We will continue and attempt to find it via the Hibernate annotations
}
}
if (!StringUtils.isEmpty(field.getOneToManyTargetEntity()) && !void.class.getName().equals(field.getOneToManyTargetEntity())) {
ceiling = field.getOneToManyTargetEntity();
break checkCeiling;
}
if (!StringUtils.isEmpty(field.getManyToManyTargetEntity()) && !void.class.getName().equals(field.getManyToManyTargetEntity())) {
ceiling = field.getManyToManyTargetEntity();
break checkCeiling;
}
}
if (!StringUtils.isEmpty(ceiling)) {
metadata.setCollectionCeilingEntity(ceiling);
}
if (collectionMetadata.getExcluded() != null) {
if (LOG.isDebugEnabled()) {
if (collectionMetadata.getExcluded()) {
LOG.debug("buildCollectionMetadata:Excluding " + field.getName() + " because it was explicitly declared in config");
} else {
LOG.debug("buildCollectionMetadata:Showing " + field.getName() + " because it was explicitly declared in config");
}
}
metadata.setExcluded(collectionMetadata.getExcluded());
}
if (collectionMetadata.getFriendlyName() != null) {
metadata.setFriendlyName(collectionMetadata.getFriendlyName());
}
if (collectionMetadata.getSecurityLevel() != null) {
metadata.setSecurityLevel(collectionMetadata.getSecurityLevel());
}
if (collectionMetadata.getOrder() != null) {
metadata.setOrder(collectionMetadata.getOrder());
}
if (collectionMetadata.getTab() != null) {
metadata.setTab(collectionMetadata.getTab());
}
if (collectionMetadata.getTabOrder() != null) {
metadata.setTabOrder(collectionMetadata.getTabOrder());
}
if (collectionMetadata.getCustomCriteria() != null) {
metadata.setCustomCriteria(collectionMetadata.getCustomCriteria());
}
if (collectionMetadata.getUseServerSideInspectionCache() != null) {
persistencePerspective.setUseServerSideInspectionCache(collectionMetadata.getUseServerSideInspectionCache());
}
if (collectionMetadata.getCurrencyCodeField()!=null) {
metadata.setCurrencyCodeField(collectionMetadata.getCurrencyCodeField());
}
attributes.put(field.getName(), metadata);
}
@Override
public int getOrder() {
return FieldMetadataProvider.COLLECTION;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_dao_provider_metadata_CollectionFieldMetadataProvider.java
|
312 |
static final class Fields {
static final XContentBuilderString STATUS = new XContentBuilderString("status");
static final XContentBuilderString NUMBER_OF_SHARDS = new XContentBuilderString("number_of_shards");
static final XContentBuilderString NUMBER_OF_REPLICAS = new XContentBuilderString("number_of_replicas");
static final XContentBuilderString ACTIVE_PRIMARY_SHARDS = new XContentBuilderString("active_primary_shards");
static final XContentBuilderString ACTIVE_SHARDS = new XContentBuilderString("active_shards");
static final XContentBuilderString RELOCATING_SHARDS = new XContentBuilderString("relocating_shards");
static final XContentBuilderString INITIALIZING_SHARDS = new XContentBuilderString("initializing_shards");
static final XContentBuilderString UNASSIGNED_SHARDS = new XContentBuilderString("unassigned_shards");
static final XContentBuilderString VALIDATION_FAILURES = new XContentBuilderString("validation_failures");
static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
static final XContentBuilderString PRIMARY_ACTIVE = new XContentBuilderString("primary_active");
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_health_ClusterIndexHealth.java
|
727 |
@Embeddable
public class Weight implements Serializable {
private static final long serialVersionUID = 1L;
@Column(name = "WEIGHT")
@AdminPresentation(friendlyName = "ProductWeight_Product_Weight", order = 8000,
tab = ProductImpl.Presentation.Tab.Name.Shipping, tabOrder = ProductImpl.Presentation.Tab.Order.Shipping,
group = ProductImpl.Presentation.Group.Name.Shipping, groupOrder = ProductImpl.Presentation.Group.Order.Shipping)
protected BigDecimal weight;
@Column(name = "WEIGHT_UNIT_OF_MEASURE")
@AdminPresentation(friendlyName = "ProductWeight_Product_Weight_Units", order = 9000,
tab = ProductImpl.Presentation.Tab.Name.Shipping, tabOrder = ProductImpl.Presentation.Tab.Order.Shipping,
group = ProductImpl.Presentation.Group.Name.Shipping, groupOrder = ProductImpl.Presentation.Group.Order.Shipping,
fieldType= SupportedFieldType.BROADLEAF_ENUMERATION,
broadleafEnumeration="org.broadleafcommerce.common.util.WeightUnitOfMeasureType")
protected String weightUnitOfMeasure;
public WeightUnitOfMeasureType getWeightUnitOfMeasure() {
return WeightUnitOfMeasureType.getInstance(weightUnitOfMeasure);
}
public void setWeightUnitOfMeasure(WeightUnitOfMeasureType weightUnitOfMeasure) {
if (weightUnitOfMeasure != null) {
this.weightUnitOfMeasure = weightUnitOfMeasure.getType();
}
}
public BigDecimal getWeight() {
return weight;
}
public void setWeight(BigDecimal weight) {
this.weight = weight;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_Weight.java
|
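Weight persists the unit of measure as a plain String column and converts to and from the typed constant in its accessors. A rough sketch of the string-backed extensible-enum pattern this relies on, with an illustrative UnitOfMeasure type standing in for Broadleaf's WeightUnitOfMeasureType:
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
public final class UnitOfMeasure {
    // Instances register themselves by type key; getInstance resolves the
    // persisted string back to the typed constant.
    private static final Map<String, UnitOfMeasure> TYPES = new ConcurrentHashMap<>();
    public static final UnitOfMeasure KILOGRAMS = new UnitOfMeasure("KILOGRAMS");
    public static final UnitOfMeasure POUNDS = new UnitOfMeasure("POUNDS");
    private final String type;
    private UnitOfMeasure(String type) {
        this.type = type;
        TYPES.put(type, this);
    }
    public static UnitOfMeasure getInstance(String type) {
        return type == null ? null : TYPES.get(type);
    }
    public String getType() {
        return type;
    }
}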
1,319 |
@Entity
@Table(name = "BLC_SEARCH_SYNONYM")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
public class SearchSynonymImpl implements SearchSynonym {
@Id
@GeneratedValue(generator = "SearchSynonymId")
@GenericGenerator(
name="SearchSynonymId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="SearchSynonymImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.search.domain.SearchSynonymImpl")
}
)
@Column(name = "SEARCH_SYNONYM_ID")
private Long id;
@Column(name = "TERM")
@Index(name="SEARCHSYNONYM_TERM_INDEX", columnNames={"TERM"})
private String term;
@Column(name = "SYNONYMS")
private String synonyms;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
@Override
public String getTerm() {
return term;
}
@Override
public void setTerm(String term) {
this.term = term;
}
@Override
public String[] getSynonyms() {
        return synonyms.split("\\|"); // split takes a regex; the pipe must be escaped or it splits between every character
}
@Override
public void setSynonyms(String[] synonyms) {
this.synonyms = StringUtils.join(synonyms, '|');
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_domain_SearchSynonymImpl.java
|
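getSynonyms/setSynonyms round-trip through a pipe-delimited string. String.split takes a regular expression, so the pipe must be escaped, as in the corrected getSynonyms above; unescaped, it splits between every character. A standalone demonstration:
import java.util.Arrays;
public class PipeSplitDemo {
    public static void main(String[] args) {
        String joined = "couch|sofa|settee";
        // Unescaped, "|" is regex alternation matching the empty string, so every
        // character (including the literal pipes) becomes its own token:
        System.out.println(Arrays.toString(joined.split("|")));
        // Escaped, it splits on the literal pipe as intended: [couch, sofa, settee]
        System.out.println(Arrays.toString(joined.split("\\|")));
    }
}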
477 |
public class GetAliasesRequestBuilder extends BaseAliasesRequestBuilder<GetAliasesResponse, GetAliasesRequestBuilder> {
public GetAliasesRequestBuilder(IndicesAdminClient client, String... aliases) {
super(client, aliases);
}
@Override
protected void doExecute(ActionListener<GetAliasesResponse> listener) {
((IndicesAdminClient) client).getAliases(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_alias_get_GetAliasesRequestBuilder.java
|
683 |
public class PutWarmerResponse extends AcknowledgedResponse {
PutWarmerResponse() {
super();
}
PutWarmerResponse(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_warmer_put_PutWarmerResponse.java
|
239 |
@Service("blSystemPropertiesService")
public class SystemPropertiesServiceImpl implements SystemPropertiesService{
@Resource(name="blSystemPropertiesDao")
protected SystemPropertiesDao systemPropertiesDao;
@Override
@Transactional("blTransactionManager")
public SystemProperty saveSystemProperty(SystemProperty systemProperty) {
return systemPropertiesDao.saveSystemProperty(systemProperty);
}
@Override
@Transactional("blTransactionManager")
public void deleteSystemProperty(SystemProperty systemProperty) {
systemPropertiesDao.deleteSystemProperty(systemProperty);
}
@Override
@Transactional("blTransactionManager")
public List<SystemProperty> findAllSystemProperties() {
return systemPropertiesDao.readAllSystemProperties();
}
@Override
@Transactional("blTransactionManager")
public SystemProperty findSystemPropertyByName(String name) {
return systemPropertiesDao.readSystemPropertyByName(name);
}
@Override
@Transactional("blTransactionManager")
public SystemProperty createNewSystemProperty() {
return systemPropertiesDao.createNewSystemProperty();
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_config_service_SystemPropertiesServiceImpl.java
|
2,355 |
public class JobSupervisor {
private final ConcurrentMap<Object, Reducer> reducers = new ConcurrentHashMap<Object, Reducer>();
private final ConcurrentMap<Integer, Set<Address>> remoteReducers = new ConcurrentHashMap<Integer, Set<Address>>();
private final AtomicReference<DefaultContext> context = new AtomicReference<DefaultContext>();
private final ConcurrentMap<Object, Address> keyAssignments = new ConcurrentHashMap<Object, Address>();
private final Address jobOwner;
private final boolean ownerNode;
private final AbstractJobTracker jobTracker;
private final JobTaskConfiguration configuration;
private final MapReduceService mapReduceService;
private final ExecutorService executorService;
private final JobProcessInformationImpl jobProcessInformation;
public JobSupervisor(JobTaskConfiguration configuration, AbstractJobTracker jobTracker, boolean ownerNode,
MapReduceService mapReduceService) {
this.jobTracker = jobTracker;
this.ownerNode = ownerNode;
this.configuration = configuration;
this.mapReduceService = mapReduceService;
this.jobOwner = configuration.getJobOwner();
this.executorService = mapReduceService.getExecutorService(configuration.getName());
// Calculate partition count
this.jobProcessInformation = createJobProcessInformation(configuration, this);
// Preregister reducer task to handle immediate reducing events
String name = configuration.getName();
String jobId = configuration.getJobId();
jobTracker.registerReducerTask(new ReducerTask(name, jobId, this));
}
public MapReduceService getMapReduceService() {
return mapReduceService;
}
public JobTracker getJobTracker() {
return jobTracker;
}
public void startTasks(MappingPhase mappingPhase) {
// Start map-combiner tasks
jobTracker.registerMapCombineTask(new MapCombineTask(configuration, this, mappingPhase));
}
public void onNotification(MapReduceNotification notification) {
if (notification instanceof IntermediateChunkNotification) {
IntermediateChunkNotification icn = (IntermediateChunkNotification) notification;
ReducerTask reducerTask = jobTracker.getReducerTask(icn.getJobId());
reducerTask.processChunk(icn.getChunk());
} else if (notification instanceof LastChunkNotification) {
LastChunkNotification lcn = (LastChunkNotification) notification;
ReducerTask reducerTask = jobTracker.getReducerTask(lcn.getJobId());
reducerTask.processChunk(lcn.getPartitionId(), lcn.getSender(), lcn.getChunk());
} else if (notification instanceof ReducingFinishedNotification) {
ReducingFinishedNotification rfn = (ReducingFinishedNotification) notification;
processReducerFinished(rfn);
}
}
public void notifyRemoteException(Address remoteAddress, Throwable throwable) {
// Cancel all partition states
jobProcessInformation.cancelPartitionState();
// Notify all other nodes about cancellation
Set<Address> addresses = collectRemoteAddresses();
// Now notify all involved members to cancel the job
cancelRemoteOperations(addresses);
// Cancel local job
TrackableJobFuture future = cancel();
if (future != null) {
            // Might already be cancelled by another member's exception
ExceptionUtil.fixRemoteStackTrace(throwable, Thread.currentThread().getStackTrace(),
"Operation failed on node: " + remoteAddress);
future.setResult(throwable);
}
}
public boolean cancelAndNotify(Exception exception) {
// Cancel all partition states
jobProcessInformation.cancelPartitionState();
// Notify all other nodes about cancellation
Set<Address> addresses = collectRemoteAddresses();
// Now notify all involved members to cancel the job
cancelRemoteOperations(addresses);
// Cancel local job
TrackableJobFuture future = cancel();
if (future != null) {
            // Might already be cancelled by another member's exception
future.setResult(exception);
}
return true;
}
// TODO Not yet fully supported
public boolean cancelNotifyAndRestart() {
// Cancel all partition states
jobProcessInformation.cancelPartitionState();
// Notify all other nodes about cancellation
Set<Address> addresses = collectRemoteAddresses();
// Now notify all involved members to cancel the job
cancelRemoteOperations(addresses);
// Kill local tasks
String jobId = getConfiguration().getJobId();
MapCombineTask mapCombineTask = jobTracker.unregisterMapCombineTask(jobId);
if (mapCombineTask != null) {
mapCombineTask.cancel();
}
ReducerTask reducerTask = jobTracker.unregisterReducerTask(jobId);
if (reducerTask != null) {
reducerTask.cancel();
}
// Reset local data
jobProcessInformation.resetPartitionState();
reducers.clear();
remoteReducers.clear();
context.set(null);
keyAssignments.clear();
// Restart
// TODO restart with a new KeyValueJob
return true;
}
public TrackableJobFuture cancel() {
String jobId = getConfiguration().getJobId();
TrackableJobFuture future = jobTracker.unregisterTrackableJob(jobId);
MapCombineTask mapCombineTask = jobTracker.unregisterMapCombineTask(jobId);
if (mapCombineTask != null) {
mapCombineTask.cancel();
}
ReducerTask reducerTask = jobTracker.unregisterReducerTask(jobId);
if (reducerTask != null) {
reducerTask.cancel();
}
mapReduceService.destroyJobSupervisor(this);
return future;
}
public Map<Object, Object> getJobResults() {
Map<Object, Object> result;
if (configuration.getReducerFactory() != null) {
int mapsize = MapReduceUtil.mapSize(reducers.size());
result = new HashMap<Object, Object>(mapsize);
for (Map.Entry<Object, Reducer> entry : reducers.entrySet()) {
result.put(entry.getKey(), entry.getValue().finalizeReduce());
}
} else {
DefaultContext currentContext = context.get();
result = currentContext.finish();
}
return result;
}
public <KeyIn, ValueIn, ValueOut> Reducer<KeyIn, ValueIn, ValueOut> getReducerByKey(Object key) {
Reducer reducer = reducers.get(key);
if (reducer == null && configuration.getReducerFactory() != null) {
reducer = configuration.getReducerFactory().newReducer(key);
Reducer oldReducer = reducers.putIfAbsent(key, reducer);
if (oldReducer != null) {
reducer = oldReducer;
} else {
reducer.beginReduce(key);
}
}
return reducer;
}
    public Address getReducerAddressByKey(Object key) {
        // the map already returns null for unassigned keys
        return keyAssignments.get(key);
    }
public Address assignKeyReducerAddress(Object key) {
// Assign new key to a known member
Address address = keyAssignments.get(key);
if (address == null) {
address = mapReduceService.getKeyMember(key);
Address oldAddress = keyAssignments.putIfAbsent(key, address);
if (oldAddress != null) {
address = oldAddress;
}
}
return address;
}
public boolean checkAssignedMembersAvailable() {
return mapReduceService.checkAssignedMembersAvailable(keyAssignments.values());
}
public boolean assignKeyReducerAddress(Object key, Address address) {
Address oldAssignment = keyAssignments.putIfAbsent(key, address);
return oldAssignment == null || oldAssignment.equals(address);
}
public void checkFullyProcessed(JobProcessInformation processInformation) {
if (isOwnerNode()) {
JobPartitionState[] partitionStates = processInformation.getPartitionStates();
for (JobPartitionState partitionState : partitionStates) {
if (partitionState == null || partitionState.getState() != JobPartitionState.State.PROCESSED) {
return;
}
}
String name = configuration.getName();
String jobId = configuration.getJobId();
NodeEngine nodeEngine = configuration.getNodeEngine();
GetResultOperationFactory operationFactory = new GetResultOperationFactory(name, jobId);
List<Map> results = MapReduceUtil.executeOperation(operationFactory, mapReduceService, nodeEngine, true);
boolean reducedResult = configuration.getReducerFactory() != null;
if (results != null) {
Map<Object, Object> mergedResults = new HashMap<Object, Object>();
for (Map<?, ?> map : results) {
for (Map.Entry entry : map.entrySet()) {
collectResults(reducedResult, mergedResults, entry);
}
}
// Get the initial future object to eventually set the result and cleanup
TrackableJobFuture future = jobTracker.unregisterTrackableJob(jobId);
jobTracker.unregisterMapCombineTask(jobId);
jobTracker.unregisterReducerTask(jobId);
mapReduceService.destroyJobSupervisor(this);
future.setResult(mergedResults);
}
}
}
public <K, V> DefaultContext<K, V> getOrCreateContext(MapCombineTask mapCombineTask) {
DefaultContext<K, V> newContext = new DefaultContext<K, V>(configuration.getCombinerFactory(), mapCombineTask);
if (context.compareAndSet(null, newContext)) {
return newContext;
}
return context.get();
}
public void registerReducerEventInterests(int partitionId, Set<Address> remoteReducers) {
Set<Address> addresses = this.remoteReducers.get(partitionId);
if (addresses == null) {
addresses = new CopyOnWriteArraySet<Address>();
Set<Address> oldSet = this.remoteReducers.putIfAbsent(partitionId, addresses);
if (oldSet != null) {
addresses = oldSet;
}
}
addresses.addAll(remoteReducers);
}
public Collection<Address> getReducerEventInterests(int partitionId) {
return this.remoteReducers.get(partitionId);
}
public JobProcessInformationImpl getJobProcessInformation() {
return jobProcessInformation;
}
public Address getJobOwner() {
return jobOwner;
}
public boolean isOwnerNode() {
return ownerNode;
}
public JobTaskConfiguration getConfiguration() {
return configuration;
}
private void collectResults(boolean reducedResult, Map<Object, Object> mergedResults, Map.Entry entry) {
if (reducedResult) {
mergedResults.put(entry.getKey(), entry.getValue());
} else {
List<Object> list = (List) mergedResults.get(entry.getKey());
if (list == null) {
list = new ArrayList<Object>();
mergedResults.put(entry.getKey(), list);
}
            list.addAll((List) entry.getValue());
}
}
private Set<Address> collectRemoteAddresses() {
Set<Address> addresses = new HashSet<Address>();
for (Set<Address> remoteReducerAddresses : remoteReducers.values()) {
addAllFilterJobOwner(addresses, remoteReducerAddresses);
}
for (JobPartitionState partitionState : jobProcessInformation.getPartitionStates()) {
if (partitionState != null && partitionState.getOwner() != null) {
if (!partitionState.getOwner().equals(jobOwner)) {
addresses.add(partitionState.getOwner());
}
}
}
return addresses;
}
private void cancelRemoteOperations(Set<Address> addresses) {
String name = getConfiguration().getName();
String jobId = getConfiguration().getJobId();
for (Address address : addresses) {
try {
CancelJobSupervisorOperation operation = new CancelJobSupervisorOperation(name, jobId);
mapReduceService.processRequest(address, operation, name);
} catch (Exception ignore) {
                // We can ignore this exception: we just want to cancel the job,
                // and the member may have crashed or become unreachable
ILogger logger = mapReduceService.getNodeEngine().getLogger(JobSupervisor.class);
logger.finest("Remote node may already be down", ignore);
}
}
}
private void processReducerFinished(final ReducingFinishedNotification notification) {
// Just offload it to free the event queue
executorService.submit(new Runnable() {
@Override
public void run() {
processReducerFinished0(notification);
}
});
}
private void addAllFilterJobOwner(Set<Address> target, Set<Address> source) {
for (Address address : source) {
if (jobOwner.equals(address)) {
continue;
}
target.add(address);
}
}
private void processReducerFinished0(ReducingFinishedNotification notification) {
String name = configuration.getName();
String jobId = configuration.getJobId();
int partitionId = notification.getPartitionId();
Address reducerAddress = notification.getAddress();
if (checkPartitionReductionCompleted(partitionId, reducerAddress)) {
try {
RequestPartitionResult result = mapReduceService
.processRequest(jobOwner, new RequestPartitionProcessed(name, jobId, partitionId, REDUCING), name);
if (result.getResultState() != SUCCESSFUL) {
throw new RuntimeException("Could not finalize processing for partitionId " + partitionId);
}
} catch (Throwable t) {
MapReduceUtil.notifyRemoteException(this, t);
if (t instanceof Error) {
ExceptionUtil.sneakyThrow(t);
}
}
}
}
private boolean checkPartitionReductionCompleted(int partitionId, Address reducerAddress) {
Set<Address> remoteAddresses = remoteReducers.get(partitionId);
if (remoteAddresses == null) {
throw new RuntimeException("Reducer for partition " + partitionId + " not registered");
}
remoteAddresses.remove(reducerAddress);
        if (remoteAddresses.isEmpty()) {
if (remoteReducers.remove(partitionId) != null) {
return true;
}
}
return false;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_JobSupervisor.java
|
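JobSupervisor uses the same lock-free idiom in several places (getReducerByKey, assignKeyReducerAddress, registerReducerEventInterests, getOrCreateContext): build a candidate value, publish it with putIfAbsent or compareAndSet, and keep whichever instance won the race. The idiom distilled, with illustrative names:
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
public class PutIfAbsentIdiom {
    private final ConcurrentMap<String, StringBuilder> buffers = new ConcurrentHashMap<>();
    // Returns the single buffer shared by all threads asking for this key.
    StringBuilder bufferFor(String key) {
        StringBuilder existing = buffers.get(key);
        if (existing != null) {
            return existing;
        }
        StringBuilder candidate = new StringBuilder();
        // putIfAbsent returns the previous value if another thread got there first
        StringBuilder raced = buffers.putIfAbsent(key, candidate);
        return raced != null ? raced : candidate;
    }
    public static void main(String[] args) {
        PutIfAbsentIdiom idiom = new PutIfAbsentIdiom();
        // Both calls observe the same instance, even under concurrent access
        System.out.println(idiom.bufferFor("a") == idiom.bufferFor("a")); // true
    }
}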
214 |
public class HydrationDescriptor {
private Map<String, HydrationItemDescriptor> hydratedMutators;
private Method[] idMutators;
private String cacheRegion;
public Map<String, HydrationItemDescriptor> getHydratedMutators() {
return hydratedMutators;
}
public Method[] getIdMutators() {
return idMutators;
}
public String getCacheRegion() {
return cacheRegion;
}
public void setHydratedMutators(Map<String, HydrationItemDescriptor> hydratedMutators) {
this.hydratedMutators = hydratedMutators;
}
public void setIdMutators(Method[] idMutators) {
this.idMutators = idMutators;
}
public void setCacheRegion(String cacheRegion) {
this.cacheRegion = cacheRegion;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_cache_engine_HydrationDescriptor.java
|
2,683 |
public class GatewayService extends AbstractLifecycleComponent<GatewayService> implements ClusterStateListener {
public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
private final Gateway gateway;
private final ThreadPool threadPool;
private final AllocationService allocationService;
private final ClusterService clusterService;
private final DiscoveryService discoveryService;
private final TimeValue recoverAfterTime;
private final int recoverAfterNodes;
private final int expectedNodes;
private final int recoverAfterDataNodes;
private final int expectedDataNodes;
private final int recoverAfterMasterNodes;
private final int expectedMasterNodes;
private final AtomicBoolean recovered = new AtomicBoolean();
private final AtomicBoolean scheduledRecovery = new AtomicBoolean();
@Inject
public GatewayService(Settings settings, Gateway gateway, AllocationService allocationService, ClusterService clusterService, DiscoveryService discoveryService, ThreadPool threadPool) {
super(settings);
this.gateway = gateway;
this.allocationService = allocationService;
this.clusterService = clusterService;
this.discoveryService = discoveryService;
this.threadPool = threadPool;
// allow to control a delay of when indices will get created
this.recoverAfterTime = componentSettings.getAsTime("recover_after_time", null);
this.recoverAfterNodes = componentSettings.getAsInt("recover_after_nodes", -1);
this.expectedNodes = componentSettings.getAsInt("expected_nodes", -1);
this.recoverAfterDataNodes = componentSettings.getAsInt("recover_after_data_nodes", -1);
this.expectedDataNodes = componentSettings.getAsInt("expected_data_nodes", -1);
// default the recover after master nodes to the minimum master nodes in the discovery
this.recoverAfterMasterNodes = componentSettings.getAsInt("recover_after_master_nodes", settings.getAsInt("discovery.zen.minimum_master_nodes", -1));
this.expectedMasterNodes = componentSettings.getAsInt("expected_master_nodes", -1);
        // Add the "not recovered" block as an initial cluster state block; nothing is allowed until the state is recovered
this.clusterService.addInitialStateBlock(STATE_NOT_RECOVERED_BLOCK);
}
@Override
protected void doStart() throws ElasticsearchException {
gateway.start();
// if we received initial state, see if we can recover within the start phase, so we hold the
// node from starting until we recovered properly
if (discoveryService.initialStateReceived()) {
ClusterState clusterState = clusterService.state();
DiscoveryNodes nodes = clusterState.nodes();
if (clusterState.nodes().localNodeMaster() && clusterState.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) {
if (clusterState.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
logger.debug("not recovering from gateway, no master elected yet");
} else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) {
logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]");
} else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) {
logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]");
} else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) {
logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]");
} else {
boolean ignoreRecoverAfterTime;
if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) {
// no expected is set, don't ignore the timeout
ignoreRecoverAfterTime = false;
} else {
// one of the expected is set, see if all of them meet the need, and ignore the timeout in this case
ignoreRecoverAfterTime = true;
if (expectedNodes != -1 && (nodes.masterAndDataNodes().size() < expectedNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
if (expectedMasterNodes != -1 && (nodes.masterNodes().size() < expectedMasterNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
if (expectedDataNodes != -1 && (nodes.dataNodes().size() < expectedDataNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
}
performStateRecovery(ignoreRecoverAfterTime);
}
}
} else {
logger.debug("can't wait on start for (possibly) reading state from gateway, will do it asynchronously");
}
clusterService.addLast(this);
}
@Override
protected void doStop() throws ElasticsearchException {
clusterService.remove(this);
gateway.stop();
}
@Override
protected void doClose() throws ElasticsearchException {
gateway.close();
}
@Override
public void clusterChanged(final ClusterChangedEvent event) {
if (lifecycle.stoppedOrClosed()) {
return;
}
if (event.state().blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
// we need to clear those flags, since we might need to recover again in case we disconnect
// from the cluster and then reconnect
recovered.set(false);
scheduledRecovery.set(false);
}
if (event.localNodeMaster() && event.state().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) {
ClusterState clusterState = event.state();
DiscoveryNodes nodes = clusterState.nodes();
if (event.state().blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
logger.debug("not recovering from gateway, no master elected yet");
} else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) {
logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]");
} else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) {
logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]");
} else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) {
logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]");
} else {
boolean ignoreRecoverAfterTime;
if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) {
// no expected is set, don't ignore the timeout
ignoreRecoverAfterTime = false;
} else {
// one of the expected is set, see if all of them meet the need, and ignore the timeout in this case
ignoreRecoverAfterTime = true;
if (expectedNodes != -1 && (nodes.masterAndDataNodes().size() < expectedNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
if (expectedMasterNodes != -1 && (nodes.masterNodes().size() < expectedMasterNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
if (expectedDataNodes != -1 && (nodes.dataNodes().size() < expectedDataNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
}
final boolean fIgnoreRecoverAfterTime = ignoreRecoverAfterTime;
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
performStateRecovery(fIgnoreRecoverAfterTime);
}
});
}
}
}
private void performStateRecovery(boolean ignoreRecoverAfterTime) {
final Gateway.GatewayStateRecoveredListener recoveryListener = new GatewayRecoveryListener(new CountDownLatch(1));
if (!ignoreRecoverAfterTime && recoverAfterTime != null) {
if (scheduledRecovery.compareAndSet(false, true)) {
logger.debug("delaying initial state recovery for [{}]", recoverAfterTime);
threadPool.schedule(recoverAfterTime, ThreadPool.Names.GENERIC, new Runnable() {
@Override
public void run() {
if (recovered.compareAndSet(false, true)) {
logger.trace("performing state recovery...");
gateway.performStateRecovery(recoveryListener);
}
}
});
}
} else {
if (recovered.compareAndSet(false, true)) {
logger.trace("performing state recovery...");
gateway.performStateRecovery(recoveryListener);
}
}
}
class GatewayRecoveryListener implements Gateway.GatewayStateRecoveredListener {
private final CountDownLatch latch;
GatewayRecoveryListener(CountDownLatch latch) {
this.latch = latch;
}
@Override
public void onSuccess(final ClusterState recoveredState) {
logger.trace("successful state recovery, importing cluster state...");
clusterService.submitStateUpdateTask("local-gateway-elected-state", new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
assert currentState.metaData().indices().isEmpty();
// remove the block, since we recovered from gateway
ClusterBlocks.Builder blocks = ClusterBlocks.builder()
.blocks(currentState.blocks())
.blocks(recoveredState.blocks())
.removeGlobalBlock(STATE_NOT_RECOVERED_BLOCK);
MetaData.Builder metaDataBuilder = MetaData.builder(recoveredState.metaData());
// automatically generate a UID for the metadata if we need to
metaDataBuilder.generateUuidIfNeeded();
if (recoveredState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || currentState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false)) {
blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
}
for (IndexMetaData indexMetaData : recoveredState.metaData()) {
metaDataBuilder.put(indexMetaData, false);
blocks.addBlocks(indexMetaData);
}
// update the state to reflect the new metadata and routing
ClusterState updatedState = ClusterState.builder(currentState)
.blocks(blocks)
.metaData(metaDataBuilder)
.build();
// initialize all index routing tables as empty
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable());
for (ObjectCursor<IndexMetaData> cursor : updatedState.metaData().indices().values()) {
routingTableBuilder.addAsRecovery(cursor.value);
}
// start with 0 based versions for routing table
routingTableBuilder.version(0);
// now, reroute
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder).build());
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
logger.info("recovered [{}] indices into cluster_state", newState.metaData().indices().size());
latch.countDown();
}
});
}
@Override
public void onFailure(String message) {
recovered.set(false);
scheduledRecovery.set(false);
// don't remove the block here, we don't want to allow anything in such a case
logger.info("metadata state not restored, reason: {}", message);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_gateway_GatewayService.java
|
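The gating logic above appears twice, once in doStart and once in clusterChanged: recovery is blocked until the recover_after_* floors are met, and the recover_after_time delay is skipped only when at least one expected_* count is configured and all configured ones are satisfied. That decision reduces to a small pure function; a sketch under those assumptions:
public class RecoveryGate {
    // Returns true when the recover_after_time delay can be skipped:
    // at least one expected_* threshold is configured, and all configured ones are met.
    static boolean ignoreRecoverAfterTime(int expectedNodes, int expectedMasterNodes, int expectedDataNodes,
                                          int nodes, int masterNodes, int dataNodes) {
        if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) {
            return false; // nothing configured: always honor the delay
        }
        boolean ignore = true;
        if (expectedNodes != -1 && nodes < expectedNodes) ignore = false;
        if (expectedMasterNodes != -1 && masterNodes < expectedMasterNodes) ignore = false;
        if (expectedDataNodes != -1 && dataNodes < expectedDataNodes) ignore = false;
        return ignore;
    }
    public static void main(String[] args) {
        System.out.println(ignoreRecoverAfterTime(3, -1, -1, 3, 1, 2)); // true: expected_nodes met
        System.out.println(ignoreRecoverAfterTime(3, -1, -1, 2, 1, 1)); // false: still waiting
    }
}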
1,083 |
public class FieldChain {
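    // Note: this nested class is shown in isolation; the name and operationsChain
    // fields it reads appear to be declared in the enclosing OSQLFilterItemField class.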
private FieldChain() {
}
public String getItemName(int fieldIndex) {
if (fieldIndex == 0) {
return name;
} else {
return operationsChain.get(fieldIndex - 1).getValue()[0].toString();
}
}
public int getItemCount() {
if (operationsChain == null) {
return 1;
} else {
return operationsChain.size() + 1;
}
}
    /**
     * A field chain is considered long if it contains more than one item.
     *
     * @return true if this chain is long, false otherwise.
     */
public boolean isLong() {
return operationsChain != null && operationsChain.size() > 0;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_filter_OSQLFilterItemField.java
|
278 |
public interface OCommandResultListener {
/**
* This method is called for each result.
*
* @param iRecord
* Current record
* @return True to continue the query, otherwise false
*/
public boolean result(Object iRecord);
/**
   * Called at the end of processing. This is useful for cleaning up local attributes.
*/
public void end();
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_OCommandResultListener.java
|
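Because result() returning false aborts the query, a listener can implement early termination on the client side. A hypothetical implementation compiled against the interface above, collecting at most a fixed number of records:
import java.util.ArrayList;
import java.util.List;
// Illustrative listener: collects up to 'limit' records, then asks the engine to stop.
public class FirstNResultListener implements OCommandResultListener {
    private final int limit;
    private final List<Object> collected = new ArrayList<Object>();
    public FirstNResultListener(int limit) {
        this.limit = limit;
    }
    @Override
    public boolean result(Object iRecord) {
        collected.add(iRecord);
        return collected.size() < limit; // returning false stops the query
    }
    @Override
    public void end() {
        // clean up local attributes if needed
    }
    public List<Object> getCollected() {
        return collected;
    }
}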
3 |
public interface CommandParser extends TextCommandConstants {
TextCommand parser(SocketTextReader socketTextReader, String cmd, int space);
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_CommandParser.java
|
1,383 |
public class TitanCassandraRecordReader extends RecordReader<NullWritable, FaunusVertex> {
private static final Logger log =
LoggerFactory.getLogger(TitanCassandraRecordReader.class);
private ColumnFamilyRecordReader reader;
private TitanCassandraHadoopGraph graph;
private FaunusVertexQueryFilter vertexQuery;
private Configuration configuration;
private FaunusVertex vertex;
public TitanCassandraRecordReader(final TitanCassandraHadoopGraph graph, final FaunusVertexQueryFilter vertexQuery, final ColumnFamilyRecordReader reader) {
this.graph = graph;
this.vertexQuery = vertexQuery;
this.reader = reader;
}
@Override
public void initialize(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
reader.initialize(inputSplit, taskAttemptContext);
configuration = ModifiableHadoopConfiguration.of(DEFAULT_COMPAT.getContextConfiguration(taskAttemptContext));
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
while (reader.nextKeyValue()) {
// TODO titan05 integration -- the duplicate() call may be unnecessary
final FaunusVertex temp = graph.readHadoopVertex(configuration, reader.getCurrentKey().duplicate(), reader.getCurrentValue());
if (null != temp) {
vertex = temp;
vertexQuery.filterRelationsOf(vertex);
return true;
}
}
return false;
}
@Override
public NullWritable getCurrentKey() throws IOException, InterruptedException {
return NullWritable.get();
}
@Override
public FaunusVertex getCurrentValue() throws IOException, InterruptedException {
return vertex;
}
@Override
public void close() throws IOException {
graph.close();
reader.close();
}
@Override
public float getProgress() {
return reader.getProgress();
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_formats_cassandra_TitanCassandraRecordReader.java
|
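nextKeyValue() above is a filtering pass-through: it advances the wrapped reader until a row materializes into a non-null vertex, applies the vertex query filter, and only then exposes the result. The same skip-nulls pattern over an arbitrary iterator, as a generic sketch:
import java.util.Arrays;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.function.Function;
// Generic skip-nulls wrapper: advances the source until the transform yields a value.
public class FilteringIterator<S, T> implements Iterator<T> {
    private final Iterator<S> source;
    private final Function<S, T> transform; // returns null for rows to skip
    private T next;
    public FilteringIterator(Iterator<S> source, Function<S, T> transform) {
        this.source = source;
        this.transform = transform;
    }
    @Override
    public boolean hasNext() {
        while (next == null && source.hasNext()) {
            next = transform.apply(source.next());
        }
        return next != null;
    }
    @Override
    public T next() {
        if (!hasNext()) throw new NoSuchElementException();
        T result = next;
        next = null;
        return result;
    }
    public static void main(String[] args) {
        Iterator<String> it = new FilteringIterator<>(
                Arrays.asList("a", "", "b", "", "c").iterator(),
                s -> s.isEmpty() ? null : s.toUpperCase());
        while (it.hasNext()) System.out.print(it.next() + " "); // A B C
    }
}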
3 |
ceylonOutputFolder.accept(new IResourceVisitor() {
@Override
public boolean visit(IResource resource) throws CoreException {
if (resource instanceof IFile) {
filesToAddInArchive.add((IFile)resource);
}
return true;
}
});
| 0true
|
plugins_com.redhat.ceylon.eclipse.android.plugin_src_com_redhat_ceylon_eclipse_android_plugin_AndroidBuildHookProvider.java
|
37 |
public class CompletionException extends RuntimeException {
private static final long serialVersionUID = 7830266012832686185L;
/**
* Constructs a {@code CompletionException} with no detail message.
* The cause is not initialized, and may subsequently be
* initialized by a call to {@link #initCause(Throwable) initCause}.
*/
protected CompletionException() { }
/**
* Constructs a {@code CompletionException} with the specified detail
* message. The cause is not initialized, and may subsequently be
* initialized by a call to {@link #initCause(Throwable) initCause}.
*
* @param message the detail message
*/
protected CompletionException(String message) {
super(message);
}
/**
* Constructs a {@code CompletionException} with the specified detail
* message and cause.
*
* @param message the detail message
* @param cause the cause (which is saved for later retrieval by the
* {@link #getCause()} method)
*/
public CompletionException(String message, Throwable cause) {
super(message, cause);
}
/**
* Constructs a {@code CompletionException} with the specified cause.
* The detail message is set to {@code (cause == null ? null :
* cause.toString())} (which typically contains the class and
* detail message of {@code cause}).
*
* @param cause the cause (which is saved for later retrieval by the
* {@link #getCause()} method)
*/
public CompletionException(Throwable cause) {
super(cause);
}
}
| 0true
|
src_main_java_jsr166e_CompletionException.java
|
820 |
public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> {
private List<SearchRequest> requests = Lists.newArrayList();
private IndicesOptions indicesOptions = IndicesOptions.strict();
/**
     * Add a search request to execute. Note: the order is important; the search responses will be returned in the
     * same order as the search requests.
*/
public MultiSearchRequest add(SearchRequestBuilder request) {
requests.add(request.request());
return this;
}
/**
     * Add a search request to execute. Note: the order is important; the search responses will be returned in the
     * same order as the search requests.
*/
public MultiSearchRequest add(SearchRequest request) {
requests.add(request);
return this;
}
public MultiSearchRequest add(byte[] data, int from, int length, boolean contentUnsafe,
@Nullable String[] indices, @Nullable String[] types, @Nullable String searchType) throws Exception {
return add(new BytesArray(data, from, length), contentUnsafe, indices, types, searchType, null, IndicesOptions.strict(), true);
}
public MultiSearchRequest add(BytesReference data, boolean contentUnsafe, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, IndicesOptions indicesOptions) throws Exception {
return add(data, contentUnsafe, indices, types, searchType, null, indicesOptions, true);
}
public MultiSearchRequest add(BytesReference data, boolean contentUnsafe, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, @Nullable String routing, IndicesOptions indicesOptions, boolean allowExplicitIndex) throws Exception {
XContent xContent = XContentFactory.xContent(data);
int from = 0;
int length = data.length();
byte marker = xContent.streamSeparator();
while (true) {
int nextMarker = findNextMarker(marker, from, data, length);
if (nextMarker == -1) {
break;
}
// support first line with \n
if (nextMarker == 0) {
from = nextMarker + 1;
continue;
}
SearchRequest searchRequest = new SearchRequest();
if (indices != null) {
searchRequest.indices(indices);
}
if (indicesOptions != null) {
searchRequest.indicesOptions(indicesOptions);
}
if (types != null && types.length > 0) {
searchRequest.types(types);
}
if (routing != null) {
searchRequest.routing(routing);
}
searchRequest.searchType(searchType);
boolean ignoreUnavailable = IndicesOptions.strict().ignoreUnavailable();
boolean allowNoIndices = IndicesOptions.strict().allowNoIndices();
boolean expandWildcardsOpen = IndicesOptions.strict().expandWildcardsOpen();
boolean expandWildcardsClosed = IndicesOptions.strict().expandWildcardsClosed();
// now parse the action
if (nextMarker - from > 0) {
XContentParser parser = xContent.createParser(data.slice(from, nextMarker - from));
try {
                    // Move to START_OBJECT; if the token is null, the data is empty
XContentParser.Token token = parser.nextToken();
if (token != null) {
assert token == XContentParser.Token.START_OBJECT;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if ("index".equals(currentFieldName) || "indices".equals(currentFieldName)) {
if (!allowExplicitIndex) {
throw new ElasticsearchIllegalArgumentException("explicit index in multi search is not allowed");
}
searchRequest.indices(Strings.splitStringByCommaToArray(parser.text()));
} else if ("type".equals(currentFieldName) || "types".equals(currentFieldName)) {
searchRequest.types(Strings.splitStringByCommaToArray(parser.text()));
} else if ("search_type".equals(currentFieldName) || "searchType".equals(currentFieldName)) {
searchRequest.searchType(parser.text());
} else if ("preference".equals(currentFieldName)) {
searchRequest.preference(parser.text());
} else if ("routing".equals(currentFieldName)) {
searchRequest.routing(parser.text());
} else if ("ignore_unavailable".equals(currentFieldName) || "ignoreUnavailable".equals(currentFieldName)) {
ignoreUnavailable = parser.booleanValue();
} else if ("allow_no_indices".equals(currentFieldName) || "allowNoIndices".equals(currentFieldName)) {
allowNoIndices = parser.booleanValue();
} else if ("expand_wildcards".equals(currentFieldName) || "expandWildcards".equals(currentFieldName)) {
String[] wildcards = Strings.splitStringByCommaToArray(parser.text());
for (String wildcard : wildcards) {
if ("open".equals(wildcard)) {
expandWildcardsOpen = true;
} else if ("closed".equals(wildcard)) {
expandWildcardsClosed = true;
} else {
throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]");
}
}
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("index".equals(currentFieldName) || "indices".equals(currentFieldName)) {
if (!allowExplicitIndex) {
throw new ElasticsearchIllegalArgumentException("explicit index in multi search is not allowed");
}
searchRequest.indices(parseArray(parser));
} else if ("type".equals(currentFieldName) || "types".equals(currentFieldName)) {
searchRequest.types(parseArray(parser));
} else if ("expand_wildcards".equals(currentFieldName) || "expandWildcards".equals(currentFieldName)) {
String[] wildcards = parseArray(parser);
for (String wildcard : wildcards) {
if ("open".equals(wildcard)) {
expandWildcardsOpen = true;
} else if ("closed".equals(wildcard)) {
expandWildcardsClosed = true;
} else {
throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]");
}
}
} else {
throw new ElasticsearchParseException(currentFieldName + " doesn't support arrays");
}
}
}
}
} finally {
parser.close();
}
}
searchRequest.indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed));
// move pointers
from = nextMarker + 1;
// now for the body
nextMarker = findNextMarker(marker, from, data, length);
if (nextMarker == -1) {
break;
}
searchRequest.source(data.slice(from, nextMarker - from), contentUnsafe);
// move pointers
from = nextMarker + 1;
add(searchRequest);
}
return this;
}
private String[] parseArray(XContentParser parser) throws IOException {
final List<String> list = new ArrayList<String>();
assert parser.currentToken() == XContentParser.Token.START_ARRAY;
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
list.add(parser.text());
}
return list.toArray(new String[list.size()]);
}
private int findNextMarker(byte marker, int from, BytesReference data, int length) {
for (int i = from; i < length; i++) {
if (data.get(i) == marker) {
return i;
}
}
return -1;
}
public List<SearchRequest> requests() {
return this.requests;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (requests.isEmpty()) {
validationException = addValidationError("no requests added", validationException);
}
for (int i = 0; i < requests.size(); i++) {
ActionRequestValidationException ex = requests.get(i).validate();
if (ex != null) {
if (validationException == null) {
validationException = new ActionRequestValidationException();
}
validationException.addValidationErrors(ex.validationErrors());
}
}
return validationException;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public MultiSearchRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
for (int i = 0; i < size; i++) {
SearchRequest request = new SearchRequest();
request.readFrom(in);
requests.add(request);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(requests.size());
for (SearchRequest request : requests) {
request.writeTo(out);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_search_MultiSearchRequest.java
|
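The add(BytesReference, ...) overload above parses a newline-delimited body: alternating header and body lines, each terminated by the stream separator that findNextMarker scans for, with an empty header line permitted. A sketch of assembling such a payload by hand (index names and queries are illustrative):
public class MsearchBodyDemo {
    public static void main(String[] args) {
        // Each request contributes a header line and a body line, every line
        // terminated by '\n', the marker that findNextMarker() scans for.
        StringBuilder body = new StringBuilder();
        body.append("{\"index\":\"products\",\"search_type\":\"count\"}\n");
        body.append("{\"query\":{\"match_all\":{}}}\n");
        body.append("{\"index\":\"orders\",\"routing\":\"user42\"}\n");
        body.append("{\"query\":{\"term\":{\"status\":\"open\"}}}\n");
        System.out.print(body);
    }
}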
2,841 |
private static class DefaultNodeGroup implements NodeGroup {
final PartitionTable groupPartitionTable = new PartitionTable();
final Map<Address, PartitionTable> nodePartitionTables = new HashMap<Address, PartitionTable>();
final Set<Address> nodes = nodePartitionTables.keySet();
final Collection<PartitionTable> nodeTables = nodePartitionTables.values();
final LinkedList<Integer> partitionQ = new LinkedList<Integer>();
@Override
public void addNode(Address address) {
nodePartitionTables.put(address, new PartitionTable());
}
@Override
public boolean hasNode(Address address) {
return nodes.contains(address);
}
@Override
public Set<Address> getNodes() {
return nodes;
}
@Override
public PartitionTable getPartitionTable(Address address) {
return nodePartitionTables.get(address);
}
@Override
public void resetPartitions() {
groupPartitionTable.reset();
partitionQ.clear();
for (PartitionTable table : nodeTables) {
table.reset();
}
}
@Override
public int getPartitionCount(int index) {
return groupPartitionTable.size(index);
}
@Override
public boolean containsPartition(Integer partitionId) {
return groupPartitionTable.contains(partitionId);
}
@Override
public boolean ownPartition(Address address, int index, Integer partitionId) {
if (!hasNode(address)) {
String error = "Address does not belong to this group: " + address.toString();
logger.warning(error);
return false;
}
if (containsPartition(partitionId)) {
if (logger.isFinestEnabled()) {
String error = "Partition[" + partitionId + "] is already owned by this group! " +
"Duplicate!";
logger.finest(error);
}
return false;
}
groupPartitionTable.add(index, partitionId);
return nodePartitionTables.get(address).add(index, partitionId);
}
@Override
public boolean addPartition(int replicaIndex, Integer partitionId) {
if (containsPartition(partitionId)) {
return false;
}
if (groupPartitionTable.add(replicaIndex, partitionId)) {
partitionQ.add(partitionId);
return true;
}
return false;
}
@Override
public Iterator<Integer> getPartitionsIterator(final int index) {
final Iterator<Integer> iter = groupPartitionTable.getPartitions(index).iterator();
return new Iterator<Integer>() {
Integer current = null;
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public Integer next() {
return (current = iter.next());
}
@Override
public void remove() {
iter.remove();
doRemovePartition(index, current);
}
};
}
@Override
public boolean removePartition(int index, Integer partitionId) {
if (groupPartitionTable.remove(index, partitionId)) {
doRemovePartition(index, partitionId);
return true;
}
return false;
}
private void doRemovePartition(int index, Integer partitionId) {
for (PartitionTable table : nodeTables) {
if (table.remove(index, partitionId)) {
break;
}
}
}
@Override
public void postProcessPartitionTable(int index) {
if (nodes.size() == 1) {
PartitionTable table = nodeTables.iterator().next();
while (!partitionQ.isEmpty()) {
table.add(index, partitionQ.poll());
}
} else {
int totalCount = getPartitionCount(index);
int avgCount = totalCount / nodes.size();
List<PartitionTable> underLoadedStates = new LinkedList<PartitionTable>();
for (PartitionTable table : nodeTables) {
Set<Integer> partitions = table.getPartitions(index);
if (partitions.size() > avgCount) {
Iterator<Integer> iter = partitions.iterator();
while (partitions.size() > avgCount) {
Integer partitionId = iter.next();
iter.remove();
partitionQ.add(partitionId);
}
} else {
underLoadedStates.add(table);
}
}
if (!partitionQ.isEmpty()) {
for (PartitionTable table : underLoadedStates) {
while (table.size(index) < avgCount) {
table.add(index, partitionQ.poll());
}
}
}
while (!partitionQ.isEmpty()) {
for (PartitionTable table : nodeTables) {
table.add(index, partitionQ.poll());
if (partitionQ.isEmpty()) {
break;
}
}
}
}
}
@Override
public String toString() {
return "DefaultNodeGroupRegistry [nodes=" + nodes + "]";
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_partition_impl_PartitionStateGeneratorImpl.java
|
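postProcessPartitionTable levels partition ownership within a group: members above the average push surplus partitions onto a queue, members below it drain the queue, and any remainder is dealt round-robin. A simplified standalone sketch of that leveling step using plain lists in place of PartitionTables:
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;
public class LevelingSketch {
    // Moves partitions between member lists until each holds roughly total/members.size().
    static void level(List<List<Integer>> members) {
        int total = 0;
        for (List<Integer> m : members) total += m.size();
        int avg = total / members.size();
        Deque<Integer> surplus = new ArrayDeque<>();
        for (List<Integer> m : members) {
            Iterator<Integer> it = m.iterator();
            while (m.size() > avg && it.hasNext()) {
                surplus.add(it.next());
                it.remove();
            }
        }
        for (List<Integer> m : members) {
            while (m.size() < avg && !surplus.isEmpty()) m.add(surplus.poll());
        }
        // Deal any remainder round-robin, as the original does
        while (!surplus.isEmpty()) {
            for (List<Integer> m : members) {
                if (surplus.isEmpty()) break;
                m.add(surplus.poll());
            }
        }
    }
    public static void main(String[] args) {
        List<List<Integer>> members = Arrays.asList(
                new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5)),
                new ArrayList<>(Arrays.asList(6)),
                new ArrayList<>(Arrays.asList(7)));
        level(members);
        System.out.println(members); // [[4, 5, 3], [6, 1], [7, 2]]
    }
}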
5,250 |
public class InternalRange<B extends InternalRange.Bucket> extends InternalAggregation implements Range {
static final Factory FACTORY = new Factory();
public final static Type TYPE = new Type("range");
private final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public InternalRange readResult(StreamInput in) throws IOException {
InternalRange ranges = new InternalRange();
ranges.readFrom(in);
return ranges;
}
};
public static void registerStream() {
AggregationStreams.registerStream(STREAM, TYPE.stream());
}
public static class Bucket implements Range.Bucket {
private double from = Double.NEGATIVE_INFINITY;
private double to = Double.POSITIVE_INFINITY;
private long docCount;
InternalAggregations aggregations;
private String key;
private boolean explicitKey;
public Bucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
if (key != null) {
this.key = key;
explicitKey = true;
} else {
this.key = key(from, to, formatter);
explicitKey = false;
}
this.from = from;
this.to = to;
this.docCount = docCount;
this.aggregations = aggregations;
}
public String getKey() {
return key;
}
@Override
public Text getKeyAsText() {
return new StringText(getKey());
}
@Override
public Number getFrom() {
return from;
}
@Override
public Number getTo() {
return to;
}
@Override
public long getDocCount() {
return docCount;
}
@Override
public Aggregations getAggregations() {
return aggregations;
}
Bucket reduce(List<Bucket> ranges, CacheRecycler cacheRecycler) {
if (ranges.size() == 1) {
                // we still need to call reduce on all the sub-aggregations
Bucket bucket = ranges.get(0);
bucket.aggregations.reduce(cacheRecycler);
return bucket;
}
Bucket reduced = null;
List<InternalAggregations> aggregationsList = Lists.newArrayListWithCapacity(ranges.size());
for (Bucket range : ranges) {
if (reduced == null) {
reduced = range;
} else {
reduced.docCount += range.docCount;
}
aggregationsList.add(range.aggregations);
}
reduced.aggregations = InternalAggregations.reduce(aggregationsList, cacheRecycler);
return reduced;
}
void toXContent(XContentBuilder builder, Params params, ValueFormatter formatter, boolean keyed) throws IOException {
if (keyed) {
builder.startObject(key);
} else {
builder.startObject();
if (explicitKey) {
builder.field(CommonFields.KEY, key);
}
}
if (!Double.isInfinite(from)) {
builder.field(CommonFields.FROM, from);
if (formatter != null) {
builder.field(CommonFields.FROM_AS_STRING, formatter.format(from));
}
}
if (!Double.isInfinite(to)) {
builder.field(CommonFields.TO, to);
if (formatter != null) {
builder.field(CommonFields.TO_AS_STRING, formatter.format(to));
}
}
builder.field(CommonFields.DOC_COUNT, docCount);
aggregations.toXContentInternal(builder, params);
builder.endObject();
}
private static String key(double from, double to, ValueFormatter formatter) {
StringBuilder sb = new StringBuilder();
sb.append(Double.isInfinite(from) ? "*" : formatter != null ? formatter.format(from) : from);
sb.append("-");
sb.append(Double.isInfinite(to) ? "*" : formatter != null ? formatter.format(to) : to);
return sb.toString();
}
}
public static class Factory<B extends Bucket, R extends InternalRange<B>> {
public String type() {
return TYPE.name();
}
public R create(String name, List<B> ranges, ValueFormatter formatter, boolean keyed, boolean unmapped) {
return (R) new InternalRange<B>(name, ranges, formatter, keyed, unmapped);
}
public B createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
return (B) new Bucket(key, from, to, docCount, aggregations, formatter);
}
}
private List<B> ranges;
private Map<String, B> rangeMap;
private ValueFormatter formatter;
private boolean keyed;
private boolean unmapped;
public InternalRange() {} // for serialization
public InternalRange(String name, List<B> ranges, ValueFormatter formatter, boolean keyed, boolean unmapped) {
super(name);
this.ranges = ranges;
this.formatter = formatter;
this.keyed = keyed;
this.unmapped = unmapped;
}
@Override
public Type type() {
return TYPE;
}
@Override
public Collection<B> getBuckets() {
return ranges;
}
@Override
public B getBucketByKey(String key) {
if (rangeMap == null) {
rangeMap = new HashMap<String, B>(ranges.size());
for (Range.Bucket bucket : ranges) {
rangeMap.put(bucket.getKey(), (B) bucket);
}
}
return rangeMap.get(key);
}
@Override
public InternalAggregation reduce(ReduceContext reduceContext) {
List<InternalAggregation> aggregations = reduceContext.aggregations();
if (aggregations.size() == 1) {
InternalRange<B> reduced = (InternalRange<B>) aggregations.get(0);
for (B bucket : reduced.ranges) {
bucket.aggregations.reduce(reduceContext.cacheRecycler());
}
return reduced;
}
List<List<Bucket>> rangesList = null;
for (InternalAggregation aggregation : aggregations) {
InternalRange<Bucket> ranges = (InternalRange) aggregation;
if (ranges.unmapped) {
continue;
}
if (rangesList == null) {
rangesList = new ArrayList<List<Bucket>>(ranges.ranges.size());
for (Bucket bucket : ranges.ranges) {
List<Bucket> sameRangeList = new ArrayList<Bucket>(aggregations.size());
sameRangeList.add(bucket);
rangesList.add(sameRangeList);
}
} else {
int i = 0;
for (Bucket range : ranges.ranges) {
rangesList.get(i++).add(range);
}
}
}
if (rangesList == null) {
// unmapped, we can just take the first one
return aggregations.get(0);
}
InternalRange reduced = (InternalRange) aggregations.get(0);
int i = 0;
for (List<Bucket> sameRangeList : rangesList) {
reduced.ranges.set(i++, (sameRangeList.get(0)).reduce(sameRangeList, reduceContext.cacheRecycler()));
}
return reduced;
}
protected B createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, ValueFormatter formatter) {
return (B) new Bucket(key, from, to, docCount, aggregations, formatter);
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
formatter = ValueFormatterStreams.readOptional(in);
keyed = in.readBoolean();
int size = in.readVInt();
List<B> ranges = Lists.newArrayListWithCapacity(size);
for (int i = 0; i < size; i++) {
String key = in.readOptionalString();
ranges.add(createBucket(key, in.readDouble(), in.readDouble(), in.readVLong(), InternalAggregations.readAggregations(in), formatter));
}
this.ranges = ranges;
this.rangeMap = null;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
ValueFormatterStreams.writeOptional(formatter, out);
out.writeBoolean(keyed);
out.writeVInt(ranges.size());
for (B bucket : ranges) {
out.writeOptionalString(((Bucket) bucket).key);
out.writeDouble(((Bucket) bucket).from);
out.writeDouble(((Bucket) bucket).to);
out.writeVLong(((Bucket) bucket).docCount);
bucket.aggregations.writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
if (keyed) {
builder.startObject(CommonFields.BUCKETS);
} else {
builder.startArray(CommonFields.BUCKETS);
}
for (B range : ranges) {
range.toXContent(builder, params, formatter, keyed);
}
if (keyed) {
builder.endObject();
} else {
builder.endArray();
}
return builder.endObject();
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_range_InternalRange.java
|
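reduce() merges per-shard range aggregations bucket by bucket: buckets for the same range are grouped positionally across shards, their doc counts summed, and their sub-aggregations reduced together. The counting part in isolation, as a toy sketch rather than the Elasticsearch types:
import java.util.Arrays;
public class RangeReduceSketch {
    // docCounts[shard][rangeIndex]: per-shard counts for the same fixed list of ranges.
    // The reduce step sums positionally, one total per range.
    static long[] reduce(long[][] docCounts) {
        long[] totals = new long[docCounts[0].length];
        for (long[] shard : docCounts) {
            for (int i = 0; i < shard.length; i++) {
                totals[i] += shard[i];
            }
        }
        return totals;
    }
    public static void main(String[] args) {
        long[][] perShard = { {5, 2, 0}, {1, 4, 3} }; // two shards, three ranges
        System.out.println(Arrays.toString(reduce(perShard))); // [6, 6, 3]
    }
}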
278 |
assertTrueEventually(new AssertTask() {
@Override
public void run() throws Exception {
assertEquals(COUNT, map.size());
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_io_ClientExecutionPoolSizeLowTest.java
|
849 |
public class TransportSearchAction extends TransportAction<SearchRequest, SearchResponse> {
private final ClusterService clusterService;
private final TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction;
private final TransportSearchQueryThenFetchAction queryThenFetchAction;
private final TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction;
private final TransportSearchQueryAndFetchAction queryAndFetchAction;
private final TransportSearchScanAction scanAction;
private final TransportSearchCountAction countAction;
private final boolean optimizeSingleShard;
@Inject
public TransportSearchAction(Settings settings, ThreadPool threadPool,
TransportService transportService, ClusterService clusterService,
TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction,
TransportSearchQueryThenFetchAction queryThenFetchAction,
TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction,
TransportSearchQueryAndFetchAction queryAndFetchAction,
TransportSearchScanAction scanAction,
TransportSearchCountAction countAction) {
super(settings, threadPool);
this.clusterService = clusterService;
this.dfsQueryThenFetchAction = dfsQueryThenFetchAction;
this.queryThenFetchAction = queryThenFetchAction;
this.dfsQueryAndFetchAction = dfsQueryAndFetchAction;
this.queryAndFetchAction = queryAndFetchAction;
this.scanAction = scanAction;
this.countAction = countAction;
this.optimizeSingleShard = componentSettings.getAsBoolean("optimize_single_shard", true);
transportService.registerHandler(SearchAction.NAME, new TransportHandler());
}
@Override
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
// optimize search type for cases where there is only one shard group to search on
if (optimizeSingleShard && searchRequest.searchType() != SCAN && searchRequest.searchType() != COUNT) {
try {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = clusterState.metaData().concreteIndices(searchRequest.indices(), searchRequest.indicesOptions());
Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(searchRequest.routing(), searchRequest.indices());
int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, searchRequest.indices(), concreteIndices, routingMap, searchRequest.preference());
if (shardCount == 1) {
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
searchRequest.searchType(QUERY_AND_FETCH);
}
} catch (IndexMissingException e) {
// ignore this, we will notify the search response if its really the case
// from the actual action
} catch (Exception e) {
logger.debug("failed to optimize search type, continue as normal", e);
}
}
if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) {
dfsQueryThenFetchAction.execute(searchRequest, listener);
} else if (searchRequest.searchType() == SearchType.QUERY_THEN_FETCH) {
queryThenFetchAction.execute(searchRequest, listener);
} else if (searchRequest.searchType() == SearchType.DFS_QUERY_AND_FETCH) {
dfsQueryAndFetchAction.execute(searchRequest, listener);
} else if (searchRequest.searchType() == SearchType.QUERY_AND_FETCH) {
queryAndFetchAction.execute(searchRequest, listener);
} else if (searchRequest.searchType() == SearchType.SCAN) {
scanAction.execute(searchRequest, listener);
} else if (searchRequest.searchType() == SearchType.COUNT) {
countAction.execute(searchRequest, listener);
}
}
private class TransportHandler extends BaseTransportRequestHandler<SearchRequest> {
@Override
public SearchRequest newInstance() {
return new SearchRequest();
}
@Override
public void messageReceived(SearchRequest request, final TransportChannel channel) throws Exception {
// no need for a threaded listener
request.listenerThreaded(false);
// we don't spawn, so if we get a request with no threading, change it to single threaded
if (request.operationThreading() == SearchOperationThreading.NO_THREADS) {
request.operationThreading(SearchOperationThreading.SINGLE_THREAD);
}
execute(request, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse result) {
try {
channel.sendResponse(result);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send response for search", e1);
}
}
});
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_search_TransportSearchAction.java
|
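The doExecute method above rewrites the search type when the request resolves to exactly one shard: QUERY_AND_FETCH returns the same hits in a single round-trip, so distributed term statistics (DFS) and a separate fetch phase buy nothing. A compact illustration of that decision (the enum and helper below are illustrative, not the Elasticsearch API):

enum SearchType { DFS_QUERY_THEN_FETCH, QUERY_THEN_FETCH, DFS_QUERY_AND_FETCH, QUERY_AND_FETCH, SCAN, COUNT }

final class SearchTypeOptimizer {
    // mirrors the optimizeSingleShard branch above: scan and count are left untouched
    static SearchType optimize(SearchType requested, int shardCount) {
        if (shardCount == 1 && requested != SearchType.SCAN && requested != SearchType.COUNT) {
            return SearchType.QUERY_AND_FETCH;
        }
        return requested;
    }
}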
159 |
public abstract class MultiTargetClientRequest extends ClientRequest {
public static final int TRY_COUNT = 100;
@Override
final void process() throws Exception {
ClientEndpoint endpoint = getEndpoint();
OperationFactory operationFactory = createOperationFactory();
Collection<Address> targets = getTargets();
if (targets.isEmpty()) {
endpoint.sendResponse(reduce(new HashMap<Address, Object>()), getCallId());
return;
}
MultiTargetCallback callback = new MultiTargetCallback(targets);
for (Address target : targets) {
Operation op = operationFactory.createOperation();
op.setCallerUuid(endpoint.getUuid());
InvocationBuilder builder = clientEngine.createInvocationBuilder(getServiceName(), op, target)
.setTryCount(TRY_COUNT)
.setResultDeserialized(false)
.setCallback(new SingleTargetCallback(target, callback));
builder.invoke();
}
}
protected abstract OperationFactory createOperationFactory();
protected abstract Object reduce(Map<Address, Object> map);
public abstract Collection<Address> getTargets();
private final class MultiTargetCallback {
final Collection<Address> targets;
final ConcurrentMap<Address, Object> results;
private MultiTargetCallback(Collection<Address> targets) {
this.targets = synchronizedSet(new HashSet<Address>(targets));
this.results = new ConcurrentHashMap<Address, Object>(targets.size());
}
public void notify(Address target, Object result) {
if (targets.remove(target)) {
results.put(target, result);
} else {
if (results.containsKey(target)) {
throw new IllegalArgumentException("Duplicate response from -> " + target);
}
throw new IllegalArgumentException("Unknown target! -> " + target);
}
if (targets.isEmpty()) {
Object response = reduce(results);
endpoint.sendResponse(response, getCallId());
}
}
}
private static final class SingleTargetCallback implements Callback<Object> {
final Address target;
final MultiTargetCallback parent;
private SingleTargetCallback(Address target, MultiTargetCallback parent) {
this.target = target;
this.parent = parent;
}
@Override
public void notify(Object object) {
parent.notify(target, object);
}
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_MultiTargetClientRequest.java
|
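The request above fans one operation out per target address and reduces the collected results once the final response arrives. A JDK-only sketch of that scatter-gather shape (hypothetical names, no Hazelcast types):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.function.Function;

final class ScatterGather {
    static <T, R> R invokeAll(ExecutorService pool, Map<String, Callable<T>> perTarget,
                              Function<Map<String, T>, R> reduce) throws Exception {
        Map<String, Future<T>> futures = new LinkedHashMap<>();
        for (Map.Entry<String, Callable<T>> e : perTarget.entrySet()) {
            futures.put(e.getKey(), pool.submit(e.getValue())); // scatter: one task per target
        }
        Map<String, T> results = new LinkedHashMap<>();
        for (Map.Entry<String, Future<T>> e : futures.entrySet()) {
            results.put(e.getKey(), e.getValue().get());        // gather: block per target
        }
        return reduce.apply(results);                           // reduce: one combined answer
    }
}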
107 |
static final class ValuesView<K,V> extends CollectionView<K,V,V>
implements Collection<V>, java.io.Serializable {
private static final long serialVersionUID = 2249069246763182397L;
ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); }
public final boolean contains(Object o) {
return map.containsValue(o);
}
public final boolean remove(Object o) {
if (o != null) {
for (Iterator<V> it = iterator(); it.hasNext();) {
if (o.equals(it.next())) {
it.remove();
return true;
}
}
}
return false;
}
public final Iterator<V> iterator() {
ConcurrentHashMapV8<K,V> m = map;
Node<K,V>[] t;
int f = (t = m.table) == null ? 0 : t.length;
return new ValueIterator<K,V>(t, f, 0, f, m);
}
public final boolean add(V e) {
throw new UnsupportedOperationException();
}
public final boolean addAll(Collection<? extends V> c) {
throw new UnsupportedOperationException();
}
public ConcurrentHashMapSpliterator<V> spliteratorJSR166() {
Node<K,V>[] t;
ConcurrentHashMapV8<K,V> m = map;
long n = m.sumCount();
int f = (t = m.table) == null ? 0 : t.length;
return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
}
public void forEach(Action<? super V> action) {
if (action == null) throw new NullPointerException();
Node<K,V>[] t;
if ((t = map.table) != null) {
Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
for (Node<K,V> p; (p = it.advance()) != null; )
action.apply(p.val);
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
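The values() view above is backed by the map, so mutations through the view's iterator hit the underlying entries, while add() is rejected because a value cannot exist without a key. The same contract can be observed with java.util.concurrent.ConcurrentHashMap, which ConcurrentHashMapV8 backports:

import java.util.concurrent.ConcurrentHashMap;

final class ValuesViewDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        map.put("b", 2);
        map.values().remove(2);  // removes the ("b", 2) entry through the view
        System.out.println(map); // {a=1}
        // map.values().add(3);  // would throw UnsupportedOperationException
    }
}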
631 |
public abstract class OIndexTxAware<T> extends OIndexAbstractDelegate<T> {
protected ODatabaseRecord database;
public OIndexTxAware(final ODatabaseRecord iDatabase, final OIndex<T> iDelegate) {
super(iDelegate);
database = iDatabase;
}
@Override
public long getSize() {
long tot = delegate.getSize();
final OTransactionIndexChanges indexChanges = database.getTransaction().getIndexChanges(delegate.getName());
if (indexChanges != null) {
if (indexChanges.cleared)
// BEGIN FROM 0
tot = 0;
for (final Entry<Object, OTransactionIndexChangesPerKey> entry : indexChanges.changesPerKey.entrySet()) {
for (final OTransactionIndexEntry e : entry.getValue().entries) {
if (e.operation == OPERATION.REMOVE) {
if (e.value == null)
// KEY REMOVED
tot--;
} else if (e.operation == OPERATION.PUT) {
}
}
}
}
return tot;
}
@Override
public OIndexTxAware<T> put(final Object iKey, final OIdentifiable iValue) {
final ORID rid = iValue.getIdentity();
if (!rid.isValid())
// EARLY SAVE IT
((ORecord<?>) iValue).save();
database.getTransaction().addIndexEntry(delegate, super.getName(), OPERATION.PUT, iKey, iValue);
return this;
}
@Override
public boolean remove(final Object key) {
database.getTransaction().addIndexEntry(delegate, super.getName(), OPERATION.REMOVE, key, null);
return true;
}
@Override
public boolean remove(final Object iKey, final OIdentifiable iRID) {
database.getTransaction().addIndexEntry(delegate, super.getName(), OPERATION.REMOVE, iKey, iRID);
return true;
}
@Override
public OIndexTxAware<T> clear() {
database.getTransaction().addIndexEntry(delegate, super.getName(), OPERATION.CLEAR, null, null);
return this;
}
@Override
public void unload() {
database.getTransaction().clearIndexEntries();
super.unload();
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexTxAware.java
|
185 |
new Thread(){
public void run() {
for (int i=0; i<5; i++){
tempList.add("item" + i);
}
tempList.add("done");
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_collections_ClientListTest.java
|
262 |
public interface EmailService {
public boolean sendTemplateEmail(String emailAddress, EmailInfo emailInfo, HashMap<String,Object> props);
public boolean sendTemplateEmail(EmailTarget emailTarget, EmailInfo emailInfo, HashMap<String,Object> props);
public boolean sendBasicEmail(EmailInfo emailInfo, EmailTarget emailTarget, HashMap<String,Object> props);
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_email_service_EmailService.java
|
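A hypothetical call-site sketch for the interface above; the emailService and emailInfo references and the template property keys are illustrative assumptions, not part of the contract:

HashMap<String, Object> props = new HashMap<String, Object>();
props.put("customerName", "Jane Doe"); // assumed template variable
props.put("orderNumber", "10042");     // assumed template variable
boolean sent = emailService.sendTemplateEmail("jane@example.com", emailInfo, props);
if (!sent) {
    // the API reports only a boolean, so retry/logging policy lives at the call site
}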
2,675 |
public class NodeEnvironment extends AbstractComponent {
private final File[] nodeFiles;
private final File[] nodeIndicesLocations;
private final Lock[] locks;
private final int localNodeId;
@Inject
public NodeEnvironment(Settings settings, Environment environment) {
super(settings);
if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) {
nodeFiles = null;
nodeIndicesLocations = null;
locks = null;
localNodeId = -1;
return;
}
File[] nodesFiles = new File[environment.dataWithClusterFiles().length];
Lock[] locks = new Lock[environment.dataWithClusterFiles().length];
int localNodeId = -1;
IOException lastException = null;
int maxLocalStorageNodes = settings.getAsInt("node.max_local_storage_nodes", 50);
for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) {
for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) {
File dir = new File(new File(environment.dataWithClusterFiles()[dirIndex], "nodes"), Integer.toString(possibleLockId));
if (!dir.exists()) {
FileSystemUtils.mkdirs(dir);
}
logger.trace("obtaining node lock on {} ...", dir.getAbsolutePath());
try {
NativeFSLockFactory lockFactory = new NativeFSLockFactory(dir);
Lock tmpLock = lockFactory.makeLock("node.lock");
boolean obtained = tmpLock.obtain();
if (obtained) {
locks[dirIndex] = tmpLock;
nodesFiles[dirIndex] = dir;
localNodeId = possibleLockId;
} else {
logger.trace("failed to obtain node lock on {}", dir.getAbsolutePath());
// release all the ones that were obtained up until now
for (int i = 0; i < locks.length; i++) {
if (locks[i] != null) {
try {
locks[i].release();
} catch (Exception e1) {
// ignore
}
}
locks[i] = null;
}
break;
}
} catch (IOException e) {
logger.trace("failed to obtain node lock on {}", e, dir.getAbsolutePath());
lastException = new IOException("failed to obtain lock on " + dir.getAbsolutePath(), e);
// release all the ones that were obtained up until now
for (int i = 0; i < locks.length; i++) {
if (locks[i] != null) {
try {
locks[i].release();
} catch (Exception e1) {
// ignore
}
}
locks[i] = null;
}
break;
}
}
if (locks[0] != null) {
// we found a lock, break
break;
}
}
if (locks[0] == null) {
throw new ElasticsearchIllegalStateException("Failed to obtain node lock, is the following location writable?: " + Arrays.toString(environment.dataWithClusterFiles()), lastException);
}
this.localNodeId = localNodeId;
this.locks = locks;
this.nodeFiles = nodesFiles;
if (logger.isDebugEnabled()) {
logger.debug("using node location [{}], local_node_id [{}]", nodesFiles, localNodeId);
}
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("node data locations details:\n");
for (File file : nodesFiles) {
sb.append(" -> ").append(file.getAbsolutePath()).append(", free_space [").append(new ByteSizeValue(file.getFreeSpace())).append("], usable_space [").append(new ByteSizeValue(file.getUsableSpace())).append("]\n");
}
logger.trace(sb.toString());
}
this.nodeIndicesLocations = new File[nodeFiles.length];
for (int i = 0; i < nodeFiles.length; i++) {
nodeIndicesLocations[i] = new File(nodeFiles[i], "indices");
}
}
public int localNodeId() {
return this.localNodeId;
}
public boolean hasNodeFile() {
return nodeFiles != null && locks != null;
}
public File[] nodeDataLocations() {
if (nodeFiles == null || locks == null) {
throw new ElasticsearchIllegalStateException("node is not configured to store local location");
}
return nodeFiles;
}
public File[] indicesLocations() {
return nodeIndicesLocations;
}
public File[] indexLocations(Index index) {
File[] indexLocations = new File[nodeFiles.length];
for (int i = 0; i < nodeFiles.length; i++) {
indexLocations[i] = new File(new File(nodeFiles[i], "indices"), index.name());
}
return indexLocations;
}
public File[] shardLocations(ShardId shardId) {
File[] shardLocations = new File[nodeFiles.length];
for (int i = 0; i < nodeFiles.length; i++) {
shardLocations[i] = new File(new File(new File(nodeFiles[i], "indices"), shardId.index().name()), Integer.toString(shardId.id()));
}
return shardLocations;
}
public Set<String> findAllIndices() throws Exception {
if (nodeFiles == null || locks == null) {
throw new ElasticsearchIllegalStateException("node is not configured to store local location");
}
Set<String> indices = Sets.newHashSet();
for (File indicesLocation : nodeIndicesLocations) {
File[] indicesList = indicesLocation.listFiles();
if (indicesList == null) {
continue;
}
for (File indexLocation : indicesList) {
if (indexLocation.isDirectory()) {
indices.add(indexLocation.getName());
}
}
}
return indices;
}
public Set<ShardId> findAllShardIds() throws Exception {
if (nodeFiles == null || locks == null) {
throw new ElasticsearchIllegalStateException("node is not configured to store local location");
}
Set<ShardId> shardIds = Sets.newHashSet();
for (File indicesLocation : nodeIndicesLocations) {
File[] indicesList = indicesLocation.listFiles();
if (indicesList == null) {
continue;
}
for (File indexLocation : indicesList) {
if (!indexLocation.isDirectory()) {
continue;
}
String indexName = indexLocation.getName();
File[] shardsList = indexLocation.listFiles();
if (shardsList == null) {
continue;
}
for (File shardLocation : shardsList) {
if (!shardLocation.isDirectory()) {
continue;
}
Integer shardId = Ints.tryParse(shardLocation.getName());
if (shardId != null) {
shardIds.add(new ShardId(indexName, shardId));
}
}
}
}
return shardIds;
}
public void close() {
if (locks != null) {
for (Lock lock : locks) {
try {
logger.trace("releasing lock [{}]", lock);
lock.release();
} catch (IOException e) {
logger.trace("failed to release lock [{}]", e, lock);
}
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_env_NodeEnvironment.java
|
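The constructor above claims a node slot by locking a "nodes/<id>" directory, which is what lets several nodes share one data root. A JDK-only sketch of the same idea using java.nio file locks (hypothetical paths; the real code uses Lucene's NativeFSLockFactory):

import java.io.File;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;

final class NodeLockDemo {
    static int acquireNodeId(File dataRoot, int maxNodes) throws Exception {
        for (int id = 0; id < maxNodes; id++) {
            File dir = new File(new File(dataRoot, "nodes"), Integer.toString(id));
            dir.mkdirs();
            RandomAccessFile raf = new RandomAccessFile(new File(dir, "node.lock"), "rw");
            FileLock lock = raf.getChannel().tryLock();
            if (lock != null) {
                return id; // lock (and file handle) intentionally held for the process lifetime
            }
            raf.close();   // slot owned by another process, try the next id
        }
        throw new IllegalStateException("no free node slot under " + dataRoot);
    }
}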
5,128 |
aggregators[i] = new Aggregator(first.name(), BucketAggregationMode.MULTI_BUCKETS, AggregatorFactories.EMPTY, 1, first.context(), first.parent()) {
ObjectArray<Aggregator> aggregators;
{
aggregators = BigArrays.newObjectArray(estimatedBucketsCount, context.pageCacheRecycler());
aggregators.set(0, first);
for (long i = 1; i < estimatedBucketsCount; ++i) {
aggregators.set(i, createAndRegisterContextAware(parent.context(), factory, parent, estimatedBucketsCount));
}
}
@Override
public boolean shouldCollect() {
return first.shouldCollect();
}
@Override
protected void doPostCollection() {
for (long i = 0; i < aggregators.size(); ++i) {
final Aggregator aggregator = aggregators.get(i);
if (aggregator != null) {
aggregator.postCollection();
}
}
}
@Override
public void collect(int doc, long owningBucketOrdinal) throws IOException {
aggregators = BigArrays.grow(aggregators, owningBucketOrdinal + 1);
Aggregator aggregator = aggregators.get(owningBucketOrdinal);
if (aggregator == null) {
aggregator = createAndRegisterContextAware(parent.context(), factory, parent, estimatedBucketsCount);
aggregators.set(owningBucketOrdinal, aggregator);
}
aggregator.collect(doc, 0);
}
@Override
public void setNextReader(AtomicReaderContext reader) {
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
return aggregators.get(owningBucketOrdinal).buildAggregation(0);
}
@Override
public InternalAggregation buildEmptyAggregation() {
return first.buildEmptyAggregation();
}
@Override
public void doRelease() {
Releasables.release(aggregators);
}
};
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_AggregatorFactories.java
|
100 |
public class OUnsafeMemoryJava7 extends OUnsafeMemory {
@Override
public byte[] get(long pointer, final int length) {
final byte[] result = new byte[length];
unsafe.copyMemory(null, pointer, result, unsafe.arrayBaseOffset(byte[].class), length);
return result;
}
@Override
public void get(long pointer, byte[] array, int arrayOffset, int length) {
// copy length bytes from native memory at `pointer` into array[arrayOffset..);
// arrayOffset only positions the destination within the array, mirroring set()
// below, so the source pointer must not be shifted by it as well
unsafe.copyMemory(null, pointer, array, arrayOffset + unsafe.arrayBaseOffset(byte[].class), length);
}
@Override
public void set(long pointer, byte[] content, int arrayOffset, int length) {
unsafe.copyMemory(content, unsafe.arrayBaseOffset(byte[].class) + arrayOffset, null, pointer, length);
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_directmemory_OUnsafeMemoryJava7.java
|
174 |
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
| 0true
|
src_main_java_jsr166y_ForkJoinTask.java
|
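The privileged action above is normally invoked through AccessController.doPrivileged after first trying the direct accessor; this is the standard JSR-166 bootstrap idiom (reproduced from memory as a sketch, naming the "theUnsafe" field directly instead of scanning all declared fields):

private static sun.misc.Unsafe getUnsafe() {
    try {
        return sun.misc.Unsafe.getUnsafe(); // succeeds only for bootstrap/privileged classes
    } catch (SecurityException se) {
        try {
            return java.security.AccessController.doPrivileged(
                new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
                    public sun.misc.Unsafe run() throws Exception {
                        java.lang.reflect.Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
                        f.setAccessible(true);
                        return (sun.misc.Unsafe) f.get(null);
                    }
                });
        } catch (java.security.PrivilegedActionException e) {
            throw new RuntimeException("Could not initialize intrinsics", e.getCause());
        }
    }
}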
570 |
@PreInitializeConfigOptions
public class KCVSLogManager implements LogManager {
private static final Logger log =
LoggerFactory.getLogger(KCVSLogManager.class);
public static final ConfigOption<Boolean> LOG_FIXED_PARTITION = new ConfigOption<Boolean>(LOG_NS,"fixed-partition",
"Whether all log entries are written to one fixed partition even if the backend store is partitioned." +
"This can cause imbalanced loads and should only be used on low volume logs",
ConfigOption.Type.GLOBAL_OFFLINE, false);
public static final ConfigOption<Integer> LOG_MAX_PARTITIONS = new ConfigOption<Integer>(LOG_NS,"max-partitions",
"The maximum number of partitions to use for logging. Setting up this many actual or virtual partitions. Must be bigger than 1" +
"and a power of 2.",
ConfigOption.Type.FIXED, Integer.class, new Predicate<Integer>() {
@Override
public boolean apply(@Nullable Integer integer) {
return integer!=null && integer>1 && NumberUtil.isPowerOf2(integer);
}
});
/**
* Configuration of this log manager
*/
private final Configuration configuration;
/**
* Store Manager against which to open the {@link KeyColumnValueStore}s to wrap the {@link KCVSLog} around.
*/
final KeyColumnValueStoreManager storeManager;
/**
* Id which uniquely identifies this instance. Also see {@link GraphDatabaseConfiguration#UNIQUE_INSTANCE_ID}.
*/
final String senderId;
/**
* The number of first bits of the key that identifies a partition. If this number is X then there are 2^X different
* partition blocks each of which is identified by a partition id.
*/
final int partitionBitWidth;
/**
* A collection of partition ids to which the logs write in round-robin fashion.
*/
final int[] defaultWritePartitionIds;
/**
* A collection of partition ids from which the readers will read concurrently.
*/
final int[] readPartitionIds;
/**
* Serializer used to (de)-serialize the log messages
*/
final Serializer serializer;
/**
* Keeps track of all open logs
*/
private final Map<String,KCVSLog> openLogs;
/**
* Opens a log manager against the provided KCVS store with the given configuration.
* @param storeManager
* @param config
*/
public KCVSLogManager(final KeyColumnValueStoreManager storeManager, final Configuration config) {
this(storeManager, config, null);
}
/**
* Opens a log manager against the provided KCVS store with the given configuration. Also provided is a list
* of read-partition-ids. These only apply when readers are registered against an opened log. In that case,
* the readers only read from the provided list of partition ids.
* @param storeManager
* @param config
* @param readPartitionIds
*/
public KCVSLogManager(KeyColumnValueStoreManager storeManager, final Configuration config,
final int[] readPartitionIds) {
Preconditions.checkArgument(storeManager!=null && config!=null);
if (config.has(LOG_STORE_TTL)) {
if (TTLKVCSManager.supportsStoreTTL(storeManager)) {
storeManager = new TTLKVCSManager(storeManager, ConversionHelper.getTTLSeconds(config.get(LOG_STORE_TTL)));
} else {
log.warn("Log is configured with TTL but underlying storage backend does not support TTL, hence this" +
"configuration option is ignored and entries must be manually removed from the backend.");
}
}
this.storeManager = storeManager;
this.configuration = config;
openLogs = new HashMap<String, KCVSLog>();
this.senderId=config.get(GraphDatabaseConfiguration.UNIQUE_INSTANCE_ID);
Preconditions.checkNotNull(senderId);
if (config.get(CLUSTER_PARTITION)) {
ConfigOption<Integer> maxPartitionConfig = config.has(LOG_MAX_PARTITIONS)?
LOG_MAX_PARTITIONS:CLUSTER_MAX_PARTITIONS;
int maxPartitions = config.get(maxPartitionConfig);
Preconditions.checkArgument(maxPartitions<=config.get(CLUSTER_MAX_PARTITIONS),
"Number of log partitions cannot be larger than number of cluster partitions");
this.partitionBitWidth= NumberUtil.getPowerOf2(maxPartitions);
} else {
this.partitionBitWidth=0;
}
Preconditions.checkArgument(partitionBitWidth>=0 && partitionBitWidth<32);
final int numPartitions = (1<<partitionBitWidth);
//Partitioning
if (config.get(CLUSTER_PARTITION) && !config.get(LOG_FIXED_PARTITION)) {
//Write partitions - default initialization: writing to all partitions
int[] writePartitions = new int[numPartitions];
for (int i=0;i<numPartitions;i++) writePartitions[i]=i;
if (storeManager.getFeatures().hasLocalKeyPartition()) {
//Write only to local partitions
List<Integer> localPartitions = new ArrayList<Integer>();
try {
List<PartitionIDRange> partitionRanges = PartitionIDRange.getIDRanges(partitionBitWidth,
storeManager.getLocalKeyPartition());
for (PartitionIDRange idrange : partitionRanges) {
for (int p : idrange.getAllContainedIDs()) localPartitions.add(p);
}
} catch (Throwable e) {
log.error("Could not process local id partitions",e);
}
if (!localPartitions.isEmpty()) {
writePartitions = new int[localPartitions.size()];
for (int i=0;i<localPartitions.size();i++) writePartitions[i]=localPartitions.get(i);
}
}
this.defaultWritePartitionIds = writePartitions;
//Read partitions
if (readPartitionIds!=null && readPartitionIds.length>0) {
for (int readPartitionId : readPartitionIds) {
checkValidPartitionId(readPartitionId,partitionBitWidth);
}
this.readPartitionIds = Arrays.copyOf(readPartitionIds,readPartitionIds.length);
} else {
this.readPartitionIds=new int[numPartitions];
for (int i=0;i<numPartitions;i++) this.readPartitionIds[i]=i;
}
} else {
this.defaultWritePartitionIds=new int[]{0};
// note: with length==0 the readPartitionIds[0] access would throw ArrayIndexOutOfBoundsException;
// the only acceptable explicit configuration here is the single partition 0
Preconditions.checkArgument(readPartitionIds==null || (readPartitionIds.length==1 && readPartitionIds[0]==0),
"Cannot configure read partition ids on unpartitioned backend or with fixed partitions enabled");
this.readPartitionIds=new int[]{0};
}
this.serializer = new StandardSerializer(false);
}
private static void checkValidPartitionId(int partitionId, int partitionBitWidth) {
Preconditions.checkArgument(partitionId>=0 && partitionId<(1<<partitionBitWidth));
}
@Override
public synchronized KCVSLog openLog(final String name) throws BackendException {
if (openLogs.containsKey(name)) return openLogs.get(name);
KCVSLog log = new KCVSLog(name,this,storeManager.openDatabase(name),configuration);
openLogs.put(name,log);
return log;
}
/**
* Must be triggered by a particular {@link KCVSLog} when it is closed so that this log can be removed from the list
* of open logs.
* @param log
*/
synchronized void closedLog(KCVSLog log) {
KCVSLog l = openLogs.remove(log.getName());
assert l==log;
}
@Override
public synchronized void close() throws BackendException {
/* Copying the map is necessary to avoid ConcurrentModificationException.
* The path to ConcurrentModificationException in the absence of a copy is
* log.close() -> manager.closedLog(log) -> openLogs.remove(log.getName()).
*/
for (KCVSLog log : ImmutableMap.copyOf(openLogs).values()) log.close();
}
}
| 1no label
|
titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_log_kcvs_KCVSLogManager.java
|
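The partitionBitWidth arithmetic above assumes max-partitions is an exact power of two: with bit width X there are 2^X partitions. A small JDK-only sketch of the helpers (mirroring what NumberUtil presumably provides; the config predicate above additionally requires the value to be greater than 1):

final class PartitionMath {
    static boolean isPowerOf2(int n) {
        return n > 0 && (n & (n - 1)) == 0;
    }
    // log2 of an exact power of two
    static int getPowerOf2(int n) {
        if (!isPowerOf2(n)) throw new IllegalArgumentException("not a power of 2: " + n);
        return Integer.numberOfTrailingZeros(n);
    }
    public static void main(String[] args) {
        int bitWidth = getPowerOf2(8); // 3
        System.out.println(bitWidth + " bits -> " + (1 << bitWidth) + " partitions"); // 8 partitions
    }
}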
3,217 |
public class ReplicatedMapInitChunkOperation
extends AbstractReplicatedMapOperation
implements IdentifiedDataSerializable {
private String name;
private Member origin;
private ReplicatedRecord[] replicatedRecords;
private int recordCount;
private boolean finalChunk;
private boolean notYetReadyChooseSomeoneElse;
ReplicatedMapInitChunkOperation() {
}
public ReplicatedMapInitChunkOperation(String name, Member origin) {
this(name, origin, new ReplicatedRecord[0], 0, true);
this.notYetReadyChooseSomeoneElse = true;
}
// Findbugs warning suppressed since the array is serialized anyway and is never modified afterwards
@SuppressWarnings("EI_EXPOSE_REP")
public ReplicatedMapInitChunkOperation(String name, Member origin, ReplicatedRecord[] replicatedRecords, int recordCount,
boolean finalChunk) {
this.name = name;
this.origin = origin;
this.replicatedRecords = replicatedRecords;
this.recordCount = recordCount;
this.finalChunk = finalChunk;
}
public String getName() {
return name;
}
@Override
public void run()
throws Exception {
ReplicatedMapService replicatedMapService = getService();
AbstractReplicatedRecordStore recordStorage;
recordStorage = (AbstractReplicatedRecordStore) replicatedMapService.getReplicatedRecordStore(name, true);
ReplicationPublisher replicationPublisher = recordStorage.getReplicationPublisher();
if (notYetReadyChooseSomeoneElse) {
replicationPublisher.retryWithDifferentReplicationNode(origin);
} else {
for (int i = 0; i < recordCount; i++) {
ReplicatedRecord record = replicatedRecords[i];
Object key = record.getKey();
Object value = record.getValue();
VectorClock vectorClock = record.getVectorClock();
int updateHash = record.getLatestUpdateHash();
long ttlMillis = record.getTtlMillis();
ReplicationMessage update = new ReplicationMessage(name, key, value, vectorClock, origin, updateHash, ttlMillis);
replicationPublisher.queueUpdateMessage(update);
}
if (finalChunk) {
recordStorage.finalChunkReceived();
}
}
}
@Override
public int getFactoryId() {
return ReplicatedMapDataSerializerHook.F_ID;
}
@Override
public int getId() {
return ReplicatedMapDataSerializerHook.OP_INIT_CHUNK;
}
@Override
protected void writeInternal(ObjectDataOutput out)
throws IOException {
out.writeUTF(name);
origin.writeData(out);
out.writeInt(recordCount);
for (int i = 0; i < recordCount; i++) {
replicatedRecords[i].writeData(out);
}
out.writeBoolean(finalChunk);
}
@Override
protected void readInternal(ObjectDataInput in)
throws IOException {
name = in.readUTF();
origin = new MemberImpl();
origin.readData(in);
recordCount = in.readInt();
replicatedRecords = new ReplicatedRecord[recordCount];
for (int i = 0; i < recordCount; i++) {
ReplicatedRecord replicatedRecord = new ReplicatedRecord();
replicatedRecord.readData(in);
replicatedRecords[i] = replicatedRecord;
}
finalChunk = in.readBoolean();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_replicatedmap_operation_ReplicatedMapInitChunkOperation.java
|
3,246 |
abstract class DoubleValuesComparatorBase<T extends Number> extends NumberComparatorBase<T> {
protected final IndexNumericFieldData<?> indexFieldData;
protected final double missingValue;
protected double bottom;
protected DoubleValues readerValues;
protected final SortMode sortMode;
public DoubleValuesComparatorBase(IndexNumericFieldData<?> indexFieldData, double missingValue, SortMode sortMode) {
this.indexFieldData = indexFieldData;
this.missingValue = missingValue;
this.sortMode = sortMode;
}
@Override
public final int compareBottom(int doc) throws IOException {
final double v2 = sortMode.getRelevantValue(readerValues, doc, missingValue);
return compare(bottom, v2);
}
@Override
public final int compareDocToValue(int doc, T valueObj) throws IOException {
final double value = valueObj.doubleValue();
final double docValue = sortMode.getRelevantValue(readerValues, doc, missingValue);
return compare(docValue, value);
}
@Override
public final FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException {
readerValues = indexFieldData.load(context).getDoubleValues();
return this;
}
@Override
public int compareBottomMissing() {
return compare(bottom, missingValue);
}
static final int compare(double left, double right) {
return Double.compare(left, right);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_fielddata_fieldcomparator_DoubleValuesComparatorBase.java
|
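The compareBottom/compareBottomMissing pair above reduces to one rule: a document with no value for the sort field compares as if it held missingValue. An illustrative JDK-only sketch of that substitution:

final class MissingAwareCompare {
    // substitute the sentinel before delegating to the ordinary double comparison
    static int compare(Double docValue, double other, double missingValue) {
        double v = (docValue == null) ? missingValue : docValue;
        return Double.compare(v, other);
    }
    public static void main(String[] args) {
        // with missingValue = Double.MAX_VALUE, value-less docs sort last in ascending order
        System.out.println(compare(null, 1.0, Double.MAX_VALUE)); // 1
        System.out.println(compare(0.5, 1.0, Double.MAX_VALUE));  // -1
    }
}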
6,022 |
public static class Candidate {
public static final Candidate[] EMPTY = new Candidate[0];
public final BytesRef term;
public final double stringDistance;
public final long frequency;
public final double score;
public final boolean userInput;
public Candidate(BytesRef term, long frequency, double stringDistance, double score, boolean userInput) {
this.frequency = frequency;
this.term = term;
this.stringDistance = stringDistance;
this.score = score;
this.userInput = userInput;
}
@Override
public String toString() {
return "Candidate [term=" + term.utf8ToString() + ", stringDistance=" + stringDistance + ", frequency=" + frequency +
(userInput ? ", userInput" : "" ) + "]";
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((term == null) ? 0 : term.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Candidate other = (Candidate) obj;
if (term == null) {
if (other.term != null)
return false;
} else if (!term.equals(other.term))
return false;
return true;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_suggest_phrase_DirectCandidateGenerator.java
|
758 |
static final class Fields {
static final XContentBuilderString DOCS = new XContentBuilderString("docs");
static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
static final XContentBuilderString _ID = new XContentBuilderString("_id");
static final XContentBuilderString ERROR = new XContentBuilderString("error");
}
| 0true
|
src_main_java_org_elasticsearch_action_get_MultiGetResponse.java
|
3,426 |
nodeEngine.getExecutionService().execute(ExecutionService.SYSTEM_EXECUTOR, new Runnable() {
public void run() {
try {
((InitializingObject) object).initialize();
} catch (Exception e) {
getLogger().warning("Error while initializing proxy: " + object, e);
}
}
});
| 1no label
|
hazelcast_src_main_java_com_hazelcast_spi_impl_ProxyServiceImpl.java
|
3,228 |
public class ReplicatedRecord<K, V>
implements IdentifiedDataSerializable {
private final AtomicLong hits = new AtomicLong();
private final AtomicLong lastAccessTime = new AtomicLong();
private K key;
private V value;
private VectorClock vectorClock;
private int latestUpdateHash;
private long ttlMillis;
private volatile long updateTime = System.currentTimeMillis();
public ReplicatedRecord() {
}
public ReplicatedRecord(K key, V value, VectorClock vectorClock, int hash, long ttlMillis) {
this.key = key;
this.value = value;
this.vectorClock = vectorClock;
this.latestUpdateHash = hash;
this.ttlMillis = ttlMillis;
}
public K getKey() {
access();
return key;
}
public V getValue() {
access();
return value;
}
public VectorClock getVectorClock() {
return vectorClock;
}
public long getTtlMillis() {
return ttlMillis;
}
public V setValue(V value, int hash, long ttlMillis) {
access();
V oldValue = this.value;
this.value = value;
this.latestUpdateHash = hash;
this.updateTime = System.currentTimeMillis();
this.ttlMillis = ttlMillis;
return oldValue;
}
public long getUpdateTime() {
return updateTime;
}
public int getLatestUpdateHash() {
return latestUpdateHash;
}
public long getHits() {
return hits.get();
}
public long getLastAccessTime() {
return lastAccessTime.get();
}
public void access() {
hits.incrementAndGet();
lastAccessTime.set(System.currentTimeMillis());
}
@Override
public int getFactoryId() {
return ReplicatedMapDataSerializerHook.F_ID;
}
@Override
public int getId() {
return ReplicatedMapDataSerializerHook.RECORD;
}
@Override
public void writeData(ObjectDataOutput out)
throws IOException {
out.writeObject(key);
out.writeObject(value);
vectorClock.writeData(out);
out.writeInt(latestUpdateHash);
out.writeLong(ttlMillis);
}
@Override
public void readData(ObjectDataInput in)
throws IOException {
key = in.readObject();
value = in.readObject();
vectorClock = new VectorClock();
vectorClock.readData(in);
latestUpdateHash = in.readInt();
ttlMillis = in.readLong();
}
//CHECKSTYLE:OFF
// Deactivated due to complexity of the equals method
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ReplicatedRecord that = (ReplicatedRecord) o;
if (latestUpdateHash != that.latestUpdateHash) {
return false;
}
if (ttlMillis != that.ttlMillis) {
return false;
}
if (key != null ? !key.equals(that.key) : that.key != null) {
return false;
}
if (value != null ? !value.equals(that.value) : that.value != null) {
return false;
}
return true;
}
//CHECKSTYLE:ON
@Override
public int hashCode() {
int result = key != null ? key.hashCode() : 0;
result = 31 * result + (value != null ? value.hashCode() : 0);
result = 31 * result + latestUpdateHash;
result = 31 * result + (int) (ttlMillis ^ (ttlMillis >>> 32));
return result;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("ReplicatedRecord{");
sb.append("key=").append(key);
sb.append(", value=").append(value);
sb.append(", vector=").append(vectorClock);
sb.append(", latestUpdateHash=").append(latestUpdateHash);
sb.append(", ttlMillis=").append(ttlMillis);
sb.append('}');
return sb.toString();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_replicatedmap_record_ReplicatedRecord.java
|
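Why ReplicatedRecord carries latestUpdateHash: two vector clocks can be concurrent (neither strictly newer), yet every replica must still pick the same winner. A hedged sketch of that deterministic tie-break; the dominance flags would come from comparing the two vector clocks, whose real API is not shown here:

final class UpdateArbiter {
    static boolean remoteWins(boolean remoteDominates, boolean localDominates,
                              int remoteUpdateHash, int localUpdateHash) {
        if (remoteDominates) return true;          // remote clock strictly newer: apply it
        if (localDominates) return false;          // local clock strictly newer: keep ours
        return remoteUpdateHash > localUpdateHash; // concurrent: same tie-break on every node
    }
}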
5,966 |
public final class SuggestUtils {
public static Comparator<SuggestWord> LUCENE_FREQUENCY = new SuggestWordFrequencyComparator();
public static Comparator<SuggestWord> SCORE_COMPARATOR = SuggestWordQueue.DEFAULT_COMPARATOR;
private SuggestUtils() {
// utils!!
}
public static DirectSpellChecker getDirectSpellChecker(DirectSpellcheckerSettings suggestion) {
DirectSpellChecker directSpellChecker = new DirectSpellChecker();
directSpellChecker.setAccuracy(suggestion.accuracy());
Comparator<SuggestWord> comparator;
switch (suggestion.sort()) {
case SCORE:
comparator = SCORE_COMPARATOR;
break;
case FREQUENCY:
comparator = LUCENE_FREQUENCY;
break;
default:
throw new ElasticsearchIllegalArgumentException("Illegal suggest sort: " + suggestion.sort());
}
directSpellChecker.setComparator(comparator);
directSpellChecker.setDistance(suggestion.stringDistance());
directSpellChecker.setMaxEdits(suggestion.maxEdits());
directSpellChecker.setMaxInspections(suggestion.maxInspections());
directSpellChecker.setMaxQueryFrequency(suggestion.maxTermFreq());
directSpellChecker.setMinPrefix(suggestion.prefixLength());
directSpellChecker.setMinQueryLength(suggestion.minWordLength());
directSpellChecker.setThresholdFrequency(suggestion.minDocFreq());
directSpellChecker.setLowerCaseTerms(false);
return directSpellChecker;
}
public static BytesRef join(BytesRef separator, BytesRef result, BytesRef... toJoin) {
// n pieces need the n pieces themselves plus n - 1 separators
int len = separator.length * (toJoin.length - 1);
for (BytesRef br : toJoin) {
len += br.length;
}
result.grow(len);
return joinPreAllocated(separator, result, toJoin);
}
public static BytesRef joinPreAllocated(BytesRef separator, BytesRef result, BytesRef... toJoin) {
result.length = 0;
result.offset = 0;
for (int i = 0; i < toJoin.length - 1; i++) {
BytesRef br = toJoin[i];
System.arraycopy(br.bytes, br.offset, result.bytes, result.offset, br.length);
result.offset += br.length;
System.arraycopy(separator.bytes, separator.offset, result.bytes, result.offset, separator.length);
result.offset += separator.length;
}
final BytesRef br = toJoin[toJoin.length-1];
System.arraycopy(br.bytes, br.offset, result.bytes, result.offset, br.length);
result.length = result.offset + br.length;
result.offset = 0;
return result;
}
public static abstract class TokenConsumer {
protected CharTermAttribute charTermAttr;
protected PositionIncrementAttribute posIncAttr;
protected OffsetAttribute offsetAttr;
public void reset(TokenStream stream) {
charTermAttr = stream.addAttribute(CharTermAttribute.class);
posIncAttr = stream.addAttribute(PositionIncrementAttribute.class);
offsetAttr = stream.addAttribute(OffsetAttribute.class);
}
protected BytesRef fillBytesRef(BytesRef spare) {
spare.offset = 0;
spare.length = spare.bytes.length;
char[] source = charTermAttr.buffer();
UnicodeUtil.UTF16toUTF8(source, 0, charTermAttr.length(), spare);
return spare;
}
public abstract void nextToken() throws IOException;
public void end() {}
}
public static int analyze(Analyzer analyzer, BytesRef toAnalyze, String field, TokenConsumer consumer, CharsRef spare) throws IOException {
UnicodeUtil.UTF8toUTF16(toAnalyze, spare);
return analyze(analyzer, spare, field, consumer);
}
public static int analyze(Analyzer analyzer, CharsRef toAnalyze, String field, TokenConsumer consumer) throws IOException {
TokenStream ts = analyzer.tokenStream(
field, new FastCharArrayReader(toAnalyze.chars, toAnalyze.offset, toAnalyze.length)
);
return analyze(ts, consumer);
}
public static int analyze(TokenStream stream, TokenConsumer consumer) throws IOException {
stream.reset();
consumer.reset(stream);
int numTokens = 0;
while (stream.incrementToken()) {
consumer.nextToken();
numTokens++;
}
consumer.end();
stream.close();
return numTokens;
}
public static SuggestMode resolveSuggestMode(String suggestMode) {
suggestMode = suggestMode.toLowerCase(Locale.US);
if ("missing".equals(suggestMode)) {
return SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX;
} else if ("popular".equals(suggestMode)) {
return SuggestMode.SUGGEST_MORE_POPULAR;
} else if ("always".equals(suggestMode)) {
return SuggestMode.SUGGEST_ALWAYS;
} else {
throw new ElasticsearchIllegalArgumentException("Illegal suggest mode " + suggestMode);
}
}
public static Suggest.Suggestion.Sort resolveSort(String sortVal) {
if ("score".equals(sortVal)) {
return Suggest.Suggestion.Sort.SCORE;
} else if ("frequency".equals(sortVal)) {
return Suggest.Suggestion.Sort.FREQUENCY;
} else {
throw new ElasticsearchIllegalArgumentException("Illegal suggest sort " + sortVal);
}
}
public static StringDistance resolveDistance(String distanceVal) {
if ("internal".equals(distanceVal)) {
return DirectSpellChecker.INTERNAL_LEVENSHTEIN;
} else if ("damerau_levenshtein".equals(distanceVal) || "damerauLevenshtein".equals(distanceVal)) {
return new LuceneLevenshteinDistance();
} else if ("levenstein".equals(distanceVal)) {
return new LevensteinDistance();
//TODO Jaro and Winkler are 2 people - so apply same naming logic as damerau_levenshtein
} else if ("jarowinkler".equals(distanceVal)) {
return new JaroWinklerDistance();
} else if ("ngram".equals(distanceVal)) {
return new NGramDistance();
} else {
throw new ElasticsearchIllegalArgumentException("Illegal distance option " + distanceVal);
}
}
public static class Fields {
public static final ParseField STRING_DISTANCE = new ParseField("string_distance");
public static final ParseField SUGGEST_MODE = new ParseField("suggest_mode");
public static final ParseField MAX_EDITS = new ParseField("max_edits");
public static final ParseField MAX_INSPECTIONS = new ParseField("max_inspections");
// TODO some of these constants are the same as MLT constants and
// could be moved to a shared class for maintaining consistency across
// the platform
public static final ParseField MAX_TERM_FREQ = new ParseField("max_term_freq");
public static final ParseField PREFIX_LENGTH = new ParseField("prefix_length", "prefix_len");
public static final ParseField MIN_WORD_LENGTH = new ParseField("min_word_length", "min_word_len");
public static final ParseField MIN_DOC_FREQ = new ParseField("min_doc_freq");
public static final ParseField SHARD_SIZE = new ParseField("shard_size");
}
public static boolean parseDirectSpellcheckerSettings(XContentParser parser, String fieldName,
DirectSpellcheckerSettings suggestion) throws IOException {
if ("accuracy".equals(fieldName)) {
suggestion.accuracy(parser.floatValue());
} else if (Fields.SUGGEST_MODE.match(fieldName)) {
suggestion.suggestMode(SuggestUtils.resolveSuggestMode(parser.text()));
} else if ("sort".equals(fieldName)) {
suggestion.sort(SuggestUtils.resolveSort(parser.text()));
} else if (Fields.STRING_DISTANCE.match(fieldName)) {
suggestion.stringDistance(SuggestUtils.resolveDistance(parser.text()));
} else if (Fields.MAX_EDITS.match(fieldName)) {
suggestion.maxEdits(parser.intValue());
if (suggestion.maxEdits() < 1 || suggestion.maxEdits() > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
throw new ElasticsearchIllegalArgumentException("Illegal max_edits value " + suggestion.maxEdits());
}
} else if (Fields.MAX_INSPECTIONS.match(fieldName)) {
suggestion.maxInspections(parser.intValue());
} else if (Fields.MAX_TERM_FREQ.match(fieldName)) {
suggestion.maxTermFreq(parser.floatValue());
} else if (Fields.PREFIX_LENGTH.match(fieldName)) {
suggestion.prefixLength(parser.intValue());
} else if (Fields.MIN_WORD_LENGTH.match(fieldName)) {
suggestion.minQueryLength(parser.intValue());
} else if (Fields.MIN_DOC_FREQ.match(fieldName)) {
suggestion.minDocFreq(parser.floatValue());
} else {
return false;
}
return true;
}
public static boolean parseSuggestContext(XContentParser parser, MapperService mapperService, String fieldName,
SuggestionSearchContext.SuggestionContext suggestion) throws IOException {
if ("analyzer".equals(fieldName)) {
String analyzerName = parser.text();
Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
if (analyzer == null) {
throw new ElasticsearchIllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists");
}
suggestion.setAnalyzer(analyzer);
} else if ("field".equals(fieldName)) {
suggestion.setField(parser.text());
} else if ("size".equals(fieldName)) {
suggestion.setSize(parser.intValue());
} else if (Fields.SHARD_SIZE.match(fieldName)) {
suggestion.setShardSize(parser.intValue());
} else {
return false;
}
return true;
}
public static void verifySuggestion(MapperService mapperService, BytesRef globalText, SuggestionContext suggestion) {
// Verify options and set defaults
if (suggestion.getField() == null) {
throw new ElasticsearchIllegalArgumentException("The required field option is missing");
}
if (suggestion.getText() == null) {
if (globalText == null) {
throw new ElasticsearchIllegalArgumentException("The required text option is missing");
}
suggestion.setText(globalText);
}
if (suggestion.getAnalyzer() == null) {
suggestion.setAnalyzer(mapperService.searchAnalyzer());
}
if (suggestion.getShardSize() == -1) {
suggestion.setShardSize(Math.max(suggestion.getSize(), 5));
}
}
public static ShingleTokenFilterFactory.Factory getShingleFilterFactory(Analyzer analyzer) {
if (analyzer instanceof NamedAnalyzer) {
analyzer = ((NamedAnalyzer)analyzer).analyzer();
}
if (analyzer instanceof CustomAnalyzer) {
final CustomAnalyzer a = (CustomAnalyzer) analyzer;
final TokenFilterFactory[] tokenFilters = a.tokenFilters();
for (TokenFilterFactory tokenFilterFactory : tokenFilters) {
if (tokenFilterFactory instanceof ShingleTokenFilterFactory) {
return ((ShingleTokenFilterFactory)tokenFilterFactory).getInnerFactory();
} else if (tokenFilterFactory instanceof ShingleTokenFilterFactory.Factory) {
return (ShingleTokenFilterFactory.Factory) tokenFilterFactory;
}
}
}
return null;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_suggest_SuggestUtils.java
|
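The sizing in join() above follows from a simple identity: joining n pieces takes n - 1 separators, so the buffer needs sum(piece lengths) plus (n - 1) * separator.length bytes. A plain-String analogue of joinPreAllocated's loop:

final class JoinDemo {
    static String join(String sep, String... parts) {
        if (parts.length == 0) return "";
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < parts.length - 1; i++) {
            sb.append(parts[i]).append(sep); // every piece but the last gets a trailing separator
        }
        sb.append(parts[parts.length - 1]);  // last piece, no separator
        return sb.toString();
    }
    public static void main(String[] args) {
        System.out.println(join(" ", "quick", "brown", "fox")); // quick brown fox
    }
}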
288 |
public class OScriptOrientWrapper {
protected final ODatabase db;
public OScriptOrientWrapper() {
this.db = null;
}
public OScriptOrientWrapper(final ODatabase db) {
this.db = db;
}
public OScriptDocumentDatabaseWrapper getDatabase() {
if (db == null)
throw new OConfigurationException("No database instance found in context");
if (db instanceof ODatabaseDocumentTx)
return new OScriptDocumentDatabaseWrapper((ODatabaseDocumentTx) db);
if (db instanceof ODatabaseRecordTx)
return new OScriptDocumentDatabaseWrapper((ODatabaseRecordTx) db);
throw new OConfigurationException("No valid database instance found in context: " + db + ", class: " + db.getClass());
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_script_OScriptOrientWrapper.java
|
1,534 |
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private Direction direction;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.direction = Direction.valueOf(context.getConfiguration().get(DIRECTION));
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
if (this.direction.equals(IN) || this.direction.equals(BOTH)) {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(IN)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
value.getPaths(edge, true);
edgesProcessed++;
edge.clearPaths();
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_PROCESSED, edgesProcessed);
} else {
for (final Edge e : value.getEdges(IN)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
edge.clearPaths();
}
}
}
if (this.direction.equals(OUT) || this.direction.equals(BOTH)) {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
value.getPaths(edge, true);
edgesProcessed++;
edge.clearPaths();
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
} else {
for (final Edge e : value.getEdges(OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
edge.clearPaths();
}
}
}
context.write(NullWritable.get(), value);
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_EdgesVerticesMap.java
|
2,819 |
public class InternalPartitionServiceImpl implements InternalPartitionService, ManagedService,
EventPublishingService<MigrationEvent, MigrationListener> {
private final Node node;
private final NodeEngineImpl nodeEngine;
private final ILogger logger;
private final int partitionCount;
private final InternalPartitionImpl[] partitions;
private final PartitionReplicaVersions[] replicaVersions;
private final AtomicReferenceArray<ReplicaSyncInfo> replicaSyncRequests;
private final EntryTaskScheduler<Integer, ReplicaSyncInfo> replicaSyncScheduler;
private final AtomicInteger replicaSyncProcessCount = new AtomicInteger();
private final MigrationThread migrationThread;
private final long partitionMigrationInterval;
private final long partitionMigrationTimeout;
private final PartitionStateGenerator partitionStateGenerator;
private final MemberGroupFactory memberGroupFactory;
private final PartitionServiceProxy proxy;
private final Lock lock = new ReentrantLock();
private final AtomicInteger stateVersion = new AtomicInteger();
private final BlockingQueue<Runnable> migrationQueue = new LinkedBlockingQueue<Runnable>();
private final AtomicBoolean migrationActive = new AtomicBoolean(true);
private final AtomicLong lastRepartitionTime = new AtomicLong();
private final SystemLogService systemLogService;
// can be read and written concurrently...
private volatile int memberGroupsSize;
// updates will be done under lock, but reads will be multithreaded.
private volatile boolean initialized;
// updates will be done under lock, but reads will be multithreaded.
private final ConcurrentMap<Integer, MigrationInfo> activeMigrations
= new ConcurrentHashMap<Integer, MigrationInfo>(3, 0.75f, 1);
// both reads and updates will be done under lock!
private final LinkedList<MigrationInfo> completedMigrations = new LinkedList<MigrationInfo>();
public InternalPartitionServiceImpl(Node node) {
this.partitionCount = node.groupProperties.PARTITION_COUNT.getInteger();
this.node = node;
this.nodeEngine = node.nodeEngine;
this.logger = node.getLogger(InternalPartitionService.class);
this.systemLogService = node.getSystemLogService();
this.partitions = new InternalPartitionImpl[partitionCount];
PartitionListener partitionListener = new LocalPartitionListener(this, node.getThisAddress());
for (int i = 0; i < partitionCount; i++) {
this.partitions[i] = new InternalPartitionImpl(i, partitionListener);
}
replicaVersions = new PartitionReplicaVersions[partitionCount];
for (int i = 0; i < replicaVersions.length; i++) {
replicaVersions[i] = new PartitionReplicaVersions(i);
}
memberGroupFactory = MemberGroupFactoryFactory.newMemberGroupFactory(node.getConfig().getPartitionGroupConfig());
partitionStateGenerator = new PartitionStateGeneratorImpl();
partitionMigrationInterval = node.groupProperties.PARTITION_MIGRATION_INTERVAL.getLong() * 1000;
// partitionMigrationTimeout is 1.5 times the configured timeout
partitionMigrationTimeout = (long) (node.groupProperties.PARTITION_MIGRATION_TIMEOUT.getLong() * 1.5f);
migrationThread = new MigrationThread(node);
proxy = new PartitionServiceProxy(this);
replicaSyncRequests = new AtomicReferenceArray<ReplicaSyncInfo>(new ReplicaSyncInfo[partitionCount]);
ScheduledExecutorService scheduledExecutor = nodeEngine.getExecutionService()
.getDefaultScheduledExecutor();
replicaSyncScheduler = EntryTaskSchedulerFactory.newScheduler(scheduledExecutor,
new ReplicaSyncEntryProcessor(this), ScheduleType.SCHEDULE_IF_NEW);
}
@Override
public void init(NodeEngine nodeEngine, Properties properties) {
migrationThread.start();
int partitionTableSendInterval = node.groupProperties.PARTITION_TABLE_SEND_INTERVAL.getInteger();
if (partitionTableSendInterval <= 0) {
partitionTableSendInterval = 1;
}
ExecutionService executionService = nodeEngine.getExecutionService();
executionService.scheduleAtFixedRate(new SendClusterStateTask(),
partitionTableSendInterval, partitionTableSendInterval, TimeUnit.SECONDS);
int backupSyncCheckInterval = node.groupProperties.PARTITION_BACKUP_SYNC_INTERVAL.getInteger();
if (backupSyncCheckInterval <= 0) {
backupSyncCheckInterval = 1;
}
executionService.scheduleWithFixedDelay(new SyncReplicaVersionTask(),
backupSyncCheckInterval, backupSyncCheckInterval, TimeUnit.SECONDS);
}
@Override
public Address getPartitionOwner(int partitionId) {
if (!initialized) {
firstArrangement();
}
if (partitions[partitionId].getOwnerOrNull() == null && !node.isMaster() && node.joined()) {
notifyMasterToAssignPartitions();
}
return partitions[partitionId].getOwnerOrNull();
}
private void notifyMasterToAssignPartitions() {
if (lock.tryLock()) {
try {
if (!initialized && !node.isMaster() && node.getMasterAddress() != null && node.joined()) {
Future f = nodeEngine.getOperationService().createInvocationBuilder(SERVICE_NAME, new AssignPartitions(),
node.getMasterAddress()).setTryCount(1).invoke();
f.get(1, TimeUnit.SECONDS);
}
} catch (Exception e) {
logger.finest(e);
} finally {
lock.unlock();
}
}
}
@Override
public void firstArrangement() {
if (!node.isMaster() || !node.isActive()) {
return;
}
if (!initialized) {
lock.lock();
try {
if (initialized) {
return;
}
PartitionStateGenerator psg = partitionStateGenerator;
logger.info("Initializing cluster partition table first arrangement...");
final Set<Member> members = node.getClusterService().getMembers();
Collection<MemberGroup> memberGroups = memberGroupFactory.createMemberGroups(members);
Address[][] newState = psg.initialize(memberGroups, partitionCount);
if (newState != null) {
for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
InternalPartitionImpl partition = partitions[partitionId];
Address[] replicas = newState[partitionId];
partition.setPartitionInfo(replicas);
}
initialized = true;
publishPartitionRuntimeState();
}
} finally {
lock.unlock();
}
}
}
private void updateMemberGroupsSize() {
Set<Member> members = node.getClusterService().getMembers();
final Collection<MemberGroup> groups = memberGroupFactory.createMemberGroups(members);
int size = 0;
for (MemberGroup group : groups) {
if (group.size() > 0) {
size++;
}
}
memberGroupsSize = size;
}
@Override
public int getMemberGroupsSize() {
int size = memberGroupsSize;
// size = 0 means service is not initialized yet.
// return 1 instead since there should be at least one member group
return size > 0 ? size : 1;
}
public void memberAdded(MemberImpl member) {
if (!member.localMember()) {
updateMemberGroupsSize();
}
if (node.isMaster() && node.isActive()) {
lock.lock();
try {
migrationQueue.clear();
if (initialized) {
migrationQueue.add(new RepartitioningTask());
// send initial partition table to newly joined node.
Collection<MemberImpl> members = node.clusterService.getMemberList();
PartitionStateOperation op = new PartitionStateOperation(createPartitionState(members));
nodeEngine.getOperationService().send(op, member.getAddress());
}
} finally {
lock.unlock();
}
}
}
public void memberRemoved(final MemberImpl member) {
updateMemberGroupsSize();
final Address deadAddress = member.getAddress();
final Address thisAddress = node.getThisAddress();
if (deadAddress == null || deadAddress.equals(thisAddress)) {
return;
}
lock.lock();
try {
migrationQueue.clear();
if (!activeMigrations.isEmpty()) {
if (node.isMaster()) {
rollbackActiveMigrationsFromPreviousMaster(node.getLocalMember().getUuid());
}
for (MigrationInfo migrationInfo : activeMigrations.values()) {
if (deadAddress.equals(migrationInfo.getSource()) || deadAddress.equals(migrationInfo.getDestination())) {
migrationInfo.invalidate();
}
}
}
// Pause migration and let all other members notice the dead member
// and fix their own partitions.
// Otherwise the new master may act too quickly and send a new partition state
// before other members have noticed the dead one.
pauseMigration();
for (InternalPartitionImpl partition : partitions) {
boolean promote = false;
if (deadAddress.equals(partition.getOwnerOrNull()) && thisAddress.equals(partition.getReplicaAddress(1))) {
promote = true;
}
// shift partition table up.
partition.onDeadAddress(deadAddress);
// safety check!
if (partition.onDeadAddress(deadAddress)) {
throw new IllegalStateException("Duplicate address found in partition replicas!");
}
if (promote) {
final Operation op = new PromoteFromBackupOperation();
op.setPartitionId(partition.getPartitionId())
.setNodeEngine(nodeEngine)
.setValidateTarget(false)
.setService(this);
nodeEngine.getOperationService().executeOperation(op);
}
}
if (node.isMaster() && initialized) {
migrationQueue.add(new RepartitioningTask());
}
// Add a delay before activating migration, to give other nodes time to notice the dead one.
long migrationActivationDelay = node.groupProperties.CONNECTION_MONITOR_INTERVAL.getLong()
* node.groupProperties.CONNECTION_MONITOR_MAX_FAULTS.getInteger() * 5;
long callTimeout = node.groupProperties.OPERATION_CALL_TIMEOUT_MILLIS.getLong();
// delay should be smaller than call timeout, otherwise operations may fail because of invalid partition table
migrationActivationDelay = Math.min(migrationActivationDelay, callTimeout / 2);
migrationActivationDelay = Math.max(migrationActivationDelay, 1000L);
nodeEngine.getExecutionService().schedule(new Runnable() {
@Override
public void run() {
resumeMigration();
}
}, migrationActivationDelay, TimeUnit.MILLISECONDS);
} finally {
lock.unlock();
}
}
private void rollbackActiveMigrationsFromPreviousMaster(final String currentMasterUuid) {
lock.lock();
try {
if (!activeMigrations.isEmpty()) {
for (MigrationInfo migrationInfo : activeMigrations.values()) {
if (!currentMasterUuid.equals(migrationInfo.getMasterUuid())) {
// There is still a possibility that the other endpoint commits the migration
// while this node rolls it back!
logger.info("Rolling-back migration initiated by the old master -> " + migrationInfo);
finalizeActiveMigration(migrationInfo);
}
}
}
} finally {
lock.unlock();
}
}
private PartitionRuntimeState createPartitionState(Collection<MemberImpl> members) {
lock.lock();
try {
List<MemberInfo> memberInfos = new ArrayList<MemberInfo>(members.size());
for (MemberImpl member : members) {
MemberInfo memberInfo = new MemberInfo(member.getAddress(), member.getUuid(), member.getAttributes());
memberInfos.add(memberInfo);
}
ArrayList<MigrationInfo> migrationInfos = new ArrayList<MigrationInfo>(completedMigrations);
final long clusterTime = node.getClusterService().getClusterTime();
ILogger logger = node.getLogger(PartitionRuntimeState.class);
return new PartitionRuntimeState(
logger, memberInfos, partitions, migrationInfos, clusterTime, stateVersion.get());
} finally {
lock.unlock();
}
}
private void publishPartitionRuntimeState() {
if (!initialized) {
// do not send partition state until initialized!
return;
}
if (!node.isMaster() || !node.isActive() || !node.joined()) {
return;
}
if (!migrationActive.get()) {
// migration is disabled because of a member leave, wait till enabled!
return;
}
lock.lock();
try {
Collection<MemberImpl> members = node.clusterService.getMemberList();
PartitionRuntimeState partitionState = createPartitionState(members);
PartitionStateOperation op = new PartitionStateOperation(partitionState);
OperationService operationService = nodeEngine.getOperationService();
for (MemberImpl member : members) {
if (!member.localMember()) {
try {
operationService.send(op, member.getAddress());
} catch (Exception e) {
logger.finest(e);
}
}
}
} finally {
lock.unlock();
}
}
private void syncPartitionRuntimeState() {
syncPartitionRuntimeState(node.clusterService.getMemberList());
}
private void syncPartitionRuntimeState(Collection<MemberImpl> members) {
if (!initialized) {
// do not send partition state until initialized!
return;
}
if (!node.isMaster() || !node.isActive() || !node.joined()) {
return;
}
lock.lock();
try {
PartitionRuntimeState partitionState = createPartitionState(members);
OperationService operationService = nodeEngine.getOperationService();
List<Future> calls = new ArrayList<Future>(members.size());
for (MemberImpl member : members) {
if (!member.localMember()) {
try {
PartitionStateOperation op = new PartitionStateOperation(partitionState, true);
Future<Object> f = operationService
.invokeOnTarget(SERVICE_NAME, op, member.getAddress());
calls.add(f);
} catch (Exception e) {
logger.finest(e);
}
}
}
for (Future f : calls) {
try {
f.get(3, TimeUnit.SECONDS);
} catch (Exception e) {
logger.info("Partition state sync invocation timed out: " + e);
}
}
} finally {
lock.unlock();
}
}
void processPartitionRuntimeState(PartitionRuntimeState partitionState) {
lock.lock();
try {
if (!node.isActive() || !node.joined()) {
if (logger.isFinestEnabled()) {
logger.finest("Node should be active(" + node.isActive() + ") and joined(" + node.joined()
+ ") to be able to process partition table!");
}
return;
}
final Address sender = partitionState.getEndpoint();
final Address master = node.getMasterAddress();
if (node.isMaster()) {
logger.warning("This is the master node and received a PartitionRuntimeState from "
+ sender + ". Ignoring incoming state! ");
return;
} else {
if (sender == null || !sender.equals(master)) {
if (node.clusterService.getMember(sender) == null) {
logger.severe("Received a ClusterRuntimeState from an unknown member!"
+ " => Sender: " + sender + ", Master: " + master + "! ");
return;
} else {
logger.warning("Received a ClusterRuntimeState, but its sender doesn't seem to be master!"
+ " => Sender: " + sender + ", Master: " + master + "! "
+ "(Ignore if master node has changed recently.)");
}
}
}
final Set<Address> unknownAddresses = new HashSet<Address>();
PartitionInfo[] state = partitionState.getPartitions();
for (int partitionId = 0; partitionId < state.length; partitionId++) {
PartitionInfo partitionInfo = state[partitionId];
InternalPartitionImpl currentPartition = partitions[partitionId];
for (int index = 0; index < InternalPartition.MAX_REPLICA_COUNT; index++) {
Address address = partitionInfo.getReplicaAddress(index);
if (address != null && getMember(address) == null) {
if (logger.isFinestEnabled()) {
logger.finest(
"Unknown " + address + " found in partition table sent from master "
+ sender + ". It has probably already left the cluster. Partition: "
+ partitionId);
}
unknownAddresses.add(address);
}
}
// backup replicas will be assigned after active migrations are finalized.
currentPartition.setOwner(partitionInfo.getReplicaAddress(0));
}
if (!unknownAddresses.isEmpty() && logger.isLoggable(Level.WARNING)) {
StringBuilder s = new StringBuilder("Following unknown addresses are found in partition table")
.append(" sent from master[").append(sender).append("].")
.append(" (Probably they have recently joined or left the cluster.)")
.append(" {");
for (Address address : unknownAddresses) {
s.append("\n\t").append(address);
}
s.append("\n}");
logger.warning(s.toString());
}
Collection<MigrationInfo> completedMigrations = partitionState.getCompletedMigrations();
for (MigrationInfo completedMigration : completedMigrations) {
addCompletedMigration(completedMigration);
finalizeActiveMigration(completedMigration);
}
if (!activeMigrations.isEmpty()) {
final MemberImpl masterMember = getMasterMember();
rollbackActiveMigrationsFromPreviousMaster(masterMember.getUuid());
}
for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
InternalPartitionImpl partition = partitions[partitionId];
Address[] replicas = state[partitionId].getReplicaAddresses();
partition.setPartitionInfo(replicas);
}
stateVersion.set(partitionState.getVersion());
initialized = true;
} finally {
lock.unlock();
}
}
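    // Finalizes a migration on this member if it is the source or destination. The
    // containsKey check outside the lock is only a cheap fast-path; the decision is
    // re-validated with containsValue under the lock. If the migration is still being
    // processed, finalization is retried after a short delay.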
private void finalizeActiveMigration(final MigrationInfo migrationInfo) {
if (activeMigrations.containsKey(migrationInfo.getPartitionId())) {
lock.lock();
try {
if (activeMigrations.containsValue(migrationInfo)) {
if (migrationInfo.startProcessing()) {
try {
Address thisAddress = node.getThisAddress();
boolean source = thisAddress.equals(migrationInfo.getSource());
boolean destination = thisAddress.equals(migrationInfo.getDestination());
if (source || destination) {
int partitionId = migrationInfo.getPartitionId();
InternalPartitionImpl migratingPartition = getPartitionImpl(partitionId);
Address ownerAddress = migratingPartition.getOwnerOrNull();
boolean success = migrationInfo.getDestination().equals(ownerAddress);
MigrationEndpoint endpoint = source ? MigrationEndpoint.SOURCE : MigrationEndpoint.DESTINATION;
FinalizeMigrationOperation op = new FinalizeMigrationOperation(endpoint, success);
op.setPartitionId(partitionId)
.setNodeEngine(nodeEngine)
.setValidateTarget(false)
.setService(this);
nodeEngine.getOperationService().executeOperation(op);
}
} catch (Exception e) {
logger.warning(e);
} finally {
migrationInfo.doneProcessing();
}
} else {
logger.info("Scheduling finalization of " + migrationInfo
+ ", because migration process is currently running.");
nodeEngine.getExecutionService().schedule(new Runnable() {
@Override
public void run() {
finalizeActiveMigration(migrationInfo);
}
}, 3, TimeUnit.SECONDS);
}
}
} finally {
lock.unlock();
}
}
}
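    // Registers a migration as the active one for its partition. If another migration is
    // already registered, the conflict is resolved under the lock: a migration initiated
    // by a previous master, or one that is not being processed yet, is finalized and
    // replaced; otherwise two competing requests for the same partition are an error.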
void addActiveMigration(MigrationInfo migrationInfo) {
lock.lock();
try {
int partitionId = migrationInfo.getPartitionId();
partitions[partitionId].setMigrating(true);
MigrationInfo currentMigrationInfo = activeMigrations.putIfAbsent(partitionId, migrationInfo);
if (currentMigrationInfo != null) {
boolean oldMaster = false;
MigrationInfo oldMigration;
MigrationInfo newMigration;
MemberImpl masterMember = getMasterMember();
String master = masterMember.getUuid();
//todo: 2 different branches with the same content.
if (!master.equals(currentMigrationInfo.getMasterUuid())) {
// master changed
oldMigration = currentMigrationInfo;
newMigration = migrationInfo;
oldMaster = true;
} else if (!master.equals(migrationInfo.getMasterUuid())) {
// master changed
oldMigration = migrationInfo;
newMigration = currentMigrationInfo;
oldMaster = true;
} else if (!currentMigrationInfo.isProcessing() && migrationInfo.isProcessing()) {
// new migration arrived before partition state!
oldMigration = currentMigrationInfo;
newMigration = migrationInfo;
} else {
String message = "Something is seriously wrong! There are two migration requests for the "
+ "same partition! First-> " + currentMigrationInfo + ", Second -> " + migrationInfo;
IllegalStateException error = new IllegalStateException(message);
logger.severe(message, error);
throw error;
}
if (oldMaster) {
logger.info("Finalizing migration instantiated by the old master -> " + oldMigration);
} else {
if (logger.isFinestEnabled()) {
logger.finest("Finalizing previous migration -> " + oldMigration);
}
}
finalizeActiveMigration(oldMigration);
activeMigrations.put(partitionId, newMigration);
}
} finally {
lock.unlock();
}
}
private MemberImpl getMasterMember() {
return node.clusterService.getMember(node.getMasterAddress());
}
MigrationInfo getActiveMigration(int partitionId) {
return activeMigrations.get(partitionId);
}
MigrationInfo removeActiveMigration(int partitionId) {
partitions[partitionId].setMigrating(false);
return activeMigrations.remove(partitionId);
}
public Collection<MigrationInfo> getActiveMigrations() {
return Collections.unmodifiableCollection(activeMigrations.values());
}
private void addCompletedMigration(MigrationInfo migrationInfo) {
lock.lock();
try {
if (completedMigrations.size() > 25) {
completedMigrations.removeFirst();
}
completedMigrations.add(migrationInfo);
} finally {
lock.unlock();
}
}
private void evictCompletedMigrations() {
lock.lock();
try {
if (!completedMigrations.isEmpty()) {
completedMigrations.removeFirst();
}
} finally {
lock.unlock();
}
}
private void clearPartitionReplica(final int partitionId, final int replicaIndex) {
ClearReplicaOperation op = new ClearReplicaOperation();
op.setPartitionId(partitionId).setNodeEngine(nodeEngine).setService(this);
nodeEngine.getOperationService().executeOperation(op);
}
void triggerPartitionReplicaSync(int partitionId, int replicaIndex) {
syncPartitionReplica(partitionId, replicaIndex, 0L, false);
}
void forcePartitionReplicaSync(int partitionId, int replicaIndex) {
syncPartitionReplica(partitionId, replicaIndex, 0L, true);
}
void schedulePartitionReplicaSync(int partitionId, int replicaIndex, long delayMillis) {
syncPartitionReplica(partitionId, replicaIndex, delayMillis, true);
}
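    // Requests a replica sync from the partition owner. At most one sync request per
    // partition is kept in flight (guarded by compareAndSet); a pending request is only
    // replaced after 10 seconds, when its target has left the cluster, or when the sync
    // is forced.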
private void syncPartitionReplica(int partitionId, int replicaIndex, long delayMillis, boolean force) {
        if (replicaIndex < 0 || replicaIndex >= InternalPartition.MAX_REPLICA_COUNT) {
throw new IllegalArgumentException("Invalid replica index: " + replicaIndex);
}
final InternalPartitionImpl partitionImpl = getPartition(partitionId);
final Address target = partitionImpl.getOwnerOrNull();
if (target != null) {
if (target.equals(nodeEngine.getThisAddress())) {
if (force) {
                    Address thisAddress = node.nodeEngine.getThisAddress();
                    throw new IllegalStateException("Replica target cannot be this node -> thisNode: "
                            + thisAddress + ", partitionId: " + partitionId
                            + ", replicaIndex: " + replicaIndex + ", partition-info: " + partitionImpl);
} else {
if (logger.isFinestEnabled()) {
logger.finest(
"This node is now owner of partition, cannot sync replica -> partitionId: " + partitionId
+ ", replicaIndex: " + replicaIndex + ", partition-info: " + partitionImpl
);
}
return;
}
}
final ReplicaSyncRequest syncRequest = new ReplicaSyncRequest();
syncRequest.setPartitionId(partitionId).setReplicaIndex(replicaIndex);
final ReplicaSyncInfo currentSyncInfo = replicaSyncRequests.get(partitionId);
final ReplicaSyncInfo syncInfo = new ReplicaSyncInfo(partitionId, replicaIndex, target);
boolean sendRequest = false;
if (currentSyncInfo == null) {
sendRequest = replicaSyncRequests.compareAndSet(partitionId, null, syncInfo);
} else if (currentSyncInfo.requestTime < (Clock.currentTimeMillis() - 10000)
|| nodeEngine.getClusterService().getMember(currentSyncInfo.target) == null) {
sendRequest = replicaSyncRequests.compareAndSet(partitionId, currentSyncInfo, syncInfo);
} else if (force) {
replicaSyncRequests.set(partitionId, syncInfo);
sendRequest = true;
}
if (sendRequest) {
if (logger.isFinestEnabled()) {
logger.finest("Sending sync replica request to -> " + target + "; for partition: " + partitionId
+ ", replica: " + replicaIndex);
}
replicaSyncScheduler.cancel(partitionId);
if (delayMillis <= 0) {
replicaSyncScheduler.schedule(DEFAULT_REPLICA_SYNC_DELAY, partitionId, syncInfo);
nodeEngine.getOperationService().send(syncRequest, target);
} else {
replicaSyncScheduler.schedule(delayMillis, partitionId, syncInfo);
}
}
} else {
logger.warning("Sync replica target is null, no need to sync -> partition: " + partitionId
+ ", replica: " + replicaIndex);
}
}
@Override
public InternalPartition[] getPartitions() {
        // a defensive copy is made so callers cannot mutate the internal array,
        // preserving the old contract; arguably not needed
InternalPartition[] result = new InternalPartition[partitions.length];
System.arraycopy(partitions, 0, result, 0, partitions.length);
return result;
}
@Override
public MemberImpl getMember(Address address) {
return node.clusterService.getMember(address);
}
private InternalPartitionImpl getPartitionImpl(int partitionId) {
return partitions[partitionId];
}
@Override
public InternalPartitionImpl getPartition(int partitionId) {
InternalPartitionImpl p = getPartitionImpl(partitionId);
if (p.getOwnerOrNull() == null) {
// probably ownerships are not set yet.
// force it.
getPartitionOwner(partitionId);
}
return p;
}
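    // Graceful-shutdown gate: waits until no migrations or backup tasks are pending,
    // the master reports no ongoing migrations, and the backup replicas are confirmed
    // to be in sync with their owners, or until the timeout elapses.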
@Override
public boolean prepareToSafeShutdown(long timeout, TimeUnit unit) {
long timeoutInMillis = unit.toMillis(timeout);
int sleep = 1000;
while (timeoutInMillis > 0) {
while (timeoutInMillis > 0 && shouldWaitMigrationOrBackups(Level.INFO)) {
try {
//noinspection BusyWait
Thread.sleep(sleep);
} catch (InterruptedException ignored) {
}
timeoutInMillis -= sleep;
}
if (timeoutInMillis <= 0) {
return false;
}
if (node.isMaster()) {
syncPartitionRuntimeState();
} else {
while (timeoutInMillis > 0 && hasOnGoingMigrationMaster(Level.WARNING)) {
                    // ignore elapsed time during master invocation
logger.info("Waiting for the master node to complete remaining migrations!");
try {
//noinspection BusyWait
Thread.sleep(sleep);
} catch (InterruptedException ignored) {
}
timeoutInMillis -= sleep;
}
if (timeoutInMillis <= 0) {
return false;
}
}
long start = Clock.currentTimeMillis();
boolean ok = checkReplicaSyncState();
timeoutInMillis -= (Clock.currentTimeMillis() - start);
if (ok) {
logger.finest("Replica sync state before shutdown is OK");
return true;
} else {
if (timeoutInMillis <= 0) {
return false;
}
logger.info("Some backup replicas are inconsistent with primary, " +
"waiting for synchronization. Timeout: " + timeoutInMillis + "ms");
try {
//noinspection BusyWait
Thread.sleep(sleep);
} catch (InterruptedException ignored) {
}
timeoutInMillis -= sleep;
}
}
return false;
}
@Override
public boolean hasOnGoingMigration() {
return hasOnGoingMigrationLocal() || (!node.isMaster() && hasOnGoingMigrationMaster(Level.FINEST));
}
private boolean hasOnGoingMigrationMaster(Level level) {
Operation op = new HasOngoingMigration();
Future f = nodeEngine.getOperationService().createInvocationBuilder(SERVICE_NAME, op, node.getMasterAddress())
.setTryCount(100).setTryPauseMillis(100).invoke();
try {
return (Boolean) f.get(1, TimeUnit.MINUTES);
} catch (InterruptedException ignored) {
} catch (Exception e) {
logger.log(level, "Could not get a response from master about migrations! -> " + e.toString());
}
return false;
}
boolean hasOnGoingMigrationLocal() {
return !activeMigrations.isEmpty() || !migrationQueue.isEmpty()
|| !migrationActive.get()
|| migrationThread.isMigrating()
|| shouldWaitMigrationOrBackups(Level.OFF);
}
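    // Checks first-backup sync state using a semaphore with one permit per partition:
    // partitions owned by this node release their permit from the SyncReplicaVersion
    // callback (or immediately when they have no backup), all other partitions release
    // their permits up front.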
private boolean checkReplicaSyncState() {
if (!initialized || !node.joined()) {
return true;
}
if (getMemberGroupsSize() < 2) {
return true;
}
final Address thisAddress = node.getThisAddress();
final Semaphore s = new Semaphore(0);
final AtomicBoolean ok = new AtomicBoolean(true);
final Callback<Object> callback = new Callback<Object>() {
@Override
public void notify(Object object) {
if (Boolean.FALSE.equals(object)) {
ok.compareAndSet(true, false);
} else if (object instanceof Throwable) {
ok.compareAndSet(true, false);
}
s.release();
}
};
int notOwnedCount = 0;
for (InternalPartitionImpl partition : partitions) {
Address owner = partition.getOwnerOrNull();
if (thisAddress.equals(owner)) {
if (partition.getReplicaAddress(1) != null) {
SyncReplicaVersion op = new SyncReplicaVersion(1, callback);
op.setService(this);
op.setNodeEngine(nodeEngine);
op.setResponseHandler(ResponseHandlerFactory
.createErrorLoggingResponseHandler(node.getLogger(SyncReplicaVersion.class)));
op.setPartitionId(partition.getPartitionId());
nodeEngine.getOperationService().executeOperation(op);
} else {
ok.set(false);
s.release();
}
} else {
if (owner == null) {
ok.set(false);
}
notOwnedCount++;
}
}
s.release(notOwnedCount);
try {
if (ok.get()) {
return s.tryAcquire(partitionCount, 10, TimeUnit.SECONDS) && ok.get();
} else {
return false;
}
} catch (InterruptedException ignored) {
return false;
}
}
private boolean shouldWaitMigrationOrBackups(Level level) {
if (!initialized) {
return false;
}
if (getMemberGroupsSize() < 2) {
return false;
}
final int activeSize = activeMigrations.size();
if (activeSize != 0) {
if (logger.isLoggable(level)) {
logger.log(level, "Waiting for active migration tasks: " + activeSize);
}
return true;
}
int queueSize = migrationQueue.size();
if (queueSize != 0) {
if (logger.isLoggable(level)) {
logger.log(level, "Waiting for cluster migration tasks: " + queueSize);
}
return true;
}
for (InternalPartitionImpl partition : partitions) {
if (partition.getReplicaAddress(1) == null) {
if (logger.isLoggable(level)) {
logger.log(level, "Should take backup of partition: " + partition.getPartitionId());
}
return true;
} else {
int replicaSyncProcesses = replicaSyncProcessCount.get();
if (replicaSyncProcesses > 0) {
if (logger.isLoggable(level)) {
logger.log(level, "Processing replica sync requests: " + replicaSyncProcesses);
}
return true;
}
}
}
return false;
}
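    // Maps a key's partition hash to a partition id. Integer.MIN_VALUE is special-cased
    // because Math.abs(Integer.MIN_VALUE) is still negative and would yield a negative index.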
@Override
public final int getPartitionId(Data key) {
int hash = key.getPartitionHash();
if (hash == Integer.MIN_VALUE) {
return 0;
} else {
return Math.abs(hash) % partitionCount;
}
}
@Override
public final int getPartitionId(Object key) {
return getPartitionId(nodeEngine.toData(key));
}
@Override
public final int getPartitionCount() {
return partitionCount;
}
// called in operation threads
// Caution: Returning version array without copying for performance reasons. Callers must not modify this array!
@Override
public long[] incrementPartitionReplicaVersions(int partitionId, int backupCount) {
PartitionReplicaVersions replicaVersion = replicaVersions[partitionId];
return replicaVersion.incrementAndGet(backupCount);
}
// called in operation threads
@Override
public void updatePartitionReplicaVersions(int partitionId, long[] versions, int replicaIndex) {
PartitionReplicaVersions partitionVersion = replicaVersions[partitionId];
if (!partitionVersion.update(versions, replicaIndex)) {
triggerPartitionReplicaSync(partitionId, replicaIndex);
}
}
// called in operation threads
// Caution: Returning version array without copying for performance reasons. Callers must not modify this array!
@Override
public long[] getPartitionReplicaVersions(int partitionId) {
return replicaVersions[partitionId].get();
}
// called in operation threads
@Override
public void setPartitionReplicaVersions(int partitionId, long[] versions) {
replicaVersions[partitionId].reset(versions);
}
@Override
public void clearPartitionReplicaVersions(int partitionId) {
replicaVersions[partitionId].clear();
}
// called in operation threads
void finalizeReplicaSync(int partitionId, long[] versions) {
setPartitionReplicaVersions(partitionId, versions);
replicaSyncRequests.set(partitionId, null);
replicaSyncScheduler.cancel(partitionId);
}
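    // Admission check for parallel replica sync operations: after an optimistic
    // pre-check, the counter is incremented and rolled back again if the
    // MAX_PARALLEL_REPLICATIONS limit turns out to be exceeded concurrently.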
boolean incrementReplicaSyncProcessCount() {
int c = replicaSyncProcessCount.get();
if (c >= MAX_PARALLEL_REPLICATIONS) {
return false;
}
c = replicaSyncProcessCount.incrementAndGet();
if (c >= MAX_PARALLEL_REPLICATIONS) {
replicaSyncProcessCount.decrementAndGet();
return false;
}
return true;
}
void decrementReplicaSyncProcessCount() {
replicaSyncProcessCount.decrementAndGet();
}
@Override
public Map<Address, List<Integer>> getMemberPartitionsMap() {
final int members = node.getClusterService().getSize();
Map<Address, List<Integer>> memberPartitions = new HashMap<Address, List<Integer>>(members);
for (int i = 0; i < partitionCount; i++) {
Address owner;
while ((owner = getPartitionOwner(i)) == null) {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
throw new HazelcastException(e);
}
}
List<Integer> ownedPartitions = memberPartitions.get(owner);
if (ownedPartitions == null) {
ownedPartitions = new ArrayList<Integer>();
memberPartitions.put(owner, ownedPartitions);
}
ownedPartitions.add(i);
}
return memberPartitions;
}
@Override
public List<Integer> getMemberPartitions(Address target) {
List<Integer> ownedPartitions = new LinkedList<Integer>();
for (int i = 0; i < partitionCount; i++) {
final Address owner = getPartitionOwner(i);
if (target.equals(owner)) {
ownedPartitions.add(i);
}
}
return ownedPartitions;
}
@Override
public void reset() {
migrationQueue.clear();
for (int k = 0; k < replicaSyncRequests.length(); k++) {
replicaSyncRequests.set(k, null);
}
replicaSyncScheduler.cancelAll();
lock.lock();
try {
initialized = false;
for (InternalPartitionImpl partition : partitions) {
for (int i = 0; i < InternalPartition.MAX_REPLICA_COUNT; i++) {
partition.setReplicaAddress(i, null);
partition.setMigrating(false);
}
}
activeMigrations.clear();
completedMigrations.clear();
stateVersion.set(0);
} finally {
lock.unlock();
}
}
public void pauseMigration() {
migrationActive.set(false);
}
public void resumeMigration() {
migrationActive.set(true);
}
@Override
public void shutdown(boolean terminate) {
logger.finest("Shutting down the partition service");
migrationThread.stopNow();
reset();
}
public long getMigrationQueueSize() {
return migrationQueue.size();
}
public PartitionServiceProxy getPartitionServiceProxy() {
return proxy;
}
private void sendMigrationEvent(final MigrationInfo migrationInfo, final MigrationStatus status) {
MemberImpl current = getMember(migrationInfo.getSource());
MemberImpl newOwner = getMember(migrationInfo.getDestination());
MigrationEvent event = new MigrationEvent(migrationInfo.getPartitionId(), current, newOwner, status);
EventService eventService = nodeEngine.getEventService();
Collection<EventRegistration> registrations = eventService.getRegistrations(SERVICE_NAME, SERVICE_NAME);
eventService.publishEvent(SERVICE_NAME, registrations, event, event.getPartitionId());
}
@Override
public String addMigrationListener(MigrationListener listener) {
EventService eventService = nodeEngine.getEventService();
EventRegistration registration = eventService.registerListener(SERVICE_NAME, SERVICE_NAME, listener);
return registration.getId();
}
@Override
public boolean removeMigrationListener(String registrationId) {
EventService eventService = nodeEngine.getEventService();
return eventService.deregisterListener(SERVICE_NAME, SERVICE_NAME, registrationId);
}
@Override
public void dispatchEvent(MigrationEvent migrationEvent, MigrationListener migrationListener) {
final MigrationStatus status = migrationEvent.getStatus();
switch (status) {
case STARTED:
migrationListener.migrationStarted(migrationEvent);
break;
case COMPLETED:
migrationListener.migrationCompleted(migrationEvent);
break;
case FAILED:
migrationListener.migrationFailed(migrationEvent);
break;
default:
throw new IllegalArgumentException("Not a known MigrationStatus: " + status);
}
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("PartitionManager[" + stateVersion + "] {\n");
sb.append("\n");
sb.append("migrationQ: ").append(migrationQueue.size());
sb.append("\n}");
return sb.toString();
}
private class SendClusterStateTask implements Runnable {
@Override
public void run() {
if (node.isMaster() && node.isActive()) {
if (!migrationQueue.isEmpty() && migrationActive.get()) {
logger.info("Remaining migration tasks in queue => " + migrationQueue.size());
}
publishPartitionRuntimeState();
}
}
}
private class SyncReplicaVersionTask implements Runnable {
@Override
public void run() {
if (node.isActive() && migrationActive.get()) {
final Address thisAddress = node.getThisAddress();
for (final InternalPartitionImpl partition : partitions) {
if (thisAddress.equals(partition.getOwnerOrNull())) {
for (int index = 1; index < InternalPartition.MAX_REPLICA_COUNT; index++) {
if (partition.getReplicaAddress(index) != null) {
SyncReplicaVersion op = new SyncReplicaVersion(index, null);
op.setService(InternalPartitionServiceImpl.this);
op.setNodeEngine(nodeEngine);
op.setResponseHandler(ResponseHandlerFactory
.createErrorLoggingResponseHandler(node.getLogger(SyncReplicaVersion.class)));
op.setPartitionId(partition.getPartitionId());
nodeEngine.getOperationService().executeOperation(op);
}
}
}
}
}
}
}
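    // Master-only task: recomputes the partition layout for the current member set and
    // queues a MigrateTask for every partition whose owner changes. Lost partitions
    // (no current owner) are re-assigned immediately, and the task re-queues itself
    // while migration is paused.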
private class RepartitioningTask implements Runnable {
@Override
public void run() {
if (node.isMaster() && node.isActive()) {
lock.lock();
try {
if (!initialized) {
return;
}
if (!isMigrationAllowed()) {
return;
}
migrationQueue.clear();
PartitionStateGenerator psg = partitionStateGenerator;
Collection<MemberImpl> members = node.getClusterService().getMemberList();
Collection<MemberGroup> memberGroups = memberGroupFactory.createMemberGroups(members);
Address[][] newState = psg.reArrange(memberGroups, partitions);
if (!isMigrationAllowed()) {
return;
}
int migrationCount = 0;
int lostCount = 0;
lastRepartitionTime.set(Clock.currentTimeMillis());
for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
Address[] replicas = newState[partitionId];
InternalPartitionImpl currentPartition = partitions[partitionId];
Address currentOwner = currentPartition.getOwnerOrNull();
Address newOwner = replicas[0];
if (currentOwner == null) {
// assign new owner for lost partition
lostCount++;
currentPartition.setPartitionInfo(replicas);
MigrationInfo migrationInfo = new MigrationInfo(partitionId, null, newOwner);
sendMigrationEvent(migrationInfo, MigrationStatus.STARTED);
sendMigrationEvent(migrationInfo, MigrationStatus.COMPLETED);
} else if (newOwner != null && !currentOwner.equals(newOwner)) {
migrationCount++;
MigrationInfo info = new MigrationInfo(partitionId, currentOwner, newOwner);
MigrateTask migrateTask = new MigrateTask(info, new BackupMigrationTask(partitionId, replicas));
boolean offered = migrationQueue.offer(migrateTask);
                            if (!offered) {
                                logger.severe("Failed to offer: " + migrateTask);
                            }
} else {
currentPartition.setPartitionInfo(replicas);
}
}
syncPartitionRuntimeState(members);
if (lostCount > 0) {
logger.warning("Assigning new owners for " + lostCount + " LOST partitions!");
}
if (migrationCount > 0) {
logger.info("Re-partitioning cluster data... Migration queue size: " + migrationCount);
} else {
logger.info("Partition balance is ok, no need to re-partition cluster data... ");
}
} finally {
lock.unlock();
}
}
}
private boolean isMigrationAllowed() {
if (migrationActive.get()) {
return true;
}
migrationQueue.add(this);
return false;
}
}
private class BackupMigrationTask implements Runnable {
final int partitionId;
final Address[] replicas;
BackupMigrationTask(int partitionId, Address[] replicas) {
this.partitionId = partitionId;
this.replicas = replicas;
}
@Override
public void run() {
lock.lock();
try {
InternalPartitionImpl currentPartition = partitions[partitionId];
for (int index = 1; index < InternalPartition.MAX_REPLICA_COUNT; index++) {
currentPartition.setReplicaAddress(index, replicas[index]);
}
} finally {
lock.unlock();
}
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("BackupMigrationTask{");
sb.append("partitionId=").append(partitionId);
sb.append("replicas=").append(Arrays.toString(replicas));
sb.append('}');
return sb.toString();
}
}
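    // Runs a single partition migration on the master: validates that the partition
    // owner matches the migration source, invokes MigrationRequestOperation on the
    // source member, and on success promotes the destination to owner and re-publishes
    // the partition state. On failure the queue is cleared and repartitioning is re-run.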
private class MigrateTask implements Runnable {
final MigrationInfo migrationInfo;
final BackupMigrationTask backupTask;
MigrateTask(MigrationInfo migrationInfo, BackupMigrationTask backupTask) {
this.migrationInfo = migrationInfo;
this.backupTask = backupTask;
final MemberImpl masterMember = getMasterMember();
if (masterMember != null) {
migrationInfo.setMasterUuid(masterMember.getUuid());
migrationInfo.setMaster(masterMember.getAddress());
}
}
@Override
public void run() {
if (!node.isActive() || !node.isMaster()) {
return;
}
final MigrationRequestOperation migrationRequestOp = new MigrationRequestOperation(migrationInfo);
try {
MigrationInfo info = migrationInfo;
InternalPartitionImpl partition = partitions[info.getPartitionId()];
Address owner = partition.getOwnerOrNull();
                if (owner == null) {
                    logger.severe("ERROR: partition owner is not set! -> "
                            + partition + " -VS- " + info);
                    return;
                }
                if (!owner.equals(info.getSource())) {
                    logger.severe("ERROR: partition owner is not the source of migration! -> "
                            + partition + " -VS- " + info + ", found owner: " + owner);
return;
}
sendMigrationEvent(migrationInfo, MigrationStatus.STARTED);
Boolean result = Boolean.FALSE;
MemberImpl fromMember = getMember(migrationInfo.getSource());
if (logger.isFinestEnabled()) {
logger.finest("Started Migration : " + migrationInfo);
}
systemLogService.logPartition("Started Migration : " + migrationInfo);
if (fromMember == null) {
// Partition is lost! Assign new owner and exit.
logger.warning("Partition is lost! Assign new owner and exit...");
result = Boolean.TRUE;
} else {
Future future = nodeEngine.getOperationService().createInvocationBuilder(SERVICE_NAME,
migrationRequestOp, migrationInfo.getSource()).setTryPauseMillis(1000).invoke();
try {
Object response = future.get(partitionMigrationTimeout, TimeUnit.SECONDS);
result = (Boolean) nodeEngine.toObject(response);
} catch (Throwable e) {
final Level level = node.isActive() && migrationInfo.isValid() ? Level.WARNING : Level.FINEST;
logger.log(level, "Failed migrating from " + fromMember, e);
}
}
if (Boolean.TRUE.equals(result)) {
String message = "Finished Migration: " + migrationInfo;
if (logger.isFinestEnabled()) {
logger.finest(message);
}
systemLogService.logPartition(message);
processMigrationResult();
} else {
final Level level = migrationInfo.isValid() ? Level.WARNING : Level.FINEST;
logger.log(level, "Migration failed: " + migrationInfo);
migrationTaskFailed();
}
} catch (Throwable t) {
final Level level = migrationInfo.isValid() ? Level.WARNING : Level.FINEST;
logger.log(level, "Error [" + t.getClass() + ": " + t.getMessage() + "] while executing " + migrationRequestOp);
logger.finest(t);
migrationTaskFailed();
}
}
private void migrationTaskFailed() {
systemLogService.logPartition("Migration failed: " + migrationInfo);
lock.lock();
try {
addCompletedMigration(migrationInfo);
finalizeActiveMigration(migrationInfo);
syncPartitionRuntimeState();
} finally {
lock.unlock();
}
sendMigrationEvent(migrationInfo, MigrationStatus.FAILED);
// migration failed, clear current pending migration tasks and re-execute RepartitioningTask
migrationQueue.clear();
migrationQueue.add(new RepartitioningTask());
}
private void processMigrationResult() {
lock.lock();
try {
final int partitionId = migrationInfo.getPartitionId();
Address newOwner = migrationInfo.getDestination();
InternalPartitionImpl partition = partitions[partitionId];
partition.setOwner(newOwner);
addCompletedMigration(migrationInfo);
finalizeActiveMigration(migrationInfo);
if (backupTask != null) {
backupTask.run();
}
syncPartitionRuntimeState();
} finally {
lock.unlock();
}
sendMigrationEvent(migrationInfo, MigrationStatus.COMPLETED);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("MigrateTask{");
sb.append("migrationInfo=").append(migrationInfo);
sb.append('}');
return sb.toString();
}
}
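    // Dedicated thread that drains the migration queue one task at a time, optionally
    // pausing between tasks (partitionMigrationInterval) and sleeping while there is
    // nothing to do or migration is deactivated.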
private class MigrationThread extends Thread implements Runnable {
private final long sleepTime = Math.max(250L, partitionMigrationInterval);
private volatile boolean migrating;
MigrationThread(Node node) {
super(node.threadGroup, node.getThreadNamePrefix("migration"));
}
@Override
public void run() {
try {
while (!isInterrupted()) {
doRun();
}
} catch (InterruptedException e) {
if (logger.isFinestEnabled()) {
logger.finest("MigrationThread is interrupted: " + e.getMessage());
}
} finally {
migrationQueue.clear();
}
}
private void doRun() throws InterruptedException {
for (; ; ) {
if (!migrationActive.get()) {
break;
}
Runnable r = migrationQueue.poll(1, TimeUnit.SECONDS);
if (r == null) {
break;
}
processTask(r);
if (partitionMigrationInterval > 0) {
Thread.sleep(partitionMigrationInterval);
}
}
boolean hasNoTasks = migrationQueue.isEmpty();
if (hasNoTasks) {
if (migrating) {
migrating = false;
logger.info("All migration tasks has been completed, queues are empty.");
}
evictCompletedMigrations();
Thread.sleep(sleepTime);
} else if (!migrationActive.get()) {
Thread.sleep(sleepTime);
}
}
boolean processTask(Runnable r) {
if (r == null || isInterrupted()) {
return false;
}
try {
migrating = (r instanceof MigrateTask);
r.run();
} catch (Throwable t) {
logger.warning(t);
}
return true;
}
void stopNow() {
migrationQueue.clear();
interrupt();
}
boolean isMigrating() {
return migrating;
}
}
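    // Reacts to replica ownership changes: clears replica data this node no longer
    // owns, forces a replica sync when this node becomes a new backup, logs possible
    // data loss when a partition owner disappears, and bumps the partition state
    // version on the master.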
private static class LocalPartitionListener implements PartitionListener {
final Address thisAddress;
private InternalPartitionServiceImpl partitionService;
private LocalPartitionListener(InternalPartitionServiceImpl partitionService, Address thisAddress) {
this.thisAddress = thisAddress;
this.partitionService = partitionService;
}
@Override
public void replicaChanged(PartitionReplicaChangeEvent event) {
int replicaIndex = event.getReplicaIndex();
Address newAddress = event.getNewAddress();
if (replicaIndex > 0) {
// backup replica owner changed!
int partitionId = event.getPartitionId();
if (thisAddress.equals(event.getOldAddress())) {
InternalPartitionImpl partition = partitionService.partitions[partitionId];
if (!partition.isOwnerOrBackup(thisAddress)) {
partitionService.clearPartitionReplica(partitionId, replicaIndex);
}
} else if (thisAddress.equals(newAddress)) {
partitionService.clearPartitionReplica(partitionId, replicaIndex);
partitionService.forcePartitionReplicaSync(partitionId, replicaIndex);
}
}
Node node = partitionService.node;
if (replicaIndex == 0 && newAddress == null && node.isActive() && node.joined()) {
logOwnerOfPartitionIsRemoved(event);
}
if (partitionService.node.isMaster()) {
partitionService.stateVersion.incrementAndGet();
}
}
private void logOwnerOfPartitionIsRemoved(PartitionReplicaChangeEvent event) {
String warning = "Owner of partition is being removed! "
+ "Possible data loss for partition[" + event.getPartitionId() + "]. " + event;
partitionService.logger.warning(warning);
partitionService.systemLogService.logWarningPartition(warning);
}
}
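    // Invoked when a scheduled replica sync request expires without being finalized:
    // the stale request is cleared atomically and the sync is triggered again.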
private static class ReplicaSyncEntryProcessor implements ScheduledEntryProcessor<Integer, ReplicaSyncInfo> {
private InternalPartitionServiceImpl partitionService;
public ReplicaSyncEntryProcessor(InternalPartitionServiceImpl partitionService) {
this.partitionService = partitionService;
}
@Override
public void process(EntryTaskScheduler<Integer, ReplicaSyncInfo> scheduler,
Collection<ScheduledEntry<Integer, ReplicaSyncInfo>> entries) {
for (ScheduledEntry<Integer, ReplicaSyncInfo> entry : entries) {
ReplicaSyncInfo syncInfo = entry.getValue();
if (partitionService.replicaSyncRequests.compareAndSet(entry.getKey(), syncInfo, null)) {
                    logResendingSyncReplicaRequest(syncInfo);
partitionService.triggerPartitionReplicaSync(syncInfo.partitionId, syncInfo.replicaIndex);
}
}
}
        private void logResendingSyncReplicaRequest(ReplicaSyncInfo syncInfo) {
ILogger logger = partitionService.logger;
if (logger.isFinestEnabled()) {
logger.finest("Re-sending sync replica request for partition: " + syncInfo.partitionId + ", replica: "
+ syncInfo.replicaIndex);
}
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_partition_impl_InternalPartitionServiceImpl.java
|
81 |
public static class Tab {
public static class Name {
public static final String File_Details = "StaticAssetImpl_FileDetails_Tab";
public static final String Advanced = "StaticAssetImpl_Advanced_Tab";
}
public static class Order {
public static final int File_Details = 2000;
public static final int Advanced = 3000;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_domain_StaticAssetImpl.java
|
2,219 |
public class BoostScoreFunction extends ScoreFunction {
private final float boost;
public BoostScoreFunction(float boost) {
super(CombineFunction.MULT);
this.boost = boost;
}
public float getBoost() {
return boost;
}
@Override
public void setNextReader(AtomicReaderContext context) {
// nothing to do here...
}
@Override
public double score(int docId, float subQueryScore) {
return boost;
}
@Override
public Explanation explainScore(int docId, Explanation subQueryExpl) {
Explanation exp = new Explanation(boost, "static boost factor");
exp.addDetail(new Explanation(boost, "boostFactor"));
return exp;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
BoostScoreFunction that = (BoostScoreFunction) o;
if (Float.compare(that.boost, boost) != 0)
return false;
return true;
}
@Override
public int hashCode() {
return (boost != +0.0f ? Float.floatToIntBits(boost) : 0);
}
@Override
public String toString() {
return "boost[" + boost + "]";
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_lucene_search_function_BoostScoreFunction.java
|
1,147 |
public class UpdateOrderMultishipOptionActivity extends BaseActivity<CartOperationContext> {
@Resource(name = "blOrderMultishipOptionService")
protected OrderMultishipOptionService orderMultishipOptionService;
@Resource(name = "blOrderItemService")
protected OrderItemService orderItemService;
@Override
public CartOperationContext execute(CartOperationContext context) throws Exception {
CartOperationRequest request = context.getSeedData();
Long orderItemId = request.getItemRequest().getOrderItemId();
Integer orderItemQuantityDelta = request.getOrderItemQuantityDelta();
if (orderItemQuantityDelta < 0) {
int numToDelete = -1 * orderItemQuantityDelta;
//find the qty in the default fg
OrderItem orderItem = orderItemService.readOrderItemById(orderItemId);
int qty = 0;
if (!CollectionUtils.isEmpty(orderItem.getOrder().getFulfillmentGroups())) {
FulfillmentGroup fg = orderItem.getOrder().getFulfillmentGroups().get(0);
if (fg.getAddress() == null && fg.getFulfillmentOption() == null) {
for (FulfillmentGroupItem fgItem : fg.getFulfillmentGroupItems()) {
                    if (orderItemId.equals(fgItem.getOrderItem().getId())) {
qty += fgItem.getQuantity();
}
}
}
}
if (numToDelete >= qty) {
orderMultishipOptionService.deleteOrderItemOrderMultishipOptions(orderItemId, numToDelete - qty);
}
}
return context;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_workflow_update_UpdateOrderMultishipOptionActivity.java
|
387 |
public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, ClusterUpdateSettingsRequestBuilder> {
public ClusterUpdateSettingsRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new ClusterUpdateSettingsRequest());
}
/**
* Sets the transient settings to be updated. They will not survive a full cluster restart
*/
public ClusterUpdateSettingsRequestBuilder setTransientSettings(Settings settings) {
request.transientSettings(settings);
return this;
}
/**
* Sets the transient settings to be updated. They will not survive a full cluster restart
*/
public ClusterUpdateSettingsRequestBuilder setTransientSettings(Settings.Builder settings) {
request.transientSettings(settings);
return this;
}
/**
* Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
*/
public ClusterUpdateSettingsRequestBuilder setTransientSettings(String settings) {
request.transientSettings(settings);
return this;
}
/**
* Sets the transient settings to be updated. They will not survive a full cluster restart
*/
public ClusterUpdateSettingsRequestBuilder setTransientSettings(Map settings) {
request.transientSettings(settings);
return this;
}
/**
     * Sets the persistent settings to be updated. They will get applied across restarts
*/
public ClusterUpdateSettingsRequestBuilder setPersistentSettings(Settings settings) {
request.persistentSettings(settings);
return this;
}
/**
     * Sets the persistent settings to be updated. They will get applied across restarts
*/
public ClusterUpdateSettingsRequestBuilder setPersistentSettings(Settings.Builder settings) {
request.persistentSettings(settings);
return this;
}
/**
     * Sets the source containing the persistent settings to be updated. They will get applied across restarts
*/
public ClusterUpdateSettingsRequestBuilder setPersistentSettings(String settings) {
request.persistentSettings(settings);
return this;
}
/**
     * Sets the persistent settings to be updated. They will get applied across restarts
*/
public ClusterUpdateSettingsRequestBuilder setPersistentSettings(Map settings) {
request.persistentSettings(settings);
return this;
}
@Override
protected void doExecute(ActionListener<ClusterUpdateSettingsResponse> listener) {
((ClusterAdminClient) client).updateSettings(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_settings_ClusterUpdateSettingsRequestBuilder.java
|
711 |
public class CountAction extends Action<CountRequest, CountResponse, CountRequestBuilder> {
public static final CountAction INSTANCE = new CountAction();
public static final String NAME = "count";
private CountAction() {
super(NAME);
}
@Override
public CountResponse newResponse() {
return new CountResponse();
}
@Override
public CountRequestBuilder newRequestBuilder(Client client) {
return new CountRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_count_CountAction.java
|
1 |
public interface Abbreviations {
/**
* Gets the original string for which the available abbreviations have
* been calculated.
*
* @return the original string
*/
String getValue();
/**
* Gets the phrases into which the original string has been divided as
* possible abbreviations were found. The phrases, in order, comprise
* all words of the original string.
*
* @return a list of phrases that can be abbreviated
*/
List<String> getPhrases();
/**
* Gets the available abbreviations for a phrase. The list is always
* nonempty, since the first element is the phrase unchanged.
*
* @param phrase the phrase to abbreviate, which may be a single word
* @return a list of possible abbreviations for the phrase
*/
List<String> getAbbreviations(String phrase);
}
| 0true
|
tableViews_src_main_java_gov_nasa_arc_mct_abbreviation_Abbreviations.java
|
1,190 |
public class OQueryOperatorMinorEquals extends OQueryOperatorEqualityNotNulls {
public OQueryOperatorMinorEquals() {
super("<=", 5, false);
}
@Override
@SuppressWarnings("unchecked")
protected boolean evaluateExpression(final OIdentifiable iRecord, final OSQLFilterCondition iCondition, final Object iLeft,
final Object iRight, OCommandContext iContext) {
final Object right = OType.convert(iRight, iLeft.getClass());
if (right == null)
return false;
return ((Comparable<Object>) iLeft).compareTo(right) <= 0;
}
@Override
public OIndexReuseType getIndexReuseType(final Object iLeft, final Object iRight) {
if (iRight == null || iLeft == null)
return OIndexReuseType.NO_INDEX;
return OIndexReuseType.INDEX_METHOD;
}
@Override
public Object executeIndexQuery(OCommandContext iContext, OIndex<?> index, INDEX_OPERATION_TYPE iOperationType,
List<Object> keyParams, IndexResultListener resultListener, int fetchLimit) {
final OIndexDefinition indexDefinition = index.getDefinition();
final OIndexInternal<?> internalIndex = index.getInternal();
if (!internalIndex.canBeUsedInEqualityOperators() || !internalIndex.hasRangeQuerySupport())
return null;
final Object result;
if (indexDefinition.getParamCount() == 1) {
final Object key;
if (indexDefinition instanceof OIndexDefinitionMultiValue)
key = ((OIndexDefinitionMultiValue) indexDefinition).createSingleValue(keyParams.get(0));
else
key = indexDefinition.createValue(keyParams);
if (key == null)
return null;
if (INDEX_OPERATION_TYPE.COUNT.equals(iOperationType))
result = index.count(null, false, key, true, fetchLimit);
else if (resultListener != null) {
index.getValuesMinor(key, true, resultListener);
result = resultListener.getResult();
} else
result = index.getValuesMinor(key, true);
} else {
      // If we have a situation like "field1 = 1 AND field2 <= 2",
      // then we fetch the collection whose left (included) boundary is the smallest
      // composite key in the index that contains field1=1, and whose right (also
      // included, since the operator is <=) boundary is the biggest composite key
      // that contains field1=1 and field2=2.
final OCompositeIndexDefinition compositeIndexDefinition = (OCompositeIndexDefinition) indexDefinition;
final Object keyOne = compositeIndexDefinition.createSingleValue(keyParams.subList(0, keyParams.size() - 1));
if (keyOne == null)
return null;
final Object keyTwo = compositeIndexDefinition.createSingleValue(keyParams);
if (keyTwo == null)
return null;
if (INDEX_OPERATION_TYPE.COUNT.equals(iOperationType))
result = index.count(keyOne, true, keyTwo, true, fetchLimit);
else if (resultListener != null) {
index.getValuesBetween(keyOne, true, keyTwo, true, resultListener);
result = resultListener.getResult();
} else
result = index.getValuesBetween(keyOne, true, keyTwo, true);
}
updateProfiler(iContext, index, keyParams, indexDefinition);
return result;
}
@Override
public ORID getBeginRidRange(Object iLeft, Object iRight) {
return null;
}
@Override
public ORID getEndRidRange(final Object iLeft, final Object iRight) {
if (iLeft instanceof OSQLFilterItemField && ODocumentHelper.ATTRIBUTE_RID.equals(((OSQLFilterItemField) iLeft).getRoot()))
if (iRight instanceof ORID)
return (ORID) iRight;
else {
if (iRight instanceof OSQLFilterItemParameter && ((OSQLFilterItemParameter) iRight).getValue(null, null) instanceof ORID)
return (ORID) ((OSQLFilterItemParameter) iRight).getValue(null, null);
}
return null;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_operator_OQueryOperatorMinorEquals.java
|
1,439 |
public class LocalRegionCache implements RegionCache {
protected final ITopic<Object> topic;
protected final MessageListener<Object> messageListener;
protected final ConcurrentMap<Object, Value> cache;
protected final Comparator versionComparator;
protected MapConfig config;
public LocalRegionCache(final String name, final HazelcastInstance hazelcastInstance,
final CacheDataDescription metadata) {
try {
config = hazelcastInstance != null ? hazelcastInstance.getConfig().findMapConfig(name) : null;
} catch (UnsupportedOperationException ignored) {
}
versionComparator = metadata != null && metadata.isVersioned() ? metadata.getVersionComparator() : null;
cache = new ConcurrentHashMap<Object, Value>();
messageListener = createMessageListener();
if (hazelcastInstance != null) {
topic = hazelcastInstance.getTopic(name);
topic.addMessageListener(messageListener);
} else {
topic = null;
}
}
public Object get(final Object key) {
final Value value = cache.get(key);
return value != null ? value.getValue() : null;
}
public boolean put(final Object key, final Object value, final Object currentVersion) {
final Value newValue = new Value(currentVersion, value, null, Clock.currentTimeMillis());
cache.put(key, newValue);
return true;
}
public boolean update(final Object key, final Object value, final Object currentVersion,
final Object previousVersion, final SoftLock lock) {
if (lock == LOCK_FAILURE) {
return false;
}
final Value currentValue = cache.get(key);
if (lock == LOCK_SUCCESS) {
if (currentValue != null && currentVersion != null
&& versionComparator.compare(currentVersion, currentValue.getVersion()) < 0) {
return false;
}
}
if (topic != null) {
topic.publish(createMessage(key, value, currentVersion));
}
cache.put(key, new Value(currentVersion, value, lock, Clock.currentTimeMillis()));
return true;
}
protected Object createMessage(final Object key, Object value, final Object currentVersion) {
return new Invalidation(key, currentVersion);
}
protected MessageListener<Object> createMessageListener() {
return new MessageListener<Object>() {
public void onMessage(final Message<Object> message) {
final Invalidation invalidation = (Invalidation) message.getMessageObject();
if (versionComparator != null) {
final Value value = cache.get(invalidation.getKey());
if (value != null) {
Object currentVersion = value.getVersion();
Object newVersion = invalidation.getVersion();
if (versionComparator.compare(newVersion, currentVersion) > 0) {
cache.remove(invalidation.getKey(), value);
}
}
} else {
cache.remove(invalidation.getKey());
}
}
};
}
public boolean remove(final Object key) {
final Value value = cache.remove(key);
if (value != null) {
if (topic != null) {
topic.publish(createMessage(key, null, value.getVersion()));
}
return true;
}
return false;
}
public SoftLock tryLock(final Object key, final Object version) {
final Value value = cache.get(key);
if (value == null) {
if (cache.putIfAbsent(key, new Value(version, null, LOCK_SUCCESS, Clock.currentTimeMillis())) == null) {
return LOCK_SUCCESS;
} else {
return LOCK_FAILURE;
}
} else {
if (version == null || versionComparator.compare(version, value.getVersion()) >= 0) {
if (cache.replace(key, value, value.createLockedValue(LOCK_SUCCESS))) {
return LOCK_SUCCESS;
} else {
return LOCK_FAILURE;
}
} else {
return LOCK_FAILURE;
}
}
}
public void unlock(final Object key, SoftLock lock) {
final Value value = cache.get(key);
if (value != null) {
final SoftLock currentLock = value.getLock();
if (currentLock == lock) {
cache.replace(key, value, value.createUnlockedValue());
}
}
}
public boolean contains(final Object key) {
return cache.containsKey(key);
}
public void clear() {
cache.clear();
}
public long size() {
return cache.size();
}
public long getSizeInMemory() {
return 0;
}
public Map asMap() {
return cache;
}
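    // Evicts expired entries (creationTime + timeToLive < now) and, when the cache has
    // grown past the configured maximum size, additionally removes the oldest entries
    // (the overflow plus roughly 20% of the maximum), skipping currently locked values.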
void cleanup() {
final int maxSize;
final long timeToLive;
if (config != null) {
maxSize = config.getMaxSizeConfig().getSize();
timeToLive = config.getTimeToLiveSeconds() * 1000L;
} else {
maxSize = 100000;
timeToLive = CacheEnvironment.getDefaultCacheTimeoutInMillis();
}
if ((maxSize > 0 && maxSize != Integer.MAX_VALUE) || timeToLive > 0) {
final Iterator<Entry<Object, Value>> iter = cache.entrySet().iterator();
SortedSet<EvictionEntry> entries = null;
final long now = Clock.currentTimeMillis();
while (iter.hasNext()) {
final Entry<Object, Value> e = iter.next();
final Object k = e.getKey();
final Value v = e.getValue();
if (v.getLock() == LOCK_SUCCESS) {
continue;
}
if (v.getCreationTime() + timeToLive < now) {
iter.remove();
} else if (maxSize > 0 && maxSize != Integer.MAX_VALUE) {
if (entries == null) {
entries = new TreeSet<EvictionEntry>();
}
entries.add(new EvictionEntry(k, v));
}
}
final int diff = cache.size() - maxSize;
final int k = diff >= 0 ? (diff + maxSize * 20 / 100) : 0;
if (k > 0 && entries != null) {
int i = 0;
for (EvictionEntry entry : entries) {
if (cache.remove(entry.key, entry.value)) {
if (++i == k) {
break;
}
}
}
}
}
}
    private static class EvictionEntry implements Comparable<EvictionEntry> {
final Object key;
final Value value;
private EvictionEntry(final Object key, final Value value) {
this.key = key;
this.value = value;
}
public int compareTo(final EvictionEntry o) {
final long thisVal = this.value.getCreationTime();
final long anotherVal = o.value.getCreationTime();
return (thisVal < anotherVal ? -1 : (thisVal == anotherVal ? 0 : 1));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
EvictionEntry that = (EvictionEntry) o;
if (key != null ? !key.equals(that.key) : that.key != null) return false;
if (value != null ? !value.equals(that.value) : that.value != null) return false;
return true;
}
@Override
public int hashCode() {
return key != null ? key.hashCode() : 0;
}
}
private static final SoftLock LOCK_SUCCESS = new SoftLock() {
@Override
public String toString() {
return "Lock::Success";
}
};
private static final SoftLock LOCK_FAILURE = new SoftLock() {
@Override
public String toString() {
return "Lock::Failure";
}
};
}
| 1no label
|
hazelcast-hibernate_hazelcast-hibernate3_src_main_java_com_hazelcast_hibernate_local_LocalRegionCache.java
|
1,688 |
public abstract class OBinaryNetworkProtocolAbstract extends ONetworkProtocol {
protected OChannelBinaryServer channel;
protected int requestType;
protected int clientTxId;
protected final Level logClientExceptions;
protected final boolean logClientFullStackTrace;
public OBinaryNetworkProtocolAbstract(final String iThreadName) {
super(Orient.instance().getThreadGroup(), iThreadName);
logClientExceptions = Level.parse(OGlobalConfiguration.SERVER_LOG_DUMP_CLIENT_EXCEPTION_LEVEL.getValueAsString());
logClientFullStackTrace = OGlobalConfiguration.SERVER_LOG_DUMP_CLIENT_EXCEPTION_FULLSTACKTRACE.getValueAsBoolean();
}
/**
* Executes the request.
*
* @return true if the request has been recognized, otherwise false
* @throws IOException
*/
protected abstract boolean executeRequest() throws IOException;
/**
* Executed before the request.
*
* @throws IOException
*/
protected void onBeforeRequest() throws IOException {
}
/**
* Executed after the request, also in case of error.
*
* @throws IOException
*/
protected void onAfterRequest() throws IOException {
}
@Override
public void config(final OServer iServer, final Socket iSocket, final OContextConfiguration iConfig,
final List<?> iStatelessCommands, List<?> iStatefulCommands) throws IOException {
server = iServer;
channel = new OChannelBinaryServer(iSocket, iConfig);
}
@Override
protected void execute() throws Exception {
requestType = -1;
clientTxId = 0;
long timer = 0;
try {
requestType = channel.readByte();
clientTxId = channel.readInt();
timer = Orient.instance().getProfiler().startChrono();
onBeforeRequest();
try {
if (!executeRequest()) {
OLogManager.instance().error(this, "Request not supported. Code: " + requestType);
channel.clearInput();
sendError(clientTxId, new ONetworkProtocolException("Request not supported. Code: " + requestType));
}
} finally {
onAfterRequest();
}
} catch (IOException e) {
handleConnectionError(channel, e);
sendShutdown();
} catch (OException e) {
sendError(clientTxId, e);
} catch (RuntimeException e) {
sendError(clientTxId, e);
} catch (Throwable t) {
sendError(clientTxId, t);
} finally {
Orient.instance().getProfiler()
.stopChrono("server.network.requests", "Total received requests", timer, "server.network.requests");
OSerializationThreadLocal.INSTANCE.get().clear();
}
}
@Override
public void shutdown() {
channel.close();
}
@Override
public OChannel getChannel() {
return channel;
}
protected void sendOk(final int iClientTxId) throws IOException {
channel.writeByte(OChannelBinaryProtocol.RESPONSE_STATUS_OK);
channel.writeInt(iClientTxId);
}
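  /**
   * Sends a failure response to the client: for every exception in the cause chain one
   * marker byte (1) followed by the exception class name and message, terminated by a
   * 0 byte.
   */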
protected void sendError(final int iClientTxId, final Throwable t) throws IOException {
channel.acquireWriteLock();
try {
channel.writeByte(OChannelBinaryProtocol.RESPONSE_STATUS_ERROR);
channel.writeInt(iClientTxId);
Throwable current;
if (t instanceof OLockException && t.getCause() instanceof ODatabaseException)
// BYPASS THE DB POOL EXCEPTION TO PROPAGATE THE RIGHT SECURITY ONE
current = t.getCause();
else
current = t;
while (current != null) {
// MORE DETAILS ARE COMING AS EXCEPTION
channel.writeByte((byte) 1);
channel.writeString(current.getClass().getName());
        // current is guaranteed non-null by the loop condition
        channel.writeString(current.getMessage());
current = current.getCause();
}
channel.writeByte((byte) 0);
channel.flush();
if (OLogManager.instance().isLevelEnabled(logClientExceptions)) {
if (logClientFullStackTrace)
OLogManager.instance().log(this, logClientExceptions, "Sent run-time exception to the client %s: %s", t,
channel.socket.getRemoteSocketAddress(), t.toString());
else
OLogManager.instance().log(this, logClientExceptions, "Sent run-time exception to the client %s: %s", null,
channel.socket.getRemoteSocketAddress(), t.toString());
}
} catch (Exception e) {
if (e instanceof SocketException)
shutdown();
} finally {
channel.releaseWriteLock();
}
}
/**
* Write a OIdentifiable instance using this format:<br/>
* - 2 bytes: class id [-2=no record, -3=rid, -1=no class id, > -1 = valid] <br/>
* - 1 byte: record type [d,b,f] <br/>
* - 2 bytes: cluster id <br/>
* - 8 bytes: position in cluster <br/>
* - 4 bytes: record version <br/>
* - x bytes: record content <br/>
*
* @param o
* @throws IOException
*/
public void writeIdentifiable(final OIdentifiable o) throws IOException {
if (o == null)
channel.writeShort(OChannelBinaryProtocol.RECORD_NULL);
else if (o instanceof ORecordId) {
channel.writeShort(OChannelBinaryProtocol.RECORD_RID);
channel.writeRID((ORID) o);
} else {
writeRecord((ORecordInternal<?>) o.getRecord());
}
}
private void writeRecord(final ORecordInternal<?> iRecord) throws IOException {
channel.writeShort((short) 0);
channel.writeByte(iRecord.getRecordType());
channel.writeRID(iRecord.getIdentity());
channel.writeVersion(iRecord.getRecordVersion());
try {
final byte[] stream = iRecord.toStream();
      // TRIM TRAILING SPACES (DUE TO OVERSIZE)
int realLength = stream.length;
for (int i = stream.length - 1; i > -1; --i) {
if (stream[i] == 32)
--realLength;
else
break;
}
channel.writeBytes(stream, realLength);
} catch (Exception e) {
channel.writeBytes(null);
OLogManager.instance().error(this, "Error on unmarshalling record " + iRecord.getIdentity().toString() + " (" + e + ")",
OSerializationException.class);
}
}
protected void checkStorageExistence(final String iDatabaseName) {
for (OStorage stg : Orient.instance().getStorages()) {
if (stg.getName().equalsIgnoreCase(iDatabaseName) && stg.exists())
throw new ODatabaseException("Database named '" + iDatabaseName + "' already exists: " + stg);
}
}
protected ODatabaseDocumentTx createDatabase(final ODatabaseDocumentTx iDatabase, String dbUser, final String dbPasswd) {
if (iDatabase.exists())
throw new ODatabaseException("Database '" + iDatabase.getURL() + "' already exists");
iDatabase.create();
if (dbUser != null) {
OUser oUser = iDatabase.getMetadata().getSecurity().getUser(dbUser);
if (oUser == null) {
iDatabase.getMetadata().getSecurity().createUser(dbUser, dbPasswd, new String[] { ORole.ADMIN });
} else {
oUser.setPassword(dbPasswd);
oUser.save();
}
}
OLogManager.instance().info(this, "Created database '%s' of type '%s'", iDatabase.getName(),
iDatabase.getStorage() instanceof OStorageLocalAbstract ? iDatabase.getStorage().getType() : "memory");
// if (iDatabase.getStorage() instanceof OStorageLocal)
// // CLOSE IT BECAUSE IT WILL BE OPEN AT FIRST USE
// iDatabase.close();
return iDatabase;
}
protected ODatabaseDocumentTx getDatabaseInstance(final String dbName, final String dbType, final String storageType) {
String path;
final OStorage stg = Orient.instance().getStorage(dbName);
if (stg != null)
path = stg.getURL();
else if (storageType.equals(OEngineLocal.NAME) || storageType.equals(OEngineLocalPaginated.NAME)) {
      // if this storage was configured, always return the path from the config file; otherwise return the default path
path = server.getConfiguration().getStoragePath(dbName);
if (path == null)
path = storageType + ":" + server.getDatabaseDirectory() + "/" + dbName;
} else if (storageType.equals(OEngineMemory.NAME)) {
path = storageType + ":" + dbName;
} else
throw new IllegalArgumentException("Cannot create database: storage mode '" + storageType + "' is not supported.");
return Orient.instance().getDatabaseFactory().createDatabase(dbType, path);
}
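  // Illustrative examples of the URLs resolved above: an already-registered storage reuses
  // its own URL; otherwise a disk engine yields e.g. "local:<dbDir>/mydb" or
  // "plocal:<dbDir>/mydb", and the memory engine yields "memory:mydb".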
protected int deleteRecord(final ODatabaseRecord iDatabase, final ORID rid, final ORecordVersion version) {
try {
iDatabase.delete(rid, version);
return 1;
    } catch (Exception e) {
      // deletion failed (e.g. record not found or version conflict): report zero deleted records
      return 0;
}
}
protected int cleanOutRecord(final ODatabaseRecord iDatabase, final ORID rid, final ORecordVersion version) {
iDatabase.delete(rid, version);
return 1;
}
protected ORecordInternal<?> createRecord(final ODatabaseRecord iDatabase, final ORecordId rid, final byte[] buffer,
final byte recordType, final int dataSegmentId) {
final ORecordInternal<?> record = Orient.instance().getRecordFactoryManager().newInstance(recordType);
record.fill(rid, OVersionFactory.instance().createVersion(), buffer, true);
if (dataSegmentId > 0)
record.setDataSegmentName(iDatabase.getDataSegmentNameById(dataSegmentId));
iDatabase.save(record);
return record;
}
protected ORecordVersion updateRecord(final ODatabaseRecord iDatabase, final ORecordId rid, final byte[] buffer,
final ORecordVersion version, final byte recordType) {
final ORecordInternal<?> newRecord = Orient.instance().getRecordFactoryManager().newInstance(recordType);
newRecord.fill(rid, version, buffer, true);
// if (((OSchemaProxy) iDatabase.getMetadata().getSchema()).getIdentity().equals(rid))
// // || ((OIndexManagerImpl) connection.database.getMetadata().getIndexManager()).getDocument().getIdentity().equals(rid)) {
// throw new OSecurityAccessException("Cannot update internal record " + rid);
final ORecordInternal<?> currentRecord;
if (newRecord instanceof ODocument) {
currentRecord = iDatabase.load(rid);
if (currentRecord == null)
throw new ORecordNotFoundException(rid.toString());
((ODocument) currentRecord).merge((ODocument) newRecord, false, false);
} else
currentRecord = newRecord;
currentRecord.getRecordVersion().copyFrom(version);
iDatabase.save(currentRecord);
if (currentRecord.getIdentity().toString().equals(iDatabase.getStorage().getConfiguration().indexMgrRecordId)
&& !iDatabase.getStatus().equals(STATUS.IMPORTING)) {
// FORCE INDEX MANAGER UPDATE. THIS HAPPENS FOR DIRECT CHANGES FROM REMOTE LIKE IN GRAPH
iDatabase.getMetadata().getIndexManager().reload();
}
return currentRecord.getRecordVersion();
}
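  // Note on the merge above (illustrative): for documents, merge(newRecord, false, false)
  // replaces the stored record's fields with the incoming ones, overwriting multi-value
  // fields wholesale rather than merging their items; non-document record types are
  // saved through unchanged.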
protected void handleConnectionError(final OChannelBinaryServer channel, final Throwable e) {
try {
channel.flush();
    } catch (IOException e1) {
      // best-effort flush on a broken connection: nothing more can be done here
    }
}
}
| 1no label
|
server_src_main_java_com_orientechnologies_orient_server_network_protocol_binary_OBinaryNetworkProtocolAbstract.java
|
63 |
{
@Override
public TransactionState create( Transaction tx )
{
return new NoTransactionState()
{
@Override
@SuppressWarnings("deprecation")
public TxIdGenerator getTxIdGenerator()
{
return TxIdGenerator.DEFAULT;
}
};
}
};
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestXaFramework.java
|
320 |
public class NodesHotThreadsResponse extends NodesOperationResponse<NodeHotThreads> {
NodesHotThreadsResponse() {
}
public NodesHotThreadsResponse(ClusterName clusterName, NodeHotThreads[] nodes) {
super(clusterName, nodes);
}
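  // Wire format note: writeTo emits a VInt element count followed by each NodeHotThreads
  // entry; readFrom consumes them in the same order, so the two methods must stay symmetric.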
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodes = new NodeHotThreads[in.readVInt()];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeHotThreads.readNodeHotThreads(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(nodes.length);
for (NodeHotThreads node : nodes) {
node.writeTo(out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_hotthreads_NodesHotThreadsResponse.java
|