name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
hbase_AsyncAdmin_mergeSwitch_rdh | /**
* Turn the Merge switch on or off.
*
* @param enabled
* enabled or not
* @return Previous switch value wrapped by a {@link CompletableFuture}
*/
default CompletableFuture<Boolean> mergeSwitch(boolean enabled) {
return m4(enabled, false);} | 3.26 |
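
A minimal client-side sketch of using this switch (not taken from the snippet above; it assumes a reachable cluster, the HBase client on the classpath, and an illustrative class name): `mergeSwitch(false)` turns region merges off and the returned future completes with the previous switch state.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MergeSwitchExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      // Disable region merges; the future completes with the previous switch state.
      boolean wasEnabled = admin.mergeSwitch(false).get();
      System.out.println("Merge switch was previously enabled: " + wasEnabled);
    }
  }
}
```
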
hbase_TakeSnapshotHandler_process_rdh | /**
* Execute the core common portions of taking a snapshot. The {@link #snapshotRegions(List)} call
* should get implemented for each snapshot flavor.
*/
@Override
@SuppressWarnings(value = "REC_CATCH_EXCEPTION", justification = "Intentional")
public void process() {
String msg = (((((("Running " + snapshot.getType()) + " table snapshot ") + snapshot.getName()) + " ") + eventType) + " on table ") + snapshotTable;
LOG.info(msg);
MasterLock tableLockToRelease = this.tableLock;
status.setStatus(msg);
try {
if (downgradeToSharedTableLock()) {
// release the exclusive lock and hold the shared lock instead
tableLockToRelease = master.getLockManager().createMasterLock(snapshotTable, LockType.SHARED, (this.getClass().getName() + ": take snapshot ") + snapshot.getName());
tableLock.release();
tableLockToRelease.acquire();
}
// If regions move after this meta scan, the region specific snapshot should fail, triggering
// an external exception that gets captured here.
// write down the snapshot info in the working directory
SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, workingDirFs);
snapshotManifest.addTableDescriptor(this.htd);
f0.rethrowException();
List<Pair<RegionInfo, ServerName>> regionsAndLocations = master.getAssignmentManager().getTableRegionsAndLocations(snapshotTable, false);// run the snapshot
snapshotRegions(regionsAndLocations);
f0.rethrowException();
// flush the in-memory state, and write the single manifest
status.setStatus("Consolidate snapshot: " + snapshot.getName());
snapshotManifest.consolidate();
// verify the snapshot is valid
status.setStatus("Verifying snapshot: " + snapshot.getName());
verifier.verifySnapshot(workingDir, true);
// complete the snapshot, atomically moving from tmp to .snapshot dir.
SnapshotDescriptionUtils.completeSnapshot(this.snapshotDir, this.workingDir, this.rootFs, this.workingDirFs, this.conf);
finished = true;
msg = ((("Snapshot " + snapshot.getName()) + " of table ") + snapshotTable) + " completed";
status.markComplete(msg);
LOG.info(msg);
metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime());
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postCompletedSnapshotAction(ProtobufUtil.createSnapshotDesc(snapshot), this.htd);
}
} catch (Exception e) {
// FindBugs: REC_CATCH_EXCEPTION
status.abort((((("Failed to complete snapshot " + snapshot.getName()) + " on table ") + snapshotTable) + " because ") + e.getMessage());
String reason =
(("Failed taking snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot)) + " due to exception:") + e.getMessage();
LOG.error(reason, e);
ForeignException ee = new ForeignException(reason, e);
f0.receive(ee);
// need to mark this completed to close off and allow cleanup to happen.
cancel(reason);
} finally
{
LOG.debug("Launching cleanup of working dir:" + workingDir);
try {
// if the working dir is still present, the snapshot has failed. if it is present, we delete
// it.
if (!workingDirFs.delete(workingDir, true)) {
LOG.error("Couldn't delete snapshot working directory:" + workingDir);
}
} catch (IOException e) {
LOG.error("Couldn't delete snapshot working directory:" + workingDir);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Table snapshot journal : \n" + status.prettyPrintJournal());
}
tableLockToRelease.release();
}
} | 3.26 |
hbase_TakeSnapshotHandler_snapshotDisabledRegion_rdh | /**
* Take a snapshot of the specified disabled region
*/
protected void snapshotDisabledRegion(final RegionInfo regionInfo) throws IOException {
snapshotManifest.addRegion(CommonFSUtils.getTableDir(rootDir, snapshotTable), regionInfo);
f0.rethrowException();
status.setStatus((("Completed referencing HFiles for offline region " + regionInfo.toString()) + " of table: ") + snapshotTable);
} | 3.26 |
hbase_WALKey_toStringMap_rdh | /**
* Produces a string map for this key. Useful for programmatic use and manipulation of the data
 * stored in a WALKeyImpl, for example, printing as JSON.
*
* @return a Map containing data from this key
*/
default Map<String, Object> toStringMap() {
Map<String, Object> stringMap = new HashMap<>();
stringMap.put("table", getTableName());
stringMap.put("region", Bytes.toStringBinary(m0()));
stringMap.put("sequence", getSequenceId());
Map<String, byte[]> extendedAttributes = getExtendedAttributes();
if (extendedAttributes != null) {
for (Map.Entry<String, byte[]> entry : extendedAttributes.entrySet()) {
stringMap.put(entry.getKey(), Bytes.toStringBinary(entry.getValue()));
}
}
return stringMap;
} | 3.26 |
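
A small sketch of the JSON use case the Javadoc mentions, assuming Gson (or any other JSON serializer) is on the classpath; the helper class below is illustrative and not part of HBase.

```java
import java.util.Map;
import com.google.gson.Gson;
import org.apache.hadoop.hbase.wal.WALKey;

public final class WalKeyJson {
  private WalKeyJson() {
  }

  /** Serializes the key's string map as JSON. */
  public static String toJson(WALKey key) {
    Map<String, Object> fields = key.toStringMap();
    return new Gson().toJson(fields);
  }
}
```
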
hbase_WALKey_getNonceGroup_rdh | /**
* Returns The nonce group
*/
default long getNonceGroup() {
return HConstants.NO_NONCE;
} | 3.26 |
hbase_WALKey_getExtendedAttribute_rdh | /**
* Return a named String value injected into the WALKey during processing, such as by a
* coprocessor
*
* @param attributeKey
* The key of a key / value pair
*/
default byte[] getExtendedAttribute(String attributeKey) {
return null;
} | 3.26 |
hbase_WALKey_getNonce_rdh | /**
* Returns The nonce
*/
default long getNonce() {
return HConstants.NO_NONCE;
} | 3.26 |
hbase_WALKey_getExtendedAttributes_rdh | /**
* Returns a map of all extended attributes injected into this WAL key.
*/
default Map<String, byte[]> getExtendedAttributes() {
return new HashMap<>();
} | 3.26 |
hbase_ServerCall_done_rdh | /**
* Call is done. Execution happened and we returned results to client. It is now safe to cleanup.
*/
@SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", justification = "Presume the lock on processing request held by caller is protection enough")
@Override
public void done() {
if (this.cellBlockStream != null) {
// This will return back the BBs which we got from pool.
this.cellBlockStream.releaseResources();
this.cellBlockStream = null;
}
// If the call was run successfully, we might have already returned the BB
// back to pool. No worries..Then inputCellBlock will be null
cleanup();
span.end();
} | 3.26 |
hbase_MapReduceBackupMergeJob_copyFile_rdh | /**
* Copy file in DFS from p to newPath
*
* @param fs
* file system
* @param p
* old path
* @param newPath
* new path
* @throws IOException
* exception
*/
protected void copyFile(FileSystem fs, Path p, Path newPath) throws IOException {
try (InputStream in = fs.open(p);OutputStream out = fs.create(newPath, true)) {
IOUtils.copy(in, out);
}
boolean exists = fs.exists(newPath);
if (!exists) {
throw new IOException("Failed to copy meta file to: " + newPath);
}
} | 3.26 |
hbase_MapReduceBackupMergeJob_copyMetaData_rdh | /**
 * Copy the metadata of a backup session
*
* @param fs
* file system
* @param tmpBackupDir
 * temp backup directory, where the metadata is located
* @param backupDirPath
* new path for backup
* @throws IOException
* exception
*/
protected void copyMetaData(FileSystem fs, Path tmpBackupDir, Path backupDirPath) throws IOException {
RemoteIterator<LocatedFileStatus> it = fs.listFiles(tmpBackupDir, true);
List<Path> toKeep = new ArrayList<Path>();
while (it.hasNext()) {
Path p = it.next().getPath();
if (fs.isDirectory(p)) {
continue;
}
// Keep meta
String fileName = p.toString();
if ((fileName.indexOf(FSTableDescriptors.TABLEINFO_DIR) > 0) || (fileName.indexOf(HRegionFileSystem.REGION_INFO_FILE) > 0)) {
toKeep.add(p);
}
}
// Copy meta to destination
for (Path p : toKeep) {
Path newPath = convertToDest(p, backupDirPath);
copyFile(fs, p, newPath);
}
} | 3.26 |
hbase_MapReduceBackupMergeJob_convertToDest_rdh | /**
* Converts path before copying
*
* @param p
* path
* @param backupDirPath
* backup root
* @return converted path
*/
protected Path convertToDest(Path p, Path backupDirPath) {
String v32 = backupDirPath.getName();
Deque<String> stack = new ArrayDeque<String>();
String name = null;
while (true) {
name = p.getName();
if (!name.equals(v32)) {
stack.push(name);
p = p.getParent();
} else {
break;
}
}
Path newPath = new Path(backupDirPath.toString());
while (!stack.isEmpty()) {
newPath = new Path(newPath, stack.pop());
}
return newPath;
} | 3.26 |
hbase_DefaultVisibilityLabelServiceImpl_mutateLabelsRegion_rdh | /**
 * Adds the mutations to the labels region and sets the results in finalOpStatus. finalOpStatus
 * might already have some entries whose OpStatus is FAILURE. We will leave those as-is and set
 * the others in order.
*
* @return whether we need a ZK update or not.
*/
private boolean mutateLabelsRegion(List<Mutation> mutations, OperationStatus[] finalOpStatus) throws IOException {
OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]));
int i = 0;
boolean updateZk = false;
for (OperationStatus status : opStatus) {
// Update the zk when at least one of the mutations was added successfully.
updateZk = updateZk || (status.getOperationStatusCode() == OperationStatusCode.SUCCESS);
for (; i < finalOpStatus.length; i++) {
if (finalOpStatus[i] == null) {
finalOpStatus[i] = status;
break;
}
}
}
return updateZk;
} | 3.26 |
hbase_DefaultVisibilityLabelServiceImpl_compareTagsOrdinals_rdh | /* @return true when all the visibility tags in Put matches with visibility tags in Delete. */
private static boolean compareTagsOrdinals(List<List<Integer>> putVisTags, List<List<Integer>> deleteVisTags) {
boolean matchFound = false;
if (deleteVisTags.size() == putVisTags.size()) {
for (List<Integer> deleteTagOrdinals : deleteVisTags) {
matchFound = false;
for (List<Integer> tagOrdinals : putVisTags) {
if (deleteTagOrdinals.equals(tagOrdinals)) {
matchFound = true;
break;
}
}
if (!matchFound)
break;
}
}
return matchFound;
} | 3.26 |
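
A worked example of the matching rule with illustrative ordinal data only (the method itself is private, so this simply restates its contract): both sides must contain the same number of ordinal lists, and every Delete list needs an exact counterpart among the Put lists.

```java
import java.util.Arrays;
import java.util.List;

public class TagOrdinalMatchExample {
  public static void main(String[] args) {
    List<List<Integer>> putVisTags = Arrays.asList(Arrays.asList(1, 3), Arrays.asList(2));

    // Same size, and both {2} and {1, 3} appear among the Put lists -> would match.
    List<List<Integer>> matchingDelete = Arrays.asList(Arrays.asList(2), Arrays.asList(1, 3));

    // Same size, but {4} has no counterpart among the Put lists -> would not match.
    List<List<Integer>> nonMatchingDelete = Arrays.asList(Arrays.asList(2), Arrays.asList(4));

    System.out.println(putVisTags + " vs " + matchingDelete + " -> match expected");
    System.out.println(putVisTags + " vs " + nonMatchingDelete + " -> no match expected");
  }
}
```
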
hbase_DefaultVisibilityLabelServiceImpl_createModifiedVisExpression_rdh | /**
 * @param tags - all the visibility tags associated with the current Cell
*
* @return - the modified visibility expression as byte[]
*/
private byte[] createModifiedVisExpression(final List<Tag> tags) throws IOException {
StringBuilder visibilityString = new StringBuilder();
for (Tag tag : tags) {
if (tag.getType() == TagType.VISIBILITY_TAG_TYPE) {
if (visibilityString.length() != 0) {
visibilityString.append(VisibilityConstants.CLOSED_PARAN).append(VisibilityConstants.OR_OPERATOR);
}
int offset = tag.getValueOffset();
int endOffset = offset + tag.getValueLength();
boolean expressionStart = true;
while (offset < endOffset) {
Pair<Integer, Integer> result = TagUtil.readVIntValuePart(tag, offset);
int currLabelOrdinal = result.getFirst();
if (currLabelOrdinal < 0) {
int temp = -currLabelOrdinal;
String label = this.labelsCache.getLabel(temp);
if (expressionStart) {
// Quote every label in case of unicode characters if present
visibilityString.append(VisibilityConstants.OPEN_PARAN).append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label));
} else {
visibilityString.append(VisibilityConstants.AND_OPERATOR).append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label));
}
} else {
String label = this.labelsCache.getLabel(currLabelOrdinal);
if (expressionStart) {
visibilityString.append(VisibilityConstants.OPEN_PARAN).append(CellVisibility.quote(label));
} else {
visibilityString.append(VisibilityConstants.AND_OPERATOR).append(CellVisibility.quote(label));
}
}
expressionStart = false;
offset += result.getSecond();
}
}
}
if (visibilityString.length() != 0) {
visibilityString.append(VisibilityConstants.CLOSED_PARAN);
// Return the string formed as byte[]
return Bytes.toBytes(visibilityString.toString());
}
return null;
} | 3.26 |
hbase_Cacheable_refCnt_rdh | /**
* Reference count of this Cacheable.
*/
default int refCnt() {
return 0;
} | 3.26 |
hbase_Cacheable_retain_rdh | /**
* ***************************** ReferenceCounted Interfaces **********************************
*/
/**
 * Increase its reference count; the object's memory can be freed only when no references remain.
*/
default Cacheable retain() {
return this;
} | 3.26 |
hbase_Cacheable_release_rdh | /**
 * Decrease its reference count, and if no references remain, free the memory of this object. Its
 * backend is usually a {@link org.apache.hadoop.hbase.nio.ByteBuff}, and we will put its NIO
 * ByteBuffers back to the {@link org.apache.hadoop.hbase.io.ByteBuffAllocator}
*/
default boolean release() {
return false;
} | 3.26 |
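
A hedged sketch of the reference-counting discipline these three methods imply; the helper below is illustrative rather than an HBase API, and assumes the block was handed out by a block cache.

```java
import org.apache.hadoop.hbase.io.hfile.Cacheable;

public final class CacheableUsage {
  private CacheableUsage() {
  }

  /** Pins the block while {@code reader} runs, then releases it, even if reading fails. */
  public static void withRetained(Cacheable block, Runnable reader) {
    block.retain();      // pin the backing ByteBuff while it is being read
    try {
      reader.run();
    } finally {
      block.release();   // at refCnt 0 the NIO buffers go back to the ByteBuffAllocator
    }
  }
}
```
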
hbase_MapReduceBackupCopyJob_copy_rdh | /**
* Do backup copy based on different types.
*
* @param context
* The backup info
* @param conf
* The hadoop configuration
* @param copyType
* The backup copy type
* @param options
* Options for customized ExportSnapshot or DistCp
* @throws IOException
* exception
*/
@Override
public int copy(BackupInfo context, BackupManager backupManager, Configuration conf, BackupType copyType, String[] options) throws IOException {
int res = 0;
try {
if (copyType == BackupType.FULL) {
SnapshotCopy snapshotCp = new SnapshotCopy(context, context.getTableBySnapshot(options[1]));
LOG.debug("Doing SNAPSHOT_COPY");
// Make a new instance of conf to be used by the snapshot copy class.
snapshotCp.setConf(new Configuration(conf));
res = snapshotCp.run(options);
} else if (copyType == BackupType.INCREMENTAL) {
LOG.debug("Doing COPY_TYPE_DISTCP");
setSubTaskPercntgInWholeTask(1.0F);
BackupDistCp distcp = new BackupDistCp(new Configuration(conf), null, context, backupManager);
// Handle a special case where the source file is a single file.
// In this case, distcp will not create the target dir. It just takes the
// target as a file name and copies the source file to the target (as a file name).
// We need to create the target dir before running distcp.
LOG.debug("DistCp options: " + Arrays.toString(options));
Path dest = new Path(options[options.length - 1]);
String[] newOptions = new String[options.length + 1];
System.arraycopy(options, 0, newOptions, 1, options.length);
newOptions[0] = "-async";// run DisCp in async mode
FileSystem destfs = dest.getFileSystem(conf);
if (!destfs.exists(dest)) {
destfs.mkdirs(dest);
}
res = distcp.run(newOptions);
}
return res;
} catch (Exception e) {
throw new IOException(e);
}
} | 3.26 |
hbase_MapReduceBackupCopyJob_setSubTaskPercntgInWholeTask_rdh | /**
* Set the current copy task percentage within the whole task if multiple copies are needed. Must
* be called before calling
* {@link #copy(BackupInfo, BackupManager, Configuration, BackupType, String[])}
*
* @param subTaskPercntgInWholeTask
* The percentage of the copy subtask
*/
public void setSubTaskPercntgInWholeTask(float subTaskPercntgInWholeTask) {
this.subTaskPercntgInWholeTask = subTaskPercntgInWholeTask;
} | 3.26 |
hbase_MapReduceBackupCopyJob_getSubTaskPercntgInWholeTask_rdh | /**
* Get the current copy task percentage within the whole task if multiple copies are needed.
*
* @return the current copy task percentage
*/
public float getSubTaskPercntgInWholeTask() {
return subTaskPercntgInWholeTask;
} | 3.26 |
hbase_Constraints_m0_rdh | /**
* Read the {@link Configuration} stored in the byte stream.
*
* @param bytes
* to read from
* @return A valid configuration
*/
private static Configuration m0(byte[] bytes) throws IOException {
ByteArrayInputStream is = new ByteArrayInputStream(bytes);
Configuration conf = new Configuration(false);
conf.addResource(is);
return conf;
} | 3.26 |
hbase_Constraints_addConstraint_rdh | /**
* Write the raw constraint and configuration to the descriptor.
* <p/>
* This method takes care of creating a new configuration based on the passed in configuration and
* then updating that with enabled and priority of the constraint.
* <p/>
* When a constraint is added, it is automatically enabled.
*/
private static TableDescriptorBuilder addConstraint(TableDescriptorBuilder builder, Class<? extends Constraint> clazz, Configuration conf, long priority) throws IOException {
return writeConstraint(builder, serializeConstraintClass(clazz), configure(conf, true, priority));
} | 3.26 |
hbase_Constraints_configure_rdh | /**
* Setup the configuration for a constraint as to whether it is enabled and its priority
*
* @param conf
* on which to base the new configuration
* @param enabled
* <tt>true</tt> if it should be run
* @param priority
* relative to other constraints
* @return a new configuration, storable in the {@link TableDescriptor}
*/
private static Configuration configure(Configuration conf, boolean enabled, long priority) {
// create the configuration to actually be stored
// clone if possible, but otherwise just create an empty configuration
Configuration toWrite = (conf == null) ? new Configuration() : new Configuration(conf);
// update internal properties
toWrite.setBooleanIfUnset(ENABLED_KEY, enabled);
// set the priority if unset
if (toWrite.getLong(PRIORITY_KEY, UNSET_PRIORITY) == UNSET_PRIORITY) {
toWrite.setLong(PRIORITY_KEY, priority);
}
return toWrite;
} | 3.26 |
hbase_Constraints_disableConstraint_rdh | /**
* Disable the given {@link Constraint}. Retains all the information (e.g. Configuration) for the
* {@link Constraint}, but it just doesn't load the {@link Constraint} on the table.
*
* @param builder
* {@link TableDescriptorBuilder} to modify
* @param clazz
* {@link Constraint} to disable.
* @throws IOException
* if the constraint cannot be found
*/
public static void disableConstraint(TableDescriptorBuilder builder, Class<? extends Constraint> clazz) throws IOException {
m2(builder, clazz, false);} | 3.26 |
hbase_Constraints_serializeConfiguration_rdh | /**
* Write the configuration to a String
*
* @param conf
* to write
* @return String representation of that configuration
*/
private static String serializeConfiguration(Configuration conf) throws IOException {
// write the configuration out to the data stream
ByteArrayOutputStream bos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(bos);
conf.writeXml(dos);
dos.flush();
byte[] data = bos.toByteArray();
return Bytes.toString(data);
} | 3.26 |
hbase_Constraints_setConfiguration_rdh | /**
* Update the configuration for the {@link Constraint}; does not change the order in which the
* constraint is run.
*
* @param builder
* {@link TableDescriptorBuilder} to update
* @param clazz
* {@link Constraint} to update
* @param configuration
* to update the {@link Constraint} with.
* @throws IOException
* if the Constraint was not stored correctly
* @throws IllegalArgumentException
* if the Constraint was not present on this table.
*/
public static TableDescriptorBuilder setConfiguration(TableDescriptorBuilder builder, Class<? extends Constraint> clazz, Configuration configuration) throws IOException, IllegalArgumentException {
// get the entry for this class
Pair<String, String> e = getKeyValueForClass(builder, clazz);
if (e == null) {
throw new IllegalArgumentException(("Constraint: " + clazz.getName()) + " is not associated with this table.");
}
// clone over the configuration elements
Configuration conf = new Configuration(configuration);
// read in the previous info about the constraint
Configuration internal = m0(e.getSecond());
// update the fields based on the previous settings
conf.setIfUnset(ENABLED_KEY, internal.get(ENABLED_KEY));
conf.setIfUnset(PRIORITY_KEY, internal.get(PRIORITY_KEY));
// update the current value
return writeConstraint(builder, e.getFirst(), conf);
} | 3.26 |
hbase_Constraints_m1_rdh | /**
* Read in the configuration from the String encoded configuration
*
* @param bytes
* to read from
* @return A valid configuration
* @throws IOException
* if the configuration could not be read
*/
private static Configuration m1(String bytes) throws IOException {
return m0(Bytes.toBytes(bytes));
} | 3.26 |
hbase_Constraints_remove_rdh | /**
* Remove the constraint (and associated information) for the table descriptor.
*
* @param builder
* {@link TableDescriptorBuilder} to modify
* @param clazz
* {@link Constraint} class to remove
*/
public static TableDescriptorBuilder remove(TableDescriptorBuilder builder, Class<? extends Constraint> clazz) {
String key = serializeConstraintClass(clazz);
return builder.removeValue(key);
} | 3.26 |
hbase_Constraints_enable_rdh | /**
* Enable constraints on a table.
* <p/>
* Currently, if you attempt to add a constraint to the table, then Constraints will automatically
* be turned on.
*/
public static TableDescriptorBuilder enable(TableDescriptorBuilder builder) throws IOException {
if (!builder.hasCoprocessor(ConstraintProcessor.class.getName())) {
builder.setCoprocessor(ConstraintProcessor.class.getName());
}
return builder;
} | 3.26 |
hbase_Constraints_add_rdh | /**
* Add a {@link Constraint} to the table with the given configuration
* <p/>
* Each constraint, when added to the table, will have a specific priority, dictating the order in
* which the {@link Constraint} will be run. A {@link Constraint} added will run on the
* regionserver before those added to the {@link TableDescriptorBuilder} later.
*
* @param builder
* {@link TableDescriptorBuilder} to add a {@link Constraint}
* @param constraint
* to be added
* @param conf
* configuration associated with the constraint
* @throws IOException
* if any constraint could not be deserialized. Assumes if 1 constraint is not
* loaded properly, something has gone terribly wrong and that all constraints
* need to be enforced.
*/
public static TableDescriptorBuilder add(TableDescriptorBuilder builder, Class<? extends Constraint> constraint, Configuration conf) throws IOException {
enable(builder);
long priority = getNextPriority(builder);
addConstraint(builder, constraint, conf, priority++);
return updateLatestPriority(builder, priority);
} | 3.26 |
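
A hedged usage sketch of `add`; `MyConstraint` is a hypothetical user-supplied Constraint implementation (not part of HBase) and the configuration key is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.constraint.Constraints;

public class AddConstraintExample {
  public static TableDescriptor buildDescriptor() throws Exception {
    Configuration constraintConf = new Configuration(false);
    constraintConf.set("my.constraint.max.value.length", "128"); // illustrative key

    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"));
    // MyConstraint is a hypothetical Constraint subclass you provide yourself.
    // add(...) registers the ConstraintProcessor coprocessor (via enable) and stores the
    // constraint class plus its configuration in the table descriptor.
    builder = Constraints.add(builder, MyConstraint.class, constraintConf);
    return builder.build();
  }
}
```
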
hbase_Constraints_enableConstraint_rdh | /**
* Enable the given {@link Constraint}. Retains all the information (e.g. Configuration) for the
* {@link Constraint}, but makes sure that it gets loaded on the table.
*
* @param builder
* {@link TableDescriptorBuilder} to modify
* @param clazz
* {@link Constraint} to enable
* @throws IOException
* If the constraint cannot be properly deserialized
*/
public static void enableConstraint(TableDescriptorBuilder builder, Class<? extends Constraint> clazz) throws IOException {
m2(builder, clazz, true);
} | 3.26 |
hbase_Constraints_m2_rdh | /**
* Change the whether the constraint (if it is already present) is enabled or disabled.
*/
private static TableDescriptorBuilder m2(TableDescriptorBuilder builder, Class<? extends Constraint> clazz, boolean enabled) throws IOException {
// get the original constraint
Pair<String, String> entry = getKeyValueForClass(builder, clazz);
if (entry == null) {
throw new IllegalArgumentException(("Constraint: " + clazz.getName()) + " is not associated with this table. You can't enable it!");
}
// create a new configuration from that conf
Configuration conf = m0(entry.getSecond());
// set that it is enabled
conf.setBoolean(ENABLED_KEY, enabled);
// write it back out
return writeConstraint(builder, entry.getFirst(), conf);
}
/**
* Check to see if the given constraint is enabled.
*
* @param desc
* {@link TableDescriptor} to check.
* @param clazz
* {@link Constraint} to check for
* @return <tt>true</tt> if the {@link Constraint} | 3.26 |
hbase_Constraints_getConstraints_rdh | /**
* Get the constraints stored in the table descriptor
*
* @param desc
* To read from
* @param classloader
* To use when loading classes. If a special classloader is used on a region,
* for instance, then that should be the classloader used to load the
* constraints. This could also apply to unit-testing situation, where want to
* ensure that class is reloaded or not.
* @return List of configured {@link Constraint Constraints}
* @throws IOException
* if any part of reading/arguments fails
*/
static List<? extends Constraint> getConstraints(TableDescriptor desc, ClassLoader classloader) throws IOException {
List<Constraint> constraints = new ArrayList<>();
// loop through all the key, values looking for constraints
for (Map.Entry<Bytes, Bytes> e : desc.getValues().entrySet()) {
// read out the constraint
String key = Bytes.toString(e.getKey().get()).trim();
String[] className = CONSTRAINT_HTD_ATTR_KEY_PATTERN.split(key);
if (className.length == 2) {
key = className[1];
if (LOG.isDebugEnabled()) {
LOG.debug("Loading constraint:" + key);
}
// read in the rest of the constraint
Configuration conf;
try {
conf = m0(e.getValue().get());
} catch (IOException e1) {
// log that we don't have a valid configuration stored, and move on.
LOG.warn(("Corrupted configuration found for key:" + key) + ", skipping it.");
continue;
}// if it is not enabled, skip it
if (!conf.getBoolean(ENABLED_KEY, false)) {
LOG.debug("Constraint: {} is DISABLED - skipping it", key);
// go to the next constraint
continue;
}
try {
// add the constraint, now that we expect it to be valid.
Class<? extends Constraint> clazz = classloader.loadClass(key).asSubclass(Constraint.class);
Constraint constraint = clazz.getDeclaredConstructor().newInstance();
constraint.setConf(conf);
constraints.add(constraint);
} catch (InvocationTargetException | NoSuchMethodException | ClassNotFoundException | InstantiationException |
IllegalAccessException e1) {
throw new IOException(e1);
}
}
}
// sort them, based on the priorities
Collections.sort(constraints, constraintComparator);
return constraints;
} | 3.26 |
hbase_Constraints_serializeConstraintClass_rdh | /**
 * Just write the class name as a String representation to be used as a key in the
 * {@link TableDescriptor}
*
* @param clazz
* Constraint class to convert to a {@link TableDescriptor} key
* @return key to store in the {@link TableDescriptor}
*/
private static String serializeConstraintClass(Class<? extends Constraint> clazz) {
String constraintClazz = clazz.getName();
return CONSTRAINT_HTD_KEY_PREFIX + constraintClazz;
}
/**
* Write the given key and associated configuration to the {@link TableDescriptorBuilder} | 3.26 |
hbase_FlushPolicy_configureForRegion_rdh | /**
* Upon construction, this method will be called with the region to be governed. It will be called
* once and only once.
*/
protected void configureForRegion(HRegion region) {
this.region = region;
} | 3.26 |
hbase_CellArrayImmutableSegment_reinitializeCellSet_rdh | /* ------------------------------------------------------------------------ */
// Create CellSet based on CellChunkMap from current ConcurrentSkipListMap based CellSet
// (without compacting iterator)
// We do not consider cells bigger than chunks!
private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner, CellSet oldCellSet, MemStoreCompactionStrategy.Action action) {
Cell[] cells = new Cell[numOfCells];// build the Cell Array
Cell curCell;
int idx = 0;
int numUniqueKeys = 0;
Cell prev = null;
try {
while ((curCell = segmentScanner.next()) != null) {
cells[idx++] = curCell;
if (action == Action.FLATTEN_COUNT_UNIQUE_KEYS) {
// counting number of unique keys
if (prev != null) {
if (!CellUtil.matchingRowColumn(prev, curCell)) {
numUniqueKeys++;}
} else {
numUniqueKeys++;
}
}
prev = curCell;
}
} catch (IOException ie) {
throw new IllegalStateException(ie);
} finally {
segmentScanner.close();
}
if (action != Action.FLATTEN_COUNT_UNIQUE_KEYS) {
numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES;
}
// build the immutable CellSet
CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, idx, false);// update the CellSet of this Segment
this.setCellSet(oldCellSet, new CellSet(cam, numUniqueKeys));
} | 3.26 |
hbase_CellArrayImmutableSegment_initializeCellSet_rdh | // /////////////////// PRIVATE METHODS /////////////////////
/* ------------------------------------------------------------------------ */
// Create CellSet based on CellArrayMap from compacting iterator
private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator, MemStoreCompactionStrategy.Action action) {
boolean merge = (action == Action.MERGE) || (action == Action.MERGE_COUNT_UNIQUE_KEYS);
Cell[] cells = new Cell[numOfCells];// build the Cell Array
int i = 0;
int numUniqueKeys = 0;
Cell prev = null;
while (iterator.hasNext()) {
Cell c = iterator.next();
// The scanner behind the iterator is doing all the elimination logic
if (merge) {
// if this is merge we just move the Cell object without copying MSLAB
// the sizes still need to be updated in the new segment
cells[i] = c;
} else {
// now we just copy it to the new segment (also MSLAB copy)
cells[i] = maybeCloneWithAllocator(c, false);
}
// second parameter true, because in compaction/merge the addition of the cell to new segment
// is always successful
updateMetaInfo(cells[i], true, null);// updates the size per cell
if (action == Action.MERGE_COUNT_UNIQUE_KEYS) {
// counting number of unique keys
if (prev != null) {
if (!CellUtil.matchingRowColumnBytes(prev, c)) {
numUniqueKeys++;
}
} else {
numUniqueKeys++;}
}
prev = c;
i++;
}
if (action == Action.COMPACT) {
numUniqueKeys = numOfCells;
} else if (action != Action.MERGE_COUNT_UNIQUE_KEYS) {
numUniqueKeys =
CellSet.UNKNOWN_NUM_UNIQUES;
}
// build the immutable CellSet
CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, i, false);
this.setCellSet(null, new CellSet(cam, numUniqueKeys));// update the CellSet of this Segment
} | 3.26 |
hbase_HFile_isHFileFormat_rdh | /**
* Returns true if the specified file has a valid HFile Trailer.
*
* @param fs
* filesystem
* @param fileStatus
* the file to verify
* @return true if the file has a valid HFile Trailer, otherwise false
* @throws IOException
* if failed to read from the underlying stream
*/
public static boolean isHFileFormat(final FileSystem fs, final FileStatus fileStatus)
throws IOException {
final Path path = fileStatus.getPath();
final long size = fileStatus.getLen();
try (FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, path)) {
boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
assert !isHBaseChecksum;// Initially we must read with FS checksum.
FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
return true;
} catch (IllegalArgumentException e) {
return false;
}
} | 3.26 |
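
A small command-line sketch built on this check; it assumes Hadoop and the HBase client jars are on the classpath and takes the file path as its only argument.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.HFile;

public class HFileFormatCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path(args[0]);
    FileSystem fs = path.getFileSystem(conf);
    FileStatus status = fs.getFileStatus(path);
    System.out.println(path + " has a valid HFile trailer: " + HFile.isHFileFormat(fs, status));
  }
}
```
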
hbase_HFile_createReader_rdh | /**
*
* @param fs
* filesystem
* @param path
* Path to file to read
* @param cacheConf
* This must not be null.
* @param primaryReplicaReader
* true if this is a reader for primary replica
* @param conf
* Configuration
* @return an active Reader instance
* @throws IOException
* Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile
* is corrupt/invalid.
* @see CacheConfig#CacheConfig(Configuration)
*/
public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheConf, boolean primaryReplicaReader, Configuration conf) throws IOException {
Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf");
FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
ReaderContext context = new ReaderContextBuilder().withFilePath(path).withInputStreamWrapper(stream).withFileSize(fs.getFileStatus(path).getLen()).withFileSystem(stream.getHfs()).withPrimaryReplicaReader(primaryReplicaReader).withReaderType(ReaderType.PREAD).build();
HFileInfo fileInfo = new HFileInfo(context, conf);
Reader reader = createReader(context, fileInfo, cacheConf, conf);
fileInfo.initMetaAndIndex(reader);
return reader;
} | 3.26 |
hbase_HFile_longToInt_rdh | // Utility methods.
/* @param l Long to convert to an int.
@return <code>l</code> cast as an int.
*/
static int longToInt(final long l) {
// Expecting the size() of a block not exceeding 4GB. Assuming the
// size() will wrap to negative integer if it exceeds 2GB (From tfile).
return ((int) (l & 0xffffffffL));
} | 3.26 |
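
A worked example of the masking behaviour described in the comment: values between 2GB and 4GB wrap to a negative int, and anything at or above 4GB silently loses its high bits.

```java
public class LongToIntExample {
  public static void main(String[] args) {
    long fits = 2_147_483_647L;            // Integer.MAX_VALUE
    long overTwoGb = 3_000_000_000L;       // > 2GB, < 4GB
    long overFourGb = 4_294_967_296L + 42; // 2^32 + 42

    System.out.println((int) (fits & 0xffffffffL));       // 2147483647
    System.out.println((int) (overTwoGb & 0xffffffffL));  // -1294967296 (wraps negative)
    System.out.println((int) (overFourGb & 0xffffffffL)); // 42 (high bits lost)
  }
}
```
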
hbase_HFile_getWriterFactoryNoCache_rdh | /**
* Returns the factory to be used to create {@link HFile} writers. Disables block cache access for
* all writers created through the returned factory.
*/
public static final WriterFactory getWriterFactoryNoCache(Configuration conf) {
return HFile.getWriterFactory(conf, CacheConfig.DISABLED);
} | 3.26 |
hbase_HFile_getAndResetChecksumFailuresCount_rdh | /**
* Number of checksum verification failures. It also clears the counter.
*/
public static final long getAndResetChecksumFailuresCount() {
return CHECKSUM_FAILURES.sumThenReset();
} | 3.26 |
hbase_HFile_getWriterFactory_rdh | /**
* Returns the factory to be used to create {@link HFile} writers
*/
public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) {
int version = getFormatVersion(conf);
switch (version) {
case 2 :
throw new IllegalArgumentException((("This should never happen. " + "Did you change hfile.format.version to read v2? This version of the software writes v3") + " hfiles only (but it can read v2 files without having to update hfile.format.version ") + "in hbase-site.xml)");
case 3 :
return new HFile.WriterFactory(conf, cacheConf);
default :
throw new IllegalArgumentException(("Cannot create writer for HFile " + "format version ") + version);}
} | 3.26 |
hbase_HFile_m4_rdh | /**
* Returns all HFiles belonging to the given region directory. Could return an empty list.
*
* @param fs
* The file system reference.
* @param regionDir
* The region directory to scan.
* @return The list of files found.
* @throws IOException
* When scanning the files fails.
*/
public static List<Path> m4(FileSystem fs, Path regionDir) throws IOException {
List<Path> regionHFiles = new ArrayList<>();
PathFilter dirFilter = new FSUtils.DirFilter(fs);
FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter);
for (FileStatus dir : familyDirs) {
FileStatus[] files = fs.listStatus(dir.getPath());
for (FileStatus file : files) {
if (((!file.isDirectory()) && (!file.getPath().toString().contains(HConstants.HREGION_OLDLOGDIR_NAME))) && (!file.getPath().toString().contains(HConstants.RECOVERED_EDITS_DIR))) {
regionHFiles.add(file.getPath());
}
}
}
return regionHFiles;
}
/**
* Checks the given {@link HFile} format version, and throws an exception if invalid. Note that if
* the version number comes from an input file and has not been verified, the caller needs to
* re-throw an {@link IOException} | 3.26 |
hbase_HFile_getChecksumFailuresCount_rdh | /**
 * Number of checksum verification failures, without clearing the counter.
*/
public static final long getChecksumFailuresCount() {
return CHECKSUM_FAILURES.sum();
} | 3.26 |
hbase_DisabledTableSnapshotHandler_snapshotRegions_rdh | // TODO consider parallelizing these operations since they are independent. Right now its just
// easier to keep them serial though
@Override
public void snapshotRegions(List<Pair<RegionInfo, ServerName>> regionsAndLocations) throws IOException, KeeperException {
try {
// 1. get all the regions hosting this table.
// extract each pair to separate lists
Set<RegionInfo> regions = new HashSet<>();
for (Pair<RegionInfo, ServerName> p : regionsAndLocations) {
// Don't include non-default regions
RegionInfo hri = p.getFirst();
if (RegionReplicaUtil.isDefaultReplica(hri))
{
regions.add(hri);
}
}
// handle the mob files if any.
boolean mobEnabled = MobUtils.hasMobColumns(htd);
if (mobEnabled) {
// snapshot the mob files as an offline region.
RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(htd.getTableName());
regions.add(mobRegionInfo);
}
// 2. for each region, write all the info to disk
String msg = "Starting to write region info and WALs for regions for offline snapshot:" + ClientSnapshotDescriptionUtils.toString(snapshot);
LOG.info(msg);
status.setStatus(msg);
ThreadPoolExecutor exec = SnapshotManifest.createExecutor(conf, "DisabledTableSnapshot");
try {
ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
@Override
public void editRegion(final RegionInfo regionInfo) throws IOException {
snapshotManifest.addRegion(CommonFSUtils.getTableDir(rootDir, snapshotTable), regionInfo);
}
});
} finally {
exec.shutdown();
}
} catch (Exception e) {
// make sure we capture the exception to propagate back to the client later
String reason = (("Failed snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot)) + " due to exception:") + e.getMessage();
ForeignException ee = new ForeignException(reason, e);
monitor.receive(ee);
status.abort((("Snapshot of table: " + snapshotTable) + " failed because ") + e.getMessage());
} finally {
LOG.debug(("Marking snapshot" + ClientSnapshotDescriptionUtils.toString(snapshot)) + " as finished.");
}
} | 3.26 |
hbase_MasterRegistry_parseMasterAddrs_rdh | /**
 * Parses the list of master addresses from the provided configuration. Supported format is
 * comma-separated host[:port] values. If no port number is specified, the default master port is assumed.
*
* @param conf
* Configuration to parse from.
*/
public static Set<ServerName> parseMasterAddrs(Configuration conf) throws UnknownHostException {
final int defaultPort = getDefaultMasterPort(conf);
final Set<ServerName> masterAddrs = new HashSet<>();
final String configuredMasters = getMasterAddr(conf);
for (String masterAddr : Splitter.onPattern(MASTER_ADDRS_CONF_SEPARATOR).split(configuredMasters)) {
final HostAndPort masterHostPort = HostAndPort.fromString(masterAddr.trim()).withDefaultPort(defaultPort);
masterAddrs.add(ServerName.valueOf(masterHostPort.toString(), ServerName.NON_STARTCODE));
}
Preconditions.checkArgument(!masterAddrs.isEmpty(), "At least one master address is needed");
return masterAddrs;} | 3.26 |
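
A hedged sketch of feeding this parser; MasterRegistry is an internal client class, and the property name "hbase.masters" is an assumption about the key the registry reads, so verify both against your HBase version before relying on them.

```java
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.MasterRegistry;

public class ParseMasterAddrsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Comma-separated host[:port]; hosts without a port get the default master port.
    conf.set("hbase.masters", "master1.example.com:16000,master2.example.com");
    Set<ServerName> masters = MasterRegistry.parseMasterAddrs(conf);
    masters.forEach(System.out::println);
  }
}
```
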
hbase_MasterRegistry_getDefaultMasterPort_rdh | /**
* Supplies the default master port we should use given the provided configuration.
*
* @param conf
* Configuration to parse from.
*/
private static int getDefaultMasterPort(Configuration conf) {
final int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
if (port == 0) {
// Master port may be set to 0. We should substitute the default port in that case.
return HConstants.DEFAULT_MASTER_PORT;
}
return port;
} | 3.26 |
hbase_ZNodePaths_isClientReadable_rdh | /**
* Returns whether the path is supposed to be readable by the client and DOES NOT contain
* sensitive information (world readable).
*/
public boolean isClientReadable(String path) {
// Developer notice: These znodes are world readable. DO NOT add more znodes here UNLESS
// all clients need to access this data to work. Using zk for sharing data to clients (other
// than the service lookup case) is not a recommended design pattern.
return (((((path.equals(baseZNode) || isMetaZNodePath(path)) || path.equals(masterAddressZNode)) || path.equals(clusterIdZNode)) || path.equals(f0)) ||
// /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not
path.equals(tableZNode)) || path.startsWith(tableZNode + "/");
} | 3.26 |
hbase_ZNodePaths_getMetaReplicaIdFromPath_rdh | /**
* Parses the meta replicaId from the passed path.
*
* @param path
* the name of the full path which includes baseZNode.
*/
public int getMetaReplicaIdFromPath(String path) {// Extract the znode from path. The prefix is of the following format.
// baseZNode + PATH_SEPARATOR.
int prefixLen = baseZNode.length() + 1;
return getMetaReplicaIdFromZNode(path.substring(prefixLen));
} | 3.26 |
hbase_ZNodePaths_isMetaZNodePath_rdh | /**
 * Returns true if the fully qualified path is for the meta location
*/
public boolean isMetaZNodePath(String path) {
int prefixLen = baseZNode.length() + 1;
return (path.length() > prefixLen) && isMetaZNodePrefix(path.substring(prefixLen));
} | 3.26 |
hbase_ZNodePaths_getMetaReplicaIdFromZNode_rdh | /**
* Parse the meta replicaId from the passed znode
*
* @param znode
* the name of the znode, does not include baseZNode
 */
public int getMetaReplicaIdFromZNode(String znode) {
return znode.equals(metaZNodePrefix) ? RegionInfo.DEFAULT_REPLICA_ID : Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
} | 3.26 |
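
A small sketch of the replicaId parsing; ZNodePaths is an internal class, and the literals below assume the default meta znode prefix "meta-region-server" (the prefix is configurable, so treat them as illustrative).

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

public class MetaReplicaIdExample {
  public static void main(String[] args) {
    ZNodePaths paths = new ZNodePaths(HBaseConfiguration.create());
    // With the default prefix, the bare znode maps to the default replica id (0),
    // while a "-N" suffix yields replica N.
    System.out.println(paths.getMetaReplicaIdFromZNode("meta-region-server"));   // 0
    System.out.println(paths.getMetaReplicaIdFromZNode("meta-region-server-2")); // 2
  }
}
```
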
hbase_ZNodePaths_getZNodeForReplica_rdh | /**
* Returns the znode string corresponding to a replicaId
*/
public String getZNodeForReplica(int replicaId) {
if (RegionReplicaUtil.isDefaultReplica(replicaId)) {
return joinZNode(baseZNode, metaZNodePrefix);
} else {
return joinZNode(baseZNode, (metaZNodePrefix + "-") + replicaId);
}
} | 3.26 |
hbase_ZNodePaths_joinZNode_rdh | /**
* Join the prefix znode name with the suffix znode name to generate a proper full znode name.
* <p>
* Assumes prefix does not end with slash and suffix does not begin with it.
*
* @param prefix
* beginning of znode name
* @param suffix
* ending of znode name
* @return result of properly joining prefix with suffix
*/
public static String joinZNode(String prefix, String... suffix) {
StringBuilder sb = new StringBuilder(prefix);
for (String s : suffix) {
sb.append(ZNodePaths.ZNODE_PATH_SEPARATOR).append(s);
}
return sb.toString();
} | 3.26 |
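
A tiny worked example of the varargs join: segments are appended with '/', and the prefix is assumed not to end with a slash.

```java
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

public class JoinZNodeExample {
  public static void main(String[] args) {
    // -> "/hbase/table/mytable"
    System.out.println(ZNodePaths.joinZNode("/hbase", "table", "mytable"));
  }
}
```
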
hbase_ZNodePaths_isMetaZNodePrefix_rdh | /**
 * Returns true if this is a meta znode.
*/
public boolean isMetaZNodePrefix(String znode) {
return (znode != null) && znode.startsWith(this.metaZNodePrefix);
} | 3.26 |
hbase_HttpServer_addJerseyResourcePackage_rdh | /**
* Add a Jersey resource package.
*
* @param packageName
* The Java package name containing the Jersey resource.
* @param pathSpec
* The path spec for the servlet
*/
public void addJerseyResourcePackage(final String packageName, final String pathSpec) {
LOG.info((("addJerseyResourcePackage: packageName=" + packageName) + ", pathSpec=") + pathSpec);
ResourceConfig application =
new ResourceConfig().packages(packageName);
final ServletHolder sh = new ServletHolder(new ServletContainer(application));
webAppContext.addServlet(sh, pathSpec);} | 3.26 |
hbase_HttpServer_stop_rdh | /**
* stop the server
*/
public void stop() throws Exception {
MultiException exception = null;
for (ListenerInfo li : listeners) {
if (!li.f0) {
continue;
}
try {
li.listener.close();
} catch (Exception e) {
LOG.error("Error while stopping listener for webapp" + webAppContext.getDisplayName(), e);
exception = m6(exception, e);
}
}
try {
// clear & stop webAppContext attributes to avoid memory leaks.
webAppContext.clearAttributes();
webAppContext.stop();
} catch (Exception e) {
LOG.error("Error while stopping web app context for webapp " + webAppContext.getDisplayName(), e);
exception = m6(exception, e);
}
try {
webServer.stop();
} catch (Exception e) {
LOG.error("Error while stopping web server for webapp " + webAppContext.getDisplayName(), e); exception = m6(exception, e);
}
if (exception != null) {
exception.ifExceptionThrow();
}
} | 3.26 |
hbase_HttpServer_hasAdministratorAccess_rdh | /**
 * Does the user sending the HttpServletRequest have the administrator ACLs? If it isn't the case,
* response will be modified to send an error to the user.
*
* @param servletContext
* the {@link ServletContext} to use
* @param request
* the {@link HttpServletRequest} to check
* @param response
* used to send the error response if user does not have admin access.
* @return true if admin-authorized, false otherwise
* @throws IOException
* if an unauthenticated or unauthorized user tries to access the page
*/
public static boolean hasAdministratorAccess(ServletContext servletContext, HttpServletRequest request, HttpServletResponse response) throws IOException {
Configuration conf = ((Configuration) (servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE)));
AccessControlList acl = ((AccessControlList) (servletContext.getAttribute(ADMINS_ACL)));
return hasAdministratorAccess(conf, acl, request, response);
} | 3.26 |
hbase_HttpServer_getParameter_rdh | /**
* Unquote the name and quote the value.
*/
@Override
public String getParameter(String name) {
return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter(HtmlQuoting.unquoteHtmlChars(name)));
} | 3.26 |
hbase_HttpServer_toString_rdh | /**
* Return the host and port of the HttpServer, if live
*
* @return the classname and any HTTP URL
*/
@Override
public String toString() {
if (listeners.isEmpty()) {
return "Inactive HttpServer";
} else {
StringBuilder
sb = new StringBuilder("HttpServer (").append(isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE).append("), listening at:");
for (ListenerInfo li : listeners) {
ServerConnector l = li.listener;
sb.append(l.getHost()).append(":").append(l.getPort()).append("/,");
}
return sb.toString();
}
} | 3.26 |
hbase_HttpServer_getParameterNames_rdh | /**
* Return the set of parameter names, quoting each name.
 */
@Override
public Enumeration<String> getParameterNames() {
return new Enumeration<String>() {
private Enumeration<String> rawIterator = rawRequest.getParameterNames();
@Override
public boolean hasMoreElements() {
return rawIterator.hasMoreElements();
}
@Override
public String nextElement() {
return HtmlQuoting.quoteHtmlChars(rawIterator.nextElement());
}
};
} | 3.26 |
hbase_HttpServer_start_rdh | /**
* Start the server. Does not wait for the server to start.
*/
public void start() throws IOException {
try {
try {
openListeners();
webServer.start();
} catch (IOException ex) {
LOG.info("HttpServer.start() threw a non Bind IOException", ex);
throw ex;
} catch (MultiException ex) {
LOG.info("HttpServer.start() threw a MultiException", ex);
throw ex;
}
// Make sure there is no handler failures.
Handler[] handlers = webServer.getHandlers();
for (int i = 0; i < handlers.length; i++) {
if (handlers[i].isFailed()) {
throw new IOException("Problem in starting http server. Server handlers failed");
}
}
// Make sure there are no errors initializing the context.
Throwable unavailableException = webAppContext.getUnavailableException();
if
(unavailableException != null) {
// Have to stop the webserver, or else its non-daemon threads
// will hang forever.
webServer.stop();
throw new IOException("Unable to initialize WebAppContext", unavailableException);
}
} catch (IOException e) {
throw e;
} catch (InterruptedException e) {
throw ((IOException) (new InterruptedIOException("Interrupted while starting HTTP server").initCause(e)));
} catch (Exception e) {
throw new IOException("Problem starting http server", e);
}
} | 3.26 |
hbase_HttpServer_getAttribute_rdh | /**
* Get the value in the webapp context.
*
* @param name
* The name of the attribute
* @return The value of the attribute
*/
public Object getAttribute(String name)
{
return webAppContext.getAttribute(name);
} | 3.26 |
hbase_HttpServer_setBindAddress_rdh | /**
*
* @see #addEndpoint(URI)
* @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead.
*/
@Deprecated
public Builder setBindAddress(String bindAddress) {
this.bindAddress = bindAddress;
return this;
} | 3.26 |
hbase_HttpServer_addPrivilegedServlet_rdh | /**
* Adds a servlet in the server that only administrators can access. This method differs from
* {@link #addUnprivilegedServlet(String, ServletHolder)} in that only those authenticated user
* who are identified as administrators can interact with the servlet added by this method.
*/
public void addPrivilegedServlet(String pathSpec, ServletHolder holder) {
addServletWithAuth(pathSpec, holder, true);
} | 3.26 |
hbase_HttpServer_isInstrumentationAccessAllowed_rdh | /**
* Checks the user has privileges to access to instrumentation servlets.
* <p>
* If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE (default value)
* it always returns TRUE.
* </p>
* <p>
* If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE it will check
* that if the current user is in the admin ACLS. If the user is in the admin ACLs it returns
* TRUE, otherwise it returns FALSE.
* </p>
*
* @param servletContext
* the servlet context.
* @param request
* the servlet request.
* @param response
* the servlet response.
 * @return TRUE/FALSE based on the logic described above.
*/
public static boolean isInstrumentationAccessAllowed(ServletContext servletContext, HttpServletRequest request, HttpServletResponse response) throws IOException {
Configuration conf = ((Configuration) (servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE)));
boolean access = true;
boolean adminAccess = conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, false);
if (adminAccess) {
access = hasAdministratorAccess(servletContext, request, response);
}
return access; } | 3.26 |
hbase_HttpServer_buildGzipHandler_rdh | /**
* Construct and configure an instance of {@link GzipHandler}. With complex
* multi-{@link WebAppContext} configurations, it's easiest to apply this handler directly to the
* instance of {@link Server} near the end of its configuration, something like
*
* <pre>
* Server server = new Server();
* // ...
* server.setHandler(buildGzipHandler(server.getHandler()));
* server.start();
* </pre>
*/
public static GzipHandler buildGzipHandler(final Handler wrapped) {
final GzipHandler gzipHandler = new GzipHandler();
gzipHandler.setHandler(wrapped);
return gzipHandler;
} | 3.26 |
hbase_HttpServer_needsClientAuth_rdh | /**
* Specify whether the server should authorize the client in SSL connections.
*/
public Builder needsClientAuth(boolean value) {
this.needsClientAuth = value;
return this;
} | 3.26 |
hbase_HttpServer_setName_rdh | /**
*
* @see #setAppDir(String)
* @deprecated Since 0.99.0. Use {@link #setAppDir(String)} instead.
*/
@Deprecated
public Builder setName(String name) {
this.name = name;
return this;
} | 3.26 |
hbase_HttpServer_getServerName_rdh | /**
* Quote the server name so that users specifying the HOST HTTP header can't inject attacks.
*/
@Override
public String getServerName() {
return HtmlQuoting.quoteHtmlChars(rawRequest.getServerName());
} | 3.26 |
hbase_HttpServer_setThreads_rdh | /**
* Set the min, max number of worker threads (simultaneous connections).
*/
public void setThreads(int min, int max) {
QueuedThreadPool pool
= ((QueuedThreadPool) (webServer.getThreadPool()));
pool.setMinThreads(min);
pool.setMaxThreads(max);
} | 3.26 |
hbase_HttpServer_addDefaultApps_rdh | /**
* Add default apps.
*
* @param appDir
* The application directory
*/
protected void addDefaultApps(ContextHandlerCollection parent, final String appDir, Configuration conf) {
// set up the context for "/logs/" if "hadoop.log.dir" property is defined.
String logDir = this.logDir;
if (logDir == null) {
logDir = System.getProperty("hadoop.log.dir");
}
if (logDir != null) {
ServletContextHandler logContext = new ServletContextHandler(parent, "/logs");
logContext.addServlet(AdminAuthorizedServlet.class, "/*");
logContext.setResourceBase(logDir);
if (conf.getBoolean(ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES, ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) {
Map<String, String> params = logContext.getInitParams();
params.put("org.mortbay.jetty.servlet.Default.aliases", "true");
}
logContext.setDisplayName("logs");
setContextAttributes(logContext, conf);
addNoCacheFilter(logContext, conf);
defaultContexts.put(logContext, true);
}
// set up the context for "/static/*"
ServletContextHandler staticContext = new ServletContextHandler(parent, "/static");
staticContext.setResourceBase(appDir + "/static");
staticContext.addServlet(DefaultServlet.class, "/*");
staticContext.setDisplayName("static");
setContextAttributes(staticContext, conf);
defaultContexts.put(staticContext, true);
} | 3.26 |
hbase_HttpServer_hostName_rdh | /**
* Set the hostname of the http server. The host name is used to resolve the _HOST field in
* Kerberos principals. The hostname of the first listener will be used if the name is
* unspecified.
*/
public Builder hostName(String hostName) {
this.hostName = hostName;
return this;
} | 3.26 |
hbase_HttpServer_isAlive_rdh | /**
* Test for the availability of the web server
*
* @return true if the web server is started, false otherwise
*/
public boolean isAlive() {
return (webServer != null) && webServer.isStarted();
} | 3.26 |
hbase_HttpServer_setPort_rdh | /**
*
* @see #addEndpoint(URI)
* @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead.
*/
@Deprecated
public Builder setPort(int port) {
this.port = port;
return this;
} | 3.26 |
hbase_HttpServer_openListeners_rdh | /**
* Open the main listener for the server
*
* @throws Exception
* if the listener cannot be opened or the appropriate port is already in use
 */
void openListeners() throws Exception {
for (ListenerInfo li : listeners) {
ServerConnector listener
= li.listener;
if ((!li.f0) || ((li.listener.getLocalPort() != (-1)) && (li.listener.getLocalPort() != (-2)))) {
// This listener is either started externally, or has not been opened, or has been closed
continue;
}
int port = listener.getPort();
while (true) {
// jetty has a bug where you can't reopen a listener that previously
// failed to open w/o issuing a close first, even if the port is changed
try {
listener.close();
listener.open();
LOG.info("Jetty bound to port " + listener.getLocalPort());
break;
} catch (IOException ex) {
if ((!(ex instanceof BindException)) && (!(ex.getCause() instanceof BindException))) {
throw ex;
}
if ((port == 0) || (!findPort)) {
BindException be = new BindException((("Port in use: " + listener.getHost()) + ":") + listener.getPort());
be.initCause(ex);
throw be;
}
}
// try the next port number
listener.setPort(++port);
Thread.sleep(100);
}
}
} | 3.26 |
hbase_HttpServer_inferMimeType_rdh | /**
* Infer the mime type for the response based on the extension of the request URI. Returns null
* if unknown.
 */
private String inferMimeType(ServletRequest request) {
String path = ((HttpServletRequest) (request)).getRequestURI();
ServletContext context = config.getServletContext();
return context.getMimeType(path);
} | 3.26 |
hbase_HttpServer_getWebAppsPath_rdh | /**
* Get the pathname to the webapps files.
*
* @param appName
* eg "secondary" or "datanode"
* @return the pathname as a URL
* @throws FileNotFoundException
* if 'webapps' directory cannot be found on CLASSPATH.
 */
protected String getWebAppsPath(String webapps, String appName) throws FileNotFoundException {
URL url = getClass().getClassLoader().getResource((webapps + "/") + appName);
if (url == null) {
throw new FileNotFoundException(((webapps + "/") + appName) + " not found in CLASSPATH");
}
String urlString = url.toString();
return urlString.substring(0, urlString.lastIndexOf('/'));
} | 3.26 |
hbase_HttpServer_isMissing_rdh | /**
 * Returns true if the argument is null or consists only of whitespace
*/
private boolean isMissing(String value) {
if (null == value) {
return true;
}
return value.trim().isEmpty();
} | 3.26 |
hbase_HttpServer_addInternalServlet_rdh | /**
* Add an internal servlet in the server, specifying whether or not to protect with Kerberos
* authentication. Note: This method is to be used for adding servlets that facilitate internal
* communication and not for user facing functionality. For servlets added using this method,
* filters (except internal Kerberos filters) are not enabled.
*
* @param name
* The name of the {@link Servlet} (can be passed as null)
* @param pathSpec
* The path spec for the {@link Servlet}
* @param clazz
* The {@link Servlet} class
* @param requireAuthz
* Require Kerberos authenticate to access servlet
*/
void addInternalServlet(String name, String pathSpec, Class<? extends HttpServlet>
clazz, boolean requireAuthz) {
ServletHolder holder = new ServletHolder(clazz);
if (name != null) {
holder.setName(name);
}
m4(pathSpec, holder, requireAuthz);
} | 3.26 |
hbase_HttpServer_addServletWithAuth_rdh | /**
* Internal method to add a servlet to the HTTP server. Developers should not call this method
* directly, but invoke it via {@link #addUnprivilegedServlet(String, ServletHolder)} or
* {@link #addPrivilegedServlet(String, ServletHolder)}.
*/
void
addServletWithAuth(String pathSpec, ServletHolder holder, boolean requireAuthz) {
m4(pathSpec, holder, requireAuthz);
m5(pathSpec, webAppContext);
} | 3.26 |
hbase_HttpServer_m4_rdh | /**
* Add an internal servlet in the server, specifying whether or not to protect with Kerberos
* authentication. Note: This method is to be used for adding servlets that facilitate internal
* communication and not for user facing functionality. For servlets added using this method,
* filters (except internal Kerberos filters) are not enabled.
*
* @param pathSpec
* The path spec for the {@link Servlet}
* @param holder
* The object providing the {@link Servlet} instance
* @param requireAuthz
* Require Kerberos authenticate to access servlet
 */
void m4(String pathSpec, ServletHolder holder, boolean requireAuthz) {
if (authenticationEnabled && requireAuthz) {
FilterHolder filter = new FilterHolder(AdminAuthorizedFilter.class);
filter.setName(AdminAuthorizedFilter.class.getSimpleName());
FilterMapping
fmap = new FilterMapping();
fmap.setPathSpec(pathSpec);
fmap.setDispatches(FilterMapping.ALL);
fmap.setFilterName(AdminAuthorizedFilter.class.getSimpleName());
webAppContext.getServletHandler().addFilter(filter, fmap);
}
webAppContext.getSessionHandler().getSessionCookieConfig().setHttpOnly(true);
webAppContext.getSessionHandler().getSessionCookieConfig().setSecure(true);
webAppContext.addServlet(holder, pathSpec);
} | 3.26 |
hbase_HttpServer_addUnprivilegedServlet_rdh | /**
* Adds a servlet in the server that any user can access. This method differs from
* {@link #addPrivilegedServlet(String, ServletHolder)} in that any authenticated user can
* interact with the servlet added by this method.
*
* @param pathSpec
* The path spec for the servlet
* @param holder
* The servlet holder
 */
public void addUnprivilegedServlet(String pathSpec, ServletHolder holder) {
addServletWithAuth(pathSpec, holder, false);
} | 3.26 |
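A usage sketch for the two registration flavors (the httpServer variable, the PingServlet class, and the /ping and /dump paths are assumptions for illustration; imports are omitted to match the surrounding snippets). An unprivileged servlet is reachable by any authenticated user, while a privileged one is additionally guarded by the admin authorization filter when authorization is enabled:

public static class PingServlet extends HttpServlet {
  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
    // Trivial handler used only to demonstrate registration.
    resp.setContentType("text/plain");
    resp.getWriter().write("pong");
  }
}

// Any authenticated user may call /ping.
httpServer.addUnprivilegedServlet("/ping", new ServletHolder(PingServlet.class));
// /dump is additionally checked against the admin ACL when authorization is on.
httpServer.addPrivilegedServlet("/dump", new ServletHolder(PingServlet.class));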
hbase_HttpServer_defineFilter_rdh | /**
* Define a filter for a context and set up default url mappings.
*/
  public static void defineFilter(ServletContextHandler handler, String name, String classname,
    Map<String, String> parameters, String[] urls) {
    FilterHolder holder = new FilterHolder();
    holder.setName(name);
holder.setClassName(classname);
if (parameters != null) {
holder.setInitParameters(parameters);
}
FilterMapping fmap = new FilterMapping();
fmap.setPathSpecs(urls);
fmap.setDispatches(FilterMapping.ALL);
fmap.setFilterName(name);
handler.getServletHandler().addFilter(holder, fmap);
} | 3.26 |
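A possible call site for defineFilter follows; the handler variable, the filter class name, and the init parameters are illustrative assumptions, not values taken from the snippet above:

Map<String, String> params = new HashMap<>();
params.put("header.name", "X-Frame-Options");
params.put("header.value", "DENY");
// Registers org.example.MyHeaderFilter (hypothetical) on every path of the context.
HttpServer.defineFilter(handler, "safety-headers", "org.example.MyHeaderFilter",
  params, new String[] { "/*" });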
hbase_HttpServer_getOrEmptyString_rdh | /**
 * Extracts the value for the given key from the configuration, or returns a zero-length string if
 * the key is not set.
*/
private String getOrEmptyString(Configuration conf, String key) {
if (null == key) {
return EMPTY_STRING;
    }
    final String value = conf.get(key.trim());
    return null == value ? EMPTY_STRING : value;
} | 3.26 |
hbase_HttpServer_m5_rdh | /**
* Add the path spec to the filter path mapping.
*
* @param pathSpec
* The path spec
* @param webAppCtx
* The WebApplicationContext to add to
 */
protected void m5(String pathSpec, WebAppContext webAppCtx) {
for (String name : filterNames) {
FilterMapping fmap = new FilterMapping();
fmap.setPathSpec(pathSpec);
fmap.setFilterName(name);
fmap.setDispatches(FilterMapping.ALL);
webAppCtx.getServletHandler().addFilterMapping(fmap);
}
} | 3.26 |
hbase_HttpServer_setAttribute_rdh | /**
* Set a value in the webapp context. These values are available to the jsp pages as
* "application.getAttribute(name)".
*
* @param name
* The name of the attribute
* @param value
* The value of the attribute
*/
public void setAttribute(String name, Object value) {
webAppContext.setAttribute(name, value);
} | 3.26 |
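A small sketch of how setAttribute pairs with the JSP side described in the javadoc (the attribute key "my.conf" and the httpServer variable are assumptions):

// Server side: expose the active configuration to the webapp.
httpServer.setAttribute("my.conf", conf);
// Inside a JSP the same object can be read back through the implicit application object:
//   Configuration conf = (Configuration) application.getAttribute("my.conf");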
hbase_HttpServer_addDefaultServlets_rdh | /**
* Add default servlets.
*/
protected void addDefaultServlets(ContextHandlerCollection contexts, Configuration conf) throws IOException {
// set up default servlets
addPrivilegedServlet("stacks", "/stacks", HttpServer.StackServlet.class);
addPrivilegedServlet("logLevel", "/logLevel", Servlet.class);
// While we don't expect users to have sensitive information in their configuration, they
// might. Give them an option to not expose the service configuration to all users.
if (conf.getBoolean(HTTP_PRIVILEGED_CONF_KEY, HTTP_PRIVILEGED_CONF_DEFAULT)) {
addPrivilegedServlet("conf", "/conf", ConfServlet.class);
} else {
addUnprivilegedServlet("conf", "/conf", ConfServlet.class);
}
final String asyncProfilerHome = ProfileServlet.getAsyncProfilerHome();
    if ((asyncProfilerHome != null) && (!asyncProfilerHome.trim().isEmpty())) {
addPrivilegedServlet("prof", "/prof", ProfileServlet.class);
Path tmpDir = Paths.get(ProfileServlet.OUTPUT_DIR);
if (Files.notExists(tmpDir)) {
Files.createDirectories(tmpDir);
}
ServletContextHandler genCtx = new ServletContextHandler(contexts, "/prof-output-hbase");
genCtx.addServlet(ProfileOutputServlet.class, "/*");
genCtx.setResourceBase(tmpDir.toAbsolutePath().toString());
genCtx.setDisplayName("prof-output-hbase");
} else {
addUnprivilegedServlet("prof", "/prof", DisabledServlet.class);
LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property " + "not specified. Disabling /prof endpoint.");
}
    /* register metrics servlets */
    String[] enabledServlets = conf.getStrings(METRIC_SERVLETS_CONF_KEY, METRICS_SERVLETS_DEFAULT);
for (String enabledServlet : enabledServlets) {
try {
ServletConfig servletConfig = METRIC_SERVLETS.get(enabledServlet);
if (servletConfig != null) {
Class<?> clz = Class.forName(servletConfig.getClazz());
addPrivilegedServlet(servletConfig.getName(), servletConfig.getPathSpec(), clz.asSubclass(HttpServlet.class));
}
} catch (Exception e) {
/* shouldn't be fatal, so warn the user about it */
LOG.warn("Couldn't register the servlet " + enabledServlet, e);
}
}
} | 3.26 |
hbase_HttpServer_getPort_rdh | /**
* Get the port that the server is on
*
* @return the port
* @deprecated Since 0.99.0
*/
@Deprecated
  public int getPort() {
    return ((ServerConnector) (webServer.getConnectors()[0])).getLocalPort();
} | 3.26 |
hbase_HttpServer_getFilterInitializers_rdh | /**
* Get an array of FilterConfiguration specified in the conf
*/
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
if (conf == null) {
return null;
}
Class<?>[] classes = conf.getClasses(FILTER_INITIALIZERS_PROPERTY);
if (classes == null) {
return null;
}
FilterInitializer[] initializers = new FilterInitializer[classes.length];
for (int i = 0; i < classes.length; i++) {
initializers[i] = ((FilterInitializer) (ReflectionUtils.newInstance(classes[i])));
}
return initializers;
} | 3.26 |
hbase_OnlineLogRecord_getScan_rdh | /**
* If {@value org.apache.hadoop.hbase.HConstants#SLOW_LOG_SCAN_PAYLOAD_ENABLED} is enabled then
* this value may be present and should represent the Scan that produced the given
* {@link OnlineLogRecord}
*/
public Optional<Scan> getScan() {
return scan;
} | 3.26 |
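Reading the optional payload could look like the following; logRecord and LOG are assumptions standing in for a fetched OnlineLogRecord and an slf4j Logger:

logRecord.getScan().ifPresent(scan -> LOG.warn("Slow request issued scan: {}", scan));
if (!logRecord.getScan().isPresent()) {
  // Either the request was not a Scan, or SLOW_LOG_SCAN_PAYLOAD_ENABLED is off.
}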
hbase_OnlineLogRecord_getBlockBytesScanned_rdh | /**
 * Returns the number of block bytes scanned to retrieve the response cells.
*/
public long getBlockBytesScanned() {
return blockBytesScanned;
} | 3.26 |
hbase_OnlineLogRecord_setBlockBytesScanned_rdh | /**
 * Sets the number of block bytes scanned to retrieve the response cells.
*/
  public OnlineLogRecordBuilder setBlockBytesScanned(long blockBytesScanned) {
    this.blockBytesScanned = blockBytesScanned;
return this;
} | 3.26 |
hbase_DoubleArrayCost_applyCostsChange_rdh | /**
* We do not want to introduce a getCosts method to let upper layer get the cost array directly,
* so here we introduce this method to take a {@link Consumer} as parameter, where we will pass
* the actual cost array in, so you can change the element of the cost array in the
* {@link Consumer} implementation.
* <p/>
* Usually, in prepare method, you need to fill all the elements of the cost array, while in
 * regionMoved method, you just need to update the elements for the affected region servers.
*/
void applyCostsChange(Consumer<double[]> consumer) {
consumer.accept(costs);
    costsChanged = true;
} | 3.26 |
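A sketch of the two call patterns the javadoc describes, filling the whole array once and then touching only the servers involved in a move (cost is an already-prepared DoubleArrayCost; regionsPerServer, oldServerIndex and newServerIndex are assumptions about the enclosing cost function):

// Initial prepare-style pass: fill every server's cost.
cost.applyCostsChange(costs -> {
  for (int i = 0; i < costs.length; i++) {
    costs[i] = regionsPerServer[i].length;
  }
});

// regionMoved-style pass: only the two affected servers change.
cost.applyCostsChange(costs -> {
  costs[oldServerIndex] -= 1;
  costs[newServerIndex] += 1;
});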
hbase_DoubleArrayCost_getMaxSkew_rdh | /**
 * Return the max deviation of distribution. Compute max as if all region servers had 0 and one had
* the sum of all costs. This must be a zero sum cost for this to make sense.
*/
public static double getMaxSkew(double total, double numServers) {
if (numServers == 0) {
return 0;
}
double mean = total / numServers;
return Math.sqrt(((total - mean) * (total - mean)) + (((numServers - 1) * mean) * mean));
} | 3.26 |
hbase_DoubleArrayCost_getMinSkew_rdh | /**
* Return the min skew of distribution
*
 * @param total
 * 		is total number of regions
 * @param numServers
 * 		is the number of region servers
 */
public static double getMinSkew(double total, double numServers) {
    if (numServers == 0) {
return 0;
}
double mean = total / numServers;
// It's possible that there aren't enough regions to go around
double min;
if (numServers > total) {
min = (((numServers - total) * mean) * mean) + (((1 - mean) * (1 - mean)) * total);
} else {
// Some will have 1 more than everything else.
int numHigh = ((int) (total - (Math.floor(mean) * numServers)));
      int numLow = ((int) (numServers - numHigh));
min = ((numHigh * (Math.ceil(mean) - mean)) * (Math.ceil(mean) - mean)) + ((numLow * (mean - Math.floor(mean))) * (mean - Math.floor(mean)));
}
return Math.sqrt(min);
} | 3.26 |
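A worked example for the two skew bounds; the normalization step at the end is an assumption about how a balancer cost function could use them, not code taken from this class:

// 10 regions spread over 4 region servers, so the mean load is 2.5.
double total = 10;
double numServers = 4;
double min = DoubleArrayCost.getMinSkew(total, numServers); // sqrt(2*0.5^2 + 2*0.5^2) = 1.0
double max = DoubleArrayCost.getMaxSkew(total, numServers); // sqrt(7.5^2 + 3*2.5^2) ~= 8.66
double observedSkew = 2.5; // assumption: computed from the current cost array
double normalizedCost = (observedSkew - min) / (max - min); // scaled into [0, 1]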
hbase_ClusterConnectionFactory_createAsyncClusterConnection_rdh | /**
* Create a new {@link AsyncClusterConnection} instance to be used at server side where we have a
* {@link ConnectionRegistryEndpoint}.
*/
public static AsyncClusterConnection createAsyncClusterConnection(ConnectionRegistryEndpoint endpoint, Configuration conf, SocketAddress localAddress, User user) throws IOException {
ShortCircuitConnectionRegistry registry = new ShortCircuitConnectionRegistry(endpoint);
return createAsyncClusterConnection(conf, registry, localAddress, user);
} | 3.26 |
hbase_RestCsrfPreventionFilter_handleHttpInteraction_rdh | /**
* Handles an {@link HttpInteraction} by applying the filtering logic.
*
* @param httpInteraction
* caller's HTTP interaction
* @throws IOException
* if there is an I/O error
* @throws ServletException
* if the implementation relies on the servlet API and a servlet API call
* has failed
*/
public void handleHttpInteraction(HttpInteraction httpInteraction) throws IOException, ServletException {
if (((!isBrowser(httpInteraction.getHeader(HEADER_USER_AGENT))) || methodsToIgnore.contains(httpInteraction.getMethod())) || (httpInteraction.getHeader(headerName) != null)) {
httpInteraction.proceed();
} else {
      httpInteraction.sendError(HttpServletResponse.SC_BAD_REQUEST,
        "Missing Required Header for CSRF Vulnerability Protection");
    }
} | 3.26 |
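From a client's point of view the rule above means a browser-originated mutating request is rejected unless it carries the configured header. A hedged client-side sketch (the header name X-XSRF-HEADER is a common default and may differ in a given deployment; the URL is made up):

URL url = new URL("http://rest-host:8080/mytable/row1");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
// Any non-null value satisfies the filter's header check.
conn.setRequestProperty("X-XSRF-HEADER", "true");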
hbase_RestCsrfPreventionFilter_getFilterParams_rdh | /**
* Constructs a mapping of configuration properties to be used for filter initialization. The
* mapping includes all properties that start with the specified configuration prefix. Property
* names in the mapping are trimmed to remove the configuration prefix.
*
* @param conf
* configuration to read
* @param confPrefix
* configuration prefix
* @return mapping of configuration properties to be used for filter initialization
*/
public static Map<String, String> getFilterParams(Configuration conf, String confPrefix) {
Map<String, String> filterConfigMap = new HashMap<>();
for (Map.Entry<String, String> entry : conf) {
String name = entry.getKey();
if (name.startsWith(confPrefix)) {
String value = conf.get(name);
        name = name.substring(confPrefix.length());
        filterConfigMap.put(name, value);
}
}
return filterConfigMap;
}
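A usage sketch for getFilterParams; the hbase.rest.csrf. prefix and property names are illustrative assumptions, not documented keys:

Configuration conf = new Configuration();
conf.set("hbase.rest.csrf.custom-header", "X-XSRF-HEADER");
conf.set("hbase.rest.csrf.methods-to-ignore", "GET,OPTIONS,HEAD,TRACE");
Map<String, String> filterParams =
  RestCsrfPreventionFilter.getFilterParams(conf, "hbase.rest.csrf.");
// filterParams: {custom-header=X-XSRF-HEADER, methods-to-ignore=GET,OPTIONS,HEAD,TRACE}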
/**
* {@link HttpInteraction} | 3.26 |