name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_HAServiceTarget_getProxy | /**
* @param conf Configuration.
* @param timeoutMs timeout in milliseconds.
* @return a proxy to connect to the target HA Service.
* @throws IOException raised on errors performing I/O.
*/
public HAServiceProtocol getProxy(Configuration conf, int timeoutMs)
throws IOException {
return getProxyForAddress(conf, timeoutMs, getAddress());
} | 3.68 |
flink_Transformation_setMaxParallelism | /**
* Sets the maximum parallelism for this stream transformation.
*
* @param maxParallelism Maximum parallelism for this stream transformation.
*/
public void setMaxParallelism(int maxParallelism) {
OperatorValidationUtils.validateMaxParallelism(maxParallelism, UPPER_BOUND_MAX_PARALLELISM);
this.maxParallelism = maxParallelism;
} | 3.68 |
hadoop_DocumentStoreFactory_createDocumentStoreReader | /**
* Creates a DocumentStoreReader for a {@link DocumentStoreVendor}.
* @param conf configuration used for creating the client connection
* @param <Document> type of Document for which the reader has to be created,
* i.e. TimelineEntityDocument, FlowActivityDocument etc.
* @return document store reader
* @throws DocumentStoreNotSupportedException if there is no implementation
* for a configured {@link DocumentStoreVendor} or an unknown
* {@link DocumentStoreVendor} is configured.
* @throws YarnException if the required configs for the DocumentStore are missing.
*/
public static <Document extends TimelineDocument>
DocumentStoreReader<Document> createDocumentStoreReader(
Configuration conf) throws YarnException {
final DocumentStoreVendor storeType = getStoreVendor(conf);
switch (storeType) {
case COSMOS_DB:
DocumentStoreUtils.validateCosmosDBConf(conf);
return new CosmosDBDocumentStoreReader<>(conf);
default:
throw new DocumentStoreNotSupportedException(
"Unable to create DocumentStoreReader for type : "
+ storeType);
}
} | 3.68 |
flink_MultipleFuturesAvailabilityHelper_getAvailableFuture | /** @return combined future using anyOf logic */
public CompletableFuture<?> getAvailableFuture() {
return availableFuture;
} | 3.68 |
hadoop_BinaryPartitioner_setRightOffset | /**
* Set the subarray to be used for partitioning to
* <code>bytes[:(offset+1)]</code> in Python syntax.
*
* @param conf configuration object
* @param offset right Python-style offset
*/
public static void setRightOffset(Configuration conf, int offset) {
conf.setInt(RIGHT_OFFSET_PROPERTY_NAME, offset);
} | 3.68 |
hmily_HmilyDeleteStatement_getWhere | /**
* Get where.
*
* @return where segment
*/
public Optional<HmilyWhereSegment> getWhere() {
return Optional.ofNullable(where);
} | 3.68 |
flink_SingleOutputStreamOperator_setDescription | /**
* Sets the description for this operation.
*
* <p>Description is used in json plan and web ui, but not in logging and metrics where only
* name is available. Description is expected to provide detailed information about the sink,
* while name is expected to be more simple, providing summary information only, so that we can
* have more user-friendly logging messages and metric tags without losing useful messages for
* debugging.
*
* @param description The description for this operation.
* @return The operation with new description.
*/
@PublicEvolving
public SingleOutputStreamOperator<T> setDescription(String description) {
transformation.setDescription(description);
return this;
} | 3.68 |
framework_LegacyCommunicationManager_removeClientCache | /**
* Clear out client cache for the given UI. This should be called when the
* UI is detached and the cache becomes obsolete.
*
* @param uI
* the UI whose client cache should be removed
* @deprecated because this cleanup is only needed for a deprecated feature
*/
@Deprecated
private void removeClientCache(UI uI) {
Integer uiId = Integer.valueOf(uI.getUIId());
uiToClientCache.remove(uiId);
} | 3.68 |
framework_AbstractOrderedLayoutConnector_updateInternalState | /**
* Updates DOM properties and listeners based on the current state of this
* layout and its children.
*/
private void updateInternalState() {
// Avoid updating again for the same data
int lastResponseId = getConnection().getLastSeenServerSyncId();
if (processedResponseId == lastResponseId) {
return;
}
Profiler.enter("AOLC.updateInternalState");
// Remember that everything is updated for this response
processedResponseId = lastResponseId;
hasChildrenWithRelativeHeight = false;
hasChildrenWithRelativeWidth = false;
hasChildrenWithMiddleAlignment = false;
VAbstractOrderedLayout widget = getWidget();
needsExpand = widget.vertical ? !isUndefinedHeight()
: !isUndefinedWidth();
boolean onlyZeroExpands = true;
if (needsExpand) {
for (ComponentConnector child : getChildComponents()) {
double expandRatio = getState().childData
.get(child).expandRatio;
if (expandRatio != 0) {
onlyZeroExpands = false;
break;
}
}
}
// First update bookkeeping for all children
for (ComponentConnector child : getChildComponents()) {
Slot slot = widget.getSlot(child.getWidget());
slot.setRelativeWidth(child.isRelativeWidth());
slot.setRelativeHeight(child.isRelativeHeight());
if (child.delegateCaptionHandling()) {
updateCaptionInternal(child);
}
// Update slot style names
List<String> childStyles = child.getState().styles;
if (childStyles == null) {
widget.setSlotStyleNames(child.getWidget(), (String[]) null);
} else {
widget.setSlotStyleNames(child.getWidget(),
childStyles.toArray(new String[childStyles.size()]));
}
AlignmentInfo alignment = new AlignmentInfo(
getState().childData.get(child).alignmentBitmask);
slot.setAlignment(alignment);
if (alignment.isVerticalCenter()) {
hasChildrenWithMiddleAlignment = true;
}
double expandRatio = onlyZeroExpands ? 1
: getState().childData.get(child).expandRatio;
slot.setExpandRatio(expandRatio);
if (child.isRelativeHeight()) {
hasChildrenWithRelativeHeight = true;
}
if (child.isRelativeWidth()) {
hasChildrenWithRelativeWidth = true;
}
}
if (needsFixedHeight()) {
// Add resize listener to ensure the widget itself is measured
getLayoutManager().addElementResizeListener(widget.getElement(),
childComponentResizeListener);
} else {
getLayoutManager().removeElementResizeListener(widget.getElement(),
childComponentResizeListener);
}
// Then update listeners based on bookkeeping
updateAllSlotListeners();
// Update the layout at this point to ensure it's OK even if we get no
// element resize events
updateLayoutHeight();
if (needsExpand()) {
widget.updateExpandedSizes();
// updateExpandedSizes causes fixed size components to temporarily
// lose their size. updateExpandCompensation must be delayed until
// the browser has a chance to measure them.
Scheduler.get()
.scheduleFinally(() -> widget.updateExpandCompensation());
} else {
widget.clearExpand();
}
Profiler.leave("AOLC.updateInternalState");
} | 3.68 |
hmily_JavaBeanBinder_addGetter | /**
* Add getter.
*
* @param getter the getter
*/
void addGetter(final Method getter) {
if (this.getter == null) {
this.getter = getter;
}
} | 3.68 |
morf_ViewChanges_topoSortViews | /**
* Performs a topological sort using a depth-first search algorithm and returns a sorted list of
* database views for the schema upgrade.
*
* @param allViews all of the database views bound into the target schema.
* @param index a complete index of all views in both the source and target schema.
* @return a <a href="http://en.wikipedia.org/wiki/Topological_sorting">topologically sorted</a> list of view names
*/
private List<String> topoSortViews(Collection<View> allViews, Map<String, View> index) {
if (log.isDebugEnabled()) {
log.debug("Toposorting: " + Joiner.on(", ").join(Collections2.transform(allViews, viewToName())));
}
// The set of views we want to perform the sort on.
Set<String> unmarkedViews = newHashSet(Collections2.transform(allViews, viewToName()));
Set<String> temporarilyMarkedRecords = newHashSet();
List<String> sortedList = newLinkedList();
while (!unmarkedViews.isEmpty()) {
String node = Iterables.getFirst(unmarkedViews, null);
visit(node, temporarilyMarkedRecords, sortedList, index);
unmarkedViews.remove(node);
}
return sortedList;
} | 3.68 |
flink_HadoopOutputFormatBase_close | /**
* Commits the task by moving the output file out of the temporary directory.
*
* @throws java.io.IOException if closing the record writer or committing the task fails
*/
@Override
public void close() throws IOException {
// enforce sequential close() calls
synchronized (CLOSE_MUTEX) {
this.recordWriter.close(new HadoopDummyReporter());
if (this.outputCommitter.needsTaskCommit(this.context)) {
this.outputCommitter.commitTask(this.context);
}
}
} | 3.68 |
Activiti_ProcessEngines_registerProcessEngine | /**
* Registers the given process engine. No {@link ProcessEngineInfo} will be available for this process engine. An engine that is registered will be closed when {@link ProcessEngines#destroy()}
* is called.
*/
public static void registerProcessEngine(ProcessEngine processEngine) {
processEngines.put(processEngine.getName(), processEngine);
} | 3.68 |
querydsl_NumberExpression_loe | /**
* Create a {@code this <= right} expression
*
* @param <A>
* @param right rhs of the comparison
* @return {@code this <= right}
* @see java.lang.Comparable#compareTo(Object)
*/
public final <A extends Number & Comparable<?>> BooleanExpression loe(Expression<A> right) {
return Expressions.booleanOperation(Ops.LOE, mixin, right);
} | 3.68 |
framework_HierarchicalQuery_getParent | /**
* Get the hierarchical parent object, where <code>null</code> corresponds
* to the root node.
*
* @return the hierarchical parent object
*/
public T getParent() {
return parent;
} | 3.68 |
flink_ThreadInfoSampleService_requestThreadInfoSamples | /**
* Returns a future that completes with a given number of thread info samples for a set of task
* threads.
*
* @param threads a map from thread id to the ExecutionAttemptID of the corresponding task.
* @param requestParams Parameters of the sampling request.
* @return A future containing the stack trace samples.
*/
public CompletableFuture<Map<ExecutionAttemptID, Collection<ThreadInfoSample>>>
requestThreadInfoSamples(
Map<Long, ExecutionAttemptID> threads,
final ThreadInfoSamplesRequest requestParams) {
checkNotNull(threads, "threads must not be null");
checkNotNull(requestParams, "requestParams must not be null");
CompletableFuture<Map<ExecutionAttemptID, Collection<ThreadInfoSample>>> resultFuture =
new CompletableFuture<>();
scheduledExecutor.execute(
() ->
requestThreadInfoSamples(
threads,
requestParams.getNumSamples(),
requestParams.getDelayBetweenSamples(),
requestParams.getMaxStackTraceDepth(),
CollectionUtil.newHashMapWithExpectedSize(threads.size()),
resultFuture));
return resultFuture;
} | 3.68 |
framework_VaadinFinderLocatorStrategy_splitFirstFragmentFromTheRest | /**
* Splits off the first path fragment from a path and returns an array of
* two elements, where the first element is the first path fragment and the
* second element is the rest of the path (all remaining path fragments
* untouched).
*
* @param path
* The path to split.
* @return An array of two elements: The first path fragment and the rest of
* the path.
*/
private String[] splitFirstFragmentFromTheRest(String path) {
int ixOfSlash = LocatorUtil.indexOfIgnoringQuoted(path, '/');
if (ixOfSlash > 0) {
return new String[] { path.substring(0, ixOfSlash),
path.substring(ixOfSlash) };
}
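        // no unquoted '/' found past the first character, so the whole path is a single fragment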
return new String[] { path };
} | 3.68 |
flink_StateConfigUtil_createTtlConfig | /**
* Creates a {@link StateTtlConfig} depending on the retentionTime parameter.
*
* @param retentionTime state TTL time in milliseconds.
*/
public static StateTtlConfig createTtlConfig(long retentionTime) {
if (retentionTime > 0) {
return StateTtlConfig.newBuilder(Time.milliseconds(retentionTime))
.setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
.setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
.build();
} else {
return StateTtlConfig.DISABLED;
}
} | 3.68 |
flink_MapValue_hashCode | /*
* (non-Javadoc)
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
final int prime = 47;
int result = 1;
result = prime * result + this.map.hashCode();
return result;
} | 3.68 |
framework_VAbstractSplitPanel_onMouseDown | /**
* Handle initiating content resize and moving of the split position when
* clicking the splitter with a mouse. If the click targets any other
* element, the split position is locked, or this split panel is not
* enabled, nothing is done.
*
* @param event
* the browser event
*/
public void onMouseDown(Event event) {
if (locked || !isEnabled()) {
return;
}
final Element trg = event.getEventTarget().cast();
if (trg == splitter || trg == DOM.getChild(splitter, 0)) {
startResize();
resizing = true;
DOM.setCapture(getElement());
origX = splitter.getPropertyInt("offsetLeft");
origY = splitter.getPropertyInt("offsetTop");
origMouseX = WidgetUtil.getTouchOrMouseClientX(event);
origMouseY = WidgetUtil.getTouchOrMouseClientY(event);
event.stopPropagation();
event.preventDefault();
}
} | 3.68 |
flink_DagConnection_getDataExchangeMode | /**
* Gets the data exchange mode to use for this connection.
*
* @return The data exchange mode to use for this connection.
*/
public ExecutionMode getDataExchangeMode() {
if (dataExchangeMode == null) {
throw new IllegalStateException(
"This connection does not have the data exchange mode set");
}
return dataExchangeMode;
} | 3.68 |
pulsar_ResourceUnitRanking_getLoadedBundles | /**
* Get the loaded bundles.
*/
public Set<String> getLoadedBundles() {
return loadedBundles;
} | 3.68 |
flink_MemoryManager_computeMemorySize | /**
* Computes the memory size corresponding to the fraction of all memory governed by this
* MemoryManager.
*
* @param fraction The fraction of all memory governed by this MemoryManager
* @return The memory size corresponding to the memory fraction
*/
public long computeMemorySize(double fraction) {
validateFraction(fraction);
return (long) Math.floor(memoryBudget.getTotalMemorySize() * fraction);
} | 3.68 |
hudi_HoodieRealtimeRecordReaderUtils_generateProjectionSchema | /**
* Generate a reader schema off the provided writeSchema, to just project out the provided columns.
*/
public static Schema generateProjectionSchema(Schema writeSchema, Map<String, Schema.Field> schemaFieldsMap,
List<String> fieldNames) {
/**
* Avro & Presto field names seem to be case sensitive (support fields differing only in case) whereas
* Hive/Impala/SparkSQL(default) are case-insensitive. Spark allows this to be configurable using
* spark.sql.caseSensitive=true
*
* For a RT table setup with no delta-files (for a latest file-slice) -> we translate the parquet schema to Avro.
* Here the field-name case is dependent on the parquet schema. Hive (1.x/2.x/CDH) translates column projections to
* lower-case.
*
*/
List<Schema.Field> projectedFields = new ArrayList<>();
for (String fn : fieldNames) {
Schema.Field field = schemaFieldsMap.get(fn.toLowerCase());
if (field == null) {
throw new HoodieException("Field " + fn + " not found in log schema. Query cannot proceed! "
+ "Derived Schema Fields: " + new ArrayList<>(schemaFieldsMap.keySet()));
} else {
projectedFields.add(new Schema.Field(field.name(), field.schema(), field.doc(), field.defaultVal()));
}
}
Schema projectedSchema = Schema.createRecord(writeSchema.getName(), writeSchema.getDoc(),
writeSchema.getNamespace(), writeSchema.isError());
projectedSchema.setFields(projectedFields);
return projectedSchema;
} | 3.68 |
pulsar_Reflections_loadClass | /**
* Load class to resolve array types.
*
* @param className class name
* @param classLoader class loader
* @return loaded class
* @throws ClassNotFoundException
*/
public static Class loadClass(String className, ClassLoader classLoader) throws ClassNotFoundException {
if (className.length() == 1) {
char type = className.charAt(0);
if (type == 'B') {
return Byte.TYPE;
} else if (type == 'C') {
return Character.TYPE;
} else if (type == 'D') {
return Double.TYPE;
} else if (type == 'F') {
return Float.TYPE;
} else if (type == 'I') {
return Integer.TYPE;
} else if (type == 'J') {
return Long.TYPE;
} else if (type == 'S') {
return Short.TYPE;
} else if (type == 'Z') {
return Boolean.TYPE;
} else if (type == 'V') {
return Void.TYPE;
} else {
throw new ClassNotFoundException(className);
}
} else if (isPrimitive(className)) {
return (Class) PRIMITIVE_NAME_TYPE_MAP.get(className);
} else if (className.charAt(0) == 'L' && className.charAt(className.length() - 1) == ';') {
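            // JVM-style object type descriptor, e.g. "Ljava.lang.String;": strip the leading 'L' and trailing ';'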
return classLoader.loadClass(className.substring(1, className.length() - 1));
} else {
try {
return classLoader.loadClass(className);
} catch (ClassNotFoundException | NoClassDefFoundError var4) {
if (className.charAt(0) != '[') {
throw var4;
} else {
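                // array descriptor such as "[I" or "[Ljava.lang.String;": count the leading '[' characters
                // to get the number of dimensions, then resolve the component type recursively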
// CHECKSTYLE.OFF: EmptyStatement
int arrayDimension;
for (arrayDimension = 0; className.charAt(arrayDimension) == '['; ++arrayDimension) {
}
// CHECKSTYLE.ON: EmptyStatement
Class componentType = loadClass(className.substring(arrayDimension), classLoader);
return Array.newInstance(componentType, new int[arrayDimension]).getClass();
}
}
}
} | 3.68 |
hbase_AbstractFSWALProvider_parseServerNameFromWALName | /**
* Parses the server name from a wal prefix. A wal's name always starts with a server name in non-test
* code.
* @throws IllegalArgumentException if the name passed in does not start with a server name
* @return the server name
*/
public static ServerName parseServerNameFromWALName(String name) {
String decoded;
try {
decoded = URLDecoder.decode(name, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
throw new AssertionError("should never happen", e);
}
Matcher matcher = SERVER_NAME_PATTERN.matcher(decoded);
if (matcher.find()) {
return ServerName.valueOf(matcher.group());
} else {
throw new IllegalArgumentException(name + " is not started with a server name");
}
} | 3.68 |
flink_ProjectOperator_projectTuple14 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>
ProjectOperator<
T,
Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>
projectTuple14() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>
tType =
new TupleTypeInfo<
Tuple14<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13>>(fTypes);
return new ProjectOperator<
T, Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(
this.ds, this.fieldIndexes, tType);
} | 3.68 |
pulsar_LoadSimulationController_handleStream | // Handle the command line arguments associated with the stream command.
private void handleStream(final ShellArguments arguments) throws Exception {
final List<String> commandArguments = arguments.commandArguments;
// Stream accepts 1 application argument: ZooKeeper connect string.
if (checkAppArgs(commandArguments.size() - 1, 1)) {
final String zkConnectString = commandArguments.get(1);
final ZooKeeper zkClient = new ZooKeeper(zkConnectString, 5000, null);
new BrokerWatcher(zkClient, arguments);
// This controller will now stream rate changes from the given ZK.
// Users wishing to stop this should Ctrl + C and use another
// Controller to send new commands.
while (true) {}
}
} | 3.68 |
hadoop_PlanningAlgorithm_allocateUser | /**
* Performs the actual allocation for a ReservationDefinition within a Plan.
*
* @param reservationId the identifier of the reservation
* @param user the user who owns the reservation
* @param plan the Plan to which the reservation must be fitted
* @param contract encapsulates the resources required by the user for his
* session
* @param oldReservation the existing reservation (null if none)
* @return whether the allocateUser function was successful or not
*
* @throws PlanningException if the session cannot be fitted into the plan
* @throws ContractValidationException if validation fails
*/
protected boolean allocateUser(ReservationId reservationId, String user,
Plan plan, ReservationDefinition contract,
ReservationAllocation oldReservation) throws PlanningException,
ContractValidationException {
// Adjust the ResourceDefinition to account for system "imperfections"
// (e.g., scheduling delays for large containers).
ReservationDefinition adjustedContract = adjustContract(plan, contract);
// Compute the job allocation
RLESparseResourceAllocation allocation =
computeJobAllocation(plan, reservationId, adjustedContract, user);
long period = Long.parseLong(contract.getRecurrenceExpression());
// Make allocation periodic if request is periodic
if (contract.getRecurrenceExpression() != null) {
if (period > 0) {
allocation =
new PeriodicRLESparseResourceAllocation(allocation, period);
}
}
// If no job allocation was found, fail
if (allocation == null) {
throw new PlanningException(
"The planning algorithm could not find a valid allocation"
+ " for your request");
}
// Translate the allocation to a map (with zero paddings)
long step = plan.getStep();
long jobArrival = stepRoundUp(adjustedContract.getArrival(), step);
long jobDeadline = stepRoundUp(adjustedContract.getDeadline(), step);
Map<ReservationInterval, Resource> mapAllocations =
allocationsToPaddedMap(allocation, jobArrival, jobDeadline, period);
// Create the reservation
ReservationAllocation capReservation =
new InMemoryReservationAllocation(reservationId, // ID
adjustedContract, // Contract
user, // User name
plan.getQueueName(), // Queue name
adjustedContract.getArrival(), adjustedContract.getDeadline(),
mapAllocations, // Allocations
plan.getResourceCalculator(), // Resource calculator
plan.getMinimumAllocation()); // Minimum allocation
// Add (or update) the reservation allocation
if (oldReservation != null) {
return plan.updateReservation(capReservation);
} else {
return plan.addReservation(capReservation, false);
}
} | 3.68 |
hibernate-validator_REGONValidator_getWeights | /**
* @param digits a list of digits to be verified. They are used to determine the size of the REGON number, i.e. whether it is a 9- or 14-digit number
*
* @return an array of weights to be used to calculate a checksum
*/
@Override
protected int[] getWeights(List<Integer> digits) {
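		// a 9-digit REGON yields 8 digits here and a 14-digit REGON yields 13; any other size has no defined weights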
if ( digits.size() == 8 ) {
return WEIGHTS_REGON_9;
}
else if ( digits.size() == 13 ) {
return WEIGHTS_REGON_14;
}
else {
return new int[] { };
}
} | 3.68 |
flink_CalciteParser_parse | /**
* Parses a SQL statement into a {@link SqlNode}. The {@link SqlNode} is not yet validated.
*
* @param sql a sql string to parse
* @return a parsed sql node
* @throws SqlParserException if an exception is thrown when parsing the statement
* @throws SqlParserEOFException if the statement is incomplete
*/
public SqlNode parse(String sql) {
try {
SqlParser parser = SqlParser.create(sql, config);
return parser.parseStmt();
} catch (SqlParseException e) {
if (e.getMessage().contains("Encountered \"<EOF>\"")) {
throw new SqlParserEOFException(e.getMessage(), e);
}
throw new SqlParserException("SQL parse failed. " + e.getMessage(), e);
}
} | 3.68 |
hbase_StoreUtils_getLargestFile | /**
* Gets the largest file (with reader) out of the list of files.
* @param candidates The files to choose from.
* @return The largest file; null if no file has a reader.
*/
static Optional<HStoreFile> getLargestFile(Collection<HStoreFile> candidates) {
return candidates.stream().filter(f -> f.getReader() != null)
.max((f1, f2) -> Long.compare(f1.getReader().length(), f2.getReader().length()));
} | 3.68 |
hibernate-validator_AnnotationApiHelper_getAnnotationValueOrDefault | /**
* Returns the annotation value of the given annotation mirror with the
* given name or its default value if it was not specified.
*
* @param annotationMirror An annotation mirror.
* @param name The name of the annotation value of interest.
*
* @return The annotation value with the given name or its default value, or null if one of the
* input values is null or if no value with the given name exists
* within the given annotation mirror.
*/
public AnnotationValue getAnnotationValueOrDefault(AnnotationMirror annotationMirror, String name) {
if ( annotationMirror == null || name == null ) {
return null;
}
Map<? extends ExecutableElement, ? extends AnnotationValue> elementValues = elementUtils.getElementValuesWithDefaults( annotationMirror );
for ( Entry<? extends ExecutableElement, ? extends AnnotationValue> oneElementValue : elementValues.entrySet() ) {
if ( oneElementValue.getKey().getSimpleName().contentEquals( name ) ) {
return oneElementValue.getValue();
}
}
return null;
} | 3.68 |
flink_ResolvedSchema_getColumnCount | /** Returns the number of {@link Column}s of this schema. */
public int getColumnCount() {
return columns.size();
} | 3.68 |
dubbo_MeshRuleRouter_getMeshRuleCache | /**
* For unit tests only.
*/
@Deprecated
public MeshRuleCache<T> getMeshRuleCache() {
return meshRuleCache;
} | 3.68 |
hmily_HmilyRepositoryStorage_removeHmilyTransaction | /**
* Remove hmily transaction.
*
* @param hmilyTransaction the hmily transaction
*/
public static void removeHmilyTransaction(final HmilyTransaction hmilyTransaction) {
if (Objects.nonNull(hmilyTransaction)) {
PUBLISHER.asyncPublishEvent(hmilyTransaction, EventTypeEnum.REMOVE_HMILY_TRANSACTION.getCode());
}
} | 3.68 |
dubbo_ClassSourceScanner_scopeModelInitializer | /**
* Beans that need to be injected in advance in different ScopeModels.
* For example, the RouterSnapshotSwitcher needs to be injected when ClusterScopeModelInitializer executes initializeFrameworkModel.
* @return Beans that need to be injected in advance
*/
public List<Class<?>> scopeModelInitializer() {
List<Class<?>> classes = new ArrayList<>();
classes.addAll(FrameworkModel.defaultModel().getBeanFactory().getRegisteredClasses());
classes.addAll(FrameworkModel.defaultModel()
.defaultApplication()
.getBeanFactory()
.getRegisteredClasses());
classes.addAll(FrameworkModel.defaultModel()
.defaultApplication()
.getDefaultModule()
.getBeanFactory()
.getRegisteredClasses());
return classes.stream().distinct().collect(Collectors.toList());
} | 3.68 |
framework_VTabsheet_getPreviousTabKey | /**
* Returns the key code of the keyboard shortcut that focuses the previous
* tab in a focused tabsheet.
*
* @return the key to move focus to the previous tab
*/
protected int getPreviousTabKey() {
return KeyCodes.KEY_LEFT;
} | 3.68 |
hadoop_OBSDataBlocks_innerClose | /**
* The close operation will delete the destination file if it still exists.
*/
@Override
protected void innerClose() {
final DestState state = getState();
LOG.debug("Closing {}", this);
switch (state) {
case Writing:
if (bufferFile.exists()) {
// file was not uploaded
LOG.debug(
"Block[{}]: Deleting buffer file as upload "
+ "did not start",
getIndex());
closeBlock();
}
break;
case Upload:
LOG.debug(
"Block[{}]: Buffer file {} exists close upload stream",
getIndex(), bufferFile);
break;
case Closed:
closeBlock();
break;
default:
// this state can never be reached, but checkstyle
// complains, so it is here.
}
} | 3.68 |
flink_Execution_deploy | /**
* Deploys the execution to the previously assigned resource.
*
* @throws JobException if the execution cannot be deployed to the assigned resource
*/
public void deploy() throws JobException {
assertRunningInJobMasterMainThread();
final LogicalSlot slot = assignedResource;
checkNotNull(
slot,
"In order to deploy the execution we first have to assign a resource via tryAssignResource.");
// Check if the TaskManager died in the meantime
// This only speeds up the response to TaskManagers failing concurrently to deployments.
// The more general check is the rpcTimeout of the deployment call
if (!slot.isAlive()) {
throw new JobException("Target slot (TaskManager) for deployment is no longer alive.");
}
// make sure exactly one deployment call happens from the correct state
ExecutionState previous = this.state;
if (previous == SCHEDULED) {
if (!transitionState(previous, DEPLOYING)) {
// race condition, someone else beat us to the deploying call.
// this should actually not happen and indicates a race somewhere else
throw new IllegalStateException(
"Cannot deploy task: Concurrent deployment call race.");
}
} else {
// vertex may have been cancelled, or it was already scheduled
throw new IllegalStateException(
"The vertex must be in SCHEDULED state to be deployed. Found state "
+ previous);
}
if (this != slot.getPayload()) {
throw new IllegalStateException(
String.format(
"The execution %s has not been assigned to the assigned slot.", this));
}
try {
// race double check, did we fail/cancel and do we need to release the slot?
if (this.state != DEPLOYING) {
slot.releaseSlot(
new FlinkException(
"Actual state of execution "
+ this
+ " ("
+ state
+ ") does not match expected state DEPLOYING."));
return;
}
LOG.info(
"Deploying {} (attempt #{}) with attempt id {} and vertex id {} to {} with allocation id {}",
vertex.getTaskNameWithSubtaskIndex(),
getAttemptNumber(),
attemptId,
vertex.getID(),
getAssignedResourceLocation(),
slot.getAllocationId());
final TaskDeploymentDescriptor deployment =
vertex.getExecutionGraphAccessor()
.getTaskDeploymentDescriptorFactory()
.createDeploymentDescriptor(
this,
slot.getAllocationId(),
taskRestore,
producedPartitions.values());
// null taskRestore to let it be GC'ed
taskRestore = null;
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final ComponentMainThreadExecutor jobMasterMainThreadExecutor =
vertex.getExecutionGraphAccessor().getJobMasterMainThreadExecutor();
getVertex().notifyPendingDeployment(this);
// We run the submission in the future executor so that the serialization of large TDDs
// does not block
// the main thread and sync back to the main thread once submission is completed.
CompletableFuture.supplyAsync(
() -> taskManagerGateway.submitTask(deployment, rpcTimeout), executor)
.thenCompose(Function.identity())
.whenCompleteAsync(
(ack, failure) -> {
if (failure == null) {
vertex.notifyCompletedDeployment(this);
} else {
final Throwable actualFailure =
ExceptionUtils.stripCompletionException(failure);
if (actualFailure instanceof TimeoutException) {
String taskname =
vertex.getTaskNameWithSubtaskIndex()
+ " ("
+ attemptId
+ ')';
markFailed(
new Exception(
"Cannot deploy task "
+ taskname
+ " - TaskManager ("
+ getAssignedResourceLocation()
+ ") not responding after a rpcTimeout of "
+ rpcTimeout,
actualFailure));
} else {
markFailed(actualFailure);
}
}
},
jobMasterMainThreadExecutor);
} catch (Throwable t) {
markFailed(t);
}
} | 3.68 |
hadoop_EncryptionSecretOperations_getSSEAwsKMSKey | /**
* Gets the SSE-KMS key if present; otherwise lets S3 use its AWS-managed key.
*
* @param secrets source of the encryption secrets.
* @return an optional key to attach to a request.
*/
public static Optional<String> getSSEAwsKMSKey(final EncryptionSecrets secrets) {
if ((secrets.getEncryptionMethod() == S3AEncryptionMethods.SSE_KMS
|| secrets.getEncryptionMethod() == S3AEncryptionMethods.DSSE_KMS)
&& secrets.hasEncryptionKey()) {
return Optional.of(secrets.getEncryptionKey());
} else {
return Optional.empty();
}
} | 3.68 |
hadoop_AbfsDtFetcher_addDelegationTokens | /**
* Returns Token object via FileSystem, null if bad argument.
* @param conf - a Configuration object used with FileSystem.get()
* @param creds - a Credentials object to which token(s) will be added
* @param renewer - the renewer to send with the token request
* @param url - the URL to which the request is sent
* @return a Token, or null if fetch fails.
*/
public Token<?> addDelegationTokens(Configuration conf,
Credentials creds,
String renewer,
String url) throws Exception {
if (!url.startsWith(getServiceName().toString())) {
url = getServiceName().toString() + "://" + url;
}
FileSystem fs = FileSystem.get(URI.create(url), conf);
Token<?> token = fs.getDelegationToken(renewer);
if (token == null) {
throw new IOException(FETCH_FAILED + ": " + url);
}
creds.addToken(token.getService(), token);
return token;
} | 3.68 |
morf_SqlDialect_repairAutoNumberStartPosition | /**
* Make sure the table provided has its next autonum value set to at least the value specified in the column metadata.
*
* <p>Generally databases do not need to do anything special here, but MySQL can lose the value.</p>
*
* @param table The table to repair.
* @param executor The executor to use
* @param connection The connection to use
*/
@SuppressWarnings("unused")
public void repairAutoNumberStartPosition(Table table, SqlScriptExecutor executor, Connection connection) {
} | 3.68 |
hadoop_NativeRuntime_releaseNativeObject | /**
* Destroys a native object. We use this to destroy native handlers.
*/
public synchronized static void releaseNativeObject(long addr) {
assertNativeLibraryLoaded();
JNIReleaseNativeObject(addr);
} | 3.68 |
framework_GAEVaadinServlet_cleanDatastore | /**
* This will look at the timestamp and delete expired persisted Vaadin and
* appengine sessions from the datastore.
*
* TODO Possible improvements include: 1. Use transactions (requires entity
* groups - overkill?) 2. Delete one-at-a-time, catch possible exception,
* continue w/ next.
*/
private void cleanDatastore() {
long expire = new Date().getTime();
try {
DatastoreService ds = DatastoreServiceFactory.getDatastoreService();
// Vaadin stuff first
{
Query q = new Query(AC_BASE);
q.setKeysOnly();
q.addFilter(PROPERTY_EXPIRES, FilterOperator.LESS_THAN_OR_EQUAL,
expire);
PreparedQuery pq = ds.prepare(q);
List<Entity> entities = pq
.asList(Builder.withLimit(CLEANUP_LIMIT));
if (entities != null) {
getLogger().log(Level.INFO,
"Vaadin cleanup deleting {0} expired Vaadin sessions.",
entities.size());
List<Key> keys = new ArrayList<Key>();
for (Entity e : entities) {
keys.add(e.getKey());
}
ds.delete(keys);
}
}
// Also cleanup GAE sessions
{
Query q = new Query(APPENGINE_SESSION_KIND);
q.setKeysOnly();
q.addFilter(PROPERTY_APPENGINE_EXPIRES,
FilterOperator.LESS_THAN_OR_EQUAL, expire);
PreparedQuery pq = ds.prepare(q);
List<Entity> entities = pq
.asList(Builder.withLimit(CLEANUP_LIMIT));
if (entities != null) {
getLogger().log(Level.INFO,
"Vaadin cleanup deleting {0} expired appengine sessions.",
entities.size());
List<Key> keys = new ArrayList<Key>();
for (Entity e : entities) {
keys.add(e.getKey());
}
ds.delete(keys);
}
}
} catch (Exception e) {
getLogger().log(Level.WARNING, "Exception while cleaning.", e);
}
} | 3.68 |
flink_MultipleFuturesAvailabilityHelper_anyOf | /**
* Combine {@code availabilityFuture} using anyOf logic with other previously registered
* futures.
*/
public void anyOf(final int idx, CompletableFuture<?> availabilityFuture) {
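        // only (re)register a future for this input if none is set yet or the previous one has already completed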
if (futuresToCombine[idx] == null || futuresToCombine[idx].isDone()) {
futuresToCombine[idx] = availabilityFuture;
assertNoException(availabilityFuture.thenRun(this::notifyCompletion));
}
} | 3.68 |
hbase_UserMetrics_getRequestCount | /**
* Returns the number of write requests and read requests and coprocessor service requests made by
* the user
*/
default long getRequestCount() {
return getReadRequestCount() + getWriteRequestCount();
} | 3.68 |
dubbo_AbstractServerCall_onHeader | // stream listener start
@Override
public void onHeader(Map<String, Object> requestMetadata) {
this.requestMetadata = requestMetadata;
if (serviceDescriptor == null) {
responseErr(TriRpcStatus.UNIMPLEMENTED.withDescription("Service not found:" + serviceName));
return;
}
startCall();
} | 3.68 |
hudi_IncrSourceHelper_getMissingCheckpointStrategy | /**
* Determine the policy to choose if a checkpoint is missing (detected by the absence of a beginInstant),
* during a run of a {@link HoodieIncrSource}.
*
* @param props the usual Hudi props object
* @return the {@link MissingCheckpointStrategy} to use, or null if none is configured
*/
public static MissingCheckpointStrategy getMissingCheckpointStrategy(TypedProperties props) {
boolean readLatestOnMissingCkpt = getBooleanWithAltKeys(props, READ_LATEST_INSTANT_ON_MISSING_CKPT);
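    // the legacy boolean flag, when enabled, takes precedence over an explicitly configured strategy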
if (readLatestOnMissingCkpt) {
return MissingCheckpointStrategy.READ_LATEST;
}
if (containsConfigProperty(props, MISSING_CHECKPOINT_STRATEGY)) {
return MissingCheckpointStrategy.valueOf(getStringWithAltKeys(props, MISSING_CHECKPOINT_STRATEGY));
}
return null;
} | 3.68 |
flink_MemorySize_getKibiBytes | /** Gets the memory size in Kibibytes (= 1024 bytes). */
public long getKibiBytes() {
return bytes >> 10;
} | 3.68 |
hbase_DisableTableProcedure_holdLock | // For disabling a table, we do not care whether a region can be online, so hold the table xlock
// forever. This will simplify the logic as we will not conflict with procedures other than
// SCP.
@Override
protected boolean holdLock(MasterProcedureEnv env) {
return true;
} | 3.68 |
hbase_ShutdownHook_main | /**
* Main to test basic functionality. Run with clean hadoop 0.20 and hadoop 0.21 and cloudera
* patched hadoop to make sure our shutdown hook handling works for all combinations. Pass
* '-Dhbase.shutdown.hook=false' to test turning off the running of shutdown hooks.
*/
public static void main(final String[] args) throws IOException {
Configuration conf = HBaseConfiguration.create();
String prop = System.getProperty(RUN_SHUTDOWN_HOOK);
if (prop != null) {
conf.setBoolean(RUN_SHUTDOWN_HOOK, Boolean.parseBoolean(prop));
}
// Instantiate a FileSystem. This will register the fs shutdown hook.
FileSystem fs = FileSystem.get(conf);
Thread donothing = new DoNothingThread();
donothing.start();
ShutdownHook.install(conf, fs, new DoNothingStoppable(), donothing);
} | 3.68 |
flink_MemorySegment_getBoolean | /**
* Reads one byte at the given position and returns its boolean representation.
*
* @param index The position from which the memory will be read.
* @return The boolean value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 1.
*/
public boolean getBoolean(int index) {
return get(index) != 0;
} | 3.68 |
framework_AccordionTabIds_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
Accordion accordion = new Accordion();
final Tab firstTab = accordion.addTab(new Label(FIRST_TAB_MESSAGE));
firstTab.setId(FIRST_TAB_ID);
Button setIdButton = new Button("Set id");
setIdButton.addClickListener(event -> firstTab.setId(FIRST_TAB_ID));
Button clearIdButton = new Button("Clear id");
clearIdButton.addClickListener(event -> firstTab.setId(null));
addComponents(setIdButton, clearIdButton, accordion);
} | 3.68 |
flink_AllWindowedStream_sum | /**
* Applies an aggregation that sums the given field of the pojo data stream for
* every window.
*
* <p>A field expression is either the name of a public field or a getter method with
* parentheses of the stream's underlying type. A dot can be used to drill down into objects, as
* in {@code "field1.getInnerField2()" }.
*
* @param field The field to sum
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> sum(String field) {
return aggregate(new SumAggregator<>(field, input.getType(), input.getExecutionConfig()));
} | 3.68 |
hbase_HBaseTestingUtility_getConfiguration | /**
* Returns this classes's instance of {@link Configuration}. Be careful how you use the returned
* Configuration since {@link Connection} instances can be shared. The Map of Connections is keyed
* by the Configuration. If say, a Connection was being used against a cluster that had been
* shutdown, see {@link #shutdownMiniCluster()}, then the Connection will no longer be wholesome.
* Rather than using the return value directly, it's usually best to make a copy and use that. Do
* <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
* @return Instance of Configuration.
*/
@Override
public Configuration getConfiguration() {
return super.getConfiguration();
} | 3.68 |
hmily_HmilyTccTransactionExecutor_preTry | /**
* transaction preTry.
*
* @param point cut point.
* @return TccTransaction hmily transaction
*/
public HmilyTransaction preTry(final ProceedingJoinPoint point) {
LogUtil.debug(LOGGER, () -> "......hmily tcc transaction starter....");
//build tccTransaction
HmilyTransaction hmilyTransaction = createHmilyTransaction();
HmilyRepositoryStorage.createHmilyTransaction(hmilyTransaction);
HmilyParticipant hmilyParticipant = buildHmilyParticipant(point, null, null, HmilyRoleEnum.START.getCode(), hmilyTransaction.getTransId());
HmilyRepositoryStorage.createHmilyParticipant(hmilyParticipant);
hmilyTransaction.registerParticipant(hmilyParticipant);
//save tccTransaction in threadLocal
HmilyTransactionHolder.getInstance().set(hmilyTransaction);
//set TccTransactionContext this context transfer remote
HmilyTransactionContext context = new HmilyTransactionContext();
//set action is try
context.setAction(HmilyActionEnum.TRYING.getCode());
context.setTransId(hmilyTransaction.getTransId());
context.setRole(HmilyRoleEnum.START.getCode());
context.setTransType(TransTypeEnum.TCC.name());
HmilyContextHolder.set(context);
return hmilyTransaction;
} | 3.68 |
hbase_MemStoreCompactor_createSubstitution | /**
* Creates the ImmutableSegment either by merge or copy-compact of the segments of the pipeline,
* based on the Compactor Iterator. The new ImmutableSegment is returned.
*/
private ImmutableSegment createSubstitution(MemStoreCompactionStrategy.Action action)
throws IOException {
ImmutableSegment result = null;
MemStoreSegmentsIterator iterator = null;
List<ImmutableSegment> segments = versionedList.getStoreSegments();
for (ImmutableSegment s : segments) {
s.waitForUpdates(); // to ensure all updates preceding s in-memory flush have completed.
// we skip empty segment when create MemStoreSegmentsIterator following.
}
switch (action) {
case COMPACT:
iterator = new MemStoreCompactorSegmentsIterator(segments,
compactingMemStore.getComparator(), compactionKVMax, compactingMemStore.getStore());
result = SegmentFactory.instance().createImmutableSegmentByCompaction(
compactingMemStore.getConfiguration(), compactingMemStore.getComparator(), iterator,
versionedList.getNumOfCells(), compactingMemStore.getIndexType(), action);
iterator.close();
break;
case MERGE:
case MERGE_COUNT_UNIQUE_KEYS:
iterator = new MemStoreMergerSegmentsIterator(segments, compactingMemStore.getComparator(),
compactionKVMax);
result = SegmentFactory.instance().createImmutableSegmentByMerge(
compactingMemStore.getConfiguration(), compactingMemStore.getComparator(), iterator,
versionedList.getNumOfCells(), segments, compactingMemStore.getIndexType(), action);
iterator.close();
break;
default:
throw new RuntimeException("Unknown action " + action); // sanity check
}
return result;
} | 3.68 |
pulsar_FieldParser_stringToLong | /**
* Converts String to Long.
*
* @param val
* The String to be converted.
* @return The converted Long value.
*/
public static Long stringToLong(String val) {
return Long.valueOf(trim(val));
} | 3.68 |
pulsar_KerberosName_getDefaultRealm | /**
* Get the configured default realm.
* @return the default realm from the krb5.conf
*/
public String getDefaultRealm() {
return defaultRealm;
} | 3.68 |
hadoop_SubApplicationRowKey_parseRowKey | /**
* Given the raw row key as bytes, returns the row key as an object.
*
* @param rowKey byte representation of row key.
* @return An <cite>SubApplicationRowKey</cite> object.
*/
public static SubApplicationRowKey parseRowKey(byte[] rowKey) {
return new SubApplicationRowKeyConverter().decode(rowKey);
} | 3.68 |
flink_OptimizerNode_isBranching | /**
* Checks whether this node has branching output. A node's output is branched, if it has more
* than one output connection.
*
* @return True, if the node's output branches. False otherwise.
*/
public boolean isBranching() {
return getOutgoingConnections() != null && getOutgoingConnections().size() > 1;
} | 3.68 |
flink_TableChange_getNewType | /** Get the column type for the new column. */
public DataType getNewType() {
return newColumn.getDataType();
} | 3.68 |
flink_RichSqlInsert_getTargetTableID | /** Returns the target table identifier. */
public SqlNode getTargetTableID() {
return targetTableID;
} | 3.68 |
flink_FactoryUtil_validate | /** Validates the options of the factory. It checks for unconsumed option keys. */
public void validate() {
validateFactoryOptions(factory, allOptions);
validateUnconsumedKeys(
factory.factoryIdentifier(),
allOptions.keySet(),
consumedOptionKeys,
deprecatedOptionKeys);
validateWatermarkOptions(factory.factoryIdentifier(), allOptions);
} | 3.68 |
hbase_MultithreadedTableMapper_setNumberOfThreads | /**
* Set the number of threads in the pool for running maps.
* @param job the job to modify
* @param threads the new number of threads
*/
public static void setNumberOfThreads(Job job, int threads) {
job.getConfiguration().setInt(NUMBER_OF_THREADS, threads);
} | 3.68 |
hadoop_TFile_seekTo | /**
* Move the cursor to the new location. The entry returned by the previous
* entry() call will be invalid.
*
* @param l
* new cursor location. It must fall between the begin and end
* location of the scanner.
* @throws IOException
*/
private void seekTo(Location l) throws IOException {
if (l.compareTo(beginLocation) < 0) {
throw new IllegalArgumentException(
"Attempt to seek before the begin location.");
}
if (l.compareTo(endLocation) > 0) {
throw new IllegalArgumentException(
"Attempt to seek after the end location.");
}
if (l.compareTo(endLocation) == 0) {
parkCursorAtEnd();
return;
}
if (l.getBlockIndex() != currentLocation.getBlockIndex()) {
// going to a totally different block
initBlock(l.getBlockIndex());
} else {
if (valueChecked) {
// may temporarily go beyond the last record in the block (in which
// case the next if loop will always be true).
inBlockAdvance(1);
}
if (l.getRecordIndex() < currentLocation.getRecordIndex()) {
initBlock(l.getBlockIndex());
}
}
inBlockAdvance(l.getRecordIndex() - currentLocation.getRecordIndex());
return;
} | 3.68 |
hbase_HFileBlockIndex_getNumRootEntries | /** Returns how many block index entries there are in the root level */
public final int getNumRootEntries() {
return rootChunk.getNumEntries();
} | 3.68 |
hbase_OnlineLogRecord_getBlockBytesScanned | /**
* Return the amount of block bytes scanned to retrieve the response cells.
*/
public long getBlockBytesScanned() {
return blockBytesScanned;
} | 3.68 |
graphhopper_EdgeBasedTarjanSCC_findComponents | /**
* Runs Tarjan's algorithm using an explicit stack.
*
* @param edgeTransitionFilter Only edge transitions accepted by this filter will be considered when we explore the graph.
* If a turn is not accepted the corresponding path will be ignored (edges that are only connected
* by a path with such a turn will not be considered to belong to the same component)
* @param excludeSingleEdgeComponents if set to true components that only contain a single edge will not be
* returned when calling {@link #findComponents} or {@link #findComponentsRecursive()},
* which can be useful to save some memory.
*/
public static ConnectedComponents findComponents(Graph graph, EdgeTransitionFilter edgeTransitionFilter, boolean excludeSingleEdgeComponents) {
return new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, excludeSingleEdgeComponents).findComponents();
} | 3.68 |
framework_AbstractMedia_getSources | /**
* @return The sources pointed to in this media.
*/
public List<Resource> getSources() {
List<Resource> sources = new ArrayList<>();
for (URLReference ref : getState(false).sources) {
sources.add(((ResourceReference) ref).getResource());
}
return sources;
} | 3.68 |
flink_PythonShellParser_printError | /**
* Prints the error message and help for the client.
*
* @param msg error message
*/
private static void printError(String msg) {
System.err.println(msg);
System.err.println(
"Valid cluster type are \"local\", \"remote <hostname> <portnumber>\", \"yarn\".");
System.err.println();
System.err.println("Specify the help option (-h or --help) to get help on the command.");
} | 3.68 |
hbase_HRegionFileSystem_writeRegionInfoFileContent | /**
* Write the .regioninfo file on-disk.
* <p/>
* Overwrites the file if it already exists.
*/
private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs,
final Path regionInfoFile, final byte[] content) throws IOException {
// First check to get the permissions
FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
// Write the RegionInfo file content
try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null)) {
out.write(content);
}
} | 3.68 |
flink_KeyMap_putIfAbsent | /**
* Inserts a value for the given key, if no value is yet contained for that key. Otherwise,
* returns the value currently contained for the key.
*
* <p>The value that is inserted in case that the key is not contained, yet, is lazily created
* using the given factory.
*
* @param key The key to insert.
* @param factory The factory that produces the value, if no value is contained, yet, for the
* key.
* @return The value in the map after this operation (either the previously contained value, or
* the newly created value).
* @throws java.lang.NullPointerException Thrown, if the key is null.
*/
public final V putIfAbsent(K key, LazyFactory<V> factory) {
final int hash = hash(key);
final int slot = indexOf(hash);
// search the chain from the slot
for (Entry<K, V> entry = table[slot]; entry != null; entry = entry.next) {
if (entry.hashCode == hash && entry.key.equals(key)) {
// found match
return entry.value;
}
}
// no match, insert a new value
V value = factory.create();
insertNewEntry(hash, key, value, slot);
// return the created value
return value;
} | 3.68 |
AreaShop_ResoldRegionEvent_getFromPlayer | /**
* Get the player that the region has been bought from.
* @return The UUID of the player that the region has been bought from
*/
public UUID getFromPlayer() {
return from;
} | 3.68 |
hbase_ExportSnapshot_getOutputPath | /**
* Returns the location where the inputPath will be copied.
*/
private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException {
Path path = null;
switch (inputInfo.getType()) {
case HFILE:
Path inputPath = new Path(inputInfo.getHfile());
String family = inputPath.getParent().getName();
TableName table = HFileLink.getReferencedTableName(inputPath.getName());
String region = HFileLink.getReferencedRegionName(inputPath.getName());
String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
path = new Path(CommonFSUtils.getTableDir(new Path("./"), table),
new Path(region, new Path(family, hfile)));
break;
case WAL:
LOG.warn("snapshot does not keeps WALs: " + inputInfo);
break;
default:
throw new IOException("Invalid File Type: " + inputInfo.getType().toString());
}
return new Path(outputArchive, path);
} | 3.68 |
hbase_SnapshotSegmentScanner_getScannerOrder | /**
* @see KeyValueScanner#getScannerOrder()
*/
@Override
public long getScannerOrder() {
return 0;
} | 3.68 |
hudi_SparkMain_upgradeOrDowngradeTable | /**
* Upgrade or downgrade table.
*
* @param jsc instance of {@link JavaSparkContext} to use.
* @param basePath base path of the dataset.
* @param toVersion version to which upgrade/downgrade to be done.
* @return 0 if success, else -1.
* @throws Exception
*/
protected static int upgradeOrDowngradeTable(JavaSparkContext jsc, String basePath, String toVersion) {
HoodieWriteConfig config = getWriteConfig(basePath, Boolean.parseBoolean(HoodieWriteConfig.ROLLBACK_USING_MARKERS_ENABLE.defaultValue()),
false);
HoodieTableMetaClient metaClient =
HoodieTableMetaClient.builder().setConf(jsc.hadoopConfiguration()).setBasePath(config.getBasePath())
.setLoadActiveTimelineOnLoad(false).setConsistencyGuardConfig(config.getConsistencyGuardConfig())
.setLayoutVersion(Option.of(new TimelineLayoutVersion(config.getTimelineLayoutVersion())))
.setFileSystemRetryConfig(config.getFileSystemRetryConfig()).build();
HoodieWriteConfig updatedConfig = HoodieWriteConfig.newBuilder().withProps(config.getProps())
.forTable(metaClient.getTableConfig().getTableName()).build();
try {
new UpgradeDowngrade(metaClient, updatedConfig, new HoodieSparkEngineContext(jsc), SparkUpgradeDowngradeHelper.getInstance())
.run(HoodieTableVersion.valueOf(toVersion), null);
LOG.info(String.format("Table at \"%s\" upgraded / downgraded to version \"%s\".", basePath, toVersion));
return 0;
} catch (Exception e) {
LOG.warn(String.format("Failed: Could not upgrade/downgrade table at \"%s\" to version \"%s\".", basePath, toVersion), e);
return -1;
}
} | 3.68 |
morf_AbstractSqlDialectTest_testIsNull | /**
* Test that IsNull functionality behaves as expected.
*/
@Test
@SuppressWarnings("deprecation")
public void testIsNull() {
String result = testDialect.getSqlFrom(isnull(new FieldLiteral("A"), new FieldLiteral("B")));
assertEquals(expectedIsNull(), result);
} | 3.68 |
flink_StreamExecutionEnvironment_enableCheckpointing | /**
* Enables checkpointing for the streaming job. The distributed state of the streaming dataflow
* will be periodically snapshotted. In case of a failure, the streaming dataflow will be
* restarted from the latest completed checkpoint. This method selects {@link
* CheckpointingMode#EXACTLY_ONCE} guarantees.
*
* <p>The job draws checkpoints periodically, in the default interval. The state will be stored
* in the configured state backend.
*
* <p>NOTE: Checkpointing iterative streaming dataflows is not properly supported at the moment.
* For that reason, iterative jobs will not be started if used with enabled checkpointing. To
* override this mechanism, use the {@link #enableCheckpointing(long, CheckpointingMode,
* boolean)} method.
*
* @deprecated Use {@link #enableCheckpointing(long)} instead.
*/
@Deprecated
@PublicEvolving
public StreamExecutionEnvironment enableCheckpointing() {
checkpointCfg.setCheckpointInterval(500);
return this;
} | 3.68 |
open-banking-gateway_IgnoreFieldsLoaderFactory_createIgnoreFieldsLoader | /**
* Creates ignore rules for a given protocol
* @param protocolId Protocol to load ignore rules for
* @return Field code to Ignore Rule loader
*/
public FieldsToIgnoreLoader createIgnoreFieldsLoader(Long protocolId) {
if (null == protocolId) {
return new NoopFieldsToIgnoreLoader();
}
return new FieldsToIgnoreLoaderImpl(protocolId, ignoreValidationRuleRepository);
} | 3.68 |
hbase_AbstractRpcClient_configureHBaseRpcController | /**
* Configures an HBase RpcController.
* @param controller to configure
* @param channelOperationTimeout timeout for operation
* @return configured controller
*/
static HBaseRpcController configureHBaseRpcController(RpcController controller,
int channelOperationTimeout) {
HBaseRpcController hrc;
if (controller != null && controller instanceof HBaseRpcController) {
hrc = (HBaseRpcController) controller;
if (!hrc.hasCallTimeout()) {
hrc.setCallTimeout(channelOperationTimeout);
}
} else {
hrc = new HBaseRpcControllerImpl();
hrc.setCallTimeout(channelOperationTimeout);
}
return hrc;
} | 3.68 |
framework_StaticSection_getCellState | /**
* Returns the shared state of this cell.
*
* @return the cell state
*/
protected CellState getCellState() {
return cellState;
} | 3.68 |
flink_PythonDependencyUtils_addPythonFile | /**
* Adds a Python dependency which could be .py files, Python packages(.zip, .egg etc.) or
* local directories. The dependencies will be added to the PYTHONPATH of the Python UDF
* worker and the local Py4J python client.
*
* @param filePath The path of the Python dependency.
*/
private void addPythonFile(Configuration pythonDependencyConfig, String filePath) {
Preconditions.checkNotNull(filePath);
String fileKey = generateUniqueFileKey(PYTHON_FILE_PREFIX, filePath);
registerCachedFileIfNotExist(filePath, fileKey);
if (!pythonDependencyConfig.contains(PYTHON_FILES_DISTRIBUTED_CACHE_INFO)) {
pythonDependencyConfig.set(
PYTHON_FILES_DISTRIBUTED_CACHE_INFO, new LinkedHashMap<>());
}
pythonDependencyConfig
.get(PYTHON_FILES_DISTRIBUTED_CACHE_INFO)
.put(fileKey, new File(filePath).getName());
} | 3.68 |
hbase_CellBlockBuilder_buildCellBlock | /**
* Puts CellScanner Cells into a cell block using passed in <code>codec</code> and/or
* <code>compressor</code>.
* @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using
* passed in <code>codec</code> and/or <code>compressor</code>; the returned buffer has
* been flipped and is ready for reading. Use limit to find total size.
*/
public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
final CellScanner cellScanner) throws IOException {
ByteBufferOutputStreamSupplier supplier = new ByteBufferOutputStreamSupplier();
if (buildCellBlock(codec, compressor, cellScanner, supplier)) {
ByteBuffer bb = supplier.baos.getByteBuffer();
// If no cells, don't mess around. Just return null (could be a bunch of existence checking
// gets or something -- stuff that does not return a cell).
return bb.hasRemaining() ? bb : null;
} else {
return null;
}
} | 3.68 |
Activiti_Tree_bind | /**
 * Creates bindings for this tree.
* @param fnMapper the function mapper to use
* @param varMapper the variable mapper to use
* @param converter custom type converter
* @return tree bindings
*/
public Bindings bind(
FunctionMapper fnMapper,
VariableMapper varMapper,
TypeConverter converter
) {
Method[] methods = null;
if (!functions.isEmpty()) {
if (fnMapper == null) {
throw new ELException(
LocalMessages.get("error.function.nomapper")
);
}
methods = new Method[functions.size()];
for (int i = 0; i < functions.size(); i++) {
FunctionNode node = functions.get(i);
String image = node.getName();
Method method = null;
int colon = image.indexOf(':');
if (colon < 0) {
method = fnMapper.resolveFunction("", image);
} else {
method =
fnMapper.resolveFunction(
image.substring(0, colon),
image.substring(colon + 1)
);
}
if (method == null) {
throw new ELException(
LocalMessages.get("error.function.notfound", image)
);
}
if (node.isVarArgs() && method.isVarArgs()) {
                    if (method.getParameterTypes().length > node.getParamCount() + 1) {
                        throw new ELException(
                            LocalMessages.get("error.function.params", image)
                        );
                    }
                } else {
                    if (method.getParameterTypes().length != node.getParamCount()) {
                        throw new ELException(
                            LocalMessages.get("error.function.params", image)
                        );
                    }
                }
methods[node.getIndex()] = method;
}
}
ValueExpression[] expressions = null;
if (identifiers.size() > 0) {
expressions = new ValueExpression[identifiers.size()];
for (int i = 0; i < identifiers.size(); i++) {
IdentifierNode node = identifiers.get(i);
ValueExpression expression = null;
if (varMapper != null) {
expression = varMapper.resolveVariable(node.getName());
}
expressions[node.getIndex()] = expression;
}
}
return new Bindings(methods, expressions, converter);
} | 3.68 |
hbase_TableSnapshotInputFormatImpl_calculateLocationsForInputSplit | /**
* Compute block locations for snapshot files (which will get the locations for referred hfiles)
* only when localityEnabled is true.
*/
private static List<String> calculateLocationsForInputSplit(Configuration conf,
TableDescriptor htd, RegionInfo hri, Path tableDir) throws IOException {
return getBestLocations(conf, HRegion.computeHDFSBlocksDistribution(conf, htd, hri, tableDir));
} | 3.68 |
flink_BinaryStringDataUtil_toBoolean | /** Parse a {@link StringData} to boolean. */
public static boolean toBoolean(BinaryStringData str) throws TableException {
BinaryStringData lowerCase = str.toLowerCase();
if (TRUE_STRINGS.contains(lowerCase)) {
return true;
}
if (FALSE_STRINGS.contains(lowerCase)) {
return false;
}
throw new TableException("Cannot parse '" + str + "' as BOOLEAN.");
} | 3.68 |
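A small usage sketch; BinaryStringData.fromString is the usual way to build the input, and the exact set of accepted spellings beyond "true"/"false" depends on TRUE_STRINGS/FALSE_STRINGS, which are defined elsewhere:

boolean b1 = BinaryStringDataUtil.toBoolean(BinaryStringData.fromString("TRUE"));  // true (case-insensitive)
boolean b2 = BinaryStringDataUtil.toBoolean(BinaryStringData.fromString("false")); // false
BinaryStringDataUtil.toBoolean(BinaryStringData.fromString("maybe"));              // throws TableException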
flink_ChangelogStateFactory_getExistingState | /**
* @param name state name
 * @param type state type (the only supported types currently are {@link
 *     StateMetaInfoSnapshot.BackendStateType#KEY_VALUE key value} and {@link
 *     StateMetaInfoSnapshot.BackendStateType#PRIORITY_QUEUE priority queue})
 * @return an existing state, i.e. one that was already created. The returned state will not
 *     apply TTL to the passed values, regardless of the TTL settings. This prevents TTL from
 *     being applied twice (recovered values already carry TTL if TTL was enabled). The state
 *     will, however, use the TTL serializer if TTL is enabled. WARN: only valid during recovery.
* @throws UnsupportedOperationException if state type is not supported
*/
public ChangelogState getExistingState(String name, StateMetaInfoSnapshot.BackendStateType type)
throws UnsupportedOperationException {
ChangelogState state;
switch (type) {
case KEY_VALUE:
state = changelogStates.get(name);
break;
case PRIORITY_QUEUE:
state = priorityQueueStatesByName.get(name);
break;
default:
throw new UnsupportedOperationException(
String.format("Unknown state type %s (%s)", type, name));
}
return state;
} | 3.68 |
framework_BeanUtil_getPropertyDescriptors | // Workaround for Java6 bug JDK-6788525. Do nothing for JDK7+.
private static List<PropertyDescriptor> getPropertyDescriptors(
BeanInfo beanInfo) {
PropertyDescriptor[] descriptors = beanInfo.getPropertyDescriptors();
List<PropertyDescriptor> result = new ArrayList<>(descriptors.length);
for (PropertyDescriptor descriptor : descriptors) {
try {
Method readMethod = getMethodFromBridge(
descriptor.getReadMethod());
if (readMethod != null) {
Method writeMethod = getMethodFromBridge(
descriptor.getWriteMethod(),
readMethod.getReturnType());
if (writeMethod == null) {
writeMethod = descriptor.getWriteMethod();
}
PropertyDescriptor descr = new PropertyDescriptor(
descriptor.getName(), readMethod, writeMethod);
result.add(descr);
} else {
result.add(descriptor);
}
} catch (SecurityException ignore) {
// handle next descriptor
} catch (IntrospectionException e) {
result.add(descriptor);
}
}
return result;
} | 3.68 |
flink_AbstractBytesMultiMap_updateValuePointer | /** Updates the value pointer stored at the given offset. */
private void updateValuePointer(RandomAccessInputView view, int newPointer, int ptrOffset)
throws IOException {
view.setReadPosition(ptrOffset);
int currPosInSeg = view.getCurrentPositionInSegment();
view.getCurrentSegment().putInt(currPosInSeg, newPointer);
} | 3.68 |
framework_AbstractComponent_setDebugId | /**
* @deprecated As of 7.0. Use {@link #setId(String)}
*/
@Deprecated
public void setDebugId(String id) {
setId(id);
} | 3.68 |
hadoop_JobMonitor_add | /**
* Add a running job's status to the polling queue.
*/
public void add(JobStats job) throws InterruptedException {
runningJobs.put(job);
} | 3.68 |
flink_Catalog_bulkGetPartitionStatistics | /**
* Get a list of statistics of given partitions.
*
* @param tablePath path of the table
 * @param partitionSpecs partition specs of the partitions whose statistics should be fetched;
 *     all other partitions are ignored, i.e. the statistics fetch is limited to the given
 *     partitions
* @return list of statistics of given partitions
* @throws PartitionNotExistException if one partition does not exist
* @throws CatalogException in case of any runtime exception
*/
default List<CatalogTableStatistics> bulkGetPartitionStatistics(
ObjectPath tablePath, List<CatalogPartitionSpec> partitionSpecs)
throws PartitionNotExistException, CatalogException {
checkNotNull(partitionSpecs, "partitionSpecs cannot be null");
List<CatalogTableStatistics> result = new ArrayList<>(partitionSpecs.size());
for (CatalogPartitionSpec partitionSpec : partitionSpecs) {
result.add(getPartitionStatistics(tablePath, partitionSpec));
}
return result;
} | 3.68 |
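A hedged usage sketch of the default implementation; the catalog instance, database, table and partition column are made up for illustration:

ObjectPath tablePath = new ObjectPath("default_database", "orders");
List<CatalogPartitionSpec> specs = Arrays.asList(
    new CatalogPartitionSpec(Collections.singletonMap("dt", "2024-01-01")),
    new CatalogPartitionSpec(Collections.singletonMap("dt", "2024-01-02")));
// One statistics entry per requested partition, in the same order as the specs.
List<CatalogTableStatistics> stats = catalog.bulkGetPartitionStatistics(tablePath, specs);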
graphhopper_OSMValueExtractor_stringToKmh | /**
 * @return the speed in km/h, or Double.NaN if the value cannot be parsed or is not positive
*/
public static double stringToKmh(String str) {
if (Helper.isEmpty(str))
return Double.NaN;
if ("walk".equals(str))
return 6;
// on some German autobahns and a very few other places
if ("none".equals(str))
return MaxSpeed.UNLIMITED_SIGN_SPEED;
int mpInteger = str.indexOf("mp");
int knotInteger = str.indexOf("knots");
int kmInteger = str.indexOf("km");
int kphInteger = str.indexOf("kph");
double factor;
if (mpInteger > 0) {
str = str.substring(0, mpInteger).trim();
factor = DistanceCalcEarth.KM_MILE;
} else if (knotInteger > 0) {
str = str.substring(0, knotInteger).trim();
factor = 1.852; // see https://en.wikipedia.org/wiki/Knot_%28unit%29#Definitions
} else {
if (kmInteger > 0) {
str = str.substring(0, kmInteger).trim();
} else if (kphInteger > 0) {
str = str.substring(0, kphInteger).trim();
}
factor = 1;
}
double value;
try {
value = Integer.parseInt(str) * factor;
} catch (Exception ex) {
return Double.NaN;
}
if (value <= 0) {
return Double.NaN;
}
return value;
} | 3.68 |
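A few worked examples of the parsing above; the numeric results in the comments assume DistanceCalcEarth.KM_MILE ≈ 1.609, which is defined elsewhere in GraphHopper:

OSMValueExtractor.stringToKmh("50");       // 50.0
OSMValueExtractor.stringToKmh("60 mph");   // 60 * DistanceCalcEarth.KM_MILE, roughly 96.6
OSMValueExtractor.stringToKmh("10 knots"); // 10 * 1.852 = 18.52
OSMValueExtractor.stringToKmh("walk");     // 6.0
OSMValueExtractor.stringToKmh("none");     // MaxSpeed.UNLIMITED_SIGN_SPEED
OSMValueExtractor.stringToKmh("fast");     // Double.NaN (not parseable)
OSMValueExtractor.stringToKmh("30.5");     // Double.NaN as well: only integer values are parsed here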
hadoop_RBFMetrics_getField | /**
* Fetches the value for a field name.
*
 * @param record the record to read the field from.
 * @param fieldName the legacy name of the field.
 * @return the field value, or null if it is not found or cannot be read.
*/
private static Object getField(BaseRecord record, String fieldName) {
Object result = null;
Method m = locateGetter(record, fieldName);
if (m != null) {
try {
result = m.invoke(record);
} catch (Exception e) {
LOG.error("Cannot get field {} on {}", fieldName, record);
}
}
return result;
} | 3.68 |
dubbo_HashedWheelTimer_waitForNextTick | /**
 * Calculates the goal nanoTime from startTime and the current tick number,
 * then waits until that goal has been reached.
 *
 * @return Long.MIN_VALUE if a shutdown request was received; otherwise the time elapsed since
 * startTime (a result of exactly Long.MIN_VALUE is shifted by +1 so it cannot be confused
 * with the shutdown sentinel)
*/
private long waitForNextTick() {
long deadline = tickDuration * (tick + 1);
for (; ; ) {
final long currentTime = System.nanoTime() - startTime;
long sleepTimeMs = (deadline - currentTime + 999999) / 1000000;
if (sleepTimeMs <= 0) {
if (currentTime == Long.MIN_VALUE) {
return -Long.MAX_VALUE;
} else {
return currentTime;
}
}
if (isWindows()) {
sleepTimeMs = sleepTimeMs / 10 * 10;
}
try {
Thread.sleep(sleepTimeMs);
} catch (InterruptedException ignored) {
if (WORKER_STATE_UPDATER.get(HashedWheelTimer.this) == WORKER_STATE_SHUTDOWN) {
return Long.MIN_VALUE;
}
}
}
} | 3.68 |
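A worked example of the ceiling division above, with illustrative numbers:

long tickDuration = 100_000_000L;          // 100 ms per tick, in nanoseconds
long tick = 4;                             // waiting for the 5th tick
long deadline = tickDuration * (tick + 1); // 500,000,000 ns
long currentTime = 423_456_789L;           // nanoseconds elapsed since startTime
long sleepTimeMs = (deadline - currentTime + 999999) / 1000000;
// (76,543,211 + 999,999) / 1,000,000 = 77, i.e. the remaining time rounded up to whole milliseconds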
hudi_KeyGenerator_getRecordKeyFieldNames | /**
 * Used during bootstrap, to project out only the record key fields from the bootstrap source dataset.
*
* @return list of field names, when concatenated make up the record key.
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.EVOLVING)
public List<String> getRecordKeyFieldNames() {
throw new UnsupportedOperationException("Bootstrap not supported for key generator. "
+ "Please override this method in your custom key generator.");
} | 3.68 |
morf_InsertStatement_fields | /**
* Specifies the fields to insert into the table.
*
* <p>
* NOTE: This method should not be used in conjunction with {@link #values}.
* </p>
*
* @param destinationFields the fields to insert into the database table
* @return a statement with the changes applied.
*/
public InsertStatement fields(Iterable<? extends AliasedFieldBuilder> destinationFields) {
return copyOnWriteOrMutate(
b -> b.fields(destinationFields),
() -> {
if (fromTable != null) {
throw new UnsupportedOperationException("Cannot specify both a source table and a list of fields");
}
this.fields.addAll(Builder.Helper.<AliasedField>buildAll(destinationFields));
}
);
} | 3.68 |
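A hedged DSL sketch; the table and column names are made up, and insert(), select(), tableRef() and field() are assumed to be statically imported from morf's SqlUtils. Note that fields(...) is combined with a select source rather than a source table, since combining it with a source table throws, as shown above:

InsertStatement copyNames = insert()
    .into(tableRef("Person"))
    .fields(field("firstName"), field("lastName"))
    .from(select(field("firstName"), field("lastName")).from(tableRef("ImportedPerson")));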
hudi_HoodieTimeline_maxInstant | /**
* Returns the greater of the given two instants.
*/
static String maxInstant(String instant1, String instant2) {
return compareTimestamps(instant1, GREATER_THAN, instant2) ? instant1 : instant2;
} | 3.68 |
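A small usage sketch; the timestamps are hypothetical instant times in Hudi's compact timestamp format:

String earlier = "20240101120000";
String later = "20240102093000";
HoodieTimeline.maxInstant(earlier, later); // "20240102093000"
HoodieTimeline.maxInstant(later, earlier); // also "20240102093000"; argument order does not matter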