name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68) |
---|---|---|
hadoop_FederationPolicyInitializationContext_getFederationStateStoreFacade | /**
* Getter for the {@link FederationStateStoreFacade}.
*
* @return the facade.
*/
public FederationStateStoreFacade getFederationStateStoreFacade() {
return federationStateStoreFacade;
} | 3.68 |
hbase_HBaseServerBase_getWALFileSystem | /** Returns the walFs. */
public FileSystem getWALFileSystem() {
return walFs;
} | 3.68 |
querydsl_ExpressionUtils_eq | /**
* Create a {@code left == right} expression
*
* @param <D> type of expressions
* @param left lhs of expression
* @param right rhs of expression
* @return left == right
*/
public static <D> Predicate eq(Expression<D> left, Expression<? extends D> right) {
return predicate(Ops.EQ, left, right);
} | 3.68 |
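Not part of the dataset row above, but as a minimal usage sketch of this helper: it assumes the querydsl-core `Expressions` factory methods `stringPath` and `constant` are available, which are not shown in the snippet.

```java
import com.querydsl.core.types.ExpressionUtils;
import com.querydsl.core.types.Predicate;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.StringPath;

class EqSketch {
    // Builds a "name == 'Alice'" predicate from a path expression and a constant expression.
    static Predicate byName() {
        StringPath name = Expressions.stringPath("name");               // left-hand side path
        return ExpressionUtils.eq(name, Expressions.constant("Alice")); // right-hand side constant
    }
}
```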
hadoop_ParsedTaskAttempt_putCounters | /** Set the task attempt counters */
public void putCounters(Map<String, Long> counters) {
this.countersMap = counters;
} | 3.68 |
hudi_OptionsResolver_isSpecificStartCommit | /**
* Returns whether the read start commit is a specific commit timestamp.
*/
public static boolean isSpecificStartCommit(Configuration conf) {
return conf.getOptional(FlinkOptions.READ_START_COMMIT).isPresent()
&& !conf.get(FlinkOptions.READ_START_COMMIT).equalsIgnoreCase(FlinkOptions.START_COMMIT_EARLIEST);
} | 3.68 |
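For context, a hedged usage sketch: the check passes only when `FlinkOptions.READ_START_COMMIT` is set and is not the `earliest` marker. The instant value is hypothetical, and the imports for the Hudi classes `FlinkOptions` and `OptionsResolver` (the classes in the snippet above) are omitted.

```java
import org.apache.flink.configuration.Configuration;

class StartCommitSketch {
    // True only when a concrete start instant is configured (not 'earliest', not unset).
    static boolean hasSpecificStart() {
        Configuration conf = new Configuration();
        conf.set(FlinkOptions.READ_START_COMMIT, "20230101000000"); // hypothetical instant time
        return OptionsResolver.isSpecificStartCommit(conf);
    }
}
```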
flink_TwoInputUdfOperator_setSemanticProperties | /**
* Sets the semantic properties for the user-defined function (UDF). The semantic properties
* define how fields of tuples and other objects are modified or preserved through this UDF. The
* configured properties can be retrieved via {@link UdfOperator#getSemanticProperties()}.
*
* @param properties The semantic properties for the UDF.
* @see UdfOperator#getSemanticProperties()
*/
@Internal
public void setSemanticProperties(DualInputSemanticProperties properties) {
this.udfSemantics = properties;
this.analyzedUdfSemantics = false;
} | 3.68 |
flink_JoinOperator_projectTuple14 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>
ProjectJoin<
I1,
I2,
Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>
projectTuple14() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>
tType =
new TupleTypeInfo<
Tuple14<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13>>(fTypes);
return new ProjectJoin<
I1, I2, Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
flink_FactoryUtil_validateUnconsumedKeys | /** Validates unconsumed option keys. */
public static void validateUnconsumedKeys(
String factoryIdentifier, Set<String> allOptionKeys, Set<String> consumedOptionKeys) {
validateUnconsumedKeys(
factoryIdentifier, allOptionKeys, consumedOptionKeys, Collections.emptySet());
} | 3.68 |
framework_VAbstractOrderedLayout_setSlotStyleNames | /**
* Sets the slot's style names. The style names will be prefixed with the
* v-slot prefix.
*
* @param widget
* the widget whose slot to style
* @param stylenames
* The style names of the slot.
*/
public void setSlotStyleNames(Widget widget, String... stylenames) {
Slot slot = getSlot(widget);
if (slot == null) {
throw new IllegalArgumentException(
"A slot for the widget could not be found. Has the widget been added to the layout?");
}
slot.setStyleNames(stylenames);
} | 3.68 |
flink_MessageSerializer_deserializeHeader | /**
* De-serializes the header and returns the {@link MessageType}.
*
* <pre>
* <b>The buffer is expected to be at the header position.</b>
* </pre>
*
* @param buf The {@link ByteBuf} containing the serialized header.
* @return The message type.
* @throws IllegalStateException If unexpected message version or message type.
*/
public static MessageType deserializeHeader(final ByteBuf buf) {
// checking the version
int version = buf.readInt();
Preconditions.checkState(
version == VERSION,
"Version Mismatch: Found " + version + ", Expected: " + VERSION + '.');
// fetching the message type
int msgType = buf.readInt();
MessageType[] values = MessageType.values();
Preconditions.checkState(
msgType >= 0 && msgType < values.length,
"Illegal message type with index " + msgType + '.');
return values[msgType];
} | 3.68 |
flink_NetworkBufferAllocator_allocateUnPooledNetworkBuffer | /**
* Allocates an un-pooled network buffer with the specific size.
*
* @param size The requested buffer size.
* @param dataType The data type this buffer represents.
* @return The un-pooled network buffer.
*/
Buffer allocateUnPooledNetworkBuffer(int size, Buffer.DataType dataType) {
checkArgument(size > 0, "Illegal buffer size, must be positive.");
byte[] byteArray = new byte[size];
MemorySegment memSeg = MemorySegmentFactory.wrap(byteArray);
return new NetworkBuffer(memSeg, FreeingBufferRecycler.INSTANCE, dataType);
} | 3.68 |
framework_Sort_then | /**
* Continue building a Sort order. The provided column is sorted in the
* specified order if the previously added columns have been evaluated as
* equal.
*
* @param column
* a grid column object reference
* @param direction
* indicator of sort direction - either ascending or descending
* @return a sort instance, typed to the grid data type
*/
public Sort then(Grid.Column<?, ?> column, SortDirection direction) {
return new Sort(this, column, direction);
} | 3.68 |
dubbo_SingleRouterChain_buildRouterSnapshot | /**
* Build each router's result
*/
public RouterSnapshotNode<T> buildRouterSnapshot(
URL url, BitList<Invoker<T>> availableInvokers, Invocation invocation) {
BitList<Invoker<T>> resultInvokers = availableInvokers.clone();
RouterSnapshotNode<T> parentNode = new RouterSnapshotNode<T>("Parent", resultInvokers.clone());
parentNode.setNodeOutputInvokers(resultInvokers.clone());
// 1. route state router
Holder<RouterSnapshotNode<T>> nodeHolder = new Holder<>();
nodeHolder.set(parentNode);
resultInvokers = headStateRouter.route(resultInvokers, url, invocation, true, nodeHolder);
// result is empty, log out
if (routers.isEmpty() || (resultInvokers.isEmpty() && shouldFailFast)) {
parentNode.setChainOutputInvokers(resultInvokers.clone());
return parentNode;
}
RouterSnapshotNode<T> commonRouterNode = new RouterSnapshotNode<T>("CommonRouter", resultInvokers.clone());
parentNode.appendNode(commonRouterNode);
List<Invoker<T>> commonRouterResult = resultInvokers;
// 2. route common router
for (Router router : routers) {
// Copy resultInvokers to an ArrayList; BitList is not supported here
List<Invoker<T>> inputInvokers = new ArrayList<>(commonRouterResult);
RouterSnapshotNode<T> currentNode =
new RouterSnapshotNode<T>(router.getClass().getSimpleName(), inputInvokers);
// append to router node chain
commonRouterNode.appendNode(currentNode);
commonRouterNode = currentNode;
RouterResult<Invoker<T>> routeStateResult = router.route(inputInvokers, url, invocation, true);
List<Invoker<T>> routeResult = routeStateResult.getResult();
String routerMessage = routeStateResult.getMessage();
currentNode.setNodeOutputInvokers(routeResult);
currentNode.setRouterMessage(routerMessage);
commonRouterResult = routeResult;
// result is empty, log out
if (CollectionUtils.isEmpty(routeResult) && shouldFailFast) {
break;
}
if (!routeStateResult.isNeedContinueRoute()) {
break;
}
}
commonRouterNode.setChainOutputInvokers(commonRouterNode.getNodeOutputInvokers());
// 3. set router chain output reverse
RouterSnapshotNode<T> currentNode = commonRouterNode;
while (currentNode != null) {
RouterSnapshotNode<T> parent = currentNode.getParentNode();
if (parent != null) {
// common router only has one child invoke
parent.setChainOutputInvokers(currentNode.getChainOutputInvokers());
}
currentNode = parent;
}
return parentNode;
} | 3.68 |
hbase_RpcServer_setCurrentCall | /**
* Used by {@link org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore}. Set the
* rpc call back after mutating the region.
*/
public static void setCurrentCall(RpcCall rpcCall) {
CurCall.set(rpcCall);
} | 3.68 |
hadoop_RequestFactoryImpl_newPutObjectRequestBuilder | /**
* Create a putObject request.
* Adds the ACL, storage class and metadata.
* @param key key of object
* @param options options for the request, including headers
* @param length length of object to be uploaded
* @param isDirectoryMarker true if object to be uploaded is a directory marker
* @return the request builder
*/
@Override
public PutObjectRequest.Builder newPutObjectRequestBuilder(String key,
final PutObjectOptions options,
long length,
boolean isDirectoryMarker) {
Preconditions.checkArgument(isNotEmpty(key), "Null/empty key");
PutObjectRequest.Builder putObjectRequestBuilder =
buildPutObjectRequest(length, isDirectoryMarker);
putObjectRequestBuilder.bucket(getBucket()).key(key);
if (options != null) {
putObjectRequestBuilder.metadata(options.getHeaders());
}
putEncryptionParameters(putObjectRequestBuilder);
if (storageClass != null) {
putObjectRequestBuilder.storageClass(storageClass);
}
return prepareRequest(putObjectRequestBuilder);
} | 3.68 |
hbase_Cluster_isEmpty | /** Returns true if no locations have been added, false otherwise */
public boolean isEmpty() {
return nodes.isEmpty();
} | 3.68 |
AreaShop_AreaShop_getWorldGuardHandler | /**
* Function to get WorldGuardInterface for version dependent things.
* @return WorldGuardInterface
*/
public WorldGuardInterface getWorldGuardHandler() {
return this.worldGuardInterface;
} | 3.68 |
hbase_TransitRegionStateProcedure_setTimeoutFailure | /**
* At end of timeout, wake ourselves up so we run again.
*/
@Override
protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) {
setState(ProcedureProtos.ProcedureState.RUNNABLE);
env.getProcedureScheduler().addFront(this);
return false; // 'false' means that this procedure handled the timeout
} | 3.68 |
hadoop_S3AMultipartUploader_parsePartHandlePayload | /**
* Parse the payload marshalled as a part handle.
* @param data handle data
* @return the length and etag
* @throws IOException error reading the payload
*/
@VisibleForTesting
static PartHandlePayload parsePartHandlePayload(
final byte[] data)
throws IOException {
try (DataInputStream input =
new DataInputStream(new ByteArrayInputStream(data))) {
final String header = input.readUTF();
if (!HEADER.equals(header)) {
throw new IOException("Wrong header string: \"" + header + "\"");
}
final String path = input.readUTF();
final String uploadId = input.readUTF();
final int partNumber = input.readInt();
final long len = input.readLong();
final String etag = input.readUTF();
if (len < 0) {
throw new IOException("Negative length");
}
return new PartHandlePayload(path, uploadId, partNumber, len, etag);
}
} | 3.68 |
querydsl_GroupBy_groupBy | /**
* Create a new GroupByBuilder for the given key expressions
*
* @param keys keys for aggregation
* @return builder for further specification
*/
public static GroupByBuilder<List<?>> groupBy(Expression<?>... keys) {
return new GroupByBuilder<List<?>>(Projections.list(keys));
} | 3.68 |
graphhopper_FindMinMax_checkLMConstraints | /**
* This method throws an exception when this CustomModel would decrease the edge weight compared to the specified
* baseModel as in such a case the optimality of A* with landmarks can no longer be guaranteed (as the preparation
* is based on baseModel).
*/
public static void checkLMConstraints(CustomModel baseModel, CustomModel queryModel, EncodedValueLookup lookup) {
if (queryModel.isInternal())
throw new IllegalArgumentException("CustomModel of query cannot be internal");
if (queryModel.getDistanceInfluence() != null) {
double bmDI = baseModel.getDistanceInfluence() == null ? 0 : baseModel.getDistanceInfluence();
if (queryModel.getDistanceInfluence() < bmDI)
throw new IllegalArgumentException("CustomModel in query can only use distance_influence bigger or equal to "
+ bmDI + ", but was: " + queryModel.getDistanceInfluence());
}
checkMultiplyValue(queryModel.getPriority(), lookup);
checkMultiplyValue(queryModel.getSpeed(), lookup);
} | 3.68 |
shardingsphere-elasticjob_JobFacade_registerJobCompleted | /**
* Register job completed.
*
* @param shardingContexts sharding contexts
*/
public void registerJobCompleted(final ShardingContexts shardingContexts) {
executionService.registerJobCompleted(shardingContexts);
if (configService.load(true).isFailover()) {
failoverService.updateFailoverComplete(shardingContexts.getShardingItemParameters().keySet());
}
} | 3.68 |
AreaShop_RentRegion_unRent | /**
* Unrent a region, reset to unrented.
* @param giveMoneyBack true if money should be given back to the player, false otherwise
* @param executor The CommandSender that should get the cancelled message if there is any, or null
* @return true if unrenting succeeded, otherwise false
*/
@SuppressWarnings("deprecation")
public boolean unRent(boolean giveMoneyBack, CommandSender executor) {
boolean own = executor instanceof Player && this.isRenter((Player) executor);
if(executor != null) {
if(!executor.hasPermission("areashop.unrent") && !own) {
message(executor, "unrent-noPermissionOther");
return false;
}
if(!executor.hasPermission("areashop.unrent") && !executor.hasPermission("areashop.unrentown") && own) {
message(executor, "unrent-noPermission");
return false;
}
}
if(plugin.getEconomy() == null) {
return false;
}
// Broadcast and check event
UnrentingRegionEvent event = new UnrentingRegionEvent(this);
Bukkit.getPluginManager().callEvent(event);
if(event.isCancelled()) {
message(executor, "general-cancelled", event.getReason());
return false;
}
// Do a payback
double moneyBack = getMoneyBackAmount();
if(moneyBack > 0 && giveMoneyBack) {
boolean noPayBack = false;
OfflinePlayer landlordPlayer = null;
if(getLandlord() != null) {
landlordPlayer = Bukkit.getOfflinePlayer(getLandlord());
}
String landlordName = getLandlordName();
EconomyResponse r;
if(landlordName != null) {
if(landlordPlayer != null && landlordPlayer.getName() != null) {
r = plugin.getEconomy().withdrawPlayer(landlordPlayer, getWorldName(), moneyBack);
} else {
r = plugin.getEconomy().withdrawPlayer(landlordName, getWorldName(), moneyBack);
}
if(r == null || !r.transactionSuccess()) {
noPayBack = true;
}
}
// Give back the money
OfflinePlayer player = Bukkit.getOfflinePlayer(getRenter());
if(player != null && !noPayBack) {
r = null;
boolean error = false;
try {
if(player.getName() != null) {
r = plugin.getEconomy().depositPlayer(player, getWorldName(), moneyBack);
} else if(getPlayerName() != null) {
r = plugin.getEconomy().depositPlayer(getPlayerName(), getWorldName(), moneyBack);
}
} catch(Exception e) {
error = true;
}
if(error || r == null || !r.transactionSuccess()) {
AreaShop.warn("Something went wrong with paying back to " + getPlayerName() + " money while unrenting region " + getName());
}
}
}
// Handle schematic save/restore (while %uuid% is still available)
handleSchematicEvent(RegionEvent.UNRENTED);
// Send message: before actual removal of the renter so that it is still available for variables
message(executor, "unrent-unrented");
// Remove friends, the owner and renteduntil values
getFriendsFeature().clearFriends();
UUID oldRenter = getRenter();
setRenter(null);
setRentedUntil(null);
setTimesExtended(-1);
removeLastActiveTime();
// Notify about updates
this.notifyAndUpdate(new UnrentedRegionEvent(this, oldRenter, Math.max(0, moneyBack)));
return true;
} | 3.68 |
framework_Form_getFooter | /**
* Returns a layout that is rendered below normal form contents. This area
* can be used for example to include buttons related to form contents.
*
* @return layout rendered below normal form contents or null if no footer
* is used
*/
public Layout getFooter() {
return (Layout) getState(false).footer;
} | 3.68 |
framework_AbstractProperty_addReadOnlyStatusChangeListener | /**
* Registers a new read-only status change listener for this Property.
*
* @param listener
* the new Listener to be registered.
*/
@Override
public void addReadOnlyStatusChangeListener(
Property.ReadOnlyStatusChangeListener listener) {
if (readOnlyStatusChangeListeners == null) {
readOnlyStatusChangeListeners = new LinkedList<ReadOnlyStatusChangeListener>();
}
readOnlyStatusChangeListeners.add(listener);
} | 3.68 |
hbase_StochasticLoadBalancer_composeAttributeName | /**
* A helper function to compose the attribute name from tablename and costfunction name
*/
static String composeAttributeName(String tableName, String costFunctionName) {
return tableName + TABLE_FUNCTION_SEP + costFunctionName;
} | 3.68 |
framework_VComboBox_updateStyleNames | /**
* Updates style names in suggestion popup to help theme building.
*
* @param componentState
* shared state of the combo box
*/
public void updateStyleNames(AbstractComponentState componentState) {
debug("VComboBox.SP: updateStyleNames()");
setStyleName(
VComboBox.this.getStylePrimaryName() + "-suggestpopup");
menu.setStyleName(
VComboBox.this.getStylePrimaryName() + "-suggestmenu");
status.setClassName(
VComboBox.this.getStylePrimaryName() + "-status");
if (ComponentStateUtil.hasStyles(componentState)) {
for (String style : componentState.styles) {
if (!"".equals(style)) {
addStyleDependentName(style);
}
}
}
} | 3.68 |
hudi_ExpressionPredicates_bindValueLiteral | /**
* Binds value literal to create a column predicate.
*
* @param valueLiteral The value literal to bind.
* @return A column predicate.
*/
public ColumnPredicate bindValueLiteral(ValueLiteralExpression valueLiteral) {
Object literalObject = getValueFromLiteral(valueLiteral);
// validate that literal is serializable
if (literalObject instanceof Serializable) {
this.literal = (Serializable) literalObject;
} else {
LOG.warn("Encountered a non-serializable literal. " + "Cannot push predicate with value literal [{}] into FileInputFormat. " + "This is a bug and should be reported.", valueLiteral);
this.literal = null;
}
return this;
} | 3.68 |
hadoop_CredentialProviderListFactory_createAWSV2CredentialProvider | /**
* Create an AWS v2 credential provider from its class by using reflection.
* @param conf configuration
* @param className credential class name
* @param uri URI of the FS
* @param key configuration key to use
* @return the instantiated class
* @throws IOException on any instantiation failure.
* @see S3AUtils#getInstanceFromReflection
*/
private static AwsCredentialsProvider createAWSV2CredentialProvider(Configuration conf,
String className,
@Nullable URI uri, final String key) throws IOException {
LOG.debug("Credential provider class is {}", className);
return S3AUtils.getInstanceFromReflection(className, conf, uri, AwsCredentialsProvider.class,
"create", key);
} | 3.68 |
hadoop_RegexMountPoint_resolve | /**
* Get resolved path from regex mount points.
* E.g. link: ^/user/(?<username>\\w+) => s3://$user.apache.com/_${user}
* srcPath: is /user/hadoop/dir1
* resolveLastComponent: true
* then return value is s3://hadoop.apache.com/_hadoop
* @param srcPath - the src path to resolve
* @param resolveLastComponent - whether resolve the path after last `/`
* @return mapped path of the mount point.
*/
public InodeTree.ResolveResult<T> resolve(final String srcPath,
final boolean resolveLastComponent) {
String pathStrToResolve = getPathToResolve(srcPath, resolveLastComponent);
for (RegexMountPointInterceptor interceptor : interceptorList) {
pathStrToResolve = interceptor.interceptSource(pathStrToResolve);
}
LOGGER.debug("Path to resolve:" + pathStrToResolve + ", srcPattern:"
+ getSrcPathRegex());
Matcher srcMatcher = getSrcPattern().matcher(pathStrToResolve);
String parsedDestPath = getDstPath();
int mappedCount = 0;
String resolvedPathStr = "";
while (srcMatcher.find()) {
resolvedPathStr = pathStrToResolve.substring(0, srcMatcher.end());
Map<String, Set<String>> varMap = getVarInDestPathMap();
for (Map.Entry<String, Set<String>> entry : varMap.entrySet()) {
String regexGroupNameOrIndexStr = entry.getKey();
Set<String> groupRepresentationStrSetInDest = entry.getValue();
parsedDestPath = replaceRegexCaptureGroupInPath(
parsedDestPath, srcMatcher,
regexGroupNameOrIndexStr, groupRepresentationStrSetInDest);
}
++mappedCount;
}
if (0 == mappedCount) {
return null;
}
Path remainingPath = getRemainingPathStr(srcPath, resolvedPathStr);
for (RegexMountPointInterceptor interceptor : interceptorList) {
parsedDestPath = interceptor.interceptResolvedDestPathStr(parsedDestPath);
remainingPath =
interceptor.interceptRemainingPath(remainingPath);
}
InodeTree.ResolveResult resolveResult = inodeTree
.buildResolveResultForRegexMountPoint(InodeTree.ResultKind.EXTERNAL_DIR,
resolvedPathStr, parsedDestPath, remainingPath);
return resolveResult;
} | 3.68 |
flink_FactoryUtil_createCatalog | /**
* Attempts to discover an appropriate catalog factory and creates an instance of the catalog.
*
* <p>This first uses the legacy {@link TableFactory} stack to discover a matching {@link
* CatalogFactory}. If none is found, it falls back to the new stack using {@link Factory}
* instead.
*/
public static Catalog createCatalog(
String catalogName,
Map<String, String> options,
ReadableConfig configuration,
ClassLoader classLoader) {
// Use the legacy mechanism first for compatibility
try {
final CatalogFactory legacyFactory =
TableFactoryService.find(CatalogFactory.class, options, classLoader);
return legacyFactory.createCatalog(catalogName, options);
} catch (NoMatchingTableFactoryException e) {
// No matching legacy factory found, try using the new stack
final DefaultCatalogContext discoveryContext =
new DefaultCatalogContext(catalogName, options, configuration, classLoader);
try {
final CatalogFactory factory = getCatalogFactory(discoveryContext);
// The type option is only used for discovery, we don't actually want to forward it
// to the catalog factory itself.
final Map<String, String> factoryOptions =
options.entrySet().stream()
.filter(
entry ->
!CommonCatalogOptions.CATALOG_TYPE
.key()
.equals(entry.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
final DefaultCatalogContext context =
new DefaultCatalogContext(
catalogName, factoryOptions, configuration, classLoader);
return factory.createCatalog(context);
} catch (Throwable t) {
throw new ValidationException(
String.format(
"Unable to create catalog '%s'.%n%nCatalog options are:%n%s",
catalogName,
options.entrySet().stream()
.map(
optionEntry ->
stringifyOption(
optionEntry.getKey(),
optionEntry.getValue()))
.sorted()
.collect(Collectors.joining("\n"))),
t);
}
}
} | 3.68 |
graphhopper_OSMReader_isCalculateWayDistance | /**
* @return true if the length of the way shall be calculated and added as an artificial way tag
*/
protected boolean isCalculateWayDistance(ReaderWay way) {
return isFerry(way);
} | 3.68 |
rocketmq-connect_ConnectUtil_convertToMessageQueue | /**
* Convert a record partition to a message queue.
*
* @param recordPartition the record partition holding topic, brokerName and queueId
* @return the corresponding message queue
*/
public static MessageQueue convertToMessageQueue(RecordPartition recordPartition) {
Map<String, ?> partition = recordPartition.getPartition();
String topic = partition.get("topic").toString();
String brokerName = partition.get("brokerName").toString();
int queueId = partition.containsKey("queueId") ? Integer.parseInt(partition.get("queueId").toString()) : 0;
return new MessageQueue(topic, brokerName, queueId);
} | 3.68 |
hbase_RecoverableZooKeeper_prepareZKMulti | /**
* Convert Iterable of {@link org.apache.zookeeper.Op} we got into the ZooKeeper.Op instances to
* actually pass to multi (need to do this in order to appendMetaData).
*/
private Iterable<Op> prepareZKMulti(Iterable<Op> ops) throws UnsupportedOperationException {
if (ops == null) {
return null;
}
List<Op> preparedOps = new LinkedList<>();
for (Op op : ops) {
if (op.getType() == ZooDefs.OpCode.create) {
CreateRequest create = (CreateRequest) op.toRequestRecord();
preparedOps.add(Op.create(create.getPath(), ZKMetadata.appendMetaData(id, create.getData()),
create.getAcl(), create.getFlags()));
} else if (op.getType() == ZooDefs.OpCode.delete) {
// no need to appendMetaData for delete
preparedOps.add(op);
} else if (op.getType() == ZooDefs.OpCode.setData) {
SetDataRequest setData = (SetDataRequest) op.toRequestRecord();
preparedOps.add(Op.setData(setData.getPath(),
ZKMetadata.appendMetaData(id, setData.getData()), setData.getVersion()));
} else {
throw new UnsupportedOperationException("Unexpected ZKOp type: " + op.getClass().getName());
}
}
return preparedOps;
} | 3.68 |
pulsar_AbstractHierarchicalLedgerManager_asyncProcessLevelNodes | /**
* Process hash nodes in a given path.
*/
void asyncProcessLevelNodes(
final String path, final BookkeeperInternalCallbacks.Processor<String> processor,
final AsyncCallback.VoidCallback finalCb, final Object context,
final int successRc, final int failureRc) {
store.getChildren(path)
.thenAccept(levelNodes -> {
if (levelNodes.isEmpty()) {
finalCb.processResult(successRc, null, context);
return;
}
AsyncListProcessor<String> listProcessor = new AsyncListProcessor<>(scheduler);
// process its children
listProcessor.process(levelNodes, processor, finalCb, context, successRc, failureRc);
}).exceptionally(ex -> {
log.error("Error polling hash nodes of {}: {}", path, ex.getMessage());
finalCb.processResult(failureRc, null, context);
return null;
});
} | 3.68 |
hadoop_DynoInfraUtils_getNameNodeHdfsUri | /**
* Get the URI that can be used to access the launched NameNode for HDFS RPCs.
*
* @param nameNodeProperties The set of properties representing the
* information about the launched NameNode.
* @return The HDFS URI.
*/
static URI getNameNodeHdfsUri(Properties nameNodeProperties) {
return URI.create(String.format("hdfs://%s:%s/",
nameNodeProperties.getProperty(DynoConstants.NN_HOSTNAME),
nameNodeProperties.getProperty(DynoConstants.NN_RPC_PORT)));
} | 3.68 |
hbase_HMobStore_getLocations | /**
* @param tableName to look up locations for, cannot be null
* @return a list of locations in order of working dir, archive dir; will not be null
*/
public List<Path> getLocations(TableName tableName) throws IOException {
List<Path> locations = map.get(tableName);
if (locations == null) {
IdLock.Entry lockEntry = keyLock.getLockEntry(tableName.hashCode());
try {
locations = map.get(tableName);
if (locations == null) {
locations = new ArrayList<>(2);
locations.add(MobUtils.getMobFamilyPath(conf, tableName,
getColumnFamilyDescriptor().getNameAsString()));
locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tableName,
MobUtils.getMobRegionInfo(tableName).getEncodedName(),
getColumnFamilyDescriptor().getNameAsString()));
map.put(tableName, locations);
}
} finally {
keyLock.releaseLockEntry(lockEntry);
}
}
return locations;
} | 3.68 |
flink_PlanNode_getCumulativeCosts | /**
* Gets the cumulative costs of this node. The cumulative costs are the sum of the costs of this
* node and of all nodes in the subtree below this node.
*
* @return The cumulative costs, or null, if not yet set.
*/
public Costs getCumulativeCosts() {
return this.cumulativeCosts;
} | 3.68 |
framework_VLayoutSlot_getWidgetSizeInDirection | /**
* Returns the widget's height if the indicated direction is vertical, and
* width if horizontal.
*
* @param isVertical
* {@code true} if the requested dimension is height,
* {@code false} if width
* @return the widget height or width depending on the indicated direction
*/
public int getWidgetSizeInDirection(boolean isVertical) {
return isVertical ? getWidgetHeight() : getWidgetWidth();
} | 3.68 |
pulsar_ManagedLedgerConfig_getMaxUnackedRangesToPersist | /**
* @return max unacked message ranges that will be persisted and recovered.
*
*/
public int getMaxUnackedRangesToPersist() {
return maxUnackedRangesToPersist;
} | 3.68 |
hudi_SparkRDDWriteClient_insertOverwriteTable | /**
* Removes all existing records of the Hoodie table and inserts the given HoodieRecords into the table.
*
* @param records HoodieRecords to insert
* @param instantTime Instant time of the commit
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
*/
public HoodieWriteResult insertOverwriteTable(JavaRDD<HoodieRecord<T>> records, final String instantTime) {
HoodieTable<T, HoodieData<HoodieRecord<T>>, HoodieData<HoodieKey>, HoodieData<WriteStatus>> table = initTable(WriteOperationType.INSERT_OVERWRITE_TABLE, Option.ofNullable(instantTime));
table.validateInsertSchema();
preWrite(instantTime, WriteOperationType.INSERT_OVERWRITE_TABLE, table.getMetaClient());
HoodieWriteMetadata<HoodieData<WriteStatus>> result = table.insertOverwriteTable(context, instantTime, HoodieJavaRDD.of(records));
HoodieWriteMetadata<JavaRDD<WriteStatus>> resultRDD = result.clone(HoodieJavaRDD.getJavaRDD(result.getWriteStatuses()));
return new HoodieWriteResult(postWrite(resultRDD, instantTime, table), result.getPartitionToReplaceFileIds());
} | 3.68 |
hbase_FSDataInputStreamWrapper_fallbackToFsChecksum | /**
* Read from non-checksum stream failed, fall back to FS checksum. Thread-safe.
* @param offCount For how many checksumOk calls to turn off the HBase checksum.
*/
public FSDataInputStream fallbackToFsChecksum(int offCount) throws IOException {
// checksumOffCount is speculative, but let's try to reset it less.
boolean partOfConvoy = false;
if (this.stream == null) {
synchronized (streamNoFsChecksumFirstCreateLock) {
partOfConvoy = (this.stream != null);
if (!partOfConvoy) {
this.stream = (link != null) ? link.open(hfs) : hfs.open(path);
}
}
}
if (!partOfConvoy) {
this.useHBaseChecksum = false;
this.hbaseChecksumOffCount.set(offCount);
}
return this.stream;
} | 3.68 |
hadoop_TimelineEvents_getAllEvents | /**
* Get a list of {@link EventsOfOneEntity} instances
*
* @return a list of {@link EventsOfOneEntity} instances
*/
@XmlElement(name = "events")
public List<EventsOfOneEntity> getAllEvents() {
return allEvents;
} | 3.68 |
flink_SourceOperatorStreamTask_cleanupOldCheckpoints | /**
* Cleans up any orphaned checkpoints before the given currently triggered checkpoint. These
* checkpoints may occur when a checkpoint is cancelled but the RPC is lost. Note: to be safe,
* checkpoint X is only removed when both the RPC and the trigger for a checkpoint Y>X have been received.
*/
private void cleanupOldCheckpoints(long checkpointId) {
assert (mailboxProcessor.isMailboxThread());
triggeredCheckpoints.headSet(checkpointId).clear();
untriggeredCheckpoints.headMap(checkpointId).clear();
maybeResumeProcessing();
} | 3.68 |
dubbo_StringToDurationConverter_detect | /**
* Detect the style from the given source value.
*
* @param value the source value
* @return the duration style
* @throws IllegalArgumentException if the value is not a known style
*/
public static DurationStyle detect(String value) {
Assert.notNull(value, "Value must not be null");
for (DurationStyle candidate : values()) {
if (candidate.matches(value)) {
return candidate;
}
}
throw new IllegalArgumentException("'" + value + "' is not a valid duration");
} | 3.68 |
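A hedged usage sketch: `detect` returns the first declared style whose pattern matches, and rejects unrecognised input. Which concrete styles exist (e.g. a simple `30s` form) is an assumption here, not something shown in the snippet.

```java
// Picks the first declared style whose pattern matches the value.
DurationStyle style = DurationStyle.detect("30s"); // assumes a style exists that matches "<number><unit>" values
// DurationStyle.detect("half a minute") would throw IllegalArgumentException (no style matches)
```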
hadoop_JobACLsManager_constructJobACLs | /**
* Construct the jobACLs from the configuration so that they can be kept in
* the memory. If authorization is disabled on the JT, nothing is constructed
* and an empty map is returned.
*
* @return JobACL to AccessControlList map.
*/
public Map<JobACL, AccessControlList> constructJobACLs(Configuration conf) {
Map<JobACL, AccessControlList> acls =
new HashMap<JobACL, AccessControlList>();
// Don't construct anything if authorization is disabled.
if (!areACLsEnabled()) {
return acls;
}
for (JobACL aclName : JobACL.values()) {
String aclConfigName = aclName.getAclName();
String aclConfigured = conf.get(aclConfigName);
if (aclConfigured == null) {
// If ACLs are not configured at all, we grant no access to anyone. So
// jobOwner and cluster administrator _only_ can do 'stuff'
aclConfigured = " ";
}
acls.put(aclName, new AccessControlList(aclConfigured));
}
return acls;
} | 3.68 |
framework_Table_setVisibleColumns | /**
* Sets the array of visible column property id:s.
*
* <p>
* The columns are shown in the order of their appearance in this array.
* </p>
*
* @param visibleColumns
* the Array of shown property id:s.
*/
public void setVisibleColumns(Object... visibleColumns) {
// Visible columns must exist
if (visibleColumns == null) {
throw new NullPointerException(
"Can not set visible columns to null value");
}
final LinkedList<Object> newVC = new LinkedList<Object>();
// Checks that the new visible columns contains no nulls, properties
// exist and that there are no duplicates before adding them to newVC.
final Collection<?> properties = getContainerPropertyIds();
for (Object id : visibleColumns) {
if (id == null) {
throw new NullPointerException("Ids must be non-nulls");
} else if (!properties.contains(id)
&& !columnGenerators.containsKey(id)) {
throw new IllegalArgumentException(
"Ids must exist in the Container or as a generated column, missing id: "
+ id);
} else if (newVC.contains(id)) {
throw new IllegalArgumentException(
"Ids must be unique, duplicate id: " + id);
} else {
newVC.add(id);
}
}
this.visibleColumns = newVC;
// Assures visual refresh
refreshRowCache();
} | 3.68 |
framework_TestBench_valueChange | // Handle menu selection and update body
@Override
public void valueChange(Property.ValueChangeEvent event) {
bodyLayout.removeAllComponents();
bodyLayout.setCaption(null);
final Object o = menu.getValue();
if (o != null && o instanceof Class) {
final Class<?> c = (Class<?>) o;
final String title = c.getName();
bodyLayout.setCaption(title);
bodyLayout.addComponent(createTestable(c));
} else {
// NOP node selected or deselected tree item
}
} | 3.68 |
dubbo_Version_getPrefixDigits | /**
* get prefix digits from given version string
*/
private static String getPrefixDigits(String v) {
Matcher matcher = PREFIX_DIGITS_PATTERN.matcher(v);
if (matcher.find()) {
return matcher.group(1);
}
return "";
} | 3.68 |
hbase_TimeRangeTracker_includeTimestamp | /**
* Update the current TimestampRange to include the timestamp from <code>cell</code>. If the Key
* is of type DeleteColumn or DeleteFamily, it includes the entire time range from 0 to timestamp
* of the key.
* @param cell the Cell to include
*/
public void includeTimestamp(final Cell cell) {
includeTimestamp(cell.getTimestamp());
if (PrivateCellUtil.isDeleteColumnOrFamily(cell)) {
includeTimestamp(0);
}
} | 3.68 |
flink_CheckpointedInputGate_processPriorityEvents | /**
* Eagerly pulls and processes all priority events. Must be called from task thread.
*
* <p>Basic assumption is that no priority event needs to be handled by the {@link
* StreamTaskNetworkInput}.
*/
private void processPriorityEvents() throws IOException, InterruptedException {
// check if the priority event is still not processed (could have been pulled before mail
// was being executed)
boolean hasPriorityEvent = inputGate.getPriorityEventAvailableFuture().isDone();
while (hasPriorityEvent) {
// process as many priority events as possible
final Optional<BufferOrEvent> bufferOrEventOpt = pollNext();
if (!bufferOrEventOpt.isPresent()) {
break;
}
final BufferOrEvent bufferOrEvent = bufferOrEventOpt.get();
checkState(bufferOrEvent.hasPriority(), "Should only poll priority events");
hasPriorityEvent = bufferOrEvent.morePriorityEvents();
}
// re-enqueue mail to process future priority events
waitForPriorityEvents(inputGate, mailboxExecutor);
} | 3.68 |
hudi_HoodieTableMetadataUtil_isValidInstant | /**
* Checks if the Instant is a delta commit and has a valid suffix for operations on MDT.
*
* @param instant {@code HoodieInstant} to check.
* @return {@code true} if the instant is valid.
*/
public static boolean isValidInstant(HoodieInstant instant) {
// Should be a deltacommit
if (!instant.getAction().equals(HoodieTimeline.DELTA_COMMIT_ACTION)) {
return false;
}
// Check correct length. The timestamp should have a suffix over the timeline's timestamp format.
final String instantTime = instant.getTimestamp();
if (!(instantTime.length() == MILLIS_INSTANT_ID_LENGTH + OperationSuffix.METADATA_INDEXER.getSuffix().length())) {
return false;
}
// Is this a fixed operations suffix
final String suffix = instantTime.substring(instantTime.length() - 3);
if (OperationSuffix.isValidSuffix(suffix)) {
return true;
}
// Is this a index init suffix?
if (suffix.compareTo(String.format("%03d", PARTITION_INITIALIZATION_TIME_SUFFIX)) >= 0) {
return true;
}
return false;
} | 3.68 |
morf_UpgradeTableResolution_addDiscoveredTables | /**
* Adds information about tables read/modified by the given upgrade step
*
* @param upgradeStepName name of the class of the upgrade step to be registered
* @param resolvedTables registry of read/modified tables
*/
public void addDiscoveredTables(String upgradeStepName, ResolvedTables resolvedTables) {
resolvedTablesMap.put(upgradeStepName, resolvedTables);
} | 3.68 |
hudi_HoodieFlinkWriteClient_upgradeDowngrade | /**
* Upgrade downgrade the Hoodie table.
*
* <p>This action should only be executed once for each commit.
* The modification of the table properties is not thread safe.
*/
public void upgradeDowngrade(String instantTime, HoodieTableMetaClient metaClient) {
new UpgradeDowngrade(metaClient, config, context, FlinkUpgradeDowngradeHelper.getInstance())
.run(HoodieTableVersion.current(), instantTime);
} | 3.68 |
hbase_FileSystemUtilizationChore_setLeftoverRegions | /**
* Sets a new collection of Regions as leftovers.
*/
void setLeftoverRegions(Iterator<Region> newLeftovers) {
this.leftoverRegions = newLeftovers;
} | 3.68 |
hbase_LockManager_lockHeartbeat | /**
* @param keepAlive if false, release the lock.
* @return true, if procedure is found and it has the lock; else false.
*/
public boolean lockHeartbeat(final long procId, final boolean keepAlive) throws IOException {
final LockProcedure proc =
master.getMasterProcedureExecutor().getProcedure(LockProcedure.class, procId);
if (proc == null) return false;
master.getMasterCoprocessorHost().preLockHeartbeat(proc, keepAlive);
proc.updateHeartBeat();
if (!keepAlive) {
proc.unlock(master.getMasterProcedureExecutor().getEnvironment());
}
master.getMasterCoprocessorHost().postLockHeartbeat(proc, keepAlive);
return proc.isLocked();
} | 3.68 |
hbase_RegionLocations_getRegionLocation | /**
* Returns the first not-null region location in the list
*/
public HRegionLocation getRegionLocation() {
for (HRegionLocation loc : locations) {
if (loc != null) {
return loc;
}
}
return null;
} | 3.68 |
hadoop_AllocationFileQueueParser_parse | // Load queue elements. A root queue can either be included or omitted. If
// it's included, all other queues must be inside it.
public QueueProperties parse() throws AllocationConfigurationException {
QueueProperties.Builder queuePropertiesBuilder =
new QueueProperties.Builder();
for (Element element : elements) {
String parent = ROOT;
if (element.getAttribute("name").equalsIgnoreCase(ROOT)) {
if (elements.size() > 1) {
throw new AllocationConfigurationException(
"If configuring root queue,"
+ " no other queues can be placed alongside it.");
}
parent = null;
}
loadQueue(parent, element, queuePropertiesBuilder);
}
return queuePropertiesBuilder.build();
} | 3.68 |
morf_AbstractSqlDialectTest_testFloor | /**
* Tests that FLOOR functionality builds the expected SQL string.
*/
@Test
public void testFloor() {
SelectStatement statement = new SelectStatement(floor(new FieldReference(FLOAT_FIELD))).from(new TableReference(
TEST_TABLE));
String actual = testDialect.convertStatementToSQL(statement);
assertEquals("Floor script should match expected", expectedFloor(), actual);
} | 3.68 |
flink_NFACompiler_createLoopingGroupPatternState | /**
* Create the states for the group pattern as a looping one.
*
* @param groupPattern the group pattern to create the states for
* @param sinkState the state that the group pattern being converted should point to
* @return the first state of the states of the group pattern
*/
private State<T> createLoopingGroupPatternState(
final GroupPattern<T, ?> groupPattern, final State<T> sinkState) {
final IterativeCondition<T> proceedCondition = getTrueFunction();
Pattern<T, ?> oldCurrentPattern = currentPattern;
Pattern<T, ?> oldFollowingPattern = followingPattern;
GroupPattern<T, ?> oldGroupPattern = currentGroupPattern;
final State<T> dummyState = createState(State.StateType.Normal, true);
State<T> lastSink = dummyState;
currentGroupPattern = groupPattern;
currentPattern = groupPattern.getRawPattern();
lastSink = createMiddleStates(lastSink);
lastSink = convertPattern(lastSink);
lastSink.addProceed(sinkState, proceedCondition);
dummyState.addProceed(lastSink, proceedCondition);
currentPattern = oldCurrentPattern;
followingPattern = oldFollowingPattern;
currentGroupPattern = oldGroupPattern;
return lastSink;
} | 3.68 |
hbase_AvlUtil_append | /**
* Append a node to the tree
* @param head the head of the linked list
* @param node the node to add to the tail of the list
* @return the new head of the list
*/
public static <TNode extends AvlLinkedNode> TNode append(TNode head, TNode node) {
assert !isLinked(node) : node + " is already linked";
if (head != null) {
TNode tail = (TNode) head.iterPrev;
tail.iterNext = node;
node.iterNext = head;
node.iterPrev = tail;
head.iterPrev = node;
return head;
}
node.iterNext = node;
node.iterPrev = node;
return node;
} | 3.68 |
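For illustration, a hedged sketch of how `append` maintains the circular doubly-linked list; `MyNode` is a hypothetical `AvlLinkedNode` subclass and is not part of the snippet.

```java
// Each append links the new node just before the head (i.e. at the tail) and returns the head.
MyNode head = null;
head = AvlUtil.append(head, new MyNode()); // first node becomes the head and links to itself
head = AvlUtil.append(head, new MyNode()); // appended at the tail; the head is unchanged
head = AvlUtil.append(head, new MyNode()); // iteration order follows insertion order, wrapping back to the head
```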
hmily_ConsulClient_addListener | /**
* Add listener.
*
* @param context the context
* @param passiveHandler the passive handler
* @param config the config
* @throws InterruptedException exception
*/
void addListener(final Supplier<ConfigLoader.Context> context, final ConfigLoader.PassiveHandler<ConsulPassiveConfig> passiveHandler, final ConsulConfig config) throws InterruptedException {
if (!config.isPassive()) {
return;
}
if (consul == null) {
    LOGGER.warn("Consul client is null...");
    // return early: using the uninitialised client below would throw a NullPointerException
    return;
}
ConsulCache consulCache = KVCache.newCache(consul.keyValueClient(), config.getKey());
consulCache.addListener(map -> {
Set<Map.Entry<Object, Value>> set = map.entrySet();
set.forEach(x -> {
ConsulPassiveConfig consulPassiveConfig = new ConsulPassiveConfig();
consulPassiveConfig.setKey(config.getKey());
consulPassiveConfig.setFileExtension(config.getFileExtension());
consulPassiveConfig.setValue(x.getValue().getValueAsString(Charset.forName("utf-8")).get());
passiveHandler.passive(context, consulPassiveConfig);
});
});
consulCache.start();
LOGGER.info("passive consul remote started....");
} | 3.68 |
framework_AbstractFieldElement_clientSelectElement | /**
* Selects the contents of a TextField element.
*
* NOTE: When testing with Firefox, the browser window should have focus.
*
* @since 8.0
* @param elem
*            the element whose contents will be selected
*/
protected void clientSelectElement(WebElement elem) {
JavascriptExecutor js = (JavascriptExecutor) getDriver();
String script = "window.focus();" + "var elem=arguments[0];"
+ "elem.select();elem.focus();";
js.executeScript(script, elem);
} | 3.68 |
hudi_Key_set | /**
* @param value the byte array value of the key; must not be null
* @param weight the weight of the key
*/
public void set(byte[] value, double weight) {
if (value == null) {
throw new IllegalArgumentException("value can not be null");
}
this.bytes = value;
this.weight = weight;
} | 3.68 |
zxing_Result_getNumBits | /**
* @return how many bits of {@link #getRawBytes()} are valid; typically 8 times its length
* @since 3.3.0
*/
public int getNumBits() {
return numBits;
} | 3.68 |
hadoop_SuccessData_getFilenames | /**
* @return a list of filenames in the commit.
*/
public List<String> getFilenames() {
return filenames;
} | 3.68 |
hadoop_CustomTokenProviderAdapter_getUserAgentSuffix | /**
* Get a suffix for the UserAgent suffix of HTTP requests, which
* can be used to identify the principal making ABFS requests.
*
* If the adaptee is a BoundDTExtension, it is queried for a UA Suffix;
* otherwise "" is returned.
*
* @return an empty string, or a key=value string to be added to the UA
* header.
*/
public String getUserAgentSuffix() {
String suffix = ExtensionHelper.getUserAgentSuffix(adaptee, "");
return suffix != null ? suffix : "";
} | 3.68 |
querydsl_RelationalPathBase_eq | /**
* Compares the two relational paths using primary key columns
*
* @param right rhs of the comparison
* @return this == right
*/
@Override
public BooleanExpression eq(Expression<? super T> right) {
if (right instanceof RelationalPath) {
return primaryKeyOperation(Ops.EQ, primaryKey, ((RelationalPath) right).getPrimaryKey());
} else {
return super.eq(right);
}
} | 3.68 |
hadoop_EditLogOutputStream_getLastJournalledTxId | /**
* Get the last txId journalled in the stream.
* The txId is recorded when FSEditLogOp is written to the stream.
* The default implementation is dummy.
* JournalSet tracks the txId uniformly for all underlying streams.
*/
public long getLastJournalledTxId() {
return HdfsServerConstants.INVALID_TXID;
} | 3.68 |
dubbo_StringUtils_snakeToSplitName | /**
* Convert snake_case or SNAKE_CASE to a lower-case name separated by the given split string
* (e.g. kebab-case when the split is "-").
* <p>
* NOTE: Returns the name itself if it is not snake case.
*
* @param snakeName the snake_case name to convert
* @param split the separator to place between words
* @return the converted name, or the original name if it is not snake case
*/
public static String snakeToSplitName(String snakeName, String split) {
String lowerCase = snakeName.toLowerCase();
if (isSnakeCase(snakeName)) {
return replace(lowerCase, "_", split);
}
return snakeName;
} | 3.68 |
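A brief usage sketch; the concrete outputs assume `isSnakeCase` accepts upper-case snake names, which is not shown in the snippet.

```java
// Snake-case input is lower-cased and re-joined with the given separator.
String kebab = StringUtils.snakeToSplitName("MAX_RETRY_COUNT", "-");  // expected "max-retry-count"
String asIs  = StringUtils.snakeToSplitName("alreadyCamelCase", "-"); // not snake case, returned unchanged
```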
hbase_TimeRangeTracker_toTimeRange | /** Returns a TimeRange made from the current state of <code>this</code>. */
TimeRange toTimeRange() {
long min = getMin();
long max = getMax();
// Initial TimeRangeTracker timestamps are the opposite of what you want for a TimeRange. Fix!
if (min == INITIAL_MIN_TIMESTAMP) {
min = TimeRange.INITIAL_MIN_TIMESTAMP;
}
if (max == INITIAL_MAX_TIMESTAMP) {
max = TimeRange.INITIAL_MAX_TIMESTAMP;
}
return TimeRange.between(min, max);
} | 3.68 |
hbase_ScheduledChore_onChoreMissedStartTime | /**
* Notify the ChoreService that this chore has missed its start time. Allows the ChoreService to
* make the decision as to whether or not it would be worthwhile to increase the number of core
* pool threads
*/
private synchronized void onChoreMissedStartTime() {
if (choreService != null) {
choreService.onChoreMissedStartTime(this);
}
} | 3.68 |
framework_TableScrollAfterAddRow_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
// TODO Auto-generated method stub
return "";
} | 3.68 |
hbase_AbstractRpcClient_callBlockingMethod | /**
* Make a blocking call. Throws exceptions if there are network problems or if the remote code
* threw an exception.
* @param ticket Be careful which ticket you pass. A new user will mean a new Connection.
* {@link UserProvider#getCurrent()} makes a new instance of User each time so will
* be a new Connection each time.
* @return A pair with the Message response and the Cell data (if any).
*/
private Message callBlockingMethod(Descriptors.MethodDescriptor md, HBaseRpcController hrc,
Message param, Message returnType, final User ticket, final Address isa)
throws ServiceException {
BlockingRpcCallback<Message> done = new BlockingRpcCallback<>();
callMethod(md, hrc, param, returnType, ticket, isa, done);
Message val;
try {
val = done.get();
} catch (IOException e) {
throw new ServiceException(e);
}
if (hrc.failed()) {
throw new ServiceException(hrc.getFailed());
} else {
return val;
}
} | 3.68 |
morf_OracleDialect_truncatedTableName | /**
* Truncate table names to 30 characters since this is the maximum supported by Oracle.
*/
private String truncatedTableName(String tableName) {
return StringUtils.substring(tableName, 0, 30);
} | 3.68 |
hudi_HoodieRecordUtils_loadPayload | /**
* Instantiate a given class with an avro record payload.
*/
public static <T extends HoodieRecordPayload> T loadPayload(String recordPayloadClass,
Object[] payloadArgs,
Class<?>... constructorArgTypes) {
try {
return (T) ReflectionUtils.getClass(recordPayloadClass).getConstructor(constructorArgTypes)
.newInstance(payloadArgs);
} catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
throw new HoodieException("Unable to instantiate payload class ", e);
}
} | 3.68 |
flink_InstantiationUtil_isProperClass | /**
* Checks whether the class is a proper class, i.e. not abstract or an interface, and not a
* primitive type.
*
* @param clazz The class to check.
* @return True, if the class is a proper class, false otherwise.
*/
public static boolean isProperClass(Class<?> clazz) {
int mods = clazz.getModifiers();
return !(Modifier.isAbstract(mods)
|| Modifier.isInterface(mods)
|| Modifier.isNative(mods));
} | 3.68 |
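To make the modifier check concrete, a short sketch of expected results, based only on the modifiers tested above:

```java
// A "proper" class is concrete, non-interface and non-native.
InstantiationUtil.isProperClass(String.class);                 // true: concrete class
InstantiationUtil.isProperClass(Runnable.class);               // false: interface
InstantiationUtil.isProperClass(java.util.AbstractList.class); // false: abstract
```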
zxing_StringUtils_guessCharset | /**
* @param bytes bytes encoding a string, whose encoding should be guessed
* @param hints decode hints if applicable
* @return Charset of guessed encoding; at the moment will only guess one of:
* {@link #SHIFT_JIS_CHARSET}, {@link StandardCharsets#UTF_8},
* {@link StandardCharsets#ISO_8859_1}, {@link StandardCharsets#UTF_16},
* or the platform default encoding if
* none of these can possibly be correct
*/
public static Charset guessCharset(byte[] bytes, Map<DecodeHintType,?> hints) {
if (hints != null && hints.containsKey(DecodeHintType.CHARACTER_SET)) {
return Charset.forName(hints.get(DecodeHintType.CHARACTER_SET).toString());
}
// First try UTF-16, assuming anything with its BOM is UTF-16
if (bytes.length > 2 &&
((bytes[0] == (byte) 0xFE && bytes[1] == (byte) 0xFF) ||
(bytes[0] == (byte) 0xFF && bytes[1] == (byte) 0xFE))) {
return StandardCharsets.UTF_16;
}
// For now, merely tries to distinguish ISO-8859-1, UTF-8 and Shift_JIS,
// which should be by far the most common encodings.
int length = bytes.length;
boolean canBeISO88591 = true;
boolean canBeShiftJIS = true;
boolean canBeUTF8 = true;
int utf8BytesLeft = 0;
int utf2BytesChars = 0;
int utf3BytesChars = 0;
int utf4BytesChars = 0;
int sjisBytesLeft = 0;
int sjisKatakanaChars = 0;
int sjisCurKatakanaWordLength = 0;
int sjisCurDoubleBytesWordLength = 0;
int sjisMaxKatakanaWordLength = 0;
int sjisMaxDoubleBytesWordLength = 0;
int isoHighOther = 0;
boolean utf8bom = bytes.length > 3 &&
bytes[0] == (byte) 0xEF &&
bytes[1] == (byte) 0xBB &&
bytes[2] == (byte) 0xBF;
for (int i = 0;
i < length && (canBeISO88591 || canBeShiftJIS || canBeUTF8);
i++) {
int value = bytes[i] & 0xFF;
// UTF-8 stuff
if (canBeUTF8) {
if (utf8BytesLeft > 0) {
if ((value & 0x80) == 0) {
canBeUTF8 = false;
} else {
utf8BytesLeft--;
}
} else if ((value & 0x80) != 0) {
if ((value & 0x40) == 0) {
canBeUTF8 = false;
} else {
utf8BytesLeft++;
if ((value & 0x20) == 0) {
utf2BytesChars++;
} else {
utf8BytesLeft++;
if ((value & 0x10) == 0) {
utf3BytesChars++;
} else {
utf8BytesLeft++;
if ((value & 0x08) == 0) {
utf4BytesChars++;
} else {
canBeUTF8 = false;
}
}
}
}
}
}
// ISO-8859-1 stuff
if (canBeISO88591) {
if (value > 0x7F && value < 0xA0) {
canBeISO88591 = false;
} else if (value > 0x9F && (value < 0xC0 || value == 0xD7 || value == 0xF7)) {
isoHighOther++;
}
}
// Shift_JIS stuff
if (canBeShiftJIS) {
if (sjisBytesLeft > 0) {
if (value < 0x40 || value == 0x7F || value > 0xFC) {
canBeShiftJIS = false;
} else {
sjisBytesLeft--;
}
} else if (value == 0x80 || value == 0xA0 || value > 0xEF) {
canBeShiftJIS = false;
} else if (value > 0xA0 && value < 0xE0) {
sjisKatakanaChars++;
sjisCurDoubleBytesWordLength = 0;
sjisCurKatakanaWordLength++;
if (sjisCurKatakanaWordLength > sjisMaxKatakanaWordLength) {
sjisMaxKatakanaWordLength = sjisCurKatakanaWordLength;
}
} else if (value > 0x7F) {
sjisBytesLeft++;
//sjisDoubleBytesChars++;
sjisCurKatakanaWordLength = 0;
sjisCurDoubleBytesWordLength++;
if (sjisCurDoubleBytesWordLength > sjisMaxDoubleBytesWordLength) {
sjisMaxDoubleBytesWordLength = sjisCurDoubleBytesWordLength;
}
} else {
//sjisLowChars++;
sjisCurKatakanaWordLength = 0;
sjisCurDoubleBytesWordLength = 0;
}
}
}
if (canBeUTF8 && utf8BytesLeft > 0) {
canBeUTF8 = false;
}
if (canBeShiftJIS && sjisBytesLeft > 0) {
canBeShiftJIS = false;
}
// Easy -- if there is BOM or at least 1 valid not-single byte character (and no evidence it can't be UTF-8), done
if (canBeUTF8 && (utf8bom || utf2BytesChars + utf3BytesChars + utf4BytesChars > 0)) {
return StandardCharsets.UTF_8;
}
// Easy -- if assuming Shift_JIS or >= 3 valid consecutive not-ascii characters (and no evidence it can't be), done
if (canBeShiftJIS && (ASSUME_SHIFT_JIS || sjisMaxKatakanaWordLength >= 3 || sjisMaxDoubleBytesWordLength >= 3)) {
return SHIFT_JIS_CHARSET;
}
// Distinguishing Shift_JIS and ISO-8859-1 can be a little tough for short words. The crude heuristic is:
// - If we saw
// - only two consecutive katakana chars in the whole text, or
// - at least 10% of bytes that could be "upper" not-alphanumeric Latin1,
// - then we conclude Shift_JIS, else ISO-8859-1
if (canBeISO88591 && canBeShiftJIS) {
return (sjisMaxKatakanaWordLength == 2 && sjisKatakanaChars == 2) || isoHighOther * 10 >= length
? SHIFT_JIS_CHARSET : StandardCharsets.ISO_8859_1;
}
// Otherwise, try in order ISO-8859-1, Shift JIS, UTF-8 and fall back to default platform encoding
if (canBeISO88591) {
return StandardCharsets.ISO_8859_1;
}
if (canBeShiftJIS) {
return SHIFT_JIS_CHARSET;
}
if (canBeUTF8) {
return StandardCharsets.UTF_8;
}
// Otherwise, we take a wild guess with platform encoding
return PLATFORM_DEFAULT_ENCODING;
} | 3.68 |
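As a hedged usage sketch, the guessed charset is typically used to decode the raw bytes; `StringUtils` here is the class from the snippet (import omitted), and passing `null` hints is permitted by the code above.

```java
import java.nio.charset.Charset;

class GuessCharsetSketch {
    // Decode raw barcode bytes using the guessed encoding.
    static String decode(byte[] raw) {
        Charset guessed = StringUtils.guessCharset(raw, null); // null hints: fall through to the heuristics
        return new String(raw, guessed);
    }
}
```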
hudi_HashID_bits | /**
* Get this Hash size in bits.
*
* @return bits needed to represent the size
*/
public int bits() {
return this.bits;
} | 3.68 |
framework_TablePushStreaming_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Test that pushes Table data at a high pace to detect possible problems in the streaming protocol";
} | 3.68 |
morf_AbstractSqlDialectTest_testAlterColumnChangingTypeAndCase | /**
* Test altering a column, changing both its type and the case of its name (e.g. from columnName to ColumnName).
*/
@Test
public void testAlterColumnChangingTypeAndCase() {
testAlterTableColumn(OTHER_TABLE, AlterationType.ALTER, getColumn(OTHER_TABLE, FLOAT_FIELD), column("FloatField", DataType.DECIMAL, 20, 3), expectedAlterColumnChangingLengthAndCase());
} | 3.68 |
flink_InPlaceMutableHashTable_resetAppendPosition | /**
* Sets appendPosition and the write position to 0, so that appending starts overwriting
* elements from the beginning. (This is used in rebuild.)
*
* <p>Note: if data was written to the area after the current appendPosition before a call
* to resetAppendPosition, it should still be readable. To release the segments after the
* current append position, call freeSegmentsAfterAppendPosition()
*/
public void resetAppendPosition() {
appendPosition = 0;
// this is just for safety (making sure that we fail immediately
// if a write happens without calling setWritePosition)
outView.currentSegmentIndex = -1;
outView.seekOutput(null, -1);
} | 3.68 |
starts_Attribute_getLabels | /**
* Returns the labels corresponding to this attribute.
*
* @return the labels corresponding to this attribute, or <code>null</code> if
* this attribute is not a code attribute that contains labels.
*/
protected Label[] getLabels() {
return null;
} | 3.68 |
morf_SqlDialect_appendGroupBy | /**
* appends group by clause to the result
*
* @param result group by clause will be appended here
* @param stmt statement with group by clause
*/
protected void appendGroupBy(StringBuilder result, SelectStatement stmt) {
if (stmt.getGroupBys().size() > 0) {
result.append(" GROUP BY ");
boolean firstGroupByField = true;
for (AliasedField currentGroupByField : stmt.getGroupBys()) {
if (!firstGroupByField) {
result.append(", ");
}
result.append(getSqlFrom(currentGroupByField));
firstGroupByField = false;
}
}
} | 3.68 |
morf_RenameIndex_isApplied | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(Schema,
* ConnectionResources)
*/
@Override
public boolean isApplied(Schema schema, ConnectionResources database) {
if (!schema.tableExists(tableName)) {
return false;
}
Table table = schema.getTable(tableName);
for (Index index : table.indexes()) {
if (index.getName().equalsIgnoreCase(toIndexName)) {
return true;
}
}
return false;
} | 3.68 |
hbase_VisibilityLabelServiceManager_getVisibilityLabelService | /**
* @return singleton instance of {@link VisibilityLabelService}.
* @throws IllegalStateException if this called before initialization of singleton instance.
*/
public VisibilityLabelService getVisibilityLabelService() {
// By the time this method is called, the singleton instance of visibilityLabelService should
// have been created. And it will be created as getVisibilityLabelService(Configuration conf)
// is called from VC#start() and that will be the 1st thing core code do with any CP.
if (this.visibilityLabelService == null) {
throw new IllegalStateException("VisibilityLabelService not yet instantiated");
}
return this.visibilityLabelService;
} | 3.68 |
flink_RowUtils_deepToStringRow | /**
* Converts a row to a string representation. This method supports all external and most
* internal conversion classes of the table ecosystem.
*/
static String deepToStringRow(
RowKind kind,
@Nullable Object[] fieldByPosition,
@Nullable Map<String, Object> fieldByName) {
final StringBuilder sb = new StringBuilder();
if (fieldByPosition != null) {
if (USE_LEGACY_TO_STRING) {
deepToStringArrayLegacy(sb, fieldByPosition);
} else {
sb.append(kind.shortString());
deepToStringArray(sb, fieldByPosition);
}
} else {
assert fieldByName != null;
sb.append(kind.shortString());
deepToStringMap(sb, fieldByName);
}
return sb.toString();
} | 3.68 |
hadoop_DynamicIOStatisticsBuilder_withLongFunctionCounter | /**
* Add a new evaluator to the counter statistics.
* @param key key of this statistic
* @param eval evaluator for the statistic
* @return the builder.
*/
public DynamicIOStatisticsBuilder withLongFunctionCounter(String key,
ToLongFunction<String> eval) {
activeInstance().addCounterFunction(key, eval::applyAsLong);
return this;
} | 3.68 |
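
A hedged usage sketch for a function-backed counter. Only withLongFunctionCounter(...) comes from the snippet above; the dynamicIOStatistics() factory, the build() call and the counters() accessor are assumptions about the surrounding Hadoop IOStatistics API and may differ.

import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.fs.statistics.IOStatistics;

import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.dynamicIOStatistics;

public class DynamicCounterExample {
    public static void main(String[] args) {
        AtomicLong bytesRead = new AtomicLong(42);
        // The evaluator is re-invoked on every lookup, so the counter tracks the live value.
        IOStatistics stats = dynamicIOStatistics()
            .withLongFunctionCounter("stream_read_bytes", key -> bytesRead.get())
            .build();
        System.out.println(stats.counters().get("stream_read_bytes")); // 42
        bytesRead.addAndGet(8);
        System.out.println(stats.counters().get("stream_read_bytes")); // 50
    }
}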
hbase_PairOfSameType_getFirst | /**
* Return the first element stored in the pair.
*/
public T getFirst() {
return first;
} | 3.68 |
hbase_Put_addColumn | /**
* Add the specified column and value, with the specified timestamp as its version to this Put
* operation.
* @param family family name
* @param qualifier column qualifier
* @param ts version timestamp
   * @param value column value
   * @return this Put, for method chaining
   */
public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) {
if (ts < 0) {
throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
}
List<Cell> list = getCellList(family);
KeyValue kv = createPutKeyValue(family, qualifier, ts, value, null);
list.add(kv);
return this;
} | 3.68 |
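
A minimal usage sketch for the ByteBuffer overload documented above; the row key, family and qualifier values are made up, and handing the Put to Table#put(Put) is only indicated in a comment.

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutExample {
    public static void main(String[] args) {
        Put put = new Put(Bytes.toBytes("row1"));
        // One cell in family "cf", qualifier "q1", with an explicit timestamp.
        put.addColumn(Bytes.toBytes("cf"),
            ByteBuffer.wrap(Bytes.toBytes("q1")),
            System.currentTimeMillis(),
            ByteBuffer.wrap(Bytes.toBytes("value-1")));
        // In a real client this Put would then be passed to Table#put(Put).
        System.out.println(put);
    }
}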
hbase_HRegion_dropPrepareFlushIfPossible | /**
* If all stores ended up dropping their snapshots, we can safely drop the prepareFlushResult
*/
private void dropPrepareFlushIfPossible() {
if (writestate.flushing) {
boolean canDrop = true;
if (prepareFlushResult.storeFlushCtxs != null) {
for (Entry<byte[], StoreFlushContext> entry : prepareFlushResult.storeFlushCtxs
.entrySet()) {
HStore store = getStore(entry.getKey());
if (store == null) {
continue;
}
if (store.getSnapshotSize().getDataSize() > 0) {
canDrop = false;
break;
}
}
}
      // this means that all the stores in the region have finished flushing, but the WAL marker
// may not have been written or we did not receive it yet.
if (canDrop) {
writestate.flushing = false;
this.prepareFlushResult = null;
}
}
} | 3.68 |
hadoop_AbfsRestOperation_signRequest | /**
* Sign an operation.
* @param httpOperation operation to sign
* @param bytesToSign how many bytes to sign for shared key auth.
* @throws IOException failure
*/
@VisibleForTesting
public void signRequest(final AbfsHttpOperation httpOperation, int bytesToSign) throws IOException {
switch(client.getAuthType()) {
case Custom:
case OAuth:
LOG.debug("Authenticating request with OAuth2 access token");
httpOperation.getConnection().setRequestProperty(HttpHeaderConfigurations.AUTHORIZATION,
client.getAccessToken());
break;
case SAS:
// do nothing; the SAS token should already be appended to the query string
httpOperation.setMaskForSAS(); //mask sig/oid from url for logs
break;
case SharedKey:
default:
// sign the HTTP request
LOG.debug("Signing request with shared key");
// sign the HTTP request
client.getSharedKeyCredentials().signRequest(
httpOperation.getConnection(),
bytesToSign);
break;
}
} | 3.68 |
hadoop_QueryResult_getTimestamp | /**
 * The timestamp in driver time of this query.
*
* @return Timestamp in driver time.
*/
public long getTimestamp() {
return this.timestamp;
} | 3.68 |
flink_SqlWindowTableFunction_argumentMustBeScalar | /**
* {@inheritDoc}
*
* <p>Overrides because the first parameter of table-value function windowing is an explicit
* TABLE parameter, which is not scalar.
*/
@Override
public boolean argumentMustBeScalar(int ordinal) {
return ordinal != 0;
} | 3.68 |
flink_OptionalFailure_getUnchecked | /** @return same as {@link #get()} but throws a {@link FlinkRuntimeException}. */
public T getUnchecked() throws FlinkRuntimeException {
if (value != null) {
return value;
}
checkNotNull(failureCause);
throw new FlinkRuntimeException(failureCause);
} | 3.68 |
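
A self-contained sketch of the value-or-stored-failure pattern that getUnchecked() implements; the class below is an illustration only and is not Flink's OptionalFailure.

import java.util.Objects;

public final class ValueOrFailure<T> {
    private final T value;                 // non-null on success
    private final Throwable failureCause;  // non-null on failure

    private ValueOrFailure(T value, Throwable failureCause) {
        this.value = value;
        this.failureCause = failureCause;
    }

    public static <T> ValueOrFailure<T> of(T value) {
        return new ValueOrFailure<>(Objects.requireNonNull(value), null);
    }

    public static <T> ValueOrFailure<T> ofFailure(Throwable cause) {
        return new ValueOrFailure<>(null, Objects.requireNonNull(cause));
    }

    // Mirrors getUnchecked(): return the value, or rethrow the stored cause unchecked.
    public T getUnchecked() {
        if (value != null) {
            return value;
        }
        throw new RuntimeException(failureCause);
    }
}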
querydsl_BeanMap_setBean | /**
* Sets the bean to be operated on by this map. The given value may
* be null, in which case this map will be empty.
*
* @param newBean the new bean to operate on
*/
public void setBean(Object newBean) {
bean = newBean;
reinitialise();
} | 3.68 |
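
A small usage sketch for BeanMap#setBean; the import path (com.querydsl.core.util.BeanMap) and the bean class are assumptions made for illustration.

import com.querydsl.core.util.BeanMap;

public class BeanMapExample {

    public static class Person {
        private String name;
        public String getName() { return name; }
        public void setName(String name) { this.name = name; }
    }

    public static void main(String[] args) {
        BeanMap map = new BeanMap(new Person());
        map.put("name", "Alice");               // writes through Person#setName
        System.out.println(map.get("name"));    // Alice, read through Person#getName
        map.setBean(new Person());              // swap the underlying bean; the view is re-initialised
        System.out.println(map.get("name"));    // null for the fresh bean
    }
}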
hudi_KafkaConnectUtils_getCommitMetadataForLatestInstant | /**
   * Get the metadata from the latest commit file.
   *
   * @param metaClient The {@link HoodieTableMetaClient} to get access to the metadata.
   * @return An Optional {@link HoodieCommitMetadata} containing the metadata from the latest commit file.
*/
public static Option<HoodieCommitMetadata> getCommitMetadataForLatestInstant(HoodieTableMetaClient metaClient) {
HoodieTimeline timeline = metaClient.getActiveTimeline().getCommitsTimeline()
.filterCompletedInstants()
.filter(instant -> (metaClient.getTableType() == HoodieTableType.COPY_ON_WRITE && instant.getAction().equals(HoodieActiveTimeline.COMMIT_ACTION))
|| (metaClient.getTableType() == HoodieTableType.MERGE_ON_READ && instant.getAction().equals(HoodieActiveTimeline.DELTA_COMMIT_ACTION))
);
Option<HoodieInstant> latestInstant = timeline.lastInstant();
if (latestInstant.isPresent()) {
try {
byte[] data = timeline.getInstantDetails(latestInstant.get()).get();
return Option.of(HoodieCommitMetadata.fromBytes(data, HoodieCommitMetadata.class));
} catch (Exception e) {
throw new HoodieException("Failed to read schema from commit metadata", e);
}
} else {
return Option.empty();
}
} | 3.68 |
framework_AbstractMedia_play | /**
* Starts playback of the media.
*/
public void play() {
getRpcProxy(MediaControl.class).play();
} | 3.68 |
hbase_TableSnapshotInputFormat_setInput | /**
* Configures the job to use TableSnapshotInputFormat to read from a snapshot.
* @param job the job to configure
* @param snapshotName the name of the snapshot to read from
* @param restoreDir a temporary directory to restore the snapshot into. Current user
* should have write permissions to this directory, and this should not
* be a subdirectory of rootdir. After the job is finished, restoreDir
* can be deleted.
* @param splitAlgo split algorithm to generate splits from region
* @param numSplitsPerRegion how many input splits to generate per one region
* @throws IOException if an error occurs
*/
public static void setInput(JobConf job, String snapshotName, Path restoreDir,
RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException {
TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir, splitAlgo,
numSplitsPerRegion);
} | 3.68 |
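
A hedged configuration sketch for the mapred variant shown above; the snapshot name and restore directory are placeholders, and UniformSplit is just one SplitAlgorithm implementation.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.mapred.JobConf;

public class SnapshotJobSetup {
    public static void main(String[] args) throws Exception {
        JobConf job = new JobConf();
        // Restore the snapshot into a scratch directory outside the HBase root dir and
        // generate two input splits per region.
        TableSnapshotInputFormat.setInput(job, "my_snapshot",
            new Path("/tmp/snapshot-restore"),
            new RegionSplitter.UniformSplit(), 2);
    }
}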
flink_Tuple24_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>
Tuple24<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>
of(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15,
T16 f16,
T17 f17,
T18 f18,
T19 f19,
T20 f20,
T21 f21,
T22 f22,
T23 f23) {
return new Tuple24<>(
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18,
f19, f20, f21, f22, f23);
} | 3.68 |
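
A tiny runnable illustration of the of(...) factory described in the javadoc above, using Tuple3 (as the javadoc's own example does) rather than spelling out all 24 fields.

import org.apache.flink.api.java.tuple.Tuple3;

public class TupleOfExample {
    public static void main(String[] args) {
        // The generic type arguments are inferred from the argument values.
        Tuple3<Integer, Double, String> t = Tuple3.of(1, 2.5, "s");
        System.out.println(t.f0 + ", " + t.f1 + ", " + t.f2); // 1, 2.5, s
    }
}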