name | code_snippet | score
---|---|---|
hbase_HBackupFileSystem_getTableBackupDir | /**
* Given the backup root dir, backup id and the table name, return the backup image location,
* which is also where the backup manifest file is. The return value looks like:
* "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
* "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
* @param backupRootDir backup root directory
* @param backupId backup id
* @param tableName table name
* @return backupPath String for the particular table
*/
public static String getTableBackupDir(String backupRootDir, String backupId,
TableName tableName) {
return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
+ tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
+ Path.SEPARATOR;
} | 3.68 |
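For reference, a minimal usage sketch of the path construction above, assuming the standard `TableName.valueOf` factory, the same input values as the Javadoc example, and that `HBackupFileSystem` lives in the backup package shown in the import:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;

public class BackupDirExample {
    public static void main(String[] args) {
        // Same inputs as the Javadoc example above.
        String backupRootDir = "hdfs://backup.hbase.org:9000/user/biadmin/backup";
        String backupId = "backup_1396650096738";
        TableName table = TableName.valueOf("t1_dn"); // default namespace

        String dir = HBackupFileSystem.getTableBackupDir(backupRootDir, backupId, table);
        // Expected: hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/
        System.out.println(dir);
    }
}
```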
zilla_ManyToOneRingBuffer_abort | /**
* {@inheritDoc}
*/
public void abort(final int index)
{
final int recordIndex = computeRecordIndex(index);
final AtomicBuffer buffer = this.buffer;
final int recordLength = verifyClaimedSpaceNotReleased(buffer, recordIndex);
buffer.putInt(typeOffset(recordIndex), PADDING_MSG_TYPE_ID);
buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
} | 3.68 |
flink_HiveParserQBParseInfo_getClusterByForClause | /** Get the Cluster By AST for the clause. */
public HiveParserASTNode getClusterByForClause(String clause) {
return destToClusterby.get(clause);
} | 3.68 |
hbase_RawAsyncTableImpl_mutateRow | // We need the MultiRequest when constructing the org.apache.hadoop.hbase.client.MultiResponse,
// so here I write a new method as I do not want to change the abstraction of the call method.
@SuppressWarnings("unchecked")
private <RES, RESP> CompletableFuture<RESP> mutateRow(HBaseRpcController controller,
HRegionLocation loc, ClientService.Interface stub, RowMutations mutation,
Converter<MultiRequest, byte[], RowMutations> reqConvert, Function<RES, RESP> respConverter) {
CompletableFuture<RESP> future = new CompletableFuture<>();
try {
byte[] regionName = loc.getRegion().getRegionName();
MultiRequest req = reqConvert.convert(regionName, mutation);
stub.multi(controller, req, new RpcCallback<MultiResponse>() {
@Override
public void run(MultiResponse resp) {
if (controller.failed()) {
future.completeExceptionally(controller.getFailed());
} else {
try {
org.apache.hadoop.hbase.client.MultiResponse multiResp =
ResponseConverter.getResults(req, resp, controller.cellScanner());
ConnectionUtils.updateStats(conn.getStatisticsTracker(), conn.getConnectionMetrics(),
loc.getServerName(), multiResp);
Throwable ex = multiResp.getException(regionName);
if (ex != null) {
future.completeExceptionally(ex instanceof IOException
? ex
: new IOException(
"Failed to mutate row: " + Bytes.toStringBinary(mutation.getRow()), ex));
} else {
future.complete(
respConverter.apply((RES) multiResp.getResults().get(regionName).result.get(0)));
}
} catch (IOException e) {
future.completeExceptionally(e);
}
}
}
});
} catch (IOException e) {
future.completeExceptionally(e);
}
return future;
} | 3.68 |
flink_KeyedOperatorTransformation_window | /**
* Windows this transformation into a {@code WindowedOperatorTransformation}, which bootstraps
* state that can be restored by a {@code WindowOperator}. Elements are put into windows by a
* {@link WindowAssigner}. The grouping of elements is done both by key and by window.
*
* <p>A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to
* specify when windows are evaluated. However, {@code WindowAssigners} have a default {@code
* Trigger} that is used if a {@code Trigger} is not specified.
*
* @param assigner The {@code WindowAssigner} that assigns elements to windows.
*/
public <W extends Window> WindowedOperatorTransformation<T, K, W> window(
WindowAssigner<? super T, W> assigner) {
return new WindowedOperatorTransformation<>(
dataSet, operatorMaxParallelism, timestamper, keySelector, keyType, assigner);
} | 3.68 |
druid_SQLBinaryOpExpr_mergeEqual | /**
* only for parameterized output
*
* @param a the first expression to compare
* @param b the second expression to compare
* @return true if both are equality comparisons with the same left operand and a literal or variant right operand
*/
private static boolean mergeEqual(SQLExpr a, SQLExpr b) {
if (!(a instanceof SQLBinaryOpExpr)) {
return false;
}
if (!(b instanceof SQLBinaryOpExpr)) {
return false;
}
SQLBinaryOpExpr binaryA = (SQLBinaryOpExpr) a;
SQLBinaryOpExpr binaryB = (SQLBinaryOpExpr) b;
if (binaryA.operator != SQLBinaryOperator.Equality) {
return false;
}
if (binaryB.operator != SQLBinaryOperator.Equality) {
return false;
}
if (!(binaryA.right instanceof SQLLiteralExpr || binaryA.right instanceof SQLVariantRefExpr)) {
return false;
}
if (!(binaryB.right instanceof SQLLiteralExpr || binaryB.right instanceof SQLVariantRefExpr)) {
return false;
}
return binaryA.left.equals(binaryB.left);
} | 3.68 |
hbase_ZKProcedureCoordinator_start | /**
* Start monitoring znodes in ZK - subclass hook to start monitoring the znodes they care about.
* @return true on success, false if initialization errors were encountered.
*/
@Override
final public boolean start(final ProcedureCoordinator coordinator) {
if (this.coordinator != null) {
throw new IllegalStateException(
"ZKProcedureCoordinator already started and already has listener installed");
}
this.coordinator = coordinator;
try {
this.zkProc = new ZKProcedureUtil(watcher, procedureType) {
@Override
public void nodeCreated(String path) {
if (!isInProcedurePath(path)) return;
LOG.debug("Node created: " + path);
logZKTree(this.baseZNode);
if (isAcquiredPathNode(path)) {
// node wasn't present when we created the watch so zk event triggers acquire
coordinator.memberAcquiredBarrier(ZKUtil.getNodeName(ZKUtil.getParent(path)),
ZKUtil.getNodeName(path));
} else if (isReachedPathNode(path)) {
// node was absent when we created the watch so zk event triggers the finished barrier.
// TODO Nothing prevents the acquire and reached znodes from showing up in the wrong order.
String procName = ZKUtil.getNodeName(ZKUtil.getParent(path));
String member = ZKUtil.getNodeName(path);
// get the data from the procedure member
try {
byte[] dataFromMember = ZKUtil.getData(watcher, path);
// ProtobufUtil.isPBMagicPrefix will check null
if (dataFromMember != null && dataFromMember.length > 0) {
if (!ProtobufUtil.isPBMagicPrefix(dataFromMember)) {
ForeignException ee = new ForeignException(coordName,
"Failed to get data from finished node or data is illegally formatted:" + path);
coordinator.abortProcedure(procName, ee);
} else {
dataFromMember = Arrays.copyOfRange(dataFromMember,
ProtobufUtil.lengthOfPBMagic(), dataFromMember.length);
LOG.debug("Finished data from procedure '{}' member '{}': {}", procName, member,
new String(dataFromMember, StandardCharsets.UTF_8));
coordinator.memberFinishedBarrier(procName, member, dataFromMember);
}
} else {
coordinator.memberFinishedBarrier(procName, member, dataFromMember);
}
} catch (KeeperException e) {
ForeignException ee = new ForeignException(coordName, e);
coordinator.abortProcedure(procName, ee);
} catch (InterruptedException e) {
ForeignException ee = new ForeignException(coordName, e);
coordinator.abortProcedure(procName, ee);
}
} else if (isAbortPathNode(path)) {
abort(path);
} else {
LOG.debug("Ignoring created notification for node:" + path);
}
}
};
zkProc.clearChildZNodes();
} catch (KeeperException e) {
LOG.error("Unable to start the ZK-based Procedure Coordinator rpcs.", e);
return false;
}
LOG.debug("Starting controller for procedure member=" + coordName);
return true;
} | 3.68 |
flink_ConfigurationUtils_canBePrefixMap | /**
* Maps can be represented in two ways.
*
* <p>With constant key space:
*
* <pre>
* avro-confluent.properties = schema: 1, other-prop: 2
* </pre>
*
* <p>Or with variable key space (i.e. prefix notation):
*
* <pre>
* avro-confluent.properties.schema = 1
* avro-confluent.properties.other-prop = 2
* </pre>
*/
public static boolean canBePrefixMap(ConfigOption<?> configOption) {
return configOption.getClazz() == Map.class && !configOption.isList();
} | 3.68 |
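As an illustration of an option that qualifies, here is a sketch of a map-typed, non-list `ConfigOption` declared with Flink's `ConfigOptions` builder; the key and description are hypothetical:

```java
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;

import java.util.Map;

public class PrefixMapOptionExample {
    // A map-typed, non-list option: canBePrefixMap(PROPERTIES) returns true, so both
    // "avro-confluent.properties = schema: 1, other-prop: 2" and the prefix form
    // "avro-confluent.properties.schema = 1" can be parsed into it.
    public static final ConfigOption<Map<String, String>> PROPERTIES =
            ConfigOptions.key("avro-confluent.properties")
                    .mapType()
                    .noDefaultValue()
                    .withDescription("Properties forwarded to the schema registry client.");
}
```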
flink_RocksDBMemoryConfiguration_setUseManagedMemory | /**
* Configures RocksDB to use the managed memory of a slot. See {@link
* RocksDBOptions#USE_MANAGED_MEMORY} for details.
*/
public void setUseManagedMemory(boolean useManagedMemory) {
this.useManagedMemory = useManagedMemory;
} | 3.68 |
hudi_DataSourceUtils_createPayload | /**
* Create a payload class via reflection; no ordering/precombine value is used.
*/
public static HoodieRecordPayload createPayload(String payloadClass, GenericRecord record)
throws IOException {
try {
return (HoodieRecordPayload) ReflectionUtils.loadClass(payloadClass,
new Class<?>[] {Option.class}, Option.of(record));
} catch (Throwable e) {
throw new IOException("Could not create payload for class: " + payloadClass, e);
}
} | 3.68 |
hadoop_DateSplitter_dateToString | /**
* Given a Date 'd', format it as a string for use in a SQL date
* comparison operation.
* @param d the date to format.
* @return the string representing this date in SQL with any appropriate
* quotation characters, etc.
*/
protected String dateToString(Date d) {
return "'" + d.toString() + "'";
} | 3.68 |
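For illustration, a sketch of the literal this quoting produces, assuming the date is a `java.sql.Date`, whose `toString()` yields the `yyyy-MM-dd` form:

```java
import java.sql.Date;

public class DateLiteralExample {
    public static void main(String[] args) {
        Date d = Date.valueOf("2024-06-01");
        // Mirrors dateToString(d): wrap the default string form in single quotes.
        String sqlLiteral = "'" + d.toString() + "'";
        System.out.println(sqlLiteral); // prints '2024-06-01'
    }
}
```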
flink_WorksetNode_getOperator | /**
* Gets the contract object for this data source node.
*
* @return The contract.
*/
@Override
public WorksetPlaceHolder<?> getOperator() {
return (WorksetPlaceHolder<?>) super.getOperator();
} | 3.68 |
hbase_Increment_hasFamilies | /**
* Method for checking if any families have been inserted into this Increment
* @return true if familyMap is non-empty, false otherwise
*/
public boolean hasFamilies() {
return !this.familyMap.isEmpty();
} | 3.68 |
hadoop_ParsedTaskAttempt_incorporateCounters | /** incorporate event counters */
public void incorporateCounters(JhCounters counters) {
Map<String, Long> countersMap =
JobHistoryUtils.extractCounters(counters);
putCounters(countersMap);
super.incorporateCounters(counters);
} | 3.68 |
flink_KvStateClientProxyHandler_getKvStateLookupInfo | /**
* Lookup the {@link KvStateLocation} for the given job and queryable state name.
*
* <p>The job manager will be queried for the location only if forced or no cached location can
* be found.
*
* @param jobId JobID the state instance belongs to.
* @param queryableStateName Name under which the state instance has been published.
* @param forceUpdate Flag to indicate whether to force an update via the lookup service.
* @return Future holding the KvStateLocation
*/
private CompletableFuture<KvStateLocation> getKvStateLookupInfo(
final JobID jobId, final String queryableStateName, final boolean forceUpdate) {
final Tuple2<JobID, String> cacheKey = new Tuple2<>(jobId, queryableStateName);
final CompletableFuture<KvStateLocation> cachedFuture = lookupCache.get(cacheKey);
if (!forceUpdate && cachedFuture != null && !cachedFuture.isCompletedExceptionally()) {
LOG.debug(
"Retrieving location for state={} of job={} from the cache.",
queryableStateName,
jobId);
return cachedFuture;
}
final KvStateLocationOracle kvStateLocationOracle = proxy.getKvStateLocationOracle(jobId);
if (kvStateLocationOracle != null) {
LOG.debug(
"Retrieving location for state={} of job={} from the key-value state location oracle.",
queryableStateName,
jobId);
final CompletableFuture<KvStateLocation> location = new CompletableFuture<>();
lookupCache.put(cacheKey, location);
kvStateLocationOracle
.requestKvStateLocation(jobId, queryableStateName)
.whenComplete(
(KvStateLocation kvStateLocation, Throwable throwable) -> {
if (throwable != null) {
if (ExceptionUtils.stripCompletionException(throwable)
instanceof FlinkJobNotFoundException) {
// if the jobId was wrong, remove the entry from the cache.
lookupCache.remove(cacheKey);
}
location.completeExceptionally(throwable);
} else {
location.complete(kvStateLocation);
}
});
return location;
} else {
return FutureUtils.completedExceptionally(
new UnknownLocationException(
"Could not retrieve location of state="
+ queryableStateName
+ " of job="
+ jobId
+ ". Potential reasons are: i) the state is not ready, or ii) the job does not exist."));
}
} | 3.68 |
hbase_HRegion_worthPreFlushing | /** Returns true if it is worth doing a flush before we put up the close flag. */
private boolean worthPreFlushing() {
return this.memStoreSizing.getDataSize()
> this.conf.getLong("hbase.hregion.preclose.flush.size", 1024 * 1024 * 5);
} | 3.68 |
hadoop_WordMean_map | /**
* Emits 2 key-value pairs for counting the word and its length. Outputs are
* (Text, LongWritable).
*
* @param value
* This will be a line of text coming in from our input file.
*/
public void map(Object key, Text value, Context context)
throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
String string = itr.nextToken();
this.wordLen.set(string.length());
context.write(LENGTH, this.wordLen);
context.write(COUNT, ONE);
}
} | 3.68 |
flink_FileMergingSnapshotManagerBuilder_setIOExecutor | /**
* Set the executor for I/O operations in the manager. If null (default), all I/O operations will be
* executed synchronously.
*/
public FileMergingSnapshotManagerBuilder setIOExecutor(@Nullable Executor ioExecutor) {
this.ioExecutor = ioExecutor;
return this;
} | 3.68 |
graphhopper_AlternativeRoute_getFirstShareEE | /**
* Extract path until we stumble over an existing traversal id
*/
SPTEntry getFirstShareEE(SPTEntry startEE, boolean reverse) {
while (startEE.parent != null) {
// TODO we could make use of traversal ID directly if stored in SPTEntry
int tid = traversalMode.createTraversalId(graph.getEdgeIteratorState(startEE.edge, startEE.parent.adjNode), reverse);
if (isAlreadyExisting(tid))
return startEE;
startEE = startEE.parent;
}
return startEE;
} | 3.68 |
hbase_RpcServer_authorize | /**
* Authorize the incoming client connection.
* @param user client user
* @param connection incoming connection
* @param addr InetAddress of incoming connection
* @throws AuthorizationException when the client isn't authorized to talk the protocol
*/
public synchronized void authorize(UserGroupInformation user, ConnectionHeader connection,
InetAddress addr) throws AuthorizationException {
if (authorize) {
Class<?> c = getServiceInterface(services, connection.getServiceName());
authManager.authorize(user, c, getConf(), addr);
}
} | 3.68 |
hadoop_CounterGroupFactory_newGroup | /**
* Create a new counter group
* @param name of the group
* @param displayName of the group
* @param limits the counters limits policy object
* @return a new counter group
*/
public G newGroup(String name, String displayName, Limits limits) {
FrameworkGroupFactory<G> gf = fmap.get(name);
if (gf != null) return gf.newGroup(name);
if (name.equals(FS_GROUP_NAME)) {
return newFileSystemGroup();
} else if (s2i.get(name) != null) {
return newFrameworkGroup(s2i.get(name));
}
return newGenericGroup(name, displayName, limits);
} | 3.68 |
hbase_MasterObserver_preBalanceRSGroup | /**
* Called before a region server group is balanced
* @param ctx the environment to interact with the framework and master
* @param groupName group name
*/
default void preBalanceRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName, BalanceRequest request) throws IOException {
} | 3.68 |
streampipes_StreamRequirementsBuilder_build | /**
* Finishes the stream requirements definition.
*
* @return an object of type {@link org.apache.streampipes.sdk.helpers.CollectedStreamRequirements}
* that contains all defined property requirements and static properties.
*/
public CollectedStreamRequirements build() {
SpDataStream stream = new SpDataStream();
stream.setEventSchema(new EventSchema(eventProperties));
return new CollectedStreamRequirements(stream, mappingProperties);
} | 3.68 |
hbase_CompactionConfiguration_getCompactionRatio | /** Returns Ratio used for compaction */
public double getCompactionRatio() {
return compactionRatio;
} | 3.68 |
zxing_PDF417Writer_bitMatrixFromBitArray | /**
* This takes an array holding the values of the PDF 417
*
* @param input a byte array of information where 0 is black and 1 is white
* @param margin border around the barcode
* @return BitMatrix of the input
*/
private static BitMatrix bitMatrixFromBitArray(byte[][] input, int margin) {
// Creates the bit matrix with extra space for whitespace
BitMatrix output = new BitMatrix(input[0].length + 2 * margin, input.length + 2 * margin);
output.clear();
for (int y = 0, yOutput = output.getHeight() - margin - 1; y < input.length; y++, yOutput--) {
byte[] inputY = input[y];
for (int x = 0; x < input[0].length; x++) {
// Zero is white in the byte matrix
if (inputY[x] == 1) {
output.set(x + margin, yOutput);
}
}
}
return output;
} | 3.68 |
flink_ProjectOperator_projectTuple24 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>
ProjectOperator<
T,
Tuple24<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>>
projectTuple24() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<
Tuple24<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>>
tType =
new TupleTypeInfo<
Tuple24<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>>(fTypes);
return new ProjectOperator<
T,
Tuple24<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>>(this.ds, this.fieldIndexes, tType);
} | 3.68 |
rocketmq-connect_AvroData_avroSchemaForUnderlyingTypeIfOptional | /**
* Connect optional fields are represented as unions (null & type) in Avro.
* Return the Avro schema of the actual type in the Union (instead of the union itself)
*/
private static org.apache.avro.Schema avroSchemaForUnderlyingTypeIfOptional(Schema schema, org.apache.avro.Schema avroSchema) {
if (schema != null && schema.isOptional()) {
if (avroSchema.getType() == org.apache.avro.Schema.Type.UNION) {
for (org.apache.avro.Schema typeSchema : avroSchema.getTypes()) {
if (!typeSchema.getType().equals(org.apache.avro.Schema.Type.NULL)
&& crossReferenceSchemaNames(schema, typeSchema)) {
return typeSchema;
}
}
} else {
throw new ConnectException(
"An optional schema should have an Avro Union type, not "
+ schema.getFieldType());
}
}
return avroSchema;
} | 3.68 |
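A standalone sketch of the union shape being unwrapped above, built with Avro's `SchemaBuilder`; it illustrates the null-plus-type union that represents an optional field and does not use the rocketmq-connect `AvroData` API itself:

```java
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;

public class OptionalUnionExample {
    public static void main(String[] args) {
        // An optional int field is represented in Avro as a union of null and int.
        Schema optionalInt = SchemaBuilder.unionOf().nullType().and().intType().endUnion();

        // The "underlying" schema is the non-null branch of the union.
        Schema underlying = optionalInt.getTypes().stream()
                .filter(s -> s.getType() != Schema.Type.NULL)
                .findFirst()
                .orElseThrow(IllegalStateException::new);

        System.out.println(underlying.getType()); // INT
    }
}
```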
shardingsphere-elasticjob_JobNodeStorage_replaceJobNode | /**
* Replace data.
*
* @param node node
* @param value the replacement data
*/
public void replaceJobNode(final String node, final Object value) {
regCenter.persist(jobNodePath.getFullPath(node), value.toString());
} | 3.68 |
flink_InputSelection_fairSelectNextIndexOutOf2 | /**
* Fairly select one of the two inputs for reading. When {@code inputMask} includes two inputs
* and both inputs are available, alternately select one of them. Otherwise, select the
* available one of {@code inputMask}, or return {@link InputSelection#NONE_AVAILABLE} to
* indicate no input is selected.
*
* <p>Note that this supports only two inputs for performance reasons.
*
* @param selectionMask The mask of inputs that are selected. Note that -1 is interpreted as
* all 32 inputs being selected.
* @param availableInputsMask The mask of all available inputs.
* @param lastReadInputIndex The index of last read input.
* @return the index of the input for reading or {@link InputSelection#NONE_AVAILABLE} (if
* {@code selectionMask} is empty or the inputs in {@code selectionMask} are unavailable).
*/
public static int fairSelectNextIndexOutOf2(
int selectionMask, int availableInputsMask, int lastReadInputIndex) {
int combineMask = availableInputsMask & selectionMask;
if (combineMask == 3) {
return lastReadInputIndex == 0 ? 1 : 0;
} else if (combineMask >= 0 && combineMask < 3) {
return combineMask - 1;
}
throw new UnsupportedOperationException("Only two inputs are supported.");
} | 3.68 |
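A small worked example of the mask arithmetic above (a sketch; it assumes `InputSelection` is importable from Flink's streaming operators package as shown):

```java
import org.apache.flink.streaming.api.operators.InputSelection;

public class FairSelectExample {
    public static void main(String[] args) {
        int both = 0b11; // inputs 0 and 1

        // Both selected and both available: alternate with the last read index.
        System.out.println(InputSelection.fairSelectNextIndexOutOf2(both, both, 0)); // 1
        System.out.println(InputSelection.fairSelectNextIndexOutOf2(both, both, 1)); // 0

        // Only input 1 available: combineMask == 0b10, so index 1 is returned.
        System.out.println(InputSelection.fairSelectNextIndexOutOf2(both, 0b10, 0)); // 1

        // Nothing available: combineMask == 0, so NONE_AVAILABLE (-1) is returned.
        System.out.println(InputSelection.fairSelectNextIndexOutOf2(both, 0b00, 0)); // -1
    }
}
```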
dubbo_RpcServiceContext_copyOf | /**
* Only part of the properties are copied; the others are either not used currently or can be obtained from the invocation.
* Also see {@link RpcContextAttachment#copyOf(boolean)}
*
* @param needCopy
* @return a shallow copy of RpcServiceContext
*/
public RpcServiceContext copyOf(boolean needCopy) {
if (needCopy) {
RpcServiceContext copy = new RpcServiceContext();
copy.arguments = this.arguments;
copy.consumerUrl = this.consumerUrl;
copy.invocation = this.invocation;
copy.invokers = this.invokers;
copy.invoker = this.invoker;
copy.localAddress = this.localAddress;
copy.methodName = this.methodName;
copy.needPrintRouterSnapshot = this.needPrintRouterSnapshot;
copy.parameterTypes = this.parameterTypes;
copy.remoteAddress = this.remoteAddress;
copy.remoteApplicationName = this.remoteApplicationName;
copy.request = this.request;
copy.response = this.response;
copy.url = this.url;
copy.urls = this.urls;
return copy;
} else {
return this;
}
} | 3.68 |
flink_NFACompiler_checkPatternNameUniqueness | /**
* Check if the given pattern's name is already used or not. If yes, it throws a {@link
* MalformedPatternException}.
*
* @param pattern The pattern to be checked
*/
private void checkPatternNameUniqueness(final Pattern pattern) {
if (pattern instanceof GroupPattern) {
Pattern patternToCheck = ((GroupPattern) pattern).getRawPattern();
while (patternToCheck != null) {
checkPatternNameUniqueness(patternToCheck);
patternToCheck = patternToCheck.getPrevious();
}
} else {
stateNameHandler.checkNameUniqueness(pattern.getName());
}
} | 3.68 |
hbase_SnapshotScannerHDFSAclHelper_getGlobalRootPaths | /**
* Return the paths that a user with global permission will visit
* @return the path list
*/
List<Path> getGlobalRootPaths() {
return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
} | 3.68 |
hbase_MasterProcedureScheduler_waitTableExclusiveLock | /**
* Suspend the procedure if the specified table is already locked. Other operations in the
* table-queue will be executed after the lock is released.
* @param procedure the procedure trying to acquire the lock
* @param table Table to lock
* @return true if the procedure has to wait for the table to be available
*/
public boolean waitTableExclusiveLock(final Procedure<?> procedure, final TableName table) {
schedLock();
try {
final String namespace = table.getNamespaceAsString();
final LockAndQueue namespaceLock = locking.getNamespaceLock(namespace);
final LockAndQueue tableLock = locking.getTableLock(table);
if (!namespaceLock.trySharedLock(procedure)) {
waitProcedure(namespaceLock, procedure);
logLockedResource(LockedResourceType.NAMESPACE, namespace);
return true;
}
if (!tableLock.tryExclusiveLock(procedure)) {
namespaceLock.releaseSharedLock();
waitProcedure(tableLock, procedure);
logLockedResource(LockedResourceType.TABLE, table.getNameAsString());
return true;
}
removeFromRunQueue(tableRunQueue, getTableQueue(table),
() -> procedure + " held the exclusive lock");
return false;
} finally {
schedUnlock();
}
} | 3.68 |
flink_FlinkConnection_getAutoCommit | // TODO We currently do not support this, but we can't throw a SQLException here because we want
// to support jdbc tools such as beeline and sqlline.
@Override
public boolean getAutoCommit() throws SQLException {
return true;
} | 3.68 |
hadoop_TimelinePutResponse_getErrors | /**
* Get a list of {@link TimelinePutError} instances
*
* @return a list of {@link TimelinePutError} instances
*/
@XmlElement(name = "errors")
public List<TimelinePutError> getErrors() {
return errors;
} | 3.68 |
zxing_HybridBinarizer_calculateBlackPoints | /**
* Calculates a single black point for each block of pixels and saves it away.
* See the following thread for a discussion of this algorithm:
* http://groups.google.com/group/zxing/browse_thread/thread/d06efa2c35a7ddc0
*/
private static int[][] calculateBlackPoints(byte[] luminances,
int subWidth,
int subHeight,
int width,
int height) {
int maxYOffset = height - BLOCK_SIZE;
int maxXOffset = width - BLOCK_SIZE;
int[][] blackPoints = new int[subHeight][subWidth];
for (int y = 0; y < subHeight; y++) {
int yoffset = y << BLOCK_SIZE_POWER;
if (yoffset > maxYOffset) {
yoffset = maxYOffset;
}
for (int x = 0; x < subWidth; x++) {
int xoffset = x << BLOCK_SIZE_POWER;
if (xoffset > maxXOffset) {
xoffset = maxXOffset;
}
int sum = 0;
int min = 0xFF;
int max = 0;
for (int yy = 0, offset = yoffset * width + xoffset; yy < BLOCK_SIZE; yy++, offset += width) {
for (int xx = 0; xx < BLOCK_SIZE; xx++) {
int pixel = luminances[offset + xx] & 0xFF;
sum += pixel;
// still looking for good contrast
if (pixel < min) {
min = pixel;
}
if (pixel > max) {
max = pixel;
}
}
// short-circuit min/max tests once dynamic range is met
if (max - min > MIN_DYNAMIC_RANGE) {
// finish the rest of the rows quickly
for (yy++, offset += width; yy < BLOCK_SIZE; yy++, offset += width) {
for (int xx = 0; xx < BLOCK_SIZE; xx++) {
sum += luminances[offset + xx] & 0xFF;
}
}
}
}
// The default estimate is the average of the values in the block.
int average = sum >> (BLOCK_SIZE_POWER * 2);
if (max - min <= MIN_DYNAMIC_RANGE) {
// If variation within the block is low, assume this is a block with only light or only
// dark pixels. In that case we do not want to use the average, as it would divide this
// low contrast area into black and white pixels, essentially creating data out of noise.
//
// The default assumption is that the block is light/background. Since no estimate for
// the level of dark pixels exists locally, use half the min for the block.
average = min / 2;
if (y > 0 && x > 0) {
// Correct the "white background" assumption for blocks that have neighbors by comparing
// the pixels in this block to the previously calculated black points. This is based on
// the fact that dark barcode symbology is always surrounded by some amount of light
// background for which reasonable black point estimates were made. The bp estimated at
// the boundaries is used for the interior.
// The (min < bp) is arbitrary but works better than other heuristics that were tried.
int averageNeighborBlackPoint =
(blackPoints[y - 1][x] + (2 * blackPoints[y][x - 1]) + blackPoints[y - 1][x - 1]) / 4;
if (min < averageNeighborBlackPoint) {
average = averageNeighborBlackPoint;
}
}
}
blackPoints[y][x] = average;
}
}
return blackPoints;
} | 3.68 |
morf_AddColumn_apply | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#apply(org.alfasoftware.morf.metadata.Schema)
* @return a new {@link Schema} which results from the addition of the new column to the relevant table in <var>schema</var>
*/
@Override
public Schema apply(Schema schema) {
Table original = schema.getTable(tableName);
List<String> columns = new ArrayList<>();
for (Column column : original.columns()) {
if (column.getName().equalsIgnoreCase(newColumnDefinition.getName())) {
throw new IllegalStateException("Column [" + newColumnDefinition.getName() + "] is already present on table [" + tableName + "] so cannot be added.");
}
columns.add(column.getName());
}
columns.add(newColumnDefinition.getName());
return new TableOverrideSchema(schema, new AlteredTable(original, columns, Arrays.asList(new Column[] {newColumnDefinition})));
} | 3.68 |
druid_MySqlStatementParser_parseCreateProcedure | /**
* parse create procedure statement
*/
public SQLCreateProcedureStatement parseCreateProcedure() {
/**
* CREATE OR REPLACE PROCEDURE SP_NAME(parameter_list) BEGIN block_statement END
*/
SQLCreateProcedureStatement stmt = new SQLCreateProcedureStatement();
stmt.setDbType(dbType);
if (lexer.token() == Token.CREATE) {
lexer.nextToken();
if (lexer.token() == Token.OR) {
lexer.nextToken();
accept(Token.REPLACE);
stmt.setOrReplace(true);
}
}
if (lexer.identifierEquals(FnvHash.Constants.DEFINER)) {
lexer.nextToken();
accept(Token.EQ);
SQLName definer = this.getExprParser().userName();
stmt.setDefiner(definer);
}
accept(Token.PROCEDURE);
stmt.setName(this.exprParser.name());
if (lexer.token() == Token.LPAREN) {
lexer.nextToken();
parserParameters(stmt.getParameters(), stmt);
accept(Token.RPAREN);
}
for (; ; ) {
if (lexer.token() == Token.COMMENT) {
lexer.nextToken();
stmt.setComment(this.exprParser.charExpr());
}
if (lexer.identifierEquals(FnvHash.Constants.LANGUAGE)) {
lexer.nextToken();
acceptIdentifier("SQL");
stmt.setLanguageSql(true);
}
if (lexer.identifierEquals(FnvHash.Constants.DETERMINISTIC)) {
lexer.nextToken();
stmt.setDeterministic(true);
continue;
}
if (lexer.identifierEquals(FnvHash.Constants.CONTAINS) || lexer.token() == Token.CONTAINS) {
lexer.nextToken();
acceptIdentifier("SQL");
stmt.setContainsSql(true);
continue;
}
if (lexer.identifierEquals(FnvHash.Constants.SQL)) {
lexer.nextToken();
acceptIdentifier("SECURITY");
SQLName authid = this.exprParser.name();
stmt.setAuthid(authid);
}
break;
}
SQLStatement block;
if (lexer.token() == Token.BEGIN) {
block = this.parseBlock();
} else {
block = this.parseStatement();
}
stmt.setBlock(block);
return stmt;
} | 3.68 |
hmily_HmilyRepositoryEventPublisher_asyncPublishEvent | /**
* Async publish event.
*
* @param hmilyTransaction the hmily transaction
* @param type the type
*/
public void asyncPublishEvent(final HmilyTransaction hmilyTransaction, final int type) {
HmilyRepositoryEvent event = new HmilyRepositoryEvent();
event.setType(type);
event.setHmilyTransaction(hmilyTransaction);
event.setTransId(hmilyTransaction.getTransId());
disruptor.getProvider().onData(event);
} | 3.68 |
flink_ExecutionVertex_finishPartitionsIfNeeded | /**
* Mark partition finished if needed.
*
* @return list of finished partitions.
*/
@VisibleForTesting
public List<IntermediateResultPartition> finishPartitionsIfNeeded() {
List<IntermediateResultPartition> finishedPartitions = null;
MarkPartitionFinishedStrategy markPartitionFinishedStrategy =
getExecutionGraphAccessor().getMarkPartitionFinishedStrategy();
for (IntermediateResultPartition partition : resultPartitions.values()) {
if (markPartitionFinishedStrategy.needMarkPartitionFinished(
partition.getResultType())) {
partition.markFinished();
if (finishedPartitions == null) {
finishedPartitions = new LinkedList<>();
}
finishedPartitions.add(partition);
}
}
if (finishedPartitions == null) {
return Collections.emptyList();
} else {
return finishedPartitions;
}
} | 3.68 |
morf_SqlServerDialect_needsCollation | /**
* @param dataType a data type to examine
* @return true if this data type should have COLLATE set.
*/
private static boolean needsCollation(DataType dataType) {
return dataType == DataType.STRING || dataType == DataType.CLOB;
} | 3.68 |
flink_DataStreamUtils_collectRecordsFromUnboundedStream | /** @deprecated Please use {@link DataStream#executeAndCollect()}. */
@Deprecated
public static <E> List<E> collectRecordsFromUnboundedStream(
final ClientAndIterator<E> client, final int numElements) {
checkNotNull(client, "client");
checkArgument(numElements > 0, "numElement must be > 0");
final ArrayList<E> result = new ArrayList<>(numElements);
final Iterator<E> iterator = client.iterator;
while (iterator.hasNext()) {
result.add(iterator.next());
if (result.size() == numElements) {
return result;
}
}
throw new IllegalArgumentException(
String.format(
"The stream ended before reaching the requested %d records. Only %d records were received.",
numElements, result.size()));
} | 3.68 |
hadoop_OBSListing_requestNextBatch | /**
* Try to retrieve another batch. Note that for the initial batch, {@link
* ObjectListingIterator} does not generate a request; it simply returns the
* initial set.
*
* @return true if a new batch was created.
* @throws IOException IO problems
*/
private boolean requestNextBatch() throws IOException {
// look for more object listing batches being available
while (source.hasNext()) {
// if available, retrieve it and build the next status
if (buildNextStatusBatch(source.next())) {
// this batch successfully generated entries matching
// the filters/acceptors;
// declare that the request was successful
return true;
} else {
LOG.debug(
"All entries in batch were filtered...continuing");
}
}
// if this code is reached, it means that all remaining
// object lists have been retrieved, and there are no new entries
// to return.
return false;
} | 3.68 |
morf_DatabaseDumper_dump | /**
* Dumps the current database state to a file.
*
* @param outputRootDirectory The directory where database dumps are to be written to.
* @param dumpName A string to prepend to the dump name, to aid in identifying it.
*
* @throws IOException If the output directory cannot be created.
*/
public void dump(File outputRootDirectory, String dumpName) throws IOException {
log.warn("********************************************************");
log.warn("***********************WARNING!!!!!!********************");
log.warn("**************DATABASE DUMP BEING EXECUTED**************");
log.warn("********************************************************");
log.warn("***IF YOU DO NOT EXPECT THIS MESSAGE CONTACT SUPPORT****");
log.warn("********************************************************");
if (!outputRootDirectory.exists()) {
boolean success = outputRootDirectory.mkdirs();
if(!success) {
throw new IOException(String.format("Could not create root directory: [%s]", outputRootDirectory.getPath()));
}
} else if(outputRootDirectory.isFile()) {
throw new IllegalArgumentException(String.format("Input file: [%s] was not a directory.", outputRootDirectory.getPath()));
}
// Create the input
DataSetProducer input = new DatabaseDataSetProducer(connectionResources);
// Create the output
DateFormat dateFormat = new SimpleDateFormat("yyyyMMdd-HHmm");
File outputFile = new File(outputRootDirectory, dumpName + "-" + dateFormat.format(System.currentTimeMillis()) + ".zip");
boolean success = outputFile.createNewFile();
if(!success) {
throw new IOException(String.format("Could not create output file: [%s]", outputFile.getPath()));
}
log.info(String.format("Output file will be: [%s]", outputFile.getAbsolutePath()));
DataSetConsumer output = new XmlDataSetConsumer(outputFile);
// Run the extraction.
log.info(String.format("Starting database dump at [%s]", DateFormat.getInstance().format(System.currentTimeMillis())));
new DataSetConnector(input, output).connect();
log.info(String.format("Completed database dump at [%s]", DateFormat.getInstance().format(System.currentTimeMillis())));
} | 3.68 |
Activiti_CommandContext_exception | /**
* Stores the provided exception on this {@link CommandContext} instance.
* That exception will be rethrown at the end of closing the {@link CommandContext} instance.
* <p>
* If there is already an exception being stored, a 'masked exception' message will be logged.
*/
public void exception(Throwable exception) {
if (this.exception == null) {
this.exception = exception;
} else {
log.error("masked exception in command context. for root cause, see below as it will be rethrown later.",
exception);
LogMDC.clear();
}
} | 3.68 |
hbase_RegionReplicationBufferManager_decrease | /**
* Called after you ship the edits out.
*/
public void decrease(long size) {
pendingSize.addAndGet(-size);
} | 3.68 |
hudi_CleanPlanner_getFilesToCleanKeepingLatestCommits | /**
* Selects the versions for file for cleaning, such that it
* <p>
* - Leaves the latest version of the file untouched - For older versions, - It leaves all the commits untouched which
* have occurred in the last <code>config.getCleanerCommitsRetained()</code> commits - It leaves ONE commit before this
* window. We assume that the max(query execution time) == commit_batch_time * config.getCleanerCommitsRetained().
* This is 5 hours by default (assuming ingestion is running every 30 minutes). This is essential to leave the file
* used by the query that is running for the max time.
* <p>
* This provides the effect of having lookback into all changes that happened in the last X commits. (eg: if you
* retain 10 commits, and commit batch time is 30 mins, then you have 5 hrs of lookback)
* <p>
* This policy is the default.
*
* @return A {@link Pair} whose left is boolean indicating whether partition itself needs to be deleted,
* and right is a list of {@link CleanFileInfo} about the files in the partition that needs to be deleted.
*/
private Pair<Boolean, List<CleanFileInfo>> getFilesToCleanKeepingLatestCommits(String partitionPath,
int commitsRetained, Option<HoodieInstant> earliestCommitToRetain, HoodieCleaningPolicy policy) {
LOG.info("Cleaning " + partitionPath + ", retaining latest " + commitsRetained + " commits. ");
List<CleanFileInfo> deletePaths = new ArrayList<>();
// Collect all the datafiles savepointed by all the savepoints
List<String> savepointedFiles = hoodieTable.getSavepointTimestamps().stream()
.flatMap(this::getSavepointedDataFiles)
.collect(Collectors.toList());
// determine if we have enough commits, to start cleaning.
boolean toDeletePartition = false;
if (commitTimeline.countInstants() > commitsRetained) {
HoodieInstant earliestInstant = earliestCommitToRetain.get();
// all replaced file groups before earliestCommitToRetain are eligible to clean
deletePaths.addAll(getReplacedFilesEligibleToClean(savepointedFiles, partitionPath, earliestCommitToRetain));
// add active files
List<HoodieFileGroup> fileGroups = fileSystemView.getAllFileGroupsStateless(partitionPath).collect(Collectors.toList());
for (HoodieFileGroup fileGroup : fileGroups) {
List<FileSlice> fileSliceList = fileGroup.getAllFileSlices().collect(Collectors.toList());
if (fileSliceList.isEmpty()) {
continue;
}
String lastVersion = fileSliceList.get(0).getBaseInstantTime();
String lastVersionBeforeEarliestCommitToRetain =
getLatestVersionBeforeCommit(fileSliceList, earliestInstant);
// Ensure there are more than 1 version of the file (we only clean old files from updates)
// i.e., always spare the last commit.
for (FileSlice aSlice : fileSliceList) {
Option<HoodieBaseFile> aFile = aSlice.getBaseFile();
String fileCommitTime = aSlice.getBaseInstantTime();
if (isFileSliceExistInSavepointedFiles(aSlice, savepointedFiles)) {
// do not clean up a savepoint data file
continue;
}
if (policy == HoodieCleaningPolicy.KEEP_LATEST_COMMITS) {
// Do not delete the latest commit and also the last commit before the earliest commit we
// are retaining
// The window of commit retain == max query run time. So a query could be running which
// still
// uses this file.
if (fileCommitTime.equals(lastVersion) || (fileCommitTime.equals(lastVersionBeforeEarliestCommitToRetain))) {
// move on to the next file
continue;
}
} else if (policy == HoodieCleaningPolicy.KEEP_LATEST_BY_HOURS) {
// This block corresponds to KEEP_LATEST_BY_HOURS policy
// Do not delete the latest commit.
if (fileCommitTime.equals(lastVersion)) {
// move on to the next file
continue;
}
}
// Always keep the last commit
if (!isFileSliceNeededForPendingMajorOrMinorCompaction(aSlice) && HoodieTimeline
.compareTimestamps(earliestInstant.getTimestamp(), HoodieTimeline.GREATER_THAN, fileCommitTime)) {
// this is a commit, that should be cleaned.
aFile.ifPresent(hoodieDataFile -> {
deletePaths.add(new CleanFileInfo(hoodieDataFile.getPath(), false));
if (hoodieDataFile.getBootstrapBaseFile().isPresent() && config.shouldCleanBootstrapBaseFile()) {
deletePaths.add(new CleanFileInfo(hoodieDataFile.getBootstrapBaseFile().get().getPath(), true));
}
});
// clean the log files for the commits, which contain cdc log files in cdc scenario
// and normal log files for mor tables.
deletePaths.addAll(aSlice.getLogFiles().map(lf -> new CleanFileInfo(lf.getPath().toString(), false))
.collect(Collectors.toList()));
}
}
}
// if there are no valid file groups
// and no pending data files under the partition [IMPORTANT],
// and no subsequent replace commit after the earliest retained commit
// mark it to be deleted
if (fileGroups.isEmpty()
&& !hasPendingFiles(partitionPath)
&& noSubsequentReplaceCommit(earliestInstant.getTimestamp(), partitionPath)) {
toDeletePartition = true;
}
}
return Pair.of(toDeletePartition, deletePaths);
} | 3.68 |
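To make the retention window concrete, here is a standalone illustration (plain Java, not the Hudi API) of which base-file versions survive under KEEP_LATEST_COMMITS: the latest version is always kept, as is the last version written before the earliest retained commit; older versions become eligible for cleaning.

```java
import java.util.Arrays;
import java.util.List;

public class RetentionWindowSketch {
    public static void main(String[] args) {
        // Commit times of the versions of one file group, oldest to newest.
        List<String> versions = Arrays.asList("001", "003", "005", "008", "010");
        String earliestCommitToRetain = "006"; // start of the last N retained commits
        String latest = versions.get(versions.size() - 1);

        // Last version written before the earliest retained commit (kept for long-running queries).
        String lastBeforeRetain = versions.stream()
                .filter(v -> v.compareTo(earliestCommitToRetain) < 0)
                .reduce((a, b) -> b)
                .orElse(null);

        for (String v : versions) {
            boolean keep = v.equals(latest) || v.equals(lastBeforeRetain)
                    || v.compareTo(earliestCommitToRetain) >= 0;
            System.out.println(v + (keep ? " -> keep" : " -> clean"));
        }
        // Prints: 001 -> clean, 003 -> clean, 005 -> keep, 008 -> keep, 010 -> keep
    }
}
```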
zxing_DataMask_unmaskBitMatrix | /**
* <p>Implementations of this method reverse the data masking process applied to a QR Code and
* make its bits ready to read.</p>
*
* @param bits representation of QR Code bits
* @param dimension dimension of QR Code, represented by bits, being unmasked
*/
final void unmaskBitMatrix(BitMatrix bits, int dimension) {
for (int i = 0; i < dimension; i++) {
for (int j = 0; j < dimension; j++) {
if (isMasked(i, j)) {
bits.flip(j, i);
}
}
}
} | 3.68 |
hmily_PropertyName_isValidElement | /**
* Is valid element boolean.
*
* @param elementValue the element value
* @return the boolean
*/
static boolean isValidElement(final CharSequence elementValue) {
for (int i = 0; i < elementValue.length(); i++) {
char ch = elementValue.charAt(i);
if (!isValidChar(ch, i)) {
return false;
}
}
return true;
} | 3.68 |
framework_VRadioButtonGroup_setTabIndex | /**
* Sets the tabulator index for the container element that holds the radio
* buttons. It represents the entire radio button group within the browser's
* focus cycle.
*
* @param tabIndex
* tabulator index for the radio button group
*/
public void setTabIndex(int tabIndex) {
for (Widget anOptionsContainer : getWidget()) {
FocusWidget widget = (FocusWidget) anOptionsContainer;
widget.setTabIndex(tabIndex);
}
} | 3.68 |
hbase_BucketCache_doEvictBlock | /**
* Evict the {@link BlockCacheKey} and {@link BucketEntry} from {@link BucketCache#backingMap} and
* {@link BucketCache#ramCache}. <br/>
* NOTE: When evicting from {@link BucketCache#backingMap}, only the matched {@link BlockCacheKey} and
* {@link BucketEntry} pair can be removed.
* @param cacheKey {@link BlockCacheKey} to evict.
* @param bucketEntry {@link BucketEntry} matched {@link BlockCacheKey} to evict.
* @return true if the block was evicted successfully, false otherwise.
*/
private boolean doEvictBlock(BlockCacheKey cacheKey, BucketEntry bucketEntry,
boolean evictedByEvictionProcess) {
if (!cacheEnabled) {
return false;
}
boolean existedInRamCache = removeFromRamCache(cacheKey);
if (bucketEntry == null) {
bucketEntry = backingMap.get(cacheKey);
}
final BucketEntry bucketEntryToUse = bucketEntry;
if (bucketEntryToUse == null) {
if (existedInRamCache && evictedByEvictionProcess) {
cacheStats.evicted(0, cacheKey.isPrimary());
}
return existedInRamCache;
} else {
return bucketEntryToUse.withWriteLock(offsetLock, () -> {
if (backingMap.remove(cacheKey, bucketEntryToUse)) {
LOG.debug("removed key {} from back map in the evict process", cacheKey);
blockEvicted(cacheKey, bucketEntryToUse, !existedInRamCache, evictedByEvictionProcess);
return true;
}
return false;
});
}
} | 3.68 |
zxing_LuminanceSource_getHeight | /**
* @return The height of the bitmap.
*/
public final int getHeight() {
return height;
} | 3.68 |
morf_SchemaHomology_matches | /**
* Check two objects match, writing a difference if they don't.
*
* @param description The description to log when the match fails
* @param value1 The first value
* @param value2 The second value
*/
private void matches(String description, Object value1, Object value2) {
if (ObjectUtils.notEqual(value1, value2)) {
difference(String.format("%s does not match: [%s] in %s, [%s] in %s", description, value1, schema1Name, value2, schema2Name));
}
} | 3.68 |
hbase_CompressionState_readKey | /**
* Analyze the key and fill the state assuming we know previous state. Uses mark() and reset() in
* ByteBuffer to avoid moving the position.
* <p>
* This method overrides all the fields of this instance, except {@link #prevOffset}, which is
* usually manipulated directly by encoders and decoders.
* @param in Buffer at the position where key starts
* @param keyLength Length of key in bytes
* @param valueLength Length of values in bytes
* @param commonPrefix how many first bytes are common with previous KeyValue
* @param previousState State from previous KeyValue
*/
void readKey(ByteBuffer in, int keyLength, int valueLength, int commonPrefix,
CompressionState previousState) {
this.keyLength = keyLength;
this.valueLength = valueLength;
// fill the state
in.mark(); // mark beginning of key
if (commonPrefix < KeyValue.ROW_LENGTH_SIZE) {
rowLength = in.getShort();
ByteBufferUtils.skip(in, rowLength);
familyLength = in.get();
qualifierLength = keyLength - rowLength - familyLength - KeyValue.KEY_INFRASTRUCTURE_SIZE;
ByteBufferUtils.skip(in, familyLength + qualifierLength);
} else {
rowLength = previousState.rowLength;
familyLength = previousState.familyLength;
qualifierLength = previousState.qualifierLength + keyLength - previousState.keyLength;
ByteBufferUtils.skip(in, (KeyValue.ROW_LENGTH_SIZE + KeyValue.FAMILY_LENGTH_SIZE) + rowLength
+ familyLength + qualifierLength);
}
readTimestamp(in);
type = in.get();
in.reset();
} | 3.68 |
flink_FutureUtils_unsupportedOperationFuture | /**
* Returns an exceptionally completed future with an {@link UnsupportedOperationException}.
*
* @param <T> type of the future
* @return exceptionally completed future
*/
public static <T> CompletableFuture<T> unsupportedOperationFuture() {
return (CompletableFuture<T>) UNSUPPORTED_OPERATION_FUTURE;
} | 3.68 |
flink_FlinkContainersSettings_getNumSlotsPerTaskManager | /**
* Gets the number of slots per task manager.
*
* @return The number of slots per task manager.
*/
public int getNumSlotsPerTaskManager() {
return numSlotsPerTaskManager;
} | 3.68 |
hadoop_NvidiaGPUPluginForRuntimeV2_parseTopo | /**
* A typical sample topo output:
* GPU0 GPU1 GPU2 GPU3 CPU Affinity
* GPU0 X PHB SOC SOC 0-31
* GPU1 PHB X SOC SOC 0-31
* GPU2 SOC SOC X PHB 0-31
* GPU3 SOC SOC PHB X 0-31
*
*
* Legend:
*
* X = Self
* SOC = Connection traversing PCIe as well as the SMP link between
* CPU sockets(e.g. QPI)
* PHB = Connection traversing PCIe as well as a PCIe Host Bridge
* (typically the CPU)
* PXB = Connection traversing multiple PCIe switches
* (without traversing the PCIe Host Bridge)
* PIX = Connection traversing a single PCIe switch
* NV# = Connection traversing a bonded set of # NVLinks
* */
public void parseTopo(String topo,
Map<String, Integer> deviceLinkToWeight) {
String[] lines = topo.split("\n");
int rowMinor;
int colMinor;
String legend;
String tempType;
for (String oneLine : lines) {
oneLine = oneLine.trim();
if (oneLine.isEmpty()) {
continue;
}
// To the end. No more metrics info
if (oneLine.startsWith("Legend")) {
break;
}
// Skip header
if (oneLine.contains("Affinity")) {
continue;
}
String[] tokens = oneLine.split(("\\s+"));
String name = tokens[0];
rowMinor = Integer.parseInt(name.substring(name.lastIndexOf("U") + 1));
for (int i = 1; i < tokens.length; i++) {
tempType = tokens[i];
colMinor = i - 1;
// self, skip
if (tempType.equals("X")) {
continue;
}
if (tempType.equals("SOC") || tempType.equals("SYS")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkCrossCPUSocket,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("PHB") || tempType.equals("NODE")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkSameCPUSocket,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("PXB")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkMultiSwitch,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("PIX")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkSingleSwitch,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV1")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink1,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV2")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink2,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV3")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink3,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV4")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink4,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV5")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink5,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV6")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink6,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV7")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink7,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV8")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink8,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
if (tempType.equals("NV9")) {
populateGraphEdgeWeight(DeviceLinkType.P2PLinkNVLink9,
rowMinor, colMinor, deviceLinkToWeight);
continue;
}
} // end one line handling
}
} | 3.68 |
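A usage sketch for the parser above, feeding it a two-GPU topology string in the sample format. The import of the plugin class is omitted, and the sketch assumes an accessible no-argument constructor; the exact keys and weights in the map depend on `populateGraphEdgeWeight`, so it only prints whatever entries were populated:

```java
import java.util.HashMap;
import java.util.Map;

public class TopoParseExample {
    public static void main(String[] args) {
        String topo =
                "        GPU0  GPU1  CPU Affinity\n"
              + "GPU0     X    PHB   0-31\n"
              + "GPU1    PHB    X    0-31\n";

        NvidiaGPUPluginForRuntimeV2 plugin = new NvidiaGPUPluginForRuntimeV2();
        Map<String, Integer> deviceLinkToWeight = new HashMap<>();
        plugin.parseTopo(topo, deviceLinkToWeight);

        // Each populated entry maps a device link to the weight of its link type
        // (PHB -> "same CPU socket" per the legend in the Javadoc above).
        deviceLinkToWeight.forEach((link, weight) -> System.out.println(link + " -> " + weight));
    }
}
```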
hudi_InternalSchemaUtils_collectTypeChangedCols | /**
* Collect all type changed cols to build a colPosition -> (newColType, oldColType) map.
* Only top-level column changes are collected. E.g., if a is a nested field (record(b int, d long)) and a.b is changed from int to long,
* only a will be collected; a.b will be excluded.
*
* @param schema a type changed internalSchema
* @param oldSchema an old internalSchema.
* @return a map.
*/
public static Map<Integer, Pair<Type, Type>> collectTypeChangedCols(InternalSchema schema, InternalSchema oldSchema) {
Set<Integer> ids = schema.getAllIds();
Set<Integer> otherIds = oldSchema.getAllIds();
Map<Integer, Pair<Type, Type>> result = new HashMap<>();
ids.stream().filter(f -> otherIds.contains(f)).forEach(f -> {
if (!schema.findType(f).equals(oldSchema.findType(f))) {
String[] fieldNameParts = schema.findFullName(f).split("\\.");
String[] otherFieldNameParts = oldSchema.findFullName(f).split("\\.");
String parentName = fieldNameParts[0];
String otherParentName = otherFieldNameParts[0];
if (fieldNameParts.length == otherFieldNameParts.length && schema.findIdByName(parentName) == oldSchema.findIdByName(otherParentName)) {
int index = schema.findIdByName(parentName);
int position = schema.getRecord().fields().stream().map(s -> s.fieldId()).collect(Collectors.toList()).indexOf(index);
if (!result.containsKey(position)) {
result.put(position, Pair.of(schema.findType(parentName), oldSchema.findType(otherParentName)));
}
}
}
});
return result;
} | 3.68 |
hmily_HmilyXaException_getMessage | /**
* Gets message.
*
* @param xaException the xa exception
* @return the message
*/
public static String getMessage(final XAException xaException) {
int errorCode = xaException.errorCode;
String s = ERROR_CODES.get(errorCode);
return "errorCode:" + errorCode + ":" + s;
} | 3.68 |
hbase_FutureUtils_get | /**
* A helper class for getting the result of a Future with timeout, and convert the error to an
* {@link IOException}.
*/
public static <T> T get(Future<T> future, long timeout, TimeUnit unit) throws IOException {
try {
return future.get(timeout, unit);
} catch (InterruptedException e) {
throw (IOException) new InterruptedIOException().initCause(e);
} catch (ExecutionException e) {
throw rethrow(e.getCause());
} catch (TimeoutException e) {
throw new TimeoutIOException(e);
}
} | 3.68 |
open-banking-gateway_HbciAuthorizationPossibleErrorHandler_handlePossibleAuthorizationError | /**
* Swallows retryable (like wrong password) authorization exceptions.
* @param tryAuthorize Authorization function to call
* @param onFail Fallback function to call if retryable exception occurred.
*/
public void handlePossibleAuthorizationError(Runnable tryAuthorize, Consumer<MultibankingException> onFail) {
try {
tryAuthorize.run();
} catch (MultibankingException ex) {
rethrowIfNotAuthorizationErrorCode(ex);
onFail.accept(ex);
}
} | 3.68 |
flink_DataStream_countWindowAll | /**
* Windows this {@code DataStream} into sliding count windows.
*
* <p>Note: This operation is inherently non-parallel since all elements have to pass through
* the same operator instance.
*
* @param size The size of the windows in number of elements.
* @param slide The slide interval in number of elements.
*/
public AllWindowedStream<T, GlobalWindow> countWindowAll(long size, long slide) {
return windowAll(GlobalWindows.create())
.evictor(CountEvictor.of(size))
.trigger(CountTrigger.of(slide));
} | 3.68 |
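A usage sketch for sliding count windows (assuming a recent Flink DataStream API); the job sums a window of 100 elements, evaluated each time 10 new elements arrive:

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CountWindowAllExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.fromSequence(0, 999)
                // Evaluate a window of the latest 100 elements every 10 elements (non-parallel).
                .countWindowAll(100, 10)
                .reduce((a, b) -> a + b)
                .print();

        env.execute("count-window-all-example");
    }
}
```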
hadoop_MappingRuleResult_createSkipResult | /**
* Generator method for skip results.
* @return The generated MappingRuleResult
*/
public static MappingRuleResult createSkipResult() {
return RESULT_SKIP;
} | 3.68 |
hbase_MutableRegionInfo_getRegionName | /**
* @return the regionName as an array of bytes.
* @see #getRegionNameAsString()
*/
@Override
public byte[] getRegionName() {
return regionName;
} | 3.68 |
framework_VScrollTable_removeRowFromUnsentSelectionRanges | /**
* Removes a key from a range if the key is found in a selected range
*
* @param row
* The row whose key should be removed
*/
private void removeRowFromUnsentSelectionRanges(VScrollTableRow row) {
Collection<SelectionRange> newRanges = null;
for (Iterator<SelectionRange> iterator = selectedRowRanges
.iterator(); iterator.hasNext();) {
SelectionRange range = iterator.next();
if (range.inRange(row)) {
// Split the range if given row is in range
Collection<SelectionRange> splitranges = range.split(row);
if (newRanges == null) {
newRanges = new ArrayList<SelectionRange>();
}
newRanges.addAll(splitranges);
iterator.remove();
}
}
if (newRanges != null) {
selectedRowRanges.addAll(newRanges);
}
} | 3.68 |
hudi_HoodieLogBlock_getRecordPositions | /**
* @return A {@link Roaring64NavigableMap} bitmap containing the record positions in long type
* if the {@link HeaderMetadataType#RECORD_POSITIONS} block header exists; otherwise, an empty
* {@link Roaring64NavigableMap} bitmap.
* @throws IOException upon I/O error.
*/
public Roaring64NavigableMap getRecordPositions() throws IOException {
if (!logBlockHeader.containsKey(HeaderMetadataType.RECORD_POSITIONS)) {
return new Roaring64NavigableMap();
}
return LogReaderUtils.decodeRecordPositionsHeader(logBlockHeader.get(HeaderMetadataType.RECORD_POSITIONS));
} | 3.68 |
framework_SQLContainer_removeRowIdChangeListener | /**
* Removes a RowIdChangeListener from the QueryDelegate.
*
* @param listener
*/
public void removeRowIdChangeListener(RowIdChangeListener listener) {
if (queryDelegate instanceof QueryDelegate.RowIdChangeNotifier) {
((QueryDelegate.RowIdChangeNotifier) queryDelegate)
.removeListener(listener);
}
} | 3.68 |
Activiti_BpmnActivityBehavior_performDefaultOutgoingBehavior | /**
* Performs the default outgoing BPMN 2.0 behavior, which is having parallel paths of executions for the outgoing sequence flow.
* <p>
* More precisely: every sequence flow that has a condition which evaluates to true (or which doesn't have a condition) is selected for continuation of the process instance. If multiple sequence
* flows are selected, multiple parallel paths of execution are created.
*/
public void performDefaultOutgoingBehavior(ExecutionEntity activityExecution) {
performOutgoingBehavior(activityExecution,
true,
false);
} | 3.68 |
pulsar_Metrics_create | /**
* Creates a metrics object with the dimensions map immutable.
*
* @param dimensionMap
* @return
*/
public static Metrics create(Map<String, String> dimensionMap) {
// make the dimensions map unmodifiable and immutable;
Map<String, String> map = new TreeMap<>();
map.putAll(dimensionMap);
return new Metrics(Collections.unmodifiableMap(map));
} | 3.68 |
hadoop_HistoryServerStateStoreService_serviceStop | /**
* Shutdown the state storage.
*
* @throws IOException
*/
@Override
public void serviceStop() throws IOException {
closeStorage();
} | 3.68 |
flink_WatermarkStrategy_noWatermarks | /**
* Creates a watermark strategy that generates no watermarks at all. This may be useful in
* scenarios that do pure processing-time based stream processing.
*/
static <T> WatermarkStrategy<T> noWatermarks() {
return (ctx) -> new NoWatermarksGenerator<>();
} | 3.68 |
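A usage sketch (assuming Flink's bundled `NumberSequenceSource` is available); pure processing-time pipelines can pass the strategy directly to `fromSource`:

```java
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.connector.source.lib.NumberSequenceSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class NoWatermarksExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // No watermarks are emitted: suitable for pure processing-time pipelines.
        env.fromSource(
                        new NumberSequenceSource(1, 1_000),
                        WatermarkStrategy.noWatermarks(),
                        "numbers")
                .print();

        env.execute("no-watermarks-example");
    }
}
```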
dubbo_MulticastRegistry_clean | /**
* Remove the expired providers; this happens only when the "clean" parameter is true.
*/
private void clean() {
if (admin) {
for (Set<URL> providers : new HashSet<Set<URL>>(received.values())) {
for (URL url : new HashSet<URL>(providers)) {
if (isExpired(url)) {
if (logger.isWarnEnabled()) {
logger.warn(REGISTRY_SOCKET_EXCEPTION, "", "", "Clean expired provider " + url);
}
doUnregister(url);
}
}
}
}
} | 3.68 |
hbase_VisibilityUtils_getDataToWriteToZooKeeper | /**
* Creates the labels data to be written to zookeeper.
* @return Bytes form of labels and their ordinal details to be written to zookeeper.
*/
public static byte[] getDataToWriteToZooKeeper(Map<String, Integer> existingLabels) {
VisibilityLabelsRequest.Builder visReqBuilder = VisibilityLabelsRequest.newBuilder();
for (Entry<String, Integer> entry : existingLabels.entrySet()) {
VisibilityLabel.Builder visLabBuilder = VisibilityLabel.newBuilder();
visLabBuilder.setLabel(ByteString.copyFrom(Bytes.toBytes(entry.getKey())));
visLabBuilder.setOrdinal(entry.getValue());
visReqBuilder.addVisLabel(visLabBuilder.build());
}
return ProtobufUtil.prependPBMagic(visReqBuilder.build().toByteArray());
} | 3.68 |
hbase_TerminatedWrapper_encode | /**
* Write instance {@code val} into buffer {@code dst}.
* @throws IllegalArgumentException when the encoded representation of {@code val} contains the
* {@code term} sequence.
*/
@Override
public int encode(PositionedByteRange dst, T val) {
final int start = dst.getPosition();
int written = wrapped.encode(dst, val);
PositionedByteRange b = dst.shallowCopy();
b.setLength(dst.getPosition());
b.setPosition(start);
if (-1 != terminatorPosition(b)) {
dst.setPosition(start);
throw new IllegalArgumentException("Encoded value contains terminator sequence.");
}
dst.put(term);
return written + term.length;
} | 3.68 |
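A simplified sketch of the validation idea outside the HBase PositionedByteRange API: after encoding, the freshly written region is scanned for the terminator bytes and the value is rejected if they occur. The helper below is hypothetical and operates on a plain byte array.

import java.util.Arrays;

public class TerminatorScan {
  /** Returns true if {@code term} occurs anywhere in buf[start, start + len). */
  static boolean containsTerminator(byte[] buf, int start, int len, byte[] term) {
    for (int i = start; i + term.length <= start + len; i++) {
      if (Arrays.equals(Arrays.copyOfRange(buf, i, i + term.length), term)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    byte[] term = {0x00};
    byte[] encoded = {0x41, 0x00, 0x42};  // payload that happens to contain the terminator
    if (containsTerminator(encoded, 0, encoded.length, term)) {
      System.out.println("Rejecting value: encoded form contains the terminator sequence.");
    }
  }
}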
zxing_WifiConfigManager_changeNetworkWPA | // Adding a WPA or WPA2 network
private static void changeNetworkWPA(WifiManager wifiManager, WifiParsedResult wifiResult) {
WifiConfiguration config = changeNetworkCommon(wifiResult);
        // Hex passwords that are 64 characters long are not to be quoted.
config.preSharedKey = quoteNonHex(wifiResult.getPassword(), 64);
config.allowedAuthAlgorithms.set(WifiConfiguration.AuthAlgorithm.OPEN);
config.allowedProtocols.set(WifiConfiguration.Protocol.WPA); // For WPA
config.allowedProtocols.set(WifiConfiguration.Protocol.RSN); // For WPA2
config.allowedKeyManagement.set(WifiConfiguration.KeyMgmt.WPA_PSK);
config.allowedKeyManagement.set(WifiConfiguration.KeyMgmt.WPA_EAP);
config.allowedPairwiseCiphers.set(WifiConfiguration.PairwiseCipher.TKIP);
config.allowedPairwiseCiphers.set(WifiConfiguration.PairwiseCipher.CCMP);
config.allowedGroupCiphers.set(WifiConfiguration.GroupCipher.TKIP);
config.allowedGroupCiphers.set(WifiConfiguration.GroupCipher.CCMP);
updateNetwork(wifiManager, config);
} | 3.68 |
morf_HumanReadableStatementHelper_generateAnalyseTableFromString | /**
* Generates human-readable "Analyse Table" string.
*
* @param tableName The table to analyse.
*/
public static String generateAnalyseTableFromString(String tableName) {
return String.format("Analyse table %s", tableName);
} | 3.68 |
framework_Form_setInvalidAllowed | /**
 * Should the validatable object accept invalid values.
*
* @see Validatable#setInvalidAllowed(boolean)
*/
@Override
public void setInvalidAllowed(boolean invalidValueAllowed)
throws UnsupportedOperationException {
throw new UnsupportedOperationException();
} | 3.68 |
morf_SqlDialect_prepareIntegerParameter | /**
* Overridable behaviour for mapping an integer parameter to a prepared statement.
*
* @param statement The statement.
* @param integerVal The integer value.
* @param parameter The parameter to map to.
* @throws SQLException If an exception occurs setting the parameter.
*/
protected void prepareIntegerParameter(NamedParameterPreparedStatement statement, Integer integerVal, SqlParameter parameter) throws SQLException {
if (integerVal == null) {
statement.setObject(parameter, null);
} else {
statement.setInt(parameter, integerVal);
}
} | 3.68 |
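The same null-versus-value branch expressed against the plain JDBC PreparedStatement API, as a minimal sketch using a positional parameter index; the table and column names are made up.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Types;

public class NullSafeBinding {
  static void bindInteger(PreparedStatement statement, int index, Integer value) throws SQLException {
    if (value == null) {
      // JDBC needs an explicit SQL type when binding NULL.
      statement.setNull(index, Types.INTEGER);
    } else {
      statement.setInt(index, value);
    }
  }

  static void insertAmount(Connection connection, Integer amount) throws SQLException {
    try (PreparedStatement ps = connection.prepareStatement(
        "INSERT INTO example_table (amount) VALUES (?)")) {
      bindInteger(ps, 1, amount);
      ps.executeUpdate();
    }
  }
}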
flink_ExternalResourceUtils_createStaticExternalResourceInfoProvider | /**
 * Instantiate {@link StaticExternalResourceInfoProvider} for all enabled external resources.
*/
@VisibleForTesting
static ExternalResourceInfoProvider createStaticExternalResourceInfoProvider(
Map<String, Long> externalResourceAmountMap,
Map<String, ExternalResourceDriver> externalResourceDrivers) {
final Map<String, Set<? extends ExternalResourceInfo>> externalResources = new HashMap<>();
for (Map.Entry<String, ExternalResourceDriver> externalResourceDriverEntry :
externalResourceDrivers.entrySet()) {
final String resourceName = externalResourceDriverEntry.getKey();
final ExternalResourceDriver externalResourceDriver =
externalResourceDriverEntry.getValue();
if (externalResourceAmountMap.containsKey(resourceName)) {
try {
final Set<? extends ExternalResourceInfo> externalResourceInfos;
externalResourceInfos =
externalResourceDriver.retrieveResourceInfo(
externalResourceAmountMap.get(resourceName));
externalResources.put(resourceName, externalResourceInfos);
} catch (Exception e) {
LOG.warn(
"Failed to retrieve information of external resource {}.",
resourceName,
e);
}
} else {
LOG.warn("Could not found legal amount configuration for {}.", resourceName);
}
}
return new StaticExternalResourceInfoProvider(externalResources);
} | 3.68 |
rocketmq-connect_WrapperStatusListener_onResume | /**
* Invoked after the task has been resumed.
*
* @param id The id of the task
*/
@Override
public void onResume(ConnectorTaskId id) {
managementService.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation()));
} | 3.68 |
framework_SingleSelectionModelImpl_setSelectedFromClient | /**
 * Sets the selection based on a client request. Does nothing if the select
 * component is {@linkplain SingleSelect#isReadOnly() read-only} or if the selection
 * would not change. Otherwise updates the selection and fires a selection
* change event with {@code isUserOriginated == true}.
*
* @param key
* the key of the item to select or {@code null} to clear
* selection
*/
protected void setSelectedFromClient(String key) {
if (!isUserSelectionAllowed()) {
throw new IllegalStateException("Client tried to update selection"
+ " although user selection is disallowed");
}
T item = getData(key);
if (isSelected(item)) {
return;
}
T oldSelection = selectedItem;
doSetSelected(item);
fireEvent(new SingleSelectionEvent<>(getGrid(), asSingleSelect(),
oldSelection, true));
} | 3.68 |
morf_TableOutputter_tableHasUnsupportedColumns | /**
* Indicates if the table has a column with a column type which we can't
* output to a spreadsheet.
*
* @param table The table metadata.
 * @return true if at least one column has a type that cannot be output to a spreadsheet
*/
boolean tableHasUnsupportedColumns(Table table) {
return Iterables.any(table.columns(), new Predicate<Column>() {
@Override
public boolean apply(Column column) {
return !supportedDataTypes.contains(column.getType());
}
});
} | 3.68 |
flink_RocksDBPriorityQueueConfig_getPriorityQueueStateType | /**
* Gets the type of the priority queue state. It will fall back to the default value if it is
* not explicitly set.
*/
public PriorityQueueStateType getPriorityQueueStateType() {
return priorityQueueStateType == null
? TIMER_SERVICE_FACTORY.defaultValue()
: priorityQueueStateType;
} | 3.68 |
framework_AbstractInMemoryContainer_setAllItemIds | /**
* Internal helper method to set the internal list of all item identifiers.
 * Should not be used outside this class except for implementing clone();
 * it may disappear in future versions.
*
* @param allItemIds
*/
@Deprecated
protected void setAllItemIds(List<ITEMIDTYPE> allItemIds) {
this.allItemIds = allItemIds;
} | 3.68 |
dubbo_ZookeeperRegistry_fetchLatestAddresses | /**
 * When the ZooKeeper connection recovers from a connection loss, the registry needs to fetch the latest provider list.
 * Re-registering the watcher is only a side effect and is not mandatory.
*/
private void fetchLatestAddresses() {
// subscribe
Map<URL, Set<NotifyListener>> recoverSubscribed = new HashMap<>(getSubscribed());
if (!recoverSubscribed.isEmpty()) {
if (logger.isInfoEnabled()) {
logger.info("Fetching the latest urls of " + recoverSubscribed.keySet());
}
for (Map.Entry<URL, Set<NotifyListener>> entry : recoverSubscribed.entrySet()) {
URL url = entry.getKey();
for (NotifyListener listener : entry.getValue()) {
removeFailedSubscribed(url, listener);
addFailedSubscribed(url, listener);
}
}
}
} | 3.68 |
hadoop_NameCache_size | /**
* Size of the cache
* @return Number of names stored in the cache
*/
int size() {
return cache.size();
} | 3.68 |
flink_TopologyGraph_calculateMaximumDistance | /**
* Calculate the maximum distance of the currently added nodes from the nodes without inputs.
 * The smallest distance is 0 (held exactly by the nodes without inputs), and the distance of
 * every other node is the largest distance among its inputs plus 1.
*
* <p>Distance of a node is defined as the number of edges one needs to go through from the
* nodes without inputs to this node.
*/
Map<ExecNode<?>, Integer> calculateMaximumDistance() {
Map<ExecNode<?>, Integer> result = new HashMap<>();
Map<TopologyNode, Integer> inputsVisitedMap = new HashMap<>();
Queue<TopologyNode> queue = new LinkedList<>();
for (TopologyNode node : nodes.values()) {
if (node.inputs.size() == 0) {
queue.offer(node);
}
}
while (!queue.isEmpty()) {
TopologyNode node = queue.poll();
int dist = -1;
for (TopologyNode input : node.inputs) {
dist =
Math.max(
dist,
Preconditions.checkNotNull(
result.get(input.execNode),
"The distance of an input node is not calculated. This is a bug."));
}
dist++;
result.put(node.execNode, dist);
for (TopologyNode output : node.outputs) {
int inputsVisited =
inputsVisitedMap.compute(output, (k, v) -> v == null ? 1 : v + 1);
if (inputsVisited == output.inputs.size()) {
queue.offer(output);
}
}
}
return result;
} | 3.68 |
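A self-contained sketch of the same longest-distance computation over a DAG, using string node ids instead of ExecNode; it illustrates the Kahn-style traversal only and is not the Flink class itself.

import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Queue;

public class MaxDistance {
  /** edges maps each node to its downstream nodes; inputs maps each node to its upstream nodes. */
  static Map<String, Integer> maximumDistance(Map<String, List<String>> edges,
                                              Map<String, List<String>> inputs) {
    Map<String, Integer> distance = new HashMap<>();
    Map<String, Integer> visitedInputs = new HashMap<>();
    Queue<String> queue = new ArrayDeque<>();
    inputs.forEach((node, in) -> { if (in.isEmpty()) queue.offer(node); });

    while (!queue.isEmpty()) {
      String node = queue.poll();
      int dist = 0;
      for (String input : inputs.get(node)) {
        dist = Math.max(dist, distance.get(input) + 1);
      }
      distance.put(node, dist);
      for (String output : edges.getOrDefault(node, List.of())) {
        int seen = visitedInputs.merge(output, 1, Integer::sum);
        if (seen == inputs.get(output).size()) {
          queue.offer(output);  // all inputs resolved, so the distance is now final
        }
      }
    }
    return distance;
  }

  public static void main(String[] args) {
    Map<String, List<String>> edges = Map.of("a", List.of("b"), "b", List.of("c"), "c", List.of());
    Map<String, List<String>> inputs = Map.of("a", List.of(), "b", List.of("a"), "c", List.of("b"));
    System.out.println(maximumDistance(edges, inputs));  // a=0, b=1, c=2 (map order may vary)
  }
}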
hadoop_FSDataOutputStreamBuilder_create | /**
* Create an FSDataOutputStream at the specified path.
*
 * @return the builder, of generic type B.
*/
public B create() {
flags.add(CreateFlag.CREATE);
return getThisBuilder();
} | 3.68 |
hbase_CompositeImmutableSegment_getHeapSize | /** Returns the heap size of this segment. */
@Override
public long getHeapSize() {
long result = 0;
for (ImmutableSegment s : segments) {
result += s.getHeapSize();
}
return result;
} | 3.68 |
flink_BinaryRowData_anyNull | /** The bit is 1 when the field is null. Default is 0. */
@Override
public boolean anyNull() {
// Skip the header.
if ((segments[0].getLong(0) & FIRST_BYTE_ZERO) != 0) {
return true;
}
for (int i = 8; i < nullBitsSizeInBytes; i += 8) {
if (segments[0].getLong(i) != 0) {
return true;
}
}
return false;
} | 3.68 |
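A stripped-down sketch of the same null-tracking trick with plain long words: null flags are packed into 64-bit words, the header byte of the first word is masked out, and any remaining set bit means some field is null. The mask below assumes the header occupies the low byte, which mirrors the idea rather than the exact BinaryRowData layout.

public class NullBits {
  // Clear the low 8 bits (assumed header byte) of the first word before testing.
  private static final long FIRST_BYTE_ZERO = ~0xFFL;

  static boolean anyNull(long[] nullBitWords) {
    if ((nullBitWords[0] & FIRST_BYTE_ZERO) != 0) {
      return true;
    }
    for (int i = 1; i < nullBitWords.length; i++) {
      if (nullBitWords[i] != 0) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    long[] words = new long[2];
    System.out.println(anyNull(words));  // false: no null bits set
    words[1] |= 1L << 3;                 // mark some field as null
    System.out.println(anyNull(words));  // true
  }
}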
pulsar_ClientCredentialsFlow_fromParameters | /**
* Constructs a {@link ClientCredentialsFlow} from configuration parameters.
 * @param params the configuration parameters; the issuer URL and private key are required, scope and audience are optional
 * @return the constructed {@link ClientCredentialsFlow}
*/
public static ClientCredentialsFlow fromParameters(Map<String, String> params) {
URL issuerUrl = parseParameterUrl(params, CONFIG_PARAM_ISSUER_URL);
String privateKeyUrl = parseParameterString(params, CONFIG_PARAM_KEY_FILE);
// These are optional parameters, so we only perform a get
String scope = params.get(CONFIG_PARAM_SCOPE);
String audience = params.get(CONFIG_PARAM_AUDIENCE);
return ClientCredentialsFlow.builder()
.issuerUrl(issuerUrl)
.audience(audience)
.privateKey(privateKeyUrl)
.scope(scope)
.build();
} | 3.68 |
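A hedged usage sketch of fromParameters; the literal key strings below ("issuerUrl", "privateKey", "audience", "scope") are assumed values for the CONFIG_PARAM_* constants and may not match the real ones, and the URLs are placeholders.

import java.util.HashMap;
import java.util.Map;

public class ClientCredentialsParamsSketch {
  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();
    params.put("issuerUrl", "https://auth.example.com");              // assumed required key
    params.put("privateKey", "file:///etc/pulsar/credentials.json");  // assumed required key file location
    params.put("audience", "urn:example:api");                        // assumed optional key
    params.put("scope", "api://example/.default");                    // assumed optional key

    // ClientCredentialsFlow flow = ClientCredentialsFlow.fromParameters(params);
    // The flow would then drive the OAuth 2.0 client-credentials token exchange.
    System.out.println(params);
  }
}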
framework_DateField_getTimeZone | /**
* Gets the time zone used by this field. The time zone is used to convert
* the absolute time in a Date object to a logical time displayed in the
 * selector and to convert the selected time back to a Date object.
*
* If {@code null} is returned, the current default time zone returned by
* {@code TimeZone.getDefault()} is used.
*
* @return the current time zone
*/
public TimeZone getTimeZone() {
return timeZone;
} | 3.68 |
flink_PartitionCommitPolicyFactory_createPolicyChain | /** Create a policy chain. */
public List<PartitionCommitPolicy> createPolicyChain(
ClassLoader cl, Supplier<FileSystem> fsSupplier) {
if (policyKind == null) {
return Collections.emptyList();
}
String[] policyStrings = policyKind.split(",");
return Arrays.stream(policyStrings)
.map(
name -> {
switch (name.toLowerCase()) {
case PartitionCommitPolicy.METASTORE:
return new MetastoreCommitPolicy();
case PartitionCommitPolicy.SUCCESS_FILE:
return new SuccessFileCommitPolicy(
successFileName, fsSupplier.get());
case PartitionCommitPolicy.CUSTOM:
try {
if (parameters != null && !parameters.isEmpty()) {
String[] paramStrings =
parameters.toArray(new String[0]);
Class<?>[] classes = new Class<?>[parameters.size()];
for (int i = 0; i < parameters.size(); i++) {
classes[i] = String.class;
}
return (PartitionCommitPolicy)
cl.loadClass(customClass)
.getConstructor(classes)
.newInstance((Object[]) paramStrings);
} else {
return (PartitionCommitPolicy)
cl.loadClass(customClass).newInstance();
}
} catch (ClassNotFoundException
| IllegalAccessException
| InstantiationException
| NoSuchMethodException
| InvocationTargetException e) {
throw new RuntimeException(
"Can not create new instance for custom class from "
+ customClass,
e);
}
default:
throw new UnsupportedOperationException(
"Unsupported policy: " + name);
}
})
.collect(Collectors.toList());
} | 3.68 |
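A minimal standalone sketch of the reflective branch: loading a class by name and invoking a constructor whose parameters are all String. The Policy interface and LoggingPolicy class below are hypothetical stand-ins for the commit-policy types.

public class ReflectiveFactory {
  interface Policy {
    void commit(String partition);
  }

  public static class LoggingPolicy implements Policy {
    private final String prefix;
    public LoggingPolicy(String prefix) { this.prefix = prefix; }
    @Override public void commit(String partition) {
      System.out.println(prefix + " committed " + partition);
    }
  }

  static Policy create(ClassLoader cl, String className, String... args) throws Exception {
    Class<?>[] parameterTypes = new Class<?>[args.length];
    java.util.Arrays.fill(parameterTypes, String.class);
    // Every custom-policy parameter is passed through as a String constructor argument.
    return (Policy) cl.loadClass(className)
        .getConstructor(parameterTypes)
        .newInstance((Object[]) args);
  }

  public static void main(String[] args) throws Exception {
    Policy p = create(ReflectiveFactory.class.getClassLoader(),
        "ReflectiveFactory$LoggingPolicy", "demo");
    p.commit("dt=2024-01-01");
  }
}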
morf_OracleDialect_dropSequence | /**
* Returns a SQL statement to safely drop a sequence, if it exists.
*
* @param table Table for which the sequence should be dropped.
* @return SQL string.
*/
private String dropSequence(Table table) {
String sequenceName = sequenceName(table.getName());
return new StringBuilder("DECLARE \n")
.append(" query CHAR(255); \n")
.append("BEGIN \n")
.append(" select queryField into query from SYS.DUAL D left outer join (\n")
.append(" select concat('drop sequence ").append(schemaNamePrefix()).append("', sequence_name) as queryField \n")
.append(" from ALL_SEQUENCES S \n")
.append(" where S.sequence_owner='").append(getSchemaName().toUpperCase()).append("' AND S.sequence_name = '").append(sequenceName.toUpperCase()).append("' \n")
.append(" ) on 1 = 1; \n")
.append(" IF query is not null THEN \n")
.append(" execute immediate query; \n")
.append(" END IF; \n")
.append("END;")
.toString();
} | 3.68 |
flink_InternalSerializers_create | /**
* Creates a {@link TypeSerializer} for internal data structures of the given {@link RowType}.
*/
public static <T> RowDataSerializer create(RowType type) {
return (RowDataSerializer) createInternal(type);
} | 3.68 |
framework_Table_getCurrentPageFirstItemIndex | /**
* Getter for property currentPageFirstItem.
*
 * @return the value of property currentPageFirstItem.
*/
public int getCurrentPageFirstItemIndex() {
return currentPageFirstItemIndex;
} | 3.68 |
hbase_ReplicationSource_getTotalReplicatedEdits | // Visible for testing purposes
public long getTotalReplicatedEdits() {
return totalReplicatedEdits.get();
} | 3.68 |
AreaShop_AreaShop_error | /**
* Print an error to the console.
* @param message The message to print
*/
public static void error(Object... message) {
AreaShop.getInstance().getLogger().severe(StringUtils.join(message, " "));
} | 3.68 |
hadoop_JsonSerialization_getName | /**
* Get the simple name of the class type to be marshalled.
* @return the name of the class being marshalled
*/
public String getName() {
return classType.getSimpleName();
} | 3.68 |
hbase_RegionCoprocessorHost_postOpen | /**
* Invoked after a region open
*/
public void postOpen() {
if (coprocEnvironments.isEmpty()) {
return;
}
try {
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postOpen(this);
}
});
} catch (IOException e) {
LOG.warn(e.toString(), e);
}
} | 3.68 |