name | code_snippet | score |
---|---|---|
flink_TableEnvironmentInternal_explainInternal | /**
* Returns the AST of this table and the execution plan to compute the result of this table.
*
* @param operations The operations to be explained.
* @param extraDetails The extra explain details which the explain result should include, e.g.
* estimated cost, changelog mode for streaming
* @return AST and the execution plan.
*/
default String explainInternal(List<Operation> operations, ExplainDetail... extraDetails) {
return explainInternal(operations, ExplainFormat.TEXT, extraDetails);
} | 3.68 |
querydsl_SurfaceExpression_pointOnSurface | /**
* A Point guaranteed to be on this Surface.
*
* @return point on surface
*/
public PointExpression<Point> pointOnSurface() {
if (pointOnSurface == null) {
pointOnSurface = GeometryExpressions.pointOperation(SpatialOps.POINT_ON_SURFACE, mixin);
}
return pointOnSurface;
} | 3.68 |
flink_RocksDBNativeMetricOptions_fromConfig | /** Creates a {@link RocksDBNativeMetricOptions} based on an external configuration. */
public static RocksDBNativeMetricOptions fromConfig(ReadableConfig config) {
RocksDBNativeMetricOptions options = new RocksDBNativeMetricOptions();
configurePropertyMetrics(options, config);
configureStatisticsMetrics(options, config);
return options;
} | 3.68 |
hbase_HBaseServerBase_getStartcode | /** Returns time stamp in millis of when this server was started */
public long getStartcode() {
return this.startcode;
} | 3.68 |
hadoop_MRProtoUtils_convertToProtoFormat | /*
* TaskType
*/
public static TaskTypeProto convertToProtoFormat(TaskType e) {
return TaskTypeProto.valueOf(e.name());
} | 3.68 |
hadoop_InMemoryConfigurationStore_getLogs | /**
* Configuration mutations are not logged (i.e. not persisted) by this store; they
* are confirmed directly. As such, a list of persisted configuration mutations
* does not exist.
* @return null Configuration mutation list not applicable for this store.
*/
@Override
protected LinkedList<LogMutation> getLogs() {
// Unimplemented.
return null;
} | 3.68 |
hbase_CachedClusterId_getClusterId | /**
* Returns a cached copy of the cluster ID. null if the cache is not populated.
*/
private String getClusterId() {
if (!isClusterIdSet.get()) {
return null;
}
// It is ok to read without a lock since clusterId is immutable once set.
return clusterId.toString();
} | 3.68 |
rocketmq-connect_BufferedRecords_add | /**
* Adds a record to the buffer, flushing as needed.
*
* @param record the record to add
* @return any records that were flushed as a side effect of adding this record
* @throws SQLException if flushing the buffered records fails
*/
public List<ConnectRecord> add(ConnectRecord record) throws SQLException {
recordValidator.validate(record);
final List<ConnectRecord> flushed = new ArrayList<>();
boolean schemaChanged = false;
if (!Objects.equals(keySchema, record.getKeySchema())) {
keySchema = record.getKeySchema();
schemaChanged = true;
}
if (isNull(record.getSchema())) {
// For deletes, value and optionally value schema come in as null.
// We don't want to treat this as a schema change if the key schema is the same;
// otherwise we flush unnecessarily.
if (config.isDeleteEnabled()) {
deletesInBatch = true;
}
} else if (Objects.equals(schema, record.getSchema())) {
if (config.isDeleteEnabled() && deletesInBatch) {
// flush so an insert after a delete of same record isn't lost
flushed.addAll(flush());
}
} else {
// value schema is not null and has changed. This is a real schema change.
schema = record.getSchema();
schemaChanged = true;
}
if (schemaChanged) {
// Each batch needs to have the same schemas, so get the buffered records out
flushed.addAll(flush());
// re-initialize everything that depends on the record schema
final SchemaPair schemaPair = new SchemaPair(
record.getKeySchema(),
record.getSchema(),
record.getExtensions()
);
// extract field
fieldsMetadata = FieldsMetadata.extract(
tableId.tableName(),
config.pkMode,
config.getPkFields(),
config.getFieldsWhitelist(),
schemaPair
);
}
// set deletesInBatch if schema value is not null
if (isNull(record.getData()) && config.isDeleteEnabled()) {
deletesInBatch = true;
}
records.add(record);
if (records.size() >= config.getBatchSize()) {
flushed.addAll(flush());
}
return flushed;
} | 3.68 |
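The add() above buffers records and flushes whenever the value schema changes or the batch fills up. Below is a minimal, JDK-only sketch of that buffering pattern; the Msg type and flush() behaviour are invented for illustration and are not rocketmq-connect's ConnectRecord/BufferedRecords API.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

public class BufferingSketch {

    record Msg(String schema, String payload) {}

    private final int batchSize;
    private final List<Msg> buffer = new ArrayList<>();
    private String currentSchema;

    BufferingSketch(int batchSize) {
        this.batchSize = batchSize;
    }

    // Returns any records that had to be flushed while adding this one.
    List<Msg> add(Msg record) {
        List<Msg> flushed = new ArrayList<>();
        if (!Objects.equals(currentSchema, record.schema())) {
            // Batches must be schema-homogeneous: flush what we have, then switch schema.
            flushed.addAll(flush());
            currentSchema = record.schema();
        }
        buffer.add(record);
        if (buffer.size() >= batchSize) {
            flushed.addAll(flush());
        }
        return flushed;
    }

    List<Msg> flush() {
        List<Msg> out = new ArrayList<>(buffer);
        buffer.clear();
        return out;
    }

    public static void main(String[] args) {
        BufferingSketch sketch = new BufferingSketch(2);
        System.out.println(sketch.add(new Msg("v1", "a"))); // nothing flushed yet
        System.out.println(sketch.add(new Msg("v1", "b"))); // batch full: both v1 records flushed
        System.out.println(sketch.add(new Msg("v2", "c"))); // schema changed, but buffer was already empty
    }
}
```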
flink_RestClientConfiguration_getIdlenessTimeout | /** {@link RestOptions#IDLENESS_TIMEOUT}. */
public long getIdlenessTimeout() {
return idlenessTimeout;
} | 3.68 |
flink_FileIOChannel_getPath | /** Returns the path to the underlying temporary file. */
public String getPath() {
return path.getAbsolutePath();
} | 3.68 |
rocketmq-connect_ExpressionBuilder_append | /**
* Append to this builder's expression the specified object surrounded by quotes. If the object
* is {@link Expressable}, then this builder delegates to the object's
* {@link Expressable#appendTo(ExpressionBuilder, boolean)} method. Otherwise, the string
* representation of the object is appended to the expression.
*
* @param obj the object to be appended
* @param transform the transform that should be used on the supplied object to obtain the
* representation that is appended to the expression; may be null
* @param <T> the type of object to transform before appending.
* @return this builder to enable methods to be chained; never null
*/
public <T> ExpressionBuilder append(
T obj,
Transform<T> transform
) {
if (transform != null) {
transform.apply(this, obj);
} else {
append(obj);
}
return this;
} | 3.68 |
flink_RequestJobsOverview_readResolve | /** Preserve the singleton property by returning the singleton instance */
private Object readResolve() {
return INSTANCE;
} | 3.68 |
hbase_BaseSourceImpl_incGauge | /**
* Add some amount to a gauge.
* @param gaugeName The name of the gauge to increment.
* @param delta The amount to increment the gauge by.
*/
@Override
public void incGauge(String gaugeName, long delta) {
MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, 0L);
gaugeInt.incr(delta);
} | 3.68 |
querydsl_TypeCategory_isSubCategoryOf | /**
* Transitive and reflexive subCategoryOf check.
*
* @param ancestor the category to check against
* @return true if this category is the ancestor or a (transitive) sub-category of it
*/
public boolean isSubCategoryOf(TypeCategory ancestor) {
if (this == ancestor) {
return true;
} else if (superType == null) {
return false;
} else {
return superType == ancestor || superType.isSubCategoryOf(ancestor);
}
} | 3.68 |
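A minimal, self-contained sketch of the same reflexive-and-transitive supertype walk; the Category class here is invented for illustration and is not Querydsl's TypeCategory.

```java
public class CategoryDemo {

    static final class Category {
        final Category superType; // null for the root category

        Category(Category superType) {
            this.superType = superType;
        }

        // Reflexive: a category is a sub-category of itself.
        // Transitive: otherwise walk up the superType chain until a match or the root.
        boolean isSubCategoryOf(Category ancestor) {
            if (this == ancestor) {
                return true;
            } else if (superType == null) {
                return false;
            } else {
                return superType == ancestor || superType.isSubCategoryOf(ancestor);
            }
        }
    }

    public static void main(String[] args) {
        Category simple = new Category(null);
        Category comparable = new Category(simple);
        Category numeric = new Category(comparable);

        System.out.println(numeric.isSubCategoryOf(simple));  // true (transitive)
        System.out.println(simple.isSubCategoryOf(numeric));  // false
    }
}
```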
pulsar_TokenClient_buildClientCredentialsBody | /**
* Constructs the HTTP request body parameters.
* @param req object with the relevant request parameters
* @return the URL-encoded request body generated from the parameter map.
*/
String buildClientCredentialsBody(ClientCredentialsExchangeRequest req) {
Map<String, String> bodyMap = new TreeMap<>();
bodyMap.put("grant_type", "client_credentials");
bodyMap.put("client_id", req.getClientId());
bodyMap.put("client_secret", req.getClientSecret());
// Only set audience and scope if they are non-empty.
if (!StringUtils.isBlank(req.getAudience())) {
bodyMap.put("audience", req.getAudience());
}
if (!StringUtils.isBlank(req.getScope())) {
bodyMap.put("scope", req.getScope());
}
return bodyMap.entrySet().stream()
.map(e -> {
try {
return URLEncoder.encode(e.getKey(), "UTF-8") + '=' + URLEncoder.encode(e.getValue(), "UTF-8");
} catch (UnsupportedEncodingException e1) {
throw new RuntimeException(e1);
}
})
.collect(Collectors.joining("&"));
} | 3.68 |
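The method above builds an application/x-www-form-urlencoded body by URL-encoding each key/value pair and joining with '&'. Here is a JDK-only sketch of that encoding step, with illustrative parameter values not tied to Pulsar's TokenClient.

```java
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;

public class FormBodyDemo {

    // Encodes each key and value, then joins the pairs with '&'.
    static String encode(Map<String, String> params) {
        return params.entrySet().stream()
                .map(e -> URLEncoder.encode(e.getKey(), StandardCharsets.UTF_8)
                        + '=' + URLEncoder.encode(e.getValue(), StandardCharsets.UTF_8))
                .collect(Collectors.joining("&"));
    }

    public static void main(String[] args) {
        Map<String, String> body = new TreeMap<>();
        body.put("grant_type", "client_credentials");
        body.put("client_id", "my client");
        body.put("client_secret", "s3cr3t&more");
        // Prints: client_id=my+client&client_secret=s3cr3t%26more&grant_type=client_credentials
        System.out.println(encode(body));
    }
}
```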
hadoop_RouterClientMetrics_incInvokedConcurrent | /**
* Increase the concurrent metrics based on the method being invoked.
* @param method concurrently invoked method
*/
public void incInvokedConcurrent(Method method){
switch (method.getName()) {
case "setReplication":
concurrentSetReplicationOps.incr();
break;
case "setPermission":
concurrentSetPermissionOps.incr();
break;
case "setOwner":
concurrentSetOwnerOps.incr();
break;
case "rename":
concurrentRenameOps.incr();
break;
case "rename2":
concurrentRename2Ops.incr();
break;
case "delete":
concurrentDeleteOps.incr();
break;
case "mkdirs":
concurrentMkdirsOps.incr();
break;
case "renewLease":
concurrentRenewLeaseOps.incr();
break;
case "getListing":
concurrentGetListingOps.incr();
break;
case "getFileInfo":
concurrentGetFileInfoOps.incr();
break;
case "getStats":
concurrentGetStatsOps.incr();
break;
case "getDatanodeReport":
concurrentGetDatanodeReportOps.incr();
break;
case "setSafeMode":
concurrentSetSafeModeOps.incr();
break;
case "restoreFailedStorage":
concurrentRestoreFailedStorageOps.incr();
break;
case "saveNamespace":
concurrentSaveNamespaceOps.incr();
break;
case "rollEdits":
concurrentRollEditsOps.incr();
break;
case "refreshNodes":
concurrentRefreshNodesOps.incr();
break;
case "finalizeUpgrade":
concurrentFinalizeUpgradeOps.incr();
break;
case "rollingUpgrade":
concurrentRollingUpgradeOps.incr();
break;
case "metaSave":
concurrentMetaSaveOps.incr();
break;
case "listCorruptFileBlocks":
concurrentListCorruptFileBlocksOps.incr();
break;
case "setBalancerBandwidth":
concurrentSetBalancerBandwidthOps.incr();
break;
case "getContentSummary":
concurrentGetContentSummaryOps.incr();
break;
case "modifyAclEntries":
concurrentModifyAclEntriesOps.incr();
break;
case "removeAclEntries":
concurrentRemoveAclEntriesOps.incr();
break;
case "removeDefaultAcl":
concurrentRemoveDefaultAclOps.incr();
break;
case "removeAcl":
concurrentRemoveAclOps.incr();
break;
case "setAcl":
concurrentSetAclOps.incr();
break;
case "setXAttr":
concurrentSetXAttrOps.incr();
break;
case "removeXAttr":
concurrentRemoveXAttrOps.incr();
break;
case "getCurrentEditLogTxid":
concurrentGetCurrentEditLogTxidOps.incr();
break;
case "getReplicatedBlockStats":
concurrentGetReplicatedBlockStatsOps.incr();
break;
case "setQuota":
concurrentSetQuotaOps.incr();
break;
case "getQuotaUsage":
concurrentGetQuotaUsageOps.incr();
break;
case "getSlowDatanodeReport":
concurrentGetSlowDatanodeReportOps.incr();
break;
default :
concurrentOtherOps.incr();
}
} | 3.68 |
hudi_HiveSchemaUtil_createHiveMap | /**
* Create a 'Map' schema from a Parquet map field.
*/
private static String createHiveMap(String keyType, String valueType, boolean doFormat) {
return (doFormat ? "MAP< " : "MAP<") + keyType + (doFormat ? ", " : ",") + valueType + ">";
} | 3.68 |
hbase_ServerManager_unregisterListener | /**
* Remove the listener from the notification list.
* @param listener The ServerListener to unregister
* @return true if the listener was registered and has been removed
*/
public boolean unregisterListener(final ServerListener listener) {
return this.listeners.remove(listener);
} | 3.68 |
querydsl_SimpleExpression_neAny | /**
* Create a {@code this != any right} expression
*
* @param right
* @return this != any right
*/
public BooleanExpression neAny(CollectionExpression<?, ? super T> right) {
return ne(ExpressionUtils.any(right));
} | 3.68 |
hbase_BucketAllocator_totalBytes | /**
* Combined {@link #totalCount()} * {@link #itemSize()}
*/
public long totalBytes() {
return totalCount * itemSize;
} | 3.68 |
hadoop_PeriodicRLESparseResourceAllocation_getTimePeriod | /**
* Get time period of PeriodicRLESparseResourceAllocation.
*
* @return timePeriod time period represented in ms.
*/
public long getTimePeriod() {
return this.timePeriod;
} | 3.68 |
pulsar_TxnBatchedPositionImpl_equals | /**
* It behaves exactly the same as {@link PositionImpl}, which makes sure that a {@link TxnBatchedPositionImpl} used
* as a map key behaves the same as a {@link PositionImpl}. {@link #batchSize} and {@link #batchIndex} should not be
* involved in the calculation, just as {@link PositionImpl#ackSet} is not involved in the calculation.
* Note: {@link java.util.concurrent.ConcurrentSkipListMap} uses {@link Comparable#compareTo(Object)} to
* determine whether keys are the same, whereas {@link java.util.HashMap} uses
* {@link Object#hashCode()} and {@link Object#equals(Object)} to determine whether keys are the same.
*/
@Override
public boolean equals(Object o) {
return super.equals(o);
} | 3.68 |
hadoop_TwoColumnLayout_footer | /**
* @return the class that will render the footer.
*/
protected Class<? extends SubView> footer() {
return FooterBlock.class;
} | 3.68 |
framework_PureGWTTestApplication_createMenuPath | /**
* Create a menu path, if one doesn't already exist, and return the last
* menu in the series.
*
* @param path
* a varargs list or array of strings describing a menu path,
* e.g. "File", "Recent", "User Files", which would result in the
* File menu having a submenu called "Recent" which would have a
* submenu called "User Files".
* @return the last Menu object specified by the path
*/
private Menu createMenuPath(String... path) {
Menu m = menu;
for (String p : path) {
Menu sub = m.getChildMenu(p);
if (sub == null) {
sub = new Menu(p);
m.addChildMenu(sub);
}
m = sub;
}
return m;
} | 3.68 |
querydsl_AntMetaDataExporter_addTypeMapping | /**
* Adds a TypeMapping instance; called by Ant.
*/
public void addTypeMapping(TypeMapping mapping) {
typeMappings.add(mapping);
} | 3.68 |
hbase_BoundedRecoveredHFilesOutputSink_createRecoveredHFileWriter | /**
* @return Returns a base HFile writer without compression or encodings; good enough for recovery given
* the HFile has metadata on how it was written.
*/
private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String regionName,
long seqId, String familyName, boolean isMetaTable) throws IOException {
Path outputDir = WALSplitUtil.tryCreateRecoveredHFilesDir(walSplitter.rootFS, walSplitter.conf,
tableName, regionName, familyName);
StoreFileWriter.Builder writerBuilder =
new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS)
.withOutputDir(outputDir);
HFileContext hFileContext =
new HFileContextBuilder().withChecksumType(StoreUtils.getChecksumType(walSplitter.conf))
.withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)).withCellComparator(
isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR)
.build();
return writerBuilder.withFileContext(hFileContext).build();
} | 3.68 |
hudi_AvroSchemaConverter_recordTypesOfSameNumFields | /**
* Returns true if all the types are RECORD type with same number of fields.
*/
private static boolean recordTypesOfSameNumFields(List<Schema> types) {
if (types == null || types.size() == 0) {
return false;
}
if (types.stream().anyMatch(s -> s.getType() != Schema.Type.RECORD)) {
return false;
}
int numFields = types.get(0).getFields().size();
return types.stream().allMatch(s -> s.getFields().size() == numFields);
} | 3.68 |
pulsar_AutoConsumeSchema_unwrapInternalSchema | /**
* Get a specific schema version, fetching from the Registry if it is not loaded yet.
* This method is not intended to be used by applications.
* @param schemaVersion the version
* @return the Schema at the specific version
* @see #atSchemaVersion(byte[])
*/
public Schema<?> unwrapInternalSchema(byte[] schemaVersion) {
fetchSchemaIfNeeded(BytesSchemaVersion.of(schemaVersion));
return getInternalSchema(schemaVersion);
} | 3.68 |
shardingsphere-elasticjob_IpUtils_getIp | /**
* Get IP address for localhost.
*
* @return IP address for localhost
*/
public static String getIp() {
if (null != cachedIpAddress) {
return cachedIpAddress;
}
NetworkInterface networkInterface = findNetworkInterface();
if (null != networkInterface) {
Enumeration<InetAddress> ipAddresses = networkInterface.getInetAddresses();
while (ipAddresses.hasMoreElements()) {
InetAddress ipAddress = ipAddresses.nextElement();
if (isValidAddress(ipAddress) && isPreferredAddress(ipAddress)) {
cachedIpAddress = ipAddress.getHostAddress();
return cachedIpAddress;
}
}
}
throw new HostException("ip is null");
} | 3.68 |
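A hedged, JDK-only sketch of scanning network interfaces for a usable local address, similar in spirit to getIp() above; the selection rules used here (interface up, non-loopback, site-local IPv4) are illustrative assumptions, not ElasticJob's actual preference logic.

```java
import java.net.Inet4Address;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Collections;

public class LocalIpDemo {

    // Walks all interfaces and returns the first up, non-loopback, site-local IPv4 address.
    static String findLocalIp() throws SocketException {
        for (NetworkInterface nic : Collections.list(NetworkInterface.getNetworkInterfaces())) {
            if (!nic.isUp() || nic.isLoopback()) {
                continue;
            }
            for (InetAddress addr : Collections.list(nic.getInetAddresses())) {
                if (addr instanceof Inet4Address && !addr.isLoopbackAddress() && addr.isSiteLocalAddress()) {
                    return addr.getHostAddress();
                }
            }
        }
        throw new IllegalStateException("no suitable local IP address found");
    }

    public static void main(String[] args) throws SocketException {
        System.out.println(findLocalIp());
    }
}
```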
morf_SqlServerMetaDataProvider_setAdditionalColumnMetadata | /**
* SQL Server does not return information on auto-increment columns in any way which JDBC can pick up,
* so we need a customised method for fetching this information.
*
* @see org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider#setAdditionalColumnMetadata(RealName, ColumnBuilder, ResultSet)
*/
@Override
protected ColumnBuilder setAdditionalColumnMetadata(RealName tableName, ColumnBuilder columnBuilder, ResultSet columnMetaData)
throws SQLException {
if (identityColumns.containsKey(tableName.getDbName())) {
Map<String, Integer> tableAutoNumStarts = identityColumns.get(tableName.getDbName());
if (tableAutoNumStarts.containsKey(columnBuilder.getName())) {
return columnBuilder.autoNumbered(tableAutoNumStarts.get(columnBuilder.getName()));
}
}
return columnBuilder;
} | 3.68 |
hadoop_OBSFileSystem_getScheme | /**
* Return the protocol scheme for the FileSystem.
*
* @return "obs"
*/
@Override
public String getScheme() {
return "obs";
} | 3.68 |
hbase_AvlUtil_appendList | /**
* Append a list of nodes to the tree
* @param head the head of the current linked list
* @param otherHead the head of the list to append to the current list
* @return the new head of the current list
*/
public static <TNode extends AvlLinkedNode> TNode appendList(TNode head, TNode otherHead) {
if (head == null) return otherHead;
if (otherHead == null) return head;
TNode tail = (TNode) head.iterPrev;
TNode otherTail = (TNode) otherHead.iterPrev;
tail.iterNext = otherHead;
otherHead.iterPrev = tail;
otherTail.iterNext = head;
head.iterPrev = otherTail;
return head;
} | 3.68 |
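appendList splices two circular doubly-linked lists by rewiring the two tails and heads. A self-contained illustration of the same splice with an invented Node class (not HBase's AvlLinkedNode) follows.

```java
public class CircularListDemo {

    static final class Node {
        final String name;
        Node prev, next;

        Node(String name) {
            this.name = name;
            this.prev = this;   // a single node is a circular list of size one
            this.next = this;
        }
    }

    // Appends the list headed by otherHead to the list headed by head; returns the new head.
    static Node append(Node head, Node otherHead) {
        if (head == null) return otherHead;
        if (otherHead == null) return head;
        Node tail = head.prev;
        Node otherTail = otherHead.prev;
        tail.next = otherHead;      // old tail now points at the other list's head
        otherHead.prev = tail;
        otherTail.next = head;      // other list's tail closes the ring back to the head
        head.prev = otherTail;
        return head;
    }

    public static void main(String[] args) {
        Node a = new Node("a");
        Node b = new Node("b");
        Node head = append(a, b);
        // Walk the ring once: prints "a b"
        Node cur = head;
        do {
            System.out.print(cur.name + " ");
            cur = cur.next;
        } while (cur != head);
        System.out.println();
    }
}
```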
Activiti_Builder_main | /**
* Dump out abstract syntax tree for a given expression
*
* @param args array with one element, containing the expression string
*/
public static void main(String[] args) {
if (args.length != 1) {
System.err.println(
"usage: java " +
Builder.class.getName() +
" <expression string>"
);
System.exit(1);
}
PrintWriter out = new PrintWriter(System.out);
Tree tree = null;
try {
tree = new Builder(Feature.METHOD_INVOCATIONS).build(args[0]);
} catch (TreeBuilderException e) {
System.out.println(e.getMessage());
System.exit(0);
}
NodePrinter.dump(out, tree.getRoot());
if (
!tree.getFunctionNodes().iterator().hasNext() &&
!tree.getIdentifierNodes().iterator().hasNext()
) {
ELContext context = new ELContext() {
@Override
public VariableMapper getVariableMapper() {
return null;
}
@Override
public FunctionMapper getFunctionMapper() {
return null;
}
@Override
public ELResolver getELResolver() {
return null;
}
};
out.print(">> ");
try {
out.println(
tree
.getRoot()
.getValue(new Bindings(null, null), context, null)
);
} catch (ELException e) {
out.println(e.getMessage());
}
}
out.flush();
} | 3.68 |
morf_Function_leftTrim | /**
* Helper method to create an instance of the "leftTrim" SQL function,
* which will result in the argument having leading spaces removed.
*
* @param expression the field to evaluate.
* @return an instance of the leftTrim function.
*/
public static Function leftTrim(AliasedField expression) {
return new Function(FunctionType.LEFT_TRIM, expression);
} | 3.68 |
flink_CliFrontend_handleParametrizationException | /**
* Displays an optional exception message for incorrect program parametrization.
*
* @param e The exception to display.
* @return The return code for the process.
*/
private static int handleParametrizationException(ProgramParametrizationException e) {
LOG.error("Program has not been parametrized properly.", e);
System.err.println(e.getMessage());
return 1;
} | 3.68 |
hudi_MarkerUtils_getAllMarkerDir | /**
* Gets all marker directories.
*
* @param tempPath Temporary folder under .hoodie.
* @param fs File system to use.
* @return All marker directories.
* @throws IOException upon error.
*/
public static List<Path> getAllMarkerDir(Path tempPath, FileSystem fs) throws IOException {
return Arrays.stream(fs.listStatus(tempPath)).map(FileStatus::getPath).collect(Collectors.toList());
} | 3.68 |
hadoop_InterruptEscalator_run | /**
* Shutdown callback: stop the service and set an atomic boolean
* if it stopped within the shutdown time.
*/
@Override
public void run() {
if (service != null) {
service.stop();
serviceWasShutdown.set(
service.waitForServiceToStop(shutdownTimeMillis));
} else {
serviceWasShutdown.set(true);
}
} | 3.68 |
framework_Notification_getDescription | /**
* Gets the description part of the notification message.
*
* @return The message description
*/
public String getDescription() {
return getState(false).description;
} | 3.68 |
flink_LookupCacheManager_getInstance | /** Get the shared instance of {@link LookupCacheManager}. */
public static synchronized LookupCacheManager getInstance() {
if (instance == null) {
instance = new LookupCacheManager();
}
return instance;
} | 3.68 |
hbase_CellChunkImmutableSegment_useIndexChunks | // Assuming we are going to use regular data chunks as index chunks,
// we check here how much free space will remain in the last allocated chunk
// (the least occupied one).
// If the percentage of its remaining free space is above the INDEX_CHUNK_UNUSED_SPACE
// threshold, then we will use index chunks (which are smaller) instead.
private ChunkCreator.ChunkType useIndexChunks(int numOfCells) {
int dataChunkSize = ChunkCreator.getInstance().getChunkSize();
int numOfCellsInChunk = calcNumOfCellsInChunk(dataChunkSize);
int cellsInLastChunk = numOfCells % numOfCellsInChunk;
if (cellsInLastChunk == 0) { // There is no free space in the last chunk and thus,
return ChunkCreator.ChunkType.DATA_CHUNK; // no need to use index chunks.
} else {
int chunkSpace = dataChunkSize - ChunkCreator.SIZEOF_CHUNK_HEADER;
int freeSpaceInLastChunk = chunkSpace - cellsInLastChunk * ClassSize.CELL_CHUNK_MAP_ENTRY;
if (freeSpaceInLastChunk > INDEX_CHUNK_UNUSED_SPACE_PRECENTAGE * chunkSpace) {
return ChunkCreator.ChunkType.INDEX_CHUNK;
}
return ChunkCreator.ChunkType.DATA_CHUNK;
}
} | 3.68 |
hadoop_MapTaskAttemptInfo_getMapRuntime | /**
* Get the runtime for the <b>map</b> phase of the map-task attempt.
*
* @return the runtime for the <b>map</b> phase of the map-task attempt
*/
public long getMapRuntime() {
return runtime;
} | 3.68 |
framework_LayoutManager_getMarginWidth | /**
* Gets the combined left & right margin of the given element, provided that
* they have been measured. These elements are guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* A negative number is returned if the element has not been measured. If 0
* is returned, it might indicate that the element is not attached to the
* DOM.
*
* @param element
* the element to get the measured margin for
* @return the measured left+right margin of the element in pixels.
*/
public int getMarginWidth(Element element) {
return getMarginLeft(element) + getMarginRight(element);
} | 3.68 |
flink_SecurityOptions_isRestSSLAuthenticationEnabled | /** Checks whether mutual SSL authentication for the external REST endpoint is enabled. */
public static boolean isRestSSLAuthenticationEnabled(Configuration sslConfig) {
checkNotNull(sslConfig, "sslConfig");
return isRestSSLEnabled(sslConfig) && sslConfig.getBoolean(SSL_REST_AUTHENTICATION_ENABLED);
} | 3.68 |
flink_AbstractFileSource_processStaticFileSet | /**
* Sets this source to bounded (batch) mode.
*
* <p>In this mode, the source processes the files that are under the given paths when the
* application is started. Once all files are processed, the source will finish.
*
* <p>This setting is also the default behavior. This method is mainly here to "switch back"
* to bounded (batch) mode, or to make it explicit in the source construction.
*/
public SELF processStaticFileSet() {
this.continuousSourceSettings = null;
return self();
} | 3.68 |
streampipes_SimpleEstimator_isLowQuality | /**
* Given the statistics of the document before and after applying the {@link BoilerpipeExtractor},
* can we regard the extraction quality as (too) low?
*
* Works well with {@link DefaultExtractor}, {@link ArticleExtractor} and others.
*
* @param dsBefore statistics of the document before extraction
* @param dsAfter statistics of the document after extraction
* @return true if low quality is to be expected.
*/
public boolean isLowQuality(final TextDocumentStatistics dsBefore,
final TextDocumentStatistics dsAfter) {
if (dsBefore.getNumWords() < 90 || dsAfter.getNumWords() < 70) {
return true;
}
if (dsAfter.avgNumWords() < 25) {
return true;
}
return false;
} | 3.68 |
querydsl_DateTimeExpression_second | /**
* Create a seconds expression (range 0-59)
*
* @return second
*/
public NumberExpression<Integer> second() {
if (seconds == null) {
seconds = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.SECOND, mixin);
}
return seconds;
} | 3.68 |
flink_MapValue_keySet | /*
* (non-Javadoc)
* @see java.util.Map#keySet()
*/
@Override
public Set<K> keySet() {
return this.map.keySet();
} | 3.68 |
framework_VAbstractDropHandler_updateAcceptRules | /**
* Implementor/user of {@link VAbstractDropHandler} must pass the UIDL
* painted by {@link AcceptCriterion} to this method. In practice, the
* details about the {@link AcceptCriterion} are saved.
*
* @param uidl
* the accept criterion UIDL
*/
public void updateAcceptRules(UIDL uidl) {
criterioUIDL = uidl;
/*
* supports updating the accept rule root directly or so that it is
* contained in given uidl node
*/
if (!uidl.getTag().equals("-ac")) {
Iterator<Object> childIterator = uidl.iterator();
while (!uidl.getTag().equals("-ac") && childIterator.hasNext()) {
uidl = (UIDL) childIterator.next();
}
}
acceptCriteria = VAcceptCriteria.get(uidl.getStringAttribute("name"));
if (acceptCriteria == null) {
throw new IllegalArgumentException(
"No accept criteria found with given name "
+ uidl.getStringAttribute("name"));
}
} | 3.68 |
framework_Criterion_getValue | /**
* Gets the value of the payload to be compared.
*
* @return value of the payload to be compared
*/
public String getValue() {
return value;
} | 3.68 |
flink_MurmurHashUtil_hashBytesByWords | /**
* Hash bytes in MemorySegment, length must be aligned to 4 bytes.
*
* @param segment segment.
* @param offset offset for MemorySegment
* @param lengthInBytes length in MemorySegment
* @return hash code
*/
public static int hashBytesByWords(MemorySegment segment, int offset, int lengthInBytes) {
return hashBytesByWords(segment, offset, lengthInBytes, DEFAULT_SEED);
} | 3.68 |
dubbo_ConfigUtils_getSystemProperty | /**
* System environment -> System properties
*
* @param key key
* @return value
*/
public static String getSystemProperty(String key) {
String value = System.getenv(key);
if (StringUtils.isEmpty(value)) {
value = System.getProperty(key);
}
return value;
} | 3.68 |
querydsl_HibernateInsertClause_setLockMode | /**
* Set the lock mode for the given path.
* @return the current object
*/
@SuppressWarnings("unchecked")
public HibernateInsertClause setLockMode(Path<?> path, LockMode lockMode) {
lockModes.put(path, lockMode);
return this;
} | 3.68 |
flink_HiveTableUtil_extractRowType | /** Create the Hive table's row type. */
public static DataType extractRowType(
HiveConf hiveConf,
Table hiveTable,
HiveMetastoreClientWrapper client,
HiveShim hiveShim) {
Tuple4<List<FieldSchema>, List<FieldSchema>, Set<String>, Optional<UniqueConstraint>>
hiveTableInfo = extractHiveTableInfo(hiveConf, hiveTable, client, hiveShim);
Tuple2<String[], DataType[]> types =
extractColumnInformation(
Stream.of(hiveTableInfo.f0, hiveTableInfo.f1)
.flatMap(Collection::stream)
.collect(Collectors.toList()),
hiveTableInfo.f2);
return DataTypes.ROW(
IntStream.range(0, types.f0.length)
.mapToObj(i -> DataTypes.FIELD(types.f0[i], types.f1[i]))
.collect(Collectors.toList()));
} | 3.68 |
querydsl_OrderSpecifier_isAscending | /**
* Get whether the order is ascending or not
*
* @return ascending order
*/
public boolean isAscending() {
return order == Order.ASC;
} | 3.68 |
hbase_RegionServerFlushTableProcedureManager_stop | /**
* Gracefully shutdown the thread pool. An ongoing HRegion.flush() should not be interrupted
* (see HBASE-13877)
*/
void stop() {
if (this.stopped) return;
this.stopped = true;
this.executor.shutdown();
} | 3.68 |
hbase_Constraints_getKeyValueForClass | /**
* Get the kv {@link Entry} in the descriptor builder for the specified class
* @param builder {@link TableDescriptorBuilder} to read
* @param clazz To search for
* @return The {@link Pair} of {@literal <key, value>} in the table, if that class is present.
* {@code null} otherwise.
*/
private static Pair<String, String> getKeyValueForClass(TableDescriptorBuilder builder,
Class<? extends Constraint> clazz) {
// get the serialized version of the constraint
String key = serializeConstraintClass(clazz);
String value = builder.getValue(key);
return value == null ? null : new Pair<>(key, value);
} | 3.68 |
rocketmq-connect_PatternFilter_filter | /**
* Filters a record based on the string values in the given map.
*
* @param record the record to filter
* @param map the field-to-value map to check against the pattern
* @return the record if no watched field value matches the pattern, otherwise null
*/
R filter(R record, Map map) {
for (Object field : map.keySet()) {
if (!this.fields.contains(field)) {
continue;
}
Object value = map.get(field);
if (value instanceof String) {
String input = (String) value;
if (this.pattern.matcher(input).matches()) {
return null;
}
}
}
return record;
} | 3.68 |
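A JDK-only sketch of the same idea: drop a record when any watched field's string value matches a regex. The Map-based record shape and field names below are illustrative assumptions, not rocketmq-connect's actual API.

```java
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;

public class PatternFilterDemo {

    // Returns null (drop) if any watched field's string value matches the pattern, else the record.
    static Map<String, Object> filter(Map<String, Object> record, Set<String> watchedFields, Pattern pattern) {
        for (String field : watchedFields) {
            Object value = record.get(field);
            if (value instanceof String s && pattern.matcher(s).matches()) {
                return null;   // matched: drop the record
            }
        }
        return record;         // no match: keep the record
    }

    public static void main(String[] args) {
        Pattern blockTest = Pattern.compile("test-.*");
        Set<String> watched = Set.of("topic");
        System.out.println(filter(Map.of("topic", "test-orders"), watched, blockTest)); // null
        System.out.println(filter(Map.of("topic", "prod-orders"), watched, blockTest)); // {topic=prod-orders}
    }
}
```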
hbase_FavoredNodeAssignmentHelper_generateMissingFavoredNodeSingleRack | /*
* Generate FN for a single rack scenario, don't generate from one of the excluded nodes. Helps
* when we would like to find a replacement node.
*/
private ServerName generateMissingFavoredNodeSingleRack(List<ServerName> favoredNodes,
List<ServerName> excludeNodes) throws IOException {
ServerName newServer = null;
Set<ServerName> excludeFNSet = Sets.newHashSet(favoredNodes);
if (excludeNodes != null && excludeNodes.size() > 0) {
excludeFNSet.addAll(excludeNodes);
}
if (favoredNodes.size() < FAVORED_NODES_NUM) {
newServer = this.getOneRandomServer(this.uniqueRackList.get(0), excludeFNSet);
}
return newServer;
} | 3.68 |
hadoop_FileIoProvider_dirSync | /**
* Sync the given directory changes to durable device.
* @throws IOException
*/
public void dirSync(@Nullable FsVolumeSpi volume, File dir)
throws IOException {
final long begin = profilingEventHook.beforeFileIo(volume, SYNC, 0);
try {
faultInjectorEventHook.beforeFileIo(volume, SYNC, 0);
IOUtils.fsync(dir);
profilingEventHook.afterFileIo(volume, SYNC, begin, 0);
} catch (Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
hbase_HRegion_dropMemStoreContents | /**
* Be careful, this method will drop all data in the memstore of this region. Currently, this
* method is used to drop memstore to prevent memory leak when replaying recovered.edits while
* opening region.
*/
private MemStoreSize dropMemStoreContents() throws IOException {
MemStoreSizing totalFreedSize = new NonThreadSafeMemStoreSizing();
this.updatesLock.writeLock().lock();
try {
for (HStore s : stores.values()) {
MemStoreSize memStoreSize = doDropStoreMemStoreContentsForSeqId(s, HConstants.NO_SEQNUM);
LOG.info("Drop memstore for Store " + s.getColumnFamilyName() + " in region "
+ this.getRegionInfo().getRegionNameAsString() + " , dropped memstoresize: ["
+ memStoreSize + " ]");
totalFreedSize.incMemStoreSize(memStoreSize);
}
return totalFreedSize.getMemStoreSize();
} finally {
this.updatesLock.writeLock().unlock();
}
} | 3.68 |
hbase_HBaseZKTestingUtility_cleanupTestDir | /** Returns True if we removed the test dirs */
@Override
public boolean cleanupTestDir() {
boolean ret = super.cleanupTestDir();
if (deleteDir(this.clusterTestDir)) {
this.clusterTestDir = null;
return ret;
}
return false;
} | 3.68 |
querydsl_SerializerBase_serializeConstant | /**
* Serialize the constant as parameter to the query. The default implementation writes the
* label name for the constants. Some dialects may replace this by indexed based or
* positional parameterization.
* Dialects may also use this to prefix the parameter with for example ":" or "?".
*
* @param parameterIndex index at which this constant occurs in {@link #getConstants()}
* @param constantLabel label under which this constant occurs in {@link #getConstantToLabel()}
*/
protected void serializeConstant(int parameterIndex, String constantLabel) {
append(constantLabel);
} | 3.68 |
flink_RequestedGlobalProperties_parameterizeChannel | /**
* Parametrizes the ship strategy fields of a channel such that the channel produces the desired
* global properties.
*
* @param channel The channel to parametrize.
* @param globalDopChange Flag indicating whether the parallelism changes between sender and
* receiver.
* @param exchangeMode The mode of data exchange (pipelined, always batch, batch only on
* shuffle, ...)
* @param breakPipeline Indicates whether this data exchange should break pipelines (unless
* pipelines are forced).
*/
public void parameterizeChannel(
Channel channel,
boolean globalDopChange,
ExecutionMode exchangeMode,
boolean breakPipeline) {
// safety check. Fully replicated input must be preserved.
if (channel.getSource().getGlobalProperties().isFullyReplicated()
&& !(this.partitioning == PartitioningProperty.FULL_REPLICATION
|| this.partitioning == PartitioningProperty.ANY_DISTRIBUTION)) {
throw new CompilerException(
"Fully replicated input must be preserved "
+ "and may not be converted into another global property.");
}
// if we request nothing, then we need no special strategy. forward, if the number of
// instances remains
// the same, randomly repartition otherwise
if (isTrivial() || this.partitioning == PartitioningProperty.ANY_DISTRIBUTION) {
ShipStrategyType shipStrategy =
globalDopChange ? ShipStrategyType.PARTITION_RANDOM : ShipStrategyType.FORWARD;
DataExchangeMode em =
DataExchangeMode.select(exchangeMode, shipStrategy, breakPipeline);
channel.setShipStrategy(shipStrategy, em);
return;
}
final GlobalProperties inGlobals = channel.getSource().getGlobalProperties();
// if we have no global parallelism change, check if we have already compatible global
// properties
if (!globalDopChange && isMetBy(inGlobals)) {
DataExchangeMode em =
DataExchangeMode.select(exchangeMode, ShipStrategyType.FORWARD, breakPipeline);
channel.setShipStrategy(ShipStrategyType.FORWARD, em);
return;
}
// if we fall through the conditions until here, we need to re-establish
ShipStrategyType shipType;
FieldList partitionKeys;
boolean[] sortDirection;
Partitioner<?> partitioner;
switch (this.partitioning) {
case FULL_REPLICATION:
shipType = ShipStrategyType.BROADCAST;
partitionKeys = null;
sortDirection = null;
partitioner = null;
break;
case ANY_PARTITIONING:
case HASH_PARTITIONED:
shipType = ShipStrategyType.PARTITION_HASH;
partitionKeys = Utils.createOrderedFromSet(this.partitioningFields);
sortDirection = null;
partitioner = null;
break;
case RANGE_PARTITIONED:
shipType = ShipStrategyType.PARTITION_RANGE;
partitionKeys = this.ordering.getInvolvedIndexes();
sortDirection = this.ordering.getFieldSortDirections();
partitioner = null;
if (this.dataDistribution != null) {
channel.setDataDistribution(this.dataDistribution);
}
break;
case FORCED_REBALANCED:
shipType = ShipStrategyType.PARTITION_FORCED_REBALANCE;
partitionKeys = null;
sortDirection = null;
partitioner = null;
break;
case CUSTOM_PARTITIONING:
shipType = ShipStrategyType.PARTITION_CUSTOM;
partitionKeys = Utils.createOrderedFromSet(this.partitioningFields);
sortDirection = null;
partitioner = this.customPartitioner;
break;
default:
throw new CompilerException(
"Invalid partitioning to create through a data exchange: "
+ this.partitioning.name());
}
DataExchangeMode exMode = DataExchangeMode.select(exchangeMode, shipType, breakPipeline);
channel.setShipStrategy(shipType, partitionKeys, sortDirection, partitioner, exMode);
} | 3.68 |
morf_HumanReadableStatementHelper_generateInCriterionString | /**
* Generates a string describing the IN binary operator. This may generate either a list of literal
* values, such as "x in ('1', '2')", or a sub-select statement. For the single-field sub-select form
* a more readable "x in Foo table" form is produced rather than "x in (select x from foo)".
*
* @param criterion the item to describe.
* @param invert {@code true} for the NOT IN operation, {@code false} for the IN operation.
* @return the string.
*/
private static String generateInCriterionString(final Criterion criterion, final boolean invert) {
final StringBuilder sb = new StringBuilder();
sb.append(generateFieldSymbolString(criterion.getField()));
sb.append(" is ");
if (invert) {
sb.append("not ");
}
sb.append("in ");
final SelectStatement source = criterion.getSelectStatement();
if (source == null) {
// List of literals
sb.append('(');
final List<?> values = (List<?>)criterion.getValue();
boolean comma = false;
for (Object value : values) {
if (comma) {
sb.append(", ");
} else {
comma = true;
}
sb.append(generateCriterionValueString(value));
}
sb.append(')');
} else {
if (source.getFields().size () == 1 && source.getTable() != null && (source.getJoins() == null || source.getJoins().isEmpty())) {
sb.append(source.getTable().getName()).append(generateWhereClause(source.getWhereCriterion()));
} else {
sb.append(generateSelectStatementString(source, false));
}
}
return sb.toString();
} | 3.68 |
zilla_HpackContext_staticIndex8 | // Index in static table for the given name of length 8
private static int staticIndex8(DirectBuffer name)
{
switch (name.getByte(7))
{
case 'e':
if (STATIC_TABLE[42].name.equals(name)) // if-range
{
return 42;
}
break;
case 'h':
if (STATIC_TABLE[39].name.equals(name)) // if-match
{
return 39;
}
break;
case 'n':
if (STATIC_TABLE[46].name.equals(name)) // location
{
return 46;
}
break;
}
return -1;
} | 3.68 |
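staticIndex8 narrows the candidates by switching on a single byte before paying for a full equality check. A small, self-contained illustration of that dispatch-on-last-character trick with an invented lookup table:

```java
public class LastCharDispatchDemo {

    // All entries are length-8 names, so the character at index 7 always exists.
    private static final String[] TABLE = {"if-range", "if-match", "location"};

    static int indexOfLength8(String name) {
        switch (name.charAt(7)) {
            case 'e':
                if (TABLE[0].equals(name)) return 0; // if-range
                break;
            case 'h':
                if (TABLE[1].equals(name)) return 1; // if-match
                break;
            case 'n':
                if (TABLE[2].equals(name)) return 2; // location
                break;
        }
        return -1;
    }

    public static void main(String[] args) {
        System.out.println(indexOfLength8("location")); // 2
        System.out.println(indexOfLength8("if-xxxxe")); // -1 (same last char, fails the equals check)
    }
}
```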
flink_IntermediateResult_getPartitionById | /**
* Returns the partition with the given ID.
*
* @param resultPartitionId ID of the partition to look up
* @throws NullPointerException If partition ID <code>null</code>
* @throws IllegalArgumentException Thrown if unknown partition ID
* @return Intermediate result partition with the given ID
*/
public IntermediateResultPartition getPartitionById(
IntermediateResultPartitionID resultPartitionId) {
// Looks ups the partition number via the helper map and returns the
// partition. Currently, this happens infrequently enough that we could
// consider removing the map and scanning the partitions on every lookup.
// The lookup (currently) only happen when the producer of an intermediate
// result cannot be found via its registered execution.
Integer partitionNumber =
partitionLookupHelper.get(
checkNotNull(resultPartitionId, "IntermediateResultPartitionID"));
if (partitionNumber != null) {
return partitions[partitionNumber];
} else {
throw new IllegalArgumentException(
"Unknown intermediate result partition ID " + resultPartitionId);
}
} | 3.68 |
flink_WorkerResourceSpec_setExtendedResources | /**
* Add the given extended resources. This will discard all the previously added extended
* resources.
*/
public Builder setExtendedResources(Collection<ExternalResource> extendedResources) {
this.extendedResources =
extendedResources.stream()
.collect(
Collectors.toMap(
ExternalResource::getName, Function.identity()));
return this;
} | 3.68 |
flink_StreamTaskNetworkInputFactory_create | /**
* Factory method for {@link StreamTaskNetworkInput} or {@link RescalingStreamTaskNetworkInput}
* depending on {@link InflightDataRescalingDescriptor}.
*/
public static <T> StreamTaskInput<T> create(
CheckpointedInputGate checkpointedInputGate,
TypeSerializer<T> inputSerializer,
IOManager ioManager,
StatusWatermarkValve statusWatermarkValve,
int inputIndex,
InflightDataRescalingDescriptor rescalingDescriptorinflightDataRescalingDescriptor,
Function<Integer, StreamPartitioner<?>> gatePartitioners,
TaskInfo taskInfo,
CanEmitBatchOfRecordsChecker canEmitBatchOfRecords) {
return rescalingDescriptorinflightDataRescalingDescriptor.equals(
InflightDataRescalingDescriptor.NO_RESCALE)
? new StreamTaskNetworkInput<>(
checkpointedInputGate,
inputSerializer,
ioManager,
statusWatermarkValve,
inputIndex,
canEmitBatchOfRecords)
: new RescalingStreamTaskNetworkInput<>(
checkpointedInputGate,
inputSerializer,
ioManager,
statusWatermarkValve,
inputIndex,
rescalingDescriptorinflightDataRescalingDescriptor,
gatePartitioners,
taskInfo,
canEmitBatchOfRecords);
} | 3.68 |
hudi_StreamerUtil_getPayloadConfig | /**
* Returns the payload config with the given configuration.
*/
public static HoodiePayloadConfig getPayloadConfig(Configuration conf) {
return HoodiePayloadConfig.newBuilder()
.withPayloadClass(conf.getString(FlinkOptions.PAYLOAD_CLASS_NAME))
.withPayloadOrderingField(conf.getString(FlinkOptions.PRECOMBINE_FIELD))
.withPayloadEventTimeField(conf.getString(FlinkOptions.PRECOMBINE_FIELD))
.build();
} | 3.68 |
hbase_MetricsREST_incrementFailedScanRequests | /**
* @param inc How much to add to failedScanCount.
*/
public void incrementFailedScanRequests(final int inc) {
source.incrementFailedScanRequests(inc);
} | 3.68 |
hadoop_CredentialInitializationException_retryable | /**
* This exception is not going to go away if you try calling it again.
* @return false, always.
*/
@Override
public boolean retryable() {
return false;
} | 3.68 |
hbase_ThriftUtilities_addAttributes | /**
* Adds all the attributes into the Operation object
*/
private static void addAttributes(OperationWithAttributes op,
Map<ByteBuffer, ByteBuffer> attributes) {
if (attributes == null || attributes.isEmpty()) {
return;
}
for (Map.Entry<ByteBuffer, ByteBuffer> entry : attributes.entrySet()) {
String name = Bytes.toStringBinary(getBytes(entry.getKey()));
byte[] value = getBytes(entry.getValue());
op.setAttribute(name, value);
}
} | 3.68 |
pulsar_AuthenticationSasl_isRoleTokenExpired | // If the role token exists but has expired, return true.
private boolean isRoleTokenExpired(Map<String, String> responseHeaders) {
if ((saslRoleToken != null)
&& (responseHeaders != null)
// header type match
&& (responseHeaders.get(SASL_HEADER_TYPE) != null && responseHeaders.get(SASL_HEADER_TYPE)
.equalsIgnoreCase(SASL_TYPE_VALUE))
// header state expired
&& (responseHeaders.get(SASL_HEADER_STATE) != null && responseHeaders.get(SASL_HEADER_STATE)
.equalsIgnoreCase(SASL_AUTH_ROLE_TOKEN_EXPIRED))) {
return true;
} else {
return false;
}
} | 3.68 |
hadoop_Abfss_finalize | /**
* Close the file system; the FileContext API doesn't have an explicit close.
*/
@Override
protected void finalize() throws Throwable {
fsImpl.close();
super.finalize();
} | 3.68 |
framework_DefaultErrorHandler_findAbstractComponent | /**
* Returns the AbstractComponent associated with the given error if such can
* be found.
*
* @param event
* The error to investigate
* @return The {@link AbstractComponent} to error relates to or null if
* could not be determined or if the error does not relate to any
* AbstractComponent.
*/
public static AbstractComponent findAbstractComponent(
com.vaadin.server.ErrorEvent event) {
if (event instanceof ConnectorErrorEvent) {
Component c = findComponent(
((ConnectorErrorEvent) event).getConnector());
if (c instanceof AbstractComponent) {
return (AbstractComponent) c;
}
}
return null;
} | 3.68 |
framework_DefaultItemSorter_compare | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.ItemSorter#compare(java.lang.Object,
* java.lang.Object)
*/
@Override
public int compare(Object o1, Object o2) {
Item item1 = container.getItem(o1);
Item item2 = container.getItem(o2);
/*
* Items can be null if the container is filtered. Null is considered
* "less" than not-null.
*/
if (item1 == null) {
if (item2 == null) {
return 0;
} else {
return 1;
}
} else if (item2 == null) {
return -1;
}
for (int i = 0; i < sortPropertyIds.length; i++) {
int result = compareProperty(sortPropertyIds[i], sortDirections[i],
item1, item2);
// If order can be decided
if (result != 0) {
return result;
}
}
return 0;
} | 3.68 |
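compare() above orders items property by property, treating missing (null) items as sorting last. Below is a JDK Comparator sketch of the same multi-key, null-aware ordering; the Person type is invented and unrelated to Vaadin's Item/Container API.

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class MultiKeySortDemo {

    record Person(String lastName, Integer age) {}

    public static void main(String[] args) {
        // Primary key: lastName, secondary key: age; null keys sort last for both.
        Comparator<Person> byLastThenAge = Comparator
                .comparing(Person::lastName, Comparator.nullsLast(Comparator.naturalOrder()))
                .thenComparing(Person::age, Comparator.nullsLast(Comparator.naturalOrder()));

        List<Person> people = new ArrayList<>(List.of(
                new Person("Smith", 40),
                new Person(null, 25),
                new Person("Smith", 31)));
        people.sort(byLastThenAge);
        // Smith/31, Smith/40, then the null last name
        System.out.println(people);
    }
}
```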
flink_MemorySegment_size | /**
* Gets the size of the memory segment, in bytes.
*
* @return The size of the memory segment.
*/
public int size() {
return size;
} | 3.68 |
dubbo_SerializableClassRegistry_registerClass | /**
* only supposed to be called at startup time
*
* @param clazz object type
* @param serializer object serializer
*/
public static void registerClass(Class<?> clazz, Object serializer) {
if (clazz == null) {
throw new IllegalArgumentException("Class registered to kryo cannot be null!");
}
REGISTRATIONS.put(clazz, serializer);
} | 3.68 |
flink_SpillingThread_getMergingIterator | /**
* Returns an iterator that iterates over the merged result from all given channels.
*
* @param channelIDs The channels that are to be merged and returned.
* @param inputSegments The buffers to be used for reading. The list contains for each channel
* one list of input segments. The size of the <code>inputSegments</code> list must be equal
* to that of the <code>channelIDs</code> list.
* @return An iterator over the merged records of the input channels.
* @throws IOException Thrown, if the readers encounter an I/O problem.
*/
private MergeIterator<E> getMergingIterator(
final List<ChannelWithBlockCount> channelIDs,
final List<List<MemorySegment>> inputSegments,
List<FileIOChannel> readerList,
MutableObjectIterator<E> largeRecords)
throws IOException {
// create one iterator per channel id
LOG.debug("Performing merge of {} sorted streams.", channelIDs.size());
final List<MutableObjectIterator<E>> iterators = new ArrayList<>(channelIDs.size() + 1);
for (int i = 0; i < channelIDs.size(); i++) {
final ChannelWithBlockCount channel = channelIDs.get(i);
final List<MemorySegment> segsForChannel = inputSegments.get(i);
// create a reader. if there are multiple segments for the reader, issue multiple
// together per I/O request
final BlockChannelReader<MemorySegment> reader =
this.ioManager.createBlockChannelReader(channel.getChannel());
readerList.add(reader);
spillChannelManager.registerOpenChannelToBeRemovedAtShutdown(reader);
spillChannelManager.unregisterChannelToBeRemovedAtShutdown(channel.getChannel());
// wrap channel reader as a view, to get block spanning record deserialization
final ChannelReaderInputView inView =
new ChannelReaderInputView(
reader, segsForChannel, channel.getBlockCount(), false);
iterators.add(new ChannelReaderInputViewIterator<>(inView, null, this.serializer));
}
if (largeRecords != null) {
iterators.add(largeRecords);
}
return new MergeIterator<>(iterators, this.comparator);
} | 3.68 |
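getMergingIterator merges k sorted spill channels into one sorted stream. A compact, JDK-only sketch of the same k-way merge over in-memory sorted lists using a priority queue; purely illustrative, not Flink's MergeIterator.

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

public class KWayMergeDemo {

    // Pairs the current head element of a sorted run with the iterator over its remaining elements.
    private record Head<T>(T value, Iterator<T> rest) {}

    static <T> List<T> merge(List<List<T>> sortedRuns, Comparator<T> comparator) {
        PriorityQueue<Head<T>> heap =
                new PriorityQueue<Head<T>>((a, b) -> comparator.compare(a.value(), b.value()));
        for (List<T> run : sortedRuns) {
            Iterator<T> it = run.iterator();
            if (it.hasNext()) {
                heap.add(new Head<>(it.next(), it));
            }
        }
        List<T> merged = new ArrayList<>();
        while (!heap.isEmpty()) {
            Head<T> smallest = heap.poll();     // take the globally smallest head
            merged.add(smallest.value());
            if (smallest.rest().hasNext()) {    // refill from the same run, if anything is left
                heap.add(new Head<>(smallest.rest().next(), smallest.rest()));
            }
        }
        return merged;
    }

    public static void main(String[] args) {
        List<List<Integer>> runs = List.of(List.of(1, 4, 9), List.of(2, 3, 10), List.of(5));
        System.out.println(merge(runs, Comparator.naturalOrder())); // [1, 2, 3, 4, 5, 9, 10]
    }
}
```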
hadoop_BlockStorageMovementCommand_getBlockPoolId | /**
* Returns block pool ID.
*/
public String getBlockPoolId() {
return blockPoolId;
} | 3.68 |
hbase_PrivateCellUtil_getRowAsInt | /**
* Converts the rowkey bytes of the given cell into an int value
* @return rowkey as int
*/
public static int getRowAsInt(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.toInt(((ByteBufferExtendedCell) cell).getRowByteBuffer(),
((ByteBufferExtendedCell) cell).getRowPosition());
}
return Bytes.toInt(cell.getRowArray(), cell.getRowOffset());
} | 3.68 |
flink_CheckpointStorageLoader_createDefaultCheckpointStorage | /**
* Creates a default checkpoint storage instance if none was explicitly configured. For
* backwards compatibility, the default storage will be {@link FileSystemCheckpointStorage} if a
* checkpoint directory was configured, {@link
* org.apache.flink.runtime.state.storage.JobManagerCheckpointStorage} otherwise.
*
* @param config The configuration to load the checkpoint storage from
* @param classLoader The class loader that should be used to load the checkpoint storage
* @param logger Optionally, a logger to log actions to (may be null)
* @return The instantiated checkpoint storage.
* @throws IllegalConfigurationException May be thrown by the CheckpointStorageFactory when
* creating / configuring the checkpoint storage in the factory.
*/
private static CheckpointStorage createDefaultCheckpointStorage(
ReadableConfig config, ClassLoader classLoader, @Nullable Logger logger) {
if (config.getOptional(CheckpointingOptions.CHECKPOINTS_DIRECTORY).isPresent()) {
return createFileSystemCheckpointStorage(config, classLoader, logger);
}
return createJobManagerCheckpointStorage(config, classLoader, logger);
} | 3.68 |
hbase_ColumnRangeFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof ColumnRangeFilter)) {
return false;
}
ColumnRangeFilter other = (ColumnRangeFilter) o;
return Bytes.equals(this.getMinColumn(), other.getMinColumn())
&& this.getMinColumnInclusive() == other.getMinColumnInclusive()
&& Bytes.equals(this.getMaxColumn(), other.getMaxColumn())
&& this.getMaxColumnInclusive() == other.getMaxColumnInclusive();
} | 3.68 |
hbase_RegionScannerImpl_populateResult | /**
* Fetches records with currentRow into the results list, until the next row, batchLimit (if not -1) is
* reached, or remainingResultSize (if not -1) is reached.
* @param heap KeyValueHeap to fetch data from. It must be positioned on the correct row before the call.
* @return state of last call to {@link KeyValueHeap#next()}
*/
private boolean populateResult(List<Cell> results, KeyValueHeap heap,
ScannerContext scannerContext, Cell currentRowCell) throws IOException {
Cell nextKv;
boolean moreCellsInRow = false;
boolean tmpKeepProgress = scannerContext.getKeepProgress();
// Scanning between column families and thus the scope is between cells
LimitScope limitScope = LimitScope.BETWEEN_CELLS;
do {
// Check for thread interrupt status in case we have been signaled from
// #interruptRegionOperation.
region.checkInterrupt();
// We want to maintain any progress that is made towards the limits while scanning across
// different column families. To do this, we toggle the keep progress flag on during calls
// to the StoreScanner to ensure that any progress made thus far is not wiped away.
scannerContext.setKeepProgress(true);
heap.next(results, scannerContext);
scannerContext.setKeepProgress(tmpKeepProgress);
nextKv = heap.peek();
moreCellsInRow = moreCellsInRow(nextKv, currentRowCell);
if (!moreCellsInRow) {
incrementCountOfRowsScannedMetric(scannerContext);
}
if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) {
return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues();
} else if (scannerContext.checkSizeLimit(limitScope)) {
ScannerContext.NextState state =
moreCellsInRow ? NextState.SIZE_LIMIT_REACHED_MID_ROW : NextState.SIZE_LIMIT_REACHED;
return scannerContext.setScannerState(state).hasMoreValues();
} else if (scannerContext.checkTimeLimit(limitScope)) {
ScannerContext.NextState state =
moreCellsInRow ? NextState.TIME_LIMIT_REACHED_MID_ROW : NextState.TIME_LIMIT_REACHED;
return scannerContext.setScannerState(state).hasMoreValues();
}
} while (moreCellsInRow);
return nextKv != null;
} | 3.68 |
hbase_TableRecordReader_restart | /**
* Restart from survivable exceptions by creating a new scanner.
* @param firstRow The first row to start at.
* @throws IOException When restarting fails.
*/
public void restart(byte[] firstRow) throws IOException {
this.recordReaderImpl.restart(firstRow);
} | 3.68 |
framework_TextArea_setWordwrap | /**
* Sets the text area's word-wrap mode on or off.
*
* @param wordwrap
* the boolean value specifying if the text area should be in
* word-wrap mode.
*/
public void setWordwrap(boolean wordwrap) {
getState().wordwrap = wordwrap;
} | 3.68 |
AreaShop_AreaShop_getFeatureManager | /**
* Get the FeatureManager.
* Manages region specific features.
* @return The FeatureManager
*/
public FeatureManager getFeatureManager() {
return featureManager;
} | 3.68 |
framework_BootstrapPageResponse_setDateHeader | /**
* Properly formats a timestamp as a date in a header that will be included
* in the HTTP response. If the header had already been set, the new value
* overwrites the previous one.
*
* @see #setHeader(String, String)
* @see VaadinResponse#setDateHeader(String, long)
*
* @param name
* the name of the header
* @param timestamp
* the number of milliseconds since epoch
*/
public void setDateHeader(String name, long timestamp) {
headers.put(name, Long.valueOf(timestamp));
} | 3.68 |
hadoop_YarnClient_getApplications | /**
* <p>
* Get a list of ApplicationReports that match the given
* {@link GetApplicationsRequest}.
*</p>
*
* <p>
* If the user does not have <code>VIEW_APP</code> access for an application
* then the corresponding report will be filtered as described in
* {@link #getApplicationReport(ApplicationId)}.
* </p>
*
* @param request the request object to get the list of applications.
* @return The list of ApplicationReports that match the request
* @throws YarnException Exception specific to YARN.
* @throws IOException Exception mostly related to connection errors.
*/
public List<ApplicationReport> getApplications(GetApplicationsRequest request)
throws YarnException, IOException {
throw new UnsupportedOperationException(
"The sub-class extending " + YarnClient.class.getName()
+ " is expected to implement this !");
} | 3.68 |
pulsar_RangeCache_removeRange | /**
*
* @param first the first key of the range
* @param last the last key of the range
* @param lastInclusive whether the last key is included in the range
* @return a pair containing the number of removed entries and the total removed size
*/
public Pair<Integer, Long> removeRange(Key first, Key last, boolean lastInclusive) {
Map<Key, Value> subMap = entries.subMap(first, true, last, lastInclusive);
int removedEntries = 0;
long removedSize = 0;
for (Key key : subMap.keySet()) {
Value value = entries.remove(key);
if (value == null) {
continue;
}
removedSize += weighter.getSize(value);
value.release();
++removedEntries;
}
size.addAndGet(-removedSize);
return Pair.of(removedEntries, removedSize);
} | 3.68 |
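removeRange iterates a subMap view and removes each entry from the backing map while tallying the count and total size. A JDK-only sketch of that pattern over a ConcurrentSkipListMap; using string length as the entry size is an assumption made for illustration.

```java
import java.util.concurrent.ConcurrentSkipListMap;

public class RangeRemoveDemo {
    public static void main(String[] args) {
        ConcurrentSkipListMap<Long, String> entries = new ConcurrentSkipListMap<>();
        entries.put(1L, "one");
        entries.put(2L, "two");
        entries.put(3L, "three");
        entries.put(4L, "four");

        long removedSize = 0;
        int removedEntries = 0;
        // Remove keys in [2, 4): iterate the view's keys and remove from the backing map.
        // The skip-list iterators are weakly consistent, so removal during iteration is safe.
        for (Long key : entries.subMap(2L, true, 4L, false).keySet()) {
            String value = entries.remove(key);
            if (value == null) {
                continue;           // another thread may have removed it already
            }
            removedSize += value.length();
            removedEntries++;
        }
        System.out.println(removedEntries + " entries, " + removedSize + " bytes"); // 2 entries, 8 bytes
    }
}
```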
flink_TypeMappingUtils_getProctimeAttribute | /** Returns the proctime attribute of the {@link TableSource} if it is defined. */
private static Optional<String> getProctimeAttribute(TableSource<?> tableSource) {
if (tableSource instanceof DefinedProctimeAttribute) {
return Optional.ofNullable(
((DefinedProctimeAttribute) tableSource).getProctimeAttribute());
} else {
return Optional.empty();
}
} | 3.68 |
hbase_HBaseTestingUtility_getSupportedCompressionAlgorithms | /**
* Get supported compression algorithms.
* @return supported compression algorithms.
*/
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
for (String algoName : allAlgos) {
try {
Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
algo.getCompressor();
supportedAlgos.add(algo);
} catch (Throwable t) {
// this algo is not available
}
}
return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
} | 3.68 |
framework_VTwinColSelect_clearInternalWidths | /** For internal use only. May be removed or replaced in the future. */
public void clearInternalWidths() {
String colWidth = DEFAULT_COLUMN_COUNT + "em";
String containerWidth = 2 * DEFAULT_COLUMN_COUNT + 4 + "em";
// Caption wrapper width == optionsSelect + buttons +
// selectionsSelect
String captionWrapperWidth = 2 * DEFAULT_COLUMN_COUNT + 4 - 0.5 + "em";
optionsListBox.setWidth(colWidth);
if (optionsCaption != null) {
optionsCaption.setWidth(colWidth);
}
selectionsListBox.setWidth(colWidth);
if (selectionsCaption != null) {
selectionsCaption.setWidth(colWidth);
}
buttons.setWidth("3.5em");
optionsContainer.setWidth(containerWidth);
captionWrapper.setWidth(captionWrapperWidth);
} | 3.68 |
hadoop_FederationCache_buildSubClusterInfoResponse | /**
* Build SubClusterInfo Response.
*
* @param filterInactiveSubClusters whether to filter out inactive sub-clusters.
* @return SubClusterInfo Response.
* @throws YarnException exceptions from yarn servers.
*/
private CacheResponse<SubClusterInfo> buildSubClusterInfoResponse(
final boolean filterInactiveSubClusters) throws YarnException {
GetSubClustersInfoRequest request = GetSubClustersInfoRequest.newInstance(
filterInactiveSubClusters);
GetSubClustersInfoResponse subClusters = stateStore.getSubClusters(request);
CacheResponse<SubClusterInfo> response = new SubClusterInfoCacheResponse();
response.setList(subClusters.getSubClusters());
return response;
} | 3.68 |
framework_VScrollTable_buildCaptionHtmlSnippet | /**
* Helper function to build html snippet for column or row headers.
*
* @param uidl
* possibly with values caption and icon
* @return html snippet containing possibly an icon + caption text
*/
protected String buildCaptionHtmlSnippet(UIDL uidl) {
String s = uidl.hasAttribute("caption")
? uidl.getStringAttribute("caption")
: "";
if (uidl.hasAttribute("icon")) {
Icon icon = client.getIcon(uidl.getStringAttribute("icon"));
icon.setAlternateText("icon");
s = icon.getElement().getString() + s;
}
return s;
} | 3.68 |
framework_AbsoluteLayout_writeDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractComponent#writeDesign(org.jsoup.nodes.Node,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void writeDesign(Element design, DesignContext designContext) {
super.writeDesign(design, designContext);
AbsoluteLayout def = designContext.getDefaultInstance(this);
if (!designContext.shouldWriteChildren(this, def)) {
return;
}
// handle children
for (Component child : this) {
Element childElement = designContext.createElement(child);
design.appendChild(childElement);
// handle position
ComponentPosition position = getPosition(child);
writePositionAttribute(childElement, ATTR_TOP,
position.getTopUnits().getSymbol(), position.getTopValue());
writePositionAttribute(childElement, ATTR_RIGHT,
position.getRightUnits().getSymbol(),
position.getRightValue());
writePositionAttribute(childElement, ATTR_BOTTOM,
position.getBottomUnits().getSymbol(),
position.getBottomValue());
writePositionAttribute(childElement, ATTR_LEFT,
position.getLeftUnits().getSymbol(),
position.getLeftValue());
// handle z-index
if (position.getZIndex() >= 0) {
childElement.attr(ATTR_Z_INDEX,
String.valueOf(position.zIndex));
}
}
} | 3.68 |
hadoop_Cluster_getAllJobStatuses | /**
* Get job status for all jobs in the cluster.
* @return job status for all jobs in cluster
* @throws IOException
* @throws InterruptedException
*/
public JobStatus[] getAllJobStatuses() throws IOException, InterruptedException {
return client.getAllJobs();
} | 3.68 |
querydsl_AbstractFetchableMongodbQuery_fetch | /**
* Fetch with the specific fields
*
* @param paths fields to return
* @return results
*/
public List<K> fetch(Path<?>... paths) {
getQueryMixin().setProjection(paths);
return fetch();
} | 3.68 |
pulsar_ProtocolHandlerUtils_getProtocolHandlerDefinition | /**
* Retrieve the protocol handler definition from the provided handler nar package.
*
* @param narPath the path to the protocol handler NAR package
* @return the protocol handler definition
* @throws IOException when fail to load the protocol handler or get the definition
*/
public static ProtocolHandlerDefinition getProtocolHandlerDefinition(String narPath, String narExtractionDirectory)
throws IOException {
try (NarClassLoader ncl = NarClassLoaderBuilder.builder()
.narFile(new File(narPath))
.extractionDirectory(narExtractionDirectory)
.build()) {
return getProtocolHandlerDefinition(ncl);
}
} | 3.68 |
dubbo_DynamicConfiguration_getProperties | /**
* This method is mostly used to get a compound config file, such as a complete dubbo.properties file.
*
* @revision 2.7.4
*/
default String getProperties(String key, String group, long timeout) throws IllegalStateException {
return getConfig(key, group, timeout);
} | 3.68 |