name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_DiskBalancerWorkItem_getBytesCopied | /**
* Returns bytes copied so far.
*
* @return number of bytes copied so far
*/
public long getBytesCopied() {
return bytesCopied;
} | 3.68 |
pulsar_ManagedLedgerImpl_getEstimatedBacklogSize | /**
* Get estimated backlog size from a specific position.
*/
public long getEstimatedBacklogSize(PositionImpl pos) {
if (pos == null) {
return 0;
}
return estimateBacklogFromPosition(pos);
} | 3.68 |
hadoop_TimelineWriteResponse_getEntityId | /**
* Get the entity Id.
*
* @return the entity Id
*/
@XmlElement(name = "entity")
public String getEntityId() {
return entityId;
} | 3.68 |
hadoop_FederationStateStoreFacade_createInstance | /**
* Helper method to create instances of Object using the class name specified
* in the configuration object.
*
* @param conf the YARN configuration
* @param configuredClassName the configuration key whose value names the class to instantiate
* @param defaultValue the default implementation class name
* @param type the required interface/base class
* @param <T> The type of the instance to create
* @return the instance created
*/
@SuppressWarnings("unchecked")
public static <T> T createInstance(Configuration conf,
String configuredClassName, String defaultValue, Class<T> type) {
String className = conf.get(configuredClassName, defaultValue);
try {
Class<?> clusterResolverClass = conf.getClassByName(className);
if (type.isAssignableFrom(clusterResolverClass)) {
return (T) ReflectionUtils.newInstance(clusterResolverClass, conf);
} else {
throw new YarnRuntimeException("Class: " + className
+ " not instance of " + type.getCanonicalName());
}
} catch (ClassNotFoundException e) {
throw new YarnRuntimeException("Could not instantiate : " + className, e);
}
} | 3.68 |
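A brief usage sketch of the createInstance helper above; the configuration key, default class name, and MyResolver interface are hypothetical names used only for illustration, not real Hadoop federation keys or types:

```java
// Hypothetical example: resolve a pluggable implementation from a config key.
Configuration conf = new Configuration();
conf.set("federation.example.resolver.class", "org.example.DefaultResolver"); // assumed key/class

MyResolver resolver = FederationStateStoreFacade.createInstance(
    conf,
    "federation.example.resolver.class",   // key whose value names the class
    "org.example.DefaultResolver",         // fallback if the key is unset
    MyResolver.class);                     // required interface; a mismatch throws YarnRuntimeException
```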
pulsar_CmdUsageFormatter_appendCommands | /**
* This method is copied from DefaultUsageFormatter, with the added ability
* to skip deprecated commands.
* @param out
* @param indentCount
* @param descriptionIndent
* @param indent
*/
@Override
public void appendCommands(StringBuilder out, int indentCount, int descriptionIndent, String indent) {
out.append(indent + " Commands:\n");
for (Map.Entry<JCommander.ProgramName, JCommander> commands : commander.getRawCommands().entrySet()) {
Object arg = commands.getValue().getObjects().get(0);
Parameters p = arg.getClass().getAnnotation(Parameters.class);
if (p == null || !p.hidden()) {
JCommander.ProgramName progName = commands.getKey();
String dispName = progName.getDisplayName();
//skip the deprecated command
if (deprecatedCommands.contains(dispName)) {
continue;
}
String description = indent + s(4) + dispName + s(6) + getCommandDescription(progName.getName());
wrapDescription(out, indentCount + descriptionIndent, description);
out.append("\n");
JCommander jc = commander.findCommandByAlias(progName.getName());
jc.getUsageFormatter().usage(out, indent + s(6));
out.append("\n");
}
}
} | 3.68 |
hbase_RestoreSnapshotHelper_cloneRegionInfo | /**
* Create a new {@link RegionInfo} from the snapshot region info. Keep the same startKey, endKey,
* regionId and split information but change the table name.
* @param snapshotRegionInfo Info for region to clone.
* @return the new RegionInfo instance
*/
public RegionInfo cloneRegionInfo(final RegionInfo snapshotRegionInfo) {
return cloneRegionInfo(tableDesc.getTableName(), snapshotRegionInfo);
} | 3.68 |
hadoop_PoolAlignmentContext_updateRequestState | /**
* Client side implementation for routers to provide state info in requests to
* namenodes.
*/
@Override
public void updateRequestState(RpcHeaderProtos.RpcRequestHeaderProto.Builder header) {
header.setStateId(poolLocalStateId.get());
} | 3.68 |
hbase_MultiTableSnapshotInputFormatImpl_generateSnapshotToRestoreDirMapping | /**
* Generate a random path underneath baseRestoreDir for each snapshot in snapshots and return a
* map from the snapshot to the restore directory.
* @param snapshots collection of snapshot names to restore
* @param baseRestoreDir base directory under which all snapshots in snapshots will be restored
* @return a mapping from snapshot name to the directory in which that snapshot has been restored
*/
private Map<String, Path> generateSnapshotToRestoreDirMapping(Collection<String> snapshots,
Path baseRestoreDir) {
Map<String, Path> rtn = Maps.newHashMap();
for (String snapshotName : snapshots) {
Path restoreSnapshotDir =
new Path(baseRestoreDir, snapshotName + "__" + UUID.randomUUID().toString());
rtn.put(snapshotName, restoreSnapshotDir);
}
return rtn;
} | 3.68 |
framework_ResourceWriter_write | /**
* Writes a JSON object containing registered resources.
*
* @param ui
* The {@link UI} whose resources to write.
* @param writer
* The {@link Writer} to use.
* @param target
* The {@link JsonPaintTarget} containing the resources.
* @throws IOException
*/
public void write(UI ui, Writer writer, JsonPaintTarget target)
throws IOException {
// Precache custom layouts
// TODO We should only precache the layouts that are not
// cached already (plagiarized from usedPaintableTypes)
writer.write("{");
int resourceIndex = 0;
for (Object o : target.getUsedResources()) {
final String resource = (String) o;
InputStream is = null;
try {
is = ui.getSession().getService().getThemeResourceAsStream(ui,
ui.getTheme(), resource);
} catch (final Exception e) {
// FIXME: Handle exception
getLogger().log(Level.FINER,
"Failed to get theme resource stream.", e);
}
if (is != null) {
writer.write((resourceIndex++ > 0 ? ", " : "") + "\"" + resource
+ "\" : ");
final StringBuilder layout = new StringBuilder();
try (InputStreamReader r = new InputStreamReader(is,
StandardCharsets.UTF_8)) {
final char[] buffer = new char[20000];
int charsRead = 0;
while ((charsRead = r.read(buffer)) > 0) {
layout.append(buffer, 0, charsRead);
}
} catch (final IOException e) {
// FIXME: Handle exception
getLogger().log(Level.INFO, "Resource transfer failed", e);
}
writer.write("\""
+ JsonPaintTarget.escapeJSON(layout.toString()) + "\"");
} else {
// FIXME: Handle exception
getLogger().severe("CustomLayout not found: " + resource);
}
}
writer.write("}");
} | 3.68 |
hadoop_SplitCompressionInputStream_getAdjustedStart | /**
* After calling createInputStream, the values of start or end
* might change. So this method can be used to get the new value of start.
* @return The changed value of start
*/
public long getAdjustedStart() {
return start;
} | 3.68 |
hmily_TableMetaData_findColumnIndex | /**
* Find index of column.
*
* @param columnName column name
* @return index of column if found, otherwise -1
*/
public int findColumnIndex(final String columnName) {
for (int i = 0; i < columnNames.size(); i++) {
if (columnNames.get(i).equals(columnName)) {
return i;
}
}
return -1;
} | 3.68 |
morf_Function_now | /**
* Helper method to create an instance of the "now" SQL function that returns the
* current timestamp in UTC across all database platforms.
*
* @return an instance of a now function as a UTC timestamp.
*/
public static Function now() {
return new Function(FunctionType.NOW);
} | 3.68 |
framework_GridMultiSelect_deselect | /**
* Deselects the given item. If the item is not selected, does nothing.
*
* @param item
* the item to deselect, not null
*/
public void deselect(T item) {
model.deselect(item);
} | 3.68 |
morf_HumanReadableStatementHelper_generateColumnDefaultValueClause | /**
* Generates a clause for any default column value.
*
* @param definition the column definition.
* @return a string containing a readable version of the default value
*/
private static String generateColumnDefaultValueClause(final Column definition) {
if (StringUtils.isEmpty(definition.getDefaultValue())) {
return "";
}
if (NumberUtils.isNumber(definition.getDefaultValue())) {
return ", set to " + definition.getDefaultValue();
} else {
return ", set to " + generateLiteral(definition.getDefaultValue());
}
} | 3.68 |
hbase_Bytes_putDouble | /**
* Put a double value out to the specified byte array position as the IEEE 754 double format.
* @param bytes byte array
* @param offset offset to write to
* @param d value
* @return New offset into array <code>bytes</code>
*/
public static int putDouble(byte[] bytes, int offset, double d) {
return putLong(bytes, offset, Double.doubleToLongBits(d));
} | 3.68 |
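Because putDouble returns the advanced offset, consecutive writes chain naturally. A short usage sketch with the HBase Bytes utility (values are illustrative):

```java
byte[] buf = new byte[2 * Bytes.SIZEOF_DOUBLE];
int offset = 0;
offset = Bytes.putDouble(buf, offset, 3.14);   // writes 8 bytes, returns 8
offset = Bytes.putDouble(buf, offset, 2.71);   // writes the next 8 bytes, returns 16
double first = Bytes.toDouble(buf, 0);         // reads back 3.14
```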
hbase_FlushTableSubprocedure_cleanup | /**
* Cancel threads if they haven't finished.
*/
@Override
public void cleanup(Exception e) {
LOG.info("Aborting all flush region subprocedure task threads for '" + table + "' due to error",
e);
try {
taskManager.cancelTasks();
} catch (InterruptedException e1) {
Thread.currentThread().interrupt();
}
} | 3.68 |
flink_TaskEventDispatcher_subscribeToEvent | /**
* Subscribes a listener to this dispatcher for events on a partition.
*
* @param partitionId ID of the partition to subscribe for (must be registered via {@link
* #registerPartition(ResultPartitionID)} first!)
* @param eventListener the event listener to subscribe
* @param eventType event type to subscribe to
*/
public void subscribeToEvent(
ResultPartitionID partitionId,
EventListener<TaskEvent> eventListener,
Class<? extends TaskEvent> eventType) {
checkNotNull(partitionId);
checkNotNull(eventListener);
checkNotNull(eventType);
TaskEventHandler taskEventHandler;
synchronized (registeredHandlers) {
taskEventHandler = registeredHandlers.get(partitionId);
}
if (taskEventHandler == null) {
throw new IllegalStateException(
"Partition " + partitionId + " not registered at task event dispatcher.");
}
taskEventHandler.subscribe(eventListener, eventType);
} | 3.68 |
morf_AbstractSqlDialectTest_testCaseWithStrings | /**
* Test an update statement with case in it.
*/
@Test
public void testCaseWithStrings() {
CaseStatement enabledWhenAutoRunIsT =
new CaseStatement(new FieldLiteral("DISABLED"),
new WhenCondition(eq(new FieldReference("autorunBackgroundProcess"),
new FieldLiteral("Y")),
new FieldLiteral("ENABLED")));
UpdateStatement stmt =
new UpdateStatement(new TableReference("BackgroundProcess"))
.set(enabledWhenAutoRunIsT.as("targetState"));
String value1 = varCharCast("'Y'");
String value2 = varCharCast("'ENABLED'");
String value3 = varCharCast("'DISABLED'");
assertEquals("Update with case statement",
"UPDATE " + tableName("BackgroundProcess") + " SET targetState = CASE WHEN (autorunBackgroundProcess = " + stringLiteralPrefix() + value1 +") THEN " + stringLiteralPrefix() + value2 + " ELSE " + stringLiteralPrefix() + value3 + " END",
testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_SourceReaderContext_currentParallelism | /**
* Get the current parallelism of this Source.
*
* @return the parallelism of the Source.
*/
default int currentParallelism() {
throw new UnsupportedOperationException();
} | 3.68 |
querydsl_BeanPath_createDateTime | /**
* Create a new DateTime path
*
* @param <A>
* @param property property name
* @param type property type
* @return property path
*/
@SuppressWarnings("unchecked")
protected <A extends Comparable> DateTimePath<A> createDateTime(String property, Class<? super A> type) {
return add(new DateTimePath<A>((Class) type, forProperty(property)));
} | 3.68 |
framework_AbstractListing_serializeDeclarativeRepresentation | /**
* Serializes an {@code item} to a string for saving declarative format.
* <p>
* Default implementation delegates a call to {@code item.toString()}.
*
* @see #deserializeDeclarativeRepresentation(String)
*
* @param item
* a data item
* @return string representation of the {@code item}.
*/
protected String serializeDeclarativeRepresentation(T item) {
return item.toString();
} | 3.68 |
querydsl_QueryBase_where | /**
* Add the given filter conditions
*
* <p>Skips null arguments</p>
*
* @param o filter conditions to be added
* @return the current object
*/
public Q where(Predicate... o) {
return queryMixin.where(o);
} | 3.68 |
hbase_BitSetNode_getActiveMinProcId | // ========================================================================
// Min/Max Helpers
// ========================================================================
public long getActiveMinProcId() {
long minProcId = start;
for (int i = 0; i < deleted.length; ++i) {
if (deleted[i] == 0) {
return minProcId;
}
if (deleted[i] != WORD_MASK) {
for (int j = 0; j < BITS_PER_WORD; ++j) {
if ((deleted[i] & (1L << j)) == 0) {
return minProcId + j;
}
}
}
minProcId += BITS_PER_WORD;
}
return Procedure.NO_PROC_ID;
} | 3.68 |
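The word scan above reduces to a simple pattern: skip words whose bits are all set, then probe the first word that still has a zero bit. A self-contained sketch of that idea, with illustrative names rather than the HBase types:

```java
// Illustrative only: lowest id whose bit in "deleted" is still 0 (0 = live, 1 = deleted).
static long firstLiveId(long start, long[] deleted) {
    final int BITS_PER_WORD = 64;
    final long WORD_MASK = 0xFFFFFFFFFFFFFFFFL;
    long id = start;
    for (long word : deleted) {
        if (word != WORD_MASK) {                   // at least one live bit in this word
            for (int j = 0; j < BITS_PER_WORD; j++) {
                if ((word & (1L << j)) == 0) {
                    return id + j;
                }
            }
        }
        id += BITS_PER_WORD;
    }
    return -1;                                     // nothing live
}
```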
hadoop_S3ARemoteInputStream_close | /**
* Closes this stream and releases all acquired resources.
*
* @throws IOException if there is an IO error during this operation.
*/
@Override
public void close() throws IOException {
if (closed) {
return;
}
closed = true;
blockData = null;
reader.close();
reader = null;
remoteObject = null;
fpos.invalidate();
try {
client.close();
} finally {
streamStatistics.close();
}
client = null;
} | 3.68 |
framework_HeartbeatHandler_synchronizedHandleRequest | /**
* Handles a heartbeat request for the given session. Reads the GET
* parameter named {@link UIConstants#UI_ID_PARAMETER} to identify the UI.
* If the UI is found in the session, sets its
* {@link UI#getLastHeartbeatTimestamp() heartbeat timestamp} to the current
* time. Otherwise, writes an HTTP Not Found error to the response.
*/
@Override
public boolean synchronizedHandleRequest(VaadinSession session,
VaadinRequest request, VaadinResponse response) throws IOException {
UI ui = session.getService().findUI(request);
if (ui != null) {
ui.setLastHeartbeatTimestamp(System.currentTimeMillis());
// Ensure that the browser does not cache heartbeat responses.
// iOS 6 Safari requires this (#3226)
response.setHeader("Cache-Control", "no-cache");
// If Content-Type is not set, browsers assume text/html and may
// complain about the empty response body (#4167)
response.setHeader("Content-Type", "text/plain");
} else {
response.sendError(HttpServletResponse.SC_NOT_FOUND,
"UI not found");
}
return true;
} | 3.68 |
pulsar_ManagedLedgerConfig_setPassword | /**
* @param password
* the password to set
*/
public ManagedLedgerConfig setPassword(String password) {
this.password = password.getBytes(StandardCharsets.UTF_8);
return this;
} | 3.68 |
morf_ConnectionResourcesBean_getDatabaseType | /**
* @see org.alfasoftware.morf.jdbc.ConnectionResources#getDatabaseType()
*/
@Override
public String getDatabaseType() {
return databaseType;
} | 3.68 |
hadoop_CachingBlockManager_requestPrefetch | /**
* Requests optional prefetching of the given block.
* The block is prefetched only if we can acquire a free buffer.
*
* @throws IllegalArgumentException if blockNumber is negative.
*/
@Override
public void requestPrefetch(int blockNumber) {
checkNotNegative(blockNumber, "blockNumber");
if (closed) {
return;
}
// We initiate a prefetch only if we can acquire a buffer from the shared pool.
BufferData data = bufferPool.tryAcquire(blockNumber);
if (data == null) {
return;
}
// Opportunistic check without locking.
if (!data.stateEqualsOneOf(BufferData.State.BLANK)) {
// The block is ready or being prefetched/cached.
return;
}
synchronized (data) {
// Reconfirm state after locking.
if (!data.stateEqualsOneOf(BufferData.State.BLANK)) {
// The block is ready or being prefetched/cached.
return;
}
BlockOperations.Operation op = ops.requestPrefetch(blockNumber);
PrefetchTask prefetchTask = new PrefetchTask(data, this, Instant.now());
Future<Void> prefetchFuture = futurePool.executeFunction(prefetchTask);
data.setPrefetch(prefetchFuture);
ops.end(op);
}
} | 3.68 |
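The method above uses a cheap unsynchronized state check followed by a re-check under the lock. Reduced to a standalone sketch; the State enum, getState/setState, and startAsyncWork names are illustrative, not the real prefetch API:

```java
// Illustrative double-checked state transition: only one caller starts the work.
if (data.getState() != State.BLANK) {
    return;                        // fast path: already prefetched or in progress
}
synchronized (data) {
    if (data.getState() != State.BLANK) {
        return;                    // another thread won the race after our first check
    }
    data.setState(State.PREFETCHING);
    startAsyncWork(data);          // hypothetical helper submitting the prefetch task
}
```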
flink_ParserImpl_parse | /**
* Parses a statement. It first uses the {@link ExtendedParser}; if the {@link ExtendedParser}
* fails to parse the statement, it falls back to the {@link CalciteParser}.
*
* @param statement input statement.
* @return parsed operations.
*/
@Override
public List<Operation> parse(String statement) {
CalciteParser parser = calciteParserSupplier.get();
FlinkPlannerImpl planner = validatorSupplier.get();
Optional<Operation> command = EXTENDED_PARSER.parse(statement);
if (command.isPresent()) {
return Collections.singletonList(command.get());
}
// parse the sql query
// use parseSqlList here because we need to support statement end with ';' in sql client.
SqlNodeList sqlNodeList = parser.parseSqlList(statement);
List<SqlNode> parsed = sqlNodeList.getList();
Preconditions.checkArgument(parsed.size() == 1, "only single statement supported");
return Collections.singletonList(
SqlNodeToOperationConversion.convert(planner, catalogManager, parsed.get(0))
.orElseThrow(() -> new TableException("Unsupported query: " + statement)));
} | 3.68 |
pulsar_ConsumerConfiguration_setSubscriptionInitialPosition | /**
* @param subscriptionInitialPosition the initial position at which to set
* the cursor when subscribing to the topic for the first time.
* Default is {@value InitialPosition.Latest}
*/
public ConsumerConfiguration setSubscriptionInitialPosition(
SubscriptionInitialPosition subscriptionInitialPosition) {
conf.setSubscriptionInitialPosition(subscriptionInitialPosition);
return this;
} | 3.68 |
hadoop_RoleModel_policy | /**
* From a set of statements, create a policy.
* @param statements statements
* @return the policy
*/
public static Policy policy(final List<RoleModel.Statement> statements) {
return new Policy(statements);
} | 3.68 |
querydsl_Expressions_asBoolean | /**
* Create a new BooleanExpression
*
* @param value boolean
* @return new BooleanExpression
*/
public static BooleanExpression asBoolean(boolean value) {
return asBoolean(constant(value));
} | 3.68 |
hudi_HoodieTable_getRollbackTimeline | /**
* Get rollback timeline.
*/
public HoodieTimeline getRollbackTimeline() {
return getActiveTimeline().getRollbackTimeline();
} | 3.68 |
flink_DeltaIterationBase_setBroadcastVariable | /**
* The DeltaIteration meta operator cannot have broadcast inputs. This method always throws an
* exception.
*
* @param name Ignored.
* @param root Ignored.
*/
public void setBroadcastVariable(String name, Operator<?> root) {
throw new UnsupportedOperationException(
"The DeltaIteration meta operator cannot have broadcast inputs.");
} | 3.68 |
hbase_CatalogFamilyFormat_getMergeRegions | /**
* Returns Deserialized regioninfo values taken from column values that match the regex
* 'info:merge.*' in array of <code>cells</code>.
*/
@Nullable
public static List<RegionInfo> getMergeRegions(Cell[] cells) {
Map<String, RegionInfo> mergeRegionsWithName = getMergeRegionsWithName(cells);
return (mergeRegionsWithName == null) ? null : new ArrayList<>(mergeRegionsWithName.values());
} | 3.68 |
pulsar_TopicList_filterTopics | // get topics that match 'topicsPattern' from original topics list
// return result should contain only topic names, without partition part
public static List<String> filterTopics(List<String> original, String regex) {
Pattern topicsPattern = Pattern.compile(regex);
return filterTopics(original, topicsPattern);
} | 3.68 |
morf_InlineTableUpgrader_postUpgrade | /**
* Perform clean-up after the main upgrade is completed.
*/
public void postUpgrade() {
sqlStatementWriter.writeSql(sqlDialect.truncateTableStatements(idTable));
sqlStatementWriter.writeSql(sqlDialect.dropStatements(idTable));
} | 3.68 |
hudi_HoodieMetaSyncOperations_addPartitionsToTable | /**
* Add partitions to the table in metastore.
*/
default void addPartitionsToTable(String tableName, List<String> partitionsToAdd) {
} | 3.68 |
flink_JobVertex_getParallelism | /**
* Gets the parallelism of the task.
*
* @return The parallelism of the task.
*/
public int getParallelism() {
return parallelism;
} | 3.68 |
hbase_MiniHBaseCluster_suspendRegionServer | /**
* Suspend the specified region server
* @param serverNumber Used as index into a list.
*/
public JVMClusterUtil.RegionServerThread suspendRegionServer(int serverNumber) {
JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber);
LOG.info("Suspending {}", server.toString());
server.suspend();
return server;
} | 3.68 |
flink_KeyMap_iterator | /**
* Creates an iterator over the entries of this map.
*
* @return An iterator over the entries of this map.
*/
@Override
public Iterator<Entry<K, V>> iterator() {
return new Iterator<Entry<K, V>>() {
private final Entry<K, V>[] tab = KeyMap.this.table;
private Entry<K, V> nextEntry;
private int nextPos = 0;
@Override
public boolean hasNext() {
if (nextEntry != null) {
return true;
} else {
while (nextPos < tab.length) {
Entry<K, V> e = tab[nextPos++];
if (e != null) {
nextEntry = e;
return true;
}
}
return false;
}
}
@Override
public Entry<K, V> next() {
if (nextEntry != null || hasNext()) {
Entry<K, V> e = nextEntry;
nextEntry = nextEntry.next;
return e;
} else {
throw new NoSuchElementException();
}
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
} | 3.68 |
querydsl_AbstractSQLQuery_addListener | /**
* Add a listener
*
* @param listener listener to add
*/
public void addListener(SQLListener listener) {
listeners.add(listener);
} | 3.68 |
flink_MailboxProcessor_reportThrowable | /**
* Reports a throwable for rethrowing from the mailbox thread. This will clear and cancel all
* other pending mails.
*
* @param throwable to report by rethrowing from the mailbox loop.
*/
public void reportThrowable(Throwable throwable) {
sendControlMail(
() -> {
if (throwable instanceof Exception) {
throw (Exception) throwable;
} else if (throwable instanceof Error) {
throw (Error) throwable;
} else {
throw WrappingRuntimeException.wrapIfNecessary(throwable);
}
},
"Report throwable %s",
throwable);
} | 3.68 |
framework_VSlider_setUpdateValueOnClick | /**
* Specifies whether or not click event should update the Slider's value.
*
* @param updateValueOnClick
* {@code true} if a click should update slider's value,
* {@code false} otherwise
*/
public void setUpdateValueOnClick(boolean updateValueOnClick) {
this.updateValueOnClick = updateValueOnClick;
} | 3.68 |
hadoop_TypedBytesInput_readVectorHeader | /**
* Reads the header following a <code>Type.VECTOR</code> code.
* @return the number of elements in the vector
* @throws IOException
*/
public int readVectorHeader() throws IOException {
return in.readInt();
} | 3.68 |
dubbo_ReactorClientCalls_oneToOne | /**
* Implements a unary -> unary call as Mono -> Mono
*
* @param invoker invoker
* @param monoRequest the mono with request
* @param methodDescriptor the method descriptor
* @return the mono with response
*/
public static <TRequest, TResponse, TInvoker> Mono<TResponse> oneToOne(
Invoker<TInvoker> invoker, Mono<TRequest> monoRequest, StubMethodDescriptor methodDescriptor) {
try {
return Mono.create(emitter -> monoRequest.subscribe(
request -> StubInvocationUtil.unaryCall(
invoker, methodDescriptor, request, new StreamObserver<TResponse>() {
@Override
public void onNext(TResponse tResponse) {
emitter.success(tResponse);
}
@Override
public void onError(Throwable throwable) {
emitter.error(throwable);
}
@Override
public void onCompleted() {
// Do nothing
}
}),
emitter::error));
} catch (Throwable throwable) {
return Mono.error(throwable);
}
} | 3.68 |
flink_HadoopDelegationTokenConverter_serialize | /** Serializes delegation tokens. */
public static byte[] serialize(Credentials credentials) throws IOException {
try (DataOutputBuffer dob = new DataOutputBuffer()) {
credentials.writeTokenStorageToStream(dob);
return dob.getData();
}
} | 3.68 |
pulsar_AuthenticationState_isExpired | /**
* If the authentication state is expired, it will force the connection to be re-authenticated.
*/
default boolean isExpired() {
return false;
} | 3.68 |
cron-utils_FieldConstraintsBuilder_addLWSupport | /**
* Adds LW support.
*
* @return same FieldConstraintsBuilder instance
*/
public FieldConstraintsBuilder addLWSupport() {
specialChars.add(SpecialChar.LW);
return this;
} | 3.68 |
hadoop_ApplicationConstants_$$ | /**
* Expand the environment variable in platform-agnostic syntax. The
* parameter expansion marker "{{VAR}}" will be replaced with real parameter
* expansion marker ('%' for Windows and '$' for Linux) by NodeManager on
* container launch. For example: {{VAR}} will be replaced as $VAR on Linux,
* and %VAR% on Windows.
* @return expanded environment variable.
*/
@Public
@Unstable
public String $$() {
return PARAMETER_EXPANSION_LEFT + variable + PARAMETER_EXPANSION_RIGHT;
} | 3.68 |
flink_ExtractionUtils_createRawType | /** Creates a raw data type. */
@SuppressWarnings({"unchecked", "rawtypes"})
static DataType createRawType(
DataTypeFactory typeFactory,
@Nullable Class<? extends TypeSerializer<?>> rawSerializer,
@Nullable Class<?> conversionClass) {
if (rawSerializer != null) {
return DataTypes.RAW(
(Class) createConversionClass(conversionClass),
instantiateRawSerializer(rawSerializer));
}
return typeFactory.createRawDataType(createConversionClass(conversionClass));
} | 3.68 |
flink_CatalogTable_fromProperties | /**
* Creates an instance of {@link CatalogTable} from a map of string properties that were
* previously created with {@link ResolvedCatalogTable#toProperties()}.
*
* <p>Note that the serialization and deserialization of catalog tables are not symmetric. The
* framework will resolve functions and perform other validation tasks. A catalog implementation
* must not deal with this during a read operation.
*
* @param properties serialized version of a {@link CatalogTable} that includes schema,
* partition keys, and connector options
*/
static CatalogTable fromProperties(Map<String, String> properties) {
return CatalogPropertiesUtil.deserializeCatalogTable(properties);
} | 3.68 |
flink_AbstractUdfOperator_setBroadcastVariables | /**
* Clears all previous broadcast inputs and binds the given inputs as broadcast variables of
* this operator.
*
* @param inputs The {@code<name, root>} pairs to be set as broadcast inputs.
*/
public <T> void setBroadcastVariables(Map<String, Operator<T>> inputs) {
this.broadcastInputs.clear();
this.broadcastInputs.putAll(inputs);
} | 3.68 |
hudi_TableCommand_fetchTableSchema | /**
* Fetches table schema in avro format.
*/
@ShellMethod(key = "fetch table schema", value = "Fetches latest table schema")
public String fetchTableSchema(
@ShellOption(value = {"--outputFilePath"}, defaultValue = ShellOption.NULL,
help = "File path to write schema") final String outputFilePath) throws Exception {
HoodieTableMetaClient client = HoodieCLI.getTableMetaClient();
TableSchemaResolver tableSchemaResolver = new TableSchemaResolver(client);
Schema schema = tableSchemaResolver.getTableAvroSchema();
if (outputFilePath != null) {
LOG.info("Latest table schema : " + schema.toString(true));
writeToFile(outputFilePath, schema.toString(true));
return String.format("Latest table schema written to %s", outputFilePath);
} else {
return String.format("Latest table schema %s", schema.toString(true));
}
} | 3.68 |
hbase_ZKProcedureUtil_getAbortNode | /**
* Get the full znode path for the node used by the coordinator or member to trigger an abort of
* the global barrier acquisition or execution in subprocedures.
* @param controller controller running the procedure
* @param opInstanceName name of the running procedure instance (not the procedure description).
* @return full znode path to the abort znode
*/
public static String getAbortNode(ZKProcedureUtil controller, String opInstanceName) {
return ZNodePaths.joinZNode(controller.abortZnode, opInstanceName);
} | 3.68 |
hbase_FSVisitor_visitTableStoreFiles | /**
* Iterate over the table store files
* @param fs {@link FileSystem}
* @param tableDir {@link Path} to the table directory
* @param visitor callback object to get the store files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir,
final StoreFileVisitor visitor) throws IOException {
List<FileStatus> regions =
FSUtils.listStatusWithStatusFilter(fs, tableDir, new FSUtils.RegionDirFilter(fs));
if (regions == null) {
if (LOG.isTraceEnabled()) {
LOG.trace("No regions under directory:" + tableDir);
}
return;
}
for (FileStatus region : regions) {
visitRegionStoreFiles(fs, region.getPath(), visitor);
}
} | 3.68 |
hbase_CompactingMemStore_debug | // debug method
public void debug() {
String msg = "active size=" + getActive().getDataSize();
msg += " allow compaction is " + (allowCompaction.get() ? "true" : "false");
msg +=
" inMemoryCompactionInProgress is " + (inMemoryCompactionInProgress.get() ? "true" : "false");
LOG.debug(msg);
} | 3.68 |
flink_DecimalDataUtils_sign | /**
* SQL <code>SIGN</code> operator applied to BigDecimal values. Preserves precision and scale.
*/
public static DecimalData sign(DecimalData b0) {
if (b0.isCompact()) {
return new DecimalData(b0.precision, b0.scale, signum(b0) * POW10[b0.scale], null);
} else {
return fromBigDecimal(BigDecimal.valueOf(signum(b0)), b0.precision, b0.scale);
}
} | 3.68 |
flink_StringValueUtils_setStringToTokenize | /**
* Sets the string to be tokenized and resets the state of the tokenizer.
*
* @param string The string value to be tokenized.
*/
public void setStringToTokenize(StringValue string) {
this.toTokenize = string;
this.pos = 0;
this.limit = string.length();
} | 3.68 |
hbase_MemStoreLABImpl_tryRetireChunk | /**
* Try to retire the current chunk if it is still <code>c</code>. Postcondition is that
* curChunk.get() != c
* @param c the chunk to retire
*/
private void tryRetireChunk(Chunk c) {
currChunk.compareAndSet(c, null);
// If the CAS succeeds, that means that we won the race
// to retire the chunk. We could use this opportunity to
// update metrics on external fragmentation.
//
// If the CAS fails, that means that someone else already
// retired the chunk for us.
} | 3.68 |
shardingsphere-elasticjob_JobFacade_clearMisfire | /**
* Clear misfire flag.
*
* @param shardingItems sharding items to be cleared misfire flag
*/
public void clearMisfire(final Collection<Integer> shardingItems) {
executionService.clearMisfire(shardingItems);
} | 3.68 |
hibernate-validator_CollectionHelper_getInitialCapacityFromExpectedSize | /**
* As the default loadFactor is 0.75, we need to calculate the initial capacity from the expected size to avoid
* resizing the collection when we populate the collection with all the initial elements. We use a calculation
* similar to what is done in {@link HashMap#putAll(Map)}.
*
* @param expectedSize the expected size of the collection
* @return the initial capacity of the collection
*/
private static int getInitialCapacityFromExpectedSize(int expectedSize) {
if ( expectedSize < 3 ) {
return expectedSize + 1;
}
return (int) ( (float) expectedSize / 0.75f + 1.0f );
} | 3.68 |
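A quick check of the arithmetic (a sketch, not part of the library): for an expected size of 100 the formula yields 134, whose resize threshold stays above 100, so populating the map never triggers a rehash.

```java
int expectedSize = 100;
int initialCapacity = expectedSize < 3
    ? expectedSize + 1
    : (int) ((float) expectedSize / 0.75f + 1.0f);   // (int) (133.33 + 1.0) = 134
Map<String, String> map = new HashMap<>(initialCapacity);
// 134 * 0.75 is about 100.5 > 100, so inserting 100 entries stays under the resize threshold.
```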
hadoop_HsController_index | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#index()
*/
@Override
public void index() {
setTitle("JobHistory");
} | 3.68 |
hbase_MasterCoprocessorHost_postRollBackSplitRegionAction | /**
* Invoked just after the rollback of a failed split
* @param user the user
*/
public void postRollBackSplitRegionAction(final User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@Override
public void call(MasterObserver observer) throws IOException {
observer.postRollBackSplitRegionAction(this);
}
});
} | 3.68 |
morf_SchemaChangeSequence_addColumn | /**
* @see org.alfasoftware.morf.upgrade.SchemaEditor#addColumn(java.lang.String, org.alfasoftware.morf.metadata.Column)
*/
@Override
public void addColumn(String tableName, Column definition) {
AddColumn addColumn = new AddColumn(tableName, definition);
visitor.visit(addColumn);
schemaAndDataChangeVisitor.visit(addColumn);
} | 3.68 |
flink_TableResultImpl_setPrintStyle | /** Specifies print style. Default is {@link TableauStyle} with max integer column width. */
public Builder setPrintStyle(PrintStyle printStyle) {
Preconditions.checkNotNull(printStyle, "printStyle should not be null");
this.printStyle = printStyle;
return this;
} | 3.68 |
hadoop_WebAppProxyServer_startServer | /**
* Start proxy server.
*
* @return proxy server instance.
*/
protected static WebAppProxyServer startServer(Configuration configuration)
throws Exception {
WebAppProxyServer proxy = new WebAppProxyServer();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(proxy), SHUTDOWN_HOOK_PRIORITY);
proxy.init(configuration);
proxy.start();
return proxy;
} | 3.68 |
hbase_EncodedDataBlock_getEncodedCompressedSize | /**
* Estimate size after second stage of compression (e.g. LZO).
* @param comprAlgo compression algorithm to be used for compression
* @param compressor compressor corresponding to the given compression algorithm
* @return Size after second stage of compression.
*/
public int getEncodedCompressedSize(Algorithm comprAlgo, Compressor compressor)
throws IOException {
byte[] compressedBytes = getEncodedData();
return getCompressedSize(comprAlgo, compressor, compressedBytes, 0, compressedBytes.length);
} | 3.68 |
zxing_BitArray_isRange | /**
* Efficient method to check if a range of bits is set, or not set.
*
* @param start start of range, inclusive.
* @param end end of range, exclusive
* @param value if true, checks that bits in range are set, otherwise checks that they are not set
* @return true iff all bits are set or not set in range, according to value argument
* @throws IllegalArgumentException if end is less than start or the range is not contained in the array
*/
public boolean isRange(int start, int end, boolean value) {
if (end < start || start < 0 || end > size) {
throw new IllegalArgumentException();
}
if (end == start) {
return true; // empty range matches
}
end--; // will be easier to treat this as the last actually set bit -- inclusive
int firstInt = start / 32;
int lastInt = end / 32;
for (int i = firstInt; i <= lastInt; i++) {
int firstBit = i > firstInt ? 0 : start & 0x1F;
int lastBit = i < lastInt ? 31 : end & 0x1F;
// Ones from firstBit to lastBit, inclusive
int mask = (2 << lastBit) - (1 << firstBit);
// Return false if we're looking for 1s and the masked bits[i] isn't all 1s (that is,
// equals the mask), or we're looking for 0s and the masked portion is not all 0s
if ((bits[i] & mask) != (value ? mask : 0)) {
return false;
}
}
return true;
} | 3.68 |
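To see the mask construction concretely, a worked example for a range that falls inside a single 32-bit word (not library code):

```java
// Equivalent of isRange(3, 7, true) when the range stays inside one word:
// check that bits 3..6 (inclusive) are all set.
int start = 3;
int endInclusive = 6;                                // the "end--" in the method above
int mask = (2 << endInclusive) - (1 << start);       // 128 - 8 = 120 = 0b0111_1000
int word = 0b0111_1000;                              // bits 3..6 set
boolean allSet = (word & mask) == mask;              // true
```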
hadoop_LocatedFileStatus_equals | /** Compare if this object is equal to another object
* @param o the object to be compared.
* @return true if two file status has the same path name; false if not.
*/
@Override
public boolean equals(Object o) {
return super.equals(o);
} | 3.68 |
hudi_HoodieTableMetaClient_getMarkerFolderPath | /**
* Returns Marker folder path.
*
* @param instantTs Instant Timestamp
* @return the marker folder path for the given instant
*/
public String getMarkerFolderPath(String instantTs) {
return String.format("%s%s%s", getTempFolderPath(), Path.SEPARATOR, instantTs);
} | 3.68 |
flink_ExtractionUtils_collectStructuredFields | /** Returns the fields of a class for a {@link StructuredType}. */
static List<Field> collectStructuredFields(Class<?> clazz) {
final List<Field> fields = new ArrayList<>();
while (clazz != Object.class) {
final Field[] declaredFields = clazz.getDeclaredFields();
Stream.of(declaredFields)
.filter(
field -> {
final int m = field.getModifiers();
return !Modifier.isStatic(m) && !Modifier.isTransient(m);
})
.forEach(fields::add);
clazz = clazz.getSuperclass();
}
return fields;
} | 3.68 |
flink_SSLUtils_createInternalServerSSLEngineFactory | /** Creates an SSLEngineFactory to be used by internal communication server endpoints. */
public static SSLHandlerFactory createInternalServerSSLEngineFactory(final Configuration config)
throws Exception {
SslContext sslContext = createInternalNettySSLContext(config, false);
if (sslContext == null) {
throw new IllegalConfigurationException(
"SSL is not enabled for internal communication.");
}
return new SSLHandlerFactory(
sslContext,
config.getInteger(SecurityOptions.SSL_INTERNAL_HANDSHAKE_TIMEOUT),
config.getInteger(SecurityOptions.SSL_INTERNAL_CLOSE_NOTIFY_FLUSH_TIMEOUT));
} | 3.68 |
pulsar_Schema_decode | /**
* Decode a ByteBuffer into an object using a given version. <br/>
*
* @param data
* the ByteBuffer to decode
* @param schemaVersion
* the schema version to decode the object. null indicates using latest version.
* @return the deserialized object
*/
default T decode(ByteBuffer data, byte[] schemaVersion) {
if (data == null) {
return null;
}
return decode(getBytes(data), schemaVersion);
} | 3.68 |
framework_ApplicationConfiguration_isWidgetsetVersionSent | /**
* Checks whether the widget set version has been sent to the server. It is
* sent in the first UIDL request.
*
* @return <code>true</code> if the widget set version has already been sent
*/
public boolean isWidgetsetVersionSent() {
return widgetsetVersionSent;
} | 3.68 |
dubbo_Bytes_getMD5 | /**
* get md5.
*
* @param is input stream.
* @return MD5 byte array.
*/
public static byte[] getMD5(InputStream is) throws IOException {
return getMD5(is, 1024 * 8);
} | 3.68 |
hbase_Scan_setStartStopRowForPrefixScan | /**
* <p>
* Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey
* starts with the specified prefix.
* </p>
* <p>
* This is a utility method that converts the desired rowPrefix into the appropriate values for
* the startRow and stopRow to achieve the desired result.
* </p>
* <p>
* This can safely be used in combination with setFilter.
* </p>
* <p>
* <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such
* a combination will yield unexpected and even undefined results.
* </p>
* @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
*/
public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) {
if (rowPrefix == null) {
withStartRow(HConstants.EMPTY_START_ROW);
withStopRow(HConstants.EMPTY_END_ROW);
} else {
this.withStartRow(rowPrefix);
this.withStopRow(ClientUtil.calculateTheClosestNextRowKeyForPrefix(rowPrefix));
}
return this;
} | 3.68 |
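A short usage sketch with the HBase client API; the open connection, table name, and row prefix are illustrative:

```java
// Scan only the rows whose key starts with "user_123|".
Scan scan = new Scan().setStartStopRowForPrefixScan(Bytes.toBytes("user_123|"));
try (Table table = connection.getTable(TableName.valueOf("users"));   // assumes an existing Connection
     ResultScanner scanner = table.getScanner(scan)) {
    for (Result result : scanner) {
        // process each matching row
    }
}
```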
open-banking-gateway_ExpirableDataConfig_deadLetterQueue | /**
* Expirable process results: they stay alive for some time and are removed if no handler consumes them.
*/
@Bean
Map<String, InternalProcessResult> deadLetterQueue(@Qualifier(PROTOCOL_CACHE_BUILDER) CacheBuilder builder) {
return builder.build().asMap();
} | 3.68 |
flink_AbstractBytesHashMap_skipKey | /** @throws IOException when invalid memory address visited. */
void skipKey() throws IOException {
keySerializer.skipRecordFromPages(inView);
} | 3.68 |
hadoop_AMWebServices_checkAccess | /**
* check for job access.
*
* @param job
* the job that is being accessed
*/
void checkAccess(Job job, HttpServletRequest request) {
if (!hasAccess(job, request)) {
throw new WebApplicationException(Status.UNAUTHORIZED);
}
}
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 } | 3.68 |
hbase_ServerListener_serverRemoved | /**
* The server was removed from the cluster.
* @param serverName The remote servers name.
*/
default void serverRemoved(final ServerName serverName) {
} | 3.68 |
flink_SqlWindowTableFunction_checkTableAndDescriptorOperands | /**
* Checks whether the heading operands are in the form {@code (ROW, DESCRIPTOR, DESCRIPTOR
* ..., other params)}, returning whether successful, and throwing if any columns are not
* found.
*
* @param callBinding The call binding
* @param descriptorCount The number of descriptors following the first operand (e.g. the
* table)
* @return true if validation passes; throws if any columns are not found
*/
boolean checkTableAndDescriptorOperands(SqlCallBinding callBinding, int descriptorCount) {
final SqlNode operand0 = callBinding.operand(0);
final SqlValidator validator = callBinding.getValidator();
final RelDataType type = validator.getValidatedNodeType(operand0);
if (type.getSqlTypeName() != SqlTypeName.ROW) {
return false;
}
for (int i = 1; i < descriptorCount + 1; i++) {
final SqlNode operand = callBinding.operand(i);
if (operand.getKind() != SqlKind.DESCRIPTOR) {
return false;
}
validateColumnNames(
validator, type.getFieldNames(), ((SqlCall) operand).getOperandList());
}
return true;
} | 3.68 |
morf_InsertStatementBuilder_getFieldDefaults | /**
* Gets the field defaults that should be used when inserting new fields.
*
* @return a map of field names to field default values to use during this insert.
*/
Map<String, AliasedField> getFieldDefaults() {
return fieldDefaults;
} | 3.68 |
zxing_DecodeHintManager_splitQuery | /**
* <p>Split a query string into a list of name-value pairs.</p>
*
* <p>This is an alternative to the {@link Uri#getQueryParameterNames()} and
* {@link Uri#getQueryParameters(String)}, which are quirky and not suitable
* for exist-only Uri parameters.</p>
*
* <p>This method ignores multiple parameters with the same name and returns the
* first one only. This is technically incorrect, but should be acceptable due
* to the method of processing Hints: no multiple values for a hint.</p>
*
* @param query query to split
* @return name-value pairs
*/
private static Map<String,String> splitQuery(String query) {
Map<String,String> map = new HashMap<>();
int pos = 0;
while (pos < query.length()) {
if (query.charAt(pos) == '&') {
// Skip consecutive ampersand separators.
pos ++;
continue;
}
int amp = query.indexOf('&', pos);
int equ = query.indexOf('=', pos);
if (amp < 0) {
// This is the last element in the query, no more ampersand elements.
String name;
String text;
if (equ < 0) {
// No equal sign
name = query.substring(pos);
name = name.replace('+', ' '); // Preemptively decode +
name = Uri.decode(name);
text = "";
} else {
// Split name and text.
name = query.substring(pos, equ);
name = name.replace('+', ' '); // Preemptively decode +
name = Uri.decode(name);
text = query.substring(equ + 1);
text = text.replace('+', ' '); // Preemptively decode +
text = Uri.decode(text);
}
if (!map.containsKey(name)) {
map.put(name, text);
}
break;
}
if (equ < 0 || equ > amp) {
// No equal sign until the &: this is a simple parameter with no value.
String name = query.substring(pos, amp);
name = name.replace('+', ' '); // Preemptively decode +
name = Uri.decode(name);
if (!map.containsKey(name)) {
map.put(name, "");
}
pos = amp + 1;
continue;
}
String name = query.substring(pos, equ);
name = name.replace('+', ' '); // Preemptively decode +
name = Uri.decode(name);
String text = query.substring(equ + 1, amp);
text = text.replace('+', ' '); // Preemptively decode +
text = Uri.decode(text);
if (!map.containsKey(name)) {
map.put(name, text);
}
pos = amp + 1;
}
return map;
} | 3.68 |
MagicPlugin_MapController_resetAll | /**
* Resets all internal data.
*
* <p>Can be called prior to save() to permanently delete all map images.
* Can also be called prior to load() to load a fresh config file.
*/
public void resetAll() {
for (URLMap map : keyMap.values()) {
map.reset();
}
} | 3.68 |
hbase_RequestConverter_buildIsSplitOrMergeEnabledRequest | /**
* Creates a protocol buffer IsSplitOrMergeEnabledRequest
* @param switchType see {@link org.apache.hadoop.hbase.client.MasterSwitchType}
* @return an IsSplitOrMergeEnabledRequest
*/
public static IsSplitOrMergeEnabledRequest
buildIsSplitOrMergeEnabledRequest(MasterSwitchType switchType) {
IsSplitOrMergeEnabledRequest.Builder builder = IsSplitOrMergeEnabledRequest.newBuilder();
builder.setSwitchType(convert(switchType));
return builder.build();
} | 3.68 |
hbase_RequestConverter_buildCompactRegionRequest | /**
* Create a CompactRegionRequest for a given region name
* @param regionName the name of the region to get info
* @param major indicator if it is a major compaction
* @param columnFamily the column family to compact, or null to compact all families
* @return a CompactRegionRequest
*/
public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major,
byte[] columnFamily) {
CompactRegionRequest.Builder builder = CompactRegionRequest.newBuilder();
RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
builder.setRegion(region);
builder.setMajor(major);
if (columnFamily != null) {
builder.setFamily(UnsafeByteOperations.unsafeWrap(columnFamily));
}
return builder.build();
} | 3.68 |
framework_PointerEvent_getWidth | /**
* Gets the width of the contact geometry of the pointer in CSS pixels.
*
* @return width in CSS pixels
*/
public final int getWidth() {
return getWidth(getNativeEvent());
} | 3.68 |
hbase_AvlUtil_getLast | /**
* Return the last node of the tree.
* @param root the current root of the tree
* @return the last (max) node of the tree
*/
public static <TNode extends AvlNode> TNode getLast(TNode root) {
if (root != null) {
while (root.avlRight != null) {
root = (TNode) root.avlRight;
}
}
return root;
} | 3.68 |
framework_SettingsView_hideOrShowButtons | // Hide or show buttons depending on whether date is modified or not
private void hideOrShowButtons() {
apply.setVisible(date.isModified());
cancel.setVisible(date.isModified());
} | 3.68 |
flink_RecoverableMultiPartUploadImpl_uploadPart | /**
* Adds a part to the uploads without any size limitations.
*
* <p>This method is non-blocking and does not wait for the part upload to complete.
*
* @param file The file with the part data.
* @throws IOException If this method throws an exception, the RecoverableS3MultiPartUpload
* should not be used any more, but recovered instead.
*/
@Override
public void uploadPart(RefCountedFSOutputStream file) throws IOException {
// this is to guarantee that nobody is
// writing to the file we are uploading.
checkState(file.isClosed());
final CompletableFuture<PartETag> future = new CompletableFuture<>();
uploadsInProgress.add(future);
final long partLength = file.getPos();
currentUploadInfo.registerNewPart(partLength);
file.retain(); // keep the file while the async upload still runs
uploadThreadPool.execute(new UploadTask(s3AccessHelper, currentUploadInfo, file, future));
} | 3.68 |
druid_PropertiesUtils_loadNameList | /**
* Picks the names from JDBC url properties: for a property such as xxx.url, xxx is the name.
*/
public static List<String> loadNameList(Properties properties, String propertyPrefix) {
List<String> nameList = new ArrayList<String>();
Set<String> names = new HashSet<String>();
for (String n : properties.stringPropertyNames()) {
if (propertyPrefix != null && !propertyPrefix.isEmpty()
&& !n.startsWith(propertyPrefix)) {
continue;
}
if (n.endsWith(".url")) {
names.add(n.split("\\.url")[0]);
}
}
if (!names.isEmpty()) {
nameList.addAll(names);
}
return nameList;
} | 3.68 |
flink_ElementTriggers_every | /** Creates a new trigger that triggers on receiving of every element. */
public static <W extends Window> EveryElement<W> every() {
return new EveryElement<>();
} | 3.68 |
querydsl_SQLExpressions_covarPop | /**
* COVAR_POP returns the population covariance of a set of number pairs.
*
* @param expr1 first arg
* @param expr2 second arg
* @return covar_pop(expr1, expr2)
*/
public static WindowOver<Double> covarPop(Expression<? extends Number> expr1, Expression<? extends Number> expr2) {
return new WindowOver<Double>(Double.class, SQLOps.COVARPOP, expr1, expr2);
} | 3.68 |
flink_FailedCheckpointStats_getEndToEndDuration | /** Returns the end to end duration until the checkpoint failure. */
@Override
public long getEndToEndDuration() {
return Math.max(0, failureTimestamp - triggerTimestamp);
} | 3.68 |
hadoop_JobTokenSecretManager_computeHash | /**
* Compute the HMAC hash of the message using the key
* @param msg the message to hash
* @param key the key to use
* @return the computed hash
*/
public static byte[] computeHash(byte[] msg, SecretKey key) {
return createPassword(msg, key);
} | 3.68 |
hbase_RegionServerObserver_postReplicateLogEntries | /**
* This will be called after executing replication request to shipping log entries.
* @param ctx the environment to interact with the framework and region server.
* @deprecated As of release 2.0.0 without any replacement. This is maintained for internal usage
* by AccessController. Do not use these hooks in custom co-processors.
*/
@Deprecated
default void postReplicateLogEntries(
final ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
} | 3.68 |
framework_FileDownloaderConnector_trigger | /**
* Called when the download should start.
*
* @since 8.4
*/
@Override
protected void trigger() {
final String url = getResourceUrl("dl");
if (url != null && !url.isEmpty()) {
BrowserInfo browser = BrowserInfo.get();
if (browser.isIOS()) {
Window.open(url, "_blank", "");
} else {
if (iframe != null) {
// make sure it is not on dom tree already, might start
// multiple downloads at once
iframe.removeFromParent();
}
iframe = Document.get().createIFrameElement();
Style style = iframe.getStyle();
style.setVisibility(Visibility.HIDDEN);
style.setHeight(0, Unit.PX);
style.setWidth(0, Unit.PX);
iframe.setFrameBorder(0);
iframe.setTabIndex(-1);
iframe.setSrc(url);
RootPanel.getBodyElement().appendChild(iframe);
}
}
} | 3.68 |
flink_StreamExecutionEnvironment_getExecutionEnvironment | /**
* Creates an execution environment that represents the context in which the program is
* currently executed. If the program is invoked standalone, this method returns a local
* execution environment, as returned by {@link #createLocalEnvironment(Configuration)}.
*
* <p>When executed from the command line the given configuration is stacked on top of the
* global configuration which comes from the {@code flink-conf.yaml}, potentially overriding
* duplicated options.
*
* @param configuration The configuration to instantiate the environment with.
* @return The execution environment of the context in which the program is executed.
*/
public static StreamExecutionEnvironment getExecutionEnvironment(Configuration configuration) {
return Utils.resolveFactory(threadLocalContextEnvironmentFactory, contextEnvironmentFactory)
.map(factory -> factory.createExecutionEnvironment(configuration))
.orElseGet(() -> StreamExecutionEnvironment.createLocalEnvironment(configuration));
} | 3.68 |
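A minimal usage sketch; the job name option is one of Flink's standard pipeline settings and the pipeline itself is illustrative:

```java
Configuration configuration = new Configuration();
configuration.set(PipelineOptions.NAME, "example-job");   // standard pipeline option

StreamExecutionEnvironment env =
        StreamExecutionEnvironment.getExecutionEnvironment(configuration);
env.fromElements(1, 2, 3)
   .map(x -> x * 2)      // simple transformation to make the pipeline non-trivial
   .print();
env.execute();
```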
hudi_HoodieAvroHFileReader_getHFileReader | /**
* Instantiate a new reader for HFile files.
* @return an instance of {@link HFile.Reader}
*/
private HFile.Reader getHFileReader() {
if (content.isPresent()) {
return HoodieHFileUtils.createHFileReader(fs, path, content.get());
}
return HoodieHFileUtils.createHFileReader(fs, path, config, hadoopConf);
} | 3.68 |