Dataset columns:
  name — string (lengths 12 to 178)
  code_snippet — string (lengths 8 to 36.5k)
  score — float64 (range 3.26 to 3.68)
hbase_MasterObserver_preModifyTable
/** * Called prior to modifying a table's properties. Called as part of modify table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param currentDescriptor current TableDescriptor of the table * @param newDescriptor after modify operation, table will have this descriptor */ default TableDescriptor preModifyTable(final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor) throws IOException { return newDescriptor; }
3.68
graphhopper_VectorTile_hasStringValue
/** * <pre> * Exactly one of these values must be present in a valid message * </pre> * * <code>optional string string_value = 1;</code> */ public boolean hasStringValue() { return ((bitField0_ & 0x00000001) == 0x00000001); }
3.68
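The presence check above relies on the generated-protobuf bitfield pattern: each optional field flips one bit in an int, and has*() tests that bit. A minimal standalone sketch of that pattern (class and field names here are hypothetical, not the generated code):

public class BitFieldDemo {
  private int bitField0_;
  private String stringValue_ = "";

  void setStringValue(String v) {
    stringValue_ = v;
    bitField0_ |= 0x00000001; // mark field 1 as present
  }

  boolean hasStringValue() {
    return (bitField0_ & 0x00000001) == 0x00000001;
  }

  public static void main(String[] args) {
    BitFieldDemo d = new BitFieldDemo();
    System.out.println(d.hasStringValue()); // false
    d.setStringValue("hi");
    System.out.println(d.hasStringValue()); // true
  }
}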
hadoop_HCFSMountTableConfigLoader_load
/**
 * Loads the mount-table configuration from a Hadoop-compatible file system and
 * adds the configuration items to the given configuration. The mount-table
 * configuration file name should be suffixed with a version number.
 * Format: {@literal mount-table.<versionNumber>.xml}
 * Example: mount-table.1.xml
 * When a user wants to update the mount-table, the expectation is to upload a new
 * mount-table configuration file with a monotonically increasing integer as the
 * version number. This API loads the file with the highest version number. A
 * single file path can also be configured directly.
 *
 * @param mountTableConfigPath : a directory path where mount-table files are
 *          stored, or a mount-table file path. We recommend configuring a
 *          directory with the mount-table version files.
 * @param conf : to add the mount table as resource.
 */
@Override
public void load(String mountTableConfigPath, Configuration conf) throws IOException {
  this.mountTable = new Path(mountTableConfigPath);
  String scheme = mountTable.toUri().getScheme();
  FsGetter fsGetter = new ViewFileSystemOverloadScheme.ChildFsGetter(scheme);
  try (FileSystem fs = fsGetter.getNewInstance(mountTable.toUri(), conf)) {
    RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(mountTable, false);
    LocatedFileStatus lfs = null;
    int higherVersion = -1;
    while (listFiles.hasNext()) {
      LocatedFileStatus curLfs = listFiles.next();
      String cur = curLfs.getPath().getName();
      String[] nameParts = cur.split(REGEX_DOT);
      if (nameParts.length < 2) {
        logInvalidFileNameFormat(cur);
        continue; // invalid file name
      }
      int curVersion = higherVersion;
      try {
        curVersion = Integer.parseInt(nameParts[nameParts.length - 2]);
      } catch (NumberFormatException nfe) {
        logInvalidFileNameFormat(cur);
        continue;
      }
      if (curVersion > higherVersion) {
        higherVersion = curVersion;
        lfs = curLfs;
      }
    }
    if (lfs == null) {
      // No valid mount table file found.
      // TODO: Should we fail? Currently viewfs init will fail if no mount
      // links anyway.
      LOGGER.warn("No valid mount-table file exist at: {}. At least one "
          + "mount-table file should present with the name format: "
          + "mount-table.<versionNumber>.xml", mountTableConfigPath);
      return;
    }
    // Latest version file.
    Path latestVersionMountTable = lfs.getPath();
    if (LOGGER.isDebugEnabled()) {
      LOGGER.debug("Loading the mount-table {} into configuration.",
          latestVersionMountTable);
    }
    try (FSDataInputStream open = fs.open(latestVersionMountTable)) {
      Configuration newConf = new Configuration(false);
      newConf.addResource(open);
      // This will add configuration props as resource, instead of stream
      // itself. So, that stream can be closed now.
      conf.addResource(newConf);
    }
  }
}
3.68
hbase_NettyRpcServer_createNettyServerRpcConnection
// will be overridden in tests
@InterfaceAudience.Private
protected NettyServerRpcConnection createNettyServerRpcConnection(Channel channel) {
  return new NettyServerRpcConnection(NettyRpcServer.this, channel);
}
3.68
hadoop_SelectBinding_expandBackslashChars
/**
 * Perform escaping.
 * @param src source string.
 * @return the replaced value
 */
static String expandBackslashChars(String src) {
  return src.replace("\\n", "\n")
      .replace("\\\"", "\"")
      .replace("\\t", "\t")
      .replace("\\r", "\r")
      // backslash substitution must come last
      .replace("\\\\", "\\");
}
3.68
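A hedged standalone sketch of the same replacement chain on typical inputs; the helper below is a re-implementation for demonstration, not the Hadoop method itself:

public class EscapeDemo {
  static String expand(String src) {
    return src.replace("\\n", "\n")
        .replace("\\t", "\t")
        // backslash substitution must come last (mirrors the comment in the method above)
        .replace("\\\\", "\\");
  }

  public static void main(String[] args) {
    System.out.println(expand("col1\\tcol2"));   // col1<TAB>col2
    System.out.println(expand("back\\\\slash")); // back\slash
  }
}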
hbase_MultiTableInputFormat_getConf
/** * Returns the current configuration. * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @Override public Configuration getConf() { return conf; }
3.68
pulsar_MessageImpl_create
// Constructor for out-going message
public static <T> MessageImpl<T> create(MessageMetadata msgMetadata, ByteBuffer payload,
    Schema<T> schema, String topic) {
  @SuppressWarnings("unchecked")
  MessageImpl<T> msg = (MessageImpl<T>) RECYCLER.get();
  msg.msgMetadata.clear();
  msg.msgMetadata.copyFrom(msgMetadata);
  msg.messageId = null;
  msg.topic = topic;
  msg.cnx = null;
  msg.payload = Unpooled.wrappedBuffer(payload);
  msg.properties = null;
  msg.schema = schema;
  msg.schemaHash = SchemaHash.of(schema);
  msg.uncompressedSize = payload.remaining();
  return msg;
}
3.68
hbase_MasterObserver_preRenameRSGroup
/**
 * Called before renaming an rsgroup.
 * @param ctx the environment to interact with the framework and master
 * @param oldName old rsgroup name
 * @param newName new rsgroup name
 */
default void preRenameRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
    final String oldName, final String newName) throws IOException {
}
3.68
framework_VaadinPortletService_handleSessionExpired
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.server.VaadinService#handleSessionExpired(com.vaadin.server
 * .VaadinRequest, com.vaadin.server.VaadinResponse)
 */
@Override
protected void handleSessionExpired(VaadinRequest request, VaadinResponse response) {
  // TODO Figure out a better way to deal with SessionExpiredExceptions
  getLogger().finest("A user session has expired");
}
3.68
flink_FlinkPreparingTableBase_getRowCount
/** Returns an estimate of the number of rows in the table. */ public double getRowCount() { Double rowCnt = getStatistic().getRowCount(); return rowCnt == null ? DEFAULT_ROWCOUNT : rowCnt; }
3.68
hbase_HFileWriterImpl_getMidpoint
/**
 * Try to return a Cell that falls between <code>left</code> and <code>right</code> but that is
 * shorter; i.e. takes up less space. This trick is used when building the HFile block index.
 * It's an optimization. It does not always work; in that case we'll just return the
 * <code>right</code> cell.
 * @return A cell that sorts between <code>left</code> and <code>right</code>.
 */
public static Cell getMidpoint(final CellComparator comparator, final Cell left,
    final Cell right) {
  if (right == null) {
    throw new IllegalArgumentException("right cell can not be null");
  }
  if (left == null) {
    return right;
  }
  // If Cells from meta table, don't mess around. meta table Cells have schema
  // (table,startrow,hash) so can't be treated as plain byte arrays. Just skip
  // out without trying to do this optimization.
  if (comparator instanceof MetaCellComparator) {
    return right;
  }
  byte[] midRow;
  boolean bufferBacked =
    left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell;
  if (bufferBacked) {
    midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getRowByteBuffer(),
      ((ByteBufferExtendedCell) left).getRowPosition(), left.getRowLength(),
      ((ByteBufferExtendedCell) right).getRowByteBuffer(),
      ((ByteBufferExtendedCell) right).getRowPosition(), right.getRowLength());
  } else {
    midRow = getMinimumMidpointArray(left.getRowArray(), left.getRowOffset(),
      left.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength());
  }
  if (midRow != null) {
    return PrivateCellUtil.createFirstOnRow(midRow);
  }
  // Rows are same. Compare on families.
  if (bufferBacked) {
    midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getFamilyByteBuffer(),
      ((ByteBufferExtendedCell) left).getFamilyPosition(), left.getFamilyLength(),
      ((ByteBufferExtendedCell) right).getFamilyByteBuffer(),
      ((ByteBufferExtendedCell) right).getFamilyPosition(), right.getFamilyLength());
  } else {
    midRow = getMinimumMidpointArray(left.getFamilyArray(), left.getFamilyOffset(),
      left.getFamilyLength(), right.getFamilyArray(), right.getFamilyOffset(),
      right.getFamilyLength());
  }
  if (midRow != null) {
    return PrivateCellUtil.createFirstOnRowFamily(right, midRow, 0, midRow.length);
  }
  // Families are same. Compare on qualifiers.
  if (bufferBacked) {
    midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getQualifierByteBuffer(),
      ((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(),
      ((ByteBufferExtendedCell) right).getQualifierByteBuffer(),
      ((ByteBufferExtendedCell) right).getQualifierPosition(), right.getQualifierLength());
  } else {
    midRow = getMinimumMidpointArray(left.getQualifierArray(), left.getQualifierOffset(),
      left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(),
      right.getQualifierLength());
  }
  if (midRow != null) {
    return PrivateCellUtil.createFirstOnRowCol(right, midRow, 0, midRow.length);
  }
  // No opportunity for optimization. Just return right key.
  return right;
}
3.68
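For intuition, a hypothetical standalone check (not HBase code) that a shorter separator can stand in for the full right key in a block index, since any byte array sorting between the two rows works:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class MidpointIdea {
  public static void main(String[] args) {
    byte[] left = "apple".getBytes(StandardCharsets.UTF_8);
    byte[] mid = "b".getBytes(StandardCharsets.UTF_8);
    byte[] right = "banana".getBytes(StandardCharsets.UTF_8);
    // Arrays.compareUnsigned gives lexicographic byte order (Java 9+).
    System.out.println(Arrays.compareUnsigned(left, mid) < 0);   // true
    System.out.println(Arrays.compareUnsigned(mid, right) <= 0); // true
  }
}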
framework_AbstractComponentTest_unselectChildren
/**
 * Unselect all child menu items
 *
 * @param parent
 *            the parent menu item whose children to unselect
 */
protected void unselectChildren(MenuItem parent) {
  List<MenuItem> children = parent.getChildren();
  if (children == null) {
    return;
  }
  for (MenuItem child : children) {
    setSelected(child, false);
  }
}
3.68
hadoop_AbstractS3ACommitter_getCommitOperations
/** * Get the commit actions instance. * Subclasses may provide a mock version of this. * @return the commit actions instance to use for operations. */ protected CommitOperations getCommitOperations() { return commitOperations; }
3.68
flink_LogicalTypeChecks_getScale
/** Returns the scale of all types that define a scale implicitly or explicitly. */ public static int getScale(LogicalType logicalType) { return logicalType.accept(SCALE_EXTRACTOR); }
3.68
flink_JoinOperator_projectTuple6
/** * Projects a pair of joined elements to a {@link Tuple} with the previously selected * fields. Requires the classes of the fields of the resulting tuples. * * @return The projected data set. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5> ProjectJoin<I1, I2, Tuple6<T0, T1, T2, T3, T4, T5>> projectTuple6() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>> tType = new TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>>(fTypes); return new ProjectJoin<I1, I2, Tuple6<T0, T1, T2, T3, T4, T5>>( this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this); }
3.68
hbase_RegexStringComparator_areSerializedFieldsEqual
/** * Returns true if and only if the fields of the comparator that are serialized are equal to the * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { if (other == this) { return true; } if (!(other instanceof RegexStringComparator)) { return false; } RegexStringComparator comparator = (RegexStringComparator) other; return super.areSerializedFieldsEqual(comparator) && engine.getClass().isInstance(comparator.getEngine()) && engine.getPattern().equals(comparator.getEngine().getPattern()) && engine.getFlags() == comparator.getEngine().getFlags() && engine.getCharset().equals(comparator.getEngine().getCharset()); }
3.68
hbase_OrderedBytes_putVaruint64
/**
 * Encode an unsigned 64-bit integer {@code val} into {@code dst}.
 * @param dst The destination to which encoded bytes are written.
 * @param val The value to write.
 * @param comp Complement the encoded value when {@code comp} is true.
 * @return number of bytes written.
 */
static int putVaruint64(PositionedByteRange dst, long val, boolean comp) {
  int w, y, len = 0;
  final int offset = dst.getOffset(), start = dst.getPosition();
  byte[] a = dst.getBytes();
  Order ord = comp ? DESCENDING : ASCENDING;
  if (-1 == unsignedCmp(val, 241L)) {
    dst.put((byte) val);
    len = dst.getPosition() - start;
    ord.apply(a, offset + start, len);
    return len;
  }
  if (-1 == unsignedCmp(val, 2288L)) {
    y = (int) (val - 240);
    dst.put((byte) (y / 256 + 241)).put((byte) (y % 256));
    len = dst.getPosition() - start;
    ord.apply(a, offset + start, len);
    return len;
  }
  if (-1 == unsignedCmp(val, 67824L)) {
    y = (int) (val - 2288);
    dst.put((byte) 249).put((byte) (y / 256)).put((byte) (y % 256));
    len = dst.getPosition() - start;
    ord.apply(a, offset + start, len);
    return len;
  }
  y = (int) val;
  w = (int) (val >>> 32);
  if (w == 0) {
    if (-1 == unsignedCmp(y, 16777216L)) {
      dst.put((byte) 250).put((byte) (y >>> 16)).put((byte) (y >>> 8)).put((byte) y);
      len = dst.getPosition() - start;
      ord.apply(a, offset + start, len);
      return len;
    }
    dst.put((byte) 251);
    putUint32(dst, y);
    len = dst.getPosition() - start;
    ord.apply(a, offset + start, len);
    return len;
  }
  if (-1 == unsignedCmp(w, 256L)) {
    dst.put((byte) 252).put((byte) w);
    putUint32(dst, y);
    len = dst.getPosition() - start;
    ord.apply(a, offset + start, len);
    return len;
  }
  if (-1 == unsignedCmp(w, 65536L)) {
    dst.put((byte) 253).put((byte) (w >>> 8)).put((byte) w);
    putUint32(dst, y);
    len = dst.getPosition() - start;
    ord.apply(a, offset + start, len);
    return len;
  }
  if (-1 == unsignedCmp(w, 16777216L)) {
    dst.put((byte) 254).put((byte) (w >>> 16)).put((byte) (w >>> 8)).put((byte) w);
    putUint32(dst, y);
    len = dst.getPosition() - start;
    ord.apply(a, offset + start, len);
    return len;
  }
  dst.put((byte) 255);
  putUint32(dst, w);
  putUint32(dst, y);
  len = dst.getPosition() - start;
  ord.apply(a, offset + start, len);
  return len;
}
3.68
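Derived from the branch thresholds above, a hypothetical standalone helper (not part of OrderedBytes) that reports how many bytes a value occupies under this variable-length encoding:

public class VaruintLen {
  static int encodedLength(long val) {
    if (Long.compareUnsigned(val, 241L) < 0) return 1;
    if (Long.compareUnsigned(val, 2288L) < 0) return 2;
    if (Long.compareUnsigned(val, 67824L) < 0) return 3;
    if (Long.compareUnsigned(val, 1L << 24) < 0) return 4;
    if (Long.compareUnsigned(val, 1L << 32) < 0) return 5;
    if (Long.compareUnsigned(val, 1L << 40) < 0) return 6;
    if (Long.compareUnsigned(val, 1L << 48) < 0) return 7;
    if (Long.compareUnsigned(val, 1L << 56) < 0) return 8;
    return 9;
  }

  public static void main(String[] args) {
    System.out.println(encodedLength(240));   // 1
    System.out.println(encodedLength(2287));  // 2
    System.out.println(encodedLength(67823)); // 3
    System.out.println(encodedLength(-1L));   // 9 (max unsigned value)
  }
}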
framework_ColorUtil_getHexPatternColor
/** * Parses {@link Color} from matched hexadecimal {@link Matcher}. * * @param matcher * {@link Matcher} matching hexadecimal pattern with named regex * groups {@code red}, {@code green}, and {@code blue} * @return {@link Color} parsed from {@link Matcher} */ public static Color getHexPatternColor(Matcher matcher) { int red = Integer.parseInt(matcher.group("red"), 16); int green = Integer.parseInt(matcher.group("green"), 16); int blue = Integer.parseInt(matcher.group("blue"), 16); return new Color(red, green, blue); }
3.68
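A hedged sketch of a pattern this method could be paired with; the regex itself is an assumption, only the named groups red/green/blue are required by the method above:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HexDemo {
  private static final Pattern HEX = Pattern.compile(
      "#(?<red>[0-9a-fA-F]{2})(?<green>[0-9a-fA-F]{2})(?<blue>[0-9a-fA-F]{2})");

  public static void main(String[] args) {
    Matcher m = HEX.matcher("#1e90ff");
    if (m.matches()) {
      System.out.println(Integer.parseInt(m.group("red"), 16));   // 30
      System.out.println(Integer.parseInt(m.group("green"), 16)); // 144
      System.out.println(Integer.parseInt(m.group("blue"), 16));  // 255
    }
  }
}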
framework_VAbstractCalendarPanel_onFocus
/*
 * (non-Javadoc)
 *
 * @see com.google.gwt.event.dom.client.FocusHandler#onFocus(com.google.gwt.event
 * .dom.client.FocusEvent)
 */
@Override
public void onFocus(FocusEvent event) {
  if (event.getSource() instanceof VAbstractCalendarPanel) {
    hasFocus = true;
    // Focuses the current day if the calendar shows the days
    if (focusedDay != null) {
      focusDay(focusedDate);
    }
  }
}
3.68
hadoop_SnappyDecompressor_setDictionary
/**
 * Does nothing.
 */
@Override
public void setDictionary(byte[] b, int off, int len) {
  // do nothing
}
3.68
flink_TaskStateStats_getLatestAckTimestamp
/**
 * @return Ack timestamp of the latest acknowledged subtask, or <code>-1</code> if none was
 *         acknowledged yet.
 */
public long getLatestAckTimestamp() {
  SubtaskStateStats subtask = latestAckedSubtaskStats;
  if (subtask != null) {
    return subtask.getAckTimestamp();
  } else {
    return -1;
  }
}
3.68
framework_NativeSelect_setEmptySelectionAllowed
/**
 * Sets whether the user is allowed to select nothing. When true, a special
 * empty item is shown to the user.
 *
 * @param emptySelectionAllowed
 *            true to allow not selecting anything, false to require
 *            selection
 * @since 8.0
 */
public void setEmptySelectionAllowed(boolean emptySelectionAllowed) {
  getState().emptySelectionAllowed = emptySelectionAllowed;
}
3.68
zxing_ECIStringBuilder_isEmpty
/** * @return true iff nothing has been appended */ public boolean isEmpty() { return currentBytes.length() == 0 && (result == null || result.length() == 0); }
3.68
cron-utils_CronDefinitionBuilder_withSupportedNicknameHourly
/** * Supports cron nickname @hourly * * @return this CronDefinitionBuilder instance */ public CronDefinitionBuilder withSupportedNicknameHourly() { cronNicknames.add(CronNicknames.HOURLY); return this; }
3.68
morf_AddTable_accept
/** * @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor) */ @Override public void accept(SchemaChangeVisitor visitor) { visitor.visit(this); }
3.68
hbase_ClusterId_parseFrom
/**
 * Parse the serialized representation of the {@link ClusterId}
 * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
 * @return An instance of {@link ClusterId} made from <code>bytes</code>
 * @see #toByteArray()
 */
public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(bytes)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
    ClusterIdProtos.ClusterId cid = null;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      cid = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return convert(cid);
  } else {
    // Presume it was written out this way, the old way.
    return new ClusterId(Bytes.toString(bytes));
  }
}
3.68
hadoop_SharedKeyCredentials_canonicalizeHttpRequest
/**
 * Constructs a canonicalized string from the request's headers that will be used to construct
 * the signature string for signing a Blob or Queue service request under the Shared Key Full
 * authentication scheme.
 *
 * @param address the request URI
 * @param accountName the account name associated with the request
 * @param method the verb to be used for the HTTP request.
 * @param contentType the content type of the HTTP request.
 * @param contentLength the length of the content written to the outputstream in bytes, -1 if unknown
 * @param date the date/time specification for the HTTP request
 * @param conn the HttpURLConnection for the operation.
 * @return A canonicalized string.
 */
private static String canonicalizeHttpRequest(final URL address, final String accountName,
    final String method, final String contentType, final long contentLength, final String date,
    final HttpURLConnection conn) throws UnsupportedEncodingException {
  // The first element should be the Method of the request.
  // I.e. GET, POST, PUT, or HEAD.
  final StringBuilder canonicalizedString =
      new StringBuilder(EXPECTED_BLOB_QUEUE_CANONICALIZED_STRING_LENGTH);
  canonicalizedString.append(conn.getRequestMethod());

  // The next elements are the standard headers; if any element is missing it may be empty.
  appendCanonicalizedElement(canonicalizedString,
      getHeaderValue(conn, HttpHeaderConfigurations.CONTENT_ENCODING, AbfsHttpConstants.EMPTY_STRING));
  appendCanonicalizedElement(canonicalizedString,
      getHeaderValue(conn, HttpHeaderConfigurations.CONTENT_LANGUAGE, AbfsHttpConstants.EMPTY_STRING));
  appendCanonicalizedElement(canonicalizedString,
      contentLength <= 0 ? "" : String.valueOf(contentLength));
  appendCanonicalizedElement(canonicalizedString,
      getHeaderValue(conn, HttpHeaderConfigurations.CONTENT_MD5, AbfsHttpConstants.EMPTY_STRING));
  appendCanonicalizedElement(canonicalizedString,
      contentType != null ? contentType : AbfsHttpConstants.EMPTY_STRING);

  final String dateString =
      getHeaderValue(conn, HttpHeaderConfigurations.X_MS_DATE, AbfsHttpConstants.EMPTY_STRING);
  // If x-ms-date header exists, Date should be empty string
  appendCanonicalizedElement(canonicalizedString,
      dateString.equals(AbfsHttpConstants.EMPTY_STRING) ? date : "");

  appendCanonicalizedElement(canonicalizedString,
      getHeaderValue(conn, HttpHeaderConfigurations.IF_MODIFIED_SINCE, AbfsHttpConstants.EMPTY_STRING));
  appendCanonicalizedElement(canonicalizedString,
      getHeaderValue(conn, HttpHeaderConfigurations.IF_MATCH, AbfsHttpConstants.EMPTY_STRING));
  appendCanonicalizedElement(canonicalizedString,
      getHeaderValue(conn, HttpHeaderConfigurations.IF_NONE_MATCH, AbfsHttpConstants.EMPTY_STRING));
  appendCanonicalizedElement(canonicalizedString,
      getHeaderValue(conn, HttpHeaderConfigurations.IF_UNMODIFIED_SINCE, AbfsHttpConstants.EMPTY_STRING));
  appendCanonicalizedElement(canonicalizedString,
      getHeaderValue(conn, HttpHeaderConfigurations.RANGE, AbfsHttpConstants.EMPTY_STRING));

  addCanonicalizedHeaders(conn, canonicalizedString);
  appendCanonicalizedElement(canonicalizedString, getCanonicalizedResource(address, accountName));
  return canonicalizedString.toString();
}
3.68
hbase_MunkresAssignment_updateMin
/**
 * A specified row has become covered, and a specified column has become uncovered. The least
 * value per row may need to be updated.
 * @param row the index of the row which was just covered
 * @param col the index of the column which was just uncovered
 */
private void updateMin(int row, int col) {
  // If the row is covered we want to ignore it as far as least values go.
  leastInRow[row] = Float.POSITIVE_INFINITY;
  for (int r = 0; r < rows; r++) {
    // Since the column has only just been uncovered, it could not have any
    // pending adjustments. Only covered rows can have pending adjustments
    // and covered costs do not count toward row minimums. Therefore, we do
    // not need to consider rowAdjust[r] or colAdjust[col].
    if (!rowsCovered[r] && cost[r][col] < leastInRow[r]) {
      leastInRow[r] = cost[r][col];
      leastInRowIndex[r] = col;
    }
  }
}
3.68
hudi_SimpleBloomFilter_serializeToString
/** * Serialize the bloom filter as a string. */ @Override public String serializeToString() { ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); try { filter.write(dos); byte[] bytes = baos.toByteArray(); dos.close(); return Base64CodecUtil.encode(bytes); } catch (IOException e) { throw new HoodieIndexException("Could not serialize BloomFilter instance", e); } }
3.68
hadoop_BlockBlobInputStream_size
/** * Gets the current size of the stream. */ public synchronized int size() { return writePosition - offset; }
3.68
hadoop_AbfsClientThrottlingIntercept_sendingRequest
/** * Called before the request is sent. Client-side throttling * uses this to suspend the request, if necessary, to minimize errors and * maximize throughput. */ @Override public void sendingRequest(AbfsRestOperationType operationType, AbfsCounters abfsCounters) { switch (operationType) { case ReadFile: if (readThrottler.suspendIfNecessary() && abfsCounters != null) { abfsCounters.incrementCounter(AbfsStatistic.READ_THROTTLES, 1); } break; case Append: if (writeThrottler.suspendIfNecessary() && abfsCounters != null) { abfsCounters.incrementCounter(AbfsStatistic.WRITE_THROTTLES, 1); } break; default: break; } }
3.68
pulsar_AbstractAwsConnector_defaultCredentialProvider
/**
 * Creates a default credential provider which takes the accessKey and secretKey from the
 * configuration and creates {@link AWSCredentials}.
 *
 * @param awsCredentialPluginParam json map containing the accessKey and secretKey
 * @return the credential provider plugin
 */
public AwsCredentialProviderPlugin defaultCredentialProvider(String awsCredentialPluginParam) {
  Map<String, String> credentialMap = new Gson().fromJson(awsCredentialPluginParam,
      new TypeToken<Map<String, String>>() {
      }.getType());
  String accessKey = credentialMap.get(ACCESS_KEY_NAME);
  String secretKey = credentialMap.get(SECRET_KEY_NAME);
  if (!(StringUtils.isNotBlank(accessKey) && StringUtils.isNotBlank(secretKey))) {
    throw new IllegalArgumentException(String.format("Default %s and %s must be present into json-map "
        + "if AwsCredentialProviderPlugin not provided", ACCESS_KEY_NAME, SECRET_KEY_NAME));
  }
  return new AwsCredentialProviderPlugin() {
    @Override
    public void init(String param) {
      // noop
    }

    @Override
    public AWSCredentialsProvider getCredentialProvider() {
      return defaultCredentialProvider(accessKey, secretKey);
    }

    @Override
    public void close() throws IOException {
    }
  };
}
3.68
flink_TableFactoryUtil_findCatalogModificationListenerList
/** Find and create modification listener list from configuration. */ public static List<CatalogModificationListener> findCatalogModificationListenerList( final ReadableConfig configuration, final ClassLoader classLoader) { return configuration.getOptional(TableConfigOptions.TABLE_CATALOG_MODIFICATION_LISTENERS) .orElse(Collections.emptyList()).stream() .map( identifier -> FactoryUtil.discoverFactory( classLoader, CatalogModificationListenerFactory.class, identifier) .createListener( new CatalogModificationListenerFactory.Context() { @Override public ReadableConfig getConfiguration() { return configuration; } @Override public ClassLoader getUserClassLoader() { return classLoader; } })) .collect(Collectors.toList()); }
3.68
graphhopper_ResponsePath_getWaypoints
/** * This method returns the input points snapped to the road network. */ public PointList getWaypoints() { check("getWaypoints"); return waypointList; }
3.68
hbase_Bytes_putBytes
/** * Put bytes at the specified byte array position. * @param tgtBytes the byte array * @param tgtOffset position in the array * @param srcBytes array to write out * @param srcOffset source offset * @param srcLength source length * @return incremented offset */ public static int putBytes(byte[] tgtBytes, int tgtOffset, byte[] srcBytes, int srcOffset, int srcLength) { System.arraycopy(srcBytes, srcOffset, tgtBytes, tgtOffset, srcLength); return tgtOffset + srcLength; }
3.68
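Tiny self-contained usage sketch (the helper re-implements the one-line method above) showing how the returned offset chains consecutive writes:

public class PutBytesDemo {
  static int putBytes(byte[] tgt, int tgtOff, byte[] src, int srcOff, int srcLen) {
    System.arraycopy(src, srcOff, tgt, tgtOff, srcLen);
    return tgtOff + srcLen;
  }

  public static void main(String[] args) {
    byte[] buf = new byte[6];
    int off = putBytes(buf, 0, new byte[] {1, 2, 3}, 0, 3);
    off = putBytes(buf, off, new byte[] {4, 5, 6}, 0, 3);
    System.out.println(java.util.Arrays.toString(buf)); // [1, 2, 3, 4, 5, 6]
  }
}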
dubbo_SerializableClassRegistry_getRegisteredClasses
/**
 * Get the registered classes.
 *
 * @return the registered classes, mapped to their optional serializers
 */
public static Map<Class<?>, Object> getRegisteredClasses() {
  return REGISTRATIONS;
}
3.68
hadoop_ECChunk_toBytesArray
/**
 * Convert to a bytes array, just for test usage.
 * @return bytes array
 */
public byte[] toBytesArray() {
  byte[] bytesArr = new byte[chunkBuffer.remaining()];
  // Avoid affecting the original one
  chunkBuffer.mark();
  chunkBuffer.get(bytesArr);
  chunkBuffer.reset();
  return bytesArr;
}
3.68
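The mark/get/reset dance above is plain java.nio; a standalone demonstration that the copy leaves the buffer position untouched:

import java.nio.ByteBuffer;

public class MarkResetDemo {
  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
    buf.get(); // position = 1, remaining = 3
    byte[] copy = new byte[buf.remaining()];
    buf.mark();
    buf.get(copy);   // advances position to the limit
    buf.reset();     // back to the marked position
    System.out.println(buf.remaining()); // 3, unchanged by the copy
  }
}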
framework_AdjacentElementsWithTooltips_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "Moving between adjacent elements with tooltips should open quickOpenDelay"; }
3.68
hadoop_OBSLoginHelper_checkPath
/**
 * Check the path, ignoring authentication details. See {@link
 * OBSFileSystem#checkPath(Path)} for the operation of this.
 *
 * <p>Essentially
 *
 * <ol>
 * <li>The URI is canonicalized.
 * <li>If the schemas match, the hosts are compared.
 * <li>If there is a mismatch between null/non-null host,
 * the default FS values are used to patch in the host.
 * </ol>
 * <p>
 * That all originates in the core FS; the sole change here being to use
 * {@link URI#getHost()} over {@link URI#getAuthority()}. Some of that code
 * looks like a relic of the code anti-pattern of using "hdfs:file.txt" to define
 * the path without declaring the hostname. It's retained for compatibility.
 *
 * @param conf FS configuration
 * @param fsUri the FS URI
 * @param path path to check
 * @param defaultPort default port of FS
 */
public static void checkPath(final Configuration conf, final URI fsUri, final Path path,
    final int defaultPort) {
  URI pathUri = path.toUri();
  String thatScheme = pathUri.getScheme();
  if (thatScheme == null) {
    // fs is relative
    return;
  }
  URI thisUri = canonicalizeUri(fsUri, defaultPort);
  String thisScheme = thisUri.getScheme();
  // hostname and scheme are not case sensitive in these checks
  if (equalsIgnoreCase(thisScheme, thatScheme)) { // schemes match
    String thisHost = thisUri.getHost();
    String thatHost = pathUri.getHost();
    if (thatHost == null // path's host is null
        && thisHost != null) { // fs has a host
      URI defaultUri = FileSystem.getDefaultUri(conf);
      if (equalsIgnoreCase(thisScheme, defaultUri.getScheme())) {
        pathUri = defaultUri; // schemes match, so use this uri instead
      } else {
        pathUri = null; // can't determine auth of the path
      }
    }
    if (pathUri != null) {
      // canonicalize uri before comparing with this fs
      pathUri = canonicalizeUri(pathUri, defaultPort);
      thatHost = pathUri.getHost();
      if (equalsIgnoreCase(thisHost, thatHost)) {
        return;
      }
    }
  }
  // make sure the exception strips out any auth details
  throw new IllegalArgumentException(
      "Wrong FS " + OBSLoginHelper.toString(pathUri) + " -expected " + fsUri);
}
3.68
hudi_OptionsResolver_allowCommitOnEmptyBatch
/**
 * Returns whether to commit even when the current batch has no data; defaults to false for Flink.
 */
public static boolean allowCommitOnEmptyBatch(Configuration conf) {
  return conf.getBoolean(HoodieWriteConfig.ALLOW_EMPTY_COMMIT.key(), false);
}
3.68
querydsl_AbstractJDOQuery_setMaxFetchDepth
/** * Set the maximum fetch depth when fetching. * A value of 0 has no meaning and will throw a {@link JDOUserException}. * A value of -1 means that no limit is placed on fetching. * A positive integer will result in that number of references from the * initial object to be fetched. * * @param depth fetch depth * @return the current object */ @Override public Q setMaxFetchDepth(int depth) { maxFetchDepth = depth; return queryMixin.getSelf(); }
3.68
hbase_SampleUploader_configureJob
/**
 * Job configuration.
 */
public static Job configureJob(Configuration conf, String[] args) throws IOException {
  Path inputPath = new Path(args[0]);
  String tableName = args[1];
  Job job = new Job(conf, NAME + "_" + tableName);
  job.setJarByClass(Uploader.class);
  FileInputFormat.setInputPaths(job, inputPath);
  job.setInputFormatClass(SequenceFileInputFormat.class);
  job.setMapperClass(Uploader.class);
  // No reducers. Just write straight to table. Call initTableReducerJob
  // because it sets up the TableOutputFormat.
  TableMapReduceUtil.initTableReducerJob(tableName, null, job);
  job.setNumReduceTasks(0);
  return job;
}
3.68
hbase_ServerRpcController_setFailedOn
/** * Sets an exception to be communicated back to the * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} client. * @param ioe the exception encountered during execution of the service method */ public void setFailedOn(IOException ioe) { serviceException = ioe; setFailed(StringUtils.stringifyException(ioe)); }
3.68
framework_VTabsheet_getCurrentlyDisplayedWidget
/** * Returns the currently displayed widget in the tab panel. * * @since 7.2 * @return currently displayed content widget */ public Widget getCurrentlyDisplayedWidget() { return tabPanel.getWidget(tabPanel.getVisibleWidget()); }
3.68
hbase_ZKListener_nodeCreated
/**
 * Called when a new node has been created.
 * @param path full path of the new node
 */
public void nodeCreated(String path) {
  // no-op
}
3.68
pulsar_CliCommand_getOneArgument
/**
 * @param params
 *            list of positional arguments
 * @param pos
 *            zero-based index of the argument to return
 * @param maxArguments
 *            expected number of arguments, validated against the list size
 * @return the argument at the given position
 */
static String getOneArgument(List<String> params, int pos, int maxArguments) {
  if (params.size() != maxArguments) {
    throw new ParameterException(String.format("Need to provide %s parameters", maxArguments));
  }
  return params.get(pos);
}
3.68
hudi_HoodieRowCreateHandle_getWriteToken
// TODO extract to utils
private static String getWriteToken(int taskPartitionId, long taskId, long taskEpochId) {
  return taskPartitionId + "-" + taskId + "-" + taskEpochId;
}
3.68
hadoop_MawoConfiguration_getRpcHostName
/**
 * Get the RPC server hostname.
 * @return value of rpc.server.hostname
 */
public String getRpcHostName() {
  return configsMap.get(RPC_SERVER_HOSTNAME);
}
3.68
mutate-test-kata_Company_everybodyGetsRaiseBy
/**
 * Increase every employee's salary by the specified fraction
 * @param incrementAsFraction salary increase as a fraction of the original salary. e.g. if the value of the
 *                            parameter is 0.1, everyone at the company gets a 10% raise
 */
public void everybodyGetsRaiseBy(double incrementAsFraction) {
  // multiply by (1 + fraction); multiplying by the fraction alone would shrink the salary
  this.employees.forEach(e -> e.setSalary(e.getSalary() * (1 + incrementAsFraction)));
}
3.68
pulsar_Schema_JSON
/** * Create a JSON schema type with schema definition. * * @param schemaDefinition the definition of the schema * @return a Schema instance */ static <T> Schema<T> JSON(SchemaDefinition schemaDefinition) { return DefaultImplementation.getDefaultImplementation().newJSONSchema(schemaDefinition); }
3.68
flink_HadoopDataOutputStream_getHadoopOutputStream
/** * Gets the wrapped Hadoop output stream. * * @return The wrapped Hadoop output stream. */ public org.apache.hadoop.fs.FSDataOutputStream getHadoopOutputStream() { return fdos; }
3.68
hudi_NonThrownExecutor_execute
/**
 * Run the action asynchronously, routing any error to the given exception hook.
 */
public void execute(
    final ThrowingRunnable<Throwable> action,
    final ExceptionHook hook,
    final String actionName,
    final Object... actionParams) {
  executor.execute(wrapAction(action, hook, actionName, actionParams));
}
3.68
pulsar_ManagedLedgerImpl_calculateLastEntryInLedgerList
/**
 * @return null if all ledgers are empty.
 */
private PositionImpl calculateLastEntryInLedgerList(List<LedgerInfo> ledgersToDelete) {
  for (int i = ledgersToDelete.size() - 1; i >= 0; i--) {
    LedgerInfo ledgerInfo = ledgersToDelete.get(i);
    if (ledgerInfo != null && ledgerInfo.hasEntries() && ledgerInfo.getEntries() > 0) {
      return PositionImpl.get(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1);
    }
  }
  return null;
}
3.68
hbase_WALSplitter_checkForErrors
/** * Check for errors in the writer threads. If any is found, rethrow it. */ void checkForErrors() throws IOException { Throwable thrown = this.thrown.get(); if (thrown == null) { return; } if (thrown instanceof IOException) { throw new IOException(thrown); } else { throw new RuntimeException(thrown); } }
3.68
hbase_MetricsHBaseServerSourceFactory_createContextName
/**
 * From the name of the class that's starting up, create the context in which an IPC source
 * should register itself.
 * @param serverName The name of the class that's starting up.
 * @return The Camel Cased context name.
 */
protected static String createContextName(String serverName) {
  if (serverName.startsWith("HMaster") || serverName.startsWith("master")) {
    return "Master";
  } else if (serverName.startsWith("HRegion") || serverName.startsWith("regionserver")) {
    return "RegionServer";
  }
  return "IPC";
}
3.68
morf_AbstractSqlDialectTest_testCastToInteger
/** * Tests the output of a cast to an integer. */ @Test public void testCastToInteger() { String result = testDialect.getSqlFrom(new Cast(new FieldReference("value"), DataType.INTEGER, 10)); assertEquals(expectedIntegerCast(), result); }
3.68
flink_HiveTableMetaStoreFactory_listDataFileRecursively
/** List data files recursively. */ private List<FileStatus> listDataFileRecursively(FileSystem fileSystem, Path f) throws IOException { List<FileStatus> fileStatusList = new ArrayList<>(); for (FileStatus fileStatus : fileSystem.listStatus(f)) { if (fileStatus.isDir() && !isStagingDir(fileStatus.getPath())) { fileStatusList.addAll( listDataFileRecursively(fileSystem, fileStatus.getPath())); } else { if (isDataFile(fileStatus)) { fileStatusList.add(fileStatus); } } } return fileStatusList; }
3.68
hbase_TableListModel_getTables
/** Returns the tables */ @XmlElementRef(name = "table") public List<TableModel> getTables() { return tables; }
3.68
hadoop_EagerKeyGeneratorKeyProviderCryptoExtension_rollNewVersion
/** * Roll a new version of the given key generating the material for it. * <p> * Due to the caching on the ValueQueue, even after a rollNewVersion call, * {@link #generateEncryptedKey(String)} may still return an old key - even * when we drain the queue here, the async thread may later fill in old keys. * This is acceptable since old version keys are still able to decrypt, and * client shall make no assumptions that it will get a new versioned key * after rollNewVersion. */ @Override public KeyVersion rollNewVersion(String name) throws NoSuchAlgorithmException, IOException { KeyVersion keyVersion = super.rollNewVersion(name); getExtension().drain(name); return keyVersion; }
3.68
flink_SocketClientSink_open
/** * Initialize the connection with the Socket in the server. * * @param openContext the context. */ @Override public void open(OpenContext openContext) throws Exception { try { synchronized (lock) { createConnection(); } } catch (IOException e) { throw new IOException("Cannot connect to socket server at " + hostName + ":" + port, e); } }
3.68
framework_AbstractSelectionModel_getGrid
/**
 * Returns the grid this selection model is attached to, or throws
 * {@link IllegalStateException} if not attached to any grid.
 *
 * @return the grid this selection model is attached to
 * @throws IllegalStateException
 *             if this selection model is not attached to any grid
 */
protected Grid<T> getGrid() throws IllegalStateException {
  Grid<T> parent = getParent();
  if (parent == null) {
    throw new IllegalStateException(
        "This selection model is not currently attached to any grid.");
  }
  return parent;
}
3.68
hbase_MonitoredTaskImpl_getStatusJournal
/**
 * Returns the status journal. This implementation of the status journal is not thread-safe.
 * Currently we use it to track the stages of flushes and compactions, then pretty-print the
 * journal for post-task analysis, by which time we are already done changing states (writing
 * to the journal).
 */
@Override
public List<StatusJournalEntry> getStatusJournal() {
  if (journal == null) {
    return Collections.emptyList();
  } else {
    return ImmutableList.copyOf(journal);
  }
}
3.68
framework_Notification_addCloseListener
/** * Adds a CloseListener to the Notification. * * @param listener * the CloseListener to add, not {@code null} * @since 8.2 */ public Registration addCloseListener(CloseListener listener) { return addListener(CloseEvent.class, listener, CLOSE_METHOD); }
3.68
flink_BinaryStringDataUtil_reverse
/**
 * Reverses the character order of the given string.
 *
 * @return a new string whose character order is the reverse of the current string.
 */
public static BinaryStringData reverse(BinaryStringData str) {
  str.ensureMaterialized();
  if (str.inFirstSegment()) {
    byte[] result = new byte[str.getSizeInBytes()];
    // position in byte
    int byteIdx = 0;
    while (byteIdx < str.getSizeInBytes()) {
      int charBytes = numBytesForFirstByte(str.getByteOneSegment(byteIdx));
      str.getSegments()[0].get(
          str.getOffset() + byteIdx,
          result,
          result.length - byteIdx - charBytes,
          charBytes);
      byteIdx += charBytes;
    }
    return BinaryStringData.fromBytes(result);
  } else {
    return reverseMultiSegs(str);
  }
}
3.68
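The same code-point-aware idea, sketched on plain Java strings (illustrative only; the Flink method above works on UTF-8 bytes in memory segments):

public class ReverseDemo {
  static String reverseByCodePoint(String s) {
    StringBuilder sb = new StringBuilder(s.length());
    int i = s.length();
    while (i > 0) {
      int cp = s.codePointBefore(i);
      sb.appendCodePoint(cp);
      i -= Character.charCount(cp);
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    // the surrogate pair stays intact instead of being split char-by-char
    System.out.println(reverseByCodePoint("ab\uD83D\uDE00c")); // c😀ba
  }
}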
framework_AbstractLegacyComponent_getExplicitImmediateValue
/** * Returns the explicitly set immediate value. * * @return the explicitly set immediate value or null if * {@link #setImmediate(boolean)} has not been explicitly invoked */ protected Boolean getExplicitImmediateValue() { return explicitImmediateValue; }
3.68
flink_AsyncSnapshotCallable_logAsyncSnapshotComplete
/**
 * This method is invoked after completion of the snapshot and can be overridden to log the
 * duration of the async part.
 */
protected void logAsyncSnapshotComplete(long startTime) {}
3.68
hbase_JVM_getSystemLoadAverage
/** * Get the system load average * @see java.lang.management.OperatingSystemMXBean#getSystemLoadAverage */ public double getSystemLoadAverage() { return osMbean.getSystemLoadAverage(); }
3.68
hadoop_StageConfig_getDestinationDir
/** * Destination of job. */ public Path getDestinationDir() { return destinationDir; }
3.68
hadoop_LightWeightLinkedSet_pollN
/**
 * Remove and return n elements from the hashtable.
 * The order in which entries are removed corresponds
 * to the order in which they were inserted.
 *
 * @return the first n elements
 */
@Override
public List<T> pollN(int n) {
  if (n >= size) {
    // if we need to remove all elements then do fast polling
    return pollAll();
  }
  List<T> retList = new ArrayList<T>(n);
  while (n-- > 0 && head != null) {
    T curr = head.element;
    this.removeElem(curr);
    retList.add(curr);
  }
  shrinkIfNecessary();
  return retList;
}
3.68
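An equivalent of the insertion-order poll, sketched with a standard LinkedHashSet for illustration (the Hadoop class is a specialized, allocation-light variant):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;

public class PollNDemo {
  static <T> List<T> pollN(LinkedHashSet<T> set, int n) {
    List<T> out = new ArrayList<>(n);
    Iterator<T> it = set.iterator();
    while (n-- > 0 && it.hasNext()) {
      out.add(it.next());
      it.remove(); // remove in iteration (= insertion) order
    }
    return out;
  }

  public static void main(String[] args) {
    LinkedHashSet<Integer> set = new LinkedHashSet<>(List.of(1, 2, 3, 4));
    System.out.println(pollN(set, 2)); // [1, 2]
    System.out.println(set);           // [3, 4]
  }
}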
morf_DataMaskingXmlDataSetConsumer_getValue
/** * If the provided column should be masked then return null, otherwise return the value. */ @Override protected String getValue(Record record, Column column, String table) { if (tableColumnsToMask.containsKey(table) && tableColumnsToMask.get(table).contains(column.getName())) { return null; } return super.getValue(record, column, table); }
3.68
querydsl_StringExpressions_rpad
/** * Create a {@code rpad(in, length, c)} expression * * <p>Returns in right-padded to length characters with c</p> * * @param in string to be padded * @param length target length * @param c padding char * @return rpad(in, length, c) */ public static StringExpression rpad(Expression<String> in, int length, char c) { return Expressions.stringOperation(Ops.StringOps.RPAD2, in, ConstantImpl.create(length), ConstantImpl.create(c)); }
3.68
hudi_BaseJavaCommitActionExecutor_getInsertPartitioner
/** * Provides a partitioner to perform the insert operation, based on the workload profile. */ public Partitioner getInsertPartitioner(WorkloadProfile profile) { return getUpsertPartitioner(profile); }
3.68
hbase_MasterObserver_preListRSGroups
/** * Called before listing region server group information. * @param ctx the environment to interact with the framework and master */ default void preListRSGroups(final ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException { }
3.68
hbase_MultiTableHFileOutputFormat_createCompositeKey
/** * Alternate api which accepts a String for the tableName and ImmutableBytesWritable for the * suffix * @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[]) */ public static byte[] createCompositeKey(String tableName, ImmutableBytesWritable suffix) { return combineTableNameSuffix(tableName.getBytes(Charset.forName("UTF-8")), suffix.get()); }
3.68
hbase_FixedLengthWrapper_getLength
/** * Retrieve the maximum length (in bytes) of encoded values. */ public int getLength() { return length; }
3.68
hadoop_RouterHeartbeatService_updateStateAsync
/** * Trigger the update of the Router state asynchronously. */ protected void updateStateAsync() { Thread thread = new Thread(this::updateStateStore, "Router Heartbeat Async"); thread.setDaemon(true); thread.start(); }
3.68
hbase_HRegion_lockRowsAndBuildMiniBatch
/**
 * Creates Mini-batch of all operations [nextIndexToProcess, lastIndexExclusive) for which a row
 * lock can be acquired. All mutations with locked rows are considered to be In-progress
 * operations and hence the name {@link MiniBatchOperationInProgress}. Mini batch is a window
 * over {@link BatchOperation} and contains contiguous pending operations.
 * @param acquiredRowLocks keeps track of rowLocks acquired.
 */
public MiniBatchOperationInProgress<Mutation>
    lockRowsAndBuildMiniBatch(List<RowLock> acquiredRowLocks) throws IOException {
  int readyToWriteCount = 0;
  int lastIndexExclusive = 0;
  RowLock prevRowLock = null;
  for (; lastIndexExclusive < size(); lastIndexExclusive++) {
    // If it reaches the miniBatchSize, stop here and process the miniBatch.
    // This only applies to non-atomic batch operations.
    if (!isAtomic() && (readyToWriteCount == region.miniBatchSize)) {
      break;
    }
    if (!isOperationPending(lastIndexExclusive)) {
      continue;
    }
    // HBASE-19389 Limit concurrency of put with dense (hundreds) columns to avoid exhausting
    // RS handlers, covering both MutationBatchOperation and ReplayBatchOperation
    // The BAD_FAMILY/SANITY_CHECK_FAILURE cases are handled in checkAndPrepare phase and won't
    // pass the isOperationPending check
    Map<byte[], List<Cell>> curFamilyCellMap = getMutation(lastIndexExclusive).getFamilyCellMap();
    try {
      // start the protector before acquiring row lock considering performance, and will finish
      // it when encountering exception
      region.storeHotnessProtector.start(curFamilyCellMap);
    } catch (RegionTooBusyException rtbe) {
      region.storeHotnessProtector.finish(curFamilyCellMap);
      if (isAtomic()) {
        throw rtbe;
      }
      retCodeDetails[lastIndexExclusive] =
        new OperationStatus(OperationStatusCode.STORE_TOO_BUSY, rtbe.getMessage());
      continue;
    }
    Mutation mutation = getMutation(lastIndexExclusive);
    // If we haven't got any rows in our batch, we should block to get the next one.
    RowLock rowLock = null;
    boolean throwException = false;
    try {
      // if atomic then get exclusive lock, else shared lock
      rowLock = region.getRowLock(mutation.getRow(), !isAtomic(), prevRowLock);
    } catch (TimeoutIOException | InterruptedIOException e) {
      // NOTE: We will retry when other exceptions, but we should stop if we receive
      // TimeoutIOException or InterruptedIOException as operation has timed out or
      // interrupted respectively.
      throwException = true;
      throw e;
    } catch (IOException ioe) {
      LOG.warn("Failed getting lock, row={}, in region {}",
        Bytes.toStringBinary(mutation.getRow()), this, ioe);
      if (isAtomic()) { // fail, atomic means all or none
        throwException = true;
        throw ioe;
      }
    } catch (Throwable throwable) {
      throwException = true;
      throw throwable;
    } finally {
      if (throwException) {
        region.storeHotnessProtector.finish(curFamilyCellMap);
      }
    }
    if (rowLock == null) {
      // We failed to grab another lock
      if (isAtomic()) {
        region.storeHotnessProtector.finish(curFamilyCellMap);
        throw new IOException("Can't apply all operations atomically!");
      }
      break; // Stop acquiring more rows for this batch
    } else {
      if (rowLock != prevRowLock) {
        // It is a different row now, add this to the acquiredRowLocks and
        // set prevRowLock to the new returned rowLock
        acquiredRowLocks.add(rowLock);
        prevRowLock = rowLock;
      }
    }
    readyToWriteCount++;
  }
  return createMiniBatch(lastIndexExclusive, readyToWriteCount);
}
3.68
hadoop_EncryptionSecrets_hasEncryptionAlgorithm
/** * Does this instance have encryption options? * That is: is the algorithm non-null. * @return true if there's an encryption algorithm. */ public boolean hasEncryptionAlgorithm() { return StringUtils.isNotEmpty(encryptionAlgorithm); }
3.68
hbase_Hash_parseHashType
/** * This utility method converts String representation of hash function name to a symbolic * constant. Currently three function types are supported, "jenkins", "murmur" and "murmur3". * @param name hash function name * @return one of the predefined constants */ public static int parseHashType(String name) { if ("jenkins".equalsIgnoreCase(name)) { return JENKINS_HASH; } else if ("murmur".equalsIgnoreCase(name)) { return MURMUR_HASH; } else if ("murmur3".equalsIgnoreCase(name)) { return MURMUR_HASH3; } else { return INVALID_HASH; } }
3.68
framework_MultiSelectionEvent_getAllSelectedItems
/** * {@inheritDoc} * <p> * This is the same as {@link #getValue()}. */ @Override public Set<T> getAllSelectedItems() { return getValue(); }
3.68
hadoop_OBSCommonUtils_closeAll
/** * Close the Closeable objects and <b>ignore</b> any Exception or null * pointers. (This is the SLF4J equivalent of that in {@code IOUtils}). * * @param closeables the objects to close */ static void closeAll(final java.io.Closeable... closeables) { for (java.io.Closeable c : closeables) { if (c != null) { try { if (LOG != null) { LOG.debug("Closing {}", c); } c.close(); } catch (Exception e) { if (LOG != null && LOG.isDebugEnabled()) { LOG.debug("Exception in closing {}", c, e); } } } } }
3.68
flink_Execution_processFail
/**
 * Process an execution failure. The failure can be fired by JobManager or reported by
 * TaskManager. If it is fired by JobManager and the execution is already deployed, it needs to
 * send an RPC call to remove the task from TaskManager. It also needs to release the produced
 * partitions if it fails before deployed (because the partitions are possibly already created
 * in external shuffle service) or JobManager proactively fails it (in case that it finishes in
 * TaskManager when JobManager tries to fail it). The failure will be notified to SchedulerNG if
 * it is from within the ExecutionGraph. This is to trigger the failure handling of SchedulerNG
 * to recover this failed execution.
 *
 * @param t Failure cause
 * @param cancelTask Indicating whether to send an RPC call to remove task from TaskManager.
 *     True if the failure is fired by JobManager and the execution is already deployed.
 *     Otherwise it should be false.
 * @param userAccumulators User accumulators
 * @param metrics IO metrics
 * @param releasePartitions Indicating whether to release result partitions produced by this
 *     execution. False if the task is FAILED in TaskManager, otherwise true.
 * @param fromSchedulerNg Indicating whether the failure is from the SchedulerNg. It should be
 *     false if it is from within the ExecutionGraph.
 */
private void processFail(
    Throwable t,
    boolean cancelTask,
    Map<String, Accumulator<?, ?>> userAccumulators,
    IOMetrics metrics,
    boolean releasePartitions,
    boolean fromSchedulerNg) {
  assertRunningInJobMasterMainThread();

  ExecutionState current = this.state;

  if (current == FAILED) {
    // already failed. It is enough to remember once that we failed (it's sad enough)
    return;
  }

  if (current == CANCELED || current == FINISHED) {
    // we are already aborting or are already aborted or we are already finished
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "Ignoring transition of vertex {} to {} while being {}.",
          getVertexWithAttempt(),
          FAILED,
          current);
    }
    return;
  }

  if (current == CANCELING) {
    completeCancelling(userAccumulators, metrics, true);
    return;
  }

  if (!fromSchedulerNg) {
    vertex.getExecutionGraphAccessor()
        .notifySchedulerNgAboutInternalTaskFailure(
            attemptId, t, cancelTask, releasePartitions);
    return;
  }

  checkState(transitionState(current, FAILED, t));

  // success (in a manner of speaking)
  this.failureCause =
      Optional.of(ErrorInfo.createErrorInfoWithNullableCause(t, getStateTimestamp(FAILED)));

  updateAccumulatorsAndMetrics(userAccumulators, metrics);

  releaseAssignedResource(t);
  vertex.getExecutionGraphAccessor().deregisterExecution(this);

  maybeReleasePartitionsAndSendCancelRpcCall(current, cancelTask, releasePartitions);
}
3.68
framework_AbsoluteLayoutRelativeSizeContent_createHalfTableOnFull
/** * Creates an {@link AbsoluteLayout} of full size that contains a half-sized * {@link Table}. * * @return the created layout */ private Component createHalfTableOnFull() { AbsoluteLayout absoluteLayout = new AbsoluteLayout(); absoluteLayout.setSizeFull(); absoluteLayout.setId("halfinfull-layout"); absoluteLayout.setCaption("half-sized table expected"); Table table = new Table(); table.setWidth(50, Unit.PERCENTAGE); table.setHeight(50, Unit.PERCENTAGE); table.setId("halfinfull-table"); absoluteLayout.addComponent(table); return absoluteLayout; }
3.68
framework_GridLayout_readDesign
/**
 * {@inheritDoc}
 * <p>
 * After reading the design, cursorY is set to point to a row outside of the
 * GridLayout area. CursorX is reset to 0.
 */
@Override
public void readDesign(Element design, DesignContext designContext) {
  super.readDesign(design, designContext);
  setMargin(readMargin(design, getMargin(), designContext));
  if (design.childNodeSize() > 0) {
    // Touch content only if there is some content specified. This is
    // needed to be able to use extended GridLayouts which add
    // components in the constructor (e.g. Designs based on GridLayout).
    readChildComponents(design.children(), designContext);
  }
  // Set cursor position explicitly
  setCursorY(getRows());
  setCursorX(0);
}
3.68
AreaShop_AreaShop_setupTasks
/**
 * Register all required tasks.
 */
private void setupTasks() {
  // Rent expiration timer
  long expirationCheck = Utils.millisToTicks(Utils.getDurationFromSecondsOrString("expiration.delay"));
  final AreaShop finalPlugin = this;
  if(expirationCheck > 0) {
    Do.syncTimer(expirationCheck, () -> {
      if(isReady()) {
        finalPlugin.getFileManager().checkRents();
        AreaShop.debugTask("Checking rent expirations...");
      } else {
        AreaShop.debugTask("Skipped checking rent expirations, plugin not ready");
      }
    });
  }
  // Inactive unrenting/selling timer
  long inactiveCheck = Utils.millisToTicks(Utils.getDurationFromMinutesOrString("inactive.delay"));
  if(inactiveCheck > 0) {
    Do.syncTimer(inactiveCheck, () -> {
      if(isReady()) {
        finalPlugin.getFileManager().checkForInactiveRegions();
        AreaShop.debugTask("Checking for regions with players that are inactive too long...");
      } else {
        AreaShop.debugTask("Skipped checking for regions of inactive players, plugin not ready");
      }
    });
  }
  // Periodic updating of signs for timeleft tags
  long periodicUpdate = Utils.millisToTicks(Utils.getDurationFromSecondsOrString("signs.delay"));
  if(periodicUpdate > 0) {
    Do.syncTimer(periodicUpdate, () -> {
      if(isReady()) {
        finalPlugin.getFileManager().performPeriodicSignUpdate();
        AreaShop.debugTask("Performing periodic sign update...");
      } else {
        AreaShop.debugTask("Skipped performing periodic sign update, plugin not ready");
      }
    });
  }
  // Saving regions and group settings
  long saveFiles = Utils.millisToTicks(Utils.getDurationFromMinutesOrString("saving.delay"));
  if(saveFiles > 0) {
    Do.syncTimer(saveFiles, () -> {
      if(isReady()) {
        finalPlugin.getFileManager().saveRequiredFiles();
        AreaShop.debugTask("Saving required files...");
      } else {
        AreaShop.debugTask("Skipped saving required files, plugin not ready");
      }
    });
  }
  // Sending warnings about rent regions to online players
  long expireWarning = Utils.millisToTicks(Utils.getDurationFromMinutesOrString("expireWarning.delay"));
  if(expireWarning > 0) {
    Do.syncTimer(expireWarning, () -> {
      if(isReady()) {
        finalPlugin.getFileManager().sendRentExpireWarnings();
        AreaShop.debugTask("Sending rent expire warnings...");
      } else {
        AreaShop.debugTask("Skipped sending rent expire warnings, plugin not ready");
      }
    });
  }
  // Update all regions on startup
  if(getConfig().getBoolean("updateRegionsOnStartup")) {
    Do.syncLater(20, () -> {
      finalPlugin.getFileManager().updateAllRegions();
      AreaShop.debugTask("Updating all regions at startup...");
    });
  }
}
3.68
querydsl_DateExpression_dayOfWeek
/** * Create a day of week expression (range 1-7 / SUN-SAT) * <p>NOT supported in JDOQL and not in Derby</p> * * @return day of week */ public NumberExpression<Integer> dayOfWeek() { if (dayOfWeek == null) { dayOfWeek = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.DAY_OF_WEEK, mixin); } return dayOfWeek; }
3.68
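For comparison, the same SUN=1..SAT=7 numbering reproduced with java.time; this is illustrative only and independent of the SQL that Querydsl generates:

import java.time.LocalDate;

public class DayOfWeekDemo {
  public static void main(String[] args) {
    LocalDate d = LocalDate.of(2024, 1, 7); // a Sunday
    // java.time numbers MON=1..SUN=7; shift to the SUN=1..SAT=7 range used above
    int sunBased = d.getDayOfWeek().getValue() % 7 + 1;
    System.out.println(sunBased); // 1
  }
}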
rocketmq-connect_LocalStateManagementServiceImpl_start
/** * Start dependent services (if needed) */ @Override public void start() { connectorStatusStore.load(); taskStatusStore.load(); dataSynchronizer.start(); startSignal(); }
3.68
framework_VLoadingIndicator_setThirdDelay
/** * Sets the delay (in ms) which must pass before the loading indicator moves * to its "third" state. * * @param thirdDelay * The delay (in ms) from the event until changing the loading * indicator into its "third" state. Counted from when * {@link #trigger()} is called. */ public void setThirdDelay(int thirdDelay) { this.thirdDelay = thirdDelay; }
3.68
pulsar_TxnLogBufferedWriter_close
/**
 * Release resources and cancel pending tasks.
 */
public CompletableFuture<Void> close() {
  // If the batch feature is disabled, there is nothing to close, so just set the state.
  if (!batchEnabled) {
    STATE_UPDATER.compareAndSet(this, State.OPEN, State.CLOSED);
    return CompletableFuture.completedFuture(null);
  }
  // If another thread already called "close()", do nothing.
  if (!STATE_UPDATER.compareAndSet(this, State.OPEN, State.CLOSING)) {
    return CompletableFuture.completedFuture(null);
  }
  CompletableFuture<Void> closeFuture = new CompletableFuture<>();
  // Cancel pending tasks and release resources.
  FutureUtil.safeRunAsync(() -> {
    // If some requests are flushed, BK will trigger these callbacks, and the remaining
    // requests in the queue should fail.
    failureCallbackByContextAndRecycle(flushContext,
        new ManagedLedgerException.ManagedLedgerFencedException(
            new Exception("Transaction log buffered write has closed")));
    // Cancel the timing task.
    if (!timeout.isCancelled()) {
      this.timeout.cancel();
    }
    STATE_UPDATER.set(this, State.CLOSED);
    closeFuture.complete(null);
  }, singleThreadExecutorForWrite, closeFuture);
  return closeFuture;
}
3.68
hbase_HFileLink_createHFileLinkName
/** * Create a new HFileLink name * @param tableName - Linked HFile table name * @param regionName - Linked HFile region name * @param hfileName - Linked HFile name * @return file name of the HFile Link */ public static String createHFileLinkName(final TableName tableName, final String regionName, final String hfileName) { String s = String.format("%s=%s-%s", tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '='), regionName, hfileName); return s; }
3.68
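A standalone illustration of the produced link-name format, with made-up region and file names:

public class LinkNameDemo {
  public static void main(String[] args) {
    // ':' is HBase's namespace delimiter; it is mapped to '=' in link names.
    String table = "ns:mytable".replace(':', '=');
    String region = "4fd65bc0d46abcdef0123456789abcde"; // made-up encoded region name
    String hfile = "d2c79a65e6f44c9a"; // made-up hfile name
    System.out.println(String.format("%s=%s-%s", table, region, hfile));
    // prints: ns=mytable=4fd65bc0d46abcdef0123456789abcde-d2c79a65e6f44c9a
  }
}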
Activiti_Identity_primTransform
/** * {@inheritDoc} */ @Override protected Object primTransform(Object anObject) throws Exception { return anObject; }
3.68
hbase_ExportSnapshot_getSnapshotFiles
/**
 * Extract the list of files (HFiles/WALs) to copy using Map-Reduce.
 * @return list of files referenced by the snapshot (pair of path and size)
 */
private static List<Pair<SnapshotFileInfo, Long>> getSnapshotFiles(final Configuration conf,
    final FileSystem fs, final Path snapshotDir) throws IOException {
  SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);

  final List<Pair<SnapshotFileInfo, Long>> files = new ArrayList<>();
  final TableName table = TableName.valueOf(snapshotDesc.getTable());

  // Get snapshot files
  LOG.info("Loading Snapshot '" + snapshotDesc.getName() + "' hfile list");
  SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshotDesc,
    new SnapshotReferenceUtil.SnapshotVisitor() {
      @Override
      public void storeFile(final RegionInfo regionInfo, final String family,
        final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
        Pair<SnapshotFileInfo, Long> snapshotFileAndSize = null;
        if (!storeFile.hasReference()) {
          String region = regionInfo.getEncodedName();
          String hfile = storeFile.getName();
          snapshotFileAndSize = getSnapshotFileAndSize(fs, conf, table, region, family, hfile,
            storeFile.hasFileSize() ? storeFile.getFileSize() : -1);
        } else {
          Pair<String, String> referredToRegionAndFile =
            StoreFileInfo.getReferredToRegionAndFile(storeFile.getName());
          String referencedRegion = referredToRegionAndFile.getFirst();
          String referencedHFile = referredToRegionAndFile.getSecond();
          snapshotFileAndSize = getSnapshotFileAndSize(fs, conf, table, referencedRegion, family,
            referencedHFile, storeFile.hasFileSize() ? storeFile.getFileSize() : -1);
        }
        files.add(snapshotFileAndSize);
      }
    });

  return files;
}
3.68
hbase_OrderedNumeric_encodeDouble
/** * Write instance {@code val} into buffer {@code dst}. * @param dst the {@link PositionedByteRange} to write to * @param val the value to write to {@code dst} * @return the number of bytes written */ public int encodeDouble(PositionedByteRange dst, double val) { return OrderedBytes.encodeNumeric(dst, val, order); }
3.68
flink_FileSink_build
/** Creates the actual sink. */ public FileSink<IN> build() { return new FileSink<>(this); }
3.68
flink_ConnectionUtils_hasCommonPrefix
/** * Checks if two addresses have a common prefix (first 2 bytes). Example: 192.168.???.??? Works * also with ipv6, but accepts probably too many addresses */ private static boolean hasCommonPrefix(byte[] address, byte[] address2) { return address[0] == address2[0] && address[1] == address2[1]; }
3.68
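A small runnable sketch of the /16 prefix comparison on raw address bytes (the addresses are illustrative):

import java.net.InetAddress;

public class PrefixDemo {
  public static void main(String[] args) throws Exception {
    byte[] a = InetAddress.getByName("192.168.1.10").getAddress();
    byte[] b = InetAddress.getByName("192.168.200.7").getAddress();
    byte[] c = InetAddress.getByName("10.0.0.1").getAddress();
    System.out.println(a[0] == b[0] && a[1] == b[1]); // true  (same /16)
    System.out.println(a[0] == c[0] && a[1] == c[1]); // false
  }
}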
hbase_ReplicationQueueInfo_extractDeadServersFromZNodeString
/**
 * Parse dead server names from a queue id. A servername can contain "-" such as
 * "ip-10-46-221-101.ec2.internal", so we need to skip some "-" during parsing for the following
 * cases: 2-ip-10-46-221-101.ec2.internal,52170,1364333181125-&lt;server name>-...
 */
private static void extractDeadServersFromZNodeString(String deadServerListStr,
    List<ServerName> result) {
  if (deadServerListStr == null || result == null || deadServerListStr.isEmpty()) {
    return;
  }
  // valid server name delimiter "-" has to be after "," in a server name
  int seenCommaCnt = 0;
  int startIndex = 0;
  int len = deadServerListStr.length();
  for (int i = 0; i < len; i++) {
    switch (deadServerListStr.charAt(i)) {
      case ',':
        seenCommaCnt += 1;
        break;
      case '-':
        if (seenCommaCnt >= 2) {
          if (i > startIndex) {
            String serverName = deadServerListStr.substring(startIndex, i);
            if (ServerName.isFullServerName(serverName)) {
              result.add(ServerName.valueOf(serverName));
            } else {
              LOG.error("Found invalid server name:" + serverName);
            }
            startIndex = i + 1;
          }
          seenCommaCnt = 0;
        }
        break;
      default:
        break;
    }
  }
  // add tail
  if (startIndex < len - 1) {
    String serverName = deadServerListStr.substring(startIndex, len);
    if (ServerName.isFullServerName(serverName)) {
      result.add(ServerName.valueOf(serverName));
    } else {
      LOG.error("Found invalid server name at the end:" + serverName);
    }
  }
  LOG.debug("Found dead servers:" + result);
}
3.68
hbase_RpcServer_getMetrics
/** * Returns the metrics instance for reporting RPC call statistics */ @Override public MetricsHBaseServer getMetrics() { return metrics; }
3.68
flink_ClassDataTypeConverter_extractDataType
/**
 * Returns the clearly identifiable data type if possible. For example, {@link Long} can be
 * expressed as {@link DataTypes#BIGINT()}. However, for example, {@link Row} cannot be
 * extracted as information about the fields is missing. Or {@link BigDecimal} needs to be
 * mapped from a variable precision/scale to constant ones.
 */
@SuppressWarnings("unchecked")
public static Optional<DataType> extractDataType(Class<?> clazz) {
  // prefer BYTES over ARRAY<TINYINT> for byte[]
  if (clazz == byte[].class) {
    return Optional.of(DataTypes.BYTES());
  }
  if (clazz.isArray()) {
    return extractDataType(clazz.getComponentType()).map(DataTypes::ARRAY);
  }
  if (TableSymbol.class.isAssignableFrom(clazz)) {
    return Optional.of(new AtomicDataType(new SymbolType<>(), clazz));
  }
  return Optional.ofNullable(defaultDataTypes.get(clazz.getName()));
}
3.68
framework_VComboBox_resetCurrentSuggestionBasedOnServerResponse
/* * Updates the current suggestion based on values provided by the * server. */ private void resetCurrentSuggestionBasedOnServerResponse( String selectedKey, String selectedCaption, String selectedIconUri) { if (currentSuggestion == null && (selectedKey != null || selectedCaption != null)) { currentSuggestion = new ComboBoxSuggestion(selectedKey, selectedCaption, "", selectedIconUri); } else if (selectedKey == null && selectedCaption == null) { currentSuggestion = null; } }
3.68
framework_Form_setInternalValue
/**
 * Sets the internal value.
 *
 * This is relevant when the Form is used as Field.
 *
 * @see AbstractField#setInternalValue(java.lang.Object)
 */
@Override
protected void setInternalValue(Object newValue) {
  // Stores the old value
  final Object oldValue = propertyValue;

  // Sets the current Value
  super.setInternalValue(newValue);
  propertyValue = newValue;

  // Ignores form updating if data object has not changed.
  if (oldValue != newValue) {
    setFormDataSource(newValue, getVisibleItemProperties());
  }
}
3.68