protected boolean runAllTasks(long timeoutNanos) {
fetchFromScheduledTaskQueue();
Runnable task = pollTask();
if (task == null) {
afterRunningAllTasks();
return false;
}
final long deadline = ScheduledFutureTask.nanoTime() + timeoutNanos;
long runTasks = 0;
long lastExecutionTime;
for (;;) {
safeExecute(task);
runTasks ++;
// Check timeout every 64 tasks because nanoTime() is relatively expensive.
// XXX: Hard-coded value - will make it configurable if it is really a problem.
if ((runTasks & 0x3F) == 0) {
lastExecutionTime = ScheduledFutureTask.nanoTime();
if (lastExecutionTime >= deadline) {
break;
}
}
task = pollTask();
if (task == null) {
lastExecutionTime = ScheduledFutureTask.nanoTime();
break;
}
}
afterRunningAllTasks();
this.lastExecutionTime = lastExecutionTime;
return true;
} | Poll all tasks from the task queue and run them via {@link Runnable#run()} method. This method stops running
the tasks in the task queue and returns if it ran longer than {@code timeoutNanos}. |
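The 64-task batching of the deadline check above is a general pattern for amortizing an expensive clock read. A minimal standalone sketch of the same idea, with a plain java.util queue standing in for Netty's internal task queue:
import java.util.Queue;

final class BatchedDeadlineLoop {
    /** Runs queued tasks but checks the clock only once per 64 tasks. */
    static boolean runAllTasks(Queue<Runnable> taskQueue, long timeoutNanos) {
        Runnable task = taskQueue.poll();
        if (task == null) {
            return false;
        }
        final long deadline = System.nanoTime() + timeoutNanos;
        long runTasks = 0;
        for (;;) {
            task.run();
            // Same trick as above: nanoTime() is relatively expensive, so only
            // consult it every 64 executed tasks.
            if ((++runTasks & 0x3F) == 0 && System.nanoTime() - deadline >= 0) {
                break;
            }
            task = taskQueue.poll();
            if (task == null) {
                break;
            }
        }
        return true;
    }
}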
protected long delayNanos(long currentTimeNanos) {
ScheduledFutureTask<?> scheduledTask = peekScheduledTask();
if (scheduledTask == null) {
return SCHEDULE_PURGE_INTERVAL;
}
return scheduledTask.delayNanos(currentTimeNanos);
} | Returns the amount of time left until the scheduled task with the closest deadline is executed. |
@UnstableApi
protected long deadlineNanos() {
ScheduledFutureTask<?> scheduledTask = peekScheduledTask();
if (scheduledTask == null) {
return nanoTime() + SCHEDULE_PURGE_INTERVAL;
}
return scheduledTask.deadlineNanos();
} | Returns the absolute point in time (relative to {@link #nanoTime()}) at which the next
closest scheduled task should run. |
public void addShutdownHook(final Runnable task) {
if (inEventLoop()) {
shutdownHooks.add(task);
} else {
execute(new Runnable() {
@Override
public void run() {
shutdownHooks.add(task);
}
});
}
} | Add a {@link Runnable} which will be executed on shutdown of this instance |
public void removeShutdownHook(final Runnable task) {
if (inEventLoop()) {
shutdownHooks.remove(task);
} else {
execute(new Runnable() {
@Override
public void run() {
shutdownHooks.remove(task);
}
});
}
} | Remove a {@link Runnable} that was previously added as a shutdown hook |
protected boolean confirmShutdown() {
if (!isShuttingDown()) {
return false;
}
if (!inEventLoop()) {
throw new IllegalStateException("must be invoked from an event loop");
}
cancelScheduledTasks();
if (gracefulShutdownStartTime == 0) {
gracefulShutdownStartTime = ScheduledFutureTask.nanoTime();
}
if (runAllTasks() || runShutdownHooks()) {
if (isShutdown()) {
// Executor shut down - no new tasks anymore.
return true;
}
// There were tasks in the queue. Wait a little bit more until no tasks are queued for the quiet period or
// terminate if the quiet period is 0.
// See https://github.com/netty/netty/issues/4241
if (gracefulShutdownQuietPeriod == 0) {
return true;
}
wakeup(true);
return false;
}
final long nanoTime = ScheduledFutureTask.nanoTime();
if (isShutdown() || nanoTime - gracefulShutdownStartTime > gracefulShutdownTimeout) {
return true;
}
if (nanoTime - lastExecutionTime <= gracefulShutdownQuietPeriod) {
// Check if any tasks were added to the queue every 100ms.
// TODO: Change the behavior of takeTask() so that it returns on timeout.
wakeup(true);
try {
Thread.sleep(100);
} catch (InterruptedException e) {
// Ignore
}
return false;
}
// No tasks were added for last quiet period - hopefully safe to shut down.
// (Hopefully because we really cannot make a guarantee that there will be no execute() calls by a user.)
return true;
} | Confirm that the shutdown of the instance should be done now. |
public final ThreadProperties threadProperties() {
ThreadProperties threadProperties = this.threadProperties;
if (threadProperties == null) {
Thread thread = this.thread;
if (thread == null) {
assert !inEventLoop();
submit(NOOP_TASK).syncUninterruptibly();
thread = this.thread;
assert thread != null;
}
threadProperties = new DefaultThreadProperties(thread);
if (!PROPERTIES_UPDATER.compareAndSet(this, null, threadProperties)) {
threadProperties = this.threadProperties;
}
}
return threadProperties;
} | Returns the {@link ThreadProperties} of the {@link Thread} that powers the {@link SingleThreadEventExecutor}.
If the {@link SingleThreadEventExecutor} is not started yet, this operation will start it and block until
it is fully started. |
boolean add(DatagramPacket packet) {
if (count == packets.length) {
// We already filled up to UIO_MAX_IOV messages. This is the max allowed per sendmmsg(...) call, we will
// try again later.
return false;
}
ByteBuf content = packet.content();
int len = content.readableBytes();
if (len == 0) {
return true;
}
NativeDatagramPacket p = packets[count];
InetSocketAddress recipient = packet.recipient();
int offset = iovArray.count();
if (!iovArray.add(content)) {
// Not enough space to hold the whole content, we will try again later.
return false;
}
p.init(iovArray.memoryAddress(offset), iovArray.count() - offset, recipient);
count++;
return true;
} | Try to add the given {@link DatagramPacket}. Returns {@code true} on success,
{@code false} otherwise. |
void writeBits(ByteBuf out, final int count, final long value) {
if (count < 0 || count > 32) {
throw new IllegalArgumentException("count: " + count + " (expected: 0-32)");
}
int bitCount = this.bitCount;
long bitBuffer = this.bitBuffer | ((value << (64 - count)) >>> bitCount);
bitCount += count;
if (bitCount >= 32) {
out.writeInt((int) (bitBuffer >>> 32));
bitBuffer <<= 32;
bitCount -= 32;
}
this.bitBuffer = bitBuffer;
this.bitCount = bitCount;
} | Writes up to 32 bits to the output {@link ByteBuf}.
@param count The number of bits to write (maximum {@code 32}, the width of an {@code int})
@param value The bits to write |
void writeBoolean(ByteBuf out, final boolean value) {
int bitCount = this.bitCount + 1;
long bitBuffer = this.bitBuffer | (value ? 1L << (64 - bitCount) : 0L);
if (bitCount == 32) {
out.writeInt((int) (bitBuffer >>> 32));
bitBuffer = 0;
bitCount = 0;
}
this.bitBuffer = bitBuffer;
this.bitCount = bitCount;
} | Writes a single bit to the output {@link ByteBuf}.
@param value The bit to write |
void writeUnary(ByteBuf out, int value) {
if (value < 0) {
throw new IllegalArgumentException("value: " + value + " (expected 0 or more)");
}
while (value-- > 0) {
writeBoolean(out, true);
}
writeBoolean(out, false);
} | Writes a zero-terminated unary number to the output {@link ByteBuf}.
Example of the output for value = 6: {@code 1111110}
@param value The number of {@code 1} bits to write |
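For intuition, the unary code for N is N one-bits followed by a terminating zero-bit. A tiny standalone illustration that renders the code as a string rather than writing real bits:
final class UnaryDemo {
    static String unary(int value) {
        StringBuilder sb = new StringBuilder(value + 1);
        for (int i = 0; i < value; i++) {
            sb.append('1');
        }
        return sb.append('0').toString();
    }

    public static void main(String[] args) {
        System.out.println(unary(6)); // prints 1111110, matching the example above
    }
}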
void flush(ByteBuf out) {
final int bitCount = this.bitCount;
if (bitCount > 0) {
final long bitBuffer = this.bitBuffer;
final int shiftToRight = 64 - bitCount;
if (bitCount <= 8) {
out.writeByte((int) (bitBuffer >>> shiftToRight << (8 - bitCount)));
} else if (bitCount <= 16) {
out.writeShort((int) (bitBuffer >>> shiftToRight << (16 - bitCount)));
} else if (bitCount <= 24) {
out.writeMedium((int) (bitBuffer >>> shiftToRight << (24 - bitCount)));
} else {
out.writeInt((int) (bitBuffer >>> shiftToRight << (32 - bitCount)));
}
}
} | Writes any remaining bits to the output {@link ByteBuf},
zero padding to a whole byte as required. |
public void encodeHeaders(int streamId, ByteBuf out, Http2Headers headers, SensitivityDetector sensitivityDetector)
throws Http2Exception {
if (ignoreMaxHeaderListSize) {
encodeHeadersIgnoreMaxHeaderListSize(out, headers, sensitivityDetector);
} else {
encodeHeadersEnforceMaxHeaderListSize(streamId, out, headers, sensitivityDetector);
}
} | Encode the header fields into the header block.
<strong>The given {@link CharSequence}s must be immutable!</strong> |
private void encodeHeader(ByteBuf out, CharSequence name, CharSequence value, boolean sensitive, long headerSize) {
// If the header value is sensitive then it must never be indexed
if (sensitive) {
int nameIndex = getNameIndex(name);
encodeLiteral(out, name, value, IndexType.NEVER, nameIndex);
return;
}
// If the peer will only use the static table
if (maxHeaderTableSize == 0) {
int staticTableIndex = HpackStaticTable.getIndex(name, value);
if (staticTableIndex == -1) {
int nameIndex = HpackStaticTable.getIndex(name);
encodeLiteral(out, name, value, IndexType.NONE, nameIndex);
} else {
encodeInteger(out, 0x80, 7, staticTableIndex);
}
return;
}
// If the headerSize is greater than the max table size then it must be encoded literally
if (headerSize > maxHeaderTableSize) {
int nameIndex = getNameIndex(name);
encodeLiteral(out, name, value, IndexType.NONE, nameIndex);
return;
}
HeaderEntry headerField = getEntry(name, value);
if (headerField != null) {
int index = getIndex(headerField.index) + HpackStaticTable.length;
// Section 6.1. Indexed Header Field Representation
encodeInteger(out, 0x80, 7, index);
} else {
int staticTableIndex = HpackStaticTable.getIndex(name, value);
if (staticTableIndex != -1) {
// Section 6.1. Indexed Header Field Representation
encodeInteger(out, 0x80, 7, staticTableIndex);
} else {
ensureCapacity(headerSize);
encodeLiteral(out, name, value, IndexType.INCREMENTAL, getNameIndex(name));
add(name, value, headerSize);
}
}
} | Encode the header field into the header block.
<strong>The given {@link CharSequence}s must be immutable!</strong> |
public void setMaxHeaderTableSize(ByteBuf out, long maxHeaderTableSize) throws Http2Exception {
if (maxHeaderTableSize < MIN_HEADER_TABLE_SIZE || maxHeaderTableSize > MAX_HEADER_TABLE_SIZE) {
throw connectionError(PROTOCOL_ERROR, "Header Table Size must be >= %d and <= %d but was %d",
MIN_HEADER_TABLE_SIZE, MAX_HEADER_TABLE_SIZE, maxHeaderTableSize);
}
if (this.maxHeaderTableSize == maxHeaderTableSize) {
return;
}
this.maxHeaderTableSize = maxHeaderTableSize;
ensureCapacity(0);
// Casting to integer is safe as we verified the maxHeaderTableSize is a valid unsigned int.
encodeInteger(out, 0x20, 5, maxHeaderTableSize);
} | Set the maximum table size. |
private static void encodeInteger(ByteBuf out, int mask, int n, int i) {
encodeInteger(out, mask, n, (long) i);
} | Encode integer according to <a href="https://tools.ietf.org/html/rfc7541#section-5.1">Section 5.1</a>. |
private static void encodeInteger(ByteBuf out, int mask, int n, long i) {
assert n >= 0 && n <= 8 : "N: " + n;
int nbits = 0xFF >>> (8 - n);
if (i < nbits) {
out.writeByte((int) (mask | i));
} else {
out.writeByte(mask | nbits);
long length = i - nbits;
for (; (length & ~0x7F) != 0; length >>>= 7) {
out.writeByte((int) ((length & 0x7F) | 0x80));
}
out.writeByte((int) length);
}
} | Encode integer according to <a href="https://tools.ietf.org/html/rfc7541#section-5.1">Section 5.1</a>. |
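RFC 7541 Section 5.1 works through 1337 with a 5-bit prefix: the prefix saturates at 31, and the remainder 1306 is emitted in 7-bit groups (154 with the continuation bit set, then 10). A standalone sketch of the same algorithm, collecting bytes into a list instead of a ByteBuf:
import java.util.ArrayList;
import java.util.List;

final class HpackIntegerDemo {
    /** Encodes i with an n-bit prefix under the given mask, per RFC 7541 Section 5.1. */
    static List<Integer> encodeInteger(int mask, int n, long i) {
        List<Integer> out = new ArrayList<Integer>();
        int nbits = 0xFF >>> (8 - n);
        if (i < nbits) {
            out.add(mask | (int) i);
        } else {
            out.add(mask | nbits);
            long length = i - nbits;
            for (; (length & ~0x7F) != 0; length >>>= 7) {
                out.add((int) ((length & 0x7F) | 0x80));
            }
            out.add((int) length);
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(encodeInteger(0x00, 5, 1337)); // prints [31, 154, 10]
    }
}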
private void encodeStringLiteral(ByteBuf out, CharSequence string) {
int huffmanLength = hpackHuffmanEncoder.getEncodedLength(string);
if (huffmanLength < string.length()) {
encodeInteger(out, 0x80, 7, huffmanLength);
hpackHuffmanEncoder.encode(out, string);
} else {
encodeInteger(out, 0x00, 7, string.length());
if (string instanceof AsciiString) {
// Fast-path
AsciiString asciiString = (AsciiString) string;
out.writeBytes(asciiString.array(), asciiString.arrayOffset(), asciiString.length());
} else {
// Only ASCII is allowed in HTTP/2 headers, so it's fine to use this.
// https://tools.ietf.org/html/rfc7540#section-8.1.2
out.writeCharSequence(string, CharsetUtil.ISO_8859_1);
}
}
} | Encode string literal according to Section 5.2. |
private void encodeLiteral(ByteBuf out, CharSequence name, CharSequence value, IndexType indexType,
int nameIndex) {
boolean nameIndexValid = nameIndex != -1;
switch (indexType) {
case INCREMENTAL:
encodeInteger(out, 0x40, 6, nameIndexValid ? nameIndex : 0);
break;
case NONE:
encodeInteger(out, 0x00, 4, nameIndexValid ? nameIndex : 0);
break;
case NEVER:
encodeInteger(out, 0x10, 4, nameIndexValid ? nameIndex : 0);
break;
default:
throw new Error("should not reach here");
}
if (!nameIndexValid) {
encodeStringLiteral(out, name);
}
encodeStringLiteral(out, value);
} | Encode literal header field according to Section 6.2. |
HpackHeaderField getHeaderField(int index) {
HeaderEntry entry = head;
while (index-- >= 0) {
entry = entry.before;
}
return entry;
} | Return the header field at the given index. Exposed for testing. |
private HeaderEntry getEntry(CharSequence name, CharSequence value) {
if (length() == 0 || name == null || value == null) {
return null;
}
int h = AsciiString.hashCode(name);
int i = index(h);
for (HeaderEntry e = headerFields[i]; e != null; e = e.next) {
// To avoid short circuit behavior a bitwise operator is used instead of a boolean operator.
if (e.hash == h && (equalsConstantTime(name, e.name) & equalsConstantTime(value, e.value)) != 0) {
return e;
}
}
return null;
} | Returns the header entry with the lowest index value for the header field. Returns {@code null} if
the header field is not in the dynamic table. |
private int getIndex(CharSequence name) {
if (length() == 0 || name == null) {
return -1;
}
int h = AsciiString.hashCode(name);
int i = index(h);
for (HeaderEntry e = headerFields[i]; e != null; e = e.next) {
if (e.hash == h && equalsConstantTime(name, e.name) != 0) {
return getIndex(e.index);
}
}
return -1;
} | Returns the lowest index value for the header field name in the dynamic table. Returns -1 if
the header field name is not in the dynamic table. |
private void add(CharSequence name, CharSequence value, long headerSize) {
// Clear the table if the header field size is larger than the maxHeaderTableSize.
if (headerSize > maxHeaderTableSize) {
clear();
return;
}
// Evict the oldest entries until there is enough room for the new header field.
while (maxHeaderTableSize - size < headerSize) {
remove();
}
int h = AsciiString.hashCode(name);
int i = index(h);
HeaderEntry old = headerFields[i];
HeaderEntry e = new HeaderEntry(h, name, value, head.before.index - 1, old);
headerFields[i] = e;
e.addBefore(head);
size += headerSize;
} | Add the header field to the dynamic table. Entries are evicted from the dynamic table until
the size of the table and the new header field is less than the table's maxHeaderTableSize. If the size
of the new entry is larger than the table's maxHeaderTableSize, the dynamic table will be cleared. |
static X509Certificate selfSignedCertificate() throws CertificateException {
return (X509Certificate) SslContext.X509_CERT_FACTORY.generateCertificate(
new ByteArrayInputStream(CERT.getBytes(CharsetUtil.US_ASCII))
);
} | Returns a self-signed {@link X509Certificate} for {@code netty.io}. |
public static boolean isCipherSuiteAvailable(String cipherSuite) {
String converted = CipherSuiteConverter.toOpenSsl(cipherSuite, IS_BORINGSSL);
if (converted != null) {
cipherSuite = converted;
}
return AVAILABLE_OPENSSL_CIPHER_SUITES.contains(cipherSuite);
} | Returns {@code true} if and only if the specified cipher suite is available in OpenSSL.
Both Java-style cipher suite and OpenSSL-style cipher suite are accepted. |
protected Object decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception {
if (discardingTooLongFrame) {
discardingTooLongFrame(in);
}
if (in.readableBytes() < lengthFieldEndOffset) {
return null;
}
int actualLengthFieldOffset = in.readerIndex() + lengthFieldOffset;
long frameLength = getUnadjustedFrameLength(in, actualLengthFieldOffset, lengthFieldLength, byteOrder);
if (frameLength < 0) {
failOnNegativeLengthField(in, frameLength, lengthFieldEndOffset);
}
frameLength += lengthAdjustment + lengthFieldEndOffset;
if (frameLength < lengthFieldEndOffset) {
failOnFrameLengthLessThanLengthFieldEndOffset(in, frameLength, lengthFieldEndOffset);
}
if (frameLength > maxFrameLength) {
exceededFrameLength(in, frameLength);
return null;
}
// never overflows because it's less than maxFrameLength
int frameLengthInt = (int) frameLength;
if (in.readableBytes() < frameLengthInt) {
return null;
}
if (initialBytesToStrip > frameLengthInt) {
failOnFrameLengthLessThanInitialBytesToStrip(in, frameLength, initialBytesToStrip);
}
in.skipBytes(initialBytesToStrip);
// extract frame
int readerIndex = in.readerIndex();
int actualFrameLength = frameLengthInt - initialBytesToStrip;
ByteBuf frame = extractFrame(ctx, in, readerIndex, actualFrameLength);
in.readerIndex(readerIndex + actualFrameLength);
return frame;
} | Create a frame out of the {@link ByteBuf} and return it.
@param ctx the {@link ChannelHandlerContext} which this {@link ByteToMessageDecoder} belongs to
@param in the {@link ByteBuf} from which to read data
@return frame the {@link ByteBuf} which represent the frame or {@code null} if no frame could
be created. |
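As a usage sketch, here is a typical pipeline setup for frames carrying a 2-byte big-endian length prefix, using the public five-argument constructor (the 1 MiB limit is an arbitrary choice):
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;

final class FrameDecoderInitializer extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) {
        // maxFrameLength = 1 MiB, length field at offset 0 and 2 bytes wide,
        // no length adjustment, strip the 2-byte prefix from the emitted frame.
        ch.pipeline().addLast(new LengthFieldBasedFrameDecoder(1048576, 0, 2, 0, 2));
    }
}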
protected long getUnadjustedFrameLength(ByteBuf buf, int offset, int length, ByteOrder order) {
buf = buf.order(order);
long frameLength;
switch (length) {
case 1:
frameLength = buf.getUnsignedByte(offset);
break;
case 2:
frameLength = buf.getUnsignedShort(offset);
break;
case 3:
frameLength = buf.getUnsignedMedium(offset);
break;
case 4:
frameLength = buf.getUnsignedInt(offset);
break;
case 8:
frameLength = buf.getLong(offset);
break;
default:
throw new DecoderException(
"unsupported lengthFieldLength: " + lengthFieldLength + " (expected: 1, 2, 3, 4, or 8)");
}
return frameLength;
} | Decodes the specified region of the buffer into an unadjusted frame length. The default implementation is
capable of decoding the specified region into an unsigned 8/16/24/32/64 bit integer. Override this method to
decode the length field encoded differently. Note that this method must not modify the state of the specified
buffer (e.g. {@code readerIndex}, {@code writerIndex}, and the content of the buffer.)
@throws DecoderException if failed to decode the specified region |
protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, int index, int length) {
return buffer.retainedSlice(index, length);
} | Extract the sub-region of the specified buffer.
<p>
If you are sure that the frame and its content are not accessed after
the current {@link #decode(ChannelHandlerContext, ByteBuf)}
call returns, you can even avoid memory copy by returning the sliced
sub-region (i.e. <tt>return buffer.slice(index, length)</tt>).
It's often useful when you convert the extracted frame into an object.
Refer to the source code of {@link ObjectDecoder} to see how this method
is overridden to avoid memory copy. |
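A minimal override along the lines described above, returning an unretained slice; this is a sketch, and it is only safe if the frame is fully consumed before the current decode() call returns:
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;

final class SlicingFrameDecoder extends LengthFieldBasedFrameDecoder {
    SlicingFrameDecoder(int maxFrameLength, int lengthFieldOffset, int lengthFieldLength) {
        super(maxFrameLength, lengthFieldOffset, lengthFieldLength);
    }

    @Override
    protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, int index, int length) {
        // No retain, no copy: valid only while the input buffer is alive,
        // i.e. the frame must not escape the current decode() call.
        return buffer.slice(index, length);
    }
}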
public String encode(Cookie cookie) {
final String name = checkNotNull(cookie, "cookie").name();
final String value = cookie.value() != null ? cookie.value() : "";
validateCookie(name, value);
StringBuilder buf = stringBuilder();
if (cookie.wrap()) {
addQuoted(buf, name, value);
} else {
add(buf, name, value);
}
if (cookie.maxAge() != Long.MIN_VALUE) {
add(buf, CookieHeaderNames.MAX_AGE, cookie.maxAge());
Date expires = new Date(cookie.maxAge() * 1000 + System.currentTimeMillis());
buf.append(CookieHeaderNames.EXPIRES);
buf.append('=');
DateFormatter.append(expires, buf);
buf.append(';');
buf.append(HttpConstants.SP_CHAR);
}
if (cookie.path() != null) {
add(buf, CookieHeaderNames.PATH, cookie.path());
}
if (cookie.domain() != null) {
add(buf, CookieHeaderNames.DOMAIN, cookie.domain());
}
if (cookie.isSecure()) {
add(buf, CookieHeaderNames.SECURE);
}
if (cookie.isHttpOnly()) {
add(buf, CookieHeaderNames.HTTPONLY);
}
return stripTrailingSeparator(buf);
} | Encodes the specified cookie into a Set-Cookie header value.
@param cookie the cookie
@return a single Set-Cookie header value |
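A usage sketch with Netty's strict server-side encoder (ServerCookieEncoder.STRICT and DefaultCookie are the real Netty classes; the cookie contents are made up):
import io.netty.handler.codec.http.cookie.DefaultCookie;
import io.netty.handler.codec.http.cookie.ServerCookieEncoder;

final class CookieEncodeDemo {
    public static void main(String[] args) {
        DefaultCookie cookie = new DefaultCookie("session", "abc123");
        cookie.setPath("/");
        cookie.setHttpOnly(true);
        // prints something like: session=abc123; Path=/; HTTPOnly
        System.out.println(ServerCookieEncoder.STRICT.encode(cookie));
    }
}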
private static List<String> dedup(List<String> encoded, Map<String, Integer> nameToLastIndex) {
boolean[] isLastInstance = new boolean[encoded.size()];
for (int idx : nameToLastIndex.values()) {
isLastInstance[idx] = true;
}
List<String> dedupd = new ArrayList<String>(nameToLastIndex.size());
for (int i = 0, n = encoded.size(); i < n; i++) {
if (isLastInstance[i]) {
dedupd.add(encoded.get(i));
}
}
return dedupd;
} | Deduplicate a list of encoded cookies by keeping only the last instance with a given name.
@param encoded The list of encoded cookies.
@param nameToLastIndex A map from cookie name to index of last cookie instance.
@return The encoded list with all but the last instance of a named cookie. |
public List<String> encode(Collection<? extends Cookie> cookies) {
if (checkNotNull(cookies, "cookies").isEmpty()) {
return Collections.emptyList();
}
List<String> encoded = new ArrayList<String>(cookies.size());
Map<String, Integer> nameToIndex = strict && cookies.size() > 1 ? new HashMap<String, Integer>() : null;
int i = 0;
boolean hasDupdName = false;
for (Cookie c : cookies) {
encoded.add(encode(c));
if (nameToIndex != null) {
hasDupdName |= nameToIndex.put(c.name(), i++) != null;
}
}
return hasDupdName ? dedup(encoded, nameToIndex) : encoded;
} | Batch encodes cookies into Set-Cookie header values.
@param cookies a bunch of cookies
@return the corresponding bunch of Set-Cookie headers |
public List<String> encode(Iterable<? extends Cookie> cookies) {
Iterator<? extends Cookie> cookiesIt = checkNotNull(cookies, "cookies").iterator();
if (!cookiesIt.hasNext()) {
return Collections.emptyList();
}
List<String> encoded = new ArrayList<String>();
Cookie firstCookie = cookiesIt.next();
Map<String, Integer> nameToIndex = strict && cookiesIt.hasNext() ? new HashMap<String, Integer>() : null;
int i = 0;
encoded.add(encode(firstCookie));
boolean hasDupdName = nameToIndex != null && nameToIndex.put(firstCookie.name(), i++) != null;
while (cookiesIt.hasNext()) {
Cookie c = cookiesIt.next();
encoded.add(encode(c));
if (nameToIndex != null) {
hasDupdName |= nameToIndex.put(c.name(), i++) != null;
}
}
return hasDupdName ? dedup(encoded, nameToIndex) : encoded;
} | Batch encodes cookies into Set-Cookie header values.
@param cookies a bunch of cookies
@return the corresponding bunch of Set-Cookie headers |
@Override
public void trace(String format, Object arg) {
if (isTraceEnabled()) {
FormattingTuple ft = MessageFormatter.format(format, arg);
logger.log(FQCN, traceCapable ? Level.TRACE : Level.DEBUG, ft
.getMessage(), ft.getThrowable());
}
} | Log a message at level TRACE according to the specified format and
argument.
<p>
This form avoids superfluous object creation when the logger is disabled
for level TRACE.
</p>
@param format
the format string
@param arg
the argument |
@Override
public void trace(String msg, Throwable t) {
logger.log(FQCN, traceCapable ? Level.TRACE : Level.DEBUG, msg, t);
} | Log an exception (throwable) at level TRACE with an accompanying message.
@param msg
the message accompanying the exception
@param t
the exception (throwable) to log |
@Override
public void warn(String msg) {
logger.log(FQCN, Level.WARN, msg, null);
} | Log a message object at the WARN level.
@param msg
- the message object to be logged |
public static SslContextBuilder forServer(File keyCertChainFile, File keyFile) {
return new SslContextBuilder(true).keyManager(keyCertChainFile, keyFile);
} | Creates a builder for new server-side {@link SslContext}.
@param keyCertChainFile an X.509 certificate chain file in PEM format
@param keyFile a PKCS#8 private key file in PEM format
@see #keyManager(File, File) |
public static SslContextBuilder forServer(InputStream keyCertChainInputStream, InputStream keyInputStream) {
return new SslContextBuilder(true).keyManager(keyCertChainInputStream, keyInputStream);
} | Creates a builder for new server-side {@link SslContext}.
@param keyCertChainInputStream an input stream for an X.509 certificate chain in PEM format
@param keyInputStream an input stream for a PKCS#8 private key in PEM format
@see #keyManager(InputStream, InputStream) |
public static SslContextBuilder forServer(PrivateKey key, X509Certificate... keyCertChain) {
return new SslContextBuilder(true).keyManager(key, keyCertChain);
} | Creates a builder for new server-side {@link SslContext}.
@param key a PKCS#8 private key
@param keyCertChain the X.509 certificate chain
@see #keyManager(PrivateKey, X509Certificate[]) |
public static SslContextBuilder forServer(
File keyCertChainFile, File keyFile, String keyPassword) {
return new SslContextBuilder(true).keyManager(keyCertChainFile, keyFile, keyPassword);
} | Creates a builder for new server-side {@link SslContext}.
@param keyCertChainFile an X.509 certificate chain file in PEM format
@param keyFile a PKCS#8 private key file in PEM format
@param keyPassword the password of the {@code keyFile}, or {@code null} if it's not
password-protected
@see #keyManager(File, File, String) |
public static SslContextBuilder forServer(
InputStream keyCertChainInputStream, InputStream keyInputStream, String keyPassword) {
return new SslContextBuilder(true).keyManager(keyCertChainInputStream, keyInputStream, keyPassword);
} | Creates a builder for new server-side {@link SslContext}.
@param keyCertChainInputStream an input stream for an X.509 certificate chain in PEM format
@param keyInputStream an input stream for a PKCS#8 private key in PEM format
@param keyPassword the password of the {@code keyFile}, or {@code null} if it's not
password-protected
@see #keyManager(InputStream, InputStream, String) |
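A typical server-side usage sketch; the file names are placeholders for a PEM certificate chain and a PKCS#8 key:
import java.io.File;
import javax.net.ssl.SSLException;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;

final class ServerSslContextDemo {
    static SslContext build() throws SSLException {
        // cert.pem holds the X.509 chain in PEM; key.pem the PKCS#8 private key.
        return SslContextBuilder
                .forServer(new File("cert.pem"), new File("key.pem"))
                .build();
    }
}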
public SslContextBuilder trustManager(File trustCertCollectionFile) {
try {
return trustManager(SslContext.toX509Certificates(trustCertCollectionFile));
} catch (Exception e) {
throw new IllegalArgumentException("File does not contain valid certificates: "
+ trustCertCollectionFile, e);
}
} | Trusted certificates for verifying the remote endpoint's certificate. The file should
contain an X.509 certificate collection in PEM format. {@code null} uses the system default. |
public SslContextBuilder trustManager(InputStream trustCertCollectionInputStream) {
try {
return trustManager(SslContext.toX509Certificates(trustCertCollectionInputStream));
} catch (Exception e) {
throw new IllegalArgumentException("Input stream does not contain valid certificates.", e);
}
} | Trusted certificates for verifying the remote endpoint's certificate. The input stream should
contain an X.509 certificate collection in PEM format. {@code null} uses the system default. |
public SslContextBuilder trustManager(X509Certificate... trustCertCollection) {
this.trustCertCollection = trustCertCollection != null ? trustCertCollection.clone() : null;
trustManagerFactory = null;
return this;
} | Trusted certificates for verifying the remote endpoint's certificate, {@code null} uses the system default. |
public SslContextBuilder keyManager(File keyCertChainFile, File keyFile) {
return keyManager(keyCertChainFile, keyFile, null);
} | Identifying certificate for this host. {@code keyCertChainFile} and {@code keyFile} may
be {@code null} for client contexts, which disables mutual authentication.
@param keyCertChainFile an X.509 certificate chain file in PEM format
@param keyFile a PKCS#8 private key file in PEM format |
public SslContextBuilder keyManager(InputStream keyCertChainInputStream, InputStream keyInputStream) {
return keyManager(keyCertChainInputStream, keyInputStream, null);
} | Identifying certificate for this host. {@code keyCertChainInputStream} and {@code keyInputStream} may
be {@code null} for client contexts, which disables mutual authentication.
@param keyCertChainInputStream an input stream for an X.509 certificate chain in PEM format
@param keyInputStream an input stream for a PKCS#8 private key in PEM format |
public SslContextBuilder keyManager(PrivateKey key, X509Certificate... keyCertChain) {
return keyManager(key, null, keyCertChain);
} | Identifying certificate for this host. {@code keyCertChain} and {@code key} may
be {@code null} for client contexts, which disables mutual authentication.
@param key a PKCS#8 private key
@param keyCertChain an X.509 certificate chain |
public SslContextBuilder keyManager(File keyCertChainFile, File keyFile, String keyPassword) {
X509Certificate[] keyCertChain;
PrivateKey key;
try {
keyCertChain = SslContext.toX509Certificates(keyCertChainFile);
} catch (Exception e) {
throw new IllegalArgumentException("File does not contain valid certificates: " + keyCertChainFile, e);
}
try {
key = SslContext.toPrivateKey(keyFile, keyPassword);
} catch (Exception e) {
throw new IllegalArgumentException("File does not contain valid private key: " + keyFile, e);
}
return keyManager(key, keyPassword, keyCertChain);
} | Identifying certificate for this host. {@code keyCertChainFile} and {@code keyFile} may
be {@code null} for client contexts, which disables mutual authentication.
@param keyCertChainFile an X.509 certificate chain file in PEM format
@param keyFile a PKCS#8 private key file in PEM format
@param keyPassword the password of the {@code keyFile}, or {@code null} if it's not
password-protected |
public SslContextBuilder keyManager(InputStream keyCertChainInputStream, InputStream keyInputStream,
String keyPassword) {
X509Certificate[] keyCertChain;
PrivateKey key;
try {
keyCertChain = SslContext.toX509Certificates(keyCertChainInputStream);
} catch (Exception e) {
throw new IllegalArgumentException("Input stream not contain valid certificates.", e);
}
try {
key = SslContext.toPrivateKey(keyInputStream, keyPassword);
} catch (Exception e) {
throw new IllegalArgumentException("Input stream does not contain valid private key.", e);
}
return keyManager(key, keyPassword, keyCertChain);
} | Identifying certificate for this host. {@code keyCertChainInputStream} and {@code keyInputStream} may
be {@code null} for client contexts, which disables mutual authentication.
@param keyCertChainInputStream an input stream for an X.509 certificate chain in PEM format
@param keyInputStream an input stream for a PKCS#8 private key in PEM format
@param keyPassword the password of the {@code keyInputStream}, or {@code null} if it's not
password-protected |
public SslContextBuilder keyManager(PrivateKey key, String keyPassword, X509Certificate... keyCertChain) {
if (forServer) {
checkNotNull(keyCertChain, "keyCertChain required for servers");
if (keyCertChain.length == 0) {
throw new IllegalArgumentException("keyCertChain must be non-empty");
}
checkNotNull(key, "key required for servers");
}
if (keyCertChain == null || keyCertChain.length == 0) {
this.keyCertChain = null;
} else {
for (X509Certificate cert: keyCertChain) {
if (cert == null) {
throw new IllegalArgumentException("keyCertChain contains null entry");
}
}
this.keyCertChain = keyCertChain.clone();
}
this.key = key;
this.keyPassword = keyPassword;
keyManagerFactory = null;
return this;
} | Identifying certificate for this host. {@code keyCertChain} and {@code key} may
be {@code null} for client contexts, which disables mutual authentication.
@param key a PKCS#8 private key
@param keyPassword the password of the {@code key}, or {@code null} if it's not
password-protected
@param keyCertChain an X.509 certificate chain |
public SslContextBuilder keyManager(KeyManagerFactory keyManagerFactory) {
if (forServer) {
checkNotNull(keyManagerFactory, "keyManagerFactory required for servers");
}
keyCertChain = null;
key = null;
keyPassword = null;
this.keyManagerFactory = keyManagerFactory;
return this;
} | Identifying key manager for this host. {@code keyManagerFactory} may be {@code null} for
client contexts, which disables mutual authentication. Using a {@link KeyManagerFactory}
is only supported for {@link SslProvider#JDK} or {@link SslProvider#OPENSSL} / {@link SslProvider#OPENSSL_REFCNT}
if the used openssl version is 1.0.1+. You can check if your openssl version supports using a
{@link KeyManagerFactory} by calling {@link OpenSsl#supportsKeyManagerFactory()}. If this is not the case
you must use {@link #keyManager(File, File)} or {@link #keyManager(File, File, String)}.
If you use {@link SslProvider#OPENSSL} or {@link SslProvider#OPENSSL_REFCNT} consider using
{@link OpenSslX509KeyManagerFactory} or {@link OpenSslCachingX509KeyManagerFactory}. |
public SslContextBuilder ciphers(Iterable<String> ciphers, CipherSuiteFilter cipherFilter) {
checkNotNull(cipherFilter, "cipherFilter");
this.ciphers = ciphers;
this.cipherFilter = cipherFilter;
return this;
} | The cipher suites to enable, in the order of preference. {@code cipherFilter} will be
applied to the ciphers before use. If {@code ciphers} is {@code null}, then the default
cipher suites will be used. |
public SslContextBuilder protocols(String... protocols) {
this.protocols = protocols == null ? null : protocols.clone();
return this;
} | The TLS protocol versions to enable.
@param protocols The protocols to enable, or {@code null} to enable the default protocols.
@see SSLEngine#setEnabledCipherSuites(String[]) |
public SslContext build() throws SSLException {
if (forServer) {
return SslContext.newServerContextInternal(provider, sslContextProvider, trustCertCollection,
trustManagerFactory, keyCertChain, key, keyPassword, keyManagerFactory,
ciphers, cipherFilter, apn, sessionCacheSize, sessionTimeout, clientAuth, protocols, startTls,
enableOcsp);
} else {
return SslContext.newClientContextInternal(provider, sslContextProvider, trustCertCollection,
trustManagerFactory, keyCertChain, key, keyPassword, keyManagerFactory,
ciphers, cipherFilter, apn, protocols, sessionCacheSize, sessionTimeout, enableOcsp);
}
} | Create new {@code SslContext} instance with configured settings.
<p>If {@link #sslProvider(SslProvider)} is set to {@link SslProvider#OPENSSL_REFCNT} then the caller is
responsible for releasing this object, or else native memory may leak. |
private int distributeToChildren(int maxBytes, Writer writer, State state) throws Http2Exception {
long oldTotalQueuedWeights = state.totalQueuedWeights;
State childState = state.pollPseudoTimeQueue();
State nextChildState = state.peekPseudoTimeQueue();
childState.setDistributing();
try {
assert nextChildState == null || nextChildState.pseudoTimeToWrite >= childState.pseudoTimeToWrite :
"nextChildState[" + nextChildState.streamId + "].pseudoTime(" + nextChildState.pseudoTimeToWrite +
") < " + " childState[" + childState.streamId + "].pseudoTime(" + childState.pseudoTimeToWrite + ")";
int nsent = distribute(nextChildState == null ? maxBytes :
min(maxBytes, (int) min((nextChildState.pseudoTimeToWrite - childState.pseudoTimeToWrite) *
childState.weight / oldTotalQueuedWeights + allocationQuantum, MAX_VALUE)
),
writer,
childState);
state.pseudoTime += nsent;
childState.updatePseudoTime(state, nsent, oldTotalQueuedWeights);
return nsent;
} finally {
childState.unsetDistributing();
// Do in finally to ensure the internal flags is not corrupted if an exception is thrown.
// The offer operation is delayed until we unroll up the recursive stack, so we don't have to remove from
// the priority pseudoTimeQueue due to a write operation.
if (childState.activeCountForTree != 0) {
state.offerPseudoTimeQueue(childState);
}
}
} | It is a pre-condition that {@code state.poll()} returns a non-{@code null} value. This is a result of the way
the allocation algorithm is structured and can be explained in the following cases:
<h3>For the recursive case</h3>
If a stream has no children (in the allocation tree) then that node must be active or it will not be in the
allocation tree. If a node is active then it will not delegate to children and recursion ends.
<h3>For the initial case</h3>
We check connectionState.activeCountForTree == 0 before any allocation is done. So if the connection stream
has no active children we don't get into this method. |
boolean isChild(int childId, int parentId, short weight) {
State parent = state(parentId);
State child;
return parent.children.containsKey(childId) &&
(child = state(childId)).parent == parent && child.weight == weight;
} | For testing only! |
int numChildren(int streamId) {
State state = state(streamId);
return state == null ? 0 : state.children.size();
} | For testing only! |
void notifyParentChanged(List<ParentChangedEvent> events) {
for (int i = 0; i < events.size(); ++i) {
ParentChangedEvent event = events.get(i);
stateOnlyRemovalQueue.priorityChanged(event.state);
if (event.state.parent != null && event.state.activeCountForTree != 0) {
event.state.parent.offerAndInitializePseudoTime(event.state);
event.state.parent.activeCountChangeForTree(event.state.activeCountForTree);
}
}
} | Notify all listeners of the priority tree change events (in ascending order)
@param events The events (top down order) which have changed |
public int length() {
int length;
if (head < tail) {
length = hpackHeaderFields.length - tail + head;
} else {
length = head - tail;
}
return length;
} | Return the number of header fields in the dynamic table. |
public HpackHeaderField getEntry(int index) {
if (index <= 0 || index > length()) {
throw new IndexOutOfBoundsException();
}
int i = head - index;
if (i < 0) {
return hpackHeaderFields[i + hpackHeaderFields.length];
} else {
return hpackHeaderFields[i];
}
} | Return the header field at the given index. The first and newest entry is always at index 1,
and the oldest entry is at index length(). |
public void add(HpackHeaderField header) {
int headerSize = header.size();
if (headerSize > capacity) {
clear();
return;
}
while (capacity - size < headerSize) {
remove();
}
hpackHeaderFields[head++] = header;
size += header.size();
if (head == hpackHeaderFields.length) {
head = 0;
}
} | Add the header field to the dynamic table. Entries are evicted from the dynamic table until
the size of the table and the new header field is less than or equal to the table's capacity.
If the size of the new entry is larger than the table's capacity, the dynamic table will be
cleared. |
public HpackHeaderField remove() {
HpackHeaderField removed = hpackHeaderFields[tail];
if (removed == null) {
return null;
}
size -= removed.size();
hpackHeaderFields[tail++] = null;
if (tail == hpackHeaderFields.length) {
tail = 0;
}
return removed;
} | Remove and return the oldest header field from the dynamic table. |
public void clear() {
while (tail != head) {
hpackHeaderFields[tail++] = null;
if (tail == hpackHeaderFields.length) {
tail = 0;
}
}
head = 0;
tail = 0;
size = 0;
} | Remove all entries from the dynamic table. |
public void setCapacity(long capacity) {
if (capacity < MIN_HEADER_TABLE_SIZE || capacity > MAX_HEADER_TABLE_SIZE) {
throw new IllegalArgumentException("capacity is invalid: " + capacity);
}
// initially capacity will be -1 so init won't return here
if (this.capacity == capacity) {
return;
}
this.capacity = capacity;
if (capacity == 0) {
clear();
} else {
// initially size will be 0 so remove won't be called
while (size > capacity) {
remove();
}
}
int maxEntries = (int) (capacity / HpackHeaderField.HEADER_ENTRY_OVERHEAD);
if (capacity % HpackHeaderField.HEADER_ENTRY_OVERHEAD != 0) {
maxEntries++;
}
// check if capacity change requires us to reallocate the array
if (hpackHeaderFields != null && hpackHeaderFields.length == maxEntries) {
return;
}
HpackHeaderField[] tmp = new HpackHeaderField[maxEntries];
// initially length will be 0 so there will be no copy
int len = length();
int cursor = tail;
for (int i = 0; i < len; i++) {
HpackHeaderField entry = hpackHeaderFields[cursor++];
tmp[i] = entry;
if (cursor == hpackHeaderFields.length) {
cursor = 0;
}
}
tail = 0;
head = tail + len;
hpackHeaderFields = tmp;
} | Set the maximum size of the dynamic table. Entries are evicted from the dynamic table until
the size of the table is less than or equal to the maximum size. |
public static Signal valueOf(Class<?> firstNameComponent, String secondNameComponent) {
return pool.valueOf(firstNameComponent, secondNameComponent);
} | Shortcut of {@link #valueOf(String) valueOf(firstNameComponent.getName() + "#" + secondNameComponent)}. |
public final ChannelFuture spliceTo(final AbstractEpollStreamChannel ch, final int len,
final ChannelPromise promise) {
if (ch.eventLoop() != eventLoop()) {
throw new IllegalArgumentException("EventLoops are not the same.");
}
checkPositiveOrZero(len, "len");
if (ch.config().getEpollMode() != EpollMode.LEVEL_TRIGGERED
|| config().getEpollMode() != EpollMode.LEVEL_TRIGGERED) {
throw new IllegalStateException("spliceTo() supported only when using " + EpollMode.LEVEL_TRIGGERED);
}
checkNotNull(promise, "promise");
if (!isOpen()) {
promise.tryFailure(SPLICE_TO_CLOSED_CHANNEL_EXCEPTION);
} else {
addToSpliceQueue(new SpliceInChannelTask(ch, len, promise));
failSpliceIfClosed(promise);
}
return promise;
} | Splice from this {@link AbstractEpollStreamChannel} to another {@link AbstractEpollStreamChannel}.
The {@code len} is the number of bytes to splice. If {@link Integer#MAX_VALUE} is used, it will
splice until the {@link ChannelFuture} is canceled or fails.
Please note:
<ul>
<li>both channels need to be registered to the same {@link EventLoop}, otherwise an
{@link IllegalArgumentException} is thrown. </li>
<li>{@link EpollChannelConfig#getEpollMode()} must be {@link EpollMode#LEVEL_TRIGGERED} for this and the
target {@link AbstractEpollStreamChannel}</li>
</ul> |
public final ChannelFuture spliceTo(final FileDescriptor ch, final int offset, final int len) {
return spliceTo(ch, offset, len, newPromise());
} | Splice from this {@link AbstractEpollStreamChannel} to another {@link FileDescriptor}.
The {@code offset} is the offset for the {@link FileDescriptor} and {@code len} is the
number of bytes to splice. If {@link Integer#MAX_VALUE} is used, it will splice until the
{@link ChannelFuture} is canceled or fails.
Please note:
<ul>
<li>{@link EpollChannelConfig#getEpollMode()} must be {@link EpollMode#LEVEL_TRIGGERED} for this
{@link AbstractEpollStreamChannel}</li>
<li>the {@link FileDescriptor} will not be closed after the {@link ChannelFuture} is notified</li>
<li>this channel must be registered to an event loop or {@link IllegalStateException} will be thrown.</li>
</ul> |
public final ChannelFuture spliceTo(final FileDescriptor ch, final int offset, final int len,
final ChannelPromise promise) {
checkPositiveOrZero(len, "len");
checkPositiveOrZero(offset, "offset");
if (config().getEpollMode() != EpollMode.LEVEL_TRIGGERED) {
throw new IllegalStateException("spliceTo() supported only when using " + EpollMode.LEVEL_TRIGGERED);
}
checkNotNull(promise, "promise");
if (!isOpen()) {
promise.tryFailure(SPLICE_TO_CLOSED_CHANNEL_EXCEPTION);
} else {
addToSpliceQueue(new SpliceFdTask(ch, offset, len, promise));
failSpliceIfClosed(promise);
}
return promise;
} | Splice from this {@link AbstractEpollStreamChannel} to another {@link FileDescriptor}.
The {@code offset} is the offset for the {@link FileDescriptor} and {@code len} is the
number of bytes to splice. If {@link Integer#MAX_VALUE} is used, it will splice until the
{@link ChannelFuture} is canceled or fails.
Please note:
<ul>
<li>{@link EpollChannelConfig#getEpollMode()} must be {@link EpollMode#LEVEL_TRIGGERED} for this
{@link AbstractEpollStreamChannel}</li>
<li>the {@link FileDescriptor} will not be closed after the {@link ChannelPromise} is notified</li>
<li>this channel must be registered to an event loop or {@link IllegalStateException} will be thrown.</li>
</ul> |
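A hedged proxy-style sketch of the channel-to-channel variant: both channels are assumed to be registered with the same EventLoop and configured with EpollMode.LEVEL_TRIGGERED, as the preconditions above require:
import io.netty.channel.epoll.EpollSocketChannel;

final class SpliceDemo {
    static void spliceForever(EpollSocketChannel from, EpollSocketChannel to) {
        // Integer.MAX_VALUE: keep splicing until the future fails or is cancelled.
        from.spliceTo(to, Integer.MAX_VALUE).addListener(future -> {
            if (!future.isSuccess()) {
                from.close();
                to.close();
            }
        });
    }
}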
private int writeDefaultFileRegion(ChannelOutboundBuffer in, DefaultFileRegion region) throws Exception {
final long offset = region.transferred();
final long regionCount = region.count();
if (offset >= regionCount) {
in.remove();
return 0;
}
final long flushedAmount = socket.sendFile(region, region.position(), offset, regionCount - offset);
if (flushedAmount > 0) {
in.progress(flushedAmount);
if (region.transferred() >= regionCount) {
in.remove();
}
return 1;
} else if (flushedAmount == 0) {
validateFileRegion(region, offset);
}
return WRITE_STATUS_SNDBUF_FULL;
} | Write a {@link DefaultFileRegion}
@param in the collection which contains objects to write.
@param region the {@link DefaultFileRegion} from which the bytes should be written
@return The value that should be decremented from the write quantum which starts at
{@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows:
<ul>
<li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content)
is encountered</li>
<li>1 - if a single call to write data was made to the OS</li>
<li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but
no data was accepted</li>
</ul> |
protected int doWriteSingle(ChannelOutboundBuffer in) throws Exception {
// The outbound buffer contains only one message or it contains a file region.
Object msg = in.current();
if (msg instanceof ByteBuf) {
return writeBytes(in, (ByteBuf) msg);
} else if (msg instanceof DefaultFileRegion) {
return writeDefaultFileRegion(in, (DefaultFileRegion) msg);
} else if (msg instanceof FileRegion) {
return writeFileRegion(in, (FileRegion) msg);
} else if (msg instanceof SpliceOutTask) {
if (!((SpliceOutTask) msg).spliceOut()) {
return WRITE_STATUS_SNDBUF_FULL;
}
in.remove();
return 1;
} else {
// Should never reach here.
throw new Error();
}
} | Attempt to write a single object.
@param in the collection which contains objects to write.
@return The value that should be decremented from the write quantum which starts at
{@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows:
<ul>
<li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content)
is encountered</li>
<li>1 - if a single call to write data was made to the OS</li>
<li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but
no data was accepted</li>
</ul>
@throws Exception If an I/O error occurs. |
private int doWriteMultiple(ChannelOutboundBuffer in) throws Exception {
final long maxBytesPerGatheringWrite = config().getMaxBytesPerGatheringWrite();
IovArray array = ((EpollEventLoop) eventLoop()).cleanIovArray();
array.maxBytes(maxBytesPerGatheringWrite);
in.forEachFlushedMessage(array);
if (array.count() >= 1) {
// TODO: Handle the case where cnt == 1 specially.
return writeBytesMultiple(in, array);
}
// cnt == 0, which means the outbound buffer contained empty buffers only.
in.removeBytes(0);
return 0;
} | Attempt to write multiple {@link ByteBuf} objects.
@param in the collection which contains objects to write.
@return The value that should be decremented from the write quantum which starts at
{@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows:
<ul>
<li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content)
is encountered</li>
<li>1 - if a single call to write data was made to the OS</li>
<li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but
no data was accepted</li>
</ul>
@throws Exception If an I/O error occurs. |
private void setUpgradeRequestHeaders(ChannelHandlerContext ctx, HttpRequest request) {
// Set the UPGRADE header on the request.
request.headers().set(HttpHeaderNames.UPGRADE, upgradeCodec.protocol());
// Add all protocol-specific headers to the request.
Set<CharSequence> connectionParts = new LinkedHashSet<CharSequence>(2);
connectionParts.addAll(upgradeCodec.setUpgradeHeaders(ctx, request));
// Set the CONNECTION header from the set of all protocol-specific headers that were added.
StringBuilder builder = new StringBuilder();
for (CharSequence part : connectionParts) {
builder.append(part);
builder.append(',');
}
builder.append(HttpHeaderValues.UPGRADE);
request.headers().add(HttpHeaderNames.CONNECTION, builder.toString());
} | Adds all upgrade request headers necessary for an upgrade to the supported protocols. |
private void sendNotModified(ChannelHandlerContext ctx) {
FullHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1, NOT_MODIFIED);
setDateHeader(response);
this.sendAndCleanupConnection(ctx, response);
} | When the file timestamp is the same as what the browser is sending up, send a "304 Not Modified"
@param ctx
Context |
private void sendAndCleanupConnection(ChannelHandlerContext ctx, FullHttpResponse response) {
final FullHttpRequest request = this.request;
final boolean keepAlive = HttpUtil.isKeepAlive(request);
HttpUtil.setContentLength(response, response.content().readableBytes());
if (!keepAlive) {
// We're going to close the connection as soon as the response is sent,
// so we should also make it clear for the client.
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
} else if (request.protocolVersion().equals(HTTP_1_0)) {
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
}
ChannelFuture flushPromise = ctx.writeAndFlush(response);
if (!keepAlive) {
// Close the connection as soon as the response is sent.
flushPromise.addListener(ChannelFutureListener.CLOSE);
}
} | If Keep-Alive is disabled, attaches a "Connection: close" header to the response
and closes the connection after the response has been sent. |
private static void setContentTypeHeader(HttpResponse response, File file) {
MimetypesFileTypeMap mimeTypesMap = new MimetypesFileTypeMap();
response.headers().set(HttpHeaderNames.CONTENT_TYPE, mimeTypesMap.getContentType(file.getPath()));
} | Sets the content type header for the HTTP Response
@param response
HTTP response
@param file
file to extract content type |
protected EmbeddedChannel newContentCompressor(ChannelHandlerContext ctx, CharSequence contentEncoding)
throws Http2Exception {
if (GZIP.contentEqualsIgnoreCase(contentEncoding) || X_GZIP.contentEqualsIgnoreCase(contentEncoding)) {
return newCompressionChannel(ctx, ZlibWrapper.GZIP);
}
if (DEFLATE.contentEqualsIgnoreCase(contentEncoding) || X_DEFLATE.contentEqualsIgnoreCase(contentEncoding)) {
return newCompressionChannel(ctx, ZlibWrapper.ZLIB);
}
// 'identity' or unsupported
return null;
} | Returns a new {@link EmbeddedChannel} that encodes the HTTP/2 message content to the specified
{@code contentEncoding}.
@param ctx the context.
@param contentEncoding the value of the {@code content-encoding} header
@return a new {@link ByteToMessageDecoder} if the specified encoding is supported. {@code null} otherwise
(alternatively, you can throw a {@link Http2Exception} to block unknown encoding).
@throws Http2Exception If the specified encoding is not supported and warrants an exception |
private EmbeddedChannel newCompressionChannel(final ChannelHandlerContext ctx, ZlibWrapper wrapper) {
return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(),
ctx.channel().config(), ZlibCodecFactory.newZlibEncoder(wrapper, compressionLevel, windowBits,
memLevel));
} | Generate a new instance of an {@link EmbeddedChannel} capable of compressing data
@param ctx the context.
@param wrapper Defines what type of encoder should be used |
private EmbeddedChannel newCompressor(ChannelHandlerContext ctx, Http2Headers headers, boolean endOfStream)
throws Http2Exception {
if (endOfStream) {
return null;
}
CharSequence encoding = headers.get(CONTENT_ENCODING);
if (encoding == null) {
encoding = IDENTITY;
}
final EmbeddedChannel compressor = newContentCompressor(ctx, encoding);
if (compressor != null) {
CharSequence targetContentEncoding = getTargetContentEncoding(encoding);
if (IDENTITY.contentEqualsIgnoreCase(targetContentEncoding)) {
headers.remove(CONTENT_ENCODING);
} else {
headers.set(CONTENT_ENCODING, targetContentEncoding);
}
// The content length will be for the decompressed data. Since we will compress the data
// this content-length will not be correct. Instead of queuing messages or delaying sending
// header frames...just remove the content-length header
headers.remove(CONTENT_LENGTH);
}
return compressor;
} | Checks if a new compressor object is needed for the stream identified by {@code streamId}. This method will
modify the {@code content-encoding} header contained in {@code headers}.
@param ctx the context.
@param headers Object representing headers which are to be written
@param endOfStream Indicates if the stream has ended
@return The channel used to compress data.
@throws Http2Exception if any problems occur during initialization. |
private void bindCompressorToStream(EmbeddedChannel compressor, int streamId) {
if (compressor != null) {
Http2Stream stream = connection().stream(streamId);
if (stream != null) {
stream.setProperty(propertyKey, compressor);
}
}
} | Called after the super class has written the headers and created any associated stream objects.
@param compressor The compressor associated with the stream identified by {@code streamId}.
@param streamId The stream id for which the headers were written. |
void cleanup(Http2Stream stream, EmbeddedChannel compressor) {
if (compressor.finish()) {
for (;;) {
final ByteBuf buf = compressor.readOutbound();
if (buf == null) {
break;
}
buf.release();
}
}
stream.removeProperty(propertyKey);
} | Release remaining content from {@link EmbeddedChannel} and remove the compressor from the {@link Http2Stream}.
@param stream The stream for which {@code compressor} is the compressor for
@param compressor The compressor for {@code stream} |
private static ByteBuf nextReadableBuf(EmbeddedChannel compressor) {
for (;;) {
final ByteBuf buf = compressor.readOutbound();
if (buf == null) {
return null;
}
if (!buf.isReadable()) {
buf.release();
continue;
}
return buf;
}
} | Read the next compressed {@link ByteBuf} from the {@link EmbeddedChannel} or {@code null} if one does not exist.
@param compressor The channel to read from
@return The next compressed {@link ByteBuf} from the {@link EmbeddedChannel} or {@code null} if one does not exist |
public static void loadLibrary(String libName, boolean absolute) {
if (absolute) {
System.load(libName);
} else {
System.loadLibrary(libName);
}
} | Delegate the calling to {@link System#load(String)} or {@link System#loadLibrary(String)}.
@param libName - The native library path or name
@param absolute - Whether the native library will be loaded by path or by name |
static void writeRawVarint32(ByteBuf out, int value) {
while (true) {
if ((value & ~0x7F) == 0) {
out.writeByte(value);
return;
} else {
out.writeByte((value & 0x7F) | 0x80);
value >>>= 7;
}
}
} | Writes a protobuf varint32 to a {@link ByteBuf}.
@param out to be written to
@param value to be written |
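For intuition: 300 is 0b1_0010_1100, which splits into the 7-bit groups 0101100 and 0000010; the encoder emits the low group first with the continuation bit set, giving 0xAC 0x02. A standalone sketch of the same loop:
import java.util.Arrays;

final class VarintDemo {
    static int[] writeRawVarint32(int value) {
        int[] bytes = new int[5]; // a varint32 is at most 5 bytes
        int n = 0;
        while (true) {
            if ((value & ~0x7F) == 0) {
                bytes[n++] = value;
                return Arrays.copyOf(bytes, n);
            }
            bytes[n++] = (value & 0x7F) | 0x80;
            value >>>= 7;
        }
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(writeRawVarint32(300))); // prints [172, 2]
    }
}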
protected ByteBuf allocateBuffer(
ChannelHandlerContext ctx,
@SuppressWarnings("unused") AddressedEnvelope<DnsResponse, InetSocketAddress> msg) throws Exception {
return ctx.alloc().ioBuffer(1024);
} | Allocate a {@link ByteBuf} which will be used for constructing a datagram packet.
Sub-classes may override this method to return a {@link ByteBuf} with a perfectly matching initial capacity. |
private static void encodeHeader(DnsResponse response, ByteBuf buf) {
buf.writeShort(response.id());
int flags = 32768;
flags |= (response.opCode().byteValue() & 0xFF) << 11;
if (response.isAuthoritativeAnswer()) {
flags |= 1 << 10;
}
if (response.isTruncated()) {
flags |= 1 << 9;
}
if (response.isRecursionDesired()) {
flags |= 1 << 8;
}
if (response.isRecursionAvailable()) {
flags |= 1 << 7;
}
flags |= response.z() << 4;
flags |= response.code().intValue();
buf.writeShort(flags);
buf.writeShort(response.count(DnsSection.QUESTION));
buf.writeShort(response.count(DnsSection.ANSWER));
buf.writeShort(response.count(DnsSection.AUTHORITY));
buf.writeShort(response.count(DnsSection.ADDITIONAL));
} | Encodes the header that is always 12 bytes long.
@param response the response header being encoded
@param buf the buffer the encoded data should be written to |
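The 16-bit flags word built above follows RFC 1035 section 4.1.1: QR at bit 15 (hence the initial 32768), OPCODE in bits 11-14, then AA, TC, RD and RA at bits 10 down to 7, Z in bits 4-6 and RCODE in bits 0-3. A standalone sketch that assembles the same word from plain values:
final class DnsFlagsDemo {
    static int responseFlags(int opCode, boolean aa, boolean tc, boolean rd,
                             boolean ra, int z, int rcode) {
        int flags = 1 << 15;            // QR = 1: this message is a response
        flags |= (opCode & 0xF) << 11;  // OPCODE
        if (aa) flags |= 1 << 10;       // Authoritative Answer
        if (tc) flags |= 1 << 9;        // TrunCation
        if (rd) flags |= 1 << 8;        // Recursion Desired
        if (ra) flags |= 1 << 7;        // Recursion Available
        flags |= (z & 0x7) << 4;        // Z (reserved)
        flags |= rcode & 0xF;           // RCODE
        return flags;
    }

    public static void main(String[] args) {
        // Authoritative response with RD and RA set and RCODE 0 prints 0x8580.
        System.out.printf("0x%04X%n", responseFlags(0, true, false, true, true, 0, 0));
    }
}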
protected Object decode(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception {
final int eol = findEndOfLine(buffer);
if (!discarding) {
if (eol >= 0) {
final ByteBuf frame;
final int length = eol - buffer.readerIndex();
final int delimLength = buffer.getByte(eol) == '\r'? 2 : 1;
if (length > maxLength) {
buffer.readerIndex(eol + delimLength);
fail(ctx, length);
return null;
}
if (stripDelimiter) {
frame = buffer.readRetainedSlice(length);
buffer.skipBytes(delimLength);
} else {
frame = buffer.readRetainedSlice(length + delimLength);
}
return frame;
} else {
final int length = buffer.readableBytes();
if (length > maxLength) {
discardedBytes = length;
buffer.readerIndex(buffer.writerIndex());
discarding = true;
offset = 0;
if (failFast) {
fail(ctx, "over " + discardedBytes);
}
}
return null;
}
} else {
if (eol >= 0) {
final int length = discardedBytes + eol - buffer.readerIndex();
final int delimLength = buffer.getByte(eol) == '\r'? 2 : 1;
buffer.readerIndex(eol + delimLength);
discardedBytes = 0;
discarding = false;
if (!failFast) {
fail(ctx, length);
}
} else {
discardedBytes += buffer.readableBytes();
buffer.readerIndex(buffer.writerIndex());
// We skip everything in the buffer, we need to set the offset to 0 again.
offset = 0;
}
return null;
}
} | Create a frame out of the {@link ByteBuf} and return it.
@param ctx the {@link ChannelHandlerContext} which this {@link ByteToMessageDecoder} belongs to
@param buffer the {@link ByteBuf} from which to read data
@return frame the {@link ByteBuf} which represent the frame or {@code null} if no frame could
be created. |
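A usage sketch wiring Netty's LineBasedFrameDecoder into a pipeline; the 8192-byte limit is an arbitrary choice:
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.LineBasedFrameDecoder;

final class LineDecoderInitializer extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) {
        // Splits the stream on \n or \r\n; lines longer than 8192 bytes fail.
        ch.pipeline().addLast(new LineBasedFrameDecoder(8192));
    }
}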
private int findEndOfLine(final ByteBuf buffer) {
int totalLength = buffer.readableBytes();
int i = buffer.forEachByte(buffer.readerIndex() + offset, totalLength - offset, ByteProcessor.FIND_LF);
if (i >= 0) {
offset = 0;
if (i > 0 && buffer.getByte(i - 1) == '\r') {
i--;
}
} else {
offset = totalLength;
}
return i;
} | Returns the index in the buffer of the end of line found.
Returns -1 if no end of line was found in the buffer. |
public boolean isSharable() {
/**
     * Cache the result of {@link Sharable} annotation detection to work around a condition. We use a
* {@link ThreadLocal} and {@link WeakHashMap} to eliminate the volatile write/reads. Using different
* {@link WeakHashMap} instances per {@link Thread} is good enough for us and the number of
* {@link Thread}s are quite limited anyway.
*
* See <a href="https://github.com/netty/netty/issues/2289">#2289</a>.
*/
Class<?> clazz = getClass();
Map<Class<?>, Boolean> cache = InternalThreadLocalMap.get().handlerSharableCache();
Boolean sharable = cache.get(clazz);
if (sharable == null) {
sharable = clazz.isAnnotationPresent(Sharable.class);
cache.put(clazz, sharable);
}
return sharable;
} | Return {@code true} if the implementation is {@link Sharable} and so can be added
to different {@link ChannelPipeline}s. |
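A handler that opts in is annotated as below (a minimal sketch; the class name is made up for illustration). The annotation is a promise that the handler keeps no per-connection state, so one instance may sit in many pipelines:
@ChannelHandler.Sharable
public class CountingHandler extends ChannelInboundHandlerAdapter {
    // Shared, thread-safe state only; no per-channel fields.
    private final AtomicLong reads = new AtomicLong();

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        reads.incrementAndGet();
        ctx.fireChannelRead(msg);
    }
}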
private static MqttFixedHeader decodeFixedHeader(ByteBuf buffer) {
short b1 = buffer.readUnsignedByte();
MqttMessageType messageType = MqttMessageType.valueOf(b1 >> 4);
boolean dupFlag = (b1 & 0x08) == 0x08;
int qosLevel = (b1 & 0x06) >> 1;
boolean retain = (b1 & 0x01) != 0;
int remainingLength = 0;
int multiplier = 1;
short digit;
int loops = 0;
do {
digit = buffer.readUnsignedByte();
remainingLength += (digit & 127) * multiplier;
multiplier *= 128;
loops++;
} while ((digit & 128) != 0 && loops < 4);
// MQTT protocol limits Remaining Length to 4 bytes
if (loops == 4 && (digit & 128) != 0) {
throw new DecoderException("remaining length exceeds 4 digits (" + messageType + ')');
}
MqttFixedHeader decodedFixedHeader =
new MqttFixedHeader(messageType, dupFlag, MqttQoS.valueOf(qosLevel), retain, remainingLength);
return validateFixedHeader(resetUnusedFields(decodedFixedHeader));
} | Decodes the fixed header. It's one byte for the flags and then variable bytes for the remaining length.
@param buffer the buffer to decode from
@return the fixed header |
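The encoding counterpart of the remaining-length loop, per the MQTT 3.1.1 specification (writeRemainingLength is a hypothetical helper for illustration):
static void writeRemainingLength(ByteBuf buf, int length) {
    // 1-4 bytes, 7 value bits per byte; the high bit signals that another byte follows.
    do {
        int digit = length & 0x7F;
        length >>>= 7;
        if (length > 0) {
            digit |= 0x80;
        }
        buf.writeByte(digit);
    } while (length > 0);
}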
private static Result<?> decodeVariableHeader(ByteBuf buffer, MqttFixedHeader mqttFixedHeader) {
switch (mqttFixedHeader.messageType()) {
case CONNECT:
return decodeConnectionVariableHeader(buffer);
case CONNACK:
return decodeConnAckVariableHeader(buffer);
case SUBSCRIBE:
case UNSUBSCRIBE:
case SUBACK:
case UNSUBACK:
case PUBACK:
case PUBREC:
case PUBCOMP:
case PUBREL:
return decodeMessageIdVariableHeader(buffer);
case PUBLISH:
return decodePublishVariableHeader(buffer, mqttFixedHeader);
case PINGREQ:
case PINGRESP:
case DISCONNECT:
// Empty variable header
return new Result<Object>(null, 0);
}
        return new Result<Object>(null, 0); // should never reach here
} | Decodes the variable header (if any).
@param buffer the buffer to decode from
@param mqttFixedHeader MqttFixedHeader of the same message
@return the variable header |
private static Result<?> decodePayload(
ByteBuf buffer,
MqttMessageType messageType,
int bytesRemainingInVariablePart,
Object variableHeader) {
switch (messageType) {
case CONNECT:
return decodeConnectionPayload(buffer, (MqttConnectVariableHeader) variableHeader);
case SUBSCRIBE:
return decodeSubscribePayload(buffer, bytesRemainingInVariablePart);
case SUBACK:
return decodeSubackPayload(buffer, bytesRemainingInVariablePart);
case UNSUBSCRIBE:
return decodeUnsubscribePayload(buffer, bytesRemainingInVariablePart);
case PUBLISH:
return decodePublishPayload(buffer, bytesRemainingInVariablePart);
default:
                // unknown payload, no bytes consumed
return new Result<Object>(null, 0);
}
} | Decodes the payload.
@param buffer the buffer to decode from
@param messageType type of the message being decoded
@param bytesRemainingInVariablePart bytes remaining
@param variableHeader variable header of the same message
@return the payload |
public final void add(ByteBuf buf, ChannelFutureListener listener) {
// buffers are added before promises so that we naturally 'consume' the entire buffer during removal
        // before we complete it's promise.
        // (Editor's note aside, the invariant is: buffer first, then its listener.)
bufAndListenerPairs.add(buf);
if (listener != null) {
bufAndListenerPairs.add(listener);
}
incrementReadableBytes(buf.readableBytes());
} | Add a buffer to the end of the queue and associate a listener with it that will be notified once
all of the buffer's bytes have been consumed from the queue and written.
@param buf to add to the tail of the queue
@param listener to notify when all the bytes have been consumed and written, can be {@code null}. |
public final ByteBuf removeFirst(ChannelPromise aggregatePromise) {
Object entry = bufAndListenerPairs.poll();
if (entry == null) {
return null;
}
assert entry instanceof ByteBuf;
ByteBuf result = (ByteBuf) entry;
decrementReadableBytes(result.readableBytes());
entry = bufAndListenerPairs.peek();
if (entry instanceof ChannelFutureListener) {
aggregatePromise.addListener((ChannelFutureListener) entry);
bufAndListenerPairs.poll();
}
return result;
} | Remove the first {@link ByteBuf} from the queue.
@param aggregatePromise used to aggregate the promises and listeners for the returned buffer.
@return the first {@link ByteBuf} from the queue. |
public final ByteBuf remove(ByteBufAllocator alloc, int bytes, ChannelPromise aggregatePromise) {
checkPositiveOrZero(bytes, "bytes");
checkNotNull(aggregatePromise, "aggregatePromise");
// Use isEmpty rather than readableBytes==0 as we may have a promise associated with an empty buffer.
if (bufAndListenerPairs.isEmpty()) {
return removeEmptyValue();
}
bytes = Math.min(bytes, readableBytes);
ByteBuf toReturn = null;
ByteBuf entryBuffer = null;
int originalBytes = bytes;
try {
for (;;) {
Object entry = bufAndListenerPairs.poll();
if (entry == null) {
break;
}
if (entry instanceof ChannelFutureListener) {
aggregatePromise.addListener((ChannelFutureListener) entry);
continue;
}
entryBuffer = (ByteBuf) entry;
if (entryBuffer.readableBytes() > bytes) {
// Add the buffer back to the queue as we can't consume all of it.
bufAndListenerPairs.addFirst(entryBuffer);
if (bytes > 0) {
// Take a slice of what we can consume and retain it.
entryBuffer = entryBuffer.readRetainedSlice(bytes);
toReturn = toReturn == null ? composeFirst(alloc, entryBuffer)
: compose(alloc, toReturn, entryBuffer);
bytes = 0;
}
break;
} else {
bytes -= entryBuffer.readableBytes();
toReturn = toReturn == null ? composeFirst(alloc, entryBuffer)
: compose(alloc, toReturn, entryBuffer);
}
entryBuffer = null;
}
} catch (Throwable cause) {
safeRelease(entryBuffer);
safeRelease(toReturn);
aggregatePromise.setFailure(cause);
throwException(cause);
}
decrementReadableBytes(originalBytes - bytes);
return toReturn;
} | Remove a {@link ByteBuf} from the queue with the specified number of bytes. Any added buffer whose bytes are
fully consumed during removal will have its promise completed when the passed aggregate {@link ChannelPromise}
completes.
@param alloc The allocator used if a new {@link ByteBuf} is generated during the aggregation process.
@param bytes the maximum number of readable bytes in the returned {@link ByteBuf}, if {@code bytes} is greater
than {@link #readableBytes} then a buffer of length {@link #readableBytes} is returned.
@param aggregatePromise used to aggregate the promises and listeners for the constituent buffers.
@return a {@link ByteBuf} composed of the enqueued buffers. |
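Typical use through Netty's concrete CoalescingBufferQueue subclass, which supplies the channel's allocator (a usage sketch; variable names are illustrative):
// Queue two pending writes, then drain up to 1024 bytes as a single buffer.
// Promises of fully consumed buffers complete when 'aggregate' completes.
CoalescingBufferQueue queue = new CoalescingBufferQueue(channel);
queue.add(first, channel.newPromise());
queue.add(second, channel.newPromise());
ChannelPromise aggregate = channel.newPromise();
ByteBuf merged = queue.remove(1024, aggregate);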
public final void copyTo(AbstractCoalescingBufferQueue dest) {
dest.bufAndListenerPairs.addAll(bufAndListenerPairs);
dest.incrementReadableBytes(readableBytes);
} | Copy all pending entries in this queue into the destination queue.
@param dest to copy pending buffers to. |
public final void writeAndRemoveAll(ChannelHandlerContext ctx) {
decrementReadableBytes(readableBytes);
Throwable pending = null;
ByteBuf previousBuf = null;
for (;;) {
Object entry = bufAndListenerPairs.poll();
try {
if (entry == null) {
if (previousBuf != null) {
ctx.write(previousBuf, ctx.voidPromise());
}
break;
}
if (entry instanceof ByteBuf) {
if (previousBuf != null) {
ctx.write(previousBuf, ctx.voidPromise());
}
previousBuf = (ByteBuf) entry;
} else if (entry instanceof ChannelPromise) {
ctx.write(previousBuf, (ChannelPromise) entry);
previousBuf = null;
} else {
ctx.write(previousBuf).addListener((ChannelFutureListener) entry);
previousBuf = null;
}
} catch (Throwable t) {
if (pending == null) {
pending = t;
} else {
logger.info("Throwable being suppressed because Throwable {} is already pending", pending, t);
}
}
}
if (pending != null) {
throw new IllegalStateException(pending);
}
} | Writes all remaining elements in this queue.
@param ctx The context to write all elements to. |
protected final ByteBuf composeIntoComposite(ByteBufAllocator alloc, ByteBuf cumulation, ByteBuf next) {
// Create a composite buffer to accumulate this pair and potentially all the buffers
// in the queue. Using +2 as we have already dequeued current and next.
CompositeByteBuf composite = alloc.compositeBuffer(size() + 2);
try {
composite.addComponent(true, cumulation);
composite.addComponent(true, next);
} catch (Throwable cause) {
composite.release();
safeRelease(next);
throwException(cause);
}
return composite;
} | Compose {@code cumulation} and {@code next} into a new {@link CompositeByteBuf}. |
protected final ByteBuf copyAndCompose(ByteBufAllocator alloc, ByteBuf cumulation, ByteBuf next) {
ByteBuf newCumulation = alloc.ioBuffer(cumulation.readableBytes() + next.readableBytes());
try {
newCumulation.writeBytes(cumulation).writeBytes(next);
} catch (Throwable cause) {
newCumulation.release();
safeRelease(next);
throwException(cause);
}
cumulation.release();
next.release();
return newCumulation;
} | Compose {@code cumulation} and {@code next} into a new {@link ByteBufAllocator#ioBuffer()}.
@param alloc The allocator to use to allocate the new buffer.
@param cumulation The current cumulation.
@param next The next buffer.
@return The result of {@code cumulation + next}. |
@Override
protected void verify(FullHttpResponse response) {
final HttpResponseStatus status = HttpResponseStatus.SWITCHING_PROTOCOLS;
final HttpHeaders headers = response.headers();
if (!response.status().equals(status)) {
throw new WebSocketHandshakeException("Invalid handshake response getStatus: " + response.status());
}
CharSequence upgrade = headers.get(HttpHeaderNames.UPGRADE);
if (!HttpHeaderValues.WEBSOCKET.contentEqualsIgnoreCase(upgrade)) {
throw new WebSocketHandshakeException("Invalid handshake response upgrade: " + upgrade);
}
if (!headers.containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE, true)) {
throw new WebSocketHandshakeException("Invalid handshake response connection: "
+ headers.get(HttpHeaderNames.CONNECTION));
}
CharSequence accept = headers.get(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT);
if (accept == null || !accept.equals(expectedChallengeResponseString)) {
throw new WebSocketHandshakeException(String.format(
"Invalid challenge. Actual: %s. Expected: %s", accept, expectedChallengeResponseString));
}
} | <p>
Process server response:
</p>
<pre>
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
Sec-WebSocket-Protocol: chat
</pre>
@param response
HTTP response returned from the server for the request sent by beginOpeningHandshake00().
@throws WebSocketHandshakeException |
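The expected challenge string is defined by RFC 6455: Base64 of the SHA-1 digest over the client's Sec-WebSocket-Key concatenated with a fixed GUID. A self-contained sketch (illustrative, not the handshaker's own code):
static String computeAccept(String secWebSocketKey) throws Exception {
    String magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; // fixed GUID from RFC 6455
    byte[] sha1 = java.security.MessageDigest.getInstance("SHA-1")
            .digest((secWebSocketKey + magic).getBytes(java.nio.charset.StandardCharsets.US_ASCII));
    return java.util.Base64.getEncoder().encodeToString(sha1);
}
For the RFC's sample key dGhlIHNhbXBsZSBub25jZQ== this yields s3pPLMBiTxaQ9kYGzzhZRbK+xOo=, the value shown in the exchange above.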
public static HostsFileEntries parseSilently(Charset... charsets) {
File hostsFile = locateHostsFile();
try {
return parse(hostsFile, charsets);
} catch (IOException e) {
if (logger.isWarnEnabled()) {
logger.warn("Failed to load and parse hosts file at " + hostsFile.getPath(), e);
}
return HostsFileEntries.EMPTY;
}
} | Parse the hosts file at the standard OS location, trying the given {@link Charset}s one after another until
one of them parses successfully or none are left.
@param charsets the {@link Charset}s to try as file encodings when parsing.
@return a {@link HostsFileEntries} |
public static HostsFileEntries parse(File file, Charset... charsets) throws IOException {
checkNotNull(file, "file");
checkNotNull(charsets, "charsets");
if (file.exists() && file.isFile()) {
for (Charset charset: charsets) {
HostsFileEntries entries = parse(new BufferedReader(new InputStreamReader(
new FileInputStream(file), charset)));
if (entries != HostsFileEntries.EMPTY) {
return entries;
}
}
}
return HostsFileEntries.EMPTY;
} | Parse a hosts file.
@param file the file to be parsed
@param charsets the {@link Charset}s to try as file encodings when parsing.
@return a {@link HostsFileEntries}
@throws IOException file could not be read |
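A typical call tries UTF-8 first and then the platform default (a usage sketch; inet4Entries() is the accessor Netty's HostsFileEntries exposes for the IPv4 map):
HostsFileEntries entries = parseSilently(StandardCharsets.UTF_8, Charset.defaultCharset());
Inet4Address localhost = entries.inet4Entries().get("localhost"); // null if not mapped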