code (string, lengths 67–466k) | docstring (string, lengths 1–13.2k) |
---|---|
private void lsUpdateGroup(final int isa, final int first, final int last) {
final int[] SA = this.SA;
int a, b;
int t;
for (a = first; a < last; ++a) {
if (0 <= SA[a]) {
b = a;
do {
SA[isa + SA[a]] = a;
} while (++a < last && 0 <= SA[a]);
SA[b] = b - a;
if (last <= a) {
break;
}
}
b = a;
do {
SA[a] = ~SA[a];
} while (SA[++a] < 0);
t = a;
do {
SA[isa + SA[b]] = t;
} while (++b <= a);
}
} | /*--------------------------------------------------------------------------- |
public int bwt() {
final int[] SA = this.SA;
final byte[] T = this.T;
final int n = this.n;
final int[] bucketA = new int[BUCKET_A_SIZE];
final int[] bucketB = new int[BUCKET_B_SIZE];
if (n == 0) {
return 0;
}
if (n == 1) {
SA[0] = T[0];
return 0;
}
int m = sortTypeBstar(bucketA, bucketB);
if (0 < m) {
return constructBWT(bucketA, bucketB);
}
return 0;
} | Performs a Burrows-Wheeler Transform on the input array.
@return the index of the first character of the input array within the output array |
public CompositeByteBuf addComponents(boolean increaseWriterIndex, ByteBuf... buffers) {
checkNotNull(buffers, "buffers");
addComponents0(increaseWriterIndex, componentCount, buffers, 0);
consolidateIfNeeded();
return this;
} | Add the given {@link ByteBuf}s and increase the {@code writerIndex} if {@code increaseWriterIndex} is
{@code true}.
{@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transferred to this
{@link CompositeByteBuf}.
@param buffers the {@link ByteBuf}s to add. {@link ByteBuf#release()} ownership of all
{@link ByteBuf} objects is transferred to this {@link CompositeByteBuf}. |
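A minimal usage sketch, not taken from the source; the {@code Unpooled} helpers and the UTF-8 payloads are just illustrative:
private static void composeExample() {
    CompositeByteBuf composite = Unpooled.compositeBuffer();
    ByteBuf header = Unpooled.copiedBuffer("HEADER", CharsetUtil.UTF_8);
    ByteBuf body = Unpooled.copiedBuffer("BODY", CharsetUtil.UTF_8);
    // increaseWriterIndex = true makes the added bytes readable right away;
    // release() ownership of header and body is transferred to the composite.
    composite.addComponents(true, header, body);
    System.out.println(composite.toString(CharsetUtil.UTF_8)); // HEADERBODY
    composite.release(); // also releases the transferred components
}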
public CompositeByteBuf addComponents(boolean increaseWriterIndex, Iterable<ByteBuf> buffers) {
return addComponents(increaseWriterIndex, componentCount, buffers);
} | Add the given {@link ByteBuf}s and increase the {@code writerIndex} if {@code increaseWriterIndex} is
{@code true}.
{@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transferred to this
{@link CompositeByteBuf}.
@param buffers the {@link ByteBuf}s to add. {@link ByteBuf#release()} ownership of all
{@link ByteBuf} objects is transferred to this {@link CompositeByteBuf}. |
public CompositeByteBuf addComponent(boolean increaseWriterIndex, int cIndex, ByteBuf buffer) {
checkNotNull(buffer, "buffer");
addComponent0(increaseWriterIndex, cIndex, buffer);
consolidateIfNeeded();
return this;
} | Add the given {@link ByteBuf} on the specific index and increase the {@code writerIndex}
if {@code increaseWriterIndex} is {@code true}.
{@link ByteBuf#release()} ownership of {@code buffer} is transferred to this {@link CompositeByteBuf}.
@param cIndex the index on which the {@link ByteBuf} will be added.
@param buffer the {@link ByteBuf} to add. {@link ByteBuf#release()} ownership is transferred to this
{@link CompositeByteBuf}. |
private int addComponent0(boolean increaseWriterIndex, int cIndex, ByteBuf buffer) {
assert buffer != null;
boolean wasAdded = false;
try {
checkComponentIndex(cIndex);
// No need to consolidate - just add a component to the list.
Component c = newComponent(buffer, 0);
int readableBytes = c.length();
addComp(cIndex, c);
wasAdded = true;
if (readableBytes > 0 && cIndex < componentCount - 1) {
updateComponentOffsets(cIndex);
} else if (cIndex > 0) {
c.reposition(components[cIndex - 1].endOffset);
}
if (increaseWriterIndex) {
writerIndex += readableBytes;
}
return cIndex;
} finally {
if (!wasAdded) {
buffer.release();
}
}
} | Precondition is that {@code buffer != null}. |
public CompositeByteBuf addComponents(int cIndex, ByteBuf... buffers) {
checkNotNull(buffers, "buffers");
addComponents0(false, cIndex, buffers, 0);
consolidateIfNeeded();
return this;
} | Add the given {@link ByteBuf}s on the specific index
<p>
Be aware that this method does not increase the {@code writerIndex} of the {@link CompositeByteBuf}.
If you need it increased, you need to handle it on your own.
<p>
{@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transferred to this
{@link CompositeByteBuf}.
@param cIndex the index on which the {@link ByteBuf}s will be added. {@link ByteBuf#release()} ownership of all
{@link ByteBuf} objects is transferred to this {@link CompositeByteBuf}.
@param buffers the {@link ByteBuf}s to add. {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects
is transferred to this {@link CompositeByteBuf}. |
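Because this overload leaves {@code writerIndex} untouched, callers typically advance it afterwards. A small sketch, assuming the composite was fully written before the append so exposing the whole capacity is the desired outcome:
private static void appendWithoutIndexIncrease(CompositeByteBuf composite, ByteBuf extra) {
    // Ownership of 'extra' is transferred even though writerIndex is not increased.
    composite.addComponents(composite.numComponents(), extra);
    // Make the appended bytes readable explicitly.
    composite.writerIndex(composite.capacity());
}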
public CompositeByteBuf addComponents(int cIndex, Iterable<ByteBuf> buffers) {
return addComponents(false, cIndex, buffers);
} | Add the given {@link ByteBuf}s on the specific index
Be aware that this method does not increase the {@code writerIndex} of the {@link CompositeByteBuf}.
If you need it increased, you need to handle it on your own.
<p>
{@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transferred to this
{@link CompositeByteBuf}.
@param cIndex the index on which the {@link ByteBuf} will be added.
@param buffers the {@link ByteBuf}s to add. {@link ByteBuf#release()} ownership of all
{@link ByteBuf} objects is transferred to this
{@link CompositeByteBuf}. |
public CompositeByteBuf addFlattenedComponents(boolean increaseWriterIndex, ByteBuf buffer) {
checkNotNull(buffer, "buffer");
final int ridx = buffer.readerIndex();
final int widx = buffer.writerIndex();
if (ridx == widx) {
buffer.release();
return this;
}
if (!(buffer instanceof CompositeByteBuf)) {
addComponent0(increaseWriterIndex, componentCount, buffer);
consolidateIfNeeded();
return this;
}
final CompositeByteBuf from = (CompositeByteBuf) buffer;
from.checkIndex(ridx, widx - ridx);
final Component[] fromComponents = from.components;
final int compCountBefore = componentCount;
final int writerIndexBefore = writerIndex;
try {
for (int cidx = from.toComponentIndex0(ridx), newOffset = capacity();; cidx++) {
final Component component = fromComponents[cidx];
final int compOffset = component.offset;
final int fromIdx = Math.max(ridx, compOffset);
final int toIdx = Math.min(widx, component.endOffset);
final int len = toIdx - fromIdx;
if (len > 0) { // skip empty components
// Note that it's safe to just retain the unwrapped buf here, even in the case
// of PooledSlicedByteBufs - those slices will still be properly released by the
// source Component's free() method.
addComp(componentCount, new Component(
component.buf.retain(), component.idx(fromIdx), newOffset, len, null));
}
if (widx == toIdx) {
break;
}
newOffset += len;
}
if (increaseWriterIndex) {
writerIndex = writerIndexBefore + (widx - ridx);
}
consolidateIfNeeded();
buffer.release();
buffer = null;
return this;
} finally {
if (buffer != null) {
// if we did not succeed, attempt to rollback any components that were added
if (increaseWriterIndex) {
writerIndex = writerIndexBefore;
}
for (int cidx = componentCount - 1; cidx >= compCountBefore; cidx--) {
components[cidx].free();
removeComp(cidx);
}
}
}
} | Add the given {@link ByteBuf} and increase the {@code writerIndex} if {@code increaseWriterIndex} is
{@code true}. If the provided buffer is a {@link CompositeByteBuf} itself, a "shallow copy" of its
readable components will be performed. Thus the actual number of new components added may vary
and in particular will be zero if the provided buffer is not readable.
<p>
{@link ByteBuf#release()} ownership of {@code buffer} is transferred to this {@link CompositeByteBuf}.
@param buffer the {@link ByteBuf} to add. {@link ByteBuf#release()} ownership is transferred to this
{@link CompositeByteBuf}. |
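A hedged sketch of why the "shallow copy" matters: flattening avoids nesting one composite inside another, so the outer buffer ends up referencing the inner buffer's readable components directly:
private static CompositeByteBuf flatten(ByteBufAllocator alloc, CompositeByteBuf inner) {
    CompositeByteBuf outer = alloc.compositeBuffer();
    // Ownership of 'inner' is transferred; only its readable region is added,
    // component by component, without copying any data.
    return outer.addFlattenedComponents(true, inner);
}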
private CompositeByteBuf addComponents(boolean increaseIndex, int cIndex, Iterable<ByteBuf> buffers) {
if (buffers instanceof ByteBuf) {
// If buffers also implements ByteBuf (e.g. CompositeByteBuf), it has to go to addComponent(ByteBuf).
return addComponent(increaseIndex, cIndex, (ByteBuf) buffers);
}
checkNotNull(buffers, "buffers");
Iterator<ByteBuf> it = buffers.iterator();
try {
checkComponentIndex(cIndex);
// No need for consolidation
while (it.hasNext()) {
ByteBuf b = it.next();
if (b == null) {
break;
}
cIndex = addComponent0(increaseIndex, cIndex, b) + 1;
cIndex = Math.min(cIndex, componentCount);
}
} finally {
while (it.hasNext()) {
ReferenceCountUtil.safeRelease(it.next());
}
}
consolidateIfNeeded();
return this;
} | (We do not always know the size of the {@code Iterable} in advance, but we do in the most common case that the Iterable is a Collection.) |
private void consolidateIfNeeded() {
// Consolidate if the number of components will exceed the allowed maximum by the current
// operation.
int size = componentCount;
if (size > maxNumComponents) {
final int capacity = components[size - 1].endOffset;
ByteBuf consolidated = allocBuffer(capacity);
lastAccessed = null;
// We're not using foreach to avoid creating an iterator.
for (int i = 0; i < size; i ++) {
components[i].transferTo(consolidated);
}
components[0] = new Component(consolidated, 0, 0, capacity, consolidated);
removeCompRange(1, size);
}
} | This should only be called as the last operation of a method, as it may adjust the underlying
array of components and so affect the index etc. |
public CompositeByteBuf removeComponent(int cIndex) {
checkComponentIndex(cIndex);
Component comp = components[cIndex];
if (lastAccessed == comp) {
lastAccessed = null;
}
comp.free();
removeComp(cIndex);
if (comp.length() > 0) {
// Only need to call updateComponentOffsets if the length was > 0
updateComponentOffsets(cIndex);
}
return this;
} | Remove the {@link ByteBuf} from the given index.
@param cIndex the index from which the {@link ByteBuf} will be removed |
public CompositeByteBuf removeComponents(int cIndex, int numComponents) {
checkComponentIndex(cIndex, numComponents);
if (numComponents == 0) {
return this;
}
int endIndex = cIndex + numComponents;
boolean needsUpdate = false;
for (int i = cIndex; i < endIndex; ++i) {
Component c = components[i];
if (c.length() > 0) {
needsUpdate = true;
}
if (lastAccessed == c) {
lastAccessed = null;
}
c.free();
}
removeCompRange(cIndex, endIndex);
if (needsUpdate) {
// Only need to call updateComponentOffsets if the length was > 0
updateComponentOffsets(cIndex);
}
return this;
} | Remove the number of {@link ByteBuf}s starting from the given index.
@param cIndex the index from which the {@link ByteBuf}s will start to be removed
@param numComponents the number of components to remove |
public List<ByteBuf> decompose(int offset, int length) {
checkIndex(offset, length);
if (length == 0) {
return Collections.emptyList();
}
int componentId = toComponentIndex0(offset);
int bytesToSlice = length;
// The first component
Component firstC = components[componentId];
ByteBuf slice = firstC.buf.slice(firstC.idx(offset), Math.min(firstC.endOffset - offset, bytesToSlice));
bytesToSlice -= slice.readableBytes();
if (bytesToSlice == 0) {
return Collections.singletonList(slice);
}
List<ByteBuf> sliceList = new ArrayList<ByteBuf>(componentCount - componentId);
sliceList.add(slice);
// Add all the slices until there is nothing more left and then return the List.
do {
Component component = components[++componentId];
slice = component.buf.slice(component.idx(component.offset), Math.min(component.length(), bytesToSlice));
bytesToSlice -= slice.readableBytes();
sliceList.add(slice);
} while (bytesToSlice > 0);
return sliceList;
} | Same as {@link #slice(int, int)} except that this method returns a list. |
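Sketch of typical use; note that the returned slices (as implemented above) are plain {@code slice()} views, so they share memory with, and are released together with, this composite:
private static int sumReadable(CompositeByteBuf composite) {
    int sum = 0;
    for (ByteBuf slice : composite.decompose(composite.readerIndex(), composite.readableBytes())) {
        sum += slice.readableBytes(); // each slice maps to (part of) one component
    }
    return sum; // equals composite.readableBytes()
}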
public CompositeByteBuf consolidate() {
ensureAccessible();
final int numComponents = componentCount;
if (numComponents <= 1) {
return this;
}
final int capacity = components[numComponents - 1].endOffset;
final ByteBuf consolidated = allocBuffer(capacity);
for (int i = 0; i < numComponents; i ++) {
components[i].transferTo(consolidated);
}
lastAccessed = null;
components[0] = new Component(consolidated, 0, 0, capacity, consolidated);
removeCompRange(1, numComponents);
return this;
} | Consolidate the composed {@link ByteBuf}s |
public CompositeByteBuf consolidate(int cIndex, int numComponents) {
checkComponentIndex(cIndex, numComponents);
if (numComponents <= 1) {
return this;
}
final int endCIndex = cIndex + numComponents;
final Component last = components[endCIndex - 1];
final int capacity = last.endOffset - components[cIndex].offset;
final ByteBuf consolidated = allocBuffer(capacity);
for (int i = cIndex; i < endCIndex; i ++) {
components[i].transferTo(consolidated);
}
lastAccessed = null;
removeCompRange(cIndex + 1, endCIndex);
components[cIndex] = new Component(consolidated, 0, 0, capacity, consolidated);
updateComponentOffsets(cIndex);
return this;
} | Consolidate the composed {@link ByteBuf}s
@param cIndex the index on which to start to compose
@param numComponents the number of components to compose |
public CompositeByteBuf discardReadComponents() {
ensureAccessible();
final int readerIndex = readerIndex();
if (readerIndex == 0) {
return this;
}
// Discard everything if (readerIndex = writerIndex = capacity).
int writerIndex = writerIndex();
if (readerIndex == writerIndex && writerIndex == capacity()) {
for (int i = 0, size = componentCount; i < size; i++) {
components[i].free();
}
lastAccessed = null;
clearComps();
setIndex(0, 0);
adjustMarkers(readerIndex);
return this;
}
// Remove read components.
int firstComponentId = 0;
Component c = null;
for (int size = componentCount; firstComponentId < size; firstComponentId++) {
c = components[firstComponentId];
if (c.endOffset > readerIndex) {
break;
}
c.free();
}
if (firstComponentId == 0) {
return this; // Nothing to discard
}
Component la = lastAccessed;
if (la != null && la.endOffset <= readerIndex) {
lastAccessed = null;
}
removeCompRange(0, firstComponentId);
// Update indexes and markers.
int offset = c.offset;
updateComponentOffsets(0);
setIndex(readerIndex - offset, writerIndex - offset);
adjustMarkers(offset);
return this;
} | Discard all {@link ByteBuf}s which are read. |
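A small sketch of the intended usage pattern: read part of the data, then let fully-read components be freed eagerly:
private static void consumeAndCompact(CompositeByteBuf composite, int bytesToConsume) {
    composite.skipBytes(bytesToConsume);  // advances readerIndex
    // Frees and removes every component whose endOffset <= readerIndex,
    // then shifts indices and markers as shown above.
    composite.discardReadComponents();
}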
private void configureSsl(SocketChannel ch) {
ch.pipeline().addLast(sslCtx.newHandler(ch.alloc()), new Http2OrHttpHandler());
} | Configure the pipeline for TLS NPN negotiation to HTTP/2. |
private void configureClearText(SocketChannel ch) {
final ChannelPipeline p = ch.pipeline();
final HttpServerCodec sourceCodec = new HttpServerCodec();
p.addLast(sourceCodec);
p.addLast(new HttpServerUpgradeHandler(sourceCodec, upgradeCodecFactory));
p.addLast(new SimpleChannelInboundHandler<HttpMessage>() {
@Override
protected void channelRead0(ChannelHandlerContext ctx, HttpMessage msg) throws Exception {
// If this handler is hit then no upgrade has been attempted and the client is just talking HTTP.
System.err.println("Directly talking: " + msg.protocolVersion() + " (no upgrade was attempted)");
ChannelPipeline pipeline = ctx.pipeline();
ChannelHandlerContext thisCtx = pipeline.context(this);
pipeline.addAfter(thisCtx.name(), null, new HelloWorldHttp1Handler("Direct. No Upgrade Attempted."));
pipeline.replace(this, null, new HttpObjectAggregator(maxHttpContentLength));
ctx.fireChannelRead(ReferenceCountUtil.retain(msg));
}
});
p.addLast(new UserEventLogger());
} | Configure the pipeline for a cleartext upgrade from HTTP to HTTP/2.0 |
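The {@code upgradeCodecFactory} field referenced above is not part of this excerpt. A hedged sketch of what it usually looks like in this kind of example; {@code HelloWorldHttp2HandlerBuilder} is assumed to exist alongside {@code HelloWorldHttp1Handler}:
private static final HttpServerUpgradeHandler.UpgradeCodecFactory upgradeCodecFactory =
        new HttpServerUpgradeHandler.UpgradeCodecFactory() {
    @Override
    public HttpServerUpgradeHandler.UpgradeCodec newUpgradeCodec(CharSequence protocol) {
        if (AsciiString.contentEquals(Http2CodecUtil.HTTP_UPGRADE_PROTOCOL_NAME, protocol)) {
            // "h2c": install the HTTP/2 codec built by the (assumed) handler builder.
            return new Http2ServerUpgradeCodec(new HelloWorldHttp2HandlerBuilder().build());
        }
        return null; // unknown protocol, no upgrade
    }
};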
static PemEncoded toPEM(ByteBufAllocator allocator, boolean useDirect,
X509Certificate... chain) throws CertificateEncodingException {
if (chain == null || chain.length == 0) {
throw new IllegalArgumentException("X.509 certificate chain can't be null or empty");
}
// We can take a shortcut if there is only one certificate and
// it already happens to be a PemEncoded instance. This is the
// ideal case and reason why all this exists. It allows the user
// to pass pre-encoded bytes straight into OpenSSL without having
// to do any of the extra work.
if (chain.length == 1) {
X509Certificate first = chain[0];
if (first instanceof PemEncoded) {
return ((PemEncoded) first).retain();
}
}
boolean success = false;
ByteBuf pem = null;
try {
for (X509Certificate cert : chain) {
if (cert == null) {
throw new IllegalArgumentException("Null element in chain: " + Arrays.toString(chain));
}
if (cert instanceof PemEncoded) {
pem = append(allocator, useDirect, (PemEncoded) cert, chain.length, pem);
} else {
pem = append(allocator, useDirect, cert, chain.length, pem);
}
}
PemValue value = new PemValue(pem, false);
success = true;
return value;
} finally {
// Make sure we never leak the PEM's ByteBuf in the event of an Exception
if (!success && pem != null) {
pem.release();
}
}
} | Creates a {@link PemEncoded} value from the {@link X509Certificate}s. |
private static ByteBuf append(ByteBufAllocator allocator, boolean useDirect,
PemEncoded encoded, int count, ByteBuf pem) {
ByteBuf content = encoded.content();
if (pem == null) {
// see the other append() method
pem = newBuffer(allocator, useDirect, content.readableBytes() * count);
}
pem.writeBytes(content.slice());
return pem;
} | Appends the {@link PemEncoded} value to the {@link ByteBuf} (last arg) and returns it.
If the {@link ByteBuf} didn't exist yet it'll create it using the {@link ByteBufAllocator}. |
private static ByteBuf append(ByteBufAllocator allocator, boolean useDirect,
X509Certificate cert, int count, ByteBuf pem) throws CertificateEncodingException {
ByteBuf encoded = Unpooled.wrappedBuffer(cert.getEncoded());
try {
ByteBuf base64 = SslUtils.toBase64(allocator, encoded);
try {
if (pem == null) {
// We try to approximate the buffer's initial size. The sizes of
// certificates can vary a lot so it'll be off a bit depending
// on the number of elements in the array (count argument).
pem = newBuffer(allocator, useDirect,
(BEGIN_CERT.length + base64.readableBytes() + END_CERT.length) * count);
}
pem.writeBytes(BEGIN_CERT);
pem.writeBytes(base64);
pem.writeBytes(END_CERT);
} finally {
base64.release();
}
} finally {
encoded.release();
}
return pem;
} | Appends the {@link X509Certificate} value to the {@link ByteBuf} (last arg) and returns it.
If the {@link ByteBuf} didn't exist yet it'll create it using the {@link ByteBufAllocator}. |
public AsciiString decode(ByteBuf buf, int length) throws Http2Exception {
processor.reset();
buf.forEachByte(buf.readerIndex(), length, processor);
buf.skipBytes(length);
return processor.end();
} | Decompresses the given Huffman coded string literal.
@param buf the string literal to be decoded
@return the decoded value as an {@link AsciiString}
@throws Http2Exception if the input is not a valid Huffman sequence (for example, if the EOS symbol was decoded) |
private boolean move0(PoolChunk<T> chunk) {
if (prevList == null) {
// There is no previous PoolChunkList so return false which result in having the PoolChunk destroyed and
// all memory associated with the PoolChunk will be released.
assert chunk.usage() == 0;
return false;
}
return prevList.move(chunk);
} | Moves the {@link PoolChunk} down the {@link PoolChunkList} linked-list so it will end up in the right
{@link PoolChunkList} that has the correct minUsage / maxUsage with respect to {@link PoolChunk#usage()}. |
void add0(PoolChunk<T> chunk) {
chunk.parent = this;
if (head == null) {
head = chunk;
chunk.prev = null;
chunk.next = null;
} else {
chunk.prev = null;
chunk.next = head;
head.prev = chunk;
head = chunk;
}
} | Adds the {@link PoolChunk} to this {@link PoolChunkList}. |
public static RecyclableArrayList newInstance(int minCapacity) {
RecyclableArrayList ret = RECYCLER.get();
ret.ensureCapacity(minCapacity);
return ret;
} | Create a new empty {@link RecyclableArrayList} instance with the given capacity. |
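Typical borrow/return pattern, shown only as a sketch; {@code RecyclableArrayList} is an internal Netty utility, so treat this as illustrative:
private static void recyclerExample() {
    RecyclableArrayList list = RecyclableArrayList.newInstance(16);
    try {
        list.add("work item");
        // ... use the list ...
    } finally {
        list.recycle(); // clears the list and returns it to the recycler pool
    }
}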
public static ClassResolver weakCachingResolver(ClassLoader classLoader) {
return new CachingClassResolver(
new ClassLoaderClassResolver(defaultClassLoader(classLoader)),
new WeakReferenceMap<String, Class<?>>(new HashMap<String, Reference<Class<?>>>()));
} | non-aggressive non-concurrent cache
good for non-shared default cache
@param classLoader - specific classLoader to use, or null if you want to revert to default
@return new instance of class resolver |
public static ClassResolver softCachingResolver(ClassLoader classLoader) {
return new CachingClassResolver(
new ClassLoaderClassResolver(defaultClassLoader(classLoader)),
new SoftReferenceMap<String, Class<?>>(new HashMap<String, Reference<Class<?>>>()));
} | aggressive non-concurrent cache
good for non-shared cache, when we're not worried about class unloading
@param classLoader - specific classLoader to use, or null if you want to revert to default
@return new instance of class resolver |
public static ClassResolver weakCachingConcurrentResolver(ClassLoader classLoader) {
return new CachingClassResolver(
new ClassLoaderClassResolver(defaultClassLoader(classLoader)),
new WeakReferenceMap<String, Class<?>>(
PlatformDependent.<String, Reference<Class<?>>>newConcurrentHashMap()));
} | non-aggressive concurrent cache
good for shared cache, when we're worried about class unloading
@param classLoader - specific classLoader to use, or null if you want to revert to default
@return new instance of class resolver |
public static ClassResolver softCachingConcurrentResolver(ClassLoader classLoader) {
return new CachingClassResolver(
new ClassLoaderClassResolver(defaultClassLoader(classLoader)),
new SoftReferenceMap<String, Class<?>>(
PlatformDependent.<String, Reference<Class<?>>>newConcurrentHashMap()));
} | aggressive concurrent cache
good for shared cache, when we're not worried about class unloading
@param classLoader - specific classLoader to use, or null if you want to revert to default
@return new instance of class resolver |
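The resolvers above are typically passed to an {@link ObjectDecoder} when decoding Java-serialized objects; a minimal pipeline sketch, with the weak concurrent variant chosen purely for illustration:
private static void configureObjectSerialization(SocketChannel ch) {
    ch.pipeline().addLast(
            new ObjectEncoder(),
            new ObjectDecoder(ClassResolvers.weakCachingConcurrentResolver(null)));
}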
public static boolean isMultipart(HttpRequest request) {
if (request.headers().contains(HttpHeaderNames.CONTENT_TYPE)) {
return getMultipartDataBoundary(request.headers().get(HttpHeaderNames.CONTENT_TYPE)) != null;
} else {
return false;
}
} | Check if the given request is a multipart request
@return True if the request is a Multipart request |
protected static String[] getMultipartDataBoundary(String contentType) {
// Check if Post using "multipart/form-data; boundary=--89421926422648 [; charset=xxx]"
String[] headerContentType = splitHeaderContentType(contentType);
final String multiPartHeader = HttpHeaderValues.MULTIPART_FORM_DATA.toString();
if (headerContentType[0].regionMatches(true, 0, multiPartHeader, 0 , multiPartHeader.length())) {
int mrank;
int crank;
final String boundaryHeader = HttpHeaderValues.BOUNDARY.toString();
if (headerContentType[1].regionMatches(true, 0, boundaryHeader, 0, boundaryHeader.length())) {
mrank = 1;
crank = 2;
} else if (headerContentType[2].regionMatches(true, 0, boundaryHeader, 0, boundaryHeader.length())) {
mrank = 2;
crank = 1;
} else {
return null;
}
String boundary = StringUtil.substringAfter(headerContentType[mrank], '=');
if (boundary == null) {
throw new ErrorDataDecoderException("Needs a boundary value");
}
if (boundary.charAt(0) == '"') {
String bound = boundary.trim();
int index = bound.length() - 1;
if (bound.charAt(index) == '"') {
boundary = bound.substring(1, index);
}
}
final String charsetHeader = HttpHeaderValues.CHARSET.toString();
if (headerContentType[crank].regionMatches(true, 0, charsetHeader, 0, charsetHeader.length())) {
String charset = StringUtil.substringAfter(headerContentType[crank], '=');
if (charset != null) {
return new String[] {"--" + boundary, charset};
}
}
return new String[] {"--" + boundary};
}
return null;
} | Check from the request ContentType if this request is a Multipart request.
@return an array of String if multipartDataBoundary exists with the multipartDataBoundary
as first element, charset if any as second (missing if not set), else null |
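A worked example of the parsing above; the header value is hypothetical:
private static void boundaryExample() {
    String contentType = "multipart/form-data; boundary=--89421926422648; charset=UTF-8";
    String[] parts = getMultipartDataBoundary(contentType);
    // parts[0] -> "----89421926422648"  (the boundary value, prefixed with "--")
    // parts[1] -> "UTF-8"               (present only because charset was set)
}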
private static String[] splitHeaderContentType(String sb) {
int aStart;
int aEnd;
int bStart;
int bEnd;
int cStart;
int cEnd;
aStart = HttpPostBodyUtil.findNonWhitespace(sb, 0);
aEnd = sb.indexOf(';');
if (aEnd == -1) {
return new String[] { sb, "", "" };
}
bStart = HttpPostBodyUtil.findNonWhitespace(sb, aEnd + 1);
if (sb.charAt(aEnd - 1) == ' ') {
aEnd--;
}
bEnd = sb.indexOf(';', bStart);
if (bEnd == -1) {
bEnd = HttpPostBodyUtil.findEndOfString(sb);
return new String[] { sb.substring(aStart, aEnd), sb.substring(bStart, bEnd), "" };
}
cStart = HttpPostBodyUtil.findNonWhitespace(sb, bEnd + 1);
if (sb.charAt(bEnd - 1) == ' ') {
bEnd--;
}
cEnd = HttpPostBodyUtil.findEndOfString(sb);
return new String[] { sb.substring(aStart, aEnd), sb.substring(bStart, bEnd), sb.substring(cStart, cEnd) };
} | Split the very first line (Content-Type value) into 3 Strings
@return the array of 3 Strings |
public Iterator<V> valueIterator(@SuppressWarnings("unused") K name) {
List<V> empty = Collections.emptyList();
return empty.iterator();
} | Equivalent to {@link #getAll(Object)} but no intermediate list is generated.
@param name the name of the header to retrieve
@return an {@link Iterator} of header values corresponding to {@code name}. |
private ByteBuf buffer(int i) {
ByteBuf b = buffers[i];
return b instanceof Component ? ((Component) b).buf : b;
} | Return the {@link ByteBuf} stored at the given index of the array. |
@Deprecated
public static SslContext newServerContext(File certChainFile, File keyFile) throws SSLException {
return newServerContext(certChainFile, keyFile, null);
} | Creates a new server-side {@link SslContext}.
@param certChainFile an X.509 certificate chain file in PEM format
@param keyFile a PKCS#8 private key file in PEM format
@return a new server-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newServerContext(
File certChainFile, File keyFile, String keyPassword,
Iterable<String> ciphers, Iterable<String> nextProtocols,
long sessionCacheSize, long sessionTimeout) throws SSLException {
return newServerContext(
null, certChainFile, keyFile, keyPassword,
ciphers, nextProtocols, sessionCacheSize, sessionTimeout);
} | Creates a new server-side {@link SslContext}.
@param certChainFile an X.509 certificate chain file in PEM format
@param keyFile a PKCS#8 private key file in PEM format
@param keyPassword the password of the {@code keyFile}.
{@code null} if it's not password-protected.
@param ciphers the cipher suites to enable, in the order of preference.
{@code null} to use the default cipher suites.
@param nextProtocols the application layer protocols to accept, in the order of preference.
{@code null} to disable TLS NPN/ALPN extension.
@param sessionCacheSize the size of the cache used for storing SSL session objects.
{@code 0} to use the default value.
@param sessionTimeout the timeout for the cached SSL session objects, in seconds.
{@code 0} to use the default value.
@return a new server-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newServerContext(
SslProvider provider, File certChainFile, File keyFile, String keyPassword) throws SSLException {
return newServerContext(provider, certChainFile, keyFile, keyPassword, null, IdentityCipherSuiteFilter.INSTANCE,
null, 0, 0);
} | Creates a new server-side {@link SslContext}.
@param provider the {@link SslContext} implementation to use.
{@code null} to use the current default one.
@param certChainFile an X.509 certificate chain file in PEM format
@param keyFile a PKCS#8 private key file in PEM format
@param keyPassword the password of the {@code keyFile}.
{@code null} if it's not password-protected.
@return a new server-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newServerContext(
SslProvider provider,
File certChainFile, File keyFile, String keyPassword,
Iterable<String> ciphers, Iterable<String> nextProtocols,
long sessionCacheSize, long sessionTimeout) throws SSLException {
return newServerContext(provider, certChainFile, keyFile, keyPassword,
ciphers, IdentityCipherSuiteFilter.INSTANCE,
toApplicationProtocolConfig(nextProtocols), sessionCacheSize, sessionTimeout);
} | Creates a new server-side {@link SslContext}.
@param provider the {@link SslContext} implementation to use.
{@code null} to use the current default one.
@param certChainFile an X.509 certificate chain file in PEM format
@param keyFile a PKCS#8 private key file in PEM format
@param keyPassword the password of the {@code keyFile}.
{@code null} if it's not password-protected.
@param ciphers the cipher suites to enable, in the order of preference.
{@code null} to use the default cipher suites.
@param nextProtocols the application layer protocols to accept, in the order of preference.
{@code null} to disable TLS NPN/ALPN extension.
@param sessionCacheSize the size of the cache used for storing SSL session objects.
{@code 0} to use the default value.
@param sessionTimeout the timeout for the cached SSL session objects, in seconds.
{@code 0} to use the default value.
@return a new server-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newServerContext(SslProvider provider,
File certChainFile, File keyFile, String keyPassword,
Iterable<String> ciphers, CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn,
long sessionCacheSize, long sessionTimeout) throws SSLException {
return newServerContext(provider, null, null, certChainFile, keyFile, keyPassword, null,
ciphers, cipherFilter, apn, sessionCacheSize, sessionTimeout);
} | Creates a new server-side {@link SslContext}.
@param provider the {@link SslContext} implementation to use.
{@code null} to use the current default one.
@param certChainFile an X.509 certificate chain file in PEM format
@param keyFile a PKCS#8 private key file in PEM format
@param keyPassword the password of the {@code keyFile}.
{@code null} if it's not password-protected.
@param ciphers the cipher suites to enable, in the order of preference.
{@code null} to use the default cipher suites.
@param cipherFilter a filter to apply over the supplied list of ciphers
Only required if {@code provider} is {@link SslProvider#JDK}
@param apn Provides a means to configure parameters related to application protocol negotiation.
@param sessionCacheSize the size of the cache used for storing SSL session objects.
{@code 0} to use the default value.
@param sessionTimeout the timeout for the cached SSL session objects, in seconds.
{@code 0} to use the default value.
@return a new server-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
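The non-deprecated replacement mentioned in the {@code @deprecated} notes above, sketched with {@link SslContextBuilder}; the file names are placeholders:
private static SslContext buildServerContext() throws SSLException {
    return SslContextBuilder
            .forServer(new File("cert-chain.pem"), new File("private-key.pem"))
            .sslProvider(SslProvider.OPENSSL)              // optional; omit for the default provider
            .ciphers(null, IdentityCipherSuiteFilter.INSTANCE)
            .sessionCacheSize(0)                           // 0 = default
            .sessionTimeout(0)                             // 0 = default
            .build();
}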
@Deprecated
public static SslContext newClientContext(TrustManagerFactory trustManagerFactory) throws SSLException {
return newClientContext(null, null, trustManagerFactory);
} | Creates a new client-side {@link SslContext}.
@param trustManagerFactory the {@link TrustManagerFactory} that provides the {@link TrustManager}s
that verifies the certificates sent from servers.
{@code null} to use the default.
@return a new client-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newClientContext(
File certChainFile, TrustManagerFactory trustManagerFactory) throws SSLException {
return newClientContext(null, certChainFile, trustManagerFactory);
} | Creates a new client-side {@link SslContext}.
@param certChainFile an X.509 certificate chain file in PEM format.
{@code null} to use the system default
@param trustManagerFactory the {@link TrustManagerFactory} that provides the {@link TrustManager}s
that verifies the certificates sent from servers.
{@code null} to use the default.
@return a new client-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newClientContext(
File certChainFile, TrustManagerFactory trustManagerFactory,
Iterable<String> ciphers, Iterable<String> nextProtocols,
long sessionCacheSize, long sessionTimeout) throws SSLException {
return newClientContext(
null, certChainFile, trustManagerFactory,
ciphers, nextProtocols, sessionCacheSize, sessionTimeout);
} | Creates a new client-side {@link SslContext}.
@param certChainFile an X.509 certificate chain file in PEM format.
{@code null} to use the system default
@param trustManagerFactory the {@link TrustManagerFactory} that provides the {@link TrustManager}s
that verifies the certificates sent from servers.
{@code null} to use the default.
@param ciphers the cipher suites to enable, in the order of preference.
{@code null} to use the default cipher suites.
@param nextProtocols the application layer protocols to accept, in the order of preference.
{@code null} to disable TLS NPN/ALPN extension.
@param sessionCacheSize the size of the cache used for storing SSL session objects.
{@code 0} to use the default value.
@param sessionTimeout the timeout for the cached SSL session objects, in seconds.
{@code 0} to use the default value.
@return a new client-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newClientContext(
File certChainFile, TrustManagerFactory trustManagerFactory,
Iterable<String> ciphers, CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn,
long sessionCacheSize, long sessionTimeout) throws SSLException {
return newClientContext(
null, certChainFile, trustManagerFactory,
ciphers, cipherFilter, apn, sessionCacheSize, sessionTimeout);
} | Creates a new client-side {@link SslContext}.
@param certChainFile an X.509 certificate chain file in PEM format.
{@code null} to use the system default
@param trustManagerFactory the {@link TrustManagerFactory} that provides the {@link TrustManager}s
that verifies the certificates sent from servers.
{@code null} to use the default.
@param ciphers the cipher suites to enable, in the order of preference.
{@code null} to use the default cipher suites.
@param cipherFilter a filter to apply over the supplied list of ciphers
@param apn Provides a means to configure parameters related to application protocol negotiation.
@param sessionCacheSize the size of the cache used for storing SSL session objects.
{@code 0} to use the default value.
@param sessionTimeout the timeout for the cached SSL session objects, in seconds.
{@code 0} to use the default value.
@return a new client-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newClientContext(SslProvider provider) throws SSLException {
return newClientContext(provider, null, null);
} | Creates a new client-side {@link SslContext}.
@param provider the {@link SslContext} implementation to use.
{@code null} to use the current default one.
@return a new client-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newClientContext(SslProvider provider, File certChainFile) throws SSLException {
return newClientContext(provider, certChainFile, null);
} | Creates a new client-side {@link SslContext}.
@param provider the {@link SslContext} implementation to use.
{@code null} to use the current default one.
@param certChainFile an X.509 certificate chain file in PEM format.
{@code null} to use the system default
@return a new client-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newClientContext(
SslProvider provider, TrustManagerFactory trustManagerFactory) throws SSLException {
return newClientContext(provider, null, trustManagerFactory);
} | Creates a new client-side {@link SslContext}.
@param provider the {@link SslContext} implementation to use.
{@code null} to use the current default one.
@param trustManagerFactory the {@link TrustManagerFactory} that provides the {@link TrustManager}s
that verifies the certificates sent from servers.
{@code null} to use the default.
@return a new client-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newClientContext(
SslProvider provider, File certChainFile, TrustManagerFactory trustManagerFactory) throws SSLException {
return newClientContext(provider, certChainFile, trustManagerFactory, null, IdentityCipherSuiteFilter.INSTANCE,
null, 0, 0);
} | Creates a new client-side {@link SslContext}.
@param provider the {@link SslContext} implementation to use.
{@code null} to use the current default one.
@param certChainFile an X.509 certificate chain file in PEM format.
{@code null} to use the system default
@param trustManagerFactory the {@link TrustManagerFactory} that provides the {@link TrustManager}s
that verifies the certificates sent from servers.
{@code null} to use the default.
@return a new client-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
@Deprecated
public static SslContext newClientContext(
SslProvider provider,
File trustCertCollectionFile, TrustManagerFactory trustManagerFactory,
File keyCertChainFile, File keyFile, String keyPassword,
KeyManagerFactory keyManagerFactory,
Iterable<String> ciphers, CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn,
long sessionCacheSize, long sessionTimeout) throws SSLException {
try {
return newClientContextInternal(provider, null,
toX509Certificates(trustCertCollectionFile), trustManagerFactory,
toX509Certificates(keyCertChainFile), toPrivateKey(keyFile, keyPassword),
keyPassword, keyManagerFactory, ciphers, cipherFilter,
apn, null, sessionCacheSize, sessionTimeout, false);
} catch (Exception e) {
if (e instanceof SSLException) {
throw (SSLException) e;
}
throw new SSLException("failed to initialize the client-side SSL context", e);
}
} | Creates a new client-side {@link SslContext}.
@param provider the {@link SslContext} implementation to use.
{@code null} to use the current default one.
@param trustCertCollectionFile an X.509 certificate collection file in PEM format.
{@code null} to use the system default
@param trustManagerFactory the {@link TrustManagerFactory} that provides the {@link TrustManager}s
that verifies the certificates sent from servers.
{@code null} to use the default or the results of parsing
{@code trustCertCollectionFile}.
This parameter is ignored if {@code provider} is not {@link SslProvider#JDK}.
@param keyCertChainFile an X.509 certificate chain file in PEM format.
This provides the public key for mutual authentication.
{@code null} to use the system default
@param keyFile a PKCS#8 private key file in PEM format.
This provides the private key for mutual authentication.
{@code null} for no mutual authentication.
@param keyPassword the password of the {@code keyFile}.
{@code null} if it's not password-protected.
Ignored if {@code keyFile} is {@code null}.
@param keyManagerFactory the {@link KeyManagerFactory} that provides the {@link KeyManager}s
that is used to encrypt data being sent to servers.
{@code null} to use the default or the results of parsing
{@code keyCertChainFile} and {@code keyFile}.
This parameter is ignored if {@code provider} is not {@link SslProvider#JDK}.
@param ciphers the cipher suites to enable, in the order of preference.
{@code null} to use the default cipher suites.
@param cipherFilter a filter to apply over the supplied list of ciphers
@param apn Provides a means to configure parameters related to application protocol negotiation.
@param sessionCacheSize the size of the cache used for storing SSL session objects.
{@code 0} to use the default value.
@param sessionTimeout the timeout for the cached SSL session objects, in seconds.
{@code 0} to use the default value.
@return a new client-side {@link SslContext}
@deprecated Replaced by {@link SslContextBuilder} |
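The corresponding client-side replacement, again only a sketch with placeholder file names; mutual authentication via {@code keyManager(...)} is optional:
private static SslContext buildClientContext() throws SSLException {
    return SslContextBuilder
            .forClient()
            .trustManager(new File("trusted-certs.pem"))   // or pass a TrustManagerFactory
            .keyManager(new File("client-chain.pem"), new File("client-key.pem"), null)
            .ciphers(null, IdentityCipherSuiteFilter.INSTANCE)
            .sessionCacheSize(0)
            .sessionTimeout(0)
            .build();
}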
public SslHandler newHandler(ByteBufAllocator alloc, Executor delegatedTaskExecutor) {
return newHandler(alloc, startTls, delegatedTaskExecutor);
} | Creates a new {@link SslHandler}.
<p>If {@link SslProvider#OPENSSL_REFCNT} is used then the returned {@link SslHandler} will release the engine
that is wrapped. If the returned {@link SslHandler} is not inserted into a pipeline then you may leak native
memory!
<p><b>Beware</b>: the underlying generated {@link SSLEngine} won't have
<a href="https://wiki.openssl.org/index.php/Hostname_validation">hostname verification</a> enabled by default.
If you create a {@link SslHandler} for the client side and want proper security, we advise that you configure
the {@link SSLEngine} (see {@link javax.net.ssl.SSLParameters#setEndpointIdentificationAlgorithm(String)}):
<pre>
SSLEngine sslEngine = sslHandler.engine();
SSLParameters sslParameters = sslEngine.getSSLParameters();
// only available since Java 7
sslParameters.setEndpointIdentificationAlgorithm("HTTPS");
sslEngine.setSSLParameters(sslParameters);
</pre>
<p>
The underlying {@link SSLEngine} may not follow the restrictions imposed by the
<a href="https://docs.oracle.com/javase/7/docs/api/javax/net/ssl/SSLEngine.html">SSLEngine javadocs</a> which
limits wrap/unwrap to operate on a single SSL/TLS packet.
@param alloc If supported by the SSLEngine then the SSLEngine will use this to allocate ByteBuf objects.
@param delegatedTaskExecutor the {@link Executor} that will be used to execute tasks that are returned by
{@link SSLEngine#getDelegatedTask()}.
@return a new {@link SslHandler} |
protected SslHandler newHandler(ByteBufAllocator alloc, boolean startTls, Executor executor) {
return new SslHandler(newEngine(alloc), startTls, executor);
} | Create a new SslHandler.
@see #newHandler(ByteBufAllocator, String, int, boolean, Executor) |
public final SslHandler newHandler(ByteBufAllocator alloc, String peerHost, int peerPort) {
return newHandler(alloc, peerHost, peerPort, startTls);
} | Creates a new {@link SslHandler}
@see #newHandler(ByteBufAllocator, String, int, Executor) |
static KeyStore buildKeyStore(X509Certificate[] certChain, PrivateKey key, char[] keyPasswordChars)
throws KeyStoreException, NoSuchAlgorithmException,
CertificateException, IOException {
KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
ks.load(null, null);
ks.setKeyEntry(ALIAS, key, keyPasswordChars, certChain);
return ks;
} | Generates a new {@link KeyStore}.
@param certChain an X.509 certificate chain
@param key a PKCS#8 private key
@param keyPasswordChars the password of the {@code keyFile}.
{@code null} if it's not password-protected.
@return generated {@link KeyStore}. |
@Deprecated
protected static TrustManagerFactory buildTrustManagerFactory(
File certChainFile, TrustManagerFactory trustManagerFactory)
throws NoSuchAlgorithmException, CertificateException, KeyStoreException, IOException {
X509Certificate[] x509Certs = toX509Certificates(certChainFile);
return buildTrustManagerFactory(x509Certs, trustManagerFactory);
} | Build a {@link TrustManagerFactory} from a certificate chain file.
@param certChainFile The certificate file to build from.
@param trustManagerFactory The existing {@link TrustManagerFactory} that will be used if not {@code null}.
@return A {@link TrustManagerFactory} which contains the certificates in {@code certChainFile} |
public void verify(PublicKey key, Provider sigProvider)
throws CertificateException, NoSuchAlgorithmException, InvalidKeyException, SignatureException {
unwrap().verify(key, sigProvider);
} | No @Override annotation as this method was only introduced in Java 8. |
protected String format(ChannelHandlerContext ctx, String eventName) {
String chStr = ctx.channel().toString();
return new StringBuilder(chStr.length() + 1 + eventName.length())
.append(chStr)
.append(' ')
.append(eventName)
.toString();
} | Formats an event and returns the formatted message.
@param eventName the name of the event |
protected String format(ChannelHandlerContext ctx, String eventName, Object arg) {
if (arg instanceof ByteBuf) {
return formatByteBuf(ctx, eventName, (ByteBuf) arg);
} else if (arg instanceof ByteBufHolder) {
return formatByteBufHolder(ctx, eventName, (ByteBufHolder) arg);
} else {
return formatSimple(ctx, eventName, arg);
}
} | Formats an event and returns the formatted message.
@param eventName the name of the event
@param arg the argument of the event |
protected String format(ChannelHandlerContext ctx, String eventName, Object firstArg, Object secondArg) {
if (secondArg == null) {
return formatSimple(ctx, eventName, firstArg);
}
String chStr = ctx.channel().toString();
String arg1Str = String.valueOf(firstArg);
String arg2Str = secondArg.toString();
StringBuilder buf = new StringBuilder(
chStr.length() + 1 + eventName.length() + 2 + arg1Str.length() + 2 + arg2Str.length());
buf.append(chStr).append(' ').append(eventName).append(": ").append(arg1Str).append(", ").append(arg2Str);
return buf.toString();
} | Formats an event and returns the formatted message. This method is currently only used for formatting
{@link ChannelOutboundHandler#connect(ChannelHandlerContext, SocketAddress, SocketAddress, ChannelPromise)}.
@param eventName the name of the event
@param firstArg the first argument of the event
@param secondArg the second argument of the event |
private static String formatByteBuf(ChannelHandlerContext ctx, String eventName, ByteBuf msg) {
String chStr = ctx.channel().toString();
int length = msg.readableBytes();
if (length == 0) {
StringBuilder buf = new StringBuilder(chStr.length() + 1 + eventName.length() + 4);
buf.append(chStr).append(' ').append(eventName).append(": 0B");
return buf.toString();
} else {
int rows = length / 16 + (length % 16 == 0? 0 : 1) + 4;
StringBuilder buf = new StringBuilder(chStr.length() + 1 + eventName.length() + 2 + 10 + 1 + 2 + rows * 80);
buf.append(chStr).append(' ').append(eventName).append(": ").append(length).append('B').append(NEWLINE);
appendPrettyHexDump(buf, msg);
return buf.toString();
}
} | Generates the default log message of the specified event whose argument is a {@link ByteBuf}. |
private static String formatByteBufHolder(ChannelHandlerContext ctx, String eventName, ByteBufHolder msg) {
String chStr = ctx.channel().toString();
String msgStr = msg.toString();
ByteBuf content = msg.content();
int length = content.readableBytes();
if (length == 0) {
StringBuilder buf = new StringBuilder(chStr.length() + 1 + eventName.length() + 2 + msgStr.length() + 4);
buf.append(chStr).append(' ').append(eventName).append(", ").append(msgStr).append(", 0B");
return buf.toString();
} else {
int rows = length / 16 + (length % 16 == 0? 0 : 1) + 4;
StringBuilder buf = new StringBuilder(
chStr.length() + 1 + eventName.length() + 2 + msgStr.length() + 2 + 10 + 1 + 2 + rows * 80);
buf.append(chStr).append(' ').append(eventName).append(": ")
.append(msgStr).append(", ").append(length).append('B').append(NEWLINE);
appendPrettyHexDump(buf, content);
return buf.toString();
}
} | Generates the default log message of the specified event whose argument is a {@link ByteBufHolder}. |
private static String formatSimple(ChannelHandlerContext ctx, String eventName, Object msg) {
String chStr = ctx.channel().toString();
String msgStr = String.valueOf(msg);
StringBuilder buf = new StringBuilder(chStr.length() + 1 + eventName.length() + 2 + msgStr.length());
return buf.append(chStr).append(' ').append(eventName).append(": ").append(msgStr).toString();
} | Generates the default log message of the specified event whose argument is an arbitrary object. |
private static List<Entry<String, String>> formget(
Bootstrap bootstrap, String host, int port, String get, URI uriSimple) throws Exception {
// XXX /formget
// No use of HttpPostRequestEncoder since not a POST
Channel channel = bootstrap.connect(host, port).sync().channel();
// Prepare the HTTP request.
QueryStringEncoder encoder = new QueryStringEncoder(get);
// add Form attribute
encoder.addParam("getform", "GET");
encoder.addParam("info", "first value");
encoder.addParam("secondinfo", "secondvalue ���&");
// not the big one since it is not compatible with GET size
// encoder.addParam("thirdinfo", textArea);
encoder.addParam("thirdinfo", "third value\r\ntest second line\r\n\r\nnew line\r\n");
encoder.addParam("Send", "Send");
URI uriGet = new URI(encoder.toString());
HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uriGet.toASCIIString());
HttpHeaders headers = request.headers();
headers.set(HttpHeaderNames.HOST, host);
headers.set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
headers.set(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.GZIP + "," + HttpHeaderValues.DEFLATE);
headers.set(HttpHeaderNames.ACCEPT_CHARSET, "ISO-8859-1,utf-8;q=0.7,*;q=0.7");
headers.set(HttpHeaderNames.ACCEPT_LANGUAGE, "fr");
headers.set(HttpHeaderNames.REFERER, uriSimple.toString());
headers.set(HttpHeaderNames.USER_AGENT, "Netty Simple Http Client side");
headers.set(HttpHeaderNames.ACCEPT, "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8");
//connection will not close but needed
// headers.set("Connection","keep-alive");
// headers.set("Keep-Alive","300");
headers.set(
HttpHeaderNames.COOKIE, ClientCookieEncoder.STRICT.encode(
new DefaultCookie("my-cookie", "foo"),
new DefaultCookie("another-cookie", "bar"))
);
// send request
channel.writeAndFlush(request);
// Wait for the server to close the connection.
channel.closeFuture().sync();
// convert headers to list
return headers.entries();
} | Standard usage of the HTTP API in Netty without file upload (GET cannot be used for file upload
due to the limitation on request size).
@return the list of headers that will be used in every example after |
private static List<InterfaceHttpData> formpost(
Bootstrap bootstrap,
String host, int port, URI uriSimple, File file, HttpDataFactory factory,
List<Entry<String, String>> headers) throws Exception {
// XXX /formpost
// Start the connection attempt.
ChannelFuture future = bootstrap.connect(SocketUtils.socketAddress(host, port));
// Wait until the connection attempt succeeds or fails.
Channel channel = future.sync().channel();
// Prepare the HTTP request.
HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, uriSimple.toASCIIString());
// Use the PostBody encoder
HttpPostRequestEncoder bodyRequestEncoder =
new HttpPostRequestEncoder(factory, request, false); // false => not multipart
// it is legal to add directly header or cookie into the request until finalize
for (Entry<String, String> entry : headers) {
request.headers().set(entry.getKey(), entry.getValue());
}
// add Form attribute
bodyRequestEncoder.addBodyAttribute("getform", "POST");
bodyRequestEncoder.addBodyAttribute("info", "first value");
bodyRequestEncoder.addBodyAttribute("secondinfo", "secondvalue ���&");
bodyRequestEncoder.addBodyAttribute("thirdinfo", textArea);
bodyRequestEncoder.addBodyAttribute("fourthinfo", textAreaLong);
bodyRequestEncoder.addBodyFileUpload("myfile", file, "application/x-zip-compressed", false);
// finalize request
request = bodyRequestEncoder.finalizeRequest();
// Create the bodylist to be reused on the last version with Multipart support
List<InterfaceHttpData> bodylist = bodyRequestEncoder.getBodyListAttributes();
// send request
channel.write(request);
// test if request was chunked and if so, finish the write
if (bodyRequestEncoder.isChunked()) { // could do either request.isChunked()
// either do it through ChunkedWriteHandler
channel.write(bodyRequestEncoder);
}
channel.flush();
// Do not clear here since we will reuse the InterfaceHttpData on the next request
// for the example (limit action on client side). Take this as a broadcast of the same
// request on both Post actions.
//
// On standard program, it is clearly recommended to clean all files after each request
// bodyRequestEncoder.cleanFiles();
// Wait for the server to close the connection.
channel.closeFuture().sync();
return bodylist;
} | Standard POST without multipart, but with memory-management support already provided by the factory.
@return the list of HttpData objects (attributes and file) to be reused on the next post |
private static void formpostmultipart(
Bootstrap bootstrap, String host, int port, URI uriFile, HttpDataFactory factory,
Iterable<Entry<String, String>> headers, List<InterfaceHttpData> bodylist) throws Exception {
// XXX /formpostmultipart
// Start the connection attempt.
ChannelFuture future = bootstrap.connect(SocketUtils.socketAddress(host, port));
// Wait until the connection attempt succeeds or fails.
Channel channel = future.sync().channel();
// Prepare the HTTP request.
HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, uriFile.toASCIIString());
// Use the PostBody encoder
HttpPostRequestEncoder bodyRequestEncoder =
new HttpPostRequestEncoder(factory, request, true); // true => multipart
// it is legal to add directly header or cookie into the request until finalize
for (Entry<String, String> entry : headers) {
request.headers().set(entry.getKey(), entry.getValue());
}
// add Form attribute from previous request in formpost()
bodyRequestEncoder.setBodyHttpDatas(bodylist);
// finalize request
bodyRequestEncoder.finalizeRequest();
// send request
channel.write(request);
// test if request was chunked and if so, finish the write
if (bodyRequestEncoder.isChunked()) {
channel.write(bodyRequestEncoder);
}
channel.flush();
// Now no more use of file representation (and list of HttpData)
bodyRequestEncoder.cleanFiles();
// Wait for the server to close the connection.
channel.closeFuture().sync();
} | Multipart example |
final void clear() {
while (!resolveCache.isEmpty()) {
for (Iterator<Entry<String, Entries>> i = resolveCache.entrySet().iterator(); i.hasNext();) {
Map.Entry<String, Entries> e = i.next();
i.remove();
e.getValue().clearAndCancel();
}
}
} | Remove everything from the cache. |
final boolean clear(String hostname) {
Entries entries = resolveCache.remove(hostname);
return entries != null && entries.clearAndCancel();
} | Clear all entries (if anything exists) for the given hostname and return {@code true} if anything was removed. |
final List<? extends E> get(String hostname) {
Entries entries = resolveCache.get(hostname);
return entries == null ? null : entries.get();
} | Returns all cached entries for the given hostname. |
final void cache(String hostname, E value, int ttl, EventLoop loop) {
Entries entries = resolveCache.get(hostname);
if (entries == null) {
entries = new Entries(hostname);
Entries oldEntries = resolveCache.putIfAbsent(hostname, entries);
if (oldEntries != null) {
entries = oldEntries;
}
}
entries.add(value, ttl, loop);
} | Cache a value for the given hostname that will automatically expire once the TTL is reached. |
public ServerBootstrap group(EventLoopGroup parentGroup, EventLoopGroup childGroup) {
super.group(parentGroup);
if (childGroup == null) {
throw new NullPointerException("childGroup");
}
if (this.childGroup != null) {
throw new IllegalStateException("childGroup set already");
}
this.childGroup = childGroup;
return this;
} | Set the {@link EventLoopGroup} for the parent (acceptor) and the child (client). These
{@link EventLoopGroup}s are used to handle all the events and IO for the {@link ServerChannel} and
{@link Channel}s. |
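The usual two-group setup, for context; this is a sketch, and the NIO transport, port and logging handler are placeholders:
public static void main(String[] args) throws InterruptedException {
    EventLoopGroup bossGroup = new NioEventLoopGroup(1);   // accepts incoming connections
    EventLoopGroup workerGroup = new NioEventLoopGroup();  // serves the accepted channels
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup)
         .channel(NioServerSocketChannel.class)
         .childOption(ChannelOption.SO_KEEPALIVE, true)
         .childHandler(new ChannelInitializer<SocketChannel>() {
             @Override
             protected void initChannel(SocketChannel ch) {
                 ch.pipeline().addLast(new LoggingHandler());
             }
         });
        b.bind(8080).sync().channel().closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}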
public <T> ServerBootstrap childOption(ChannelOption<T> childOption, T value) {
if (childOption == null) {
throw new NullPointerException("childOption");
}
if (value == null) {
synchronized (childOptions) {
childOptions.remove(childOption);
}
} else {
synchronized (childOptions) {
childOptions.put(childOption, value);
}
}
return this;
} | Allows specifying a {@link ChannelOption} which is used for the {@link Channel} instances once they get created
(after the acceptor has accepted the {@link Channel}). Use a value of {@code null} to remove a previously set
{@link ChannelOption}. |
public <T> ServerBootstrap childAttr(AttributeKey<T> childKey, T value) {
if (childKey == null) {
throw new NullPointerException("childKey");
}
if (value == null) {
childAttrs.remove(childKey);
} else {
childAttrs.put(childKey, value);
}
return this;
} | Set the specific {@link AttributeKey} with the given value on every child {@link Channel}. If the value is
{@code null} the {@link AttributeKey} is removed |
public ByteBuf remove(int bytes, ChannelPromise aggregatePromise) {
return remove(channel.alloc(), bytes, aggregatePromise);
} | Remove a {@link ByteBuf} from the queue with the specified number of bytes. Any added buffer whose bytes are
fully consumed during removal will have its promise completed when the passed aggregate {@link ChannelPromise}
completes.
@param bytes the maximum number of readable bytes in the returned {@link ByteBuf}, if {@code bytes} is greater
than {@link #readableBytes} then a buffer of length {@link #readableBytes} is returned.
@param aggregatePromise used to aggregate the promises and listeners for the constituent buffers.
@return a {@link ByteBuf} composed of the enqueued buffers. |
private static long mix64(long z) {
z = (z ^ (z >>> 33)) * 0xff51afd7ed558ccdL;
z = (z ^ (z >>> 33)) * 0xc4ceb9fe1a85ec53L;
return z ^ (z >>> 33);
} | http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/main/java/util/concurrent/ThreadLocalRandom.java |
public int nextInt(int least, int bound) {
if (least >= bound) {
throw new IllegalArgumentException();
}
return nextInt(bound - least) + least;
} | Returns a pseudorandom, uniformly distributed value between the
given least value (inclusive) and bound (exclusive).
@param least the least value returned
@param bound the upper bound (exclusive)
@throws IllegalArgumentException if least is greater than or equal to bound
@return the next value |
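A short worked example of the range mapping, illustrative arithmetic only; 'random' stands for an instance of this class obtained elsewhere.
// nextInt(10, 20) computes nextInt(20 - 10), a draw from [0, 10) such as 7, then returns 7 + 10 = 17.
int port = random.nextInt(49152, 65536); // uniform over the ephemeral port range [49152, 65535]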
public double nextDouble(double least, double bound) {
if (least >= bound) {
throw new IllegalArgumentException();
}
return nextDouble() * (bound - least) + least;
} | Returns a pseudorandom, uniformly distributed value between the
given least value (inclusive) and bound (exclusive).
@param least the least value returned
@param bound the upper bound (exclusive)
@return the next value
@throws IllegalArgumentException if least is greater than or equal to bound |
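The same affine mapping applies to doubles; a hedged one-liner with 'random' obtained elsewhere.
// nextDouble(0.5, 1.5) computes nextDouble() * (1.5 - 0.5) + 0.5, so a raw draw of 0.2 yields 0.7.
double backoffFactor = random.nextDouble(0.5, 1.5); // always in [0.5, 1.5)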
public boolean add(ByteBuf buf) {
if (count == IOV_MAX) {
// No more room!
return false;
} else if (buf.nioBufferCount() == 1) {
final int len = buf.readableBytes();
if (len == 0) {
return true;
}
if (buf.hasMemoryAddress()) {
return add(buf.memoryAddress(), buf.readerIndex(), len);
} else {
ByteBuffer nioBuffer = buf.internalNioBuffer(buf.readerIndex(), len);
return add(Buffer.memoryAddress(nioBuffer), nioBuffer.position(), len);
}
} else {
ByteBuffer[] buffers = buf.nioBuffers();
for (ByteBuffer nioBuffer : buffers) {
final int len = nioBuffer.remaining();
if (len != 0 &&
(!add(Buffer.memoryAddress(nioBuffer), nioBuffer.position(), len) || count == IOV_MAX)) {
return false;
}
}
return true;
}
} | Add a {@link ByteBuf} to this {@link IovArray}.
@param buf The {@link ByteBuf} to add.
@return {@code true} if the entire {@link ByteBuf} has been added to this {@link IovArray}. Note that if the
{@link ByteBuf} is a {@link CompositeByteBuf}, {@code false} may be returned even if some of the components
have been added. |
public static OpenSslX509KeyManagerFactory newEngineBased(File certificateChain, String password)
throws CertificateException, IOException,
KeyStoreException, NoSuchAlgorithmException, UnrecoverableKeyException {
return newEngineBased(SslContext.toX509Certificates(certificateChain), password);
} | Create a new initialized {@link OpenSslX509KeyManagerFactory} which loads its {@link PrivateKey} directly from
an {@code OpenSSL engine} via the
<a href="https://www.openssl.org/docs/man1.1.0/crypto/ENGINE_load_private_key.html">ENGINE_load_private_key</a>
function. |
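A hedged sketch of wiring the engine-backed factory into a server SslContext; "cert.pem" and "pin" are placeholders, and netty-tcnative built with OpenSSL engine support is assumed to be on the classpath.
OpenSslX509KeyManagerFactory kmf =
        OpenSslX509KeyManagerFactory.newEngineBased(new File("cert.pem"), "pin");
SslContext sslCtx = SslContextBuilder.forServer(kmf)
        .sslProvider(SslProvider.OPENSSL)
        .build();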
public static OpenSslX509KeyManagerFactory newEngineBased(X509Certificate[] certificateChain, String password)
throws CertificateException, IOException,
KeyStoreException, NoSuchAlgorithmException, UnrecoverableKeyException {
KeyStore store = new OpenSslKeyStore(certificateChain.clone(), false);
store.load(null, null);
OpenSslX509KeyManagerFactory factory = new OpenSslX509KeyManagerFactory();
factory.init(store, password == null ? null : password.toCharArray());
return factory;
} | Create a new initialized {@link OpenSslX509KeyManagerFactory} which loads its {@link PrivateKey} directly from
an {@code OpenSSL engine} via the
<a href="https://www.openssl.org/docs/man1.1.0/crypto/ENGINE_load_private_key.html">ENGINE_load_private_key</a>
function. |
public static OpenSslX509KeyManagerFactory newKeyless(File chain)
throws CertificateException, IOException,
KeyStoreException, NoSuchAlgorithmException, UnrecoverableKeyException {
return newKeyless(SslContext.toX509Certificates(chain));
} | See {@link OpenSslX509KeyManagerFactory#newKeyless(X509Certificate...)}. |
public static OpenSslX509KeyManagerFactory newKeyless(X509Certificate... certificateChain)
throws CertificateException, IOException,
KeyStoreException, NoSuchAlgorithmException, UnrecoverableKeyException {
KeyStore store = new OpenSslKeyStore(certificateChain.clone(), true);
store.load(null, null);
OpenSslX509KeyManagerFactory factory = new OpenSslX509KeyManagerFactory();
factory.init(store, null);
return factory;
} | Returns a new initialized {@link OpenSslX509KeyManagerFactory} which will provide its private key by using the
{@link OpenSslPrivateKeyMethod}. |
public static void removeAll() {
InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.getIfSet();
if (threadLocalMap == null) {
return;
}
try {
Object v = threadLocalMap.indexedVariable(variablesToRemoveIndex);
if (v != null && v != InternalThreadLocalMap.UNSET) {
@SuppressWarnings("unchecked")
Set<FastThreadLocal<?>> variablesToRemove = (Set<FastThreadLocal<?>>) v;
FastThreadLocal<?>[] variablesToRemoveArray =
variablesToRemove.toArray(new FastThreadLocal[0]);
for (FastThreadLocal<?> tlv: variablesToRemoveArray) {
tlv.remove(threadLocalMap);
}
}
} finally {
InternalThreadLocalMap.remove();
}
} | Removes all {@link FastThreadLocal} variables bound to the current thread. This operation is useful when you
are in a container environment, and you don't want to leave the thread local variables in the threads you do not
manage. |
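A hedged usage sketch: a lazily initialized FastThreadLocal (BUF is a hypothetical name) cleaned up with removeAll() before a container-managed thread is handed back.
private static final FastThreadLocal<StringBuilder> BUF = new FastThreadLocal<StringBuilder>() {
    @Override
    protected StringBuilder initialValue() {
        return new StringBuilder(64);
    }
};

static void handleRequest() {
    try {
        StringBuilder sb = BUF.get(); // created on first access via initialValue()
        sb.setLength(0);
        // ... use sb ...
    } finally {
        FastThreadLocal.removeAll(); // drop every FastThreadLocal bound to this thread
    }
}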
@SuppressWarnings("unchecked")
public final V get() {
InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.get();
Object v = threadLocalMap.indexedVariable(index);
if (v != InternalThreadLocalMap.UNSET) {
return (V) v;
}
return initialize(threadLocalMap);
} | Returns the current value for the current thread |
@SuppressWarnings("unchecked")
public final V getIfExists() {
InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.getIfSet();
if (threadLocalMap != null) {
Object v = threadLocalMap.indexedVariable(index);
if (v != InternalThreadLocalMap.UNSET) {
return (V) v;
}
}
return null;
} | Returns the current value for the current thread if it exists, {@code null} otherwise. |
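Continuing the hypothetical BUF thread-local from the sketch above, the contrast with get() in one place: getIfExists() never triggers initialValue().
StringBuilder maybe = BUF.getIfExists(); // null if this thread never called get() or set()
StringBuilder always = BUF.get();        // lazily created if absent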
public final void set(V value) {
if (value != InternalThreadLocalMap.UNSET) {
InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.get();
setKnownNotUnset(threadLocalMap, value);
} else {
remove();
}
} | Set the value for the current thread. |
@SuppressWarnings("unchecked")
public final void remove(InternalThreadLocalMap threadLocalMap) {
if (threadLocalMap == null) {
return;
}
Object v = threadLocalMap.removeIndexedVariable(index);
removeFromVariablesToRemove(threadLocalMap, this);
if (v != InternalThreadLocalMap.UNSET) {
try {
onRemoval((V) v);
} catch (Exception e) {
PlatformDependent.throwException(e);
}
}
} | Sets the value to uninitialized for the specified thread local map;
a subsequent call to get() will trigger a call to initialValue().
The specified thread local map must be for the current thread. |
static InetAddress decodeAddress(DnsRecord record, String name, boolean decodeIdn) {
if (!(record instanceof DnsRawRecord)) {
return null;
}
final ByteBuf content = ((ByteBufHolder) record).content();
final int contentLen = content.readableBytes();
if (contentLen != INADDRSZ4 && contentLen != INADDRSZ6) {
return null;
}
final byte[] addrBytes = new byte[contentLen];
content.getBytes(content.readerIndex(), addrBytes);
try {
return InetAddress.getByAddress(decodeIdn ? IDN.toUnicode(name) : name, addrBytes);
} catch (UnknownHostException e) {
// Should never reach here.
throw new Error(e);
}
} | Decodes an {@link InetAddress} from an A or AAAA {@link DnsRawRecord}.
@param record the {@link DnsRecord}, most likely a {@link DnsRawRecord}
@param name the host name of the decoded address
@param decodeIdn whether to convert {@code name} to a unicode host name
@return the {@link InetAddress}, or {@code null} if {@code record} is not a {@link DnsRawRecord} or
its content is malformed |
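A hedged illustration of the input this method expects: an A record whose content holds the four raw address bytes. The record construction uses public netty-codec-dns classes; decodeAddress itself is resolver-internal.
ByteBuf content = Unpooled.wrappedBuffer(new byte[] { 127, 0, 0, 1 });
DnsRawRecord record = new DefaultDnsRawRecord("example.com.", DnsRecordType.A, 60, content);
// Handing this record to decodeAddress(record, "example.com.", true) yields an InetAddress
// whose host name is "example.com." and whose address bytes are 127.0.0.1.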
@UnstableApi
public void setOcspResponse(byte[] response) {
if (!enableOcsp) {
throw new IllegalStateException("OCSP stapling is not enabled");
}
if (clientMode) {
throw new IllegalStateException("Not a server SSLEngine");
}
synchronized (this) {
SSL.setOcspResponse(ssl, response);
}
} | Sets the OCSP response. |
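A hedged sketch of server-side OCSP stapling; 'keyManagerFactory' and 'ocspStaple' (the DER-encoded responder reply) are placeholders obtained elsewhere.
ReferenceCountedOpenSslContext ctx = (ReferenceCountedOpenSslContext)
        SslContextBuilder.forServer(keyManagerFactory)
                .sslProvider(SslProvider.OPENSSL)
                .enableOcsp(true)
                .build();
ReferenceCountedOpenSslEngine engine =
        (ReferenceCountedOpenSslEngine) ctx.newEngine(ByteBufAllocator.DEFAULT);
engine.setOcspResponse(ocspStaple); // staple the pre-fetched OCSP response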
public final synchronized void shutdown() {
if (DESTROYED_UPDATER.compareAndSet(this, 0, 1)) {
engineMap.remove(ssl);
SSL.freeSSL(ssl);
ssl = networkBIO = 0;
isInboundDone = outboundClosed = true;
}
// On shutdown clear all errors
SSL.clearError();
} | Destroys this engine. |
private int writePlaintextData(final ByteBuffer src, int len) {
final int pos = src.position();
final int limit = src.limit();
final int sslWrote;
if (src.isDirect()) {
sslWrote = SSL.writeToSSL(ssl, bufferAddress(src) + pos, len);
if (sslWrote > 0) {
src.position(pos + sslWrote);
}
} else {
ByteBuf buf = alloc.directBuffer(len);
try {
src.limit(pos + len);
buf.setBytes(0, src);
src.limit(limit);
sslWrote = SSL.writeToSSL(ssl, memoryAddress(buf), len);
if (sslWrote > 0) {
src.position(pos + sslWrote);
} else {
src.position(pos);
}
} finally {
buf.release();
}
}
return sslWrote;
} | Write plaintext data to the OpenSSL internal BIO.
Calling this function with src.remaining == 0 is undefined. |
private ByteBuf writeEncryptedData(final ByteBuffer src, int len) {
final int pos = src.position();
if (src.isDirect()) {
SSL.bioSetByteBuffer(networkBIO, bufferAddress(src) + pos, len, false);
} else {
final ByteBuf buf = alloc.directBuffer(len);
try {
final int limit = src.limit();
src.limit(pos + len);
buf.writeBytes(src);
// Restore the original position and limit because we don't want to consume from `src`.
src.position(pos);
src.limit(limit);
SSL.bioSetByteBuffer(networkBIO, memoryAddress(buf), len, false);
return buf;
} catch (Throwable cause) {
buf.release();
PlatformDependent.throwException(cause);
}
}
return null;
} | Write encrypted data to the OpenSSL network BIO. |
private int readPlaintextData(final ByteBuffer dst) {
final int sslRead;
final int pos = dst.position();
if (dst.isDirect()) {
sslRead = SSL.readFromSSL(ssl, bufferAddress(dst) + pos, dst.limit() - pos);
if (sslRead > 0) {
dst.position(pos + sslRead);
}
} else {
final int limit = dst.limit();
final int len = min(maxEncryptedPacketLength0(), limit - pos);
final ByteBuf buf = alloc.directBuffer(len);
try {
sslRead = SSL.readFromSSL(ssl, memoryAddress(buf), len);
if (sslRead > 0) {
dst.limit(pos + sslRead);
buf.getBytes(buf.readerIndex(), dst);
dst.limit(limit);
}
} finally {
buf.release();
}
}
return sslRead;
} | Read plaintext data from the OpenSSL internal BIO |
private SSLException shutdownWithError(String operations, int sslError) {
return shutdownWithError(operations, sslError, SSL.getLastErrorNumber());
} | Log the error, shutdown the engine and throw an exception. |
private boolean doSSLShutdown() {
if (SSL.isInInit(ssl) != 0) {
// Only try to call SSL_shutdown if we are not in the init state anymore.
// Otherwise we will see 'error:140E0197:SSL routines:SSL_shutdown:shutdown while in init' in our logs.
//
// See also http://hg.nginx.org/nginx/rev/062c189fee20
return false;
}
int err = SSL.shutdownSSL(ssl);
if (err < 0) {
int sslErr = SSL.getError(ssl, err);
if (sslErr == SSL.SSL_ERROR_SYSCALL || sslErr == SSL.SSL_ERROR_SSL) {
if (logger.isDebugEnabled()) {
int error = SSL.getLastErrorNumber();
logger.debug("SSL_shutdown failed: OpenSSL error: {} {}", error, SSL.getErrorString(error));
}
// There was an internal error -- shutdown
shutdown();
return false;
}
SSL.clearError();
}
return true;
} | Attempt to call {@link SSL#shutdownSSL(long)}.
@return {@code false} if the call to {@link SSL#shutdownSSL(long)} was not attempted or returned an error. |
@Override
public final void setEnabledProtocols(String[] protocols) {
if (protocols == null) {
// This is correct from the API docs
throw new IllegalArgumentException();
}
int minProtocolIndex = OPENSSL_OP_NO_PROTOCOLS.length;
int maxProtocolIndex = 0;
for (String p: protocols) {
if (!OpenSsl.SUPPORTED_PROTOCOLS_SET.contains(p)) {
throw new IllegalArgumentException("Protocol " + p + " is not supported.");
}
if (p.equals(PROTOCOL_SSL_V2)) {
if (minProtocolIndex > OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV2) {
minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV2;
}
if (maxProtocolIndex < OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV2) {
maxProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV2;
}
} else if (p.equals(PROTOCOL_SSL_V3)) {
if (minProtocolIndex > OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV3) {
minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV3;
}
if (maxProtocolIndex < OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV3) {
maxProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV3;
}
} else if (p.equals(PROTOCOL_TLS_V1)) {
if (minProtocolIndex > OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1) {
minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1;
}
if (maxProtocolIndex < OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1) {
maxProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1;
}
} else if (p.equals(PROTOCOL_TLS_V1_1)) {
if (minProtocolIndex > OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_1) {
minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_1;
}
if (maxProtocolIndex < OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_1) {
maxProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_1;
}
} else if (p.equals(PROTOCOL_TLS_V1_2)) {
if (minProtocolIndex > OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_2) {
minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_2;
}
if (maxProtocolIndex < OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_2) {
maxProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_2;
}
} else if (p.equals(PROTOCOL_TLS_V1_3)) {
if (minProtocolIndex > OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_3) {
minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_3;
}
if (maxProtocolIndex < OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_3) {
maxProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_3;
}
}
}
synchronized (this) {
if (!isDestroyed()) {
// Clear out options which disable protocols
SSL.clearOptions(ssl, SSL.SSL_OP_NO_SSLv2 | SSL.SSL_OP_NO_SSLv3 | SSL.SSL_OP_NO_TLSv1 |
SSL.SSL_OP_NO_TLSv1_1 | SSL.SSL_OP_NO_TLSv1_2 | SSL.SSL_OP_NO_TLSv1_3);
int opts = 0;
for (int i = 0; i < minProtocolIndex; ++i) {
opts |= OPENSSL_OP_NO_PROTOCOLS[i];
}
assert maxProtocolIndex != MAX_VALUE;
for (int i = maxProtocolIndex + 1; i < OPENSSL_OP_NO_PROTOCOLS.length; ++i) {
opts |= OPENSSL_OP_NO_PROTOCOLS[i];
}
// Disable protocols we do not want
SSL.setOptions(ssl, opts);
} else {
throw new IllegalStateException("failed to enable protocols: " + Arrays.asList(protocols));
}
}
} | {@inheritDoc}
TLS doesn't support a way to advertise non-contiguous versions from the client's perspective, and the client
just advertises the max supported version. The TLS protocol also doesn't support all different combinations of
discrete protocols, and instead assumes contiguous ranges. OpenSSL has some unexpected behavior
(e.g. handshake failures) if non-contiguous protocols are used even where there is a compatible set of protocols
and ciphers. For these reasons this method will determine the minimum protocol and the maximum protocol and
enable a contiguous range from [min protocol, max protocol] in OpenSSL. |
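A hedged sketch showing the contiguous-range behaviour from the caller's side.
SslContext ctx = SslContextBuilder.forClient().sslProvider(SslProvider.OPENSSL).build();
SSLEngine engine = ctx.newEngine(ByteBufAllocator.DEFAULT);
engine.setEnabledProtocols(new String[] { "TLSv1.2", "TLSv1.3" });
// Requesting only {"TLSv1", "TLSv1.3"} would effectively enable TLSv1.1 and TLSv1.2 as well,
// because OpenSSL is configured with the contiguous range [min protocol, max protocol].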
private String toJavaCipherSuite(String openSslCipherSuite) {
if (openSslCipherSuite == null) {
return null;
}
String version = SSL.getVersion(ssl);
String prefix = toJavaCipherSuitePrefix(version);
return CipherSuiteConverter.toJava(openSslCipherSuite, prefix);
} | Converts the specified OpenSSL cipher suite to the Java cipher suite. |
private static String toJavaCipherSuitePrefix(String protocolVersion) {
final char c;
if (protocolVersion == null || protocolVersion.isEmpty()) {
c = 0;
} else {
c = protocolVersion.charAt(0);
}
switch (c) {
case 'T':
return "TLS";
case 'S':
return "SSL";
default:
return "UNKNOWN";
}
} | Converts the protocol version string returned by {@link SSL#getVersion(long)} to protocol family string. |
private static BitSet validCookieNameOctets() {
BitSet bits = new BitSet();
for (int i = 32; i < 127; i++) {
bits.set(i);
}
int[] separators = new int[]
{ '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t' };
for (int separator : separators) {
bits.set(separator, false);
}
return bits;
} | | "{" | "}" | SP | HT |
private static BitSet validCookieValueOctets() {
BitSet bits = new BitSet();
bits.set(0x21);
for (int i = 0x23; i <= 0x2B; i++) {
bits.set(i);
}
for (int i = 0x2D; i <= 0x3A; i++) {
bits.set(i);
}
for (int i = 0x3C; i <= 0x5B; i++) {
bits.set(i);
}
for (int i = 0x5D; i <= 0x7E; i++) {
bits.set(i);
}
return bits;
} | US-ASCII characters excluding CTLs, whitespace, DQUOTE, comma, semicolon, and backslash |
private static BitSet validCookieAttributeValueOctets() {
BitSet bits = new BitSet();
for (int i = 32; i < 127; i++) {
bits.set(i);
}
bits.set(';', false);
return bits;
} | path-value = <any CHAR except CTLs or ";"> |
public ChannelFlushPromiseNotifier add(ChannelPromise promise, long pendingDataSize) {
if (promise == null) {
throw new NullPointerException("promise");
}
checkPositiveOrZero(pendingDataSize, "pendingDataSize");
long checkpoint = writeCounter + pendingDataSize;
if (promise instanceof FlushCheckpoint) {
FlushCheckpoint cp = (FlushCheckpoint) promise;
cp.flushCheckpoint(checkpoint);
flushCheckpoints.add(cp);
} else {
flushCheckpoints.add(new DefaultFlushCheckpoint(checkpoint, promise));
}
return this;
} | Add a {@link ChannelPromise} to this {@link ChannelFlushPromiseNotifier} which will be notified after the given
{@code pendingDataSize} was reached. |
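A hedged sketch of the write/flush bookkeeping cycle; 'channel' and the 160-byte payload size are placeholders.
ChannelFlushPromiseNotifier notifier = new ChannelFlushPromiseNotifier();
ChannelPromise promise = channel.newPromise();
notifier.add(promise, 160);         // notify once 160 more bytes have been flushed
notifier.increaseWriteCounter(160); // record that those bytes were written out
notifier.notifyPromises();          // completes 'promise'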