code: string, lengths 67 to 466k
docstring: string, lengths 1 to 13.2k
static void closeOnFlush(Channel ch) { if (ch.isActive()) { ch.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE); } }
Closes the specified channel after all queued write requests are flushed.
private void closeBlock(ByteBuf out) { final Bzip2BlockCompressor blockCompressor = this.blockCompressor; if (!blockCompressor.isEmpty()) { blockCompressor.close(out); final int blockCRC = blockCompressor.crc(); streamCRC = (streamCRC << 1 | streamCRC >>> 31) ^ blockCRC; } }
Close current block and update {@link #streamCRC}.
public Entry<ChannelFuture, ChannelPromise> put(int streamId, ChannelFuture writeFuture, ChannelPromise promise) { return streamidPromiseMap.put(streamId, new SimpleEntry<ChannelFuture, ChannelPromise>(writeFuture, promise)); }
Create an association between an anticipated response stream id and a {@link io.netty.channel.ChannelPromise} @param streamId The stream for which a response is expected @param writeFuture A future that represents the request write operation @param promise The promise object that will be used to wait/notify events @return The previous object associated with {@code streamId} @see HttpResponseHandler#awaitResponses(long, java.util.concurrent.TimeUnit)
public void awaitResponses(long timeout, TimeUnit unit) { Iterator<Entry<Integer, Entry<ChannelFuture, ChannelPromise>>> itr = streamidPromiseMap.entrySet().iterator(); while (itr.hasNext()) { Entry<Integer, Entry<ChannelFuture, ChannelPromise>> entry = itr.next(); ChannelFuture writeFuture = entry.getValue().getKey(); if (!writeFuture.awaitUninterruptibly(timeout, unit)) { throw new IllegalStateException("Timed out waiting to write for stream id " + entry.getKey()); } if (!writeFuture.isSuccess()) { throw new RuntimeException(writeFuture.cause()); } ChannelPromise promise = entry.getValue().getValue(); if (!promise.awaitUninterruptibly(timeout, unit)) { throw new IllegalStateException("Timed out waiting for response on stream id " + entry.getKey()); } if (!promise.isSuccess()) { throw new RuntimeException(promise.cause()); } System.out.println("---Stream id: " + entry.getKey() + " received---"); itr.remove(); } }
Wait (sequentially) for a time duration for each anticipated response @param timeout Value of time to wait for each response @param unit Units associated with {@code timeout} @see HttpResponseHandler#put(int, io.netty.channel.ChannelFuture, io.netty.channel.ChannelPromise)
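A hedged usage sketch based on Netty's HTTP/2 client example; channel, request, streamId and responseHandler are assumptions, not part of the code above:
// Sketch only: channel is an established HTTP/2-capable Channel, request a
// FullHttpRequest, streamId the id assigned to it, and responseHandler an
// instance of the handler shown above.
ChannelPromise promise = channel.newPromise();
ChannelFuture writeFuture = channel.write(request);
responseHandler.put(streamId, writeFuture, promise);
channel.flush();
// Blocks up to 5 seconds per outstanding stream; throws on timeout or failure.
responseHandler.awaitResponses(5, TimeUnit.SECONDS);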
private static boolean anyInterfaceSupportsIpV6() { try { Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces(); while (interfaces.hasMoreElements()) { NetworkInterface iface = interfaces.nextElement(); Enumeration<InetAddress> addresses = iface.getInetAddresses(); while (addresses.hasMoreElements()) { if (addresses.nextElement() instanceof Inet6Address) { return true; } } } } catch (SocketException e) { logger.debug("Unable to detect if any interface supports IPv6, assuming IPv4-only", e); // ignore } return false; }
Returns {@code true} if any {@link NetworkInterface} supports {@code IPv6}, {@code false} otherwise.
protected DnsServerAddressStream newRedirectDnsServerStream( @SuppressWarnings("unused") String hostname, List<InetSocketAddress> nameservers) { DnsServerAddressStream cached = authoritativeDnsServerCache().get(hostname); if (cached == null || cached.size() == 0) { // If there is no cache hit (which may be the case for example when a NoopAuthoritativeDnsServerCache // is used), we will just directly use the provided nameservers. Collections.sort(nameservers, nameServerComparator); return new SequentialDnsServerAddressStream(nameservers, 0); } return cached; }
Creates a new {@link DnsServerAddressStream} to follow a redirected DNS query. By overriding this method you get the opportunity to sort the name servers before the redirect is followed. @param hostname the hostname. @param nameservers The addresses of the DNS servers which are used in the event of a redirect. This may contain resolved and unresolved addresses, so the {@link DnsServerAddressStream} that is used must allow unresolved addresses if you want to include these as well. @return A {@link DnsServerAddressStream} which will be used to follow the DNS redirect or {@code null} if none should be followed.
public final Future<InetAddress> resolve(String inetHost, Iterable<DnsRecord> additionals) { return resolve(inetHost, additionals, executor().<InetAddress>newPromise()); }
Resolves the specified name into an address. @param inetHost the name to resolve @param additionals additional records ({@code OPT}) @return the address as the result of the resolution
public final Future<InetAddress> resolve(String inetHost, Iterable<DnsRecord> additionals, Promise<InetAddress> promise) { checkNotNull(promise, "promise"); DnsRecord[] additionalsArray = toArray(additionals, true); try { doResolve(inetHost, additionalsArray, promise, resolveCache); return promise; } catch (Exception e) { return promise.setFailure(e); } }
Resolves the specified name into an address. @param inetHost the name to resolve @param additionals additional records ({@code OPT}) @param promise the {@link Promise} which will be fulfilled when the name resolution is finished @return the address as the result of the resolution
public final Future<List<InetAddress>> resolveAll(String inetHost, Iterable<DnsRecord> additionals) { return resolveAll(inetHost, additionals, executor().<List<InetAddress>>newPromise()); }
Resolves the specified host name into a list of addresses. @param inetHost the name to resolve @param additionals additional records ({@code OPT}) @return the list of addresses as the result of the resolution
public final Future<List<InetAddress>> resolveAll(String inetHost, Iterable<DnsRecord> additionals, Promise<List<InetAddress>> promise) { checkNotNull(promise, "promise"); DnsRecord[] additionalsArray = toArray(additionals, true); try { doResolveAll(inetHost, additionalsArray, promise, resolveCache); return promise; } catch (Exception e) { return promise.setFailure(e); } }
Resolves the specified host name into a list of addresses. @param inetHost the name to resolve @param additionals additional records ({@code OPT}) @param promise the {@link Promise} which will be fulfilled when the name resolution is finished @return the list of addresses as the result of the resolution
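A minimal end-to-end sketch of the public resolver API behind these methods, assuming Netty 4.1 with the NIO transport (the hostname is arbitrary):
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioDatagramChannel;
import io.netty.resolver.dns.DnsNameResolver;
import io.netty.resolver.dns.DnsNameResolverBuilder;
import io.netty.util.concurrent.Future;
import java.net.InetAddress;
import java.util.List;

public final class ResolveAllExample {
    public static void main(String[] args) throws Exception {
        NioEventLoopGroup group = new NioEventLoopGroup(1);
        DnsNameResolver resolver = new DnsNameResolverBuilder(group.next())
                .channelType(NioDatagramChannel.class)
                .build();
        try {
            // Resolves all A/AAAA records for the host and blocks for the result.
            Future<List<InetAddress>> future = resolver.resolveAll("netty.io");
            System.out.println(future.sync().getNow());
        } finally {
            resolver.close();
            group.shutdownGracefully();
        }
    }
}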
public final Future<List<DnsRecord>> resolveAll(DnsQuestion question) { return resolveAll(question, EMPTY_ADDITIONALS, executor().<List<DnsRecord>>newPromise()); }
Resolves the {@link DnsRecord}s that are matched by the specified {@link DnsQuestion}. Unlike {@link #query(DnsQuestion)}, this method handles redirection, CNAMEs and multiple name servers. If the specified {@link DnsQuestion} is {@code A} or {@code AAAA}, this method looks up the configured {@link HostsFileEntries} before sending a query to the name servers. If a match is found in the {@link HostsFileEntries}, a synthetic {@code A} or {@code AAAA} record will be returned. @param question the question @return the list of the {@link DnsRecord}s as the result of the resolution
public final Future<List<DnsRecord>> resolveAll(DnsQuestion question, Iterable<DnsRecord> additionals) { return resolveAll(question, additionals, executor().<List<DnsRecord>>newPromise()); }
Resolves the {@link DnsRecord}s that are matched by the specified {@link DnsQuestion}. Unlike {@link #query(DnsQuestion)}, this method handles redirection, CNAMEs and multiple name servers. If the specified {@link DnsQuestion} is {@code A} or {@code AAAA}, this method looks up the configured {@link HostsFileEntries} before sending a query to the name servers. If a match is found in the {@link HostsFileEntries}, a synthetic {@code A} or {@code AAAA} record will be returned. @param question the question @param additionals additional records ({@code OPT}) @return the list of the {@link DnsRecord}s as the result of the resolution
public final Future<List<DnsRecord>> resolveAll(DnsQuestion question, Iterable<DnsRecord> additionals, Promise<List<DnsRecord>> promise) { final DnsRecord[] additionalsArray = toArray(additionals, true); return resolveAll(question, additionalsArray, promise); }
Resolves the {@link DnsRecord}s that are matched by the specified {@link DnsQuestion}. Unlike {@link #query(DnsQuestion)}, this method handles redirection, CNAMEs and multiple name servers. If the specified {@link DnsQuestion} is {@code A} or {@code AAAA}, this method looks up the configured {@link HostsFileEntries} before sending a query to the name servers. If a match is found in the {@link HostsFileEntries}, a synthetic {@code A} or {@code AAAA} record will be returned. @param question the question @param additionals additional records ({@code OPT}) @param promise the {@link Promise} which will be fulfilled when the resolution is finished @return the list of the {@link DnsRecord}s as the result of the resolution
protected void doResolve(String inetHost, DnsRecord[] additionals, Promise<InetAddress> promise, DnsCache resolveCache) throws Exception { if (inetHost == null || inetHost.isEmpty()) { // If an empty hostname is used we should use "localhost", just like InetAddress.getByName(...) does. promise.setSuccess(loopbackAddress()); return; } final byte[] bytes = NetUtil.createByteArrayFromIpAddressString(inetHost); if (bytes != null) { // The inetHost is actually an ipaddress. promise.setSuccess(InetAddress.getByAddress(bytes)); return; } final String hostname = hostname(inetHost); InetAddress hostsFileEntry = resolveHostsFileEntry(hostname); if (hostsFileEntry != null) { promise.setSuccess(hostsFileEntry); return; } if (!doResolveCached(hostname, additionals, promise, resolveCache)) { doResolveUncached(hostname, additionals, promise, resolveCache, true); } }
Hook designed for extensibility so one can pass a different cache on each resolution attempt instead of using the global one.
protected void doResolveAll(String inetHost, DnsRecord[] additionals, Promise<List<InetAddress>> promise, DnsCache resolveCache) throws Exception { if (inetHost == null || inetHost.isEmpty()) { // If an empty hostname is used we should use "localhost", just like InetAddress.getAllByName(...) does. promise.setSuccess(Collections.singletonList(loopbackAddress())); return; } final byte[] bytes = NetUtil.createByteArrayFromIpAddressString(inetHost); if (bytes != null) { // The unresolvedAddress was created via a String that contains an ipaddress. promise.setSuccess(Collections.singletonList(InetAddress.getByAddress(bytes))); return; } final String hostname = hostname(inetHost); InetAddress hostsFileEntry = resolveHostsFileEntry(hostname); if (hostsFileEntry != null) { promise.setSuccess(Collections.singletonList(hostsFileEntry)); return; } if (!doResolveAllCached(hostname, additionals, promise, resolveCache, resolvedInternetProtocolFamilies)) { doResolveAllUncached(hostname, additionals, promise, resolveCache, false); } }
Hook designed for extensibility so one can pass a different cache on each resolution attempt instead of using the global one.
public Future<AddressedEnvelope<DnsResponse, InetSocketAddress>> query(DnsQuestion question) { return query(nextNameServerAddress(), question); }
Sends a DNS query with the specified question.
public Future<AddressedEnvelope<DnsResponse, InetSocketAddress>> query( DnsQuestion question, Iterable<DnsRecord> additionals) { return query(nextNameServerAddress(), question, additionals); }
Sends a DNS query with the specified question and additional records.
public Future<AddressedEnvelope<DnsResponse, InetSocketAddress>> query( DnsQuestion question, Promise<AddressedEnvelope<? extends DnsResponse, InetSocketAddress>> promise) { return query(nextNameServerAddress(), question, Collections.<DnsRecord>emptyList(), promise); }
Sends a DNS query with the specified question.
public Future<AddressedEnvelope<DnsResponse, InetSocketAddress>> query( InetSocketAddress nameServerAddr, DnsQuestion question) { return query0(nameServerAddr, question, EMPTY_ADDITIONALS, true, ch.newPromise(), ch.eventLoop().<AddressedEnvelope<? extends DnsResponse, InetSocketAddress>>newPromise()); }
Sends a DNS query with the specified question using the specified name server.
public Future<AddressedEnvelope<DnsResponse, InetSocketAddress>> query( InetSocketAddress nameServerAddr, DnsQuestion question, Iterable<DnsRecord> additionals) { return query0(nameServerAddr, question, toArray(additionals, false), true, ch.newPromise(), ch.eventLoop().<AddressedEnvelope<? extends DnsResponse, InetSocketAddress>>newPromise()); }
Sends a DNS query with the specified question and additional records using the specified name server.
@SuppressWarnings("unchecked") public <T> Http2StreamChannelBootstrap option(ChannelOption<T> option, T value) { if (option == null) { throw new NullPointerException("option"); } if (value == null) { synchronized (options) { options.remove(option); } } else { synchronized (options) { options.put(option, value); } } return this; }
Allows specifying a {@link ChannelOption} which is used for the {@link Http2StreamChannel} instances once they are created. Use a value of {@code null} to remove a previously set {@link ChannelOption}.
@SuppressWarnings("unchecked") public <T> Http2StreamChannelBootstrap attr(AttributeKey<T> key, T value) { if (key == null) { throw new NullPointerException("key"); } if (value == null) { synchronized (attrs) { attrs.remove(key); } } else { synchronized (attrs) { attrs.put(key, value); } } return this; }
Allows specifying an initial attribute of the newly created {@link Http2StreamChannel}. If the {@code value} is {@code null}, the attribute of the specified {@code key} is removed.
public static DnsServerAddresses singleton(final InetSocketAddress address) { if (address == null) { throw new NullPointerException("address"); } if (address.isUnresolved()) { throw new IllegalArgumentException("cannot use an unresolved DNS server address: " + address); } return new SingletonDnsServerAddresses(address); }
Returns the {@link DnsServerAddresses} that yields only a single {@code address}.
static int getIndex(CharSequence name) { Integer index = STATIC_INDEX_BY_NAME.get(name); if (index == null) { return -1; } return index; }
Returns the lowest index value for the given header field name in the static table. Returns -1 if the header field name is not in the static table.
static int getIndex(CharSequence name, CharSequence value) { int index = getIndex(name); if (index == -1) { return -1; } // Note this assumes all entries for a given header field are sequential. while (index <= length) { HpackHeaderField entry = getEntry(index); if (equalsConstantTime(name, entry.name) == 0) { break; } if (equalsConstantTime(value, entry.value) != 0) { return index; } index++; } return -1; }
Returns the index value for the given header field in the static table. Returns -1 if the header field is not in the static table.
private static CharSequenceMap<Integer> createMap() { int length = STATIC_TABLE.size(); @SuppressWarnings("unchecked") CharSequenceMap<Integer> ret = new CharSequenceMap<Integer>(true, UnsupportedValueConverter.<Integer>instance(), length); // Iterate through the static table in reverse order to // save the smallest index for a given name in the map. for (int index = length; index > 0; index--) { HpackHeaderField entry = getEntry(index); CharSequence name = entry.name; ret.set(name, index); } return ret; }
Creates a {@link CharSequenceMap} from header name to index value to allow quick lookup.
private static BitSet validCookieValueOctets() { BitSet bits = new BitSet(8); for (int i = 35; i < 127; i++) { // US-ASCII characters excluding CTLs (%x00-1F / %x7F) bits.set(i); } bits.set('"', false); // exclude DQUOTE = %x22 bits.set(',', false); // exclude comma = %x2C bits.set(';', false); // exclude semicolon = %x3B bits.set('\\', false); // exclude backslash = %x5C return bits; }
US-ASCII characters excluding CTLs, whitespace, DQUOTE, comma, semicolon, and backslash
private static BitSet validCookieNameOctets(BitSet validCookieValueOctets) { BitSet bits = new BitSet(8); bits.or(validCookieValueOctets); bits.set('(', false); bits.set(')', false); bits.set('<', false); bits.set('>', false); bits.set('@', false); bits.set(':', false); bits.set('/', false); bits.set('[', false); bits.set(']', false); bits.set('?', false); bits.set('=', false); bits.set('{', false); bits.set('}', false); bits.set(' ', false); bits.set('\t', false); return bits; }
| "{" | "}" | SP | HT
@Override public void upgradeFrom(ChannelHandlerContext ctx) { final ChannelPipeline p = ctx.pipeline(); p.remove(this); }
Upgrades to another protocol from HTTP. Removes the {@link Decoder} and {@link Encoder} from the pipeline.
protected void reportUntracedLeak(String resourceType) { logger.error("LEAK: {}.release() was not called before it's garbage-collected. " + "Enable advanced leak reporting to find out where the leak occurred. " + "To enable advanced leak reporting, " + "specify the JVM option '-D{}={}' or call {}.setLevel() " + "See http://netty.io/wiki/reference-counted-objects.html for more information.", resourceType, PROP_LEVEL, Level.ADVANCED.name().toLowerCase(), simpleClassName(this)); }
This method is called when an untraced leak is detected. It can be overridden for tracking how many times leaks have been detected.
public synchronized void start() { if (monitorActive) { return; } lastTime.set(milliSecondFromNano()); long localCheckInterval = checkInterval.get(); // if executor is null, it means it is piloted by a GlobalChannelTrafficCounter, so no executor if (localCheckInterval > 0 && executor != null) { monitorActive = true; monitor = new TrafficMonitoringTask(); scheduledFuture = executor.schedule(monitor, localCheckInterval, TimeUnit.MILLISECONDS); } }
Start the monitoring process.
public synchronized void stop() { if (!monitorActive) { return; } monitorActive = false; resetAccounting(milliSecondFromNano()); if (trafficShapingHandler != null) { trafficShapingHandler.doAccounting(this); } if (scheduledFuture != null) { scheduledFuture.cancel(true); } }
Stop the monitoring process.
synchronized void resetAccounting(long newLastTime) { long interval = newLastTime - lastTime.getAndSet(newLastTime); if (interval == 0) { // nothing to do return; } if (logger.isDebugEnabled() && interval > checkInterval() << 1) { logger.debug("Acct schedule not ok: " + interval + " > 2*" + checkInterval() + " from " + name); } lastReadBytes = currentReadBytes.getAndSet(0); lastWrittenBytes = currentWrittenBytes.getAndSet(0); lastReadThroughput = lastReadBytes * 1000 / interval; // nb byte / checkInterval in ms * 1000 (1s) lastWriteThroughput = lastWrittenBytes * 1000 / interval; // nb byte / checkInterval in ms * 1000 (1s) realWriteThroughput = realWrittenBytes.getAndSet(0) * 1000 / interval; lastWritingTime = Math.max(lastWritingTime, writingTime); lastReadingTime = Math.max(lastReadingTime, readingTime); }
Reset the accounting on Read and Write. @param newLastTime the millisecond Unix timestamp up to which the accounting should be considered up-to-date.
public void configure(long newCheckInterval) { long newInterval = newCheckInterval / 10 * 10; if (checkInterval.getAndSet(newInterval) != newInterval) { if (newInterval <= 0) { stop(); // No more active monitoring lastTime.set(milliSecondFromNano()); } else { // Start if necessary start(); } } }
Change checkInterval between two computations in millisecond. @param newCheckInterval The new check interval (in milliseconds)
@Deprecated public long readTimeToWait(final long size, final long limitTraffic, final long maxTime) { return readTimeToWait(size, limitTraffic, maxTime, milliSecondFromNano()); }
Returns the time to wait (if any) for the given length message, using the given limitTraffic and the max wait time. @param size the size of the received message, in bytes @param limitTraffic the traffic limit in bytes per second @param maxTime the max time in ms to wait in case of excess traffic @return the current time to wait (in ms) if needed for the Read operation.
public long readTimeToWait(final long size, final long limitTraffic, final long maxTime, final long now) { bytesRecvFlowControl(size); if (size == 0 || limitTraffic == 0) { return 0; } final long lastTimeCheck = lastTime.get(); long sum = currentReadBytes.get(); long localReadingTime = readingTime; long lastRB = lastReadBytes; final long interval = now - lastTimeCheck; long pastDelay = Math.max(lastReadingTime - lastTimeCheck, 0); if (interval > AbstractTrafficShapingHandler.MINIMAL_WAIT) { // Enough interval time to compute shaping long time = sum * 1000 / limitTraffic - interval + pastDelay; if (time > AbstractTrafficShapingHandler.MINIMAL_WAIT) { if (logger.isDebugEnabled()) { logger.debug("Time: " + time + ':' + sum + ':' + interval + ':' + pastDelay); } if (time > maxTime && now + time - localReadingTime > maxTime) { time = maxTime; } readingTime = Math.max(localReadingTime, now + time); return time; } readingTime = Math.max(localReadingTime, now); return 0; } // take the last read interval check to get enough interval time long lastsum = sum + lastRB; long lastinterval = interval + checkInterval.get(); long time = lastsum * 1000 / limitTraffic - lastinterval + pastDelay; if (time > AbstractTrafficShapingHandler.MINIMAL_WAIT) { if (logger.isDebugEnabled()) { logger.debug("Time: " + time + ':' + lastsum + ':' + lastinterval + ':' + pastDelay); } if (time > maxTime && now + time - localReadingTime > maxTime) { time = maxTime; } readingTime = Math.max(localReadingTime, now + time); return time; } readingTime = Math.max(localReadingTime, now); return 0; }
Returns the time to wait (if any) for the given length message, using the given limitTraffic and the max wait time. @param size the size of the received message, in bytes @param limitTraffic the traffic limit in bytes per second @param maxTime the max time in ms to wait in case of excess traffic @param now the current time @return the current time to wait (in ms) if needed for the Read operation.
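A worked example of the core arithmetic (illustrative only, ignoring the carried-over pastDelay term): with a 1 MiB/s limit, having read 1 MiB while only 500 ms of the check interval has elapsed suggests waiting roughly another 500 ms:
long limitTraffic = 1024 * 1024;  // allowed bytes per second
long bytesRead    = 1024 * 1024;  // bytes accumulated since the last check
long intervalMs   = 500;          // elapsed time since the last check, in ms
long waitMs = bytesRead * 1000 / limitTraffic - intervalMs;
System.out.println("suggested wait: " + waitMs + " ms");  // prints 500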
@Deprecated public long writeTimeToWait(final long size, final long limitTraffic, final long maxTime) { return writeTimeToWait(size, limitTraffic, maxTime, milliSecondFromNano()); }
Returns the time to wait (if any) for the given length message, using the given limitTraffic and the max wait time. @param size the size of the message to write, in bytes @param limitTraffic the traffic limit in bytes per second @param maxTime the max time in ms to wait in case of excess traffic @return the current time to wait (in ms) if needed for the Write operation.
public long writeTimeToWait(final long size, final long limitTraffic, final long maxTime, final long now) { bytesWriteFlowControl(size); if (size == 0 || limitTraffic == 0) { return 0; } final long lastTimeCheck = lastTime.get(); long sum = currentWrittenBytes.get(); long lastWB = lastWrittenBytes; long localWritingTime = writingTime; long pastDelay = Math.max(lastWritingTime - lastTimeCheck, 0); final long interval = now - lastTimeCheck; if (interval > AbstractTrafficShapingHandler.MINIMAL_WAIT) { // Enough interval time to compute shaping long time = sum * 1000 / limitTraffic - interval + pastDelay; if (time > AbstractTrafficShapingHandler.MINIMAL_WAIT) { if (logger.isDebugEnabled()) { logger.debug("Time: " + time + ':' + sum + ':' + interval + ':' + pastDelay); } if (time > maxTime && now + time - localWritingTime > maxTime) { time = maxTime; } writingTime = Math.max(localWritingTime, now + time); return time; } writingTime = Math.max(localWritingTime, now); return 0; } // take the last write interval check to get enough interval time long lastsum = sum + lastWB; long lastinterval = interval + checkInterval.get(); long time = lastsum * 1000 / limitTraffic - lastinterval + pastDelay; if (time > AbstractTrafficShapingHandler.MINIMAL_WAIT) { if (logger.isDebugEnabled()) { logger.debug("Time: " + time + ':' + lastsum + ':' + lastinterval + ':' + pastDelay); } if (time > maxTime && now + time - localWritingTime > maxTime) { time = maxTime; } writingTime = Math.max(localWritingTime, now + time); return time; } writingTime = Math.max(localWritingTime, now); return 0; }
Returns the time to wait (if any) for the given length message, using the given limitTraffic and the max wait time. @param size the size of the message to write, in bytes @param limitTraffic the traffic limit in bytes per second @param maxTime the max time in ms to wait in case of excess traffic @param now the current time @return the current time to wait (in ms) if needed for the Write operation.
private void updateParentsAlloc(int id) { while (id > 1) { int parentId = id >>> 1; byte val1 = value(id); byte val2 = value(id ^ 1); byte val = val1 < val2 ? val1 : val2; setValue(parentId, val); id = parentId; } }
Update method used by allocate. This is triggered only when a successor is allocated, and all its predecessors need to update their state: the value stored is the minimal depth at which the subtree rooted at id has some free space. @param id id
private void updateParentsFree(int id) { int logChild = depth(id) + 1; while (id > 1) { int parentId = id >>> 1; byte val1 = value(id); byte val2 = value(id ^ 1); logChild -= 1; // in first iteration equals log, subsequently reduce 1 from logChild as we traverse up if (val1 == logChild && val2 == logChild) { setValue(parentId, (byte) (logChild - 1)); } else { byte val = val1 < val2 ? val1 : val2; setValue(parentId, val); } id = parentId; } }
Update method used by free. This needs to handle the special case when both children are completely free, in which case the parent can be directly allocated on a request of size = child-size * 2. @param id id
private int allocateNode(int d) { int id = 1; int initial = - (1 << d); // has last d bits = 0 and rest all = 1 byte val = value(id); if (val > d) { // unusable return -1; } while (val < d || (id & initial) == 0) { // id & initial == 1 << d for all ids at depth d, for < d it is 0 id <<= 1; val = value(id); if (val > d) { id ^= 1; val = value(id); } } byte value = value(id); assert value == d && (id & initial) == 1 << d : String.format("val = %d, id & initial = %d, d = %d", value, id & initial, d); setValue(id, unusable); // mark as unusable updateParentsAlloc(id); return id; }
Algorithm to allocate an index in memoryMap when we query for a free node at depth d @param d depth @return index in memoryMap
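A minimal sketch (not Netty code) of the complete-binary-tree indexing allocateNode relies on: node ids start at 1, the parent of id is id >>> 1, its sibling is id ^ 1, and a node's depth is floor(log2(id)):
int id = 12;                                            // some node in the tree
int parent  = id >>> 1;                                 // 6
int sibling = id ^ 1;                                   // 13
int depth   = 31 - Integer.numberOfLeadingZeros(id);    // 3 == floor(log2(12))
System.out.printf("parent=%d sibling=%d depth=%d%n", parent, sibling, depth);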
private long allocateRun(int normCapacity) { int d = maxOrder - (log2(normCapacity) - pageShifts); int id = allocateNode(d); if (id < 0) { return id; } freeBytes -= runLength(id); return id; }
Allocate a run of pages (>=1) @param normCapacity normalized capacity @return index in memoryMap
private long allocateSubpage(int normCapacity) { // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it. // This is need as we may add it back and so alter the linked-list structure. PoolSubpage<T> head = arena.findSubpagePoolHead(normCapacity); int d = maxOrder; // subpages are only be allocated from pages i.e., leaves synchronized (head) { int id = allocateNode(d); if (id < 0) { return id; } final PoolSubpage<T>[] subpages = this.subpages; final int pageSize = this.pageSize; freeBytes -= pageSize; int subpageIdx = subpageIdx(id); PoolSubpage<T> subpage = subpages[subpageIdx]; if (subpage == null) { subpage = new PoolSubpage<T>(head, this, id, runOffset(id), pageSize, normCapacity); subpages[subpageIdx] = subpage; } else { subpage.init(head, normCapacity); } return subpage.allocate(); } }
Create/initialize a new PoolSubpage of normCapacity. Any PoolSubpage created/initialized here is added to the subpage pool in the PoolArena that owns this PoolChunk. @param normCapacity normalized capacity @return index in memoryMap
void free(long handle, ByteBuffer nioBuffer) { int memoryMapIdx = memoryMapIdx(handle); int bitmapIdx = bitmapIdx(handle); if (bitmapIdx != 0) { // free a subpage PoolSubpage<T> subpage = subpages[subpageIdx(memoryMapIdx)]; assert subpage != null && subpage.doNotDestroy; // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it. // This is need as we may add it back and so alter the linked-list structure. PoolSubpage<T> head = arena.findSubpagePoolHead(subpage.elemSize); synchronized (head) { if (subpage.free(head, bitmapIdx & 0x3FFFFFFF)) { return; } } } freeBytes += runLength(memoryMapIdx); setValue(memoryMapIdx, depth(memoryMapIdx)); updateParentsFree(memoryMapIdx); if (nioBuffer != null && cachedNioBuffers != null && cachedNioBuffers.size() < PooledByteBufAllocator.DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK) { cachedNioBuffers.offer(nioBuffer); } }
Free a subpage or a run of pages. When a subpage is freed from PoolSubpage, it might be added back to the subpage pool of the owning PoolArena. If the subpage pool in the PoolArena has at least one other PoolSubpage of the given elemSize, we can completely free the owning page so it is available for subsequent allocations. @param handle handle to free
int valueToFront(final byte value) { int index = 0; byte temp = mtf[0]; if (value != temp) { mtf[0] = value; while (value != temp) { index++; final byte temp2 = temp; temp = mtf[index]; mtf[index] = temp2; } } return index; }
Moves a value to the head of the MTF list (forward Move To Front transform). @param value The value to move @return The position the value moved from
byte indexToFront(final int index) { final byte value = mtf[index]; System.arraycopy(mtf, 0, mtf, 1, index); mtf[0] = value; return value; }
Gets the value from a given index and moves it to the front of the MTF list (inverse Move To Front transform). @param index The index to move @return The value at the given index
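A self-contained sketch (illustrative, not Netty code) of the forward Move To Front step these methods implement; the inverse simply indexes the same list and performs the identical move:
byte[] mtf = new byte[256];
for (int i = 0; i < mtf.length; i++) {
    mtf[i] = (byte) i;                 // initial list: 0, 1, 2, ... 255
}
byte symbol = 65;                      // 'A'
int index = 0;
while (mtf[index] != symbol) {
    index++;                           // find the symbol's current position
}
System.arraycopy(mtf, 0, mtf, 1, index);
mtf[0] = symbol;                       // move it to the front
System.out.println("forward index: " + index);   // 65 the first time, 0 if repeated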
void removeStream(DefaultStream stream, Iterator<?> itr) { final boolean removed; if (itr == null) { removed = streamMap.remove(stream.id()) != null; } else { itr.remove(); removed = true; } if (removed) { for (int i = 0; i < listeners.size(); i++) { try { listeners.get(i).onStreamRemoved(stream); } catch (Throwable cause) { logger.error("Caught Throwable from listener onStreamRemoved.", cause); } } if (closePromise != null && isStreamMapEmpty()) { closePromise.trySuccess(null); } } }
Remove a stream from the {@link #streamMap}. @param stream the stream to remove. @param itr an iterator that may be pointing to the stream during iteration and {@link Iterator#remove()} will be used if non-{@code null}.
protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, @SuppressWarnings("unused") I msg, boolean preferDirect) throws Exception { if (preferDirect) { return ctx.alloc().ioBuffer(); } else { return ctx.alloc().heapBuffer(); } }
Allocate a {@link ByteBuf} which will be used as argument of {@link #encode(ChannelHandlerContext, I, ByteBuf)}. Sub-classes may override this method to return {@link ByteBuf} with a perfect matching {@code initialCapacity}.
@Deprecated public static String encode(Cookie cookie) { return io.netty.handler.codec.http.cookie.ServerCookieEncoder.LAX.encode(cookie); }
Encodes the specified cookie into a Set-Cookie header value. @param cookie the cookie @return a single Set-Cookie header value
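Callers migrating off this deprecated method would typically use the RFC 6265 encoder directly; a short sketch (cookie name, value and attributes are arbitrary):
import io.netty.handler.codec.http.cookie.DefaultCookie;
import io.netty.handler.codec.http.cookie.ServerCookieEncoder;

public final class SetCookieExample {
    public static void main(String[] args) {
        DefaultCookie cookie = new DefaultCookie("sessionId", "a3fWa");
        cookie.setPath("/");
        cookie.setHttpOnly(true);
        // Produces a single Set-Cookie header value, e.g. "sessionId=a3fWa; Path=/; HTTPOnly"
        System.out.println(ServerCookieEncoder.STRICT.encode(cookie));
    }
}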
@Override protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception { in.markReaderIndex(); int preIndex = in.readerIndex(); int length = readRawVarint32(in); if (preIndex == in.readerIndex()) { return; } if (length < 0) { throw new CorruptedFrameException("negative length: " + length); } if (in.readableBytes() < length) { in.resetReaderIndex(); } else { out.add(in.readRetainedSlice(length)); } }
(just like LengthFieldBasedFrameDecoder)
private static int readRawVarint32(ByteBuf buffer) { if (!buffer.isReadable()) { return 0; } buffer.markReaderIndex(); byte tmp = buffer.readByte(); if (tmp >= 0) { return tmp; } else { int result = tmp & 127; if (!buffer.isReadable()) { buffer.resetReaderIndex(); return 0; } if ((tmp = buffer.readByte()) >= 0) { result |= tmp << 7; } else { result |= (tmp & 127) << 7; if (!buffer.isReadable()) { buffer.resetReaderIndex(); return 0; } if ((tmp = buffer.readByte()) >= 0) { result |= tmp << 14; } else { result |= (tmp & 127) << 14; if (!buffer.isReadable()) { buffer.resetReaderIndex(); return 0; } if ((tmp = buffer.readByte()) >= 0) { result |= tmp << 21; } else { result |= (tmp & 127) << 21; if (!buffer.isReadable()) { buffer.resetReaderIndex(); return 0; } result |= (tmp = buffer.readByte()) << 28; if (tmp < 0) { throw new CorruptedFrameException("malformed varint."); } } } } return result; } }
Reads a variable-length 32-bit int from the buffer. @return the decoded int if the buffer's readerIndex has been forwarded; otherwise a meaningless value (0) is returned and the readerIndex is reset because too few bytes were readable.
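A self-contained sketch (not Netty code) of the base-128 varint encoding that readRawVarint32 reverses: the low 7 bits of each byte carry data and the high bit marks continuation:
int value = 300;                                    // encodes as 0xAC 0x02
java.io.ByteArrayOutputStream encoded = new java.io.ByteArrayOutputStream();
int v = value;
while ((v & ~0x7F) != 0) {
    encoded.write((v & 0x7F) | 0x80);               // continuation bit set
    v >>>= 7;
}
encoded.write(v);                                   // final byte, continuation bit clear

int result = 0;
int shift = 0;
for (byte b : encoded.toByteArray()) {              // decode it back
    result |= (b & 0x7F) << shift;
    shift += 7;
}
System.out.println(result);                         // prints 300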
public static DnsOpCode valueOf(int b) { switch (b) { case 0x00: return QUERY; case 0x01: return IQUERY; case 0x02: return STATUS; case 0x04: return NOTIFY; case 0x05: return UPDATE; } return new DnsOpCode(b); }
Returns the {@link DnsOpCode} instance of the specified byte value.
protected void handshakeFailure(ChannelHandlerContext ctx, Throwable cause) throws Exception { logger.warn("{} TLS handshake failed:", ctx.channel(), cause); ctx.close(); }
Invoked on failed initial SSL/TLS handshake.
public static HttpVersion valueOf(String text) { if (text == null) { throw new NullPointerException("text"); } text = text.trim().toUpperCase(); if ("RTSP/1.0".equals(text)) { return RTSP_1_0; } return new HttpVersion(text, true); }
Returns an existing or new {@link HttpVersion} instance which matches to the specified RTSP version string. If the specified {@code text} is equal to {@code "RTSP/1.0"}, {@link #RTSP_1_0} will be returned. Otherwise, a new {@link HttpVersion} instance will be returned.
public void awaitSettings(long timeout, TimeUnit unit) throws Exception { if (!promise.awaitUninterruptibly(timeout, unit)) { throw new IllegalStateException("Timed out waiting for settings"); } if (!promise.isSuccess()) { throw new RuntimeException(promise.cause()); } }
Wait for this handler to be added after the upgrade to HTTP/2, and for initial preface handshake to complete. @param timeout Time to wait @param unit {@link java.util.concurrent.TimeUnit} for {@code timeout} @throws Exception if timeout or other failure occurs
public String encode(String name, String value) { return encode(new DefaultCookie(name, value)); }
Encodes the specified cookie into a Cookie header value. @param name the cookie name @param value the cookie value @return a Rfc6265 style Cookie header value
public String encode(Cookie cookie) { StringBuilder buf = stringBuilder(); encode(buf, checkNotNull(cookie, "cookie")); return stripTrailingSeparator(buf); }
Encodes the specified cookie into a Cookie header value. @param cookie the specified cookie @return a Rfc6265 style Cookie header value
public String encode(Cookie... cookies) { if (checkNotNull(cookies, "cookies").length == 0) { return null; } StringBuilder buf = stringBuilder(); if (strict) { if (cookies.length == 1) { encode(buf, cookies[0]); } else { Cookie[] cookiesSorted = Arrays.copyOf(cookies, cookies.length); Arrays.sort(cookiesSorted, COOKIE_COMPARATOR); for (Cookie c : cookiesSorted) { encode(buf, c); } } } else { for (Cookie c : cookies) { encode(buf, c); } } return stripTrailingSeparatorOrNull(buf); }
Encodes the specified cookies into a single Cookie header value. @param cookies some cookies @return a Rfc6265 style Cookie header value, null if no cookies are passed.
public String encode(Collection<? extends Cookie> cookies) { if (checkNotNull(cookies, "cookies").isEmpty()) { return null; } StringBuilder buf = stringBuilder(); if (strict) { if (cookies.size() == 1) { encode(buf, cookies.iterator().next()); } else { Cookie[] cookiesSorted = cookies.toArray(new Cookie[0]); Arrays.sort(cookiesSorted, COOKIE_COMPARATOR); for (Cookie c : cookiesSorted) { encode(buf, c); } } } else { for (Cookie c : cookies) { encode(buf, c); } } return stripTrailingSeparatorOrNull(buf); }
Encodes the specified cookies into a single Cookie header value. @param cookies some cookies @return a Rfc6265 style Cookie header value, null if no cookies are passed.
public String encode(Iterable<? extends Cookie> cookies) { Iterator<? extends Cookie> cookiesIt = checkNotNull(cookies, "cookies").iterator(); if (!cookiesIt.hasNext()) { return null; } StringBuilder buf = stringBuilder(); if (strict) { Cookie firstCookie = cookiesIt.next(); if (!cookiesIt.hasNext()) { encode(buf, firstCookie); } else { List<Cookie> cookiesList = InternalThreadLocalMap.get().arrayList(); cookiesList.add(firstCookie); while (cookiesIt.hasNext()) { cookiesList.add(cookiesIt.next()); } Cookie[] cookiesSorted = cookiesList.toArray(new Cookie[0]); Arrays.sort(cookiesSorted, COOKIE_COMPARATOR); for (Cookie c : cookiesSorted) { encode(buf, c); } } } else { while (cookiesIt.hasNext()) { encode(buf, cookiesIt.next()); } } return stripTrailingSeparatorOrNull(buf); }
Encodes the specified cookies into a single Cookie header value. @param cookies some cookies @return a Rfc6265 style Cookie header value, null if no cookies are passed.
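A short usage sketch of the public Cookie-header encoder these overloads belong to (names and values are arbitrary):
import io.netty.handler.codec.http.cookie.ClientCookieEncoder;
import io.netty.handler.codec.http.cookie.DefaultCookie;

public final class CookieHeaderExample {
    public static void main(String[] args) {
        // Produces a single Cookie header value such as "a=1; b=2"
        String header = ClientCookieEncoder.STRICT.encode(
                new DefaultCookie("a", "1"), new DefaultCookie("b", "2"));
        System.out.println(header);
    }
}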
public OCSPReq build() throws OCSPException, IOException, CertificateEncodingException { SecureRandom generator = checkNotNull(this.generator, "generator"); DigestCalculator calculator = checkNotNull(this.calculator, "calculator"); X509Certificate certificate = checkNotNull(this.certificate, "certificate"); X509Certificate issuer = checkNotNull(this.issuer, "issuer"); BigInteger serial = certificate.getSerialNumber(); CertificateID certId = new CertificateID(calculator, new X509CertificateHolder(issuer.getEncoded()), serial); OCSPReqBuilder builder = new OCSPReqBuilder(); builder.addRequest(certId); byte[] nonce = new byte[8]; generator.nextBytes(nonce); Extension[] extensions = new Extension[] { new Extension(OCSPObjectIdentifiers.id_pkix_ocsp_nonce, false, new DEROctetString(nonce)) }; builder.setRequestExtensions(new Extensions(extensions)); return builder.build(); }
ATTENTION: The returned {@link OCSPReq} is not re-usable/cacheable! It contains a one-time nonce, and CAs will (should) reject subsequent requests that have the same nonce value.
private boolean decodeBulkStringEndOfLine(ByteBuf in, List<Object> out) throws Exception { if (in.readableBytes() < RedisConstants.EOL_LENGTH) { return false; } readEndOfLine(in); out.add(FullBulkStringRedisMessage.EMPTY_INSTANCE); resetDecoder(); return true; }
$0\r\n <here> \r\n
private boolean decodeBulkStringContent(ByteBuf in, List<Object> out) throws Exception { final int readableBytes = in.readableBytes(); if (readableBytes == 0 || remainingBulkLength == 0 && readableBytes < RedisConstants.EOL_LENGTH) { return false; } // if this is last frame. if (readableBytes >= remainingBulkLength + RedisConstants.EOL_LENGTH) { ByteBuf content = in.readSlice(remainingBulkLength); readEndOfLine(in); // Only call retain after readEndOfLine(...) as the method may throw an exception. out.add(new DefaultLastBulkStringRedisContent(content.retain())); resetDecoder(); return true; } // chunked write. int toRead = Math.min(remainingBulkLength, readableBytes); remainingBulkLength -= toRead; out.add(new DefaultBulkStringRedisContent(in.readSlice(toRead).retain())); return true; }
${expectedBulkLength}\r\n <here> {data...}\r\n
public static RejectedExecutionHandler backoff(final int retries, long backoffAmount, TimeUnit unit) { ObjectUtil.checkPositive(retries, "retries"); final long backOffNanos = unit.toNanos(backoffAmount); return new RejectedExecutionHandler() { @Override public void rejected(Runnable task, SingleThreadEventExecutor executor) { if (!executor.inEventLoop()) { for (int i = 0; i < retries; i++) { // Try to wake up the executor so it will empty its task queue. executor.wakeup(false); LockSupport.parkNanos(backOffNanos); if (executor.offerTask(task)) { return; } } } // Either we tried to add the task from within the EventLoop or we was not able to add it even with // backoff. throw new RejectedExecutionException(); } }; }
Tries to back off for a configured amount of time when the task cannot be added due to restrictions. This is only done if the task was added from outside of the event loop, which means {@link EventExecutor#inEventLoop()} returns {@code false}.
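A hedged usage sketch, assuming the DefaultEventExecutorGroup constructor overload that accepts a RejectedExecutionHandler (check your Netty 4.1 version):
import io.netty.util.concurrent.DefaultEventExecutorGroup;
import io.netty.util.concurrent.DefaultThreadFactory;
import io.netty.util.concurrent.RejectedExecutionHandlers;
import java.util.concurrent.TimeUnit;

public final class BackoffExample {
    public static void main(String[] args) {
        // Retry offering a rejected task up to 3 times, parking 100 ms between attempts.
        DefaultEventExecutorGroup group = new DefaultEventExecutorGroup(
                1,                                               // threads
                new DefaultThreadFactory("worker"),
                16,                                              // max pending tasks
                RejectedExecutionHandlers.backoff(3, 100, TimeUnit.MILLISECONDS));
        group.next().execute(() -> System.out.println("task ran"));
        group.shutdownGracefully();
    }
}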
private static void checkHttpDataSize(HttpData data) { try { data.checkSize(data.length()); } catch (IOException ignored) { throw new IllegalArgumentException("Attribute bigger than maxSize allowed"); } }
Utility method that verifies the {@link HttpData} does not exceed its allowed maximum size, rethrowing a violation as an {@link IllegalArgumentException}.
@SuppressWarnings("unchecked") public Bootstrap resolver(AddressResolverGroup<?> resolver) { this.resolver = (AddressResolverGroup<SocketAddress>) (resolver == null ? DEFAULT_RESOLVER : resolver); return this; }
Sets the {@link AddressResolverGroup} which will resolve the address of the unresolved named address. @param resolver the {@link AddressResolverGroup} for this {@code Bootstrap}; may be {@code null}, in which case a default resolver will be used @see io.netty.resolver.DefaultAddressResolverGroup
public ChannelFuture connect(String inetHost, int inetPort) { return connect(InetSocketAddress.createUnresolved(inetHost, inetPort)); }
Connect a {@link Channel} to the remote peer.
public ChannelFuture connect(SocketAddress remoteAddress) { if (remoteAddress == null) { throw new NullPointerException("remoteAddress"); } validate(); return doResolveAndConnect(remoteAddress, config.localAddress()); }
Connect a {@link Channel} to the remote peer.
static FormattingTuple format(String messagePattern, Object arg) { return arrayFormat(messagePattern, new Object[]{arg}); }
Performs single argument substitution for the 'messagePattern' passed as parameter. <p/> For example, <p/> <pre> MessageFormatter.format(&quot;Hi {}.&quot;, &quot;there&quot;); </pre> <p/> will return the string "Hi there.". <p/> @param messagePattern The message pattern which will be parsed and formatted @param arg The argument to be substituted in place of the formatting anchor @return The formatted message
static FormattingTuple format(final String messagePattern, Object argA, Object argB) { return arrayFormat(messagePattern, new Object[]{argA, argB}); }
Performs a two argument substitution for the 'messagePattern' passed as parameter. <p/> For example, <p/> <pre> MessageFormatter.format(&quot;Hi {}. My name is {}.&quot;, &quot;Alice&quot;, &quot;Bob&quot;); </pre> <p/> will return the string "Hi Alice. My name is Bob.". @param messagePattern The message pattern which will be parsed and formatted @param argA The argument to be substituted in place of the first formatting anchor @param argB The argument to be substituted in place of the second formatting anchor @return The formatted message
static FormattingTuple arrayFormat(final String messagePattern, final Object[] argArray) { if (argArray == null || argArray.length == 0) { return new FormattingTuple(messagePattern, null); } int lastArrIdx = argArray.length - 1; Object lastEntry = argArray[lastArrIdx]; Throwable throwable = lastEntry instanceof Throwable? (Throwable) lastEntry : null; if (messagePattern == null) { return new FormattingTuple(null, throwable); } int j = messagePattern.indexOf(DELIM_STR); if (j == -1) { // this is a simple string return new FormattingTuple(messagePattern, throwable); } StringBuilder sbuf = new StringBuilder(messagePattern.length() + 50); int i = 0; int L = 0; do { boolean notEscaped = j == 0 || messagePattern.charAt(j - 1) != ESCAPE_CHAR; if (notEscaped) { // normal case sbuf.append(messagePattern, i, j); } else { sbuf.append(messagePattern, i, j - 1); // check that escape char is not is escaped: "abc x:\\{}" notEscaped = j >= 2 && messagePattern.charAt(j - 2) == ESCAPE_CHAR; } i = j + 2; if (notEscaped) { deeplyAppendParameter(sbuf, argArray[L], null); L++; if (L > lastArrIdx) { break; } } else { sbuf.append(DELIM_STR); } j = messagePattern.indexOf(DELIM_STR, i); } while (j != -1); // append the characters following the last {} pair. sbuf.append(messagePattern, i, messagePattern.length()); return new FormattingTuple(sbuf.toString(), L <= lastArrIdx? throwable : null); }
Same principle as the {@link #format(String, Object)} and {@link #format(String, Object, Object)} methods except that any number of arguments can be passed in an array. @param messagePattern The message pattern which will be parsed and formatted @param argArray An array of arguments to be substituted in place of formatting anchors @return The formatted message
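MessageFormatter is package-private in io.netty.util.internal.logging, so this is only an illustrative sketch of the substitution behaviour, as if called from within that package:
// Within io.netty.util.internal.logging:
FormattingTuple t1 = MessageFormatter.format("Hi {}.", "there");
System.out.println(t1.getMessage());        // Hi there.

Object[] args = { "Alice", "Bob", new IllegalStateException("boom") };
FormattingTuple t2 = MessageFormatter.arrayFormat("Hi {}. My name is {}.", args);
System.out.println(t2.getMessage());        // Hi Alice. My name is Bob.
System.out.println(t2.getThrowable());      // trailing Throwable is extracted, not formatted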
private static void deeplyAppendParameter(StringBuilder sbuf, Object o, Set<Object[]> seenSet) { if (o == null) { sbuf.append("null"); return; } Class<?> objClass = o.getClass(); if (!objClass.isArray()) { if (Number.class.isAssignableFrom(objClass)) { // Prevent String instantiation for some number types if (objClass == Long.class) { sbuf.append(((Long) o).longValue()); } else if (objClass == Integer.class || objClass == Short.class || objClass == Byte.class) { sbuf.append(((Number) o).intValue()); } else if (objClass == Double.class) { sbuf.append(((Double) o).doubleValue()); } else if (objClass == Float.class) { sbuf.append(((Float) o).floatValue()); } else { safeObjectAppend(sbuf, o); } } else { safeObjectAppend(sbuf, o); } } else { // check for primitive array types because they // unfortunately cannot be cast to Object[] sbuf.append('['); if (objClass == boolean[].class) { booleanArrayAppend(sbuf, (boolean[]) o); } else if (objClass == byte[].class) { byteArrayAppend(sbuf, (byte[]) o); } else if (objClass == char[].class) { charArrayAppend(sbuf, (char[]) o); } else if (objClass == short[].class) { shortArrayAppend(sbuf, (short[]) o); } else if (objClass == int[].class) { intArrayAppend(sbuf, (int[]) o); } else if (objClass == long[].class) { longArrayAppend(sbuf, (long[]) o); } else if (objClass == float[].class) { floatArrayAppend(sbuf, (float[]) o); } else if (objClass == double[].class) { doubleArrayAppend(sbuf, (double[]) o); } else { objectArrayAppend(sbuf, (Object[]) o, seenSet); } sbuf.append(']'); } }
special treatment of array values was suggested by 'lizongbo'
public static Executor apply(final Executor executor, final EventExecutor eventExecutor) { ObjectUtil.checkNotNull(executor, "executor"); ObjectUtil.checkNotNull(eventExecutor, "eventExecutor"); return new Executor() { @Override public void execute(final Runnable command) { executor.execute(apply(command, eventExecutor)); } }; }
Decorate the given {@link Executor} and ensure {@link #currentExecutor()} will return {@code eventExecutor} when called from within the {@link Runnable} during execution.
public static Runnable apply(final Runnable command, final EventExecutor eventExecutor) { ObjectUtil.checkNotNull(command, "command"); ObjectUtil.checkNotNull(eventExecutor, "eventExecutor"); return new Runnable() { @Override public void run() { setCurrentEventExecutor(eventExecutor); try { command.run(); } finally { setCurrentEventExecutor(null); } } }; }
Decorate the given {@link Runnable} and ensure {@link #currentExecutor()} will return {@code eventExecutor} when called from within the {@link Runnable} during execution.
public static ThreadFactory apply(final ThreadFactory threadFactory, final EventExecutor eventExecutor) { ObjectUtil.checkNotNull(threadFactory, "threadFactory"); ObjectUtil.checkNotNull(eventExecutor, "eventExecutor"); return new ThreadFactory() { @Override public Thread newThread(Runnable r) { return threadFactory.newThread(apply(r, eventExecutor)); } }; }
Decorate the given {@link ThreadFactory} and ensure {@link #currentExecutor()} will return {@code eventExecutor} when called from within the {@link Runnable} during execution.
@Deprecated protected String validateValue(String name, String value) { return validateAttributeValue(name, value); }
Validates a cookie attribute value, throwing an {@link IllegalArgumentException} if it is invalid. Only intended to be used by {@link io.netty.handler.codec.http.DefaultCookie}. @param name attribute name @param value attribute value @return the trimmed, validated attribute value @deprecated CookieUtil is package private, will be removed once old Cookie API is dropped
public static SmtpCommand valueOf(CharSequence commandName) { ObjectUtil.checkNotNull(commandName, "commandName"); SmtpCommand command = COMMANDS.get(commandName.toString()); return command != null ? command : new SmtpCommand(AsciiString.of(commandName)); }
Returns the {@link SmtpCommand} for the given command name.
public static int epollBusyWait(FileDescriptor epollFd, EpollEventArray events) throws IOException { int ready = epollBusyWait0(epollFd.intValue(), events.memoryAddress(), events.length()); if (ready < 0) { throw newIOException("epoll_wait", ready); } return ready; }
Non-blocking variant of {@link #epollWait(FileDescriptor, EpollEventArray, FileDescriptor, int, int)} that will also hint to processor we are in a busy-wait loop.
public static int splice(int fd, long offIn, int fdOut, long offOut, long len) throws IOException { int res = splice0(fd, offIn, fdOut, offOut, len); if (res >= 0) { return res; } return ioResult("splice", res, SPLICE_CONNECTION_RESET_EXCEPTION, SPLICE_CLOSED_CHANNEL_EXCEPTION); }
File-descriptor operations
void recycle() { for (int i = 0 ; i < size; i ++) { array[i] = null; } size = 0; insertSinceRecycled = false; recycler.recycle(this); }
Recycle the array which will clear it and null out all entries in the internal storage.
private static int contentLength(Object msg) { if (msg instanceof MemcacheContent) { return ((MemcacheContent) msg).content().readableBytes(); } if (msg instanceof ByteBuf) { return ((ByteBuf) msg).readableBytes(); } if (msg instanceof FileRegion) { return (int) ((FileRegion) msg).count(); } throw new IllegalStateException("unexpected message type: " + StringUtil.simpleClassName(msg)); }
Determine the content length of the given object. @param msg the object to determine the length of. @return the determined content length.
@Deprecated public DomainNameMapping<V> add(String hostname, V output) { map.put(normalizeHostname(checkNotNull(hostname, "hostname")), checkNotNull(output, "output")); return this; }
Adds a mapping that maps the specified (optionally wildcard) host name to the specified output value. <p> <a href="http://en.wikipedia.org/wiki/Wildcard_DNS_record">DNS wildcard</a> is supported as hostname. For example, you can use {@code *.netty.io} to match {@code netty.io} and {@code downloads.netty.io}. </p> @param hostname the host name (optionally wildcard) @param output the output value that will be returned by {@link #map(String)} when the specified host name matches the specified input host name @deprecated use {@link DomainNameMappingBuilder} to create and fill the mapping instead
static boolean matches(String template, String hostName) { if (template.startsWith("*.")) { return template.regionMatches(2, hostName, 0, hostName.length()) || commonSuffixOfLength(hostName, template, template.length() - 1); } return template.equals(hostName); }
Simple function to match <a href="http://en.wikipedia.org/wiki/Wildcard_DNS_record">DNS wildcard</a>.
static String normalizeHostname(String hostname) { if (needsNormalization(hostname)) { hostname = IDN.toASCII(hostname, IDN.ALLOW_UNASSIGNED); } return hostname.toLowerCase(Locale.US); }
IDNA ASCII conversion and case normalization
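A usage sketch of the builder that the deprecated add method above points to (hostnames and values are arbitrary):
import io.netty.util.DomainNameMapping;
import io.netty.util.DomainNameMappingBuilder;

public final class DomainMappingExample {
    public static void main(String[] args) {
        DomainNameMapping<String> mapping = new DomainNameMappingBuilder<String>("default")
                .add("*.netty.io", "netty")
                .build();
        System.out.println(mapping.map("downloads.netty.io"));  // netty
        System.out.println(mapping.map("example.com"));         // default
    }
}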
private int writeBytes(ChannelOutboundBuffer in, ByteBuf buf) throws Exception { int readableBytes = buf.readableBytes(); if (readableBytes == 0) { in.remove(); return 0; } if (buf.hasMemoryAddress() || buf.nioBufferCount() == 1) { return doWriteBytes(in, buf); } else { ByteBuffer[] nioBuffers = buf.nioBuffers(); return writeBytesMultiple(in, nioBuffers, nioBuffers.length, readableBytes, config().getMaxBytesPerGatheringWrite()); } }
Write bytes from the given {@link ByteBuf} to the underlying {@link java.nio.channels.Channel}. @param in the collection which contains objects to write. @param buf the {@link ByteBuf} from which the bytes should be written @return The value that should be decremented from the write quantum which starts at {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows: <ul> <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content) is encountered</li> <li>1 - if a single call to write data was made to the OS</li> <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but no data was accepted</li> </ul>
private int writeBytesMultiple(ChannelOutboundBuffer in, IovArray array) throws IOException { final long expectedWrittenBytes = array.size(); assert expectedWrittenBytes != 0; final int cnt = array.count(); assert cnt != 0; final long localWrittenBytes = socket.writevAddresses(array.memoryAddress(0), cnt); if (localWrittenBytes > 0) { adjustMaxBytesPerGatheringWrite(expectedWrittenBytes, localWrittenBytes, array.maxBytes()); in.removeBytes(localWrittenBytes); return 1; } return WRITE_STATUS_SNDBUF_FULL; }
Write multiple bytes via {@link IovArray}. @param in the collection which contains objects to write. @param array The array which contains the content to write. @return The value that should be decremented from the write quantum which starts at {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows: <ul> <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content) is encountered</li> <li>1 - if a single call to write data was made to the OS</li> <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but no data was accepted</li> </ul> @throws IOException If an I/O exception occurs during write.
private int writeBytesMultiple( ChannelOutboundBuffer in, ByteBuffer[] nioBuffers, int nioBufferCnt, long expectedWrittenBytes, long maxBytesPerGatheringWrite) throws IOException { assert expectedWrittenBytes != 0; if (expectedWrittenBytes > maxBytesPerGatheringWrite) { expectedWrittenBytes = maxBytesPerGatheringWrite; } final long localWrittenBytes = socket.writev(nioBuffers, 0, nioBufferCnt, expectedWrittenBytes); if (localWrittenBytes > 0) { adjustMaxBytesPerGatheringWrite(expectedWrittenBytes, localWrittenBytes, maxBytesPerGatheringWrite); in.removeBytes(localWrittenBytes); return 1; } return WRITE_STATUS_SNDBUF_FULL; }
Write multiple bytes via {@link ByteBuffer} array. @param in the collection which contains objects to write. @param nioBuffers The buffers to write. @param nioBufferCnt The number of buffers to write. @param expectedWrittenBytes The number of bytes we expect to write. @param maxBytesPerGatheringWrite The maximum number of bytes we should attempt to write. @return The value that should be decremented from the write quantum which starts at {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows: <ul> <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content) is encountered</li> <li>1 - if a single call to write data was made to the OS</li> <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but no data was accepted</li> </ul> @throws IOException If an I/O exception occurs during write.
private int writeFileRegion(ChannelOutboundBuffer in, FileRegion region) throws Exception { if (region.transferred() >= region.count()) { in.remove(); return 0; } if (byteChannel == null) { byteChannel = new KQueueSocketWritableByteChannel(); } final long flushedAmount = region.transferTo(byteChannel, region.transferred()); if (flushedAmount > 0) { in.progress(flushedAmount); if (region.transferred() >= region.count()) { in.remove(); } return 1; } return WRITE_STATUS_SNDBUF_FULL; }
Write a {@link FileRegion} @param in the collection which contains objects to write. @param region the {@link FileRegion} from which the bytes should be written @return The value that should be decremented from the write quantum which starts at {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows: <ul> <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content) is encountered</li> <li>1 - if a single call to write data was made to the OS</li> <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but no data was accepted</li> </ul>
protected int doWriteSingle(ChannelOutboundBuffer in) throws Exception { // The outbound buffer contains only one message or it contains a file region. Object msg = in.current(); if (msg instanceof ByteBuf) { return writeBytes(in, (ByteBuf) msg); } else if (msg instanceof DefaultFileRegion) { return writeDefaultFileRegion(in, (DefaultFileRegion) msg); } else if (msg instanceof FileRegion) { return writeFileRegion(in, (FileRegion) msg); } else { // Should never reach here. throw new Error(); } }
Attempt to write a single object. @param in the collection which contains objects to write. @return The value that should be decremented from the write quantum which starts at {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows: <ul> <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content) is encountered</li> <li>1 - if a single call to write data was made to the OS</li> <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but no data was accepted</li> </ul> @throws Exception If an I/O error occurs.
private int doWriteMultiple(ChannelOutboundBuffer in) throws Exception { final long maxBytesPerGatheringWrite = config().getMaxBytesPerGatheringWrite(); IovArray array = ((KQueueEventLoop) eventLoop()).cleanArray(); array.maxBytes(maxBytesPerGatheringWrite); in.forEachFlushedMessage(array); if (array.count() >= 1) { // TODO: Handle the case where cnt == 1 specially. return writeBytesMultiple(in, array); } // cnt == 0, which means the outbound buffer contained empty buffers only. in.removeBytes(0); return 0; }
Attempt to write multiple {@link ByteBuf} objects. @param in the collection which contains objects to write. @return The value that should be decremented from the write quantum which starts at {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows: <ul> <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content) is encountered</li> <li>1 - if a single call to write data was made to the OS</li> <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but no data was accepted</li> </ul> @throws Exception If an I/O error occurs.
public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception { Http2HeadersSink sink = new Http2HeadersSink(streamId, headers, maxHeaderListSize, validateHeaders); decode(in, sink); // Now that we've read all of our headers we can perform the validation steps. We must // delay throwing until this point to prevent dynamic table corruption. sink.finish(); }
Decode the header block into header fields. <p> This method assumes the entire header block is contained in {@code in}.
public void setMaxHeaderTableSize(long maxHeaderTableSize) throws Http2Exception { if (maxHeaderTableSize < MIN_HEADER_TABLE_SIZE || maxHeaderTableSize > MAX_HEADER_TABLE_SIZE) { throw connectionError(PROTOCOL_ERROR, "Header Table Size must be >= %d and <= %d but was %d", MIN_HEADER_TABLE_SIZE, MAX_HEADER_TABLE_SIZE, maxHeaderTableSize); } maxDynamicTableSize = maxHeaderTableSize; if (maxDynamicTableSize < encoderMaxDynamicTableSize) { // decoder requires less space than encoder // encoder MUST signal this change maxDynamicTableSizeChangeRequired = true; hpackDynamicTable.setCapacity(maxDynamicTableSize); } }
Set the maximum table size. If this is below the maximum size of the dynamic table used by the encoder, the beginning of the next header block MUST signal this change.
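A concrete scenario may help: suppose the remote encoder currently assumes a 4096-byte dynamic table and the local decoder is reconfigured down to 1024 bytes. The sketch below only restates that decision with plain numbers; the bounds and the mustSignalEncoder flag are assumptions for illustration, not the HPACK codec's API.

public final class HpackTableSizeSketch {

    // Assumed bounds for illustration; SETTINGS_HEADER_TABLE_SIZE is an unsigned
    // 32-bit value, so the maximum is 2^32 - 1.
    static final long MIN_HEADER_TABLE_SIZE = 0;
    static final long MAX_HEADER_TABLE_SIZE = 0xFFFFFFFFL;

    public static void main(String[] args) {
        long encoderAssumedSize = 4096; // what the remote encoder currently assumes
        long newLocalMaximum = 1024;    // what the local decoder is now willing to hold

        if (newLocalMaximum < MIN_HEADER_TABLE_SIZE || newLocalMaximum > MAX_HEADER_TABLE_SIZE) {
            throw new IllegalArgumentException("table size out of range: " + newLocalMaximum);
        }

        // A dynamic-table-size update only has to appear at the beginning of the next
        // header block when the new maximum is smaller than what the encoder assumes.
        boolean mustSignalEncoder = newLocalMaximum < encoderAssumedSize;
        System.out.println("signal change in next header block: " + mustSignalEncoder); // true
    }
}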
private DnsServerAddressStream getNameServersFromCache(String hostname) {
        int len = hostname.length();

        if (len == 0) {
            // We never cache for root servers.
            return null;
        }

        // We always store in the cache with a trailing '.'.
        if (hostname.charAt(len - 1) != '.') {
            hostname += ".";
        }

        int idx = hostname.indexOf('.');
        if (idx == hostname.length() - 1) {
            // We are not interested in handling '.' as we should never serve the root servers from cache.
            return null;
        }

        // We start from the closest match and then move down.
        for (;;) {
            // Skip '.' as well.
            hostname = hostname.substring(idx + 1);

            int idx2 = hostname.indexOf('.');
            if (idx2 <= 0 || idx2 == hostname.length() - 1) {
                // We are not interested in handling '.TLD.' as we should never serve the root servers from cache.
                return null;
            }
            idx = idx2;

            DnsServerAddressStream entries = authoritativeDnsServerCache().get(hostname);
            if (entries != null) {
                // The returned List may contain unresolved InetSocketAddress instances that will be
                // resolved on the fly in query(....).
                return entries;
            }
        }
    }
Returns the {@link DnsServerAddressStream} that was cached for the given hostname, or {@code null} if none could be found.
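The lookup order above walks from the most specific name toward the TLD, probing the cache once per parent domain. Here is a standalone sketch of that walk, using a plain Map in place of the real authoritative-server cache (the map and its contents are assumptions made only for this example):

import java.util.LinkedHashMap;
import java.util.Map;

public final class NameServerCacheWalk {

    public static void main(String[] args) {
        // Hypothetical cache contents, keyed by a domain name with a trailing '.'.
        Map<String, String> cache = new LinkedHashMap<String, String>();
        cache.put("example.com.", "ns1.example.com.");

        System.out.println(lookup(cache, "a.b.example.com"));    // probes b.example.com., then example.com.
        System.out.println(lookup(cache, "foo.nonexistent.io")); // probes nonexistent.io., then stops at the TLD
    }

    static String lookup(Map<String, String> cache, String hostname) {
        if (hostname.isEmpty()) {
            return null; // never serve root servers from the cache
        }
        if (hostname.charAt(hostname.length() - 1) != '.') {
            hostname += ".";
        }
        int idx = hostname.indexOf('.');
        if (idx == hostname.length() - 1) {
            return null; // effectively the root, nothing to look up
        }
        for (;;) {
            hostname = hostname.substring(idx + 1); // drop the leftmost label
            int idx2 = hostname.indexOf('.');
            if (idx2 <= 0 || idx2 == hostname.length() - 1) {
                return null; // reached '<TLD>.'; stop before the root servers
            }
            idx = idx2;
            String entry = cache.get(hostname);
            if (entry != null) {
                return entry;
            }
        }
    }
}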
private boolean handleRedirect(
            DnsQuestion question, AddressedEnvelope<DnsResponse, InetSocketAddress> envelope,
            final DnsQueryLifecycleObserver queryLifecycleObserver, Promise<List<T>> promise) {
        final DnsResponse res = envelope.content();

        // Check if we have answers; if not, this may be a non-authoritative NS response and so redirects
        // must be handled.
        if (res.count(DnsSection.ANSWER) == 0) {
            AuthoritativeNameServerList serverNames = extractAuthoritativeNameServers(question.name(), res);
            if (serverNames != null) {
                int additionalCount = res.count(DnsSection.ADDITIONAL);
                AuthoritativeDnsServerCache authoritativeDnsServerCache = authoritativeDnsServerCache();
                for (int i = 0; i < additionalCount; i++) {
                    final DnsRecord r = res.recordAt(DnsSection.ADDITIONAL, i);

                    if (r.type() == DnsRecordType.A && !parent.supportsARecords() ||
                        r.type() == DnsRecordType.AAAA && !parent.supportsAAAARecords()) {
                        continue;
                    }

                    // We may have multiple ADDITIONAL entries for the same nameserver name. For example one AAAA and
                    // one A record.
                    serverNames.handleWithAdditional(parent, r, authoritativeDnsServerCache);
                }

                // Process all unresolved nameservers as well.
                serverNames.handleWithoutAdditionals(parent, resolveCache(), authoritativeDnsServerCache);

                List<InetSocketAddress> addresses = serverNames.addressList();

                // Give the user the chance to sort or filter the used servers for the query.
                DnsServerAddressStream serverStream = parent.newRedirectDnsServerStream(
                        question.name(), addresses);
                if (serverStream != null) {
                    query(serverStream, 0, question,
                          queryLifecycleObserver.queryRedirected(new DnsAddressStreamList(serverStream)),
                          true, promise, null);
                    return true;
                }
            }
        }
        return false;
    }
Handles a redirect answer if needed and returns {@code true} if a redirect query has been made.
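In other words, a redirect is only attempted when the response carries no answers but does name authoritative servers. A stripped-down decision sketch with plain values (the counts and the followRedirect helper are purely illustrative, not the resolver's API):

public final class RedirectDecisionSketch {

    public static void main(String[] args) {
        // Hypothetical section counts taken from a received DNS response.
        int answerCount = 0;    // DnsSection.ANSWER
        int authorityCount = 2; // DnsSection.AUTHORITY (NS records)

        System.out.println(followRedirect(answerCount, authorityCount)); // true
        System.out.println(followRedirect(1, 2));                        // false: we already have answers
        System.out.println(followRedirect(0, 0));                        // false: nowhere to redirect to
    }

    static boolean followRedirect(int answerCount, int authorityCount) {
        // Only a response without answers but with authoritative name servers
        // triggers a follow-up query against those servers.
        return answerCount == 0 && authorityCount > 0;
    }
}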
private static AuthoritativeNameServerList extractAuthoritativeNameServers(String questionName, DnsResponse res) { int authorityCount = res.count(DnsSection.AUTHORITY); if (authorityCount == 0) { return null; } AuthoritativeNameServerList serverNames = new AuthoritativeNameServerList(questionName); for (int i = 0; i < authorityCount; i++) { serverNames.add(res.recordAt(DnsSection.AUTHORITY, i)); } return serverNames.isEmpty() ? null : serverNames; }
Returns the {@link AuthoritativeNameServerList} which was included in {@link DnsSection#AUTHORITY}, or {@code null} if none was found.
private static int maxOutputBufferLength(int inputLength) { double factor; if (inputLength < 200) { factor = 1.5; } else if (inputLength < 500) { factor = 1.2; } else if (inputLength < 1000) { factor = 1.1; } else if (inputLength < 10000) { factor = 1.05; } else { factor = 1.02; } return 13 + (int) (inputLength * factor); }
Calculates the maximum possible size of the output buffer for incompressible data.
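The step function above can be seen directly by tabulating a few input lengths. This small sketch simply re-implements the formula to show the bound it produces; it is a standalone illustration, not part of the compressor API.

public final class MaxOutputBufferLengthSketch {

    public static void main(String[] args) {
        for (int inputLength : new int[] {100, 400, 900, 5000, 100000}) {
            System.out.printf("input %6d -> worst-case output %6d%n",
                    inputLength, maxOutputBufferLength(inputLength));
        }
        // input    100 -> worst-case output    163   (1.5x  + 13)
        // input 100000 -> worst-case output 102013   (1.02x + 13)
    }

    static int maxOutputBufferLength(int inputLength) {
        double factor;
        if (inputLength < 200) {
            factor = 1.5;
        } else if (inputLength < 500) {
            factor = 1.2;
        } else if (inputLength < 1000) {
            factor = 1.1;
        } else if (inputLength < 10000) {
            factor = 1.05;
        } else {
            factor = 1.02;
        }
        return 13 + (int) (inputLength * factor);
    }
}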
public void register() throws Exception { ChannelFuture future = loop.register(this); assert future.isDone(); Throwable cause = future.cause(); if (cause != null) { PlatformDependent.throwException(cause); } }
Register this {@code Channel} on its {@link EventLoop}.
@SuppressWarnings("unchecked") public <T> T readInbound() { T message = (T) poll(inboundMessages); if (message != null) { ReferenceCountUtil.touch(message, "Caller of readInbound() will handle the message from this point"); } return message; }
Return received data from this {@link Channel}. This may return {@code null} if nothing is readable.
@SuppressWarnings("unchecked") public <T> T readOutbound() { T message = (T) poll(outboundMessages); if (message != null) { ReferenceCountUtil.touch(message, "Caller of readOutbound() will handle the message from this point."); } return message; }
Read data from the outbound. This may return {@code null} if nothing is readable.
public boolean writeInbound(Object... msgs) { ensureOpen(); if (msgs.length == 0) { return isNotEmpty(inboundMessages); } ChannelPipeline p = pipeline(); for (Object m: msgs) { p.fireChannelRead(m); } flushInbound(false, voidPromise()); return isNotEmpty(inboundMessages); }
Write messages to the inbound of this {@link Channel}. @param msgs the messages to be written @return {@code true} if the write operation did add something to the inbound buffer
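Taken together, writeInbound/readInbound and their outbound counterparts are what make EmbeddedChannel useful for unit-testing handlers without a real transport. A minimal sketch, assuming a trivial uppercasing handler written only for this example:

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.embedded.EmbeddedChannel;

public final class EmbeddedChannelSketch {

    // Hypothetical handler used only for this example: it uppercases every inbound
    // String and forwards the result further down the pipeline.
    static final class UpperCaseHandler extends SimpleChannelInboundHandler<String> {
        @Override
        protected void channelRead0(ChannelHandlerContext ctx, String msg) {
            ctx.fireChannelRead(msg.toUpperCase());
        }
    }

    public static void main(String[] args) {
        EmbeddedChannel channel = new EmbeddedChannel(new UpperCaseHandler());

        // writeInbound(...) returns true because the handler forwarded a message
        // that ended up in the channel's inbound buffer.
        boolean produced = channel.writeInbound("hello");
        String result = channel.readInbound();

        System.out.println(produced + " " + result); // true HELLO
        channel.finish();
    }
}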